diff --git a/.flake8 b/.flake8 new file mode 100644 index 000000000..fccb30a9e --- /dev/null +++ b/.flake8 @@ -0,0 +1,5 @@ +[flake8] +ignore = E203, E266, E501, W503, W504, E741, C901 +max-line-length = 88 +max-complexity = 18 +select = B,C,E,F,W,T4,B9,B950 diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 000000000..43d5feacd --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,32 @@ +# Pre-commit git hooks, run locally before every commit +# Init with +# $ pip install -r requirements-dev.txt +# $ pre-commit install + +repos: +- repo: https://github.com/pre-commit/pre-commit-hooks + rev: v2.1.0 + hooks: + - id: trailing-whitespace + - id: end-of-file-fixer + #- id: check-docstring-first + - id: check-json + #- id: check-added-large-files + - id: check-yaml + - id: debug-statements + #- id: name-tests-test + - id: double-quote-string-fixer + - id: requirements-txt-fixer +- repo: https://gitlab.com/pycqa/flake8 + rev: 3.7.1 + hooks: + - id: flake8 +- repo: https://github.com/pre-commit/mirrors-autopep8 + rev: v1.4.4 + hooks: + - id: autopep8 +- repo: https://github.com/asottile/reorder_python_imports + rev: v1.3.5 + hooks: + - id: reorder-python-imports + language_version: python3 diff --git a/API.json b/API.json index 962e8d3fb..edac5584f 100644 --- a/API.json +++ b/API.json @@ -1,5 +1,5 @@ { - "$schema": "http://json-schema.org/draft-03/schema#", + "$schema": "http://json-schema.org/draft-07/schema#", "title": "Kimchi API", "description": "Json schema for Kimchi API", "type": "object", @@ -9,15 +9,16 @@ "type": "object", "properties": { "type": { + "type": "string", "enum": ["spice", "vnc"], "error": "KCHVM0014E" }, "listen": { "error": "KCHVM0015E", - "type": [ + "anyOf": [ { "type": "string", - "format": "ip-address" + "format": "ipv4" }, { "type": "string", @@ -93,18 +94,20 @@ "interface": { "description": "Host network interface. This indicates how to configure the host network interface (Ethernet, Bond, VLAN) as direct macvtap or as OVS interface to a VM.", "type": "object", + "required": [ + "type", + "name" + ], "properties": { "type": { "description": "Host network interface type. Valid types are: 'macvtap' for host network interface (Ethernet, Bond, VLAN) to be connected as direct MacVTap or 'ovs' for openvswitch host network interface to be connected as virtual switch to a VM.", "type": "string", "pattern": "^(macvtap|ovs)$", - "required": "True", "error": "KCHTMPL0034E" }, "name": { "description": "The host network interface name. 
It should be name of host network interface(Ethernet, Bond, VLAN) for type 'macvtap' and name of host openvswitch bridge interface for type ovs", "type": "string", - "required": "True", "error": "KCHTMPL0035E" }, "mode": { @@ -121,6 +124,10 @@ "properties": { "storagepools_create": { "type": "object", + "required": [ + "name", + "type" + ], "error": "KCHPOOL0026E", "properties": { "name": { @@ -128,14 +135,12 @@ "type": "string", "minLength": 1, "pattern": "^[^/]*$", - "required": true, "error": "KCHPOOL0016E" }, "type": { "description": "The type of the defined Storage Pool", "type": "string", "pattern": "^dir|netfs|logical|kimchi-iso|iscsi|scsi$", - "required": true, "error": "KCHPOOL0017E" }, "path": { @@ -270,23 +275,28 @@ }, "storagevolume_update": { "type": "object", + "required": [ + "chunk", + "chunk_size" + ], "properties": { "chunk": { "description": "Upload storage volume chunk", - "error": "KCHVOL0024E", - "required": true + "error": "KCHVOL0024E" }, "chunk_size": { "description": "Chunk size of uploaded storage volume", "type": "string", - "error": "KCHVOL0024E", - "required": true + "error": "KCHVOL0024E" } }, "additionalProperties": false }, "vms_create": { "type": "object", + "required": [ + "template" + ], "error": "KCHVM0016E", "properties": { "name": { @@ -309,7 +319,6 @@ "description": "The URI of a template to use when building a VM", "type": "string", "pattern": "^/plugins/kimchi/templates/(.*?)/?$", - "required": true, "error": "KCHVM0012E" }, "storagepool": { @@ -430,11 +439,13 @@ }, "vm_migrate": { "type": "object", + "required": [ + "remote_host" + ], "properties": { "remote_host": { "description": "IP address or hostname of the remote server", "type": "string", - "required": true, "minLength": 1, "error": "KCHVM0060E" }, @@ -459,6 +470,10 @@ }, "networks_create": { "type": "object", + "required": [ + "name", + "connection" + ], "error": "KCHNET0016E", "properties": { "name": { @@ -466,14 +481,12 @@ "type": "string", "minLength": 1, "pattern": "^[^/\"]*$", - "required": true, "error": "KCHNET0011E" }, "connection": { "description": "Specifies how this network should be connected to the other networks", "type": "string", "pattern": "^isolated|nat|bridge|macvtap|vepa|passthrough$", - "required": true, "error": "KCHNET0012E" }, "subnet": { @@ -529,13 +542,15 @@ }, "vmifaces_create": { "type": "object", + "required": [ + "type" + ], "error": "KCHVMIF0007E", "properties": { "type": { "description": "The type of VM network interface that libvirt supports. Type 'macvtap' for host network interface (Ethernet, Bond, VLAN) to be connected as direct MacVTap or 'ovs' for openvswitch host network interface to be connected as virtual switch to a VM or 'network' for libvirt virtual network to be connected to VM. 
", "type": "string", "pattern": "^network|macvtap|ovs$", - "required": true, "error": "KCHVMIF0004E" }, "network": { @@ -582,6 +597,9 @@ }, "templates_create": { "type": "object", + "required": [ + "source_media" + ], "error": "KCHTMPL0016E", "properties": { "name": { @@ -612,12 +630,14 @@ "memory": { "$ref": "#/kimchitype/memory" }, "source_media": { "type" : "object", + "required": [ + "type" + ], "properties" : { "type": { "description": "Type of source media: disk or netboot", "type": "string", - "pattern": "^disk|netboot$", - "required": true + "pattern": "^disk|netboot$" }, "path": { "description": "Path for installation media (ISO, disk, remote ISO)", @@ -625,8 +645,7 @@ "pattern" : "^((/)|(http)[s]?:|[t]?(ftp)[s]?:)+.*$" } }, - "additionalProperties": false, - "required": true + "additionalProperties": false }, "disks": { "description": "List of disks", @@ -728,13 +747,15 @@ }, "vmstorages_create": { "type": "object", + "required": [ + "type" + ], "error": "KCHVMSTOR0012E", "properties": { "type": { "description": "The storage type", "type": "string", "pattern": "^cdrom|disk$", - "required": true, "error": "KCHVMSTOR0002E" }, "pool": { @@ -759,13 +780,15 @@ }, "vmstorage_update": { "type": "object", + "required": [ + "path" + ], "error": "KCHVMSTOR0013E", "properties": { "path": { "description": "Path of iso image file or disk mount point", "type": "string", "pattern": "^(|(/)|(http)[s]?:|[t]?(ftp)[s]?:)+.*$", - "required": true, "error": "KCHVMSTOR0003E" } }, @@ -909,12 +932,14 @@ }, "vmhostdevs_create": { "type": "object", + "required": [ + "name" + ], "properties": { "name": { "description": "Then name of the device to assign to VM", "type": "string", "pattern": "^[_A-Za-z0-9-]+$", - "required": true, "error": "KCHVMHDEV0004E" } }, diff --git a/IBM-license-blacklist b/IBM-license-blacklist deleted file mode 100644 index 78fd2cb4f..000000000 --- a/IBM-license-blacklist +++ /dev/null @@ -1,38 +0,0 @@ -.gitignore -ABOUT-NLS -API.json -AUTHORS -CONTRIBUTE.md -COPYING -COPYING.ASL2 -COPYING.LGPL -ChangeLog -INSTALL -VERSION -build-aux/config.rpath -build-aux/genChangelog -build-aux/pkg-version -config.rpath -contrib/DEBIAN/control.in -contrib/kimchi.spec.fedora.in -contrib/kimchi.spec.suse.in -contrib/kimchid.service..* -distros.d/.*.json -docs/.*.md -kimchi.conf -m4/.*.m4 -po/LINGUAS -po/Makefile.in.in -po/Makevars -po/POTFILES.in -po/kimchi.pot -template.conf -ui/config/tab-ext.xml -ui/images/.*.svg -ui/pages/help/dita-help.xsl -ui/pages/help/.*/.*.dita -ui/robots.txt -ui/serial/libs/.*.js -ui/spice-html5/.*.js -ui/spice-html5/css/spice.css -ui/spice-html5/pages/spice_auto.html diff --git a/Makefile.am b/Makefile.am index 3fb7731a6..bde34bd8e 100644 --- a/Makefile.am +++ b/Makefile.am @@ -86,8 +86,6 @@ check-local: git grep --cached -Il '' | grep -v '^ui/spice-html5/' | \ xargs egrep '.* +$$' \ && echo "ERROR: Whitespaces found" || echo "Ok"; \ - echo "IBM copyright year verification ..." 
; \ - /bin/bash ../../../../check-IBM-license-header.sh ; \ fi; @if [ -f $(RPMLINT) ]; then \ ./check_spec_errors.sh; \ diff --git a/__init__.py b/__init__.py index 067527187..a3936525b 100644 --- a/__init__.py +++ b/__init__.py @@ -16,6 +16,5 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - from wok.plugins.kimchi.root import Kimchi __all__ = [Kimchi] diff --git a/config.py.in b/config.py.in index cbc8b3230..f22aafc76 100644 --- a/config.py.in +++ b/config.py.in @@ -77,7 +77,7 @@ config = get_config() def find_qemu_binary(find_emulator=False): try: connect = libvirt.open(None) - except Exception, e: + except Exception as e: raise Exception("Unable to get qemu binary location: %s" % e) try: xml = connect.getCapabilities() @@ -96,7 +96,7 @@ def find_qemu_binary(find_emulator=False): /domain[@type='kvm']/emulator" % arch res = xpath_get_text(xml, expr) location = res[0] - except Exception, e: + except Exception as e: raise Exception("Unable to get qemu binary location: %s" % e) finally: connect.close() @@ -175,7 +175,7 @@ class KimchiConfig(PluginConfig): 'tools.staticdir.dir': get_virtviewerfiles_path(), 'tools.staticdir.on': True}} - for uri, data in static_config.iteritems(): + for uri, data in static_config.items(): custom_config[uri] = {'tools.nocache.on': True, 'tools.wokauth.on': True} path = data['path'] diff --git a/configure.ac b/configure.ac index cec47b7ba..ff7add7a1 100644 --- a/configure.ac +++ b/configure.ac @@ -33,7 +33,7 @@ AS_IF([test "x$PACKAGE_RELEASE" = x], AC_CONFIG_AUX_DIR([build-aux]) AM_INIT_AUTOMAKE([-Wno-portability]) -AM_PATH_PYTHON([2.6]) +AM_PATH_PYTHON([3.6]) AC_PATH_PROG([PEP8], [pep8], [/usr/bin/pep8]) AC_PATH_PROG([GIT], [git], [/usr/bin/git]) AC_PATH_PROG([RPMLINT], [rpmlint], [/usr/bin/rpmlint]) diff --git a/contrib/check_i18n.py b/contrib/check_i18n.py index 86712fa44..6a882cdc0 100755 --- a/contrib/check_i18n.py +++ b/contrib/check_i18n.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python2 +#!/usr/bin/env python3 # # Project Kimchi # @@ -17,36 +17,35 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - -import imp +import importlib import os import re import sys # Match all conversion specifier with mapping key -PATTERN = re.compile(r'''%\([^)]+\) # Mapping key +PATTERN = re.compile( + r"""%\([^)]+\) # Mapping key [#0\-+]? # Conversion flags (optional) (\d+|\*)? # Minimum field width (optional) (\.(\d+|\*))? # Precision (optional) [lLh]? 
# Length modifier (optional) - [cdeEfFgGioursxX%] # Conversion type''', - re.VERBOSE) -BAD_PATTERN = re.compile(r"%\([^)]*?\)") + [cdeEfFgGioursxX%] # Conversion type""", + re.VERBOSE, +) +BAD_PATTERN = re.compile(r'%\([^)]*?\)') def load_i18n_module(i18nfile): - path = os.path.dirname(i18nfile) - mname = i18nfile.replace("./", "_").replace("/", "_").rstrip(".py") - mobj = imp.find_module("i18n", [path]) - return imp.load_module(mname, *mobj) + mname = i18nfile.replace('/', '.').rstrip('.py').lstrip('src.') + return importlib.import_module(mname) def check_string_formatting(messages): - for k, v in messages.iteritems(): - if BAD_PATTERN.findall(PATTERN.sub(" ", v)): - print "bad i18n string formatting:" - print " %s: %s" % (k, v) + for k, v in messages.items(): + if BAD_PATTERN.findall(PATTERN.sub(' ', v)): + print('bad i18n string formatting:') + print(f' {k}: {v}') exit(1) @@ -55,27 +54,30 @@ def find_message_key(path, k): for root, dirs, files in os.walk(path): for f in files: fname = os.path.join(root, f) - if (not fname.endswith("i18n.py") and fname.endswith(".py") or - fname.endswith(".json")): + if ( + not fname.endswith('i18n.py') + and fname.endswith('.py') + or fname.endswith('.json') + ): with open(fname) as f: - string = "".join(f.readlines()) + string = ''.join(f.readlines()) if k in string: return True return False - for k in messages.iterkeys(): + for k in messages.keys(): if not find_message_key(path, k): - print " %s is obsolete, it is no longer in use" % k + print(f' {k} is obsolete, it is no longer in use') exit(1) def main(): - print "Checking for invalid i18n string..." + print('Checking for invalid i18n string...') for f in sys.argv[1:]: messages = load_i18n_module(f).messages check_string_formatting(messages) check_obsolete_messages(os.path.dirname(f), messages) - print "Checking for invalid i18n string successfully" + print('Checking for invalid i18n string successfully') if __name__ == '__main__': diff --git a/control/__init__.py b/control/__init__.py index 022f9739b..2880d4b93 100644 --- a/control/__init__.py +++ b/control/__init__.py @@ -16,10 +16,8 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - import os - from wok.control.utils import load_url_sub_node diff --git a/control/config.py b/control/config.py index 21ec1b7d5..65758282e 100644 --- a/control/config.py +++ b/control/config.py @@ -16,12 +16,12 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - -from wok.control.base import Collection, Resource +from wok.control.base import Collection +from wok.control.base import Resource from wok.control.utils import UrlSubNode -@UrlSubNode("config") +@UrlSubNode('config') class Config(Resource): def __init__(self, model, id=None): super(Config, self).__init__(model, id) diff --git a/control/cpuinfo.py b/control/cpuinfo.py index 322ff8fb9..8d493cb01 100644 --- a/control/cpuinfo.py +++ b/control/cpuinfo.py @@ -16,8 +16,6 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - - from wok.control.base import Resource @@ -25,7 +23,7 @@ class CPUInfo(Resource): def 
__init__(self, model): super(CPUInfo, self).__init__(model) self.admin_methods = ['GET'] - self.uri_fmt = "/host/cpuinfo" + self.uri_fmt = '/host/cpuinfo' @property def data(self): diff --git a/control/groups.py b/control/groups.py index 1e037bd91..63279e2ed 100644 --- a/control/groups.py +++ b/control/groups.py @@ -16,7 +16,6 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - from wok.control.base import SimpleCollection from wok.control.utils import UrlSubNode diff --git a/control/host.py b/control/host.py index 787289574..fa4bb79ec 100644 --- a/control/host.py +++ b/control/host.py @@ -16,14 +16,13 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - import platform from wok.control.base import Collection -from wok.control.base import Resource, SimpleCollection +from wok.control.base import Resource +from wok.control.base import SimpleCollection from wok.control.utils import UrlSubNode from wok.exception import NotFoundError - from wok.plugins.kimchi.control.cpuinfo import CPUInfo from wok.plugins.kimchi.utils import is_s390x @@ -49,7 +48,7 @@ def data(self): class VolumeGroups(Collection): def __init__(self, model): super(VolumeGroups, self).__init__(model) - self.uri_fmt = "/host/vgs" + self.uri_fmt = '/host/vgs' self.admin_methods = ['GET'] self.resource = VolumeGroup @@ -57,7 +56,7 @@ def __init__(self, model): class VolumeGroup(Resource): def __init__(self, model, id=None): super(VolumeGroup, self).__init__(model, id) - self.uri_fmt = "/host/vgs/%s" + self.uri_fmt = '/host/vgs/%s' self.admin_methods = ['GET'] @property @@ -68,7 +67,7 @@ def data(self): class VMHolders(SimpleCollection): def __init__(self, model, device_id): super(VMHolders, self).__init__(model) - self.model_args = (device_id, ) + self.model_args = (device_id,) class Devices(Collection): @@ -97,15 +96,22 @@ def __init__(self, model): # sorted by their path def _get_resources(self, flag_filter): res_list = super(Partitions, self)._get_resources(flag_filter) - res_list = filter(lambda x: x.info['available'], res_list) + res_list = list(filter(lambda x: x.info['available'], res_list)) if is_s390x(): # On s390x arch filter out the DASD block devices which # don't have any partition(s). 
This is necessary because # DASD devices without any partitions are not valid # block device(s) for operations like pvcreate on s390x - res_list = filter(lambda x: (x.info['name'].startswith( - 'dasd') and x.info['type'] == 'part') or ( - not x.info['name'].startswith('dasd')), res_list) + res_list = list( + filter( + lambda x: ( + x.info['name'].startswith( + 'dasd') and x.info['type'] == 'part' + ) or + (not x.info['name'].startswith('dasd')), + res_list, + ) + ) res_list.sort(key=lambda x: x.info['path']) return res_list @@ -118,6 +124,6 @@ def __init__(self, model, id): @property def data(self): if not self.info['available']: - raise NotFoundError("KCHPART0001E", {'name': self.info['name']}) + raise NotFoundError('KCHPART0001E', {'name': self.info['name']}) return self.info diff --git a/control/interfaces.py b/control/interfaces.py index 7aba66489..d067d8b5a 100644 --- a/control/interfaces.py +++ b/control/interfaces.py @@ -16,8 +16,8 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - -from wok.control.base import Collection, Resource +from wok.control.base import Collection +from wok.control.base import Resource from wok.control.utils import UrlSubNode @@ -33,7 +33,7 @@ class Interface(Resource): def __init__(self, model, ident): super(Interface, self).__init__(model, ident) self.admin_methods = ['GET'] - self.uri_fmt = "/interfaces/%s" + self.uri_fmt = '/interfaces/%s' @property def data(self): diff --git a/control/networks.py b/control/networks.py index d74387345..d61205c1d 100644 --- a/control/networks.py +++ b/control/networks.py @@ -16,21 +16,21 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - -from wok.control.base import Collection, Resource +from wok.control.base import Collection +from wok.control.base import Resource from wok.control.utils import UrlSubNode NETWORKS_REQUESTS = { - 'POST': {'default': "KCHNET0001L"}, + 'POST': {'default': 'KCHNET0001L'}, } NETWORK_REQUESTS = { - 'DELETE': {'default': "KCHNET0002L"}, - 'PUT': {'default': "KCHNET0003L"}, + 'DELETE': {'default': 'KCHNET0002L'}, + 'PUT': {'default': 'KCHNET0003L'}, 'POST': { - 'activate': "KCHNET0004L", - 'deactivate': "KCHNET0005L", + 'activate': 'KCHNET0004L', + 'deactivate': 'KCHNET0005L', }, } @@ -51,7 +51,7 @@ class Network(Resource): def __init__(self, model, ident): super(Network, self).__init__(model, ident) self.admin_methods = ['PUT', 'POST', 'DELETE'] - self.uri_fmt = "/networks/%s" + self.uri_fmt = '/networks/%s' self.activate = self.generate_action_handler('activate') self.deactivate = self.generate_action_handler('deactivate', destructive=True) diff --git a/control/ovsbridges.py b/control/ovsbridges.py index 9e2078429..7e11e2fa7 100644 --- a/control/ovsbridges.py +++ b/control/ovsbridges.py @@ -16,12 +16,11 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - from wok.control.base import SimpleCollection from wok.control.utils import UrlSubNode -@UrlSubNode("ovsbridges", True) +@UrlSubNode('ovsbridges', True) class OVSBridges(SimpleCollection): def __init__(self, model): super(OVSBridges, 
self).__init__(model) diff --git a/control/storagepools.py b/control/storagepools.py index 26aab7dc2..06b654b2d 100644 --- a/control/storagepools.py +++ b/control/storagepools.py @@ -16,30 +16,24 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - import cherrypy - -from wok.control.base import Collection, Resource -from wok.control.utils import get_class_name, model_fn -from wok.control.utils import validate_params +from wok.control.base import Collection +from wok.control.base import Resource +from wok.control.utils import get_class_name +from wok.control.utils import model_fn from wok.control.utils import UrlSubNode - +from wok.control.utils import validate_params from wok.plugins.kimchi.control.storagevolumes import IsoVolumes from wok.plugins.kimchi.control.storagevolumes import StorageVolumes from wok.plugins.kimchi.model.storagepools import ISO_POOL_NAME -STORAGEPOOLS_REQUESTS = { - 'POST': {'default': "KCHPOOL0001L"}, -} +STORAGEPOOLS_REQUESTS = {'POST': {'default': 'KCHPOOL0001L'}} STORAGEPOOL_REQUESTS = { - 'DELETE': {'default': "KCHPOOL0002L"}, - 'PUT': {'default': "KCHPOOL0003L"}, - 'POST': { - 'activate': "KCHPOOL0004L", - 'deactivate': "KCHPOOL0005L", - }, + 'DELETE': {'default': 'KCHPOOL0002L'}, + 'PUT': {'default': 'KCHPOOL0003L'}, + 'POST': {'activate': 'KCHPOOL0004L', 'deactivate': 'KCHPOOL0005L'}, } @@ -95,27 +89,29 @@ class StoragePool(Resource): def __init__(self, model, ident): super(StoragePool, self).__init__(model, ident) self.admin_methods = ['PUT', 'POST', 'DELETE'] - self.uri_fmt = "/storagepools/%s" + self.uri_fmt = '/storagepools/%s' self.activate = self.generate_action_handler('activate') - self.deactivate = self.generate_action_handler('deactivate', - destructive=True) + self.deactivate = self.generate_action_handler( + 'deactivate', destructive=True) self.storagevolumes = StorageVolumes(self.model, ident) self.log_map = STORAGEPOOL_REQUESTS @property def data(self): - res = {'name': self.ident, - 'state': self.info['state'], - 'capacity': self.info['capacity'], - 'allocated': self.info['allocated'], - 'available': self.info['available'], - 'path': self.info['path'], - 'source': self.info['source'], - 'type': self.info['type'], - 'nr_volumes': self.info['nr_volumes'], - 'autostart': self.info['autostart'], - 'persistent': self.info['persistent'], - 'in_use': self.info['in_use']} + res = { + 'name': self.ident, + 'state': self.info['state'], + 'capacity': self.info['capacity'], + 'allocated': self.info['allocated'], + 'available': self.info['available'], + 'path': self.info['path'], + 'source': self.info['source'], + 'type': self.info['type'], + 'nr_volumes': self.info['nr_volumes'], + 'autostart': self.info['autostart'], + 'persistent': self.info['persistent'], + 'in_use': self.info['in_use'], + } val = self.info.get('task_id') if val: @@ -131,6 +127,8 @@ def __init__(self, model): @property def data(self): - return {'name': self.ident, - 'state': self.info['state'], - 'type': self.info['type']} + return { + 'name': self.ident, + 'state': self.info['state'], + 'type': self.info['type'], + } diff --git a/control/storageservers.py b/control/storageservers.py index 9d18514d9..e741f6cbb 100644 --- a/control/storageservers.py +++ b/control/storageservers.py @@ -16,10 +16,12 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the 
Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - from wok import template -from wok.control.base import Collection, Resource -from wok.control.utils import get_class_name, model_fn, UrlSubNode +from wok.control.base import Collection +from wok.control.base import Resource +from wok.control.utils import get_class_name +from wok.control.utils import model_fn +from wok.control.utils import UrlSubNode @UrlSubNode('storageservers', True) @@ -34,8 +36,7 @@ class StorageServer(Resource): def __init__(self, model, ident): super(StorageServer, self).__init__(model, ident) self.admin_methods = ['GET'] - self.storagetargets = StorageTargets(self.model, - self.ident.decode("utf-8")) + self.storagetargets = StorageTargets(self.model, self.ident) @property def data(self): @@ -47,8 +48,8 @@ def __init__(self, model, server): super(StorageTargets, self).__init__(model) self.admin_methods = ['GET'] self.server = server - self.resource_args = [self.server, ] - self.model_args = [self.server, ] + self.resource_args = [self.server] + self.model_args = [self.server] def get(self, filter_params): res_list = [] diff --git a/control/storagevolumes.py b/control/storagevolumes.py index a27396f3d..8043d2805 100644 --- a/control/storagevolumes.py +++ b/control/storagevolumes.py @@ -16,24 +16,20 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - from wok import template -from wok.control.base import AsyncCollection, Collection, Resource -from wok.control.utils import get_class_name, model_fn +from wok.control.base import AsyncCollection +from wok.control.base import Collection +from wok.control.base import Resource +from wok.control.utils import get_class_name +from wok.control.utils import model_fn -STORAGEVOLUMES_REQUESTS = { - 'POST': {'default': "KCHVOL0001L"}, -} +STORAGEVOLUMES_REQUESTS = {'POST': {'default': 'KCHVOL0001L'}} STORAGEVOLUME_REQUESTS = { - 'DELETE': {'default': "KCHVOL0002L"}, - 'PUT': {'default': "KCHVOL0003L"}, - 'POST': { - 'wipe': "KCHVOL0004L", - 'resize': "KCHVOL0005L", - 'clone': "KCHVOL0006L", - }, + 'DELETE': {'default': 'KCHVOL0002L'}, + 'PUT': {'default': 'KCHVOL0003L'}, + 'POST': {'wipe': 'KCHVOL0004L', 'resize': 'KCHVOL0005L', 'clone': 'KCHVOL0006L'}, } @@ -42,19 +38,16 @@ def __init__(self, model, pool): super(StorageVolumes, self).__init__(model) self.resource = StorageVolume self.pool = pool - self.resource_args = [self.pool, ] - self.model_args = [self.pool, ] + self.resource_args = [self.pool] + self.model_args = [self.pool] self.log_map = STORAGEVOLUMES_REQUESTS - self.log_args.update({ - 'name': '', - 'pool': self.pool.encode('utf-8') if self.pool else '', - }) + self.log_args.update( + {'name': '', 'pool': self.pool if self.pool else ''}) def filter_data(self, resources, fields_filter): # filter directory from storage volumes fields_filter.update({'type': ['file', 'block', 'network']}) - return super(StorageVolumes, self).filter_data(resources, - fields_filter) + return super(StorageVolumes, self).filter_data(resources, fields_filter) class StorageVolume(Resource): @@ -71,22 +64,22 @@ def __init__(self, model, pool, ident): # set user log messages and make sure all parameters are present self.log_map = STORAGEVOLUME_REQUESTS - self.log_args.update({ - 'pool': self.pool.encode('utf-8') if self.pool else '', - 'size': '', - }) + self.log_args.update( + {'pool': 
self.pool if self.pool else '', 'size': ''}) @property def data(self): - res = {'name': self.ident, - 'type': self.info['type'], - 'capacity': self.info['capacity'], - 'allocation': self.info['allocation'], - 'path': self.info['path'], - 'used_by': self.info['used_by'], - 'format': self.info['format'], - 'isvalid': self.info['isvalid'], - 'has_permission': self.info['has_permission']} + res = { + 'name': self.ident, + 'type': self.info['type'], + 'capacity': self.info['capacity'], + 'allocation': self.info['allocation'], + 'path': self.info['path'], + 'used_by': self.info['used_by'], + 'format': self.info['format'], + 'isvalid': self.info['isvalid'], + 'has_permission': self.info['has_permission'], + } for key in ('os_version', 'os_distro', 'bootable', 'base'): val = self.info.get(key) diff --git a/control/templates.py b/control/templates.py index a5d7dd5fa..e1bd08ec8 100644 --- a/control/templates.py +++ b/control/templates.py @@ -16,22 +16,22 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - import os -from wok.control.base import Collection, Resource +from wok.control.base import Collection +from wok.control.base import Resource from wok.control.utils import UrlSubNode TEMPLATES_REQUESTS = { - 'POST': {'default': "KCHTMPL0001L"}, + 'POST': {'default': 'KCHTMPL0001L'}, } TEMPLATE_REQUESTS = { - 'DELETE': {'default': "KCHTMPL0002L"}, - 'PUT': {'default': "KCHTMPL0003L"}, + 'DELETE': {'default': 'KCHTMPL0002L'}, + 'PUT': {'default': 'KCHTMPL0003L'}, 'POST': { - 'clone': "KCHTMPL0004L", + 'clone': 'KCHTMPL0004L', }, } @@ -52,7 +52,7 @@ class Template(Resource): def __init__(self, model, ident): super(Template, self).__init__(model, ident) self.admin_methods = ['PUT', 'POST', 'DELETE'] - self.uri_fmt = "/templates/%s" + self.uri_fmt = '/templates/%s' self.clone = self.generate_action_handler('clone') self.log_map = TEMPLATE_REQUESTS diff --git a/control/users.py b/control/users.py index c71b621df..b0e6a9e60 100644 --- a/control/users.py +++ b/control/users.py @@ -16,9 +16,10 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - from wok.control.base import SimpleCollection -from wok.control.utils import get_class_name, model_fn, UrlSubNode +from wok.control.utils import get_class_name +from wok.control.utils import model_fn +from wok.control.utils import UrlSubNode from wok.template import render diff --git a/control/vm/__init__.py b/control/vm/__init__.py index 022f9739b..2880d4b93 100644 --- a/control/vm/__init__.py +++ b/control/vm/__init__.py @@ -16,10 +16,8 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - import os - from wok.control.utils import load_url_sub_node diff --git a/control/vm/hostdevs.py b/control/vm/hostdevs.py index c39d7fbfd..8a93777e7 100644 --- a/control/vm/hostdevs.py +++ b/control/vm/hostdevs.py @@ -16,37 +16,29 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - -from wok.control.base import 
AsyncCollection, AsyncResource +from wok.control.base import AsyncCollection +from wok.control.base import AsyncResource from wok.control.utils import UrlSubNode -VMHOSTDEVS_REQUESTS = { - 'POST': {'default': "KCHVMHDEV0001L"}, -} +VMHOSTDEVS_REQUESTS = {'POST': {'default': 'KCHVMHDEV0001L'}} -VMHOSTDEV_REQUESTS = { - 'DELETE': { - 'default': "KCHVMHDEV0002L", - }, -} +VMHOSTDEV_REQUESTS = {'DELETE': {'default': 'KCHVMHDEV0002L'}} -@UrlSubNode("hostdevs") +@UrlSubNode('hostdevs') class VMHostDevs(AsyncCollection): def __init__(self, model, vmid): super(VMHostDevs, self).__init__(model) self.resource = VMHostDev self.vmid = vmid - self.resource_args = [self.vmid, ] - self.model_args = [self.vmid, ] + self.resource_args = [self.vmid] + self.model_args = [self.vmid] # set user log messages and make sure all parameters are present self.log_map = VMHOSTDEVS_REQUESTS - self.log_args.update({ - 'name': '', - 'vmid': self.vmid.encode('utf-8') if self.vmid else '', - }) + self.log_args.update( + {'name': '', 'vmid': self.vmid if self.vmid else ''}) class VMHostDev(AsyncResource): @@ -58,9 +50,7 @@ def __init__(self, model, vmid, ident): # set user log messages and make sure all parameters are present self.log_map = VMHOSTDEV_REQUESTS - self.log_args.update({ - 'vmid': self.vmid.encode('utf-8') if self.vmid else '', - }) + self.log_args.update({'vmid': self.vmid if self.vmid else ''}) @property def data(self): diff --git a/control/vm/ifaces.py b/control/vm/ifaces.py index cd9bd31ad..1bdf03ae2 100644 --- a/control/vm/ifaces.py +++ b/control/vm/ifaces.py @@ -16,42 +16,31 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - -from wok.control.base import Collection, Resource +from wok.control.base import Collection +from wok.control.base import Resource from wok.control.utils import UrlSubNode -VMIFACES_REQUESTS = { - 'POST': { - 'default': "KCHVMIF0001L", - }, -} +VMIFACES_REQUESTS = {'POST': {'default': 'KCHVMIF0001L'}} VMIFACE_REQUESTS = { - 'DELETE': { - 'default': "KCHVMIF0002L", - }, - 'PUT': { - 'default': "KCHVMIF0003L", - }, + 'DELETE': {'default': 'KCHVMIF0002L'}, + 'PUT': {'default': 'KCHVMIF0003L'}, } -@UrlSubNode("ifaces") +@UrlSubNode('ifaces') class VMIfaces(Collection): def __init__(self, model, vm): super(VMIfaces, self).__init__(model) self.resource = VMIface self.vm = vm - self.resource_args = [self.vm, ] - self.model_args = [self.vm, ] + self.resource_args = [self.vm] + self.model_args = [self.vm] # set user log messages and make sure all parameters are present self.log_map = VMIFACES_REQUESTS - self.log_args.update({ - 'network': '', - 'vm': self.vm.encode('utf-8') if self.vm else '', - }) + self.log_args.update({'network': '', 'vm': self.vm if self.vm else ''}) class VMIface(Resource): @@ -65,9 +54,7 @@ def __init__(self, model, vm, ident): # set user log messages and make sure all parameters are present self.log_map = VMIFACE_REQUESTS - self.log_args.update({ - 'vm': self.vm.encode('utf-8') if self.vm else '', - }) + self.log_args.update({'vm': self.vm if self.vm else ''}) @property def data(self): diff --git a/control/vm/snapshots.py b/control/vm/snapshots.py index d66fa0615..8e314988a 100644 --- a/control/vm/snapshots.py +++ b/control/vm/snapshots.py @@ -16,20 +16,16 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # 
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - -from wok.control.base import AsyncCollection, Resource +from wok.control.base import AsyncCollection +from wok.control.base import Resource from wok.control.utils import UrlSubNode -VMSNAPSHOTS_REQUESTS = { - 'POST': {'default': "KCHSNAP0001L"}, -} +VMSNAPSHOTS_REQUESTS = {'POST': {'default': 'KCHSNAP0001L'}} VMSNAPSHOT_REQUESTS = { - 'DELETE': {'default': "KCHSNAP0002L"}, - 'POST': { - 'revert': "KCHSNAP0003L", - }, + 'DELETE': {'default': 'KCHSNAP0002L'}, + 'POST': {'revert': 'KCHSNAP0003L'}, } @@ -39,16 +35,13 @@ def __init__(self, model, vm): super(VMSnapshots, self).__init__(model) self.resource = VMSnapshot self.vm = vm - self.resource_args = [self.vm, ] - self.model_args = [self.vm, ] + self.resource_args = [self.vm] + self.model_args = [self.vm] self.current = CurrentVMSnapshot(model, vm) # set user log messages and make sure all parameters are present self.log_map = VMSNAPSHOTS_REQUESTS - self.log_args.update({ - 'vm': self.vm.encode('utf-8') if self.vm else '', - 'name': '', - }) + self.log_args.update({'vm': self.vm if self.vm else '', 'name': ''}) class VMSnapshot(Resource): @@ -62,9 +55,7 @@ def __init__(self, model, vm, ident): # set user log messages and make sure all parameters are present self.log_map = VMSNAPSHOT_REQUESTS - self.log_args.update({ - 'vm': self.vm.encode('utf-8') if self.vm else '', - }) + self.log_args.update({'vm': self.vm if self.vm else ''}) @property def data(self): diff --git a/control/vm/storages.py b/control/vm/storages.py index 5ef5f1a26..bef57aa2b 100644 --- a/control/vm/storages.py +++ b/control/vm/storages.py @@ -16,39 +16,32 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - -from wok.control.base import Collection, Resource +from wok.control.base import Collection +from wok.control.base import Resource from wok.control.utils import UrlSubNode -VMSTORAGES_REQUESTS = { - 'POST': { - 'default': "KCHVMSTOR0001L", - }, -} +VMSTORAGES_REQUESTS = {'POST': {'default': 'KCHVMSTOR0001L'}} VMSTORAGE_REQUESTS = { - 'DELETE': {'default': "KCHVMSTOR0002L"}, - 'PUT': {'default': "KCHVMSTOR0003L"}, + 'DELETE': {'default': 'KCHVMSTOR0002L'}, + 'PUT': {'default': 'KCHVMSTOR0003L'}, } -@UrlSubNode("storages") +@UrlSubNode('storages') class VMStorages(Collection): def __init__(self, model, vm): super(VMStorages, self).__init__(model) self.resource = VMStorage self.vm = vm - self.resource_args = [self.vm, ] - self.model_args = [self.vm, ] + self.resource_args = [self.vm] + self.model_args = [self.vm] # set user log messages and make sure all parameters are present self.log_map = VMSTORAGES_REQUESTS - self.log_args.update({ - 'vm': self.vm.encode('utf-8') if self.vm else '', - 'path': '', - 'type': '', - }) + self.log_args.update( + {'vm': self.vm if self.vm else '', 'path': '', 'type': ''}) class VMStorage(Resource): @@ -62,9 +55,7 @@ def __init__(self, model, vm, ident): # set user log messages and make sure all parameters are present self.log_map = VMSTORAGE_REQUESTS - self.log_args.update({ - 'vm': self.vm.encode('utf-8') if self.vm else '', - }) + self.log_args.update({'vm': self.vm if self.vm else ''}) @property def data(self): diff --git a/control/vms.py b/control/vms.py index 7aefb54be..c0605b34e 100644 --- a/control/vms.py +++ b/control/vms.py @@ -16,33 +16,33 @@ # You should have received a copy of the GNU 
Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - -from wok.control.base import AsyncCollection, Resource -from wok.control.utils import internal_redirect, UrlSubNode - +from wok.control.base import AsyncCollection +from wok.control.base import Resource +from wok.control.utils import internal_redirect +from wok.control.utils import UrlSubNode from wok.plugins.kimchi.control.vm import sub_nodes VMS_REQUESTS = { 'POST': { - 'default': "KCHVM0001L", + 'default': 'KCHVM0001L', }, } VM_REQUESTS = { - 'DELETE': {'default': "KCHVM0002L"}, - 'PUT': {'default': "KCHVM0003L"}, + 'DELETE': {'default': 'KCHVM0002L'}, + 'PUT': {'default': 'KCHVM0003L'}, 'POST': { - 'start': "KCHVM0004L", - 'poweroff': "KCHVM0005L", - 'shutdown': "KCHVM0006L", - 'reset': "KCHVM0007L", - 'connect': "KCHVM0008L", - 'clone': "KCHVM0009L", - 'migrate': "KCHVM0010L", - 'suspend': "KCHVM0011L", - 'resume': "KCHVM0012L", - 'serial': "KCHVM0013L", + 'start': 'KCHVM0004L', + 'poweroff': 'KCHVM0005L', + 'shutdown': 'KCHVM0006L', + 'reset': 'KCHVM0007L', + 'connect': 'KCHVM0008L', + 'clone': 'KCHVM0009L', + 'migrate': 'KCHVM0010L', + 'suspend': 'KCHVM0011L', + 'resume': 'KCHVM0012L', + 'serial': 'KCHVM0013L', }, } diff --git a/disks.py b/disks.py index e86ca2f1c..c986e3777 100644 --- a/disks.py +++ b/disks.py @@ -18,46 +18,46 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - import os.path import re + from parted import Device as PDevice from parted import Disk as PDisk - -from wok.exception import NotFoundError, OperationFailed +from wok.exception import NotFoundError +from wok.exception import OperationFailed from wok.stringutils import encode_value -from wok.utils import run_command, wok_log +from wok.utils import run_command +from wok.utils import wok_log def _get_dev_node_path(maj_min): """ Returns device node path given the device number 'major:min' """ - dm_name = "/sys/dev/block/%s/dm/name" % maj_min + dm_name = '/sys/dev/block/%s/dm/name' % maj_min if os.path.exists(dm_name): with open(dm_name) as dm_f: content = dm_f.read().rstrip('\n') - return "/dev/mapper/" + content + return '/dev/mapper/' + content - uevent = "/sys/dev/block/%s/uevent" % maj_min + uevent = '/sys/dev/block/%s/uevent' % maj_min with open(uevent) as ueventf: content = ueventf.read() - data = dict(re.findall(r'(\S+)=(".*?"|\S+)', content.replace("\n", " "))) + data = dict(re.findall(r'(\S+)=(".*?"|\S+)', content.replace('\n', ' '))) - return "/dev/%s" % data["DEVNAME"] + return '/dev/%s' % data['DEVNAME'] def _get_lsblk_devs(keys, devs=None): if devs is None: devs = [] out, err, returncode = run_command( - ["lsblk", "-Pbo"] + [','.join(keys)] + devs - ) + ['lsblk', '-Pbo'] + [','.join(keys)] + devs) if returncode != 0: if 'not a block device' in err: - raise NotFoundError("KCHDISK00002E") + raise NotFoundError('KCHDISK00002E') else: - raise OperationFailed("KCHDISK00001E", {'err': err}) + raise OperationFailed('KCHDISK00001E', {'err': err}) return _parse_lsblk_output(out, keys) @@ -65,10 +65,10 @@ def _get_lsblk_devs(keys, devs=None): def _get_dev_major_min(name): maj_min = None - keys = ["NAME", "MAJ:MIN"] + keys = ['NAME', 'MAJ:MIN'] try: dev_list = _get_lsblk_devs(keys) - except: + except Exception: raise for dev in dev_list: @@ -76,7 +76,7 @@ def 
_get_dev_major_min(name):
             maj_min = dev['maj:min']
             break
     else:
-        raise NotFoundError("KCHDISK00003E", {'device': name})
+        raise NotFoundError('KCHDISK00003E', {'device': name})
     return maj_min
@@ -90,13 +90,11 @@ def _is_dev_leaf(devNodePath, name=None, devs=None, devtype=None):
             return True
         # By default, lsblk prints a device information followed by children
         # device information
-        childrenCount = len(
-            _get_lsblk_devs(["NAME"], [devNodePath])) - 1
+        childrenCount = len(_get_lsblk_devs(['NAME'], [devNodePath])) - 1
     except OperationFailed as e:
         # lsblk is known to fail on multipath devices
         # Assume these devices contain children
-        wok_log.error(
-            "Error getting device info for %s: %s", devNodePath, e)
+        wok_log.error('Error getting device info for %s: %s', devNodePath, e)
         return False
     return childrenCount == 0
@@ -108,19 +106,19 @@ def _is_dev_extended_partition(devType, devNodePath):
     if devNodePath.startswith('/dev/mapper'):
         try:
-            dev_maj_min = _get_dev_major_min(devNodePath.split("/")[-1])
+            dev_maj_min = _get_dev_major_min(devNodePath.split('/')[-1])
             parent_sys_path = '/sys/dev/block/' + dev_maj_min + '/slaves'
             parent_dm_name = os.listdir(parent_sys_path)[0]
-            parent_maj_min = open(
-                parent_sys_path +
-                '/' +
-                parent_dm_name +
-                '/dev').readline().rstrip()
+            parent_maj_min = (
+                open(parent_sys_path + '/' + parent_dm_name + '/dev')
+                .readline()
+                .rstrip()
+            )
             diskPath = _get_dev_node_path(parent_maj_min)
         except Exception as e:
             wok_log.error(
-                "Error dealing with dev mapper device: " + devNodePath)
-            raise OperationFailed("KCHDISK00001E", {'err': e.message})
+                'Error dealing with dev mapper device: ' + devNodePath)
+            raise OperationFailed('KCHDISK00001E', {'err': e.message})
     else:
         diskPath = devNodePath.rstrip('0123456789')
@@ -129,8 +127,11 @@ def _is_dev_extended_partition(devType, devNodePath):
         extended_part = PDisk(device).getExtendedPartition()
     except NotImplementedError as e:
         wok_log.warning(
-            "Error getting extended partition info for dev %s type %s: %s",
-            devNodePath, devType, e.message)
+            'Error getting extended partition info for dev %s type %s: %s',
+            devNodePath,
+            devType,
+            e.message,
+        )
         # Treat disk with unsupported partition table as if it does not
         # contain extended partitions.
         return False
@@ -142,7 +143,7 @@ def _parse_lsblk_output(output, keys):
     # output is on format key="value",
     # where key can be NAME, TYPE, FSTYPE, SIZE, MOUNTPOINT, etc
-    lines = output.rstrip("\n").split("\n")
+    lines = output.rstrip('\n').split('\n')
     r = []
     for line in lines:
         d = {}
@@ -166,27 +167,30 @@ def _is_available(name, devtype, fstype, mountpoint, majmin, devs=None):
         has_VG = True
     else:
         has_VG = False
-    if (devtype in ['part', 'disk', 'mpath'] and
-            fstype in ['', 'LVM2_member'] and
-            mountpoint == "" and
-            not has_VG and
-            _is_dev_leaf(devNodePath, name, devs, devtype) and
-            not _is_dev_extended_partition(devtype, devNodePath)):
+    if (
+        devtype in ['part', 'disk', 'mpath'] and
+        fstype in ['', 'LVM2_member'] and
+        mountpoint == '' and
+        not has_VG and
+        _is_dev_leaf(devNodePath, name, devs, devtype) and
+        not _is_dev_extended_partition(devtype, devNodePath)
+    ):
         return True
     return False


 def get_partitions_names(check=False):
     names = set()
-    keys = ["NAME", "TYPE", "FSTYPE", "MOUNTPOINT", "MAJ:MIN"]
+    keys = ['NAME', 'TYPE', 'FSTYPE', 'MOUNTPOINT', 'MAJ:MIN']
     # output is on format key="value",
     # where key can be NAME, TYPE, FSTYPE, MOUNTPOINT
     for dev in _get_lsblk_devs(keys):
         # split()[0] to avoid the second part of the name, after the
         # whiteline
         name = dev['name'].split()[0]
-        if check and not _is_available(name, dev['type'], dev['fstype'],
-                                       dev['mountpoint'], dev['maj:min']):
+        if check and not _is_available(
+            name, dev['type'], dev['fstype'], dev['mountpoint'], dev['maj:min']
+        ):
             continue
         names.add(name)
@@ -197,19 +201,20 @@ def get_partition_details(name):
     majmin = _get_dev_major_min(name)
     dev_path = _get_dev_node_path(majmin)
-    keys = ["TYPE", "FSTYPE", "SIZE", "MOUNTPOINT", "MAJ:MIN", "PKNAME"]
+    keys = ['TYPE', 'FSTYPE', 'SIZE', 'MOUNTPOINT', 'MAJ:MIN', 'PKNAME']
     try:
         dev = _get_lsblk_devs(keys, [dev_path])[0]
-    except:
-        wok_log.error("Error getting partition info for %s", name)
+    except Exception:
+        wok_log.error('Error getting partition info for %s', name)
         return {}
-    dev['available'] = _is_available(name, dev['type'], dev['fstype'],
-                                     dev['mountpoint'], majmin)
+    dev['available'] = _is_available(
+        name, dev['type'], dev['fstype'], dev['mountpoint'], majmin
+    )
     if dev['mountpoint']:
         # Sometimes the mountpoint comes with [SWAP] or other
         # info which is not an actual mount point.
         # Filtering it
-    regexp = re.compile(r"\[.*\]")
+    regexp = re.compile(r'\[.*\]')
     if regexp.search(dev['mountpoint']) is not None:
         dev['mountpoint'] = ''
     dev['path'] = dev_path
@@ -223,18 +228,20 @@ def vgs():
     [{'vgname': 'vgtest', 'size': 999653638144L, 'free': 0}]
     """
-    cmd = ['vgs',
-           '--units',
-           'b',
-           '--nosuffix',
-           '--noheading',
-           '--unbuffered',
-           '--options',
-           'vg_name,vg_size,vg_free']
+    cmd = [
+        'vgs',
+        '--units',
+        'b',
+        '--nosuffix',
+        '--noheading',
+        '--unbuffered',
+        '--options',
+        'vg_name,vg_size,vg_free',
+    ]
     out, err, rc = run_command(cmd)
     if rc != 0:
-        raise OperationFailed("KCHDISK00004E", {'err': err})
+        raise OperationFailed('KCHDISK00004E', {'err': err})

     if not out:
         return []
@@ -243,10 +250,10 @@ def vgs():
     vgs = map(lambda v: v.strip(), out.strip('\n').split('\n'))

     # create a dict based on data retrieved from vgs
-    return map(lambda l: {'vgname': l[0],
-                          'size': long(l[1]),
-                          'free': long(l[2])},
-               [fields.split() for fields in vgs])
+    return map(
+        lambda l: {'vgname': l[0], 'size': int(l[1]), 'free': int(l[2])},
+        [fields.split() for fields in vgs],
+    )


 def lvs(vgname=None):
@@ -257,32 +264,36 @@ def lvs(vgname=None):
     [{'lvname': 'lva', 'path': '/dev/vgtest/lva', 'size': 12345L},
      {'lvname': 'lvb', 'path': '/dev/vgtest/lvb', 'size': 12345L}]
     """
-    cmd = ['lvs',
-           '--units',
-           'b',
-           '--nosuffix',
-           '--noheading',
-           '--unbuffered',
-           '--options',
-           'lv_name,lv_path,lv_size,vg_name']
+    cmd = [
+        'lvs',
+        '--units',
+        'b',
+        '--nosuffix',
+        '--noheading',
+        '--unbuffered',
+        '--options',
+        'lv_name,lv_path,lv_size,vg_name',
+    ]
     out, err, rc = run_command(cmd)
     if rc != 0:
-        raise OperationFailed("KCHDISK00004E", {'err': err})
+        raise OperationFailed('KCHDISK00004E', {'err': err})

     if not out:
         return []

     # remove blank spaces and create a list of LVs filtered by vgname, if
     # provided
-    lvs = filter(lambda f: vgname is None or vgname in f,
-                 map(lambda v: v.strip(), out.strip('\n').split('\n')))
+    lvs = filter(
+        lambda f: vgname is None or vgname in f,
+        map(lambda v: v.strip(), out.strip('\n').split('\n')),
+    )

     # create a dict based on data retrieved from lvs
-    return map(lambda l: {'lvname': l[0],
-                          'path': l[1],
-                          'size': long(l[2])},
-               [fields.split() for fields in lvs])
+    return map(
+        lambda l: {'lvname': l[0], 'path': l[1], 'size': int(l[2])},
+        [fields.split() for fields in lvs],
+    )


 def pvs(vgname=None):
@@ -297,29 +308,33 @@ def pvs(vgname=None):
     'size': 21470642176L,
     'uuid': 'CyBzhK-cQFl-gWqr-fyWC-A50Y-LMxu-iHiJq4'}]
     """
-    cmd = ['pvs',
-           '--units',
-           'b',
-           '--nosuffix',
-           '--noheading',
-           '--unbuffered',
-           '--options',
-           'pv_name,pv_size,pv_uuid,vg_name']
+    cmd = [
+        'pvs',
+        '--units',
+        'b',
+        '--nosuffix',
+        '--noheading',
+        '--unbuffered',
+        '--options',
+        'pv_name,pv_size,pv_uuid,vg_name',
+    ]
     out, err, rc = run_command(cmd)
     if rc != 0:
-        raise OperationFailed("KCHDISK00004E", {'err': err})
+        raise OperationFailed('KCHDISK00004E', {'err': err})

     if not out:
         return []

     # remove blank spaces and create a list of PVs filtered by vgname, if
     # provided
-    pvs = filter(lambda f: vgname is None or vgname in f,
-                 map(lambda v: v.strip(), out.strip('\n').split('\n')))
+    pvs = filter(
+        lambda f: vgname is None or vgname in f,
+        map(lambda v: v.strip(), out.strip('\n').split('\n')),
+    )

     # create a dict based on data retrieved from pvs
-    return map(lambda l: {'pvname': l[0],
-                          'size': long(l[1]),
-                          'uuid': l[2]},
-               [fields.split() for fields in pvs])
+    return map(
+        lambda l: {'pvname': l[0], 'size': int(l[1]), 'uuid': l[2]},
+        [fields.split() for fields in 
pvs], + ) diff --git a/distroloader.py b/distroloader.py index 775e8c96e..d702e463b 100644 --- a/distroloader.py +++ b/distroloader.py @@ -17,15 +17,14 @@ # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # - import glob import json import os -from wok.exception import NotFoundError, OperationFailed -from wok.utils import wok_log - +from wok.exception import NotFoundError +from wok.exception import OperationFailed from wok.plugins.kimchi import config +from wok.utils import wok_log ARCHS = {'x86_64': ['x86_64', 'amd64', 'i686', 'x86', 'i386'], @@ -43,21 +42,21 @@ def __init__(self, location=None): def _get_json_info(self, fname): msg_args = {'filename': fname} if not os.path.isfile(fname): - msg = "DistroLoader: failed to find distro file: %s" % fname + msg = 'DistroLoader: failed to find distro file: %s' % fname wok_log.error(msg) - raise NotFoundError("KCHDL0001E", msg_args) + raise NotFoundError('KCHDL0001E', msg_args) try: with open(fname) as f: data = json.load(f) return data except ValueError: - msg = "DistroLoader: failed to parse distro file: %s" % fname + msg = 'DistroLoader: failed to parse distro file: %s' % fname wok_log.error(msg) - raise OperationFailed("KCHDL0002E", msg_args) + raise OperationFailed('KCHDL0002E', msg_args) def get(self): arch_list = ARCHS.get(os.uname()[4]) - all_json_files = glob.glob("%s/%s" % (self.location, "*.json")) + all_json_files = glob.glob('%s/%s' % (self.location, '*.json')) distros = [] for f in all_json_files: distros.extend(self._get_json_info(f)) diff --git a/distros.d/fedora.json b/distros.d/fedora.json index aa12b3fa7..8bc4bc121 100644 --- a/distros.d/fedora.json +++ b/distros.d/fedora.json @@ -1,65 +1,16 @@ [ { - "name": "Fedora 22", + "name": "Fedora 28", "os_distro": "fedora", "os_arch": "x86_64", - "os_version": "22", - "path": "http://mirrors.kernel.org/fedora/releases/22/Live/x86_64/Fedora-Live-KDE-x86_64-22-3.iso" + "os_version": "28", + "path": "http://mirrors.kernel.org/fedora/releases/28/Everything/x86_64/iso/Fedora-Everything-netinst-x86_64-28-1.1.iso" }, { - "name": "Fedora 23", + "name": "Fedora 29", "os_distro": "fedora", "os_arch": "x86_64", - "os_version": "23", - "path": "http://mirrors.kernel.org/fedora/releases/23/Live/x86_64/Fedora-Live-KDE-x86_64-23-10.iso" - }, - { - "name": "Fedora 24", - "os_distro": "fedora", - "os_arch": "x86_64", - "os_version": "24", - "path": "http://mirrors.kernel.org/fedora/releases/24/Workstation/x86_64/iso/Fedora-Workstation-Live-x86_64-24-1.2.iso" - }, - { - "name": "Fedora 22", - "os_distro": "fedora", - "os_arch": "ppc64", - "os_version": "22", - "path": "http://mirrors.kernel.org/fedora-secondary/releases/22/Server/ppc64/iso/Fedora-Server-netinst-ppc64-22.iso" - }, - { - "name": "Fedora 22 LE", - "os_distro": "fedora", - "os_arch": "ppc64le", - "os_version": "22", - "path": "http://mirrors.kernel.org/fedora-secondary/releases/22/Server/ppc64le/iso/Fedora-Server-netinst-ppc64le-22.iso" - }, - { - "name": "Fedora 23", - "os_distro": "fedora", - "os_arch": "ppc64", - "os_version": "23", - "path": "http://mirrors.kernel.org/fedora-secondary/releases/23/Server/ppc64/iso/Fedora-Server-netinst-ppc64-23.iso" - }, - { - "name": "Fedora 23 LE", - "os_distro": "fedora", - "os_arch": "ppc64le", - "os_version": "23", - "path": "http://mirrors.kernel.org/fedora-secondary/releases/23/Server/ppc64le/iso/Fedora-Server-netinst-ppc64le-23.iso" - }, - { - "name": "Fedora 24", - "os_distro": "fedora", - 
"os_arch": "ppc64", - "os_version": "24", - "path": "http://mirrors.kernel.org/fedora-secondary/releases/24/Server/ppc64/iso/Fedora-Server-netinst-ppc64-24-1.2.iso" - }, - { - "name": "Fedora 24 LE", - "os_distro": "fedora", - "os_arch": "ppc64le", - "os_version": "24", - "path": "http://mirrors.kernel.org/fedora-secondary/releases/24/Server/ppc64le/iso/Fedora-Server-netinst-ppc64le-24-1.2.iso" + "os_version": "29", + "path": "http://mirrors.kernel.org/fedora/releases/29/Everything/x86_64/iso/Fedora-Everything-netinst-x86_64-29-1.2.iso" } ] diff --git a/docs/ubuntu-deps.md b/docs/ubuntu-deps.md index 13e352150..47505632b 100644 --- a/docs/ubuntu-deps.md +++ b/docs/ubuntu-deps.md @@ -15,23 +15,26 @@ Build Dependencies Runtime Dependencies -------------------- - $ sudo apt-get install python-configobj novnc python-libvirt \ - libvirt-bin nfs-common qemu-kvm python-parted \ + $ sudo apt-get install python3-configobj novnc python3-libvirt \ + libvirt-bin nfs-common qemu-kvm python3-parted \ python-ethtool sosreport python-ipaddr \ - python-lxml open-iscsi python-guestfs \ - libguestfs-tools spice-html5 python-magic \ - python-paramiko python-imaging \ + python3-lxml open-iscsi python3-guestfs \ + libguestfs-tools spice-html5 python3-magic \ + python3-paramiko python3-pil \ fonts-font-awesome geoip-database gettext \ - nginx-light python-cheetah python-cherrypy3 \ - python-ldap python-m2crypto python-pam + nginx-light python-cheetah python3-cherrypy3 \ + python3-ldap python-openssl python3-pam + + sudo apt install python3 python3-setuptools libpython3.6-dev libnl-route-3-dev + sudo pip3 install ethtool ipaddr Packages required for UI development ------------------------------------ - $ sudo apt-get install g++ python-dev python-pip + $ sudo apt-get install g++ python3-dev python3-pip $ sudo pip install cython libsass Packages required for tests --------------------------- - $ sudo apt-get install pep8 pyflakes python-requests python-mock bc + $ sudo apt-get install pep8 pyflakes python3-requests python3-mock bc diff --git a/i18n.py b/i18n.py index 7dc7b053a..890064d8d 100644 --- a/i18n.py +++ b/i18n.py @@ -16,413 +16,412 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - import gettext _ = gettext.gettext messages = { - "KCHAPI0001E": _("Unknown parameter %(value)s"), + 'KCHAPI0001E': _('Unknown parameter %(value)s'), - "KCHAUTH0004E": _("User %(user_id)s not found with given LDAP settings."), + 'KCHAUTH0004E': _('User %(user_id)s not found with given LDAP settings.'), - "KCHPART0001E": _("Partition %(name)s does not exist in the host"), + 'KCHPART0001E': _('Partition %(name)s does not exist in the host'), - "KCHDISK00001E": _("Error while accessing dev mapper device, %(err)s"), - "KCHDISK00002E": _("Block device not found."), - "KCHDISK00003E": _("Block device %(device)s not found."), - "KCHDISK00004E": _("Unable to retrieve LVM information. Details: %(err)s"), + 'KCHDISK00001E': _('Error while accessing dev mapper device, %(err)s'), + 'KCHDISK00002E': _('Block device not found.'), + 'KCHDISK00003E': _('Block device %(device)s not found.'), + 'KCHDISK00004E': _('Unable to retrieve LVM information. 
Details: %(err)s'), - "KCHDEVS0001E": _('Unknown "_cap" specified'), - "KCHDEVS0002E": _('"_passthrough" should be "true" or "false"'), - "KCHDEVS0003E": _('"_passthrough_affected_by" should be a device name string'), - "KCHDEVS0004E": _('"_available_only" should be "true" or "false"'), + 'KCHDEVS0001E': _('Unknown "_cap" specified'), + 'KCHDEVS0002E': _('"_passthrough" should be "true" or "false"'), + 'KCHDEVS0003E': _('"_passthrough_affected_by" should be a device name string'), + 'KCHDEVS0004E': _('"_available_only" should be "true" or "false"'), - "KCHDL0001E": _("Unable to find distro file: %(filename)s"), - "KCHDL0002E": _("Unable to parse distro file: %(filename)s. Make sure, it is a JSON file."), + 'KCHDL0001E': _('Unable to find distro file: %(filename)s'), + 'KCHDL0002E': _('Unable to parse distro file: %(filename)s. Make sure, it is a JSON file.'), - "KCHISCSI0001E": _("Unable to login to iSCSI host target %(portal)s. Details: %(err)s"), - "KCHISCSI0002E": _("Unable to login to iSCSI host %(host)s target %(target)s"), + 'KCHISCSI0001E': _('Unable to login to iSCSI host target %(portal)s. Details: %(err)s'), + 'KCHISCSI0002E': _('Unable to login to iSCSI host %(host)s target %(target)s'), - "KCHISO0001E": _("Unable to find ISO file %(filename)s"), - "KCHISO0002E": _("The ISO file %(filename)s is not bootable"), - "KCHISO0003E": _("The ISO file %(filename)s does not have a valid El Torito boot record"), - "KCHISO0004E": _("Invalid El Torito validation entry in ISO %(filename)s"), - "KCHISO0005E": _("Invalid El Torito boot indicator in ISO %(filename)s"), - "KCHISO0006E": _("Unexpected volume type for primary volume in ISO %(filename)s"), - "KCHISO0007E": _("Bad format while reading volume descriptor in ISO %(filename)s"), - "KCHISO0008E": _("The hypervisor doesn't have permission to use this ISO %(filename)s. " - "Consider moving it under /var/lib/libvirt, or set the search permission " + 'KCHISO0001E': _('Unable to find ISO file %(filename)s'), + 'KCHISO0002E': _('The ISO file %(filename)s is not bootable'), + 'KCHISO0003E': _('The ISO file %(filename)s does not have a valid El Torito boot record'), + 'KCHISO0004E': _('Invalid El Torito validation entry in ISO %(filename)s'), + 'KCHISO0005E': _('Invalid El Torito boot indicator in ISO %(filename)s'), + 'KCHISO0006E': _('Unexpected volume type for primary volume in ISO %(filename)s'), + 'KCHISO0007E': _('Bad format while reading volume descriptor in ISO %(filename)s'), + 'KCHISO0008E': _("The hypervisor doesn't have permission to use this ISO %(filename)s. " + 'Consider moving it under /var/lib/libvirt, or set the search permission ' "to file access control lists for '%(user)s' user if possible, or add the " "'%(user)s' to the ISO path group, or (not recommended) 'chmod -R o+x 'path_to_iso'." - "Details: %(err)s" ), - "KCHISO0009E": _("Unable to access remote ISO. Details: %(err)s"), - - "KCHIMG0001E": _("Error probing image OS information: %(err)s"), - "KCHIMG0003E": _("Unable to read image file %(filename)s"), - "KCHIMG0004E": _("Image file must be an existing file on system. 
%(filename)s is not a valid input."), - - "KCHVM0001E": _("Virtual machine %(name)s already exists"), - "KCHVM0002E": _("Virtual machine %(name)s does not exist"), - "KCHVM0004E": _("Unable to retrieve screenshot for stopped virtual machine %(name)s"), - "KCHVM0005E": _("Remote ISO image is not supported by this server."), - "KCHVM0006E": _("Screenshot is not supported on virtual machine %(name)s"), - "KCHVM0007E": _("Unable to create virtual machine %(name)s. Details: %(err)s"), - "KCHVM0008E": _("Unable to update virtual machine %(name)s. Details: %(err)s"), - "KCHVM0009E": _("Unable to retrieve virtual machine %(name)s. Details: %(err)s"), - "KCHVM0010E": _("Unable to connect to powered off virtual machine %(name)s."), - "KCHVM0011E": _("Virtual machine name must be a string without slashes (/)"), - "KCHVM0012E": _("Invalid template URI %(value)s specified for virtual machine"), - "KCHVM0013E": _("Invalid storage pool URI %(value)s specified for virtual machine"), - "KCHVM0014E": _("Supported virtual machine graphics are Spice or VNC"), - "KCHVM0015E": _("Graphics address to listen on must be IPv4 or IPv6"), - "KCHVM0016E": _("Specify a template to create a virtual machine from"), - "KCHVM0019E": _("Unable to start virtual machine %(name)s. Details: %(err)s"), - "KCHVM0020E": _("Unable to power off virtual machine %(name)s. Details: %(err)s"), - "KCHVM0021E": _("Unable to delete virtual machine %(name)s. Details: %(err)s"), - "KCHVM0022E": _("Unable to reset virtual machine %(name)s. Details: %(err)s"), - "KCHVM0023E": _("User name list must be an array"), - "KCHVM0024E": _("User name must be a string"), - "KCHVM0025E": _("Group name list must be an array"), - "KCHVM0026E": _("Group name must be a string"), - "KCHVM0027E": _("User(s) '%(users)s' do not exist"), - "KCHVM0028E": _("Group(s) '%(groups)s' do not exist"), - "KCHVM0029E": _("Unable to shutdown virtual machine %(name)s. Details: %(err)s"), - "KCHVM0031E": _("The guest console password must be a string."), - "KCHVM0032E": _("The life time for the guest console password must be a number."), - "KCHVM0033E": _("Virtual machine '%(name)s' must be stopped before cloning it."), - "KCHVM0034E": _("Insufficient disk space to clone virtual machine '%(name)s'"), - "KCHVM0035E": _("Unable to clone VM '%(name)s'. Details: %(err)s"), - "KCHVM0036E": _("Invalid operation for non-persistent virtual machine %(name)s"), - "KCHVM0037E": _("Cannot suspend VM '%(name)s' because it is not running."), - "KCHVM0038E": _("Unable to suspend VM '%(name)s'. Details: %(err)s"), - "KCHVM0039E": _("Cannot resume VM '%(name)s' because it is not paused."), - "KCHVM0040E": _("Unable to resume VM '%(name)s'. Details: %(err)s"), - "KCHVM0041E": _("Memory assigned is higher then the maximum allowed in the host: %(maxmem)sMib."), - "KCHVM0042E": _("Guest '%(name)s' does not support live memory update. Please, with the guest offline, set Maximum Memory with a value greater then Memory to enable this feature."), - "KCHVM0043E": _("Only increase memory is allowed in active VMs"), - "KCHVM0045E": _("There are not enough free slots to add a new memory device."), - "KCHVM0046E": _("Host's libvirt or qemu version does not support memory devices and memory hotplug. Libvirt must be >= 1.2.14 and QEMU must be >= 2.1."), - "KCHVM0047E": _("Error attaching memory device. Details: %(error)s"), - "KCHVM0048E": _("Cannot start %(name)s. Virtual machine is already running."), - "KCHVM0049E": _("Cannot power off %(name)s. 
Virtual machine is shut off."), - "KCHVM0050E": _("Cannot shutdown %(name)s. Virtual machine is shut off."), - "KCHVM0051E": _("Cannot reset %(name)s. Virtual machine is already shut off."), - "KCHVM0052E": _("Boot order must be a list. Devices accepted: hd, cdrom, fd or network."), - "KCHVM0053E": _("Bootmenu must be boolean. Values accepted: true of false."), - "KCHVM0054E": _("Graphic type not valid. Values accepted: vnc or spice."), - "KCHVM0055E": _("Migrate to localhost %(host)s is not allowed."), - "KCHVM0056E": _("To migrate a virtual machine to the remote host %(host)s the user %(user)s must have password-less login to the remote host."), - "KCHVM0057E": _("Can not migrate virtual machine %(name)s when its in %(state)s state."), - "KCHVM0058E": _("Failed to migrate virtual machine %(name)s due error: %(err)s"), - "KCHVM0059E": _("User name of the remote server must be a string."), - "KCHVM0060E": _("Destination host of the migration must be a string."), - "KCHVM0061E": _("Unable to create file %(path)s at %(host)s using user %(user)s."), - "KCHVM0062E": _("Unable to read disk size of %(path)s, error: %(error)s"), - "KCHVM0063E": _("Unable to create disk image %(path)s at %(host)s using user %(user)s. Error: %(error)s"), - "KCHVM0064E": _("Unable to migrate virtual machine to remote host %(host)s with arch %(destarch)s using localhost with arch %(srcarch)s."), - "KCHVM0065E": _("Unable to migrate virtual machine to remote host %(host)s with hypervisor %(desthyp)s because localhost uses hypervisor %(srchyp)s."), - "KCHVM0066E": _("Unable to determine remote host hypervisor and architecture. Error: %(error)s"), - "KCHVM0067E": _("Unable to migrate virtual machine: subcores per core setting from localhostand remote host %(host)s differs."), - "KCHVM0068E": _("Unable to setup password-less login at remote host %(host)s using user %(user)s. 
Error: %(error)s"), - "KCHVM0069E": _("Password field must be a string."), - "KCHVM0070E": _("Error creating local host ssh rsa key of user 'root'."), - "KCHVM0071E": _("%(param)s value (%(mem)sMiB) must be aligned to %(alignment)sMiB."), - "KCHVM0073E": _("Unable to update the following parameters while the VM is offline: %(params)s"), - "KCHVM0074E": _("Unable to update the following parameters while the VM is online: %(params)s"), - "KCHVM0076E": _("VM %(name)s must have serial and console defined to open a web serial console"), - "KCHVM0077E": _("Impossible to get the serial console of %(name)s"), - "KCHVM0078E": _("Memory or Maximum Memory value is higher than amount supported by the host: %(memHost)sMiB."), - "KCHVM0079E": _("Memory or Maximum Memory value is higher than maximum amount recommended: %(value)sTiB"), - "KCHVM0080E": _("Cannot update Maximum Memory when guest is running."), - "KCHVM0081E": _("Impossible to create %(dir)s directory."), - "KCHVM0082E": _("Either the guest %(name)s did not start to listen to the serial or it is not configured to use the serial console."), - "KCHVM0083E": _("Unable to retrieve Virt Viewer file for stopped virtual machine %(name)s"), - "KCHVM0084E": _("Error occured while retrieving the Virt Viewer file for virtual machine %(name)s : %(err)s"), - "KCHVM0085E": _("Virtual machine title must be a string"), - "KCHVM0086E": _("Virtual machine description must be a string"), - "KCHVM0087E": _("console parameter is only supported for s390x/s390 architecture."), - "KCHVM0088E": _("invalid console type, supported types are sclp/virtio."), - "KCHVM0089E": _("Unable to setup password-less login at remote host %(host)s using user %(user)s: remote directory %(sshdir)s does not exist."), - "KCHVM0090E": _("Unable to create a password-less libvirt connection to the remote libvirt daemon at host %(host)s with the user %(user)s. Please verify the remote server libvirt configuration. More information: http://libvirt.org/auth.html ."), - "KCHVM0091E": _("'enable_rdma' must be of type boolean (true or false)."), - - "KCHVMHDEV0001E": _("VM %(vmid)s does not contain directly assigned host device %(dev_name)s."), - "KCHVMHDEV0002E": _("The host device %(dev_name)s is not allowed to directly assign to VM."), - "KCHVMHDEV0003E": _("No IOMMU groups found. Host PCI pass through needs IOMMU group to function correctly. " - "Please enable Intel VT-d or AMD IOMMU in your BIOS, then verify the Kernel is compiled with IOMMU support. " + 'Details: %(err)s'), + 'KCHISO0009E': _('Unable to access remote ISO. Details: %(err)s'), + + 'KCHIMG0001E': _('Error probing image OS information: %(err)s'), + 'KCHIMG0003E': _('Unable to read image file %(filename)s'), + 'KCHIMG0004E': _('Image file must be an existing file on system. %(filename)s is not a valid input.'), + + 'KCHVM0001E': _('Virtual machine %(name)s already exists'), + 'KCHVM0002E': _('Virtual machine %(name)s does not exist'), + 'KCHVM0004E': _('Unable to retrieve screenshot for stopped virtual machine %(name)s'), + 'KCHVM0005E': _('Remote ISO image is not supported by this server.'), + 'KCHVM0006E': _('Screenshot is not supported on virtual machine %(name)s'), + 'KCHVM0007E': _('Unable to create virtual machine %(name)s. Details: %(err)s'), + 'KCHVM0008E': _('Unable to update virtual machine %(name)s. Details: %(err)s'), + 'KCHVM0009E': _('Unable to retrieve virtual machine %(name)s. 
Details: %(err)s'), + 'KCHVM0010E': _('Unable to connect to powered off virtual machine %(name)s.'), + 'KCHVM0011E': _('Virtual machine name must be a string without slashes (/)'), + 'KCHVM0012E': _('Invalid template URI %(value)s specified for virtual machine'), + 'KCHVM0013E': _('Invalid storage pool URI %(value)s specified for virtual machine'), + 'KCHVM0014E': _('Supported virtual machine graphics are Spice or VNC'), + 'KCHVM0015E': _('Graphics address to listen on must be IPv4 or IPv6'), + 'KCHVM0016E': _('Specify a template to create a virtual machine from'), + 'KCHVM0019E': _('Unable to start virtual machine %(name)s. Details: %(err)s'), + 'KCHVM0020E': _('Unable to power off virtual machine %(name)s. Details: %(err)s'), + 'KCHVM0021E': _('Unable to delete virtual machine %(name)s. Details: %(err)s'), + 'KCHVM0022E': _('Unable to reset virtual machine %(name)s. Details: %(err)s'), + 'KCHVM0023E': _('User name list must be an array'), + 'KCHVM0024E': _('User name must be a string'), + 'KCHVM0025E': _('Group name list must be an array'), + 'KCHVM0026E': _('Group name must be a string'), + 'KCHVM0027E': _("User(s) '%(users)s' do not exist"), + 'KCHVM0028E': _("Group(s) '%(groups)s' do not exist"), + 'KCHVM0029E': _('Unable to shutdown virtual machine %(name)s. Details: %(err)s'), + 'KCHVM0031E': _('The guest console password must be a string.'), + 'KCHVM0032E': _('The life time for the guest console password must be a number.'), + 'KCHVM0033E': _("Virtual machine '%(name)s' must be stopped before cloning it."), + 'KCHVM0034E': _("Insufficient disk space to clone virtual machine '%(name)s'"), + 'KCHVM0035E': _("Unable to clone VM '%(name)s'. Details: %(err)s"), + 'KCHVM0036E': _('Invalid operation for non-persistent virtual machine %(name)s'), + 'KCHVM0037E': _("Cannot suspend VM '%(name)s' because it is not running."), + 'KCHVM0038E': _("Unable to suspend VM '%(name)s'. Details: %(err)s"), + 'KCHVM0039E': _("Cannot resume VM '%(name)s' because it is not paused."), + 'KCHVM0040E': _("Unable to resume VM '%(name)s'. Details: %(err)s"), + 'KCHVM0041E': _('Memory assigned is higher then the maximum allowed in the host: %(maxmem)sMib.'), + 'KCHVM0042E': _("Guest '%(name)s' does not support live memory update. Please, with the guest offline, set Maximum Memory with a value greater then Memory to enable this feature."), + 'KCHVM0043E': _('Only increase memory is allowed in active VMs'), + 'KCHVM0045E': _('There are not enough free slots to add a new memory device.'), + 'KCHVM0046E': _("Host's libvirt or qemu version does not support memory devices and memory hotplug. Libvirt must be >= 1.2.14 and QEMU must be >= 2.1."), + 'KCHVM0047E': _('Error attaching memory device. Details: %(error)s'), + 'KCHVM0048E': _('Cannot start %(name)s. Virtual machine is already running.'), + 'KCHVM0049E': _('Cannot power off %(name)s. Virtual machine is shut off.'), + 'KCHVM0050E': _('Cannot shutdown %(name)s. Virtual machine is shut off.'), + 'KCHVM0051E': _('Cannot reset %(name)s. Virtual machine is already shut off.'), + 'KCHVM0052E': _('Boot order must be a list. Devices accepted: hd, cdrom, fd or network.'), + 'KCHVM0053E': _('Bootmenu must be boolean. Values accepted: true of false.'), + 'KCHVM0054E': _('Graphic type not valid. 
Values accepted: vnc or spice.'), + 'KCHVM0055E': _('Migrate to localhost %(host)s is not allowed.'), + 'KCHVM0056E': _('To migrate a virtual machine to the remote host %(host)s the user %(user)s must have password-less login to the remote host.'), + 'KCHVM0057E': _('Can not migrate virtual machine %(name)s when its in %(state)s state.'), + 'KCHVM0058E': _('Failed to migrate virtual machine %(name)s due error: %(err)s'), + 'KCHVM0059E': _('User name of the remote server must be a string.'), + 'KCHVM0060E': _('Destination host of the migration must be a string.'), + 'KCHVM0061E': _('Unable to create file %(path)s at %(host)s using user %(user)s.'), + 'KCHVM0062E': _('Unable to read disk size of %(path)s, error: %(error)s'), + 'KCHVM0063E': _('Unable to create disk image %(path)s at %(host)s using user %(user)s. Error: %(error)s'), + 'KCHVM0064E': _('Unable to migrate virtual machine to remote host %(host)s with arch %(destarch)s using localhost with arch %(srcarch)s.'), + 'KCHVM0065E': _('Unable to migrate virtual machine to remote host %(host)s with hypervisor %(desthyp)s because localhost uses hypervisor %(srchyp)s.'), + 'KCHVM0066E': _('Unable to determine remote host hypervisor and architecture. Error: %(error)s'), + 'KCHVM0067E': _('Unable to migrate virtual machine: subcores per core setting from localhostand remote host %(host)s differs.'), + 'KCHVM0068E': _('Unable to setup password-less login at remote host %(host)s using user %(user)s. Error: %(error)s'), + 'KCHVM0069E': _('Password field must be a string.'), + 'KCHVM0070E': _("Error creating local host ssh rsa key of user 'root'."), + 'KCHVM0071E': _('%(param)s value (%(mem)sMiB) must be aligned to %(alignment)sMiB.'), + 'KCHVM0073E': _('Unable to update the following parameters while the VM is offline: %(params)s'), + 'KCHVM0074E': _('Unable to update the following parameters while the VM is online: %(params)s'), + 'KCHVM0076E': _('VM %(name)s must have serial and console defined to open a web serial console'), + 'KCHVM0077E': _('Impossible to get the serial console of %(name)s'), + 'KCHVM0078E': _('Memory or Maximum Memory value is higher than amount supported by the host: %(memHost)sMiB.'), + 'KCHVM0079E': _('Memory or Maximum Memory value is higher than maximum amount recommended: %(value)sTiB'), + 'KCHVM0080E': _('Cannot update Maximum Memory when guest is running.'), + 'KCHVM0081E': _('Impossible to create %(dir)s directory.'), + 'KCHVM0082E': _('Either the guest %(name)s did not start to listen to the serial or it is not configured to use the serial console.'), + 'KCHVM0083E': _('Unable to retrieve Virt Viewer file for stopped virtual machine %(name)s'), + 'KCHVM0084E': _('Error occured while retrieving the Virt Viewer file for virtual machine %(name)s : %(err)s'), + 'KCHVM0085E': _('Virtual machine title must be a string'), + 'KCHVM0086E': _('Virtual machine description must be a string'), + 'KCHVM0087E': _('console parameter is only supported for s390x/s390 architecture.'), + 'KCHVM0088E': _('invalid console type, supported types are sclp/virtio.'), + 'KCHVM0089E': _('Unable to setup password-less login at remote host %(host)s using user %(user)s: remote directory %(sshdir)s does not exist.'), + 'KCHVM0090E': _('Unable to create a password-less libvirt connection to the remote libvirt daemon at host %(host)s with the user %(user)s. Please verify the remote server libvirt configuration. 
More information: http://libvirt.org/auth.html .'), + 'KCHVM0091E': _("'enable_rdma' must be of type boolean (true or false)."), + + 'KCHVMHDEV0001E': _('VM %(vmid)s does not contain directly assigned host device %(dev_name)s.'), + 'KCHVMHDEV0002E': _('The host device %(dev_name)s is not allowed to directly assign to VM.'), + 'KCHVMHDEV0003E': _('No IOMMU groups found. Host PCI pass through needs IOMMU group to function correctly. ' + 'Please enable Intel VT-d or AMD IOMMU in your BIOS, then verify the Kernel is compiled with IOMMU support. ' "For Intel CPU, add 'intel_iommu=on' to GRUB_CMDLINE_LINUX parameter in /etc/default/grub file. " "For AMD CPU, add 'iommu=pt iommu=1'."), - "KCHVMHDEV0004E": _('"name" should be a device name string'), - "KCHVMHDEV0005E": _('The device %(name)s is probably in use by the host. Unable to attach it to the guest.'), - "KCHVMHDEV0006E": _('Hot-(un)plug of device %(name)s is not supported.'), - "KCHVMHDEV0007E": _('Failed to attach %(device)s to %(vm)s'), - "KCHVMHDEV0008E": _('VM %(vmid)s does not have a USB controller to accept PCI hotplug.'), - - "KCHVMIF0001E": _("Interface %(iface)s does not exist in virtual machine %(name)s"), - "KCHVMIF0002E": _("Network %(network)s specified for virtual machine %(name)s does not exist"), - "KCHVMIF0004E": _("Supported virtual machine interfaces type are network, ovs and macvtap.Type ovs and macvtap are only supported for s390x/s390 architecture."), - "KCHVMIF0005E": _("Network name for virtual machine interface must be a string"), - "KCHVMIF0006E": _("Invalid network model card specified for virtual machine interface"), - "KCHVMIF0007E": _("Specify type and network to add a new virtual machine interface"), - "KCHVMIF0008E": _("MAC Address must respect this format FF:FF:FF:FF:FF:FF"), - "KCHVMIF0009E": _("MAC Address %(mac)s already exists in virtual machine %(name)s"), - "KCHVMIF0010E": _("Invalid MAC Address"), - "KCHVMIF0011E": _("Cannot change MAC address of a running virtual machine"), - "KCHVMIF0012E": _("Type macvtap and ovs are only supported on s390x/s390 architecture."), - "KCHVMIF0013E": _("Source attribute is only supported on s390x/s390 architecture."), - "KCHVMIF0014E": _("If source is provided, only type supported are macvtap and ovs."), - "KCHVMIF0015E": _("For type macvtap and ovs, source has to be provided"), - "KCHVMIF0016E": _("Source name for virtual machine interface must be string"), - "KCHVMIF0017E": _("Invalid source mode. 
Valid options are: bridge or vepa."), - - - "KCHTMPL0001E": _("Template %(name)s already exists"), - "KCHTMPL0002E": _("Source media %(path)s not found"), - "KCHTMPL0003E": _("Network '%(network)s' specified for template %(template)s does not exist"), - "KCHTMPL0004E": _("Storage pool %(pool)s specified for template %(template)s does not exist"), - "KCHTMPL0006E": _("Invalid parameter '%(param)s' specified for CDROM."), - "KCHTMPL0007E": _("Network %(network)s specified for template %(template)s is not active"), - "KCHTMPL0008E": _("Template name must be a string"), - "KCHTMPL0009E": _("Template icon must be a path to the image"), - "KCHTMPL0010E": _("Template distribution must be a string"), - "KCHTMPL0011E": _("Template distribution version must be a string"), - "KCHTMPL0012E": _("The number of CPUs must be an integer greater than 0"), - "KCHTMPL0013E": _("Amount of memory and maximum memory (MB) must be an integer greater than 512"), - "KCHTMPL0014E": _("Template CDROM must be a local or remote ISO file"), - "KCHTMPL0015E": _("Invalid storage pool URI %(value)s specified for template"), - "KCHTMPL0016E": _("Specify a path to source media (ISO, disk or remote ISO) to create a template"), - "KCHTMPL0017E": _("All networks for the template must be specified in a list."), - "KCHTMPL0018E": _("Specify a volume to a template when storage pool is iSCSI or SCSI"), - "KCHTMPL0019E": _("The volume %(volume)s is not in storage pool %(pool)s"), - "KCHTMPL0020E": _("Unable to create template due error: %(err)s"), - "KCHTMPL0021E": _("Unable to delete template due error: %(err)s"), - "KCHTMPL0022E": _("Disk size must be an integer greater than 1GB."), - "KCHTMPL0024E": _("Cannot identify base image %(path)s format"), - "KCHTMPL0026E": _("When specifying CPU topology, each element must be an integer greater than zero."), - "KCHTMPL0027E": _("Invalid disk image format. Valid formats: qcow, qcow2, qed, raw, vmdk, vpc."), - "KCHTMPL0028E": _("When setting template disks, following parameters are required: 'index', 'pool name', 'format', 'size' or 'volume' (for scsi/iscsi pools)"), - "KCHTMPL0029E": _("Disk format must be 'raw', for logical, iscsi, and scsi pools."), - "KCHTMPL0030E": _("Memory expects an object with one or both parameters: 'current' and 'maxmemory'"), - "KCHTMPL0031E": _("Memory value (%(mem)sMiB) must be equal or lesser than maximum memory value (%(maxmem)sMiB)"), - "KCHTMPL0032E": _("Unable to update template due error: %(err)s"), - "KCHTMPL0033E": _("Parameter 'disks' requires at least one disk object"), - "KCHTMPL0034E": _("Invalid interface type. Type should be 'macvtap' for host network interface (Ethernet, Bond, VLAN) to be connected as direct MacVTap or 'ovs' for openvswitch host network interface to be connected as virtual switch to a VM."), - "KCHTMPL0035E": _("Interface name should be string."), - "KCHTMPL0036E": _("Invalid interface mode. Valid options are: bridge or vepa."), - "KCHTMPL0037E": _("Interfaces should be list of interfaces. Each interface should have name, type and mode(optional, only applicable for interfcae type 'macvtap'."), - "KCHTMPL0038E": _("Interface expects an object with parameters: 'name', 'type' and 'mode'. Name should be name of host network interface (Ethernet, Bond, VLAN) for type 'macvtap' or the name of host openvswitch bridge interface for type 'ovs'. 
Mode (optional) is only applicable for interface type 'macvtap' to indicates whether packets will be delivered directly to target device (bridge) or to the external bridge (vepa-capable bridge)."), - "KCHTMPL0039E": _("Interfaces parameter only supported on s390x or s390 architecture."), - "KCHTMPL0040E": _("Storage without libvirt pool is not supported on this architecture"), - "KCHTMPL0041E": _("Error while creating the virtual disk for the guest. Details: %(err)s"), - "KCHTMPL0042E": _("When setting template disks without libvirt, following parameters are required: 'index', 'format', 'path', 'size'"), - "KCHTMPL0043E": _("console parameter is only supported for s390x/s390 architecture."), - "KCHTMPL0044E": _("invalid console type, supported types are sclp/virtio."), - - "KCHPOOL0001E": _("Storage pool %(name)s already exists"), - "KCHPOOL0002E": _("Storage pool %(name)s does not exist"), - "KCHPOOL0004E": _("Specify %(item)s in order to create the storage pool %(name)s"), - "KCHPOOL0005E": _("Unable to delete active storage pool %(name)s"), - "KCHPOOL0006E": _("Unable to list storage pools. Details: %(err)s"), - "KCHPOOL0007E": _("Unable to create storage pool %(name)s. Details: %(err)s"), - "KCHPOOL0009E": _("Unable to activate storage pool %(name)s. Details: %(err)s"), - "KCHPOOL0010E": _("Unable to deactivate storage pool %(name)s. Details: %(err)s"), - "KCHPOOL0011E": _("Unable to delete storage pool %(name)s. Details: %(err)s"), - "KCHPOOL0012E": _("Unable to create NFS Pool as export path %(path)s may block during mount"), - "KCHPOOL0013E": _("Unable to create NFS Pool as export path %(path)s mount failed"), - "KCHPOOL0014E": _("Unsupported storage pool type: %(type)s"), - "KCHPOOL0015E": _("Error while retrieving storage pool XML to %(pool)s"), - "KCHPOOL0016E": _("Storage pool name must be a string without slashes (/)"), - "KCHPOOL0017E": _("Supported storage pool types are dir, netfs, logical, iscsi, isci and kimchi-iso"), - "KCHPOOL0018E": _("Storage pool path must be a string"), - "KCHPOOL0019E": _("Storage pool host must be a IP or hostname"), - "KCHPOOL0020E": _("Storage pool device must be the absolute path to the block device"), - "KCHPOOL0021E": _("Storage pool devices parameter must be a list"), - "KCHPOOL0022E": _("Target IQN of an iSCSI pool must be a string"), - "KCHPOOL0023E": _("Port of a remote storage server must be an integer between 1 and 65535"), - "KCHPOOL0024E": _("iSCSI target username must be a string"), - "KCHPOOL0025E": _("iSCSI target password must be a string"), - "KCHPOOL0026E": _("Specify name and type to create a storage pool"), - "KCHPOOL0027E": _("%(disk)s is not a valid disk/partition. Could not add it to the pool %(pool)s."), - "KCHPOOL0028E": _("Unable to extend logical pool %(pool)s. Details: %(err)s"), - "KCHPOOL0029E": _("The parameter disks only can be updated for logical storage pool."), - "KCHPOOL0030E": _("The SCSI host adapter name must be a string."), - "KCHPOOL0031E": _("The storage pool kimchi_isos is reserved for internal use"), - "KCHPOOL0032E": _("Unable to activate NFS storage pool %(name)s. NFS server %(server)s is unreachable."), - "KCHPOOL0033E": _("Unable to deactivate NFS storage pool %(name)s. NFS server %(server)s is unreachable."), - "KCHPOOL0034E": _("Unable to deactivate pool %(name)s as it is associated with some templates"), - "KCHPOOL0035E": _("Unable to delete pool %(name)s as it is associated with some templates"), - "KCHPOOL0036E": _("A volume group named '%(name)s' already exists. 
Please, choose another name to create the logical pool."), - "KCHPOOL0037E": _("Unable to update database with deep scan information due error: %(err)s"), - "KCHPOOL0038E": _("No volume group '%(name)s' found. Please, specify an existing volume group to create the logical pool from."), - "KCHPOOL0039E": _("Unable to delete pool %(name)s as it is associated with guests: %(vms)s"), - - "KCHVOL0001E": _("Storage volume %(name)s already exists"), - "KCHVOL0002E": _("Storage volume %(name)s does not exist in storage pool %(pool)s"), - "KCHVOL0003E": _("Unable to create storage volume %(volume)s because storage pool %(pool)s is not active"), - "KCHVOL0004E": _("Specify %(item)s in order to create storage volume %(volume)s"), - "KCHVOL0006E": _("Unable to list storage volumes because storage pool %(pool)s is not active"), - "KCHVOL0007E": _("Unable to create storage volume %(name)s in storage pool %(pool)s. Details: %(err)s"), - "KCHVOL0009E": _("Unable to wipe storage volumes %(name)s. Details: %(err)s"), - "KCHVOL0010E": _("Unable to delete storage volume %(name)s. Details: %(err)s"), - "KCHVOL0011E": _("Unable to resize storage volume %(name)s. Details: %(err)s"), - "KCHVOL0012E": _("Storage type %(type)s does not support volume create and delete"), - "KCHVOL0013E": _("Storage volume name must be a string"), - "KCHVOL0014E": _("Storage volume allocation must be an integer number"), - "KCHVOL0015E": _("Storage volume format not supported. Valid formats: qcow, qcow2, qed, raw, vmdk, vpc."), - "KCHVOL0016E": _("Storage volume requires a volume name"), - "KCHVOL0017E": _("Unable to update database with storage volume information due error: %(err)s"), - "KCHVOL0018E": _("Only one of parameter %(param)s can be specified"), - "KCHVOL0019E": _("Create volume from %(param)s is not supported"), - "KCHVOL0020E": _("Storage volume capacity must be an integer number."), - "KCHVOL0021E": _("Storage volume URL must be http://, https://, ftp:// or ftps://."), - "KCHVOL0022E": _("Unable to access file %(url)s. Please, check it."), - "KCHVOL0023E": _("Unable to clone storage volume '%(name)s' in pool '%(pool)s'. Details: %(err)s"), - "KCHVOL0024E": _("Specify chunk data and its size to upload a file."), - "KCHVOL0025E": _("In order to upload a storage volume, specify the 'upload' parameter."), - "KCHVOL0026E": _("Unable to upload chunk data as it does not match with requested chunk size."), - "KCHVOL0027E": _("The storage volume %(vol)s is not under an upload process."), - "KCHVOL0028E": _("The upload chunk data will exceed the storage volume size."), - "KCHVOL0029E": _("Unable to upload chunk data to storage volume. Details: %(err)s."), - - "KCHIFACE0001E": _("Interface %(name)s does not exist"), - "KCHIFACE0002E": _("Failed to list interfaces. Invalid _inuse parameter. Supported options for _inuse are: %(supported_inuse)s"), - - "KCHNET0001E": _("Network %(name)s already exists"), - "KCHNET0002E": _("Network %(name)s does not exist"), - "KCHNET0003E": _("Subnet %(subnet)s specified for network %(network)s is not valid."), - "KCHNET0004E": _("Specify a network interface to create bridged or macvtap networks."), - "KCHNET0005E": _("Unable to delete or update active network %(name)s"), - "KCHNET0006E": _("Interface %(iface)s specified for network %(network)s is already in use"), - "KCHNET0007E": _("Interface should be bare NIC, bonding or bridge device."), - "KCHNET0008E": _("Unable to create or update network %(name)s. 
Details: %(err)s"), - "KCHNET0009E": _("Unable to find a free IP address for network '%(name)s'"), - "KCHNET0010E": _("The interface %(iface)s already exists."), - "KCHNET0011E": _("Network name must be a string without slashes (/) or quotes (\")"), - "KCHNET0012E": _("Supported network types are isolated, NAT, macvtap, bridge, vepa and passthrough."), - "KCHNET0013E": _("Network subnet must be a string with IP address and prefix or netmask"), - "KCHNET0014E": _("Network interfaces must be an array."), - "KCHNET0015E": _("Network VLAN ID must be an integer between 1 and 4094"), - "KCHNET0016E": _("Specify name and type to create a Network"), - "KCHNET0017E": _("Unable to delete or update network %(name)s as it is linked to some virtual machines (%(vms)s) and/or templates (%(tmpls)s)."), - "KCHNET0018E": _("Unable to deactivate network %(name)s as it is linked to are some virtual machines (%(vms)s) and/or templates (%(tmpls)s)."), - "KCHNET0019E": _("Bridge device %(name)s can not be the trunk device of a VLAN."), - "KCHNET0020E": _("Failed to activate interface %(iface)s: %(err)s."), - "KCHNET0021E": _("Failed to activate interface %(iface)s. Please check the physical link status."), - "KCHNET0022E": _("Failed to start network %(name)s. Details: %(err)s"), - "KCHNET0024E": _("Unable to redefine interface %(name)s. Details: %(err)s"), - "KCHNET0025E": _("Unable to create bridge %(name)s. Details: %(err)s"), - "KCHNET0027E": _("Unable to create bridge with NetworkManager enabled. Disable it and try again."), - "KCHNET0028E": _("Interface should be bare NIC or bonding."), - "KCHNET0029E": _("Network interfaces parameter must contain at least one interface."), - "KCHNET0030E": _("Only one interface is allowed for 'bridge' and 'macvtap' networks."), - "KCHNET0031E": _("Subnet is not a valid parameter for this type of virtual network."), - "KCHNET0032E": _("VLAN ID and interfaces are not valid parameters for this type of virtual network."), - - "KCHSR0001E": _("Storage server %(server)s was not used by Kimchi"), - - "KCHDISTRO0001E": _("Distro '%(name)s' does not exist"), - - "KCHHOST0003E": _("Node device '%(name)s' not found"), - "KCHHOST0004E": _("Conflicting flag filters specified."), - - "KCHUTILS0003E": _("Unable to choose a virtual machine name"), - "KCHUTILS0006E": _("Cannot upgrade objectstore data."), - - "KCHVMSTOR0002E": _("Invalid storage type. 
Types supported: 'cdrom', 'disk'"), - "KCHVMSTOR0003E": _("The path '%(value)s' is not a valid local/remote path for the device"), - "KCHVMSTOR0006E": _("Only CDROM path can be update."), - "KCHVMSTOR0007E": _("The storage device %(dev_name)s does not exist in the virtual machine %(vm_name)s"), - "KCHVMSTOR0008E": _("Error while creating new storage device: %(error)s"), - "KCHVMSTOR0009E": _("Error while updating storage device: %(error)s"), - "KCHVMSTOR0010E": _("Error while removing storage device: %(error)s"), - "KCHVMSTOR0011E": _("Do not support IDE device hot plug"), - "KCHVMSTOR0012E": _("Specify type and path or type and pool/volume to add a new virtual machine disk"), - "KCHVMSTOR0013E": _("Specify path to update virtual machine disk"), - "KCHVMSTOR0014E": _("Controller type %(type)s limitation of %(limit)s devices reached"), - "KCHVMSTOR0015E": _("Cannot retrieve disk path information for given pool/volume: %(error)s"), - "KCHVMSTOR0016E": _("Volume already in use by other virtual machine."), - "KCHVMSTOR0017E": _("Only one of path or pool/volume can be specified to add a new virtual machine disk"), - "KCHVMSTOR0018E": _("Volume chosen with format %(format)s does not fit in the storage type %(type)s"), - "KCHVMSTOR0019E": _("On s390x arch one of pool, path of dir_path must be specified"), - "KCHVMSTOR0020E": _("On s390x arch 'format' must be specified while attaching disk to virtual machine"), - "KCHVMSTOR0021E": _("Virtual disk already exists on the system: %(disk_path)s"), - - "KCHSNAP0002E": _("Unable to create snapshot '%(name)s' on virtual machine '%(vm)s'. Details: %(err)s"), - "KCHSNAP0003E": _("Snapshot '%(name)s' does not exist on virtual machine '%(vm)s'."), - "KCHSNAP0004E": _("Unable to retrieve snapshot '%(name)s' on virtual machine '%(vm)s'. Details: %(err)s"), - "KCHSNAP0005E": _("Unable to list snapshots on virtual machine '%(vm)s'. Details: %(err)s"), - "KCHSNAP0006E": _("Unable to delete snapshot '%(name)s' on virtual machine '%(vm)s'. Details: %(err)s"), - "KCHSNAP0008E": _("Unable to retrieve current snapshot of virtual machine '%(vm)s'. Details: %(err)s"), - "KCHSNAP0009E": _("Unable to revert virtual machine '%(vm)s' to snapshot '%(name)s'. Details: %(err)s"), - "KCHSNAP0010E": _("Unable to create snapshot of virtual machine '%(vm)s' because it contains a disk with format '%(format)s'; only 'qcow2' is supported."), - - "KCHCPUINF0001E": _("The number of vCPUs must be less than or equal the maximum number of vCPUs specified."), - "KCHCPUINF0002E": _("When CPU topology is defined, maximum number of vCPUs must be a product of sockets, cores, and threads."), - "KCHCPUINF0003E": _("This host (or current configuration) does not allow CPU topology."), - "KCHCPUINF0004E": _("The maximum number of vCPUs is too large for this system."), - "KCHCPUINF0005E": _("When CPU topology is defined, CPUs must be a multiple of the 'threads' number defined."), - "KCHCPUINF0007E": _("When CPU topology is specified, sockets, cores and threads are required paramaters."), - "KCHCPUINF0008E": _("Parameter 'cpu_info' expects an object with fields among: 'vcpus', 'maxvcpus', 'topology'."), - "KCHCPUINF0009E": _("Parameter 'topology' expects an object with fields among: 'sockets', 'cores', 'threads'."), - - "KCHCPUHOTP0001E": _("Unable to update Max CPU or CPU topology when guest is running."), - "KCHCPUHOTP0002E": _("Unable to hot plug/unplug CPUs. 
Details: %(err)s"), - - "KCHLVMS0001E": _("Invalid volume group name parameter: %(name)s."), - - "KCHCONN0001E": _("Unable to establish connection with libvirt. Please check your libvirt URI which is often defined in /etc/libvirt/libvirt.conf"), - "KCHCONN0002E": _("Libvirt service is not active. Please start the libvirt service in your host system."), - - "KCHEVENT0001E": _("Failed to register the default event implementation."), - "KCHEVENT0002E": _("Failed to register timeout event."), - "KCHEVENT0003E": _("Failed to Run the default event implementation."), - "KCHEVENT0004W": _("I/O error on guest '%(vm)s': storage pool out of space for %(devAlias)s (%(srcPath)s)."), + 'KCHVMHDEV0004E': _('"name" should be a device name string'), + 'KCHVMHDEV0005E': _('The device %(name)s is probably in use by the host. Unable to attach it to the guest.'), + 'KCHVMHDEV0006E': _('Hot-(un)plug of device %(name)s is not supported.'), + 'KCHVMHDEV0007E': _('Failed to attach %(device)s to %(vm)s'), + 'KCHVMHDEV0008E': _('VM %(vmid)s does not have a USB controller to accept PCI hotplug.'), + + 'KCHVMIF0001E': _('Interface %(iface)s does not exist in virtual machine %(name)s'), + 'KCHVMIF0002E': _('Network %(network)s specified for virtual machine %(name)s does not exist'), + 'KCHVMIF0004E': _('Supported virtual machine interfaces type are network, ovs and macvtap.Type ovs and macvtap are only supported for s390x/s390 architecture.'), + 'KCHVMIF0005E': _('Network name for virtual machine interface must be a string'), + 'KCHVMIF0006E': _('Invalid network model card specified for virtual machine interface'), + 'KCHVMIF0007E': _('Specify type and network to add a new virtual machine interface'), + 'KCHVMIF0008E': _('MAC Address must respect this format FF:FF:FF:FF:FF:FF'), + 'KCHVMIF0009E': _('MAC Address %(mac)s already exists in virtual machine %(name)s'), + 'KCHVMIF0010E': _('Invalid MAC Address'), + 'KCHVMIF0011E': _('Cannot change MAC address of a running virtual machine'), + 'KCHVMIF0012E': _('Type macvtap and ovs are only supported on s390x/s390 architecture.'), + 'KCHVMIF0013E': _('Source attribute is only supported on s390x/s390 architecture.'), + 'KCHVMIF0014E': _('If source is provided, only type supported are macvtap and ovs.'), + 'KCHVMIF0015E': _('For type macvtap and ovs, source has to be provided'), + 'KCHVMIF0016E': _('Source name for virtual machine interface must be string'), + 'KCHVMIF0017E': _('Invalid source mode. 
Valid options are: bridge or vepa.'), + + + 'KCHTMPL0001E': _('Template %(name)s already exists'), + 'KCHTMPL0002E': _('Source media %(path)s not found'), + 'KCHTMPL0003E': _("Network '%(network)s' specified for template %(template)s does not exist"), + 'KCHTMPL0004E': _('Storage pool %(pool)s specified for template %(template)s does not exist'), + 'KCHTMPL0006E': _("Invalid parameter '%(param)s' specified for CDROM."), + 'KCHTMPL0007E': _('Network %(network)s specified for template %(template)s is not active'), + 'KCHTMPL0008E': _('Template name must be a string'), + 'KCHTMPL0009E': _('Template icon must be a path to the image'), + 'KCHTMPL0010E': _('Template distribution must be a string'), + 'KCHTMPL0011E': _('Template distribution version must be a string'), + 'KCHTMPL0012E': _('The number of CPUs must be an integer greater than 0'), + 'KCHTMPL0013E': _('Amount of memory and maximum memory (MB) must be an integer greater than 512'), + 'KCHTMPL0014E': _('Template CDROM must be a local or remote ISO file'), + 'KCHTMPL0015E': _('Invalid storage pool URI %(value)s specified for template'), + 'KCHTMPL0016E': _('Specify a path to source media (ISO, disk or remote ISO) to create a template'), + 'KCHTMPL0017E': _('All networks for the template must be specified in a list.'), + 'KCHTMPL0018E': _('Specify a volume to a template when storage pool is iSCSI or SCSI'), + 'KCHTMPL0019E': _('The volume %(volume)s is not in storage pool %(pool)s'), + 'KCHTMPL0020E': _('Unable to create template due error: %(err)s'), + 'KCHTMPL0021E': _('Unable to delete template due error: %(err)s'), + 'KCHTMPL0022E': _('Disk size must be an integer greater than 1GB.'), + 'KCHTMPL0024E': _('Cannot identify base image %(path)s format'), + 'KCHTMPL0026E': _('When specifying CPU topology, each element must be an integer greater than zero.'), + 'KCHTMPL0027E': _('Invalid disk image format. Valid formats: qcow, qcow2, qed, raw, vmdk, vpc.'), + 'KCHTMPL0028E': _("When setting template disks, following parameters are required: 'index', 'pool name', 'format', 'size' or 'volume' (for scsi/iscsi pools)"), + 'KCHTMPL0029E': _("Disk format must be 'raw', for logical, iscsi, and scsi pools."), + 'KCHTMPL0030E': _("Memory expects an object with one or both parameters: 'current' and 'maxmemory'"), + 'KCHTMPL0031E': _('Memory value (%(mem)sMiB) must be equal or lesser than maximum memory value (%(maxmem)sMiB)'), + 'KCHTMPL0032E': _('Unable to update template due error: %(err)s'), + 'KCHTMPL0033E': _("Parameter 'disks' requires at least one disk object"), + 'KCHTMPL0034E': _("Invalid interface type. Type should be 'macvtap' for host network interface (Ethernet, Bond, VLAN) to be connected as direct MacVTap or 'ovs' for openvswitch host network interface to be connected as virtual switch to a VM."), + 'KCHTMPL0035E': _('Interface name should be string.'), + 'KCHTMPL0036E': _('Invalid interface mode. Valid options are: bridge or vepa.'), + 'KCHTMPL0037E': _("Interfaces should be list of interfaces. Each interface should have name, type and mode(optional, only applicable for interfcae type 'macvtap'."), + 'KCHTMPL0038E': _("Interface expects an object with parameters: 'name', 'type' and 'mode'. Name should be name of host network interface (Ethernet, Bond, VLAN) for type 'macvtap' or the name of host openvswitch bridge interface for type 'ovs'. 
Mode (optional) is only applicable for interface type 'macvtap' to indicates whether packets will be delivered directly to target device (bridge) or to the external bridge (vepa-capable bridge)."), + 'KCHTMPL0039E': _('Interfaces parameter only supported on s390x or s390 architecture.'), + 'KCHTMPL0040E': _('Storage without libvirt pool is not supported on this architecture'), + 'KCHTMPL0041E': _('Error while creating the virtual disk for the guest. Details: %(err)s'), + 'KCHTMPL0042E': _("When setting template disks without libvirt, following parameters are required: 'index', 'format', 'path', 'size'"), + 'KCHTMPL0043E': _('console parameter is only supported for s390x/s390 architecture.'), + 'KCHTMPL0044E': _('invalid console type, supported types are sclp/virtio.'), + + 'KCHPOOL0001E': _('Storage pool %(name)s already exists'), + 'KCHPOOL0002E': _('Storage pool %(name)s does not exist'), + 'KCHPOOL0004E': _('Specify %(item)s in order to create the storage pool %(name)s'), + 'KCHPOOL0005E': _('Unable to delete active storage pool %(name)s'), + 'KCHPOOL0006E': _('Unable to list storage pools. Details: %(err)s'), + 'KCHPOOL0007E': _('Unable to create storage pool %(name)s. Details: %(err)s'), + 'KCHPOOL0009E': _('Unable to activate storage pool %(name)s. Details: %(err)s'), + 'KCHPOOL0010E': _('Unable to deactivate storage pool %(name)s. Details: %(err)s'), + 'KCHPOOL0011E': _('Unable to delete storage pool %(name)s. Details: %(err)s'), + 'KCHPOOL0012E': _('Unable to create NFS Pool as export path %(path)s may block during mount'), + 'KCHPOOL0013E': _('Unable to create NFS Pool as export path %(path)s mount failed'), + 'KCHPOOL0014E': _('Unsupported storage pool type: %(type)s'), + 'KCHPOOL0015E': _('Error while retrieving storage pool XML to %(pool)s'), + 'KCHPOOL0016E': _('Storage pool name must be a string without slashes (/)'), + 'KCHPOOL0017E': _('Supported storage pool types are dir, netfs, logical, iscsi, isci and kimchi-iso'), + 'KCHPOOL0018E': _('Storage pool path must be a string'), + 'KCHPOOL0019E': _('Storage pool host must be a IP or hostname'), + 'KCHPOOL0020E': _('Storage pool device must be the absolute path to the block device'), + 'KCHPOOL0021E': _('Storage pool devices parameter must be a list'), + 'KCHPOOL0022E': _('Target IQN of an iSCSI pool must be a string'), + 'KCHPOOL0023E': _('Port of a remote storage server must be an integer between 1 and 65535'), + 'KCHPOOL0024E': _('iSCSI target username must be a string'), + 'KCHPOOL0025E': _('iSCSI target password must be a string'), + 'KCHPOOL0026E': _('Specify name and type to create a storage pool'), + 'KCHPOOL0027E': _('%(disk)s is not a valid disk/partition. Could not add it to the pool %(pool)s.'), + 'KCHPOOL0028E': _('Unable to extend logical pool %(pool)s. Details: %(err)s'), + 'KCHPOOL0029E': _('The parameter disks only can be updated for logical storage pool.'), + 'KCHPOOL0030E': _('The SCSI host adapter name must be a string.'), + 'KCHPOOL0031E': _('The storage pool kimchi_isos is reserved for internal use'), + 'KCHPOOL0032E': _('Unable to activate NFS storage pool %(name)s. NFS server %(server)s is unreachable.'), + 'KCHPOOL0033E': _('Unable to deactivate NFS storage pool %(name)s. NFS server %(server)s is unreachable.'), + 'KCHPOOL0034E': _('Unable to deactivate pool %(name)s as it is associated with some templates'), + 'KCHPOOL0035E': _('Unable to delete pool %(name)s as it is associated with some templates'), + 'KCHPOOL0036E': _("A volume group named '%(name)s' already exists. 
Please, choose another name to create the logical pool."), + 'KCHPOOL0037E': _('Unable to update database with deep scan information due error: %(err)s'), + 'KCHPOOL0038E': _("No volume group '%(name)s' found. Please, specify an existing volume group to create the logical pool from."), + 'KCHPOOL0039E': _('Unable to delete pool %(name)s as it is associated with guests: %(vms)s'), + + 'KCHVOL0001E': _('Storage volume %(name)s already exists'), + 'KCHVOL0002E': _('Storage volume %(name)s does not exist in storage pool %(pool)s'), + 'KCHVOL0003E': _('Unable to create storage volume %(volume)s because storage pool %(pool)s is not active'), + 'KCHVOL0004E': _('Specify %(item)s in order to create storage volume %(volume)s'), + 'KCHVOL0006E': _('Unable to list storage volumes because storage pool %(pool)s is not active'), + 'KCHVOL0007E': _('Unable to create storage volume %(name)s in storage pool %(pool)s. Details: %(err)s'), + 'KCHVOL0009E': _('Unable to wipe storage volumes %(name)s. Details: %(err)s'), + 'KCHVOL0010E': _('Unable to delete storage volume %(name)s. Details: %(err)s'), + 'KCHVOL0011E': _('Unable to resize storage volume %(name)s. Details: %(err)s'), + 'KCHVOL0012E': _('Storage type %(type)s does not support volume create and delete'), + 'KCHVOL0013E': _('Storage volume name must be a string'), + 'KCHVOL0014E': _('Storage volume allocation must be an integer number'), + 'KCHVOL0015E': _('Storage volume format not supported. Valid formats: qcow, qcow2, qed, raw, vmdk, vpc.'), + 'KCHVOL0016E': _('Storage volume requires a volume name'), + 'KCHVOL0017E': _('Unable to update database with storage volume information due error: %(err)s'), + 'KCHVOL0018E': _('Only one of parameter %(param)s can be specified'), + 'KCHVOL0019E': _('Create volume from %(param)s is not supported'), + 'KCHVOL0020E': _('Storage volume capacity must be an integer number.'), + 'KCHVOL0021E': _('Storage volume URL must be http://, https://, ftp:// or ftps://.'), + 'KCHVOL0022E': _('Unable to access file %(url)s. Please, check it.'), + 'KCHVOL0023E': _("Unable to clone storage volume '%(name)s' in pool '%(pool)s'. Details: %(err)s"), + 'KCHVOL0024E': _('Specify chunk data and its size to upload a file.'), + 'KCHVOL0025E': _("In order to upload a storage volume, specify the 'upload' parameter."), + 'KCHVOL0026E': _('Unable to upload chunk data as it does not match with requested chunk size.'), + 'KCHVOL0027E': _('The storage volume %(vol)s is not under an upload process.'), + 'KCHVOL0028E': _('The upload chunk data will exceed the storage volume size.'), + 'KCHVOL0029E': _('Unable to upload chunk data to storage volume. Details: %(err)s.'), + + 'KCHIFACE0001E': _('Interface %(name)s does not exist'), + 'KCHIFACE0002E': _('Failed to list interfaces. Invalid _inuse parameter. Supported options for _inuse are: %(supported_inuse)s'), + + 'KCHNET0001E': _('Network %(name)s already exists'), + 'KCHNET0002E': _('Network %(name)s does not exist'), + 'KCHNET0003E': _('Subnet %(subnet)s specified for network %(network)s is not valid.'), + 'KCHNET0004E': _('Specify a network interface to create bridged or macvtap networks.'), + 'KCHNET0005E': _('Unable to delete or update active network %(name)s'), + 'KCHNET0006E': _('Interface %(iface)s specified for network %(network)s is already in use'), + 'KCHNET0007E': _('Interface should be bare NIC, bonding or bridge device.'), + 'KCHNET0008E': _('Unable to create or update network %(name)s. 
Details: %(err)s'), + 'KCHNET0009E': _("Unable to find a free IP address for network '%(name)s'"), + 'KCHNET0010E': _('The interface %(iface)s already exists.'), + 'KCHNET0011E': _("Network name must be a string without slashes (/) or quotes (\")"), + 'KCHNET0012E': _('Supported network types are isolated, NAT, macvtap, bridge, vepa and passthrough.'), + 'KCHNET0013E': _('Network subnet must be a string with IP address and prefix or netmask'), + 'KCHNET0014E': _('Network interfaces must be an array.'), + 'KCHNET0015E': _('Network VLAN ID must be an integer between 1 and 4094'), + 'KCHNET0016E': _('Specify name and type to create a Network'), + 'KCHNET0017E': _('Unable to delete or update network %(name)s as it is linked to some virtual machines (%(vms)s) and/or templates (%(tmpls)s).'), + 'KCHNET0018E': _('Unable to deactivate network %(name)s as it is linked to are some virtual machines (%(vms)s) and/or templates (%(tmpls)s).'), + 'KCHNET0019E': _('Bridge device %(name)s can not be the trunk device of a VLAN.'), + 'KCHNET0020E': _('Failed to activate interface %(iface)s: %(err)s.'), + 'KCHNET0021E': _('Failed to activate interface %(iface)s. Please check the physical link status.'), + 'KCHNET0022E': _('Failed to start network %(name)s. Details: %(err)s'), + 'KCHNET0024E': _('Unable to redefine interface %(name)s. Details: %(err)s'), + 'KCHNET0025E': _('Unable to create bridge %(name)s. Details: %(err)s'), + 'KCHNET0027E': _('Unable to create bridge with NetworkManager enabled. Disable it and try again.'), + 'KCHNET0028E': _('Interface should be bare NIC or bonding.'), + 'KCHNET0029E': _('Network interfaces parameter must contain at least one interface.'), + 'KCHNET0030E': _("Only one interface is allowed for 'bridge' and 'macvtap' networks."), + 'KCHNET0031E': _('Subnet is not a valid parameter for this type of virtual network.'), + 'KCHNET0032E': _('VLAN ID and interfaces are not valid parameters for this type of virtual network.'), + + 'KCHSR0001E': _('Storage server %(server)s was not used by Kimchi'), + + 'KCHDISTRO0001E': _("Distro '%(name)s' does not exist"), + + 'KCHHOST0003E': _("Node device '%(name)s' not found"), + 'KCHHOST0004E': _('Conflicting flag filters specified.'), + + 'KCHUTILS0003E': _('Unable to choose a virtual machine name'), + 'KCHUTILS0006E': _('Cannot upgrade objectstore data.'), + + 'KCHVMSTOR0002E': _("Invalid storage type. 
Types supported: 'cdrom', 'disk'"), + 'KCHVMSTOR0003E': _("The path '%(value)s' is not a valid local/remote path for the device"), + 'KCHVMSTOR0006E': _('Only CDROM path can be update.'), + 'KCHVMSTOR0007E': _('The storage device %(dev_name)s does not exist in the virtual machine %(vm_name)s'), + 'KCHVMSTOR0008E': _('Error while creating new storage device: %(error)s'), + 'KCHVMSTOR0009E': _('Error while updating storage device: %(error)s'), + 'KCHVMSTOR0010E': _('Error while removing storage device: %(error)s'), + 'KCHVMSTOR0011E': _('Do not support IDE device hot plug'), + 'KCHVMSTOR0012E': _('Specify type and path or type and pool/volume to add a new virtual machine disk'), + 'KCHVMSTOR0013E': _('Specify path to update virtual machine disk'), + 'KCHVMSTOR0014E': _('Controller type %(type)s limitation of %(limit)s devices reached'), + 'KCHVMSTOR0015E': _('Cannot retrieve disk path information for given pool/volume: %(error)s'), + 'KCHVMSTOR0016E': _('Volume already in use by other virtual machine.'), + 'KCHVMSTOR0017E': _('Only one of path or pool/volume can be specified to add a new virtual machine disk'), + 'KCHVMSTOR0018E': _('Volume chosen with format %(format)s does not fit in the storage type %(type)s'), + 'KCHVMSTOR0019E': _('On s390x arch one of pool, path of dir_path must be specified'), + 'KCHVMSTOR0020E': _("On s390x arch 'format' must be specified while attaching disk to virtual machine"), + 'KCHVMSTOR0021E': _('Virtual disk already exists on the system: %(disk_path)s'), + + 'KCHSNAP0002E': _("Unable to create snapshot '%(name)s' on virtual machine '%(vm)s'. Details: %(err)s"), + 'KCHSNAP0003E': _("Snapshot '%(name)s' does not exist on virtual machine '%(vm)s'."), + 'KCHSNAP0004E': _("Unable to retrieve snapshot '%(name)s' on virtual machine '%(vm)s'. Details: %(err)s"), + 'KCHSNAP0005E': _("Unable to list snapshots on virtual machine '%(vm)s'. Details: %(err)s"), + 'KCHSNAP0006E': _("Unable to delete snapshot '%(name)s' on virtual machine '%(vm)s'. Details: %(err)s"), + 'KCHSNAP0008E': _("Unable to retrieve current snapshot of virtual machine '%(vm)s'. Details: %(err)s"), + 'KCHSNAP0009E': _("Unable to revert virtual machine '%(vm)s' to snapshot '%(name)s'. Details: %(err)s"), + 'KCHSNAP0010E': _("Unable to create snapshot of virtual machine '%(vm)s' because it contains a disk with format '%(format)s'; only 'qcow2' is supported."), + + 'KCHCPUINF0001E': _('The number of vCPUs must be less than or equal the maximum number of vCPUs specified.'), + 'KCHCPUINF0002E': _('When CPU topology is defined, maximum number of vCPUs must be a product of sockets, cores, and threads.'), + 'KCHCPUINF0003E': _('This host (or current configuration) does not allow CPU topology.'), + 'KCHCPUINF0004E': _('The maximum number of vCPUs is too large for this system.'), + 'KCHCPUINF0005E': _("When CPU topology is defined, CPUs must be a multiple of the 'threads' number defined."), + 'KCHCPUINF0007E': _('When CPU topology is specified, sockets, cores and threads are required paramaters.'), + 'KCHCPUINF0008E': _("Parameter 'cpu_info' expects an object with fields among: 'vcpus', 'maxvcpus', 'topology'."), + 'KCHCPUINF0009E': _("Parameter 'topology' expects an object with fields among: 'sockets', 'cores', 'threads'."), + + 'KCHCPUHOTP0001E': _('Unable to update Max CPU or CPU topology when guest is running.'), + 'KCHCPUHOTP0002E': _('Unable to hot plug/unplug CPUs. 
Details: %(err)s'), + + 'KCHLVMS0001E': _('Invalid volume group name parameter: %(name)s.'), + + 'KCHCONN0001E': _('Unable to establish connection with libvirt. Please check your libvirt URI which is often defined in /etc/libvirt/libvirt.conf'), + 'KCHCONN0002E': _('Libvirt service is not active. Please start the libvirt service in your host system.'), + + 'KCHEVENT0001E': _('Failed to register the default event implementation.'), + 'KCHEVENT0002E': _('Failed to register timeout event.'), + 'KCHEVENT0003E': _('Failed to Run the default event implementation.'), + 'KCHEVENT0004W': _("I/O error on guest '%(vm)s': storage pool out of space for %(devAlias)s (%(srcPath)s)."), # These messages (ending with L) are for user log purposes - "KCHNET0001L": _("Create virtual network '%(name)s' type '%(connection)s'"), - "KCHNET0002L": _("Remove virtual network '%(ident)s'"), - "KCHNET0003L": _("Update virtual network '%(ident)s'"), - "KCHNET0004L": _("Activate virtual network '%(ident)s'"), - "KCHNET0005L": _("Deactivate virtual network '%(ident)s'"), - "KCHPOOL0001L": _("Create storage pool '%(name)s' type '%(type)s'"), - "KCHPOOL0002L": _("Remove storage pool '%(ident)s'"), - "KCHPOOL0003L": _("Update storage pool '%(ident)s'"), - "KCHPOOL0004L": _("Activate storage pool '%(ident)s'"), - "KCHPOOL0005L": _("Deactivate storage pool '%(ident)s'"), - "KCHSNAP0001L": _("Create snapshot '%(name)s' at guest '%(vm)s'"), - "KCHSNAP0002L": _("Remove snapshot '%(ident)s' from guest '%(vm)s'"), - "KCHSNAP0003L": _("Revert guest '%(vm)s' to snapshot '%(ident)s'"), - "KCHTMPL0001L": _("Create template '%(name)s'"), - "KCHTMPL0002L": _("Remove template '%(ident)s'"), - "KCHTMPL0003L": _("Update template '%(ident)s'"), - "KCHTMPL0004L": _("Clone template '%(ident)s'"), - "KCHVM0001L": _("Create guest '%(name)s' from template '%(template)s'"), - "KCHVM0002L": _("Remove guest '%(ident)s'"), - "KCHVM0003L": _("Edit guest '%(ident)s'"), - "KCHVM0004L": _("Start guest '%(ident)s'"), - "KCHVM0005L": _("Power off guest '%(ident)s'"), - "KCHVM0006L": _("Shutdown guest '%(ident)s'"), - "KCHVM0007L": _("Restart guest '%(ident)s'"), - "KCHVM0008L": _("Connect to guest '%(ident)s' through novnc/spice"), - "KCHVM0009L": _("Clone guest '%(ident)s'"), - "KCHVM0010L": _("Migrate guest '%(ident)s' to '%(remote_host)s'"), - "KCHVM0011L": _("Suspend guest '%(ident)s'"), - "KCHVM0012L": _("Resume guest '%(ident)s'"), - "KCHVM0013L": _("Connect to guest '%(ident)s' through serial"), - "KCHVMHDEV0001L": _("Attach host device '%(name)s' to guest '%(vmid)s'"), - "KCHVMHDEV0002L": _("Detach host device '%(ident)s' from guest '%(vmid)s'"), - "KCHVMIF0001L": _("Attach network interface '%(network)s' to guest '%(vm)s'"), - "KCHVMIF0002L": _("Detach network interface '%(ident)s' from guest '%(vm)s'"), - "KCHVMIF0003L": _("Update network interface '%(ident)s' at guest '%(vm)s'"), - "KCHVMSTOR0001L": _("Attach %(type)s storage '%(path)s' to guest '%(vm)s'"), - "KCHVMSTOR0002L": _("Remove storage '%(ident)s' from guest '%(vm)s'"), - "KCHVMSTOR0003L": _("Update storage '%(ident)s' at guest '%(vm)s'"), - "KCHVOL0001L": _("Create storage volume '%(name)s' at pool '%(pool)s'"), - "KCHVOL0002L": _("Remove storage volume '%(ident)s' from pool '%(pool)s'"), - "KCHVOL0003L": _("Update storage volume '%(ident)s' at pool '%(pool)s'"), - "KCHVOL0004L": _("Wipe storage volume '%(ident)s' off pool '%(pool)s'"), - "KCHVOL0005L": _("Resize storage volume '%(ident)s' at pool '%(pool)s' with size %(size)s"), - "KCHVOL0006L": _("Clone storage volume '%(ident)s' at 
pool '%(pool)s'"), + 'KCHNET0001L': _("Create virtual network '%(name)s' type '%(connection)s'"), + 'KCHNET0002L': _("Remove virtual network '%(ident)s'"), + 'KCHNET0003L': _("Update virtual network '%(ident)s'"), + 'KCHNET0004L': _("Activate virtual network '%(ident)s'"), + 'KCHNET0005L': _("Deactivate virtual network '%(ident)s'"), + 'KCHPOOL0001L': _("Create storage pool '%(name)s' type '%(type)s'"), + 'KCHPOOL0002L': _("Remove storage pool '%(ident)s'"), + 'KCHPOOL0003L': _("Update storage pool '%(ident)s'"), + 'KCHPOOL0004L': _("Activate storage pool '%(ident)s'"), + 'KCHPOOL0005L': _("Deactivate storage pool '%(ident)s'"), + 'KCHSNAP0001L': _("Create snapshot '%(name)s' at guest '%(vm)s'"), + 'KCHSNAP0002L': _("Remove snapshot '%(ident)s' from guest '%(vm)s'"), + 'KCHSNAP0003L': _("Revert guest '%(vm)s' to snapshot '%(ident)s'"), + 'KCHTMPL0001L': _("Create template '%(name)s'"), + 'KCHTMPL0002L': _("Remove template '%(ident)s'"), + 'KCHTMPL0003L': _("Update template '%(ident)s'"), + 'KCHTMPL0004L': _("Clone template '%(ident)s'"), + 'KCHVM0001L': _("Create guest '%(name)s' from template '%(template)s'"), + 'KCHVM0002L': _("Remove guest '%(ident)s'"), + 'KCHVM0003L': _("Edit guest '%(ident)s'"), + 'KCHVM0004L': _("Start guest '%(ident)s'"), + 'KCHVM0005L': _("Power off guest '%(ident)s'"), + 'KCHVM0006L': _("Shutdown guest '%(ident)s'"), + 'KCHVM0007L': _("Restart guest '%(ident)s'"), + 'KCHVM0008L': _("Connect to guest '%(ident)s' through novnc/spice"), + 'KCHVM0009L': _("Clone guest '%(ident)s'"), + 'KCHVM0010L': _("Migrate guest '%(ident)s' to '%(remote_host)s'"), + 'KCHVM0011L': _("Suspend guest '%(ident)s'"), + 'KCHVM0012L': _("Resume guest '%(ident)s'"), + 'KCHVM0013L': _("Connect to guest '%(ident)s' through serial"), + 'KCHVMHDEV0001L': _("Attach host device '%(name)s' to guest '%(vmid)s'"), + 'KCHVMHDEV0002L': _("Detach host device '%(ident)s' from guest '%(vmid)s'"), + 'KCHVMIF0001L': _("Attach network interface '%(network)s' to guest '%(vm)s'"), + 'KCHVMIF0002L': _("Detach network interface '%(ident)s' from guest '%(vm)s'"), + 'KCHVMIF0003L': _("Update network interface '%(ident)s' at guest '%(vm)s'"), + 'KCHVMSTOR0001L': _("Attach %(type)s storage '%(path)s' to guest '%(vm)s'"), + 'KCHVMSTOR0002L': _("Remove storage '%(ident)s' from guest '%(vm)s'"), + 'KCHVMSTOR0003L': _("Update storage '%(ident)s' at guest '%(vm)s'"), + 'KCHVOL0001L': _("Create storage volume '%(name)s' at pool '%(pool)s'"), + 'KCHVOL0002L': _("Remove storage volume '%(ident)s' from pool '%(pool)s'"), + 'KCHVOL0003L': _("Update storage volume '%(ident)s' at pool '%(pool)s'"), + 'KCHVOL0004L': _("Wipe storage volume '%(ident)s' off pool '%(pool)s'"), + 'KCHVOL0005L': _("Resize storage volume '%(ident)s' at pool '%(pool)s' with size %(size)s"), + 'KCHVOL0006L': _("Clone storage volume '%(ident)s' at pool '%(pool)s'"), } diff --git a/imageinfo.py b/imageinfo.py index 1328945fe..0a8e2fb4f 100644 --- a/imageinfo.py +++ b/imageinfo.py @@ -16,22 +16,24 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - import json import os import sys -from wok.exception import ImageFormatError, InvalidParameter, TimeoutExpired -from wok.utils import run_command, wok_log +from wok.exception import ImageFormatError +from wok.exception import InvalidParameter +from wok.exception import TimeoutExpired +from wok.utils import run_command +from wok.utils import 
wok_log def probe_img_info(path): - cmd = ["qemu-img", "info", "--output=json", path] + cmd = ['qemu-img', 'info', '--output=json', path] info = dict() try: out = run_command(cmd, 10)[0] except TimeoutExpired: - wok_log.warning("Cannot decide format of base img %s", path) + wok_log.warning('Cannot decide format of base img %s', path) return None info = json.loads(out) @@ -42,34 +44,37 @@ def probe_img_info(path): def probe_image(image_path): if not os.path.isfile(image_path): - raise InvalidParameter("KCHIMG0004E", {'filename': image_path}) + raise InvalidParameter('KCHIMG0004E', {'filename': image_path}) if not os.access(image_path, os.R_OK): - raise ImageFormatError("KCHIMG0003E", {'filename': image_path}) + raise ImageFormatError('KCHIMG0003E', {'filename': image_path}) try: import guestfs + g = guestfs.GuestFS(python_return_dict=True) g.add_drive_opts(image_path, readonly=1) g.launch() roots = g.inspect_os() except ImportError: - return ("unknown", "unknown") - except Exception, e: - raise ImageFormatError("KCHIMG0001E", {'err': str(e)}) + return ('unknown', 'unknown') + except Exception as e: + raise ImageFormatError('KCHIMG0001E', {'err': str(e)}) if len(roots) == 0: # If we are unable to detect the OS, still add the image # but make distro and vendor 'unknown' - return ("unknown", "unknown") + return ('unknown', 'unknown') for root in roots: - version = "%d.%d" % (g.inspect_get_major_version(root), - g.inspect_get_minor_version(root)) - distro = "%s" % (g.inspect_get_distro(root)) + version = '%d.%d' % ( + g.inspect_get_major_version(root), + g.inspect_get_minor_version(root), + ) + distro = '%s' % (g.inspect_get_distro(root)) return (distro, version) if __name__ == '__main__': - print probe_image(sys.argv[1]) + print(probe_image(sys.argv[1])) diff --git a/iscsi.py b/iscsi.py index 6683fb25c..b50cb22d0 100644 --- a/iscsi.py +++ b/iscsi.py @@ -16,16 +16,14 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301USA - import subprocess - from wok.exception import OperationFailed class TargetClient(object): def __init__(self, target, host, port=None, auth=None): - self.portal = host + ("" if port is None else ":%s" % port) + self.portal = host + ('' if port is None else ':%s' % port) self.target = target self.auth = auth self.targetCmd = ['iscsiadm', '--mode', 'node', '--targetname', @@ -53,7 +51,7 @@ def _run_cmd(self, cmd): out, err = iscsiadm.communicate() if iscsiadm.returncode != 0: msg_args = {'portal': self.portal, 'err': err} - raise OperationFailed("KCHISCSI0001E", msg_args) + raise OperationFailed('KCHISCSI0001E', msg_args) return out def _discover(self): @@ -64,7 +62,7 @@ def _discover(self): out, err = iscsiadm.communicate() if iscsiadm.returncode != 0: msg_args = {'portal': self.portal, 'err': err} - raise OperationFailed("KCHISCSI0001E", msg_args) + raise OperationFailed('KCHISCSI0001E', msg_args) return out def _run_op(self, op): diff --git a/isoinfo.py b/isoinfo.py index 6876672f9..37f44d790 100644 --- a/isoinfo.py +++ b/isoinfo.py @@ -16,7 +16,6 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - import contextlib import glob import os @@ -25,10 +24,10 @@ import stat import struct import sys -import urllib2 - +import urllib -from wok.exception 
import IsoFormatError, OperationFailed +from wok.exception import IsoFormatError +from wok.exception import OperationFailed from wok.plugins.kimchi.utils import check_url_path from wok.utils import wok_log @@ -43,86 +42,127 @@ # given a regular expression match on the volume id string # Regular Expression: A regex to match against the ISO Volume ID ## - ('openbsd', lambda m: m.group(2), - ('OpenBSD/(i386|amd64) (\d+\.\d+) Install CD')), - ('centos', lambda m: m.group(1), - ('CentOS[ _](\d+\.?\d?)[ _].+')), - ('windows', '2000', - ('W2AFPP|SP1AFPP|SP2AFPP|YRMAFPP|ZRMAFPP|W2AOEM|SP1AOEM|SP2AOEM' + - '|YRMAOEM|ZRMAOEM|W2ASEL|SP2ASEL|W2SFPP|SP1SFPP|SP2SFPP|YRMSFPP' + - '|ZRMSFPP|W2SOEM|W2SOEM|SP1SOEM|SP2SOEM|YRMSOEM|ZRMSOEM|W2SSEL' + - '|SP2SSEL|W2PFPP|SP1PFPP|SP2PFPP|YRMPFPP|ZRMPFPP|W2POEM|SP1POEM' + - '|SP2POEM|YRMPOEM|ZRMPOEM|W2PSEL|SP2PSEL|W2PCCP|WIN2000|W2K_SP4')), - ('windows', 'xp', - ('WXPFPP|WXHFPP|WXPCCP|WXHCCP|WXPOEM|WXHOEM|WXPVOL|WXPEVL|XRMPFPP' + - '|XRMHFPP|XRMPCCP|XRMHCCP|XRMPOEM|XRMHOEM|XRMPVOL|XRMSD2|X1APFPP' + - '|X1AHFPP|X1APCCP|X1APCCP|X1AHCCP|X1APOEM|X1AHOEM|X1APVOL|VRMPFPP' + - '|VRMHFPP|VRMPCCP|VRMHCCP|VRMPOEM|VRMHOEM|VRMPVOL|VRMSD2|VX2PFPP' + - '|VX2HFPP|VX2PCCP|VX2HCCP|VX2POEM|VX2HOEM|VX2PRMFPP|VX2PVOL|GRTMUPD' + - '|GRTMPFPP|GRTMPRMFPP|GRTMHFPP|GRTMHKFPP|GRTMHKNFPP|GRTMHRMFPP' + - '|GRTMPOEM|GRTMHOEM|GRTMPVOL|GRTMPKNVOL|GRTMPKVOL|GRTMPRMVOL' + - '|MX2PFPP|MRMSD2|ARMPXFPP|ARMPXCCP|ARMPXOEM|ARMPXVOL|AX2PXCFPP' + - '|AX2PXFPP|NRMPIFPP')), - ('windows', '2003', - ('ARMECHK|ARMEVOL|ARMSVOL|ARMWVOL|ARMEEVL|ARMSEVL|ARMWEVL|ARMEOEM' + - '|ARMDOEM|ARMSOEM|ARMWOEM|ARMEFPP|ARMDFPP|ARMSFPP|ARMWFPP|NRMECHK' + - '|NRMEVOL|NRMSVOL|NRMWVOL|NRMEEVL|NRMSEVL|NRMWEVL|NRMEOEM|NRMDOEM' + - '|NRMSOEM|NRMWOEM|NRMEFPP|NRMDFPP|NRMSFPP|NRMSFPP|CRMSVOL|CRMSXVOL' + - '|BRMEVOL|BX2DVOL|ARMEEVL|BRMEEVL|CR0SP2|ARMEICHK|ARMEIFPP|ARMEIEVL' + - '|ARMEIOEM|ARMDIOEM|ARMEXFPP|ARMDFPP|ARMSXFPP|CR0SPX2|NRMEICHK' + - '|NRMEIFPP|NRMDIFPP|NRMEIOEM|NRMDIOEM|NRMEIVOL|NRMEIEVL|BRMEXVOL' + - '|BX2DXVOL|ARMEIFPP|CR0SPI2')), - ('windows', '2003r2', - ('CRMEFPP|CRMSFPP|CR0SCD2|CR0ECD2|BX2SFPP|BX2EFPP|BRMECD2FRE' + - '|BRMSCD2FRE|CRMEXFPP|CRMSXFPP|CR0SCD2X|CR0ECD2X|BX2SXFPP|BX2EXFPP' + - '|BRMECD2XFRE|BRMSCD2XFRE|CRMDVOL|CRMDXVOL')), - ('windows', '2008', - ('KRTMSVOL|KRTMSCHK|KRMWVOL|KRMSVOL|KRTMSXVOL|KRTMSXCHK|KRMWXVOL' + - '|KRMSXVOL')), - ('windows', '2008r2', - ('GRMSXVOL|GRMSXFRER|GRMSHXVOL|GRMSIAIVOL|SRVHPCR2')), - ('windows', 'vista', - ('FB1EVOL|LRMCFRE|FRTMBVOL|FRMBVOL|FRMEVOL|FB1EXVOL|LRMCXFRE' + - '|FRTMBXVOL|FRMBXVOL|FRMEXVOL|LRMEVOL|LRMEXVOL')), - ('windows', '7', - ('GRMCULFRER|GSP1RMCNPRFRER|GSP1RMCNULFRER|GSP1RMCULFRER' + - '|GSP1RMCPRFRER|GRMCENVOL|GRMCNENVOL|GRMCPRFRER|GSP1RMCPRVOL' + - '|GRMCULXFRER|GSP1RMCPRXFRER|GSP1RMCNHPXFRER|GRMCHPXFRER|GRMCXCHK' + - '|GSP1RMCENXVOL|GRMCENXVOL|GRMCNENXVOL|GRMCPRXFRER|GSP1RMCPRXVOL')), - ('windows', '8', - ('HB1_CCPA_X86FRE|HRM_CCSA_X86FRE|HRM_CCSA_X86CHK|HRM_CCSNA_X86CHK' + - '|HRM_CCSNA_X86FRE|HRM_CENA_X86FREV|HRM_CENA_X86CHKV' + - '|HRM_CENNA_X86FREV|HRM_CENNA_X86CHKV|HRM_CPRA_X86FREV' + - '|HRM_CPRNA_X86FREV|HB1_CCPA_X64FRE|HRM_CCSA_X64FRE' + - '|HRM_CCSA_X64CHK|HRM_CCSNA_X64FRE|HRM_CCSNA_X64CHK' + - '|HRM_CENNA_X64FREV|HRM_CENNA_X64CHKV|HRM_CENA_X64FREV' + - '|HRM_CENA_X64CHKV|HRM_CPRA_X64FREV|HRM_CPRNA_X64FREV')), + ( + 'openbsd', + lambda m: m.group(2), + ('OpenBSD/(i386|amd64) (\\d+\\.\\d+) Install CD'), + ), + ('centos', lambda m: m.group(1), ('CentOS[ _](\\d+\\.?\\d?)[ _].+')), + ( + 'windows', + '2000', + ( + 
'W2AFPP|SP1AFPP|SP2AFPP|YRMAFPP|ZRMAFPP|W2AOEM|SP1AOEM|SP2AOEM' + + '|YRMAOEM|ZRMAOEM|W2ASEL|SP2ASEL|W2SFPP|SP1SFPP|SP2SFPP|YRMSFPP' + + '|ZRMSFPP|W2SOEM|W2SOEM|SP1SOEM|SP2SOEM|YRMSOEM|ZRMSOEM|W2SSEL' + + '|SP2SSEL|W2PFPP|SP1PFPP|SP2PFPP|YRMPFPP|ZRMPFPP|W2POEM|SP1POEM' + + '|SP2POEM|YRMPOEM|ZRMPOEM|W2PSEL|SP2PSEL|W2PCCP|WIN2000|W2K_SP4' + ), + ), + ( + 'windows', + 'xp', + ( + 'WXPFPP|WXHFPP|WXPCCP|WXHCCP|WXPOEM|WXHOEM|WXPVOL|WXPEVL|XRMPFPP' + + '|XRMHFPP|XRMPCCP|XRMHCCP|XRMPOEM|XRMHOEM|XRMPVOL|XRMSD2|X1APFPP' + + '|X1AHFPP|X1APCCP|X1APCCP|X1AHCCP|X1APOEM|X1AHOEM|X1APVOL|VRMPFPP' + + '|VRMHFPP|VRMPCCP|VRMHCCP|VRMPOEM|VRMHOEM|VRMPVOL|VRMSD2|VX2PFPP' + + '|VX2HFPP|VX2PCCP|VX2HCCP|VX2POEM|VX2HOEM|VX2PRMFPP|VX2PVOL|GRTMUPD' + + '|GRTMPFPP|GRTMPRMFPP|GRTMHFPP|GRTMHKFPP|GRTMHKNFPP|GRTMHRMFPP' + + '|GRTMPOEM|GRTMHOEM|GRTMPVOL|GRTMPKNVOL|GRTMPKVOL|GRTMPRMVOL' + + '|MX2PFPP|MRMSD2|ARMPXFPP|ARMPXCCP|ARMPXOEM|ARMPXVOL|AX2PXCFPP' + + '|AX2PXFPP|NRMPIFPP' + ), + ), + ( + 'windows', + '2003', + ( + 'ARMECHK|ARMEVOL|ARMSVOL|ARMWVOL|ARMEEVL|ARMSEVL|ARMWEVL|ARMEOEM' + + '|ARMDOEM|ARMSOEM|ARMWOEM|ARMEFPP|ARMDFPP|ARMSFPP|ARMWFPP|NRMECHK' + + '|NRMEVOL|NRMSVOL|NRMWVOL|NRMEEVL|NRMSEVL|NRMWEVL|NRMEOEM|NRMDOEM' + + '|NRMSOEM|NRMWOEM|NRMEFPP|NRMDFPP|NRMSFPP|NRMSFPP|CRMSVOL|CRMSXVOL' + + '|BRMEVOL|BX2DVOL|ARMEEVL|BRMEEVL|CR0SP2|ARMEICHK|ARMEIFPP|ARMEIEVL' + + '|ARMEIOEM|ARMDIOEM|ARMEXFPP|ARMDFPP|ARMSXFPP|CR0SPX2|NRMEICHK' + + '|NRMEIFPP|NRMDIFPP|NRMEIOEM|NRMDIOEM|NRMEIVOL|NRMEIEVL|BRMEXVOL' + + '|BX2DXVOL|ARMEIFPP|CR0SPI2' + ), + ), + ( + 'windows', + '2003r2', + ( + 'CRMEFPP|CRMSFPP|CR0SCD2|CR0ECD2|BX2SFPP|BX2EFPP|BRMECD2FRE' + + '|BRMSCD2FRE|CRMEXFPP|CRMSXFPP|CR0SCD2X|CR0ECD2X|BX2SXFPP|BX2EXFPP' + + '|BRMECD2XFRE|BRMSCD2XFRE|CRMDVOL|CRMDXVOL' + ), + ), + ( + 'windows', + '2008', + ( + 'KRTMSVOL|KRTMSCHK|KRMWVOL|KRMSVOL|KRTMSXVOL|KRTMSXCHK|KRMWXVOL' + + '|KRMSXVOL' + ), + ), + ('windows', '2008r2', ('GRMSXVOL|GRMSXFRER|GRMSHXVOL|GRMSIAIVOL|SRVHPCR2')), + ( + 'windows', + 'vista', + ( + 'FB1EVOL|LRMCFRE|FRTMBVOL|FRMBVOL|FRMEVOL|FB1EXVOL|LRMCXFRE' + + '|FRTMBXVOL|FRMBXVOL|FRMEXVOL|LRMEVOL|LRMEXVOL' + ), + ), + ( + 'windows', + '7', + ( + 'GRMCULFRER|GSP1RMCNPRFRER|GSP1RMCNULFRER|GSP1RMCULFRER' + + '|GSP1RMCPRFRER|GRMCENVOL|GRMCNENVOL|GRMCPRFRER|GSP1RMCPRVOL' + + '|GRMCULXFRER|GSP1RMCPRXFRER|GSP1RMCNHPXFRER|GRMCHPXFRER|GRMCXCHK' + + '|GSP1RMCENXVOL|GRMCENXVOL|GRMCNENXVOL|GRMCPRXFRER|GSP1RMCPRXVOL' + ), + ), + ( + 'windows', + '8', + ( + 'HB1_CCPA_X86FRE|HRM_CCSA_X86FRE|HRM_CCSA_X86CHK|HRM_CCSNA_X86CHK' + + '|HRM_CCSNA_X86FRE|HRM_CENA_X86FREV|HRM_CENA_X86CHKV' + + '|HRM_CENNA_X86FREV|HRM_CENNA_X86CHKV|HRM_CPRA_X86FREV' + + '|HRM_CPRNA_X86FREV|HB1_CCPA_X64FRE|HRM_CCSA_X64FRE' + + '|HRM_CCSA_X64CHK|HRM_CCSNA_X64FRE|HRM_CCSNA_X64CHK' + + '|HRM_CENNA_X64FREV|HRM_CENNA_X64CHKV|HRM_CENA_X64FREV' + + '|HRM_CENA_X64CHKV|HRM_CPRA_X64FREV|HRM_CPRNA_X64FREV' + ), + ), ('sles', '10', 'SLES10|SUSE-Linux-Enterprise-Server.001'), ('sles', '11', 'SUSE_SLES-11-0-0|SLE-11'), ('sles', '12', 'SLE-12'), - ('sles', lambda m: "11sp%s" % m.group(1), 'SLES-11-SP(\d+)'), - ('opensuse', lambda m: m.group(1), 'openSUSE[ -](\d+\.\d+)'), + ('sles', lambda m: '11sp%s' % m.group(1), 'SLES-11-SP(\\d+)'), + ('opensuse', lambda m: m.group(1), 'openSUSE[ -](\\d+\\.\\d+)'), ('opensuse', '11.1', 'SU1110.001'), - ('opensuse', '11.3', - 'openSUSE-DVD-i586-Build0702..001|openSUSE-DVD-x86_64.0702..001'), - ('opensuse', '11.4', - 'openSUSE-DVD-i586-Build0024|openSUSE-DVD-x86_640024'), - ('opensuse', '12.1', - 
'openSUSE-DVD-i586-Build0039|openSUSE-DVD-x86_640039'), - ('opensuse', '12.2', - 'openSUSE-DVD-i586-Build0167|openSUSE-DVD-x86_640167'), - ('opensuse', lambda m: m.group(1), 'openSUSE-Leap-(\d+\.\d+)'), + ( + 'opensuse', + '11.3', + 'openSUSE-DVD-i586-Build0702..001|openSUSE-DVD-x86_64.0702..001', + ), + ('opensuse', '11.4', 'openSUSE-DVD-i586-Build0024|openSUSE-DVD-x86_640024'), + ('opensuse', '12.1', 'openSUSE-DVD-i586-Build0039|openSUSE-DVD-x86_640039'), + ('opensuse', '12.2', 'openSUSE-DVD-i586-Build0167|openSUSE-DVD-x86_640167'), + ('opensuse', lambda m: m.group(1), 'openSUSE-Leap-(\\d+\\.\\d+)'), ('rhel', '4.8', 'RHEL/4-U8'), - ('rhel', lambda m: m.group(2), 'RHEL(-LE)?[_/-](\d+\.\d+)'), - ('debian', lambda m: m.group(1), 'Debian (\d+\.\d+)'), - ('ubuntu', lambda m: m.group(2), '[Uu]buntu(-Server)? (\d+\.\d+)'), - ('fedora', lambda m: m.group(1), 'Fedora-WS-[\D-]+-(\d+)'), - ('fedora', lambda m: m.group(1), 'Fedora-S-[\w-]+-(\d+)'), - ('fedora', lambda m: m.group(1), 'Fedora[ -](\d+)'), - ('fedora', lambda m: m.group(1), 'Fedora.*-(\d+)-'), - ('gentoo', lambda m: m.group(1), 'Gentoo Linux \w+ (\d+)'), + ('rhel', lambda m: m.group(2), 'RHEL(-LE)?[_/-](\\d+\\.\\d+)'), + ('debian', lambda m: m.group(1), 'Debian (\\d+\\.\\d+)'), + ('ubuntu', lambda m: m.group(2), '[Uu]buntu(-Server)? (\\d+\\.\\d+)'), + ('fedora', lambda m: m.group(1), 'Fedora-WS-[\\D-]+-(\\d+)'), + ('fedora', lambda m: m.group(1), 'Fedora-S-[\\w-]+-(\\d+)'), + ('fedora', lambda m: m.group(1), 'Fedora[ -](\\d+)'), + ('fedora', lambda m: m.group(1), 'Fedora.*-(\\d+)-'), + ('gentoo', lambda m: m.group(1), 'Gentoo Linux \\w+ (\\d+)'), ('powerkvm', 'live_cd', 'POWERKVM_LIVECD'), - ('arch', lambda m: m.group(1), 'ARCH_(\d+)'), + ('arch', lambda m: m.group(1), 'ARCH_(\\d+)'), ] @@ -136,17 +176,18 @@ class IsoImage(object): El-Torito specification: http://download.intel.com/support/motherboards/desktop/sb/specscdrom.pdf """ + SECTOR_SIZE = 2048 - VOL_DESC = struct.Struct("=B5sBB32s32s") - EL_TORITO_BOOT_RECORD = struct.Struct("=B5sB32s32sI") - EL_TORITO_VALIDATION_ENTRY = struct.Struct("=BBH24sHBB") - EL_TORITO_BOOT_ENTRY = struct.Struct("=BBHBBHL20x") + VOL_DESC = struct.Struct('=B5sBB32s32s') + EL_TORITO_BOOT_RECORD = struct.Struct('=B5sB32s32sI') + EL_TORITO_VALIDATION_ENTRY = struct.Struct('=BBH24sHBB') + EL_TORITO_BOOT_ENTRY = struct.Struct('=BBHBBHL20x') # Path table info starting in ISO9660 offset 132. We force little # endian byte order (the '<' sign) because Power systems can run on # both. # First int is path table size, next 4 bytes are discarded (it is # the same info but in big endian) and next int is the location. 
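That comment pins the layout down completely: little-endian byte order, a 32-bit unsigned integer for the path table size, four skipped bytes holding the big-endian copy, and a 32-bit unsigned integer for the location. A minimal sketch of unpacking those two fields with the format the comment describes (the helper name and the buffer argument are illustrative, not part of this change):

import struct

# '<' little endian, 'I' 32-bit unsigned path table size, '4x' skip the
# big-endian copy of the size, 'I' 32-bit unsigned path table location.
PATH_TABLE_INFO = struct.Struct('<I4xI')

def read_path_table_info(volume_descriptor):
    # volume_descriptor: raw primary volume descriptor bytes; the fields
    # start at offset 132, as the comment above notes.
    size, location = PATH_TABLE_INFO.unpack_from(volume_descriptor, 132)
    return size, location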
- PATH_TABLE_SIZE_LOC = struct.Struct(" self.get_host_max_vcpus(): - raise InvalidParameter("KCHCPUINF0004E") + raise InvalidParameter('KCHCPUINF0004E') if vcpus > maxvcpus: - raise InvalidParameter("KCHCPUINF0001E") + raise InvalidParameter('KCHCPUINF0001E') def get_host_max_vcpus(self): if ARCH == 'power': diff --git a/model/diskutils.py b/model/diskutils.py index a3162d74c..eaeae0b40 100644 --- a/model/diskutils.py +++ b/model/diskutils.py @@ -16,9 +16,10 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - -from wok.plugins.kimchi.model.vms import VMModel, VMsModel -from wok.plugins.kimchi.xmlutils.disk import get_vm_disk_info, get_vm_disks +from wok.plugins.kimchi.model.vms import VMModel +from wok.plugins.kimchi.model.vms import VMsModel +from wok.plugins.kimchi.xmlutils.disk import get_vm_disk_info +from wok.plugins.kimchi.xmlutils.disk import get_vm_disks """ diff --git a/model/featuretests.py b/model/featuretests.py index fca152d98..6549cb259 100644 --- a/model/featuretests.py +++ b/model/featuretests.py @@ -16,19 +16,20 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA +import platform +import subprocess import cherrypy import libvirt import lxml.etree as ET -import platform -import subprocess from lxml.builder import E +from wok.utils import run_command +from wok.utils import servermethod +from wok.utils import wok_log -from wok.utils import run_command, servermethod, wok_log - -FEATURETEST_VM_NAME = "FEATURETEST_VM" -FEATURETEST_POOL_NAME = "FEATURETEST_POOL" +FEATURETEST_VM_NAME = 'FEATURETEST_VM' +FEATURETEST_POOL_NAME = 'FEATURETEST_POOL' ISO_STREAM_XML = """ @@ -103,7 +104,6 @@ class FeatureTests(object): - @staticmethod def disable_libvirt_error_logging(): def libvirt_errorhandler(userdata, error): @@ -130,16 +130,19 @@ def libvirt_supports_iso_stream(conn, protocol): domain_type = 'test' if conn_type == 'test' else 'kvm' arch = 'i686' if conn_type == 'test' else platform.machine() arch = 'ppc64' if arch == 'ppc64le' else arch - xml = ISO_STREAM_XML % {'name': FEATURETEST_VM_NAME, - 'domain': domain_type, 'protocol': protocol, - 'arch': arch} + xml = ISO_STREAM_XML % { + 'name': FEATURETEST_VM_NAME, + 'domain': domain_type, + 'protocol': protocol, + 'arch': arch, + } try: FeatureTests.disable_libvirt_error_logging() dom = conn.defineXML(xml) dom.undefine() return True - except libvirt.libvirtError, e: - wok_log.error(e.message) + except libvirt.libvirtError as e: + wok_log.error(str(e)) return False finally: FeatureTests.enable_libvirt_error_logging() @@ -148,13 +151,14 @@ def libvirt_supports_iso_stream(conn, protocol): def libvirt_support_nfs_probe(conn): def _get_xml(): obj = E.source(E.host(name='127.0.0.1'), E.format(type='nfs')) - xml = ET.tostring(obj) + xml = ET.tostring(obj).decode('utf-8') return xml + try: FeatureTests.disable_libvirt_error_logging() conn.findStoragePoolSources('netfs', _get_xml(), 0) except libvirt.libvirtError as e: - wok_log.error(e.message) + wok_log.error(str(e)) if e.get_error_code() == 38: # if libvirt cannot find showmount, # it returns 38--general system call failure @@ -169,18 +173,22 @@ def _get_xml(): def qemu_supports_iso_stream(): host = cherrypy.server.socket_host port = cherrypy.server.socket_port - 
cmd = "qemu-io -r http://%s:%d/plugins/kimchi/images/icon-fedora.png \ - -c 'read -v 0 512'" % (host, port) - proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, - stderr=subprocess.PIPE, shell=True) + cmd = ( + "qemu-io -r http://%s:%d/plugins/kimchi/images/icon-fedora.png \ + -c 'read -v 0 512'" + % (host, port) + ) + proc = subprocess.Popen( + cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True + ) stdout, stderr = proc.communicate() return len(stderr) == 0 @staticmethod def libvirt_support_fc_host(conn): + pool = None try: FeatureTests.disable_libvirt_error_logging() - pool = None pool_xml = SCSI_FC_XML % {'name': FEATURETEST_POOL_NAME} pool = conn.storagePoolDefineXML(pool_xml, 0) except libvirt.libvirtError as e: @@ -196,13 +204,13 @@ def libvirt_support_fc_host(conn): def kernel_support_vfio(): out, err, rc = run_command(['modprobe', 'vfio-pci']) if rc != 0: - wok_log.warning("Unable to load Kernal module vfio-pci.") + wok_log.warning('Unable to load Kernal module vfio-pci.') return False return True @staticmethod def is_nm_running(): - '''Tries to determine whether NetworkManager is running.''' + """Tries to determine whether NetworkManager is running.""" out, err, rc = run_command(['nmcli', 'dev', 'status']) if rc != 0: @@ -212,10 +220,10 @@ def is_nm_running(): @staticmethod def has_mem_hotplug_support(conn): - ''' + """ A memory device can be hot-plugged or hot-unplugged since libvirt version 1.2.14. - ''' + """ # Libvirt < 1.2.14 does not support memory devices, so try to attach a # device. Then check if QEMU (>= 2.1) supports memory hotplug, starting # the guest These steps avoid errors with Libvirt 'test' driver for KVM @@ -227,15 +235,16 @@ def has_mem_hotplug_support(conn): dom = None try: FeatureTests.disable_libvirt_error_logging() - dom = conn.defineXML(MAXMEM_VM_XML % {'name': FEATURETEST_VM_NAME, - 'domain': domain_type, - 'arch': arch}) + dom = conn.defineXML( + MAXMEM_VM_XML + % {'name': FEATURETEST_VM_NAME, 'domain': domain_type, 'arch': arch} + ) dom.attachDeviceFlags(DEV_MEM_XML, libvirt.VIR_DOMAIN_MEM_CONFIG) dom.create() except libvirt.libvirtError: return False finally: - if (dom and dom.isActive() == 1): + if dom and dom.isActive() == 1: dom.destroy() dom is None or dom.undefine() FeatureTests.enable_libvirt_error_logging() diff --git a/model/groups.py b/model/groups.py index e67394722..641b197b8 100644 --- a/model/groups.py +++ b/model/groups.py @@ -16,7 +16,6 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - import grp from wok.config import config @@ -24,7 +23,7 @@ class GroupsModel(object): def __init__(self, **args): - auth_type = config.get("authentication", "method") + auth_type = config.get('authentication', 'method') for klass in GroupsModel.__subclasses__(): if auth_type == klass.auth_type: self.grp = klass(**args) @@ -47,7 +46,7 @@ def __init__(self, **kargs): def _get_list(self): return sorted([group.gr_name - for group in grp.getgrall()]) + for group in grp.getgrall()]) def _validate(self, gid): try: diff --git a/model/host.py b/model/host.py index 90834e37c..dd305190a 100644 --- a/model/host.py +++ b/model/host.py @@ -16,42 +16,40 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - -import libvirt 
import os from collections import defaultdict -from lxml import objectify +import libvirt +from lxml import objectify from wok.exception import InvalidParameter from wok.exception import NotFoundError -from wok.xmlutils.utils import xpath_get_text - from wok.plugins.kimchi import disks from wok.plugins.kimchi.model import hostdev from wok.plugins.kimchi.model.config import CapabilitiesModel -from wok.plugins.kimchi.model.vms import VMModel, VMsModel +from wok.plugins.kimchi.model.vms import VMModel +from wok.plugins.kimchi.model.vms import VMsModel +from wok.xmlutils.utils import xpath_get_text class DevicesModel(object): def __init__(self, **kargs): self.conn = kargs['conn'] self.caps = CapabilitiesModel(**kargs) - self.cap_map = \ - {'net': libvirt.VIR_CONNECT_LIST_NODE_DEVICES_CAP_NET, - 'pci': libvirt.VIR_CONNECT_LIST_NODE_DEVICES_CAP_PCI_DEV, - 'scsi': libvirt.VIR_CONNECT_LIST_NODE_DEVICES_CAP_SCSI, - 'scsi_host': libvirt.VIR_CONNECT_LIST_NODE_DEVICES_CAP_SCSI_HOST, - 'storage': libvirt.VIR_CONNECT_LIST_NODE_DEVICES_CAP_STORAGE, - 'usb_device': libvirt.VIR_CONNECT_LIST_NODE_DEVICES_CAP_USB_DEV, - 'usb': - libvirt.VIR_CONNECT_LIST_NODE_DEVICES_CAP_USB_INTERFACE} + self.cap_map = { + 'net': libvirt.VIR_CONNECT_LIST_NODE_DEVICES_CAP_NET, + 'pci': libvirt.VIR_CONNECT_LIST_NODE_DEVICES_CAP_PCI_DEV, + 'scsi': libvirt.VIR_CONNECT_LIST_NODE_DEVICES_CAP_SCSI, + 'scsi_host': libvirt.VIR_CONNECT_LIST_NODE_DEVICES_CAP_SCSI_HOST, + 'storage': libvirt.VIR_CONNECT_LIST_NODE_DEVICES_CAP_STORAGE, + 'usb_device': libvirt.VIR_CONNECT_LIST_NODE_DEVICES_CAP_USB_DEV, + 'usb': libvirt.VIR_CONNECT_LIST_NODE_DEVICES_CAP_USB_INTERFACE, + } # TODO: when no longer supporting Libvirt < 1.0.5 distros # (like RHEL6) remove this verification and insert the # key 'fc_host' with the libvirt variable in the hash # declaration above. 
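The verification the TODO points at is the try/except AttributeError that follows: on libvirt builds older than 1.0.5 the constant simply is not defined on the module. As an illustration only (not part of this change), the same probe can be written as a getattr fallback:

import libvirt

# None when the installed libvirt predates the fc_host capability flag
# (libvirt < 1.0.5), mirroring the try/except used below.
fc_host_cap = getattr(libvirt, 'VIR_CONNECT_LIST_NODE_DEVICES_CAP_FC_HOST', None)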
try: - self.cap_map['fc_host'] = \ - libvirt.VIR_CONNECT_LIST_NODE_DEVICES_CAP_FC_HOST + self.cap_map['fc_host'] = libvirt.VIR_CONNECT_LIST_NODE_DEVICES_CAP_FC_HOST except AttributeError: self.cap_map['fc_host'] = None @@ -67,23 +65,26 @@ def _get_unavailable_devices(self): except AttributeError: continue - vm_devs = [DeviceModel.deduce_dev_name(e, self.conn) - for e in hostDevices] + vm_devs = [DeviceModel.deduce_dev_name( + e, self.conn) for e in hostDevices] for dev in vm_devs: unavailable_devs.append(dev) return unavailable_devs - def get_list(self, _cap=None, _passthrough=None, - _passthrough_affected_by=None, - _available_only=None): + def get_list( + self, + _cap=None, + _passthrough=None, + _passthrough_affected_by=None, + _available_only=None, + ): if _passthrough_affected_by is not None: # _passthrough_affected_by conflicts with _cap and _passthrough if (_cap, _passthrough) != (None, None): - raise InvalidParameter("KCHHOST0004E") - return sorted( - self._get_passthrough_affected_devs(_passthrough_affected_by)) + raise InvalidParameter('KCHHOST0004E') + return sorted(self._get_passthrough_affected_devs(_passthrough_affected_by)) if _cap == 'fc_host': dev_names = self._get_devices_fc_host() @@ -93,15 +94,15 @@ def get_list(self, _cap=None, _passthrough=None, if _passthrough is not None and _passthrough.lower() == 'true': conn = self.conn.get() passthrough_names = [ - dev['name'] for dev in hostdev.get_passthrough_dev_infos(conn)] + dev['name'] for dev in hostdev.get_passthrough_dev_infos(conn) + ] dev_names = list(set(dev_names) & set(passthrough_names)) - if _available_only is not None and _available_only.lower() \ - == 'true': + if _available_only is not None and _available_only.lower() == 'true': unavailable_devs = self._get_unavailable_devices() - dev_names = [dev for dev in dev_names - if dev not in unavailable_devs] + dev_names = [ + dev for dev in dev_names if dev not in unavailable_devs] dev_names.sort() return dev_names @@ -155,7 +156,7 @@ def get_iommu_groups(self): try: devices = DevicesModel(conn=conn).get_list() - except: + except Exception: return iommu_groups for device in devices: @@ -172,7 +173,7 @@ def lookup(self, nodedev_name): conn = self.conn.get() try: dev = conn.nodeDeviceLookupByName(nodedev_name) - except: + except Exception: raise NotFoundError('KCHHOST0003E', {'name': nodedev_name}) info = hostdev.get_dev_info(dev) @@ -191,7 +192,7 @@ def is_device_3D_controller(self, info): with open(os.path.join(info['path'], 'class')) as f: pci_class = int(f.readline().strip(), 16) - except: + except Exception: return False if pci_class == 0x030200: @@ -231,7 +232,8 @@ def _deduce_dev_name_scsi(e): for field in ('bus', 'target', 'unit'): attrib[field] = DeviceModel._toint(e.source.address.attrib[field]) attrib['host'] = DeviceModel._toint( - e.source.adapter.attrib['name'][len('scsi_host'):]) + e.source.adapter.attrib['name'][len('scsi_host'):] + ) return 'scsi_%(host)d_%(bus)d_%(target)d_%(unit)d' % attrib @staticmethod @@ -249,7 +251,7 @@ def _deduce_dev_name_usb(e, conn): evendor = 0 eproduct = 0 else: - unknown_dev = 'usb_vendor_%s_product_%s' % (evendor, eproduct) + unknown_dev = f'usb_vendor_{evendor}_product_{eproduct}' try: ebus = DeviceModel._toint(e.source.address.attrib['bus']) @@ -258,7 +260,7 @@ def _deduce_dev_name_usb(e, conn): ebus = -1 edevice = -1 else: - unknown_dev = 'usb_bus_%s_device_%s' % (ebus, edevice) + unknown_dev = f'usb_bus_{ebus}_device_{edevice}' for usb_info in usb_infos: ivendor = DeviceModel._toint(usb_info['vendor']['id']) @@ 
-303,14 +305,16 @@ def __init__(self, **kargs): def lookup(self, name): def _format(vg): - return {'name': vg['vgname'], - 'size': vg['size'], - 'free': vg['free'], - 'pvs': [pv['pvname'] for pv in disks.pvs(vg['vgname'])], - 'lvs': [lv['lvname'] for lv in disks.lvs(vg['vgname'])]} + return { + 'name': vg['vgname'], + 'size': vg['size'], + 'free': vg['free'], + 'pvs': [pv['pvname'] for pv in disks.pvs(vg['vgname'])], + 'lvs': [lv['lvname'] for lv in disks.lvs(vg['vgname'])], + } vgs = [_format(vg) for vg in disks.vgs() if vg['vgname'] == name] if not vgs: - raise InvalidParameter("KCHLVMS0001E", {'name': name}) + raise InvalidParameter('KCHLVMS0001E', {'name': name}) return vgs[0] diff --git a/model/hostdev.py b/model/hostdev.py index 829e27d5f..0ebadf84f 100644 --- a/model/hostdev.py +++ b/model/hostdev.py @@ -16,16 +16,14 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - import os from pprint import pformat from pprint import pprint +from wok.plugins.kimchi.model.libvirtconnection import LibvirtConnection from wok.utils import wok_log from wok.xmlutils.utils import dictize -from wok.plugins.kimchi.model.libvirtconnection import LibvirtConnection - def _get_all_host_dev_infos(libvirt_conn): node_devs = libvirt_conn.listAllDevices(0) @@ -43,8 +41,11 @@ def _get_dev_info_tree(dev_infos): try: parent = devs[dev_info['parent']] except KeyError: - wok_log.error('Parent %s of device %s does not exist.', - dev_info['parent'], dev_info['name']) + wok_log.error( + 'Parent %s of device %s does not exist.', + dev_info['parent'], + dev_info['name'], + ) continue try: @@ -65,18 +66,19 @@ def _is_pci_qualified(pci_dev): with open(os.path.join(pci_dev['path'], 'class')) as f: pci_class = int(f.readline().strip(), 16) - if pci_class != 0x030200 and pci_class & 0xff0000 in blacklist_classes: + if pci_class != 0x030200 and pci_class & 0xFF0000 in blacklist_classes: return False return True def get_passthrough_dev_infos(libvirt_conn): - ''' Get devices eligible to be passed through to VM. ''' + """ Get devices eligible to be passed through to VM. """ def is_eligible(dev): - return dev['device_type'] in ('usb_device', 'scsi') or \ - (dev['device_type'] == 'pci' and _is_pci_qualified(dev)) + return dev['device_type'] in ('usb_device', 'scsi') or ( + dev['device_type'] == 'pci' and _is_pci_qualified(dev) + ) dev_infos = _get_all_host_dev_infos(libvirt_conn) @@ -99,8 +101,9 @@ def get_iommu_group(dev_info): try: parent_info = dev_dict[parent] except KeyError: - wok_log.error("Parent %s of device %s does not exist", - parent, dev_info['name']) + wok_log.error( + 'Parent %s of device %s does not exist', parent, dev_info['name'] + ) break try: @@ -119,9 +122,12 @@ def get_iommu_group(dev_info): if iommu_group is None: return [] - return [dev_info for dev_info in dev_infos - if dev_info['name'] != device_info['name'] and - get_iommu_group(dev_info) == iommu_group] + return [ + dev_info + for dev_info in dev_infos + if dev_info['name'] != device_info['name'] + and get_iommu_group(dev_info) == iommu_group + ] def _get_children_devices(dev_infos, device_info): @@ -161,7 +167,7 @@ def get_affected_passthrough_devices(libvirt_conn, passthrough_dev): def get_dev_info(node_dev): - ''' Parse the node device XML string into dict according to + """ Parse the node device XML string into dict according to http://libvirt.org/formatnode.html. 
scsi_generic is not documented in libvirt official website. Try to @@ -170,7 +176,7 @@ def get_dev_info(node_dev): scsi_target is not documented in libvirt official website. Try to parse scsi_target according to the libvirt commit db19834a0a. - ''' + """ xmlstr = node_dev.XMLDesc(0) info = dictize(xmlstr)['device'] dev_type = info['capability'].pop('type') @@ -179,21 +185,21 @@ def get_dev_info(node_dev): info.update(cap_dict) # parent device not found: set as None - info["parent"] = info.get("parent") + info['parent'] = info.get('parent') if dev_type in ('scsi', 'scsi_generic', 'scsi_target', 'system', 'usb'): return info if dev_type in ('net', 'pci', 'scsi_host', 'storage', 'usb_device'): - return globals()['_get_%s_dev_info' % dev_type](info) + return globals()[f'_get_{dev_type}_dev_info'](info) - wok_log.error("Unknown device type: %s", dev_type) + wok_log.error(f'Unknown device type: {dev_type}') return info def _get_net_dev_info(info): cap = info.pop('capability') - links = {"80203": "IEEE 802.3", "80211": "IEEE 802.11"} + links = {'80203': 'IEEE 802.3', '80211': 'IEEE 802.11'} link_raw = cap['type'] info['link_type'] = links.get(link_raw, link_raw) @@ -209,11 +215,16 @@ def _get_pci_dev_info(info): info[k]['description'] = description if 'path' not in info: # Old libvirt does not provide syspath info - info['path'] = \ - "/sys/bus/pci/devices/" \ - "%(domain)04x:%(bus)02x:%(slot)02x.%(function)01x" % { - 'domain': info['domain'], 'bus': info['bus'], - 'slot': info['slot'], 'function': info['function']} + info['path'] = ( + '/sys/bus/pci/devices/' + '%(domain)04x:%(bus)02x:%(slot)02x.%(function)01x' + % { + 'domain': info['domain'], + 'bus': info['bus'], + 'slot': info['slot'], + 'function': info['function'], + } + ) try: info['iommuGroup'] = int(info['iommuGroup']['number']) except KeyError: @@ -286,10 +297,10 @@ def _print_host_dev_tree(libvirt_conn): dev_infos = _get_all_host_dev_infos(libvirt_conn) root = _get_dev_info_tree(dev_infos) if root is None: - print "No device found" + print('No device found') return - print '-----------------' - print '\n'.join(_format_dev_node(root)) + print('-----------------') + print('\n'.join(_format_dev_node(root))) def _format_dev_node(node): @@ -305,7 +316,7 @@ def _format_dev_node(node): count = len(children) for i, child in enumerate(children): if count == 1: - lines.append(' \-----------------') + lines.append(' \\-----------------') else: lines.append(' +-----------------') clines = _format_dev_node(child) @@ -322,5 +333,5 @@ def _format_dev_node(node): if __name__ == '__main__': libvirt_conn = LibvirtConnection('qemu:///system').get() _print_host_dev_tree(libvirt_conn) - print 'Eligible passthrough devices:' + print('Eligible passthrough devices:') pprint(get_passthrough_dev_infos(libvirt_conn)) diff --git a/model/interfaces.py b/model/interfaces.py index 053207241..de089d9d9 100644 --- a/model/interfaces.py +++ b/model/interfaces.py @@ -16,15 +16,13 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - import ethtool - -from wok.exception import InvalidParameter, NotFoundError -from wok.stringutils import encode_value -from wok.utils import wok_log - +from wok.exception import InvalidParameter +from wok.exception import NotFoundError from wok.plugins.kimchi import network as netinfo from wok.plugins.kimchi.model.networks import NetworksModel +from wok.stringutils import 
encode_value +from wok.utils import wok_log class InterfacesModel(object): @@ -33,19 +31,26 @@ def __init__(self, **kargs): self.networks = NetworksModel(**kargs) def get_list(self, _inuse=None): - if _inuse == "true": - return list(set(netinfo.all_favored_interfaces()) & - set(self.networks.get_all_networks_interfaces())) - elif _inuse == "false": - return list(set(netinfo.all_favored_interfaces()) - - set(self.networks.get_all_networks_interfaces())) + if _inuse == 'true': + return list( + set(netinfo.all_favored_interfaces()) & + set(self.networks.get_all_networks_interfaces()) + ) + elif _inuse == 'false': + return list( + set(netinfo.all_favored_interfaces()) - + set(self.networks.get_all_networks_interfaces()) + ) elif _inuse is None: return list(set(netinfo.all_favored_interfaces())) else: - wok_log.error("Invalid filter _inuse. _inuse: %s. Supported" - " options are %s" % (_inuse, 'true/false')) - raise InvalidParameter("KCHIFACE0002E", - {'supported_inuse': ['true', 'false']}) + wok_log.error( + f'Invalid filter _inuse. _inuse: {_inuse}. Supported' + f' options are true/false' + ) + raise InvalidParameter( + 'KCHIFACE0002E', {'supported_inuse': ['true', 'false']} + ) class InterfaceModel(object): @@ -54,7 +59,7 @@ def __init__(self, **kargs): def lookup(self, name): if encode_value(name) not in map(encode_value, ethtool.get_devices()): - raise NotFoundError("KCHIFACE0001E", {'name': name}) + raise NotFoundError('KCHIFACE0001E', {'name': name}) ipaddr = '' netmask = '' @@ -66,16 +71,18 @@ def lookup(self, name): module = ethtool.get_module(encode_value(name)) flags = ethtool.get_flags(encode_value(name)) - status = 'up' if flags & (ethtool.IFF_RUNNING | ethtool.IFF_UP) \ - else 'down' + status = 'up' if flags & ( + ethtool.IFF_RUNNING | ethtool.IFF_UP) else 'down' except IOError: pass iface_type = netinfo.get_interface_type(name) - return {'name': name, - 'type': iface_type, - 'status': status, - 'ipaddr': ipaddr, - 'netmask': netmask, - 'module': module} + return { + 'name': name, + 'type': iface_type, + 'status': status, + 'ipaddr': ipaddr, + 'netmask': netmask, + 'module': module, + } diff --git a/model/libvirtconnection.py b/model/libvirtconnection.py index d7c74bc0d..d4ef79225 100644 --- a/model/libvirtconnection.py +++ b/model/libvirtconnection.py @@ -16,16 +16,15 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - -import libvirt import threading import time -from wok.model.notifications import add_notification, del_notification +import libvirt +from wok.model.notifications import add_notification +from wok.model.notifications import del_notification from wok.model.notifications import notificationsStore -from wok.utils import wok_log - from wok.plugins.kimchi.utils import is_libvirtd_up +from wok.utils import wok_log class LibvirtConnection(object): @@ -46,9 +45,18 @@ def get_wrappable_objects(self): when calling its methods. 
""" objs = [] - for name in ('virDomain', 'virDomainSnapshot', 'virInterface', - 'virNWFilter', 'virNetwork', 'virNodeDevice', 'virSecret', - 'virStoragePool', 'virStorageVol', 'virStream'): + for name in ( + 'virDomain', + 'virDomainSnapshot', + 'virInterface', + 'virNWFilter', + 'virNetwork', + 'virNodeDevice', + 'virSecret', + 'virStoragePool', + 'virStorageVol', + 'virStream', + ): try: attr = getattr(libvirt, name) except AttributeError: @@ -62,6 +70,7 @@ def get(self, conn_id=0): callable libvirt methods so we can catch connection errors and handle them by restarting the server. """ + def wrapMethod(f): def wrapper(*args, **kwargs): try: @@ -70,19 +79,22 @@ def wrapper(*args, **kwargs): except libvirt.libvirtError as e: edom = e.get_error_domain() ecode = e.get_error_code() - EDOMAINS = (libvirt.VIR_FROM_REMOTE, - libvirt.VIR_FROM_RPC) - ECODES = (libvirt.VIR_ERR_SYSTEM_ERROR, - libvirt.VIR_ERR_INTERNAL_ERROR, - libvirt.VIR_ERR_NO_CONNECT, - libvirt.VIR_ERR_INVALID_CONN) + EDOMAINS = (libvirt.VIR_FROM_REMOTE, libvirt.VIR_FROM_RPC) + ECODES = ( + libvirt.VIR_ERR_SYSTEM_ERROR, + libvirt.VIR_ERR_INTERNAL_ERROR, + libvirt.VIR_ERR_NO_CONNECT, + libvirt.VIR_ERR_INVALID_CONN, + ) if edom in EDOMAINS and ecode in ECODES: - wok_log.error('Connection to libvirt broken. ' - 'Recycling. ecode: %d edom: %d' % - (ecode, edom)) + wok_log.error( + 'Connection to libvirt broken. ' + 'Recycling. ecode: %d edom: %d' % (ecode, edom) + ) with LibvirtConnection._connectionLock: self._connections[conn_id] = None raise + wrapper.__name__ = f.__name__ wrapper.__doc__ = f.__doc__ return wrapper @@ -91,34 +103,17 @@ def wrapper(*args, **kwargs): wok_log.error('Libvirt service is not active.') add_notification('KCHCONN0002E', plugin_name='/plugins/kimchi') return None - elif (notificationsStore.get('KCHCONN0002E') is not None): + elif notificationsStore.get('KCHCONN0002E') is not None: try: del_notification('KCHCONN0002E') - except: + except Exception: # If notification was not found, just ignore pass with LibvirtConnection._connectionLock: conn = self._connections.get(conn_id) if not conn: - retries = 5 - while True: - retries = retries - 1 - try: - conn = libvirt.open(self.uri) - break - except libvirt.libvirtError: - wok_log.error('Unable to connect to libvirt.') - if not retries: - wok_log.error("Unable to establish connection " - "with libvirt. Please check " - "your libvirt URI which is often " - "defined in " - "/etc/libvirt/libvirt.conf") - add_notification("KCHCONN0001E", - plugin_name="/plugins/kimchi") - return None - time.sleep(2) + conn = self._get_new_connection() for name in dir(libvirt.virConnect): method = getattr(conn, name) @@ -139,6 +134,28 @@ def wrapper(*args, **kwargs): # hosts which are hosting a lot of virtual machines return conn + def _get_new_connection(self): + retries = 5 + while True: + retries = retries - 1 + try: + return libvirt.open(self.uri) + except libvirt.libvirtError: + wok_log.error('Unable to connect to libvirt.') + if not retries: + wok_log.error( + 'Unable to establish connection ' + 'with libvirt. 
Please check ' + 'your libvirt URI which is often ' + 'defined in ' + '/etc/libvirt/libvirt.conf' + ) + add_notification( + 'KCHCONN0001E', plugin_name='/plugins/kimchi' + ) + return None + time.sleep(2) + def isQemuURI(self): """ This method will return True or Value when the system libvirt diff --git a/model/libvirtevents.py b/model/libvirtevents.py index 35fb1d0ac..a280f8607 100644 --- a/model/libvirtevents.py +++ b/model/libvirtevents.py @@ -16,11 +16,10 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA +import time import cherrypy import libvirt -import time - from wok.exception import OperationFailed from wok.message import WokMessage from wok.model.notifications import add_notification @@ -37,8 +36,7 @@ def __init__(self): # BackgroundTask class due to issues when using threading module with # cherrypy. self.event_loop_thread = cherrypy.process.plugins.BackgroundTask( - 2, - self._event_loop_run + 2, self._event_loop_run ) self.event_loop_thread.setName('KimchiLibvirtEventLoop') self.event_loop_thread.setDaemon(True) @@ -62,14 +60,10 @@ def _kimchi_EventTimeout(self, timer, opaque): time.sleep(0.01) def event_enospc_cb(self, conn, dom, path, dev, action, reason, args): - if reason == "enospc": - info = { - "vm": dom.name(), - "srcPath": path, - "devAlias": dev, - } - add_notification("KCHEVENT0004W", info, '/plugins/kimchi') - msg = WokMessage("KCHEVENT0004W", info, '/plugins/kimchi') + if reason == 'enospc': + info = {'vm': dom.name(), 'srcPath': path, 'devAlias': dev} + add_notification('KCHEVENT0004W', info, '/plugins/kimchi') + msg = WokMessage('KCHEVENT0004W', info, '/plugins/kimchi') wok_log.warning(msg.get_text()) def handleEnospc(self, conn): @@ -81,14 +75,14 @@ def handleEnospc(self, conn): None, libvirt.VIR_DOMAIN_EVENT_ID_IO_ERROR_REASON, self.event_enospc_cb, - libvirt.VIR_DOMAIN_EVENT_ID_IO_ERROR_REASON + libvirt.VIR_DOMAIN_EVENT_ID_IO_ERROR_REASON, ) except (libvirt.libvirtError, AttributeError) as e: if type(e) == AttributeError: reason = 'Libvirt service is not running' else: - reason = e.message - wok_log.error("Register of ENOSPC event failed: %s" % reason) + reason = e + wok_log.error('Register of ENOSPC event failed: %s' % reason) def registerAttachDevicesEvent(self, conn, cb, arg): """ @@ -96,13 +90,11 @@ def registerAttachDevicesEvent(self, conn, cb, arg): """ try: return conn.get().domainEventRegisterAny( - None, - libvirt.VIR_DOMAIN_EVENT_ID_DEVICE_ADDED, - cb, - arg) + None, libvirt.VIR_DOMAIN_EVENT_ID_DEVICE_ADDED, cb, arg + ) - except (AttributeError, libvirt.libvirtError), e: - wok_log.error("register attach event failed: %s" % e.message) + except (AttributeError, libvirt.libvirtError) as e: + wok_log.error(f'register attach event failed: {str(e)}') def registerDetachDevicesEvent(self, conn, cb, arg): """ @@ -110,61 +102,70 @@ def registerDetachDevicesEvent(self, conn, cb, arg): """ try: return conn.get().domainEventRegisterAny( - None, - libvirt.VIR_DOMAIN_EVENT_ID_DEVICE_REMOVED, - cb, - arg) + None, libvirt.VIR_DOMAIN_EVENT_ID_DEVICE_REMOVED, cb, arg + ) except libvirt.libvirtError as e: - wok_log.error("register detach event failed: %s" % e.message) + wok_log.error(f'register detach event failed: {str(e)}') def registerPoolEvents(self, conn, cb, arg): """ Register libvirt events to listen to any pool change """ - pool_events = [libvirt.VIR_STORAGE_POOL_EVENT_DEFINED, - 
libvirt.VIR_STORAGE_POOL_EVENT_STARTED, - libvirt.VIR_STORAGE_POOL_EVENT_STOPPED, - libvirt.VIR_STORAGE_POOL_EVENT_UNDEFINED] + def lifecycle_cb(conn, dom, event, detail, opaque): + return cb(opaque) + + def refresh_cb(conn, pool, opaque): + return cb(opaque) + + pool_events = [ + (libvirt.VIR_STORAGE_POOL_EVENT_ID_LIFECYCLE, lifecycle_cb), + (libvirt.VIR_STORAGE_POOL_EVENT_ID_REFRESH, refresh_cb) + ] - for ev in pool_events: + for ev, ev_cb in pool_events: try: - conn.get().storagePoolEventRegisterAny(None, ev, cb, arg) + conn.get().storagePoolEventRegisterAny(None, ev, ev_cb, arg) except libvirt.libvirtError as e: - wok_log.error("Unable to register pool event handler: %s" % - e.message) + wok_log.error( + f'Unable to register pool event handler: {str(e)}') def registerNetworkEvents(self, conn, cb, arg): """ Register libvirt events to listen to any network change """ - net_events = [libvirt.VIR_NETWORK_EVENT_DEFINED, - libvirt.VIR_NETWORK_EVENT_STARTED, - libvirt.VIR_NETWORK_EVENT_STOPPED, - libvirt.VIR_NETWORK_EVENT_UNDEFINED] + def lifecycle_cb(conn, dom, event, detail, opaque): + return cb(opaque) - for ev in net_events: - try: - conn.get().networkEventRegisterAny(None, ev, cb, arg) - except libvirt.libvirtError as e: - wok_log.error("Unable to register network event handler: %s" % - e.message) + try: + conn.get().networkEventRegisterAny( + None, + libvirt.VIR_NETWORK_EVENT_ID_LIFECYCLE, + lifecycle_cb, + arg + ) + except libvirt.libvirtError as e: + wok_log.error( + f'Unable to register network event handler: {str(e)}') def registerDomainEvents(self, conn, cb, arg): """ Register libvirt events to listen to any domain change """ - net_events = [libvirt.VIR_DOMAIN_EVENT_DEFINED, - libvirt.VIR_DOMAIN_EVENT_PMSUSPENDED, - libvirt.VIR_DOMAIN_EVENT_RESUMED, - libvirt.VIR_DOMAIN_EVENT_STARTED, - libvirt.VIR_DOMAIN_EVENT_STOPPED, - libvirt.VIR_DOMAIN_EVENT_SUSPENDED, - libvirt.VIR_DOMAIN_EVENT_UNDEFINED] - - for ev in net_events: + def lifecycle_cb(conn, dom, event, detail, opaque): + return cb(opaque) + + def reboot_cb(conn, pool, opaque): + return cb(opaque) + + events = [ + (libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE, lifecycle_cb), + (libvirt.VIR_DOMAIN_EVENT_ID_REBOOT, reboot_cb) + ] + + for ev, ev_cb in events: try: - conn.get().domainEventRegisterAny(None, ev, cb, arg) + conn.get().domainEventRegisterAny(None, ev, ev_cb, arg) except libvirt.libvirtError as e: - wok_log.error("Unable to register domain event handler: %s" % - e.message) + wok_log.error( + f'Unable to register domain event handler: {str(e)}') diff --git a/model/libvirtstoragepool.py b/model/libvirtstoragepool.py index aa1213fe7..f18ca7129 100644 --- a/model/libvirtstoragepool.py +++ b/model/libvirtstoragepool.py @@ -16,18 +16,20 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA +import os +import tempfile import libvirt import lxml.etree as ET -import os -import tempfile from lxml.builder import E - -from wok.exception import InvalidParameter, OperationFailed, TimeoutExpired -from wok.rollbackcontext import RollbackContext -from wok.utils import parse_cmd_output, run_command, wok_log - +from wok.exception import InvalidParameter +from wok.exception import OperationFailed +from wok.exception import TimeoutExpired from wok.plugins.kimchi.iscsi import TargetClient +from wok.rollbackcontext import RollbackContext +from wok.utils import parse_cmd_output +from 
wok.utils import run_command +from wok.utils import wok_log class StoragePoolDef(object): @@ -36,25 +38,25 @@ def create(cls, poolArgs): for klass in cls.__subclasses__(): if poolArgs['type'] == klass.poolType: return klass(poolArgs) - raise OperationFailed("KCHPOOL0014E", {'type': poolArgs['type']}) + raise OperationFailed('KCHPOOL0014E', {'type': poolArgs['type']}) def __init__(self, poolArgs): self.poolArgs = poolArgs def prepare(self, conn): - ''' Validate pool arguments and perform preparations. Operation which + """ Validate pool arguments and perform preparations. Operation which would cause side effect should be put here. Subclasses can optionally - override this method, or it always succeeds by default. ''' + override this method, or it always succeeds by default. """ pass @property def xml(self): - ''' Subclasses have to override this method to actually generate the + """ Subclasses have to override this method to actually generate the storage pool XML definition. Should cause no side effect and be - idempotent''' + idempotent""" # TODO: When add new pool type, should also add the related test in # tests/test_storagepool.py - raise OperationFailed("KCHPOOL0015E", {'pool': self}) + raise OperationFailed('KCHPOOL0015E', {'pool': self}) class DirPoolDef(StoragePoolDef): @@ -62,14 +64,10 @@ class DirPoolDef(StoragePoolDef): @property def xml(self): - # Required parameters - # name: - # type: - # path: pool = E.pool(type='dir') pool.append(E.name(self.poolArgs['name'])) pool.append(E.target(E.path(self.poolArgs['path']))) - return ET.tostring(pool, encoding='unicode', pretty_print=True) + return ET.tostring(pool, encoding='utf-8', pretty_print=True).decode('utf-8') class NetfsPoolDef(StoragePoolDef): @@ -81,11 +79,18 @@ def __init__(self, poolArgs): def prepare(self, conn): mnt_point = tempfile.mkdtemp(dir='/tmp') - export_path = "%s:%s" % ( - self.poolArgs['source']['host'], self.poolArgs['source']['path']) - mount_cmd = ["mount", "-o", 'soft,timeo=100,retrans=3,retry=0', - export_path, mnt_point] - umount_cmd = ["umount", "-f", export_path] + export_path = '%s:%s' % ( + self.poolArgs['source']['host'], + self.poolArgs['source']['path'], + ) + mount_cmd = [ + 'mount', + '-o', + 'soft,timeo=100,retrans=3,retry=0', + export_path, + mnt_point, + ] + umount_cmd = ['umount', '-f', export_path] mounted = False # Due to an NFS bug (See Red Hat BZ 1023059), NFSv4 exports may take # 10-15 seconds to mount the first time. 
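Because that first NFSv4 mount can be slow, the probe mount below runs through run_command with a timeout instead of blocking indefinitely, and is rolled back with the umount_cmd defined above. A rough standalone sketch of the same validation using subprocess directly (the helper name and the 30-second timeout are assumptions, not values taken from this change):

import subprocess
import tempfile

def nfs_export_is_mountable(host, path, timeout=30):
    # Soft, short-retry mount into a scratch directory; success means the
    # export is reachable and usable for a netfs pool.
    export = f'{host}:{path}'
    mnt_point = tempfile.mkdtemp(dir='/tmp')
    mount_cmd = ['mount', '-o', 'soft,timeo=100,retrans=3,retry=0',
                 export, mnt_point]
    try:
        subprocess.run(mount_cmd, check=True, timeout=timeout,
                       capture_output=True)
    except (subprocess.CalledProcessError, subprocess.TimeoutExpired):
        return False
    finally:
        # Best-effort cleanup, mirroring the deferred forced umount above.
        subprocess.run(['umount', '-f', mnt_point], capture_output=True)
    return True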
@@ -97,8 +102,8 @@ def prepare(self, conn): run_command(mount_cmd, cmd_timeout) rollback.prependDefer(run_command, umount_cmd, cmd_timeout) except TimeoutExpired: - raise InvalidParameter("KCHPOOL0012E", {'path': export_path}) - with open("/proc/mounts", "rb") as f: + raise InvalidParameter('KCHPOOL0012E', {'path': export_path}) + with open('/proc/mounts', 'rb') as f: rawMounts = f.read() output_items = ['dev_path', 'mnt_point', 'type'] mounts = parse_cmd_output(rawMounts, output_items) @@ -107,15 +112,10 @@ def prepare(self, conn): mounted = True if not mounted: - raise InvalidParameter("KCHPOOL0013E", {'path': export_path}) + raise InvalidParameter('KCHPOOL0013E', {'path': export_path}) @property def xml(self): - # Required parameters - # name: - # type: - # source[host]: - # source[path]: pool = E.pool(type='netfs') pool.append(E.name(self.poolArgs['name'])) @@ -137,10 +137,6 @@ def __init__(self, poolArgs): @property def xml(self): - # Required parameters - # name: - # type: - # source[devices]: pool = E.pool(type='logical') pool.append(E.name(self.poolArgs['name'])) @@ -163,9 +159,11 @@ def prepare(self, conn=None): # fc_host adapters type are only available in libvirt >= 1.0.5 if not self.poolArgs['fc_host_support']: self.poolArgs['source']['adapter']['type'] = 'scsi_host' - msg = "Libvirt version <= 1.0.5. Setting SCSI host name as '%s'; "\ - "setting SCSI adapter type as 'scsi_host'; "\ - "ignoring wwnn and wwpn." % tmp_name + msg = ( + "Libvirt version <= 1.0.5. Setting SCSI host name as '%s'; " + "setting SCSI adapter type as 'scsi_host'; " + 'ignoring wwnn and wwpn.' % tmp_name + ) wok_log.info(msg) # Path for Fibre Channel scsi hosts self.poolArgs['path'] = '/dev/disk/by-path' @@ -174,13 +172,6 @@ def prepare(self, conn=None): @property def xml(self): - # Required parameters - # name: - # source[adapter][type]: - # source[name]: - # source[adapter][wwnn]: - # source[adapter][wwpn]: - # path: pool = E.pool(type='scsi') pool.append(E.name(self.poolArgs['name'])) @@ -201,7 +192,7 @@ def prepare(self, conn): source = self.poolArgs['source'] if not TargetClient(**source).validate(): msg_args = {'host': source['host'], 'target': source['target']} - raise OperationFailed("KCHISCSI0002E", msg_args) + raise OperationFailed('KCHISCSI0002E', msg_args) self._prepare_auth(conn) def _prepare_auth(self, conn): @@ -212,12 +203,14 @@ def _prepare_auth(self, conn): try: virSecret = conn.secretLookupByUsage( - libvirt.VIR_SECRET_USAGE_TYPE_ISCSI, self.poolArgs['name']) + libvirt.VIR_SECRET_USAGE_TYPE_ISCSI, self.poolArgs['name'] + ) except libvirt.libvirtError: secret = E.secret(ephemeral='no', private='yes') - description = E.description('Secret for iSCSI storage pool %s' % - self.poolArgs['name']) + description = E.description( + 'Secret for iSCSI storage pool %s' % self.poolArgs['name'] + ) secret.append(description) secret.append(E.auth(type='chap', username=auth['username'])) @@ -230,14 +223,6 @@ def _prepare_auth(self, conn): @property def xml(self): - # Required parameters - # name: - # type: - # source[host]: - # source[target]: - # - # Optional parameters - # source[port]: pool = E.pool(type='iscsi') pool.append(E.name(self.poolArgs['name'])) diff --git a/model/model.py b/model/model.py index a72593f23..c3aaf24ec 100644 --- a/model/model.py +++ b/model/model.py @@ -16,15 +16,14 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 
02110-1301 USA - from wok.basemodel import BaseModel from wok.objectstore import ObjectStore from wok.plugins.kimchi import config -from wok.pushserver import send_wok_notification -from wok.utils import get_all_model_instances, get_model_instances - from wok.plugins.kimchi.model.libvirtconnection import LibvirtConnection from wok.plugins.kimchi.model.libvirtevents import LibvirtEvents +from wok.pushserver import send_wok_notification +from wok.utils import get_all_model_instances +from wok.utils import get_model_instances class Model(BaseModel): @@ -55,7 +54,7 @@ def __init__(self, libvirt_uri=None, objstore_loc=None): super(Model, self).__init__(models) - def _events_handler(self, conn, pool, ev, details, opaque): + def _events_handler(self, api): # Do not use any known method (POST, PUT, DELETE) as it is used by Wok # engine and may lead in having 2 notifications for the same action - send_wok_notification('/plugins/kimchi', opaque, 'METHOD') + send_wok_notification('/plugins/kimchi', api, 'METHOD') diff --git a/model/networks.py b/model/networks.py index 722b97b1e..a38498166 100644 --- a/model/networks.py +++ b/model/networks.py @@ -16,18 +16,17 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - import copy +import time + import ipaddr import libvirt -import time from libvirt import VIR_INTERFACE_XML_INACTIVE - -from wok.exception import InvalidOperation, InvalidParameter -from wok.exception import MissingParameter, NotFoundError, OperationFailed -from wok.utils import run_command, wok_log -from wok.xmlutils.utils import xpath_get_text - +from wok.exception import InvalidOperation +from wok.exception import InvalidParameter +from wok.exception import MissingParameter +from wok.exception import NotFoundError +from wok.exception import OperationFailed from wok.plugins.kimchi import network as netinfo from wok.plugins.kimchi.config import kimchiPaths from wok.plugins.kimchi.model.featuretests import FeatureTests @@ -37,6 +36,9 @@ from wok.plugins.kimchi.xmlutils.network import create_vlan_tagged_bridge_xml from wok.plugins.kimchi.xmlutils.network import get_no_network_config_xml from wok.plugins.kimchi.xmlutils.network import to_network_xml +from wok.utils import run_command +from wok.utils import wok_log +from wok.xmlutils.utils import xpath_get_text KIMCHI_BRIDGE_PREFIX = 'kb' @@ -55,36 +57,37 @@ def _check_default_networks(self): conn = self.conn.get() for net_name in networks: - error_msg = ("Network %s does not exist or is not " - "active. Please, check the configuration in " - "%s/template.conf to ensure it lists only valid " - "networks." % (net_name, kimchiPaths.sysconf_dir)) + error_msg = ( + 'Network %s does not exist or is not ' + 'active. Please, check the configuration in ' + '%s/template.conf to ensure it lists only valid ' + 'networks.' % (net_name, kimchiPaths.sysconf_dir) + ) try: net = conn.networkLookupByName(net_name) - except libvirt.libvirtError, e: - msg = "Fatal: Unable to find network %s." - wok_log.error(msg, net_name) - wok_log.error("Details: %s", e.message) + except libvirt.libvirtError as e: + wok_log.error(f'Fatal: Unable to find network {net_name}.') + wok_log.error(f'Details: {str(e)}') raise Exception(error_msg) if net.isActive() == 0: try: net.create() except libvirt.libvirtError as e: - msg = "Fatal: Unable to activate network %s." 
- wok_log.error(msg, net_name) - wok_log.error("Details: %s", e.message) + wok_log.error( + f'Fatal: Unable to activate network {net_name}.') + wok_log.error(f'Details: {str(e)}') raise Exception(error_msg) def create(self, params): conn = self.conn.get() name = params['name'] if name in self.get_list(): - raise InvalidOperation("KCHNET0001E", {'name': name}) + raise InvalidOperation('KCHNET0001E', {'name': name}) # handle connection type - connection = params["connection"] + connection = params['connection'] if connection in ['nat', 'isolated']: if connection == 'nat': params['forward'] = {'mode': 'nat'} @@ -104,18 +107,19 @@ def create(self, params): xml = to_network_xml(**params) try: - network = conn.networkDefineXML(xml.encode("utf-8")) + network = conn.networkDefineXML(xml) network.setAutostart(params.get('autostart', True)) except libvirt.libvirtError as e: - raise OperationFailed("KCHNET0008E", - {'name': name, 'err': e.get_error_message()}) + raise OperationFailed( + 'KCHNET0008E', {'name': name, 'err': e.get_error_message()} + ) return name def get_list(self): conn = self.conn.get() names = conn.listNetworks() + conn.listDefinedNetworks() - return sorted(map(lambda x: x.decode('utf-8'), names)) + return sorted(names) def _get_available_address(self, addr_pools=None): if addr_pools is None: @@ -136,57 +140,60 @@ def _set_network_subnet(self, params): if not netaddr: netaddr = self._get_available_address() if not netaddr: - raise OperationFailed("KCHNET0009E", {'name': params['name']}) + raise OperationFailed('KCHNET0009E', {'name': params['name']}) try: ip = ipaddr.IPNetwork(netaddr) except ValueError: - raise InvalidParameter("KCHNET0003E", {'subnet': netaddr, - 'network': params['name']}) + raise InvalidParameter( + 'KCHNET0003E', {'subnet': netaddr, 'network': params['name']} + ) if ip.ip == ip.network: ip.ip = ip.ip + 1 - dhcp_start = str(ip.ip + ip.numhosts / 2) - dhcp_end = str(ip.ip + ip.numhosts - 3) - params.update({'net': str(ip), - 'dhcp': {'range': {'start': dhcp_start, - 'end': dhcp_end}}}) + dhcp_start = str(ip.network + int(ip.numhosts / 2)) + dhcp_end = str(ip.network + int(ip.numhosts - 3)) + params.update( + {'net': str(ip), 'dhcp': { + 'range': {'start': dhcp_start, 'end': dhcp_end}}} + ) def _ensure_iface_up(self, iface): if netinfo.operstate(iface) != 'up': _, err, rc = run_command(['ip', 'link', 'set', 'dev', iface, 'up']) if rc != 0: - raise OperationFailed("KCHNET0020E", - {'iface': iface, 'err': err}) + raise OperationFailed( + 'KCHNET0020E', {'iface': iface, 'err': err}) # Add a delay to wait for the link change takes into effect. 
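Note on the _set_network_subnet hunk above: the DHCP offsets are now wrapped in int() because / always yields a float on Python 3, while the address arithmetic expects an integer offset (Python 2's integer division hid this). A minimal sketch of the same start/end computation, using the stdlib ipaddress module instead of the third-party ipaddr package imported by networks.py, purely for illustration:

    import ipaddress

    net = ipaddress.ip_network('192.168.122.0/24')
    base = net.network_address
    dhcp_start = base + int(net.num_addresses / 2)   # 192.168.122.128
    dhcp_end = base + (net.num_addresses - 3)        # 192.168.122.253
    print(dhcp_start, dhcp_end)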
for i in range(10): time.sleep(1) if netinfo.operstate(iface) == 'up': break else: - raise OperationFailed("KCHNET0021E", {'iface': iface}) + raise OperationFailed('KCHNET0021E', {'iface': iface}) def _check_network_interface(self, params): if not params.get('interfaces'): - raise MissingParameter("KCHNET0004E", {'name': params['name']}) + raise MissingParameter('KCHNET0004E', {'name': params['name']}) if len(params['interfaces']) == 0: - raise InvalidParameter("KCHNET0029E") + raise InvalidParameter('KCHNET0029E') conn = params['connection'] if conn in ['bridge', 'macvtap'] and len(params['interfaces']) > 1: - raise InvalidParameter("KCHNET0030E") + raise InvalidParameter('KCHNET0030E') for iface in params['interfaces']: if iface in self.get_all_networks_interfaces(): msg_args = {'iface': iface, 'network': params['name']} - raise InvalidParameter("KCHNET0006E", msg_args) + raise InvalidParameter('KCHNET0006E', msg_args) def _set_network_macvtap(self, params): iface = params['interfaces'][0] - if ('vlan_id' in params or not (netinfo.is_bare_nic(iface) or - netinfo.is_bonding(iface))): + if 'vlan_id' in params or not ( + netinfo.is_bare_nic(iface) or netinfo.is_bonding(iface) + ): raise InvalidParameter('KCHNET0028E', {'name': iface}) # set macvtap network @@ -194,14 +201,13 @@ def _set_network_macvtap(self, params): def _set_network_multiple_interfaces(self, params): for iface in params['interfaces']: - if ('vlan_id' in params or not (netinfo.is_bare_nic(iface) or - netinfo.is_bonding(iface))): + if 'vlan_id' in params or not ( + netinfo.is_bare_nic(iface) or netinfo.is_bonding(iface) + ): raise InvalidParameter('KCHNET0028E', {'name': iface}) - params['forward'] = { - 'mode': params["connection"], - 'devs': params['interfaces'] - } + params['forward'] = {'mode': params['connection'], + 'devs': params['interfaces']} def _set_network_bridge(self, params): params['forward'] = {'mode': 'bridge'} @@ -227,9 +233,9 @@ def _set_network_bridge(self, params): raise InvalidParameter('KCHNET0027E') if 'vlan_id' in params: - params['bridge'] = \ - self._create_vlan_tagged_bridge(str(iface), - str(params['vlan_id'])) + params['bridge'] = self._create_vlan_tagged_bridge( + str(iface), str(params['vlan_id']) + ) else: # create Linux bridge interface and use it as actual iface iface = self._create_linux_bridge(iface) @@ -237,20 +243,23 @@ def _set_network_bridge(self, params): # unrecognized interface type: fail else: - raise InvalidParameter("KCHNET0007E") + raise InvalidParameter('KCHNET0007E') def get_all_networks_interfaces(self): net_names = self.get_list() interfaces = [] for name in net_names: conn = self.conn.get() - network = conn.networkLookupByName(name.encode("utf-8")) + network = conn.networkLookupByName(name) xml = network.XMLDesc(0) net_dict = NetworkModel.get_network_from_xml(xml) forward = net_dict['forward'] - (forward['mode'] == 'bridge' and forward['interface'] and - interfaces.append(forward['interface'][0]) is None or - interfaces.extend(forward['interface'] + forward['pf'])) + ( + forward['mode'] == 'bridge' + and forward['interface'] + and interfaces.append(forward['interface'][0]) is None + or interfaces.extend(forward['interface'] + forward['pf']) + ) net_dict['bridge'] and interfaces.append(net_dict['bridge']) return interfaces @@ -259,15 +268,16 @@ def _create_bridge(self, name, xml): # check if name exists if name in netinfo.all_interfaces(): - raise InvalidOperation("KCHNET0010E", {'iface': name}) + raise InvalidOperation('KCHNET0010E', {'iface': name}) # create bridge 
through libvirt try: bridge = conn.interfaceDefineXML(xml) bridge.create() except libvirt.libvirtError as e: - raise OperationFailed("KCHNET0025E", {'name': name, - 'err': e.get_error_message()}) + raise OperationFailed( + 'KCHNET0025E', {'name': name, 'err': e.get_error_message()} + ) def _create_linux_bridge(self, interface): # get xml definition of interface @@ -279,15 +289,21 @@ def _create_linux_bridge(self, interface): if iface_xml is None: try: mac = netinfo.get_dev_macaddr(str(interface)) - iface_xml = get_iface_xml({'type': 'ethernet', - 'name': interface, - 'mac': mac, - 'startmode': "onboot"}) - conn.interfaceDefineXML(iface_xml.encode("utf-8")) + iface_xml = get_iface_xml( + { + 'type': 'ethernet', + 'name': interface, + 'mac': mac, + 'startmode': 'onboot', + } + ) + conn.interfaceDefineXML(iface_xml) iface_defined = True - except libvirt.libvirtError, e: - raise OperationFailed("KCHNET0024E", {'name': interface, - 'err': e.get_error_message()}) + except libvirt.libvirtError as e: + raise OperationFailed( + 'KCHNET0024E', {'name': interface, + 'err': e.get_error_message()} + ) # Truncate the interface name if it exceeds 13 characters to make sure # the length of bridge name is less than 15 (its maximum value). @@ -327,14 +343,15 @@ def _redefine_iface_no_network(self, name, iface_xml): conn = self.conn.get() # drop network config from definition of interface - xml = get_no_network_config_xml(iface_xml.encode("utf-8")) + xml = get_no_network_config_xml(iface_xml) try: # redefine interface - conn.interfaceDefineXML(xml.encode("utf-8")) + conn.interfaceDefineXML(xml) except libvirt.libvirtError as e: - raise OperationFailed("KCHNET0024E", {'name': name, - 'err': e.get_error_message()}) + raise OperationFailed( + 'KCHNET0024E', {'name': name, 'err': e.get_error_message()} + ) class NetworkModel(object): @@ -352,7 +369,7 @@ def lookup(self, name): forward = net_dict['forward'] interface = net_dict['bridge'] - connection = forward['mode'] or "isolated" + connection = forward['mode'] or 'isolated' # FIXME, if we want to support other forward mode well. 
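Note: lookup() here and get_network_from_xml() further down both go through wok's xpath_get_text helper to pull attributes out of the libvirt network XML. A rough stand-in for that helper (assumed behaviour only, the real one lives in wok.xmlutils.utils) shows what these queries return:

    import lxml.etree as ET

    def xpath_get_text(xml, expr):
        # Return the string results of an XPath query, e.g. attribute values.
        return [str(v) for v in ET.fromstring(xml).xpath(expr)]

    xml = """<network>
      <forward mode='nat'/>
      <bridge name='virbr0'/>
      <ip address='192.168.122.1' netmask='255.255.255.0'/>
    </network>"""
    print(xpath_get_text(xml, '/network/forward/@mode'))  # ['nat']
    print(xpath_get_text(xml, '/network/bridge/@name'))   # ['virbr0']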
if connection == 'bridge': # macvtap bridge @@ -368,27 +385,26 @@ def lookup(self, name): # http://www.ovirt.org/File:Issue3.png if subnet: subnet = ipaddr.IPNetwork(subnet) - subnet = "%s/%s" % (subnet.network, subnet.prefixlen) + subnet = '%s/%s' % (subnet.network, subnet.prefixlen) if connection in ['passthrough', 'vepa']: - interfaces = xpath_get_text( - xml, - "/network/forward/interface/@dev" - ) + interfaces = xpath_get_text(xml, '/network/forward/interface/@dev') else: interfaces = [interface] network_in_use, used_by_vms, _ = self._is_network_in_use(name) - return {'connection': connection, - 'interfaces': interfaces, - 'subnet': subnet, - 'dhcp': dhcp, - 'vms': used_by_vms, - 'in_use': network_in_use, - 'autostart': network.autostart() == 1, - 'state': network.isActive() and "active" or "inactive", - 'persistent': True if network.isPersistent() else False} + return { + 'connection': connection, + 'interfaces': interfaces, + 'subnet': subnet, + 'dhcp': dhcp, + 'vms': used_by_vms, + 'in_use': network_in_use, + 'autostart': network.autostart() == 1, + 'state': network.isActive() and 'active' or 'inactive', + 'persistent': True if network.isPersistent() else False, + } def _is_network_in_use(self, name): # All the networks listed as default in template.conf file should not @@ -398,9 +414,9 @@ def _is_network_in_use(self, name): tmpls = self._is_network_used_by_template(name) if name in tmpl_defaults['networks']: - return (True, vms, tmpls) + return True, vms, tmpls - return (bool(vms) or bool(tmpls), vms, tmpls) + return bool(vms) or bool(tmpls), vms, tmpls def _is_network_used_by_template(self, network): tmpl_list = [] @@ -413,17 +429,22 @@ def _is_network_used_by_template(self, network): tmpl_list.append(tmpl) return tmpl_list - def _get_vms_attach_to_a_network(self, network, filter="all"): - DOM_STATE_MAP = {'nostate': 0, 'running': 1, 'blocked': 2, - 'paused': 3, 'shutdown': 4, 'shutoff': 5, - 'crashed': 6} + def _get_vms_attach_to_a_network(self, network, filter='all'): + DOM_STATE_MAP = { + 'nostate': 0, + 'running': 1, + 'blocked': 2, + 'paused': 3, + 'shutdown': 4, + 'shutoff': 5, + 'crashed': 6, + } state = DOM_STATE_MAP.get(filter) vms = [] conn = self.conn.get() for dom in conn.listAllDomains(0): networks = self._vm_get_networks(dom) - if network.encode('utf-8') in networks and \ - (state is None or state == dom.state(0)[0]): + if network in networks and (state is None or state == dom.state(0)[0]): vms.append(dom.name()) return vms @@ -436,18 +457,18 @@ def activate(self, name): network = self.get_network(self.conn.get(), name) try: network.create() - except libvirt.libvirtError, e: - raise OperationFailed('KCHNET0022E', {'name': name, - 'err': e.message}) + except libvirt.libvirtError as e: + raise OperationFailed( + 'KCHNET0022E', {'name': name, 'err': e.message}) def deactivate(self, name): in_use, used_by_vms, used_by_tmpls = self._is_network_in_use(name) vms = 'N/A' if len(used_by_vms) == 0 else ', '.join(used_by_vms) tmpls = 'N/A' if len(used_by_tmpls) == 0 else ', '.join(used_by_tmpls) if in_use: - raise InvalidOperation("KCHNET0018E", {'name': name, - 'vms': vms, - 'tmpls': tmpls}) + raise InvalidOperation( + 'KCHNET0018E', {'name': name, 'vms': vms, 'tmpls': tmpls} + ) network = self.get_network(self.conn.get(), name) network.destroy() @@ -457,49 +478,54 @@ def delete(self, name): vms = 'N/A' if len(used_by_vms) == 0 else ', '.join(used_by_vms) tmpls = 'N/A' if len(used_by_tmpls) == 0 else ', '.join(used_by_tmpls) if in_use: - raise InvalidOperation("KCHNET0017E", 
{'name': name, - 'vms': vms, - 'tmpls': tmpls}) + raise InvalidOperation( + 'KCHNET0017E', {'name': name, 'vms': vms, 'tmpls': tmpls} + ) network = self.get_network(self.conn.get(), name) if network.isActive(): - raise InvalidOperation("KCHNET0005E", {'name': name}) + raise InvalidOperation('KCHNET0005E', {'name': name}) self._remove_bridge(network) network.undefine() @staticmethod def get_network(conn, name): - name = name.encode("utf-8") try: return conn.networkLookupByName(name) except libvirt.libvirtError: - raise NotFoundError("KCHNET0002E", {'name': name}) + raise NotFoundError('KCHNET0002E', {'name': name}) @staticmethod def get_network_from_xml(xml): - address = xpath_get_text(xml, "/network/ip/@address") + address = xpath_get_text(xml, '/network/ip/@address') address = address and address[0] or '' - netmask = xpath_get_text(xml, "/network/ip/@netmask") + netmask = xpath_get_text(xml, '/network/ip/@netmask') netmask = netmask and netmask[0] or '' - net = address and netmask and "/".join([address, netmask]) or '' + net = address and netmask and '/'.join([address, netmask]) or '' - dhcp_start = xpath_get_text(xml, "/network/ip/dhcp/range/@start") + dhcp_start = xpath_get_text(xml, '/network/ip/dhcp/range/@start') dhcp_start = dhcp_start and dhcp_start[0] or '' - dhcp_end = xpath_get_text(xml, "/network/ip/dhcp/range/@end") + dhcp_end = xpath_get_text(xml, '/network/ip/dhcp/range/@end') dhcp_end = dhcp_end and dhcp_end[0] or '' dhcp = {'start': dhcp_start, 'end': dhcp_end} - forward_mode = xpath_get_text(xml, "/network/forward/@mode") + forward_mode = xpath_get_text(xml, '/network/forward/@mode') forward_mode = forward_mode and forward_mode[0] or '' - forward_if = xpath_get_text(xml, "/network/forward/interface/@dev") - forward_pf = xpath_get_text(xml, "/network/forward/pf/@dev") - bridge = xpath_get_text(xml, "/network/bridge/@name") + forward_if = xpath_get_text(xml, '/network/forward/interface/@dev') + forward_pf = xpath_get_text(xml, '/network/forward/pf/@dev') + bridge = xpath_get_text(xml, '/network/bridge/@name') bridge = bridge and bridge[0] or '' - return {'subnet': net, 'dhcp': dhcp, 'bridge': bridge, - 'forward': {'mode': forward_mode, - 'interface': forward_if, - 'pf': forward_pf}} + return { + 'subnet': net, + 'dhcp': dhcp, + 'bridge': bridge, + 'forward': { + 'mode': forward_mode, + 'interface': forward_if, + 'pf': forward_pf, + }, + } def _remove_bridge(self, network): try: @@ -522,10 +548,10 @@ def update(self, name, params): connection = info['connection'] if connection in ['bridge', 'macvtap', 'passthrough', 'vepa']: if params.get('subnet'): - raise InvalidParameter("KCHNET0031E") + raise InvalidParameter('KCHNET0031E') elif connection in ['nat', 'isolated']: if params.get('vlan_id') or params.get('interfaces'): - raise InvalidParameter("KCHNET0032E") + raise InvalidParameter('KCHNET0032E') # merge parameters info.update(params) @@ -533,8 +559,7 @@ def update(self, name, params): # get target device if bridge was created by Kimchi if connection == 'bridge': iface = info['interfaces'][0] - if (netinfo.is_bridge(iface) and - iface.startswith(KIMCHI_BRIDGE_PREFIX)): + if netinfo.is_bridge(iface) and iface.startswith(KIMCHI_BRIDGE_PREFIX): port = netinfo.ports(iface)[0] if netinfo.is_vlan(port): dev = netinfo.get_vlan_device(port) @@ -549,7 +574,7 @@ def update(self, name, params): try: # create new network return self.collection.create(info) - except: + except Exception: # restore original network self.collection.create(original) raise diff --git a/model/ovsbridges.py 
b/model/ovsbridges.py index f5bb3df6b..0ebb1623b 100644 --- a/model/ovsbridges.py +++ b/model/ovsbridges.py @@ -16,7 +16,6 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - from wok.plugins.kimchi.network import ovs_bridges diff --git a/model/storagepools.py b/model/storagepools.py index e6f5e582b..b1226872d 100644 --- a/model/storagepools.py +++ b/model/storagepools.py @@ -16,48 +16,57 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - import libvirt import lxml.etree as ET from lxml.builder import E - from wok.asynctask import AsyncTask -from wok.exception import InvalidOperation, MissingParameter -from wok.exception import NotFoundError, OperationFailed -from wok.utils import run_command, wok_log -from wok.xmlutils.utils import xpath_get_text - -from wok.plugins.kimchi.config import config, get_kimchi_version, kimchiPaths +from wok.exception import InvalidOperation +from wok.exception import MissingParameter +from wok.exception import NotFoundError +from wok.exception import OperationFailed +from wok.plugins.kimchi.config import config +from wok.plugins.kimchi.config import get_kimchi_version +from wok.plugins.kimchi.config import kimchiPaths from wok.plugins.kimchi.model.config import CapabilitiesModel from wok.plugins.kimchi.model.host import DeviceModel from wok.plugins.kimchi.model.libvirtstoragepool import StoragePoolDef from wok.plugins.kimchi.osinfo import defaults as tmpl_defaults from wok.plugins.kimchi.scan import Scanner -from wok.plugins.kimchi.utils import pool_name_from_uri, is_s390x +from wok.plugins.kimchi.utils import is_s390x +from wok.plugins.kimchi.utils import pool_name_from_uri +from wok.utils import run_command +from wok.utils import wok_log +from wok.xmlutils.utils import xpath_get_text -ISO_POOL_NAME = u'kimchi_isos' +ISO_POOL_NAME = 'kimchi_isos' -POOL_STATE_MAP = {0: 'inactive', - 1: 'initializing', - 2: 'active', - 3: 'degraded', - 4: 'inaccessible'} +POOL_STATE_MAP = { + 0: 'inactive', + 1: 'initializing', + 2: 'active', + 3: 'degraded', + 4: 'inaccessible', +} # Types of pools supported -STORAGE_SOURCES = {'netfs': {'addr': '/pool/source/host/@name', - 'path': '/pool/source/dir/@path'}, - 'iscsi': {'addr': '/pool/source/host/@name', - 'port': '/pool/source/host/@port', - 'path': '/pool/source/device/@path'}, - 'scsi': {'adapter_type': '/pool/source/adapter/@type', - 'adapter_name': '/pool/source/adapter/@name', - 'wwnn': '/pool/source/adapter/@wwnn', - 'wwpn': '/pool/source/adapter/@wwpn'}} +STORAGE_SOURCES = { + 'netfs': {'addr': '/pool/source/host/@name', 'path': '/pool/source/dir/@path'}, + 'iscsi': { + 'addr': '/pool/source/host/@name', + 'port': '/pool/source/host/@port', + 'path': '/pool/source/device/@path', + }, + 'scsi': { + 'adapter_type': '/pool/source/adapter/@type', + 'adapter_name': '/pool/source/adapter/@name', + 'wwnn': '/pool/source/adapter/@wwnn', + 'wwpn': '/pool/source/adapter/@wwpn', + }, +} class StoragePoolsModel(object): - def __init__(self, **kargs): self.conn = kargs['conn'] self.objstore = kargs['objstore'] @@ -90,18 +99,20 @@ def _check_default_pools(self): conn = self.conn.get() for pool_name in pools: - error_msg = ("Storage pool %s does not exist or is not " - "active. 
Please, check the configuration in " - "%s/template.conf to ensure it lists only valid " - "storage." % (pool_name, kimchiPaths.sysconf_dir)) + error_msg = ( + 'Storage pool %s does not exist or is not ' + 'active. Please, check the configuration in ' + '%s/template.conf to ensure it lists only valid ' + 'storage.' % (pool_name, kimchiPaths.sysconf_dir) + ) try: pool = conn.storagePoolLookupByName(pool_name) - except libvirt.libvirtError, e: + except libvirt.libvirtError as e: pool_path = pools[pool_name].get('path') if pool_path is None: - msg = "Fatal: Unable to find storage pool %s. " - wok_log.error(msg % pool_name) - wok_log.error("Details: %s", e.message) + wok_log.error( + f'Fatal: Unable to find storage pool {pool_name}.') + wok_log.error(f'Details: {str(e)}') raise Exception(error_msg) # Try to create the pool @@ -110,10 +121,10 @@ def _check_default_pools(self): xml = ET.tostring(pool) try: pool = conn.storagePoolDefineXML(xml, 0) - except libvirt.libvirtError, e: - msg = "Fatal: Unable to create storage pool %s. " - wok_log.error(msg % pool_name) - wok_log.error("Details: %s", e.message) + except libvirt.libvirtError as e: + wok_log.error( + f'Fatal: Unable to create storage pool {pool_name}.') + wok_log.error(f'Details: {str(e)}') raise Exception(error_msg) # Build and set autostart value to pool @@ -124,16 +135,16 @@ def _check_default_pools(self): # already exists on system pool.build(libvirt.VIR_STORAGE_POOL_BUILD_NEW) pool.setAutostart(1) - except: + except Exception: pass if pool.isActive() == 0: try: pool.create(0) - except libvirt.libvirtError, e: - msg = "Fatal: Unable to create storage pool %s. " - wok_log.error(msg % pool_name) - wok_log.error("Details: %s", e.message) + except libvirt.libvirtError as e: + wok_log.error( + f'Fatal: Unable to create storage pool {pool_name}.') + wok_log.error(f'Details: {str(e)}') raise Exception(error_msg) def get_list(self): @@ -141,13 +152,13 @@ def get_list(self): conn = self.conn.get() names = conn.listStoragePools() names += conn.listDefinedStoragePools() - return sorted(map(lambda x: x.decode('utf-8'), names)) + return sorted(names) except libvirt.libvirtError as e: - raise OperationFailed("KCHPOOL0006E", - {'err': e.get_error_message()}) + raise OperationFailed( + 'KCHPOOL0006E', {'err': e.get_error_message()}) def _check_lvm(self, name, from_vg): - vgdisplay_cmd = ['vgdisplay', name.encode('utf-8')] + vgdisplay_cmd = ['vgdisplay', name] output, error, returncode = run_command(vgdisplay_cmd) # From vgdisplay error codes: # 1 error reading VGDA @@ -157,10 +168,10 @@ def _check_lvm(self, name, from_vg): # 5 no volume groups found at all # 6 error reading VGDA from lvmtab if from_vg and returncode in [2, 4, 5]: - raise InvalidOperation("KCHPOOL0038E", {'name': name}) + raise InvalidOperation('KCHPOOL0038E', {'name': name}) if not from_vg and returncode not in [2, 4, 5]: - raise InvalidOperation("KCHPOOL0036E", {'name': name}) + raise InvalidOperation('KCHPOOL0036E', {'name': name}) def create(self, params): task_id = None @@ -170,7 +181,7 @@ def create(self, params): try: name = params['name'] if name == ISO_POOL_NAME: - raise InvalidOperation("KCHPOOL0031E") + raise InvalidOperation('KCHPOOL0031E') # The user may want to create a logical pool with the same name # used before but a volume group will already exist with this name @@ -190,13 +201,13 @@ def create(self, params): poolDef = StoragePoolDef.create(params) poolDef.prepare(conn) - xml = poolDef.xml.encode("utf-8") - except KeyError, item: - raise 
MissingParameter("KCHPOOL0004E", - {'item': str(item), 'name': name}) + xml = poolDef.xml + except KeyError as item: + raise MissingParameter( + 'KCHPOOL0004E', {'item': str(item), 'name': name}) if name in self.get_list(): - raise InvalidOperation("KCHPOOL0001E", {'name': name}) + raise InvalidOperation('KCHPOOL0001E', {'name': name}) try: if task_id: @@ -206,9 +217,10 @@ def create(self, params): pool = conn.storagePoolDefineXML(xml, 0) except libvirt.libvirtError as e: - wok_log.error("Problem creating Storage Pool: %s", e) - raise OperationFailed("KCHPOOL0007E", - {'name': name, 'err': e.get_error_message()}) + wok_log.error(f'Problem creating Storage Pool: {str(e)}') + raise OperationFailed( + 'KCHPOOL0007E', {'name': name, 'err': e.get_error_message()} + ) # Build and set autostart value to pool # Ignore error as the pool was already successfully created @@ -219,28 +231,30 @@ def create(self, params): pool.setAutostart(1) else: pool.setAutostart(0) - except: + except Exception: pass if params['type'] == 'netfs': - output, error, returncode = run_command(['setsebool', '-P', - 'virt_use_nfs=1']) + output, error, returncode = run_command( + ['setsebool', '-P', 'virt_use_nfs=1'] + ) if error or returncode: - wok_log.error("Unable to set virt_use_nfs=1. If you use " - "SELinux, this may prevent NFS pools from " - "being used.") + wok_log.error( + 'Unable to set virt_use_nfs=1. If you use ' + 'SELinux, this may prevent NFS pools from ' + 'being used.' + ) return name def _clean_scan(self, pool_name): try: conn = self.conn.get() - pool = conn.storagePoolLookupByName(pool_name.encode("utf-8")) + pool = conn.storagePoolLookupByName(pool_name) pool.destroy() with self.objstore as session: session.delete('scanning', pool_name) - except Exception, e: - err = "Exception %s occured when cleaning scan result" - wok_log.debug(err % e.message) + except Exception as e: + wok_log.debug(f'Exception {e} occurred when cleaning scan result') def _do_deep_scan(self, params): scan_params = dict(ignore_list=[]) @@ -249,23 +263,26 @@ def _do_deep_scan(self, params): for pool in self.get_list(): try: - res = StoragePoolModel(conn=self.conn, - objstore=self.objstore).lookup(pool) + res = StoragePoolModel(conn=self.conn, objstore=self.objstore).lookup( + pool + ) if res['state'] == 'active': scan_params['ignore_list'].append(res['path']) - except Exception, e: - err = "Exception %s occured when get ignore path" - wok_log.debug(err % e.message) + except Exception as e: + wok_log.debug(f'Exception {e} occured when get ignore path') params['path'] = self.scanner.scan_dir_prepare(params['name']) scan_params['pool_path'] = params['path'] - task_id = AsyncTask('/plugins/kimchi/storagepools/%s' % ISO_POOL_NAME, - self.scanner.start_scan, scan_params).id + task_id = AsyncTask( + f'/plugins/kimchi/storagepools/{ISO_POOL_NAME}', + self.scanner.start_scan, + scan_params, + ).id # Record scanning-task/storagepool mapping for future querying try: with self.objstore as session: - session.store('scanning', params['name'], task_id, - get_kimchi_version()) + session.store( + 'scanning', params['name'], task_id, get_kimchi_version()) return task_id except Exception as e: raise OperationFailed('KCHPOOL0037E', {'err': e.message}) @@ -280,10 +297,10 @@ def __init__(self, **kargs): def get_storagepool(name, conn): conn = conn.get() try: - return conn.storagePoolLookupByName(name.encode("utf-8")) + return conn.storagePoolLookupByName(name) except libvirt.libvirtError as e: if e.get_error_code() == libvirt.VIR_ERR_NO_STORAGE_POOL: - 
raise NotFoundError("KCHPOOL0002E", {'name': name}) + raise NotFoundError('KCHPOOL0002E', {'name': name}) else: raise @@ -294,8 +311,8 @@ def _get_storagepool_vols_num(self, pool): try: pool.refresh(0) - except Exception, e: - wok_log.error("Pool refresh failed: %s" % str(e)) + except Exception as e: + wok_log.error(f'Pool refresh failed: {e}') return pool.numOfVolumes() @@ -309,7 +326,7 @@ def _get_storage_source(self, pool_type, pool_xml): if len(res) == 1: source[key] = res[0] elif len(res) == 0: - source[key] = "" + source[key] = '' else: source[key] = res return source @@ -317,13 +334,13 @@ def _get_storage_source(self, pool_type, pool_xml): def _nfs_status_online(self, pool, poolArgs=None): if not poolArgs: xml = pool.XMLDesc(0) - pool_type = xpath_get_text(xml, "/pool/@type")[0] + pool_type = xpath_get_text(xml, '/pool/@type')[0] source = self._get_storage_source(pool_type, xml) poolArgs = {} poolArgs['name'] = pool.name() poolArgs['type'] = pool_type - poolArgs['source'] = {'path': source['path'], - 'host': source['addr']} + poolArgs['source'] = { + 'path': source['path'], 'host': source['addr']} conn = self.conn.get() poolDef = StoragePoolDef.create(poolArgs) try: @@ -338,14 +355,17 @@ def lookup(self, name): autostart = True if pool.autostart() else False persistent = True if pool.isPersistent() else False xml = pool.XMLDesc(0) - path = xpath_get_text(xml, "/pool/target/path")[0] - pool_type = xpath_get_text(xml, "/pool/@type")[0] + path = xpath_get_text(xml, '/pool/target/path')[0] + pool_type = xpath_get_text(xml, '/pool/@type')[0] source = self._get_storage_source(pool_type, xml) # FIXME: nfs workaround - prevent any libvirt operation # for a nfs if the corresponding NFS server is down. if pool_type == 'netfs' and not self._nfs_status_online(pool): - wok_log.debug("NFS pool %s is offline, reason: NFS " - "server %s is unreachable.", name, source['addr']) + wok_log.debug( + 'NFS pool %s is offline, reason: NFS ' 'server %s is unreachable.', + name, + source['addr'], + ) # Mark state as '4' => inaccessible. info[0] = 4 # skip calculating volumes @@ -353,17 +373,19 @@ def lookup(self, name): else: nr_volumes = self._get_storagepool_vols_num(pool) - res = {'state': POOL_STATE_MAP[info[0]], - 'path': path, - 'source': source, - 'type': pool_type, - 'autostart': autostart, - 'capacity': info[1], - 'allocated': info[2], - 'available': info[3], - 'nr_volumes': nr_volumes, - 'persistent': persistent, - 'in_use': self._pool_used_by_template(name)} + res = { + 'state': POOL_STATE_MAP[info[0]], + 'path': path, + 'source': source, + 'type': pool_type, + 'autostart': autostart, + 'capacity': info[1], + 'allocated': info[2], + 'available': info[3], + 'nr_volumes': nr_volumes, + 'persistent': persistent, + 'in_use': self._pool_used_by_template(name), + } if not pool.isPersistent(): # Deal with deep scan generated pool @@ -383,19 +405,23 @@ def _update_lvm_disks(self, pool_name, disks): lsblk_cmd = ['lsblk', disk] output, error, returncode = run_command(lsblk_cmd) if returncode != 0: - wok_log.error('%s is not a valid disk/partition. Could not ' - 'add it to the pool %s.', disk, pool_name) - raise OperationFailed('KCHPOOL0027E', {'disk': disk, - 'pool': pool_name}) + wok_log.error( + '%s is not a valid disk/partition. 
Could not ' + 'add it to the pool %s.', + disk, + pool_name, + ) + raise OperationFailed( + 'KCHPOOL0027E', {'disk': disk, 'pool': pool_name}) # add disks to the lvm pool using vgextend + virsh refresh - vgextend_cmd = ["vgextend", pool_name] + vgextend_cmd = ['vgextend', pool_name] vgextend_cmd += disks output, error, returncode = run_command(vgextend_cmd) if returncode != 0: - msg = "Could not add disks to pool %s, error: %s" + msg = 'Could not add disks to pool %s, error: %s' wok_log.error(msg, pool_name, error) - raise OperationFailed('KCHPOOL0028E', {'pool': pool_name, - 'err': error}) + raise OperationFailed( + 'KCHPOOL0028E', {'pool': pool_name, 'err': error}) # refreshing pool state pool = self.get_storagepool(pool_name, self.conn) if pool.isActive(): @@ -412,30 +438,32 @@ def update(self, name, params): if 'disks' in params: # check if pool is type 'logical' xml = pool.XMLDesc(0) - pool_type = xpath_get_text(xml, "/pool/@type")[0] + pool_type = xpath_get_text(xml, '/pool/@type')[0] if pool_type != 'logical': raise InvalidOperation('KCHPOOL0029E') self._update_lvm_disks(name, params['disks']) ident = pool.name() - return ident.decode('utf-8') + return ident def activate(self, name): pool = self.get_storagepool(name, self.conn) # FIXME: nfs workaround - do not activate a NFS pool # if the NFS server is not reachable. xml = pool.XMLDesc(0) - pool_type = xpath_get_text(xml, "/pool/@type")[0] + pool_type = xpath_get_text(xml, '/pool/@type')[0] if pool_type == 'netfs' and not self._nfs_status_online(pool): # block the user from activating the pool. source = self._get_storage_source(pool_type, xml) - raise OperationFailed("KCHPOOL0032E", - {'name': name, 'server': source['addr']}) - return + raise OperationFailed( + 'KCHPOOL0032E', {'name': name, 'server': source['addr']} + ) + try: pool.create(0) except libvirt.libvirtError as e: - raise OperationFailed("KCHPOOL0009E", - {'name': name, 'err': e.get_error_message()}) + raise OperationFailed( + 'KCHPOOL0009E', {'name': name, 'err': e.get_error_message()} + ) def _pool_used_by_template(self, pool_name): with self.objstore as session: @@ -457,23 +485,25 @@ def deactivate(self, name): # FIXME: nfs workaround - do not try to deactivate a NFS pool # if the NFS server is not reachable. xml = pool.XMLDesc(0) - pool_type = xpath_get_text(xml, "/pool/@type")[0] + pool_type = xpath_get_text(xml, '/pool/@type')[0] if pool_type == 'netfs' and not self._nfs_status_online(pool): # block the user from dactivating the pool. 
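Note: the activate() and deactivate() hunks in this file repeat the same Python 3 conversion, except libvirt.libvirtError as e together with e.get_error_message() in place of the old comma syntax and e.message. A compact sketch of that pattern outside the plugin, where RuntimeError stands in for wok's OperationFailed:

    import libvirt

    def activate_pool(conn, name):
        pool = conn.storagePoolLookupByName(name)
        try:
            pool.create(0)
        except libvirt.libvirtError as e:  # Python 3: 'as e', not ', e'
            raise RuntimeError(
                f'cannot activate {name}: {e.get_error_message()}')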
source = self._get_storage_source(pool_type, xml) - raise OperationFailed("KCHPOOL0033E", - {'name': name, 'server': source['addr']}) - return + raise OperationFailed( + 'KCHPOOL0033E', {'name': name, 'server': source['addr']} + ) + try: persistent = pool.isPersistent() pool.destroy() except libvirt.libvirtError as e: - raise OperationFailed("KCHPOOL0010E", - {'name': name, 'err': e.get_error_message()}) + raise OperationFailed( + 'KCHPOOL0010E', {'name': name, 'err': e.get_error_message()} + ) # If pool was not persistent, then it was erased by destroy() and # must return nothing here, to trigger _redirect() and avoid errors if not persistent: - return "" + return '' def delete(self, name): if self._pool_used_by_template(name): @@ -481,24 +511,25 @@ def delete(self, name): pool = self.get_storagepool(name, self.conn) if pool.isActive(): - raise InvalidOperation("KCHPOOL0005E", {'name': name}) + raise InvalidOperation('KCHPOOL0005E', {'name': name}) vms = self._get_vms_attach_to_storagepool(name) if len(vms) > 0: - raise InvalidOperation('KCHPOOL0039E', {'name': name, - 'vms': ",".join(vms)}) + raise InvalidOperation( + 'KCHPOOL0039E', {'name': name, 'vms': ','.join(vms)}) try: pool.undefine() except libvirt.libvirtError as e: - raise OperationFailed("KCHPOOL0011E", - {'name': name, 'err': e.get_error_message()}) + raise OperationFailed( + 'KCHPOOL0011E', {'name': name, 'err': e.get_error_message()} + ) def _get_vms_attach_to_storagepool(self, storagepool): conn = self.conn.get() # get storage pool path pool = self.get_storagepool(storagepool, self.conn) - path = "".join(xpath_get_text(pool.XMLDesc(), "/pool/target/path")) + path = ''.join(xpath_get_text(pool.XMLDesc(), '/pool/target/path')) # activate and deactive quickly to get volumes vms = [] @@ -516,5 +547,4 @@ def __init__(self, **kargs): pass def lookup(self, name): - return {'state': 'active', - 'type': 'kimchi-iso'} + return {'state': 'active', 'type': 'kimchi-iso'} diff --git a/model/storageservers.py b/model/storageservers.py index eed0a4192..8e5bd4ef6 100644 --- a/model/storageservers.py +++ b/model/storageservers.py @@ -16,9 +16,7 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - from wok.exception import NotFoundError - from wok.plugins.kimchi.model.storagepools import StoragePoolModel from wok.plugins.kimchi.model.storagepools import StoragePoolsModel @@ -44,8 +42,8 @@ def get_list(self, _target_type=None): for pool in pools: try: pool_info = self.pool.lookup(pool) - if (pool_info['type'] in target_type and - pool_info['source']['addr'] not in server_list): + if (pool_info['type'] in target_type + and pool_info['source']['addr'] not in server_list): # Avoid to add same server for multiple times # if it hosts more than one storage type server_list.append(pool_info['source']['addr']) @@ -67,16 +65,16 @@ def lookup(self, server): for pool in pools: try: pool_info = self.pool.lookup(pool) - if (pool_info['type'] in STORAGE_SERVERS and - pool_info['source']['addr'] == server): + if (pool_info['type'] in STORAGE_SERVERS + and pool_info['source']['addr'] == server): info = dict(host=server) - if (pool_info['type'] == "iscsi" and - 'port' in pool_info['source']): - info["port"] = pool_info['source']['port'] + if (pool_info['type'] == 'iscsi' + and 'port' in pool_info['source']): + info['port'] = pool_info['source']['port'] return info except NotFoundError: # Avoid 
inconsistent pool result because of lease between list # lookup pass - raise NotFoundError("KCHSR0001E", {'server': server}) + raise NotFoundError('KCHSR0001E', {'server': server}) diff --git a/model/storagetargets.py b/model/storagetargets.py index 25a9b3484..203c51a59 100644 --- a/model/storagetargets.py +++ b/model/storagetargets.py @@ -16,16 +16,14 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - import libvirt import lxml.etree as ET from lxml import objectify from lxml.builder import E - -from wok.utils import patch_find_nfs_target, wok_log - from wok.plugins.kimchi.model.config import CapabilitiesModel from wok.plugins.kimchi.model.storageservers import STORAGE_SERVERS +from wok.utils import patch_find_nfs_target +from wok.utils import wok_log class StorageTargetsModel(object): @@ -44,15 +42,19 @@ def get_list(self, storage_server, _target_type=None, _server_port=None): if not self.caps.nfs_target_probe and target_type == 'netfs': targets = patch_find_nfs_target(storage_server) else: - xml = self._get_storage_server_spec(server=storage_server, - target_type=target_type, - server_port=_server_port) + xml = self._get_storage_server_spec( + server=storage_server, + target_type=target_type, + server_port=_server_port, + ) conn = self.conn.get() try: ret = conn.findStoragePoolSources(target_type, xml, 0) except libvirt.libvirtError as e: - err = "Query storage pool source fails because of %s" - wok_log.warning(err, e.get_error_message()) + wok_log.warning( + f'Query storage pool source fails because of ' + f'{e.get_error_message()}' + ) continue targets = self._parse_target_source_result(target_type, ret) @@ -66,24 +68,25 @@ def get_list(self, storage_server, _target_type=None, _server_port=None): # Get all existing ISCSI and NFS pools pools = conn.listAllStoragePools( libvirt.VIR_CONNECT_LIST_STORAGE_POOLS_ISCSI | - libvirt.VIR_CONNECT_LIST_STORAGE_POOLS_NETFS) + libvirt.VIR_CONNECT_LIST_STORAGE_POOLS_NETFS + ) for pool in pools: pool_xml = pool.XMLDesc(0) root = objectify.fromstring(pool_xml) - if root.get('type') == 'netfs' and \ - root.source.dir is not None: + if root.get('type') == 'netfs' and root.source.dir is not None: used_paths.append(root.source.dir.get('path')) - elif root.get('type') == 'iscsi' and \ - root.source.device is not None: + elif root.get('type') == 'iscsi' and root.source.device is not None: used_paths.append(root.source.device.get('path')) except libvirt.libvirtError as e: - err = "Query storage pool source fails because of %s" - wok_log.warning(err, e.get_error_message()) + wok_log.warning( + f'Query storage pool source fails because of {e.get_error_message()}' + ) # Filter target_list to not not show the used paths - target_list = [elem for elem in target_list - if elem.get('target') not in used_paths] + target_list = [ + elem for elem in target_list if elem.get('target') not in used_paths + ] return [dict(t) for t in set(tuple(t.items()) for t in target_list)] def _get_storage_server_spec(self, **kwargs): @@ -97,8 +100,8 @@ def _get_storage_server_spec(self, **kwargs): else: extra_args.append(E.format(type=server_type)) - host_attr = {"name": kwargs['server']} - server_port = kwargs.get("server_port") + host_attr = {'name': kwargs['server']} + server_port = kwargs.get('server_port') if server_port is not None: host_attr['port'] = server_port @@ -117,6 +120,6 @@ def 
_parse_target_source_result(self, target_type, xml_str): target_path = source.device.get('path') type = target_type host_name = source.host.get('name') - ret.append(dict(host=host_name, target_type=type, - target=target_path)) + ret.append( + dict(host=host_name, target_type=type, target=target_path)) return ret diff --git a/model/storagevolumes.py b/model/storagevolumes.py index 0bdbe998c..a3ce86a92 100644 --- a/model/storagevolumes.py +++ b/model/storagevolumes.py @@ -16,44 +16,42 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - import contextlib -import libvirt -import lxml.etree as ET -import magic import os import tempfile import threading import time -import urllib2 -from lxml.builder import E +import urllib +import libvirt +import lxml.etree as ET +import magic +from lxml.builder import E from wok.asynctask import AsyncTask -from wok.exception import InvalidOperation, InvalidParameter, IsoFormatError -from wok.exception import MissingParameter, NotFoundError, OperationFailed -from wok.utils import get_unique_file_name -from wok.utils import probe_file_permission_as_user, wok_log -from wok.xmlutils.utils import xpath_get_text +from wok.exception import InvalidOperation +from wok.exception import InvalidParameter +from wok.exception import IsoFormatError +from wok.exception import MissingParameter +from wok.exception import NotFoundError +from wok.exception import OperationFailed from wok.model.tasks import TaskModel - from wok.plugins.kimchi.config import READONLY_POOL_TYPE from wok.plugins.kimchi.isoinfo import IsoImage from wok.plugins.kimchi.kvmusertests import UserTests from wok.plugins.kimchi.model.diskutils import get_disk_used_by from wok.plugins.kimchi.model.storagepools import StoragePoolModel from wok.plugins.kimchi.utils import get_next_clone_name +from wok.utils import get_unique_file_name +from wok.utils import probe_file_permission_as_user +from wok.utils import wok_log +from wok.xmlutils.utils import xpath_get_text -VOLUME_TYPE_MAP = {0: 'file', - 1: 'block', - 2: 'directory', - 3: 'network'} +VOLUME_TYPE_MAP = {0: 'file', 1: 'block', 2: 'directory', 3: 'network'} READ_CHUNK_SIZE = 1048576 # 1 MiB REQUIRE_NAME_PARAMS = ['capacity'] -VALID_RAW_CONTENT = ['dos/mbr boot sector', - 'x86 boot sector', - 'data'] +VALID_RAW_CONTENT = ['dos/mbr boot sector', 'x86 boot sector', 'data'] upload_volumes = dict() @@ -72,8 +70,8 @@ def create(self, pool_name, params): index_list = list(i for i in range(len(vol_source)) if vol_source[i] in params) if len(index_list) != 1: - raise InvalidParameter("KCHVOL0018E", - {'param': ",".join(vol_source)}) + raise InvalidParameter( + 'KCHVOL0018E', {'param': ','.join(vol_source)}) create_param = vol_source[index_list[0]] @@ -81,8 +79,8 @@ def create(self, pool_name, params): if create_param == 'url': url = params['url'] try: - urllib2.urlopen(url).close() - except: + urllib.request.urlopen(url).close() + except Exception: raise InvalidParameter('KCHVOL0022E', {'url': url}) all_vol_names = self.get_list(pool_name) @@ -99,31 +97,33 @@ def create(self, pool_name, params): if create_param == 'url': name = os.path.basename(params['url']) else: - name = 'upload-%s' % int(time.time()) + name = f'upload-{int(time.time())}' name = get_unique_file_name(all_vol_names, name) params['name'] = name try: - create_func = getattr(self, '_create_volume_with_%s' % - create_param) + create_func 
= getattr(self, f'_create_volume_with_{create_param}') except AttributeError: - raise InvalidParameter("KCHVOL0019E", {'param': create_param}) + raise InvalidParameter('KCHVOL0019E', {'param': create_param}) - pool_info = StoragePoolModel(conn=self.conn, - objstore=self.objstore).lookup(pool_name) + pool_info = StoragePoolModel(conn=self.conn, objstore=self.objstore).lookup( + pool_name + ) if pool_info['type'] in READONLY_POOL_TYPE: - raise InvalidParameter("KCHVOL0012E", {'type': pool_info['type']}) + raise InvalidParameter('KCHVOL0012E', {'type': pool_info['type']}) if pool_info['state'] == 'inactive': - raise InvalidParameter('KCHVOL0003E', {'pool': pool_name, - 'volume': name}) + raise InvalidParameter( + 'KCHVOL0003E', {'pool': pool_name, 'volume': name}) if name in all_vol_names: raise InvalidParameter('KCHVOL0001E', {'name': name}) params['pool'] = pool_name params['pool_type'] = pool_info['type'] - targeturi = '/plugins/kimchi/storagepools/%s/storagevolumes/%s' \ - % (pool_name, name) + targeturi = '/plugins/kimchi/storagepools/%s/storagevolumes/%s' % ( + pool_name, + name, + ) taskid = AsyncTask(targeturi, create_func, params).id return self.task.lookup(taskid) @@ -142,7 +142,7 @@ def _create_volume_with_capacity(self, cb, params): """ allocation = 0 - if params['pool_type'] == "logical": + if params['pool_type'] == 'logical': allocation = params['capacity'] params.setdefault('allocation', allocation) params.setdefault('format', 'qcow2') @@ -151,27 +151,30 @@ def _create_volume_with_capacity(self, cb, params): try: pool = StoragePoolModel.get_storagepool(pool_name, self.conn) xml = vol_xml % params - except KeyError, item: - raise MissingParameter("KCHVOL0004E", {'item': str(item), - 'volume': name}) + except KeyError as item: + raise MissingParameter( + 'KCHVOL0004E', {'item': str(item), 'volume': name}) try: pool.createXML(xml, 0) except libvirt.libvirtError as e: - raise OperationFailed("KCHVOL0007E", - {'name': name, 'pool': pool_name, - 'err': e.get_error_message()}) - - vol_info = StorageVolumeModel(conn=self.conn, - objstore=self.objstore).lookup(pool_name, - name) + raise OperationFailed( + 'KCHVOL0007E', + {'name': name, 'pool': pool_name, 'err': e.get_error_message()}, + ) + + vol_info = StorageVolumeModel(conn=self.conn, objstore=self.objstore).lookup( + pool_name, name + ) vol_path = vol_info['path'] if params.get('upload', False): - upload_volumes[vol_path] = {'lock': threading.Lock(), - 'offset': 0, 'cb': cb, - 'expected_vol_size': params['capacity'] - } + upload_volumes[vol_path] = { + 'lock': threading.Lock(), + 'offset': 0, + 'cb': cb, + 'expected_vol_size': params['capacity'], + } cb('ready for upload') else: cb('OK', True) @@ -181,8 +184,7 @@ def _create_volume_with_url(self, cb, params): name = params['name'] url = params['url'] - pool_model = StoragePoolModel(conn=self.conn, - objstore=self.objstore) + pool_model = StoragePoolModel(conn=self.conn, objstore=self.objstore) pool = pool_model.lookup(pool_name) if pool['type'] in ['dir', 'netfs']: @@ -190,46 +192,54 @@ def _create_volume_with_url(self, cb, params): else: file_path = tempfile.mkstemp(prefix=name)[1] - with contextlib.closing(urllib2.urlopen(url)) as response: + with contextlib.closing(urllib.request.urlopen(url)) as response: with open(file_path, 'w') as volume_file: - remote_size = response.info().getheader('Content-Length', '-') + remote_size = response.getheader('Content-Length', '-') downloaded_size = 0 try: while True: - chunk_data = response.read(READ_CHUNK_SIZE) + chunk_data = response.read( 
+ READ_CHUNK_SIZE).decode('utf-8') if not chunk_data: break volume_file.write(chunk_data) downloaded_size += len(chunk_data) - cb('%s/%s' % (downloaded_size, remote_size)) + cb(f'{downloaded_size}/{remote_size}') except (IOError, libvirt.libvirtError) as e: if os.path.isfile(file_path): os.remove(file_path) - raise OperationFailed('KCHVOL0007E', {'name': name, - 'pool': pool_name, - 'err': e.message}) + raise OperationFailed( + 'KCHVOL0007E', {'name': name, + 'pool': pool_name, 'err': str(e)} + ) if pool['type'] in ['dir', 'netfs']: virt_pool = StoragePoolModel.get_storagepool(pool_name, self.conn) virt_pool.refresh(0) else: + def _stream_handler(stream, nbytes, fd): return fd.read(nbytes) virt_stream = virt_vol = None try: - task = self.create(pool_name, {'name': name, - 'format': 'raw', - 'capacity': downloaded_size, - 'allocation': downloaded_size}) + task = self.create( + pool_name, + { + 'name': name, + 'format': 'raw', + 'capacity': downloaded_size, + 'allocation': downloaded_size, + }, + ) self.task.wait(task['id']) - virt_vol = StorageVolumeModel.get_storagevolume(pool_name, - name, - self.conn) + virt_vol = StorageVolumeModel.get_storagevolume( + pool_name, name, self.conn + ) virt_stream = self.conn.get().newStream(0) virt_vol.upload(virt_stream, 0, downloaded_size, 0) @@ -244,12 +254,13 @@ def _stream_handler(stream, nbytes, fd): virt_stream.abort() if virt_vol: virt_vol.delete(0) - except libvirt.libvirtError, virt_e: - wok_log.error(virt_e.message) + except libvirt.libvirtError as e: + wok_log.error(str(e)) finally: - raise OperationFailed('KCHVOL0007E', {'name': name, - 'pool': pool_name, - 'err': e.message}) + raise OperationFailed( + 'KCHVOL0007E', {'name': name, + 'pool': pool_name, 'err': str(e)} + ) finally: os.remove(file_path) @@ -258,12 +269,12 @@ def _stream_handler(stream, nbytes, fd): def get_list(self, pool_name): pool = StoragePoolModel.get_storagepool(pool_name, self.conn) if not pool.isActive(): - raise InvalidOperation("KCHVOL0006E", {'pool': pool_name}) + raise InvalidOperation('KCHVOL0006E', {'pool': pool_name}) try: pool.refresh(0) - except Exception, e: - wok_log.error("Pool refresh failed: %s" % str(e)) - return sorted(map(lambda x: x.decode('utf-8'), pool.listVolumes())) + except Exception as e: + wok_log.error(f'Pool refresh failed: {e}') + return sorted(pool.listVolumes()) class StorageVolumeModel(object): @@ -282,13 +293,13 @@ def __init__(self, **kargs): def get_storagevolume(poolname, name, conn): pool = StoragePoolModel.get_storagepool(poolname, conn) if not pool.isActive(): - raise InvalidOperation("KCHVOL0006E", {'pool': poolname}) + raise InvalidOperation('KCHVOL0006E', {'pool': poolname}) try: - return pool.storageVolLookupByName(name.encode("utf-8")) + return pool.storageVolLookupByName(name) except libvirt.libvirtError as e: if e.get_error_code() == libvirt.VIR_ERR_NO_STORAGE_VOL: - raise NotFoundError("KCHVOL0002E", {'name': name, - 'pool': poolname}) + raise NotFoundError( + 'KCHVOL0002E', {'name': name, 'pool': poolname}) else: raise @@ -298,7 +309,7 @@ def lookup(self, pool, name): info = vol.info() xml = vol.XMLDesc(0) try: - fmt = xpath_get_text(xml, "/volume/target/format/@type")[0] + fmt = xpath_get_text(xml, '/volume/target/format/@type')[0] except IndexError: # Not all types of libvirt storage can provide volume format # infomation. 
When there is no format information, we assume @@ -323,7 +334,9 @@ def lookup(self, pool, name): # raw files), so it's necessary check the 'content' of them isvalid = True if fmt == 'raw': - if not os.path.islink(path): # Check if file is a symlink to a real block device, if so, don't check it's contents for validity + # Check if file is a symlink to a real block device, + # if so, don't check it's contents for validity + if not os.path.islink(path): try: ms = magic.open(magic.NONE) ms.load() @@ -332,28 +345,31 @@ def lookup(self, pool, name): ms.close() except UnicodeDecodeError: isvalid = False - else: # We are a symlink - if "/dev/dm-" in os.path.realpath(path): - # This is most likely a real blockdevice - isvalid = True - wok_log.error("symlink detected, validated the disk") - else: - # Doesn't point to a known blockdevice - isvalid = False + else: # We are a symlink + if '/dev/dm-' in os.path.realpath(path): + # This is most likely a real blockdevice + isvalid = True + wok_log.error('symlink detected, validated the disk') + else: + # Doesn't point to a known blockdevice + isvalid = False used_by = get_disk_used_by(self.conn, path) - if (self.libvirt_user is None): + if self.libvirt_user is None: self.libvirt_user = UserTests().probe_user() - ret, _ = probe_file_permission_as_user(os.path.realpath(path), - self.libvirt_user) - res = dict(type=VOLUME_TYPE_MAP[info[0]], - capacity=info[1], - allocation=info[2], - path=path, - used_by=used_by, - format=fmt, - isvalid=isvalid, - has_permission=ret) + ret, _ = probe_file_permission_as_user( + os.path.realpath(path), self.libvirt_user + ) + res = dict( + type=VOLUME_TYPE_MAP[info[0]], + capacity=info[1], + allocation=info[2], + path=path, + used_by=used_by, + format=fmt, + isvalid=isvalid, + has_permission=ret, + ) if fmt == 'iso': if os.path.islink(path): path = os.path.join(os.path.dirname(path), os.readlink(path)) @@ -367,8 +383,13 @@ def lookup(self, pool, name): bootable = False res.update( - dict(os_distro=os_distro, os_version=os_version, path=path, - bootable=bootable)) + dict( + os_distro=os_distro, + os_version=os_version, + path=path, + bootable=bootable, + ) + ) return res def wipe(self, pool, name): @@ -376,28 +397,33 @@ def wipe(self, pool, name): try: volume.wipePattern(libvirt.VIR_STORAGE_VOL_WIPE_ALG_ZERO, 0) except libvirt.libvirtError as e: - raise OperationFailed("KCHVOL0009E", - {'name': name, 'err': e.get_error_message()}) + raise OperationFailed( + 'KCHVOL0009E', {'name': name, 'err': e.get_error_message()} + ) def delete(self, pool, name): - pool_info = StoragePoolModel(conn=self.conn, - objstore=self.objstore).lookup(pool) + pool_info = StoragePoolModel(conn=self.conn, objstore=self.objstore).lookup( + pool + ) if pool_info['type'] in READONLY_POOL_TYPE: - raise InvalidParameter("KCHVOL0012E", {'type': pool_info['type']}) + raise InvalidParameter('KCHVOL0012E', {'type': pool_info['type']}) volume = StorageVolumeModel.get_storagevolume(pool, name, self.conn) vol_path = volume.path() try: volume.delete(0) except libvirt.libvirtError as e: - raise OperationFailed("KCHVOL0010E", - {'name': name, 'err': e.get_error_message()}) + raise OperationFailed( + 'KCHVOL0010E', {'name': name, 'err': e.get_error_message()} + ) try: os.remove(vol_path) - except OSError, e: - wok_log.error("Unable to delete storage volume file: %s." - "Details: %s" % (pool_info['path'], e.message)) + except OSError as e: + wok_log.error( + f"Unable to delete storage volume file: {pool_info['path']}." 
+ f'Details: {e}' + ) def resize(self, pool, name, size): volume = StorageVolumeModel.get_storagevolume(pool, name, self.conn) @@ -415,8 +441,9 @@ def resize(self, pool, name, size): try: volume.resize(size, flags) except libvirt.libvirtError as e: - raise OperationFailed("KCHVOL0011E", - {'name': name, 'err': e.get_error_message()}) + raise OperationFailed( + 'KCHVOL0011E', {'name': name, 'err': e.get_error_message()} + ) def clone(self, pool, name, new_pool=None, new_name=None): """Clone a storage volume. @@ -441,16 +468,19 @@ def clone(self, pool, name, new_pool=None, new_name=None): # is specified if new_name is None: base, ext = os.path.splitext(name) - new_name = get_next_clone_name(self.storagevolumes.get_list(pool), - base, ext) - - params = {'pool': pool, - 'name': name, - 'new_pool': new_pool, - 'new_name': new_name} - target_uri = u'/plugins/kimchi/storagepools/%s/storagevolumes/%s/clone' - taskid = AsyncTask(target_uri % (pool, new_name), self._clone_task, - params).id + new_name = get_next_clone_name( + self.storagevolumes.get_list(pool), base, ext + ) + + params = { + 'pool': pool, + 'name': name, + 'new_pool': new_pool, + 'new_name': new_name, + } + target_uri = '/plugins/kimchi/storagepools/%s/storagevolumes/%s/clone' + taskid = AsyncTask(target_uri % (pool, new_name), + self._clone_task, params).id return self.task.lookup(taskid) def _clone_task(self, cb, params): @@ -474,31 +504,36 @@ def _clone_task(self, cb, params): try: cb('setting up volume cloning') - orig_vir_vol = StorageVolumeModel.get_storagevolume(orig_pool_name, - orig_vol_name, - self.conn) + orig_vir_vol = StorageVolumeModel.get_storagevolume( + orig_pool_name, orig_vol_name, self.conn + ) orig_vol = self.lookup(orig_pool_name, orig_vol_name) - new_vir_pool = StoragePoolModel.get_storagepool(new_pool_name, - self.conn) + new_vir_pool = StoragePoolModel.get_storagepool( + new_pool_name, self.conn) cb('building volume XML') root_elem = E.volume() root_elem.append(E.name(new_vol_name)) - root_elem.append(E.capacity(unicode(orig_vol['capacity']), - unit='bytes')) + root_elem.append(E.capacity( + str(orig_vol['capacity']), unit='bytes')) target_elem = E.target() target_elem.append(E.format(type=orig_vol['format'])) root_elem.append(target_elem) - new_vol_xml = ET.tostring(root_elem, encoding='utf-8', - pretty_print=True) + new_vol_xml = ET.tostring( + root_elem, encoding='utf-8', pretty_print=True + ).decode('utf-8') cb('cloning volume') new_vir_pool.createXMLFrom(new_vol_xml, orig_vir_vol, 0) - except (InvalidOperation, NotFoundError, libvirt.libvirtError), e: - raise OperationFailed('KCHVOL0023E', - {'name': orig_vol_name, - 'pool': orig_pool_name, - 'err': e.get_error_message()}) + except (InvalidOperation, NotFoundError, libvirt.libvirtError) as e: + raise OperationFailed( + 'KCHVOL0023E', + { + 'name': orig_vol_name, + 'pool': orig_pool_name, + 'err': e.get_error_message(), + }, + ) self.lookup(new_pool_name, new_vol_name) @@ -508,7 +543,7 @@ def doUpload(self, cb, vol, offset, data, data_size): try: st = self.conn.get().newStream(0) vol.upload(st, offset, data_size) - st.send(data) + st.send(data.encode('utf-8')) st.finish() except Exception as e: st and st.abort() @@ -516,17 +551,17 @@ def doUpload(self, cb, vol, offset, data, data_size): try: vol.delete(0) - except Exception as e: + except Exception: pass - raise OperationFailed("KCHVOL0029E", {"err": e.message}) + raise OperationFailed('KCHVOL0029E', {'err': str(e)}) def update(self, pool, name, params): chunk_data = params['chunk'].fullvalue() 
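Note: doUpload() above encodes the chunk before st.send() because virStream.send() expects bytes on Python 3. A rough sketch of the newStream/upload/send/finish sequence it wraps, with conn and vol assumed to be an open virConnect and a virStorageVol and the error handling simplified:

    def upload_chunk(conn, vol, offset, data):
        if isinstance(data, str):      # virStream.send() needs bytes
            data = data.encode('utf-8')
        st = conn.newStream(0)
        try:
            vol.upload(st, offset, len(data))
            st.send(data)
            st.finish()
        except Exception:
            st.abort()
            raise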
chunk_size = int(params['chunk_size']) if len(chunk_data) != chunk_size: - raise OperationFailed("KCHVOL0026E") + raise OperationFailed('KCHVOL0026E') vol = StorageVolumeModel.get_storagevolume(pool, name, self.conn) vol_path = vol.path() @@ -534,22 +569,23 @@ def update(self, pool, name, params): vol_data = upload_volumes.get(vol_path) if vol_data is None: - raise OperationFailed("KCHVOL0027E", {"vol": vol_path}) + raise OperationFailed('KCHVOL0027E', {'vol': vol_path}) cb = vol_data['cb'] lock = vol_data['lock'] with lock: offset = vol_data['offset'] if (offset + chunk_size) > vol_capacity: - raise OperationFailed("KCHVOL0028E") + raise OperationFailed('KCHVOL0028E') - cb('%s/%s' % (offset, vol_capacity)) + cb(f'{offset}/{vol_capacity}') self.doUpload(cb, vol, offset, chunk_data, chunk_size) - cb('%s/%s' % (offset + chunk_size, vol_capacity)) + cb(f'{offset + chunk_size}/{vol_capacity}') vol_data['offset'] += chunk_size - if (vol_data['offset'] == vol_capacity) or \ - (vol_data['offset'] == vol_data['expected_vol_size']): + if (vol_data['offset'] == vol_capacity) or ( + vol_data['offset'] == vol_data['expected_vol_size'] + ): del upload_volumes[vol_path] cb('OK', True) @@ -570,16 +606,16 @@ def get_list(self): pool = StoragePoolModel.get_storagepool(pool_name, self.conn) pool.refresh(0) volumes = pool.listVolumes() - except Exception, e: + except Exception as e: # Skip inactive pools - wok_log.debug("Shallow scan: skipping pool %s because of " - "error: %s", (pool_name, e.message)) + wok_log.debug( + f'Shallow scan: skipping pool {pool_name} because of ' f'error: {e}' + ) continue for volume in volumes: - res = self.storagevolume.lookup(pool_name, - volume.decode("utf-8")) + res = self.storagevolume.lookup(pool_name, volume) if res['format'] == 'iso' and res['bootable']: - res['name'] = '%s' % volume + res['name'] = f'{volume}' iso_volumes.append(res) return iso_volumes diff --git a/model/templates.py b/model/templates.py index c27e83f71..d8257f18c 100644 --- a/model/templates.py +++ b/model/templates.py @@ -16,36 +16,37 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - import copy -import libvirt -import magic import os import platform -import psutil import stat -import urlparse - -from wok.exception import InvalidOperation, InvalidParameter -from wok.exception import NotFoundError, OperationFailed -from wok.utils import probe_file_permission_as_user -from wok.utils import run_setfacl_set_attr -from wok.xmlutils.utils import xpath_get_text +import urllib.parse +import libvirt +import magic +import psutil +from wok.exception import InvalidOperation +from wok.exception import InvalidParameter +from wok.exception import NotFoundError +from wok.exception import OperationFailed from wok.plugins.kimchi.config import get_kimchi_version from wok.plugins.kimchi.kvmusertests import UserTests from wok.plugins.kimchi.model.cpuinfo import CPUInfoModel -from wok.plugins.kimchi.utils import is_libvirtd_up, pool_name_from_uri from wok.plugins.kimchi.utils import create_disk_image +from wok.plugins.kimchi.utils import is_libvirtd_up +from wok.plugins.kimchi.utils import pool_name_from_uri from wok.plugins.kimchi.vmtemplate import VMTemplate +from wok.utils import probe_file_permission_as_user +from wok.utils import run_setfacl_set_attr +from wok.xmlutils.utils import xpath_get_text -ISO_TYPE = ["DOS/MBR", "ISO 9660 CD-ROM"] +ISO_TYPE = 
['DOS/MBR', 'ISO 9660 CD-ROM'] # In PowerPC, memories must be aligned to 256 MiB PPC_MEM_ALIGN = 256 # Max memory 16TB for PPC and 4TiB for X (according to Red Hat), in KiB -MAX_MEM_LIM = 4294967296 # 4 TiB +MAX_MEM_LIM = 4294967296 # 4 TiB if os.uname()[4] in ['ppc', 'ppc64', 'ppc64le']: - MAX_MEM_LIM *= 4 # 16TiB + MAX_MEM_LIM *= 4 # 16TiB class TemplatesModel(object): @@ -59,19 +60,20 @@ def create(self, params): conn = self.conn.get() for net_name in params.get(u'networks', []): try: - conn.networkLookupByName(net_name.encode('utf-8')) + conn.networkLookupByName(net_name) except Exception: - raise InvalidParameter("KCHTMPL0003E", {'network': net_name, - 'template': name}) + raise InvalidParameter( + 'KCHTMPL0003E', {'network': net_name, 'template': name} + ) # Valid interfaces interfaces = params.get('interfaces', []) validate_interfaces(interfaces) if os.uname()[4] not in ['s390x', 's390'] and 'console' in params: - raise InvalidParameter("KCHTMPL0043E") + raise InvalidParameter('KCHTMPL0043E') # get source_media - source_media = params.pop("source_media") + source_media = params.pop('source_media') if source_media['type'] == 'netboot': params['netboot'] = True @@ -80,18 +82,22 @@ def create(self, params): # Get path of source media if it's based on disk type. path = source_media.get('path', None) if path is None: - raise InvalidParameter("KCHTMPL0016E") + raise InvalidParameter('KCHTMPL0016E') # not local image: set as remote ISO - path = path.encode('utf-8') - if urlparse.urlparse(path).scheme in ["http", "https", "tftp", "ftp", - "ftps"]: - params["cdrom"] = path + if urllib.parse.urlparse(path).scheme in [ + 'http', + 'https', + 'tftp', + 'ftp', + 'ftps', + ]: + params['cdrom'] = path return self.save_template(params) # Local file (ISO/Img) does not exist: raise error if not os.path.exists(path): - raise InvalidParameter("KCHTMPL0002E", {'path': path}) + raise InvalidParameter('KCHTMPL0002E', {'path': path}) # create magic object to discover file type file_type = magic.open(magic.MAGIC_NONE) @@ -101,7 +107,7 @@ def create(self, params): # cdrom iscdrom = [t for t in ISO_TYPE if t in ftype] if iscdrom: - params["cdrom"] = path + params['cdrom'] = path # check search permission st_mode = os.stat(path).st_mode @@ -111,13 +117,14 @@ def create(self, params): run_setfacl_set_attr(realpath, user=user) ret, excp = probe_file_permission_as_user(realpath, user) if ret is False: - raise InvalidParameter('KCHISO0008E', - {'filename': path, 'user': user, - 'err': excp}) + raise InvalidParameter( + 'KCHISO0008E', {'filename': path, + 'user': user, 'err': excp} + ) # disk else: - params["disks"] = params.get('disks', []) - params["disks"].append({"base": path}) + params['disks'] = params.get('disks', []) + params['disks'].append({'base': path}) return self.save_template(params) @@ -143,17 +150,16 @@ def save_template(self, params): name = params['name'] with self.objstore as session: if name in session.get_list('template'): - raise InvalidOperation("KCHTMPL0001E", {'name': name}) + raise InvalidOperation('KCHTMPL0001E', {'name': name}) # Store template on objectstore try: with self.objstore as session: - session.store('template', name, t.info, - get_kimchi_version()) + session.store('template', name, t.info, get_kimchi_version()) except InvalidOperation: raise - except Exception, e: - raise OperationFailed('KCHTMPL0020E', {'err': e.message}) + except Exception as e: + raise OperationFailed('KCHTMPL0020E', {'err': str(e)}) return name @@ -169,15 +175,18 @@ def template_volume_validate(self, 
volume, pool): pool_name = pool_name_from_uri(pool['name']) if pool['type'] in ['iscsi', 'scsi']: if not volume: - raise InvalidParameter("KCHTMPL0018E") + raise InvalidParameter('KCHTMPL0018E') storagevolumes = __import__( - "wok.plugins.kimchi.model.storagevolumes", fromlist=['']) - pool_volumes = storagevolumes.StorageVolumesModel( - **kwargs).get_list(pool_name) + 'wok.plugins.kimchi.model.storagevolumes', fromlist=[''] + ) + pool_volumes = storagevolumes.StorageVolumesModel(**kwargs).get_list( + pool_name + ) if volume not in pool_volumes: - raise InvalidParameter("KCHTMPL0019E", {'pool': pool_name, - 'volume': volume}) + raise InvalidParameter( + 'KCHTMPL0019E', {'pool': pool_name, 'volume': volume} + ) class TemplateModel(object): @@ -206,14 +215,17 @@ def lookup(self, name): def clone(self, name): # set default name - subfixs = [v[len(name):] for v in self.templates.get_list() - if v.startswith(name)] - indexs = [int(v.lstrip("-clone")) for v in subfixs - if v.startswith("-clone") and - v.lstrip("-clone").isdigit()] + subfixs = [ + v[len(name):] for v in self.templates.get_list() if v.startswith(name) + ] + indexs = [ + int(v.lstrip('-clone')) + for v in subfixs + if v.startswith('-clone') and v.lstrip('-clone').isdigit() + ] indexs.sort() - index = "1" if not indexs else str(indexs[-1] + 1) - clone_name = name + "-clone" + index + index = '1' if not indexs else str(indexs[-1] + 1) + clone_name = name + '-clone' + index temp = self.lookup(name) temp['name'] = clone_name @@ -227,7 +239,7 @@ def delete(self, name): except NotFoundError: raise except Exception as e: - raise OperationFailed('KCHTMPL0021E', {'err': e.message}) + raise OperationFailed('KCHTMPL0021E', {'err': str(e)}) def update(self, name, params): edit_template = self.lookup(name) @@ -235,17 +247,20 @@ def update(self, name, params): # If new name is not same as existing name # and new name already exists: raise exception with self.objstore as session: - if 'name' in params and name != params['name'] \ - and params['name'] in session.get_list('template'): - raise InvalidOperation("KCHTMPL0001E", - {'name': params['name']}) + if ( + 'name' in params + and name != params['name'] + and params['name'] in session.get_list('template') + ): + raise InvalidOperation( + 'KCHTMPL0001E', {'name': params['name']}) # Valid interfaces interfaces = params.get('interfaces', []) validate_interfaces(interfaces) if os.uname()[4] not in ['s390x', 's390'] and 'console' in params: - raise InvalidParameter("KCHTMPL0043E") + raise InvalidParameter('KCHTMPL0043E') # Merge graphics settings graph_args = params.get('graphics') @@ -273,10 +288,11 @@ def update(self, name, params): for net_name in params.get(u'networks', []): try: conn = self.conn.get() - conn.networkLookupByName(net_name.encode('utf-8')) + conn.networkLookupByName(net_name) except Exception: - raise InvalidParameter("KCHTMPL0003E", {'network': net_name, - 'template': name}) + raise InvalidParameter( + 'KCHTMPL0003E', {'network': net_name, 'template': name} + ) try: # make sure the new template will be created @@ -292,8 +308,8 @@ def update(self, name, params): except InvalidOperation: raise - except Exception, e: - raise OperationFailed('KCHTMPL0032E', {'err': e.message}) + except Exception as e: + raise OperationFailed('KCHTMPL0032E', {'err': str(e)}) return params['name'] @@ -304,7 +320,7 @@ def validate_interfaces(interfaces): # Otherwise FIXME to valid interfaces exist on system. 
# if os.uname()[4] not in ['s390x', 's390'] and interfaces: - raise InvalidParameter("KCHTMPL0039E") + raise InvalidParameter('KCHTMPL0039E') # FIXME to valid interfaces on system. @@ -324,29 +340,37 @@ def validate_memory(memory): # Memories must be lesser than 16TiB (PPC) or 4TiB (x86) and the Host # memory limit if (current > (MAX_MEM_LIM >> 10)) or (maxmem > (MAX_MEM_LIM >> 10)): - raise InvalidParameter("KCHVM0079E", - {'value': str(MAX_MEM_LIM / (1024**3))}) + raise InvalidParameter( + 'KCHVM0079E', {'value': str(MAX_MEM_LIM / (1024 ** 3))}) if (current > host_memory) or (maxmem > host_memory): - raise InvalidParameter("KCHVM0078E", {'memHost': host_memory}) + raise InvalidParameter('KCHVM0078E', {'memHost': host_memory}) # Current memory cannot be greater than maxMemory if current > maxmem: - raise InvalidParameter("KCHTMPL0031E", - {'mem': str(current), - 'maxmem': str(maxmem)}) + raise InvalidParameter( + 'KCHTMPL0031E', {'mem': str(current), 'maxmem': str(maxmem)} + ) # make sure memory and Maxmemory are alingned in 256MiB in PowerPC if platform.machine().startswith('ppc'): if current % PPC_MEM_ALIGN != 0: - raise InvalidParameter('KCHVM0071E', - {'param': "Memory", - 'mem': str(current), - 'alignment': str(PPC_MEM_ALIGN)}) + raise InvalidParameter( + 'KCHVM0071E', + { + 'param': 'Memory', + 'mem': str(current), + 'alignment': str(PPC_MEM_ALIGN), + }, + ) elif maxmem % PPC_MEM_ALIGN != 0: - raise InvalidParameter('KCHVM0071E', - {'param': "Maximum Memory", - 'mem': str(maxmem), - 'alignment': str(PPC_MEM_ALIGN)}) + raise InvalidParameter( + 'KCHVM0071E', + { + 'param': 'Maximum Memory', + 'mem': str(maxmem), + 'alignment': str(PPC_MEM_ALIGN), + }, + ) class LibvirtVMTemplate(VMTemplate): @@ -369,11 +393,12 @@ def _get_storage_pool(self, pool_uri): pool_name = pool_name_from_uri(pool_uri) try: conn = self.conn.get() - pool = conn.storagePoolLookupByName(pool_name.encode("utf-8")) + pool = conn.storagePoolLookupByName(pool_name) except libvirt.libvirtError: - raise InvalidParameter("KCHTMPL0004E", {'pool': pool_uri, - 'template': self.name}) + raise InvalidParameter( + 'KCHTMPL0004E', {'pool': pool_uri, 'template': self.name} + ) return pool @@ -389,48 +414,49 @@ def _get_all_storagepools_name(self): def _get_active_storagepools_name(self): conn = self.conn.get() names = conn.listStoragePools() - return sorted(map(lambda x: x.decode('utf-8'), names)) + return sorted(names) def _network_validate(self): names = self.info.get('networks', []) for name in names: try: conn = self.conn.get() - network = conn.networkLookupByName(name.encode('utf-8')) + network = conn.networkLookupByName(name) except libvirt.libvirtError: - raise InvalidParameter("KCHTMPL0003E", {'network': name, - 'template': self.name}) + raise InvalidParameter( + 'KCHTMPL0003E', {'network': name, 'template': self.name} + ) if not network.isActive(): - raise InvalidParameter("KCHTMPL0007E", {'network': name, - 'template': self.name}) + raise InvalidParameter( + 'KCHTMPL0007E', {'network': name, 'template': self.name} + ) def _get_storage_path(self, pool_uri=None): try: pool = self._get_storage_pool(pool_uri) - except: + except Exception: return '' xml = pool.XMLDesc(0) - return xpath_get_text(xml, "/pool/target/path")[0] + return xpath_get_text(xml, '/pool/target/path')[0] def _get_storage_type(self, pool_uri=None): try: pool = self._get_storage_pool(pool_uri) - except: + except Exception: return '' xml = pool.XMLDesc(0) - return xpath_get_text(xml, "/pool/@type")[0] + return xpath_get_text(xml, '/pool/@type')[0] def 
_get_volume_path(self, pool, vol): pool = self._get_storage_pool(pool) try: return pool.storageVolLookupByName(vol).path() - except: - raise NotFoundError("KCHVOL0002E", {'name': vol, - 'pool': pool}) + except Exception: + raise NotFoundError('KCHVOL0002E', {'name': vol, 'pool': pool}) def fork_vm_storage(self, vm_uuid): # Provision storages: @@ -439,19 +465,18 @@ def fork_vm_storage(self, vm_uuid): for v in disk_and_vol_list: if v['pool'] is not None: pool = self._get_storage_pool(v['pool']) - # outgoing text to libvirt, encode('utf-8') - pool.createXML(v['xml'].encode('utf-8'), 0) + # outgoing text to libvirt, decode('utf-8') + pool.createXML(v['xml'].decode('utf-8'), 0) else: capacity = v['capacity'] format_type = v['format'] path = v['path'] create_disk_image( - format_type=format_type, - path=path, - capacity=capacity) + format_type=format_type, path=path, capacity=capacity + ) except libvirt.libvirtError as e: - raise OperationFailed("KCHVMSTOR0008E", {'error': e.message}) + raise OperationFailed('KCHVMSTOR0008E', {'error': str(e)}) return disk_and_vol_list diff --git a/model/users.py b/model/users.py index d9c5ff3e2..a4b01ae49 100644 --- a/model/users.py +++ b/model/users.py @@ -16,17 +16,16 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - -import ldap import pwd +import ldap from wok.config import config from wok.exception import NotFoundError class UsersModel(object): def __init__(self, **args): - auth_type = config.get("authentication", "method") + auth_type = config.get('authentication', 'method') for klass in UsersModel.__subclasses__(): if auth_type == klass.auth_type: self.user = klass(**args) @@ -46,12 +45,12 @@ def __init__(self, **kargs): def _get_list(self): return [user.pw_name for user in pwd.getpwall() - if user.pw_shell.rsplit("/")[-1] not in ["nologin", "false"]] + if user.pw_shell.rsplit('/')[-1] not in ['nologin', 'false']] def _validate(self, user): try: return user in self._get_list() - except: + except Exception: return False @@ -72,19 +71,19 @@ def _validate(self, user): return False def _get_user(self, _user_id): - ldap_server = config.get("authentication", "ldap_server").strip('"') + ldap_server = config.get('authentication', 'ldap_server').strip('"') ldap_search_base = config.get( - "authentication", "ldap_search_base").strip('"') + 'authentication', 'ldap_search_base').strip('"') ldap_search_filter = config.get( - "authentication", "ldap_search_filter", - vars={"username": _user_id.encode("utf-8")}).strip('"') + 'authentication', 'ldap_search_filter', + vars={'username': _user_id.encode('utf-8')}).strip('"') connect = ldap.open(ldap_server) try: result = connect.search_s( ldap_search_base, ldap.SCOPE_SUBTREE, ldap_search_filter) if len(result) == 0: - raise NotFoundError("KCHAUTH0004E", {'user_id': _user_id}) + raise NotFoundError('KCHAUTH0004E', {'user_id': _user_id}) return result[0][1] except ldap.NO_SUCH_OBJECT: - raise NotFoundError("KCHAUTH0004E", {'user_id': _user_id}) + raise NotFoundError('KCHAUTH0004E', {'user_id': _user_id}) diff --git a/model/utils.py b/model/utils.py index da629714a..0f41641fc 100644 --- a/model/utils.py +++ b/model/utils.py @@ -16,50 +16,51 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - 
import base64 -import libvirt +import libvirt from lxml import etree from lxml.builder import E - from wok.exception import OperationFailed -KIMCHI_META_URL = "https://github.com/kimchi-project/kimchi" -KIMCHI_NAMESPACE = "kimchi" +KIMCHI_META_URL = 'https://github.com/kimchi-project/kimchi' +KIMCHI_NAMESPACE = 'kimchi' def get_vm_name(vm_name, t_name, name_list): if vm_name: return vm_name - for i in xrange(1, 1000): + for i in range(1, 1000): # VM will have templace name, but without slashes - vm_name = "%s-vm-%i" % (t_name.replace('/', '-'), i) + vm_name = '%s-vm-%i' % (t_name.replace('/', '-'), i) if vm_name not in name_list: return vm_name - raise OperationFailed("KCHUTILS0003E") + raise OperationFailed('KCHUTILS0003E') def get_ascii_nonascii_name(name): nonascii_name = None - if name.encode('ascii', 'ignore') != name: + if name.encode('ascii', 'ignore').decode('utf-8') != name: nonascii_name = name - name = base64.urlsafe_b64encode(name.encode('utf-8')).rstrip('=') - name = unicode(name) + name = base64.urlsafe_b64encode( + nonascii_name.encode('utf-8')).decode('utf-8') + name = name.rstrip('=') - return (name, nonascii_name) + return name, nonascii_name -def get_vm_config_flag(dom, mode="persistent"): +def get_vm_config_flag(dom, mode='persistent'): # libvirt.VIR_DOMAIN_AFFECT_CURRENT is 0 # VIR_DOMAIN_AFFECT_LIVE is 1, VIR_DOMAIN_AFFECT_CONFIG is 2 - flag = {"live": libvirt.VIR_DOMAIN_AFFECT_LIVE, - "persistent": libvirt.VIR_DOMAIN_AFFECT_CONFIG, - "current": libvirt.VIR_DOMAIN_AFFECT_CURRENT, - "all": libvirt.VIR_DOMAIN_AFFECT_CONFIG + - libvirt.VIR_DOMAIN_AFFECT_LIVE if dom.isActive() and - dom.isPersistent() else libvirt.VIR_DOMAIN_AFFECT_CURRENT} + flag = { + 'live': libvirt.VIR_DOMAIN_AFFECT_LIVE, + 'persistent': libvirt.VIR_DOMAIN_AFFECT_CONFIG, + 'current': libvirt.VIR_DOMAIN_AFFECT_CURRENT, + 'all': libvirt.VIR_DOMAIN_AFFECT_CONFIG + libvirt.VIR_DOMAIN_AFFECT_LIVE + if dom.isActive() and dom.isPersistent() + else libvirt.VIR_DOMAIN_AFFECT_CURRENT, + } return flag[mode] @@ -67,34 +68,39 @@ def get_vm_config_flag(dom, mode="persistent"): # avoid duplicate codes def update_node(root, node): old_node = root.find(node.tag) - (root.replace(old_node, node) if old_node is not None - else root.append(node)) + (root.replace(old_node, node) if old_node is not None else root.append(node)) return root -def get_kimchi_metadata_node(dom, mode="current"): +def get_kimchi_metadata_node(dom, mode='current'): if not metadata_exists(dom): return None try: - xml = dom.metadata(libvirt.VIR_DOMAIN_METADATA_ELEMENT, - KIMCHI_META_URL, - flags=get_vm_config_flag(dom, mode)) + xml = dom.metadata( + libvirt.VIR_DOMAIN_METADATA_ELEMENT, + KIMCHI_META_URL, + flags=get_vm_config_flag(dom, mode), + ) return etree.fromstring(xml) except libvirt.libvirtError: return None -def set_kimchi_metadata_node(dom, metadata, mode="all"): - metadata_xml = etree.tostring(metadata) +def set_kimchi_metadata_node(dom, metadata, mode='all'): + metadata_xml = etree.tostring(metadata).decode('utf-8') # From libvirt doc, Passing None for @metadata says to remove that # element from the domain XML (passing the empty string leaves the # element present). Do not support remove the old metadata. 
- dom.setMetadata(libvirt.VIR_DOMAIN_METADATA_ELEMENT, metadata_xml, - KIMCHI_NAMESPACE, KIMCHI_META_URL, - flags=get_vm_config_flag(dom, mode)) + dom.setMetadata( + libvirt.VIR_DOMAIN_METADATA_ELEMENT, + metadata_xml, + KIMCHI_NAMESPACE, + KIMCHI_META_URL, + flags=get_vm_config_flag(dom, mode), + ) -def set_metadata_node(dom, nodes, mode="all"): +def set_metadata_node(dom, nodes, mode='all'): kimchi = get_kimchi_metadata_node(dom, mode) kimchi = E.metadata() if kimchi is None else kimchi @@ -104,7 +110,7 @@ def set_metadata_node(dom, nodes, mode="all"): set_kimchi_metadata_node(dom, kimchi, mode) -def remove_metadata_node(dom, tag, mode="all"): +def remove_metadata_node(dom, tag, mode='all'): kimchi = get_kimchi_metadata_node(dom, mode) if kimchi is not None: node = kimchi.find(tag) @@ -113,20 +119,20 @@ def remove_metadata_node(dom, tag, mode="all"): set_kimchi_metadata_node(dom, kimchi, mode) -def get_metadata_node(dom, tag, mode="current"): +def get_metadata_node(dom, tag, mode='current'): kimchi = get_kimchi_metadata_node(dom, mode) if kimchi is not None: node = kimchi.find(tag) if node is not None: return etree.tostring(node) - return "" + return '' def metadata_exists(dom): xml = dom.XMLDesc(libvirt.VIR_DOMAIN_XML_INACTIVE) root = etree.fromstring(xml) - if root.find("metadata") is None: + if root.find('metadata') is None: return False return True @@ -137,7 +143,7 @@ def has_cpu_numa(dom): Returns: True or False """ root = etree.fromstring(dom.XMLDesc(0)) - return (root.find('./cpu/numa') is not None) + return root.find('./cpu/numa') is not None def set_numa_memory(mem, root): diff --git a/model/virtviewerfile.py b/model/virtviewerfile.py index 533978f1a..50240b093 100644 --- a/model/virtviewerfile.py +++ b/model/virtviewerfile.py @@ -17,15 +17,16 @@ # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # +import os import cherrypy import libvirt -import os - -from wok.exception import InvalidOperation, OperationFailed +from wok.exception import InvalidOperation +from wok.exception import OperationFailed from wok.plugins.kimchi import config as kimchi_config from wok.plugins.kimchi.model.vms import VMModel -from wok.utils import run_command, wok_log +from wok.utils import run_command +from wok.utils import wok_log def write_virt_viewer_file(params): @@ -38,7 +39,7 @@ def write_virt_viewer_file(params): file_contents = file_template % params if params.get('graphics_passwd'): - file_contents += 'password=%s\n' % params['graphics_passwd'] + file_contents += f'password={params["graphics_passwd"]}\n' try: with open(params.get('path'), 'w') as vv_file: @@ -62,21 +63,20 @@ def create_virt_viewer_file(vm_name, graphics_info): host = _get_request_host() default_dir = kimchi_config.get_virtviewerfiles_path() - file_path = os.path.join(default_dir, '%s-access.vv' % vm_name) + file_path = os.path.join(default_dir, f'{vm_name}-access.vv') file_params = { 'type': graphics_type, 'graphics_port': graphics_port, 'graphics_passwd': graphics_passwd, 'host': host, - 'path': file_path + 'path': file_path, } write_virt_viewer_file(file_params) return file_path except Exception as e: - raise OperationFailed("KCHVM0084E", - {'name': vm_name, 'err': e.message}) + raise OperationFailed('KCHVM0084E', {'name': vm_name, 'err': str(e)}) class VMVirtViewerFileModel(object): @@ -87,8 +87,8 @@ def __init__(self, **kargs): cherrypy.engine.subscribe('exit', self.cleanup) def cleanup(self): - wok_log.info('Closing any 
VNC/SPICE firewall ports ' - 'opened by Kimchi ...') + wok_log.info( + 'Closing any VNC/SPICE firewall ports ' 'opened by Kimchi ...') self.firewall_mngr.remove_all_vms_ports() for cb_id in self.vm_event_callbacks.values(): self.conn.get().domainEventDeregisterAny(cb_id) @@ -97,7 +97,7 @@ def _check_if_vm_running(self, name): dom = VMModel.get_vm(name, self.conn) d_info = dom.info() if d_info[0] != libvirt.VIR_DOMAIN_RUNNING: - raise InvalidOperation("KCHVM0083E", {'name': name}) + raise InvalidOperation('KCHVM0083E', {'name': name}) def event_vmshutdown_cb(self, conn, dom, event, detail, *args): if event == libvirt.VIR_DOMAIN_EVENT_STOPPED: @@ -114,7 +114,7 @@ def handleVMShutdownPowerOff(self, vm_name): dom, libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE, self.event_vmshutdown_cb, - None + None, ) self.vm_event_callbacks[vm_name] = cb_id @@ -122,8 +122,8 @@ def handleVMShutdownPowerOff(self, vm_name): if type(e) == AttributeError: reason = 'Libvirt service is not running' else: - reason = e.message - wok_log.error("Register of LIFECYCLE event failed: %s" % reason) + reason = str(e) + wok_log.error(f'Register of LIFECYCLE event failed: {reason}') def lookup(self, name): self._check_if_vm_running(name) @@ -135,12 +135,10 @@ def lookup(self, name): self.firewall_mngr.add_vm_graphics_port(name, graphics_port) self.handleVMShutdownPowerOff(name) - return 'plugins/kimchi/data/virtviewerfiles/%s' %\ - os.path.basename(file_path) + return f'plugins/kimchi/data/virtviewerfiles/{os.path.basename(file_path)}' class FirewallManager(object): - @staticmethod def check_if_firewall_cmd_enabled(): _, _, r_code = run_command(['firewall-cmd', '--state', '-q']) @@ -179,54 +177,48 @@ def remove_all_vms_ports(self): class FirewallCMDProvider(object): - @staticmethod def enable_tcp_port(port): _, err, r_code = run_command( - ['firewall-cmd', '--add-port=%s/tcp' % port] - ) + ['firewall-cmd', '--add-port=%s/tcp' % port]) if r_code != 0: - wok_log.error('Error when adding port to firewall-cmd: %s' % err) + wok_log.error(f'Error when adding port to firewall-cmd: {err}') @staticmethod def disable_tcp_port(port): _, err, r_code = run_command( - ['firewall-cmd', '--remove-port=%s/tcp' % port] - ) + ['firewall-cmd', f'--remove-port={port}/tcp']) if r_code != 0: - wok_log.error('Error when removing port from ' - 'firewall-cmd: %s' % err) + wok_log.error(f'Error when removing port from firewall-cmd: {err}') class UFWProvider(object): - @staticmethod def enable_tcp_port(port): - _, err, r_code = run_command(['ufw', 'allow', '%s/tcp' % port]) + _, err, r_code = run_command(['ufw', 'allow', f'{port}/tcp']) if r_code != 0: - wok_log.error('Error when adding port to ufw: %s' % err) + wok_log.error(f'Error when adding port to ufw: {err}') @staticmethod def disable_tcp_port(port): - _, err, r_code = run_command(['ufw', 'deny', '%s/tcp' % port]) + _, err, r_code = run_command(['ufw', 'deny', f'{port}/tcp']) if r_code != 0: - wok_log.error('Error when removing port from ufw: %s' % err) + wok_log.error(f'Error when removing port from ufw: {err}') class IPTablesProvider(object): - @staticmethod def enable_tcp_port(port): - cmd = ['iptables', '-I', 'INPUT', '-p', 'tcp', '--dport', - port, '-j', 'ACCEPT'] + cmd = ['iptables', '-I', 'INPUT', '-p', + 'tcp', '--dport', port, '-j', 'ACCEPT'] _, err, r_code = run_command(cmd) if r_code != 0: - wok_log.error('Error when adding port to iptables: %s' % err) + wok_log.error(f'Error when adding port to iptables: {err}') @staticmethod def disable_tcp_port(port): - cmd = ['iptables', '-D', 'INPUT', 
'-p', 'tcp', '--dport', - port, '-j', 'ACCEPT'] + cmd = ['iptables', '-D', 'INPUT', '-p', + 'tcp', '--dport', port, '-j', 'ACCEPT'] _, err, r_code = run_command(cmd) if r_code != 0: - wok_log.error('Error when removing port from itables: %s' % err) + wok_log.error(f'Error when removing port from itables: {err}') diff --git a/model/vmhostdevs.py b/model/vmhostdevs.py index c99205ef1..d992d0abd 100644 --- a/model/vmhostdevs.py +++ b/model/vmhostdevs.py @@ -16,36 +16,50 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - import glob -import libvirt import os import platform import threading -from lxml import etree, objectify -from lxml.builder import E, ElementMaker from operator import itemgetter +import libvirt +from lxml import etree +from lxml import objectify +from lxml.builder import E +from lxml.builder import ElementMaker from wok.asynctask import AsyncTask -from wok.exception import InvalidOperation, InvalidParameter, NotFoundError +from wok.exception import InvalidOperation +from wok.exception import InvalidParameter +from wok.exception import NotFoundError from wok.exception import OperationFailed from wok.message import WokMessage from wok.model.tasks import TaskModel -from wok.rollbackcontext import RollbackContext -from wok.utils import run_command, wok_log - from wok.plugins.kimchi.model.config import CapabilitiesModel -from wok.plugins.kimchi.model.host import DeviceModel, DevicesModel +from wok.plugins.kimchi.model.host import DeviceModel +from wok.plugins.kimchi.model.host import DevicesModel from wok.plugins.kimchi.model.utils import get_vm_config_flag -from wok.plugins.kimchi.model.vms import DOM_STATE_MAP, VMModel +from wok.plugins.kimchi.model.vms import DOM_STATE_MAP +from wok.plugins.kimchi.model.vms import VMModel from wok.plugins.kimchi.xmlutils.qemucmdline import get_qemucmdline_xml from wok.plugins.kimchi.xmlutils.qemucmdline import QEMU_NAMESPACE +from wok.rollbackcontext import RollbackContext +from wok.utils import run_command +from wok.utils import wok_log CMDLINE_FIELD_NAME = 'spapr-pci-host-bridge.mem_win_size' -USB_MODELS_PCI_HOTPLUG = ["piix3-uhci", "piix4-uhci", "ehci", "ich9-ehci1", - "ich9-uhci1", "ich9-uhci2", "ich9-uhci3", - "vt82c686b-uhci", "pci-ohci", "nec-xhci"] +USB_MODELS_PCI_HOTPLUG = [ + 'piix3-uhci', + 'piix4-uhci', + 'ehci', + 'ich9-ehci1', + 'ich9-uhci1', + 'ich9-uhci2', + 'ich9-uhci3', + 'vt82c686b-uhci', + 'pci-ohci', + 'nec-xhci', +] WINDOW_SIZE_BAR = 0x800000000 @@ -60,9 +74,7 @@ def __init__(self, **kargs): self.task = TaskModel(**kargs) self._cb = None self.events.registerAttachDevicesEvent( - self.conn, - self._event_devices, - self) + self.conn, self._event_devices, self) def get_list(self, vmid): dom = VMModel.get_vm(vmid, self.conn) @@ -88,7 +100,7 @@ def _event_devices(self, conn, dom, alias, opaque): wok_log.error('opaque must be valid') return - wok_log.info("Device %s added successfuly" % alias) + wok_log.info('Device %s added successfuly' % alias) opaque._cb('OK', True) def create(self, vmid, params): @@ -96,11 +108,12 @@ def create(self, vmid, params): dev_info = self.dev_model.lookup(dev_name) if dev_info['device_type'] == 'pci': - taskid = AsyncTask(u'/plugins/kimchi/vms/%s/hostdevs/' % - VMModel.get_vm(vmid, self.conn).name(), - self._attach_pci_device, - {'vmid': vmid, 'dev_info': dev_info, - 'lock': threading.RLock()}).id + taskid = AsyncTask( + 
u'/plugins/kimchi/vms/%s/hostdevs/' + % VMModel.get_vm(vmid, self.conn).name(), + self._attach_pci_device, + {'vmid': vmid, 'dev_info': dev_info, 'lock': threading.RLock()}, + ).id return self.task.lookup(taskid) with RollbackContext() as rollback: @@ -114,11 +127,12 @@ def create(self, vmid, params): rollback.commitAll() - taskid = AsyncTask(u'/plugins/kimchi/vms/%s/hostdevs/' % - VMModel.get_vm(vmid, self.conn).name(), - '_attach_%s_device' % dev_info['device_type'], - {'vmid': vmid, 'dev_info': dev_info, - 'lock': threading.RLock()}).id + taskid = AsyncTask( + u'/plugins/kimchi/vms/%s/hostdevs/' + % VMModel.get_vm(vmid, self.conn).name(), + '_attach_%s_device' % dev_info['device_type'], + {'vmid': vmid, 'dev_info': dev_info, 'lock': threading.RLock()}, + ).id return self.task.lookup(taskid) @@ -127,35 +141,30 @@ def _get_pci_devices_xml(self, pci_infos, slot, driver): # all devices included in the xml will be sorted in reverse (the # function 0 will be the last one) and will include the guest # address details - for dev_info in sorted(pci_infos, - key=itemgetter('function'), - reverse=True): + for dev_info in sorted(pci_infos, key=itemgetter('function'), reverse=True): dev_info['detach_driver'] = driver - hostdevs += self._get_pci_device_xml(dev_info, - slot, - True) + hostdevs += self._get_pci_device_xml(dev_info, slot, True) return '%s' % hostdevs def have_usb_controller(self, vmid): dom = VMModel.get_vm(vmid, self.conn) - root = objectify.fromstring(dom.XMLDesc(0)) try: controllers = root.devices.controller - except AttributeError: return False for controller in controllers: - if 'model' not in controller.attrib: continue - if controller.attrib['type'] == 'usb' and \ - controller.attrib['model'] in USB_MODELS_PCI_HOTPLUG: + if ( + controller.attrib['type'] == 'usb' and + controller.attrib['model'] in USB_MODELS_PCI_HOTPLUG + ): return True return False @@ -164,34 +173,44 @@ def _get_pci_device_xml(self, dev_info, slot, is_multifunction): if 'detach_driver' not in dev_info: dev_info['detach_driver'] = 'kvm' - source = E.source(E.address(domain=str(dev_info['domain']), - bus=str(dev_info['bus']), - slot=str(dev_info['slot']), - function=str(dev_info['function']))) + source = E.source( + E.address( + domain=str(dev_info['domain']), + bus=str(dev_info['bus']), + slot=str(dev_info['slot']), + function=str(dev_info['function']), + ) + ) driver = E.driver(name=dev_info['detach_driver']) if is_multifunction: if dev_info['function'] == 0: - multi = E.address(type='pci', - domain='0', - bus='0', - slot=str(slot), - function=str(dev_info['function']), - multifunction='on') + multi = E.address( + type='pci', + domain='0', + bus='0', + slot=str(slot), + function=str(dev_info['function']), + multifunction='on', + ) else: - multi = E.address(type='pci', - domain='0', - bus='0', - slot=str(slot), - function=str(dev_info['function'])) + multi = E.address( + type='pci', + domain='0', + bus='0', + slot=str(slot), + function=str(dev_info['function']), + ) - host_dev = E.hostdev(source, driver, multi, - mode='subsystem', type='pci', managed='yes') + host_dev = E.hostdev( + source, driver, multi, mode='subsystem', type='pci', managed='yes' + ) else: - host_dev = E.hostdev(source, driver, - mode='subsystem', type='pci', managed='yes') + host_dev = E.hostdev( + source, driver, mode='subsystem', type='pci', managed='yes' + ) return etree.tostring(host_dev) @@ -200,27 +219,28 @@ def _validate_pci_passthrough_env(): # Linux kernel < 3.5 doesn't provide /sys/kernel/iommu_groups if 
os.path.isdir('/sys/kernel/iommu_groups'): if not glob.glob('/sys/kernel/iommu_groups/*'): - raise InvalidOperation("KCHVMHDEV0003E") + raise InvalidOperation('KCHVMHDEV0003E') # Enable virt_use_sysfs on RHEL6 and older distributions # In recent Fedora, there is no virt_use_sysfs. - out, err, rc = run_command(['getsebool', 'virt_use_sysfs'], - silent=True) - if rc == 0 and out.rstrip('\n') != "virt_use_sysfs --> on": - out, err, rc = run_command(['setsebool', '-P', - 'virt_use_sysfs=on']) + out, err, rc = run_command( + ['getsebool', 'virt_use_sysfs'], silent=True) + if rc == 0 and out.rstrip('\n') != 'virt_use_sysfs --> on': + out, err, rc = run_command( + ['setsebool', '-P', 'virt_use_sysfs=on']) if rc != 0: - wok_log.warning("Unable to turn on sebool virt_use_sysfs") + wok_log.warning('Unable to turn on sebool virt_use_sysfs') def _available_slot(self, dom): xmlstr = dom.XMLDesc(0) root = objectify.fromstring(xmlstr) - slots = [] try: devices = root.devices - slots = [self.dev_model._toint(dev.attrib['slot']) - for dev in devices.findall('.//address') - if 'slot' in dev.attrib] + slots = [ + self.dev_model._toint(dev.attrib['slot']) + for dev in devices.findall('.//address') + if 'slot' in dev.attrib + ] except AttributeError: return 1 @@ -243,9 +263,8 @@ def _attach_pci_device(self, cb, params): try: self._passthrough_device_validate(dev_info['name']) - except InvalidParameter as e: - cb(e.message, False) + cb(str(e), False) raise with lock: @@ -253,7 +272,7 @@ def _attach_pci_device(self, cb, params): self._validate_pci_passthrough_env() except InvalidOperation as e: - cb(e.message, False) + cb(str(e), False) raise dom = VMModel.get_vm(vmid, self.conn) @@ -261,54 +280,40 @@ def _attach_pci_device(self, cb, params): # 'vfio' systems requires a usb controller in order to support pci # hotplug on Power. 
- if driver == 'vfio' and platform.machine().startswith('ppc') and \ - DOM_STATE_MAP[dom.info()[0]] != "shutoff" and \ - not self.have_usb_controller(vmid): + if ( + driver == 'vfio' and + platform.machine().startswith('ppc') and + DOM_STATE_MAP[dom.info()[0]] != 'shutoff' and + not self.have_usb_controller(vmid) + ): msg = WokMessage('KCHVMHDEV0008E', {'vmid': vmid}) cb(msg.get_text(), False) - raise InvalidOperation("KCHVMHDEV0008E", {'vmid': vmid}) + raise InvalidOperation('KCHVMHDEV0008E', {'vmid': vmid}) # Attach all PCI devices in the same IOMMU group affected_names = self.devs_model.get_list( - _passthrough_affected_by=dev_info['name']) + _passthrough_affected_by=dev_info['name'] + ) passthrough_names = self.devs_model.get_list( - _cap='pci', _passthrough='true') + _cap='pci', _passthrough='true' + ) group_names = list(set(affected_names) & set(passthrough_names)) - pci_infos = [self.dev_model.lookup(dev_name) for dev_name in - group_names] + pci_infos = [self.dev_model.lookup( + dev_name) for dev_name in group_names] pci_infos.append(dev_info) - - is_multifunction = len(pci_infos) > 1 pci_infos = sorted(pci_infos, key=itemgetter('name')) # does not allow hot-plug of 3D graphic cards is_3D_device = self.dev_model.is_device_3D_controller(dev_info) - if is_3D_device and DOM_STATE_MAP[dom.info()[0]] != "shutoff": + if is_3D_device and DOM_STATE_MAP[dom.info()[0]] != 'shutoff': msg = WokMessage('KCHVMHDEV0006E', {'name': dev_info['name']}) cb(msg.get_text(), False) - raise InvalidOperation('KCHVMHDEV0006E', - {'name': dev_info['name']}) + raise InvalidOperation( + 'KCHVMHDEV0006E', {'name': dev_info['name']}) # all devices in the group that is going to be attached to the vm # must be detached from the host first - with RollbackContext() as rollback: - for pci_info in pci_infos: - try: - dev = self.conn.get().nodeDeviceLookupByName( - pci_info['name']) - dev.dettach() - except Exception: - msg = WokMessage('KCHVMHDEV0005E', - {'name': pci_info['name']}) - cb(msg.get_text(), False) - raise OperationFailed('KCHVMHDEV0005E', - {'name': pci_info['name']}) - else: - rollback.prependDefer(dev.reAttach) - - rollback.commitAll() - - device_flags = get_vm_config_flag(dom, mode='all') + self._attach_all_devices(pci_infos) # when attaching a 3D graphic device it might be necessary to # increase the window size memory in order to be able to attach @@ -316,58 +321,84 @@ def _attach_pci_device(self, cb, params): if is_3D_device: self.update_mmio_guest(vmid, True) - slot = 0 + self._attach_multifunction_devices(dom, pci_infos, driver, vmid) + + if DOM_STATE_MAP[dom.info()[0]] == 'shutoff': + cb('OK', True) + + def _attach_multifunction_devices(self, dom, pci_infos, driver, vmid): + slot = 0 + is_multifunction = len(pci_infos) > 1 + device_flags = get_vm_config_flag(dom, mode='all') + with RollbackContext() as rollback: + # multifuction: try to attach all functions together within one + # xml file. It requires libvirt support. if is_multifunction: # search for the first available slot in guest xml slot = self._available_slot(dom) + xmlstr = self._get_pci_devices_xml(pci_infos, slot, driver) - with RollbackContext() as rollback: - # multifuction: try to attach all functions together within one - # xml file. It requires libvirt support. 
- if is_multifunction: - xmlstr = self._get_pci_devices_xml(pci_infos, slot, driver) - - try: - dom.attachDeviceFlags(xmlstr, device_flags) - - except libvirt.libvirtError: - # If operation fails, we try the other way, where each - # function is attached individually - pass - else: - rollback.prependDefer(dom.detachDeviceFlags, xmlstr, - device_flags) - rollback.commitAll() - if DOM_STATE_MAP[dom.info()[0]] == "shutoff": - cb('OK', True) - return - - # attach each function individually (multi or single function) - for pci_info in pci_infos: - pci_info['detach_driver'] = driver - xmlstr = self._get_pci_device_xml(pci_info, - slot, - is_multifunction) - try: - dom.attachDeviceFlags(xmlstr, device_flags) - - except libvirt.libvirtError: - msg = WokMessage('KCHVMHDEV0007E', - {'device': pci_info['name'], - 'vm': vmid}) - cb(msg.get_text(), False) - wok_log.error( - 'Failed to attach host device %s to VM %s: \n%s', - pci_info['name'], vmid, xmlstr) - raise - - rollback.prependDefer(dom.detachDeviceFlags, - xmlstr, device_flags) + try: + dom.attachDeviceFlags(xmlstr, device_flags) - rollback.commitAll() + except libvirt.libvirtError: + # If operation fails, we try the other way, where each + # function is attached individually + pass + else: + rollback.prependDefer( + dom.detachDeviceFlags, xmlstr, device_flags + ) + rollback.commitAll() + if DOM_STATE_MAP[dom.info()[0]] == 'shutoff': + self._cb('OK', True) + return + + # attach each function individually (multi or single function) + for pci_info in pci_infos: + pci_info['detach_driver'] = driver + xmlstr = self._get_pci_device_xml( + pci_info, slot, is_multifunction) + try: + dom.attachDeviceFlags(xmlstr, device_flags) - if DOM_STATE_MAP[dom.info()[0]] == "shutoff": - cb('OK', True) + except libvirt.libvirtError: + msg = WokMessage( + 'KCHVMHDEV0007E', { + 'device': pci_info['name'], 'vm': vmid} + ) + self._cb(msg.get_text(), False) + wok_log.error( + 'Failed to attach host device %s to VM %s: \n%s', + pci_info['name'], + vmid, + xmlstr, + ) + raise + + rollback.prependDefer( + dom.detachDeviceFlags, xmlstr, device_flags) + + rollback.commitAll() + + def _attach_all_devices(self, pci_infos): + with RollbackContext() as rollback: + for pci_info in pci_infos: + try: + dev = self.conn.get().nodeDeviceLookupByName( + pci_info['name']) + dev.dettach() + except Exception: + msg = WokMessage('KCHVMHDEV0005E', { + 'name': pci_info['name']}) + self._cb(msg.get_text(), False) + raise OperationFailed( + 'KCHVMHDEV0005E', {'name': pci_info['name']} + ) + else: + rollback.prependDefer(dev.reAttach) + + rollback.commitAll() def _count_3D_devices_attached(self, dom): counter = 0 @@ -438,8 +469,7 @@ def _update_win_memory_size(self, dom, counter, wnd_size): line.remove(arg.getprevious()) line.remove(arg) - return etree.tostring(root, encoding='utf-8', - pretty_print=True) + return etree.tostring(root, encoding='utf-8', pretty_print=True) return None @@ -467,11 +497,15 @@ def _add_win_memory_size(self, dom, wnd_size): def _get_scsi_device_xml(self, dev_info): adapter = E.adapter(name=('scsi_host%s' % dev_info['host'])) - address = E.address(type='scsi', bus=str(dev_info['bus']), - target=str(dev_info['target']), - unit=str(dev_info['lun'])) - host_dev = E.hostdev(E.source(adapter, address), - mode='subsystem', type='scsi', sgio='unfiltered') + address = E.address( + type='scsi', + bus=str(dev_info['bus']), + target=str(dev_info['target']), + unit=str(dev_info['lun']), + ) + host_dev = E.hostdev( + E.source(adapter, address), mode='subsystem', type='scsi', 
sgio='unfiltered' + ) return etree.tostring(host_dev) def _attach_scsi_device(self, cb, params): @@ -485,7 +519,7 @@ def _attach_scsi_device(self, cb, params): self._passthrough_device_validate(dev_info['name']) except InvalidParameter as e: - cb(e.message, False) + cb(str(e), False) raise with lock: @@ -499,20 +533,24 @@ def _attach_scsi_device(self, cb, params): dom.attachDeviceFlags(xmlstr, device_flags) except libvirt.libvirtError: - msg = WokMessage('KCHVMHDEV0007E', - {'device': dev_info['name'], - 'vm': vmid}) + msg = WokMessage( + 'KCHVMHDEV0007E', { + 'device': dev_info['name'], 'vm': vmid} + ) cb(msg.get_text(), False) wok_log.error( 'Failed to attach host device %s to VM %s: \n%s', - dev_info['name'], vmid, xmlstr) + dev_info['name'], + vmid, + xmlstr, + ) raise - rollback.prependDefer(dom.detachDeviceFlags, xmlstr, - device_flags) + rollback.prependDefer( + dom.detachDeviceFlags, xmlstr, device_flags) rollback.commitAll() - if DOM_STATE_MAP[dom.info()[0]] == "shutoff": + if DOM_STATE_MAP[dom.info()[0]] == 'shutoff': cb('OK', True) def _get_usb_device_xml(self, dev_info): @@ -521,7 +559,8 @@ def _get_usb_device_xml(self, dev_info): E.product(id=dev_info['product']['id']), E.address(bus=str(dev_info['bus']), device=str(dev_info['device'])), - startupPolicy='optional') + startupPolicy='optional', + ) host_dev = E.hostdev(source, mode='subsystem', ype='usb', managed='yes') return etree.tostring(host_dev) @@ -538,7 +577,7 @@ def _attach_usb_device(self, cb, params): self._passthrough_device_validate(dev_info['name']) except InvalidParameter as e: - cb(e.message, False) + cb(str(e), False) raise with lock: @@ -550,20 +589,24 @@ def _attach_usb_device(self, cb, params): dom.attachDeviceFlags(xmlstr, device_flags) except libvirt.libvirtError: - msg = WokMessage('KCHVMHDEV0007E', - {'device': dev_info['name'], - 'vm': vmid}) + msg = WokMessage( + 'KCHVMHDEV0007E', { + 'device': dev_info['name'], 'vm': vmid} + ) cb(msg.get_text(), False) wok_log.error( 'Failed to attach host device %s to VM %s: \n%s', - dev_info['name'], vmid, xmlstr) + dev_info['name'], + vmid, + xmlstr, + ) raise - rollback.prependDefer(dom.detachDeviceFlags, xmlstr, - device_flags) + rollback.prependDefer( + dom.detachDeviceFlags, xmlstr, device_flags) rollback.commitAll() - if DOM_STATE_MAP[dom.info()[0]] == "shutoff": + if DOM_STATE_MAP[dom.info()[0]] == 'shutoff': cb('OK', True) @@ -577,9 +620,7 @@ def __init__(self, **kargs): self.dev_model = DeviceModel(**kargs) self._cb = None self.events.registerDetachDevicesEvent( - self.conn, - self._event_devices, - self) + self.conn, self._event_devices, self) def lookup(self, vmid, dev_name): dom = VMModel.get_vm(vmid, self.conn) @@ -588,22 +629,24 @@ def lookup(self, vmid, dev_name): try: hostdev = root.devices.hostdev except AttributeError: - raise NotFoundError('KCHVMHDEV0001E', - {'vmid': vmid, 'dev_name': dev_name}) + raise NotFoundError('KCHVMHDEV0001E', { + 'vmid': vmid, 'dev_name': dev_name}) for e in hostdev: deduced_name = DeviceModel.deduce_dev_name(e, self.conn) if deduced_name == dev_name: dev_info = self.dev_model.lookup(dev_name) - return {'name': dev_name, - 'type': e.attrib['type'], - 'product': dev_info.get('product', None), - 'vendor': dev_info.get('vendor', None), - 'multifunction': dev_info.get('multifunction', None), - 'vga3d': dev_info.get('vga3d', None)} - - raise NotFoundError('KCHVMHDEV0001E', - {'vmid': vmid, 'dev_name': dev_name}) + return { + 'name': dev_name, + 'type': e.attrib['type'], + 'product': dev_info.get('product', None), + 'vendor': 
dev_info.get('vendor', None), + 'multifunction': dev_info.get('multifunction', None), + 'vga3d': dev_info.get('vga3d', None), + } + + raise NotFoundError('KCHVMHDEV0001E', { + 'vmid': vmid, 'dev_name': dev_name}) def delete(self, vmid, dev_name): dom = VMModel.get_vm(vmid, self.conn) @@ -614,16 +657,20 @@ def delete(self, vmid, dev_name): hostdev = root.devices.hostdev except AttributeError: - raise NotFoundError('KCHVMHDEV0001E', - {'vmid': vmid, 'dev_name': dev_name}) - - task_params = {'vmid': vmid, - 'dev_name': dev_name, - 'dom': dom, - 'hostdev': hostdev, - 'lock': threading.RLock()} - task_uri = u'/plugins/kimchi/vms/%s/hostdevs/%s' % \ - (VMModel.get_vm(vmid, self.conn).name(), dev_name) + raise NotFoundError('KCHVMHDEV0001E', { + 'vmid': vmid, 'dev_name': dev_name}) + + task_params = { + 'vmid': vmid, + 'dev_name': dev_name, + 'dom': dom, + 'hostdev': hostdev, + 'lock': threading.RLock(), + } + task_uri = u'/plugins/kimchi/vms/%s/hostdevs/%s' % ( + VMModel.get_vm(vmid, self.conn).name(), + dev_name, + ) taskid = AsyncTask(task_uri, self._detach_device, task_params).id return self.task.lookup(taskid) @@ -635,22 +682,22 @@ def _event_devices(self, conn, dom, alias, opaque): wok_log.error('opaque must be valid') return - wok_log.info("Device %s removed successfully" % alias) + wok_log.info('Device %s removed successfully' % alias) # Re-attach device to host if it's not managed mode if not opaque._managed: try: dev = conn.get().nodeDeviceLookupByName(alias) dev.reAttach() - except libvirt.libvirtError, e: + except libvirt.libvirtError as e: wok_log.error( - "Unable to attach device %s back to host. Error: %s", - alias, e.message + 'Unable to attach device %s back to host. Error: %s', alias, str( + e) ) else: wok_log.info( "Device %s was attached in 'managed' mode. " - "Skipping re-attach()." % alias + 'Skipping re-attach().' 
% alias ) opaque._cb('OK', True) @@ -665,18 +712,22 @@ def _detach_device(self, cb, params): lock = params['lock'] with lock: - pci_devs = {DeviceModel.deduce_dev_name(e, self.conn): e - for e in hostdev if e.attrib['type'] == 'pci'} + pci_devs = { + DeviceModel.deduce_dev_name(e, self.conn): e + for e in hostdev + if e.attrib['type'] == 'pci' + } dev_info = self.dev_model.lookup(dev_name) is_3D_device = self.dev_model.is_device_3D_controller(dev_info) - if is_3D_device and DOM_STATE_MAP[dom.info()[0]] != "shutoff": - raise InvalidOperation('KCHVMHDEV0006E', - {'name': dev_info['name']}) + if is_3D_device and DOM_STATE_MAP[dom.info()[0]] != 'shutoff': + raise InvalidOperation( + 'KCHVMHDEV0006E', {'name': dev_info['name']}) if not pci_devs.get(dev_name): - raise NotFoundError('KCHVMHDEV0001E', - {'vmid': vmid, 'dev_name': dev_name}) + raise NotFoundError( + 'KCHVMHDEV0001E', {'vmid': vmid, 'dev_name': dev_name} + ) dev_name_elem = pci_devs[dev_name] self._managed = dev_name_elem.get('managed', 'no') == 'yes' @@ -684,8 +735,7 @@ def _detach_device(self, cb, params): # check for multifunction and detach all functions together try: multi = self.unplug_multifunction_pci( - dom, hostdev, dev_name_elem - ) + dom, hostdev, dev_name_elem) except libvirt.libvirtError: multi = False @@ -695,31 +745,31 @@ def _detach_device(self, cb, params): devsmodel = VMHostDevsModel(conn=self.conn) devsmodel.update_mmio_guest(vmid, False) - if DOM_STATE_MAP[dom.info()[0]] == "shutoff": + if DOM_STATE_MAP[dom.info()[0]] == 'shutoff': cb('OK', True) return # detach individually xmlstr = etree.tostring(dev_name_elem) - dom.detachDeviceFlags( - xmlstr, get_vm_config_flag(dom, mode='all')) + dom.detachDeviceFlags(xmlstr, get_vm_config_flag(dom, mode='all')) if dev_name_elem.attrib['type'] == 'pci': - self._delete_affected_pci_devices(dom, dev_name, - pci_devs) + self._delete_affected_pci_devices(dom, dev_name, pci_devs) if is_3D_device: devsmodel = VMHostDevsModel(conn=self.conn) devsmodel.update_mmio_guest(vmid, False) - if DOM_STATE_MAP[dom.info()[0]] == "shutoff": + if DOM_STATE_MAP[dom.info()[0]] == 'shutoff': cb('OK', True) def get_devices_same_addr(self, hostdevs, device_elem): def elem_has_valid_address(elem): - if elem.get('type') != 'pci' or \ - elem.address is None or \ - elem.address.get('domain') is None or \ - elem.address.get('bus') is None or \ - elem.address.get('slot') is None: + if ( + elem.get('type') != 'pci' or + elem.address is None or + elem.address.get('domain') is None or + elem.address.get('bus') is None or + elem.address.get('slot') is None + ): return False return True @@ -740,21 +790,28 @@ def elem_has_valid_address(elem): dev_bus = dev.address.get('bus') dev_slot = dev.address.get('slot') - if dev_domain == device_domain and dev_bus == device_bus and \ - dev_slot == device_slot: - devices.append(etree.tostring(dev)) + if ( + dev_domain == device_domain and + dev_bus == device_bus and + dev_slot == device_slot + ): + devices.append(etree.tostring(dev).decode('utf-8')) return devices def is_hostdev_multifunction(self, dev_elem): - if dev_elem.address is None or \ - dev_elem.address.get('multifunction') is None or \ - dev_elem.address.get('function') is None: + if ( + dev_elem.address is None or + dev_elem.address.get('multifunction') is None or + dev_elem.address.get('function') is None + ): return False - is_multi = dev_elem.address.get('multifunction') == 'on' and \ + is_multi = ( + dev_elem.address.get('multifunction') == 'on' and dev_elem.address.get('function') == '0x0' + ) return 
is_multi @@ -768,8 +825,7 @@ def unplug_multifunction_pci(self, dom, hostdevs, dev_elem): return False devices_xml = '%s' % ''.join(devices) - dom.detachDeviceFlags(devices_xml, - get_vm_config_flag(dom, mode='all')) + dom.detachDeviceFlags(devices_xml, get_vm_config_flag(dom, mode='all')) return True @@ -780,10 +836,11 @@ def _delete_affected_pci_devices(self, dom, dev_name, pci_devs): return affected_names = set( - DevicesModel( - conn=self.conn).get_list(_passthrough_affected_by=dev_name)) + DevicesModel(conn=self.conn).get_list( + _passthrough_affected_by=dev_name) + ) - for pci_name, e in pci_devs.iteritems(): + for pci_name, e in pci_devs.items(): if pci_name in affected_names: xmlstr = etree.tostring(e) dom.detachDeviceFlags( @@ -805,5 +862,5 @@ def get_list(self, device_id): dom_name = dom.name() if device_id in devsmodel.get_list(dom_name): state = DOM_STATE_MAP[dom.info()[0]] - res.append({"name": dom_name, "state": state}) + res.append({'name': dom_name, 'state': state}) return res diff --git a/model/vmifaces.py b/model/vmifaces.py index b36c59379..9a26d611a 100644 --- a/model/vmifaces.py +++ b/model/vmifaces.py @@ -16,17 +16,19 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - -import libvirt import os import random -from lxml import etree, objectify - -from wok.exception import InvalidParameter, MissingParameter -from wok.exception import NotFoundError, InvalidOperation +import libvirt +from lxml import etree +from lxml import objectify +from wok.exception import InvalidOperation +from wok.exception import InvalidParameter +from wok.exception import MissingParameter +from wok.exception import NotFoundError from wok.plugins.kimchi.model.config import CapabilitiesModel -from wok.plugins.kimchi.model.vms import DOM_STATE_MAP, VMModel +from wok.plugins.kimchi.model.vms import DOM_STATE_MAP +from wok.plugins.kimchi.model.vms import VMModel from wok.plugins.kimchi.xmlutils.interface import get_iface_xml @@ -45,17 +47,15 @@ def create(self, vm, params): conn = self.conn.get() if params['type'] == 'network': - network = params.get("network") + network = params.get('network') if network is None: raise MissingParameter('KCHVMIF0007E') networks = conn.listNetworks() + conn.listDefinedNetworks() - networks = map(lambda x: x.decode('utf-8'), networks) - if network not in networks: - raise InvalidParameter('KCHVMIF0002E', - {'name': vm, 'network': network}) + raise InvalidParameter( + 'KCHVMIF0002E', {'name': vm, 'network': network}) # For architecture other than s390x/s390 type ovs/macvtap # and source interface are not supported. @@ -67,15 +67,14 @@ def create(self, vm, params): # For s390x/s390 architecture if os.uname()[4] in ['s390x', 's390']: - params['name'] = params.get("source", None) + params['name'] = params.get('source', None) # For type ovs and mavtap, source interface has to be provided. if params['name'] is None and params['type'] in ['ovs', 'macvtap']: raise InvalidParameter('KCHVMIF0015E') # If source interface provided, only type supported are ovs # and mavtap. - if params['name'] is not None and \ - params['type'] not in ['ovs', 'macvtap']: + if params['name'] is not None and params['type'] not in ['ovs', 'macvtap']: raise InvalidParameter('KCHVMIF0014E') # FIXME: Validation if source interface exists. 
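Aside (editorial note, not part of the patch): the hunk above drops the Python 2-era `networks = map(lambda x: x.decode('utf-8'), networks)` because the libvirt bindings already return plain `str` names under Python 3, so the membership test works on the list directly. A minimal sketch of that check, where the helper name is only illustrative and `conn` is assumed to be an open `libvirt.virConnect` handle:

    import libvirt

    def network_defined(conn, name):
        # Active plus persistent-but-inactive networks; no decode step is
        # needed on Python 3 because the names are already str.
        return name in conn.listNetworks() + conn.listDefinedNetworks()

    # usage (hypothetical URI):
    #   conn = libvirt.open('qemu:///system')
    #   network_defined(conn, 'default')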
@@ -93,8 +92,9 @@ def create(self, vm, params): if 'mac' in params and params['mac']: # make sure it is unique if params['mac'] in macs: - raise InvalidParameter('KCHVMIF0009E', - {'name': vm, 'mac': params['mac']}) + raise InvalidParameter( + 'KCHVMIF0009E', {'name': vm, 'mac': params['mac']} + ) # otherwise choose a random mac address else: @@ -112,7 +112,7 @@ def create(self, vm, params): flags = 0 if dom.isPersistent(): flags |= libvirt.VIR_DOMAIN_AFFECT_CONFIG - if DOM_STATE_MAP[dom.info()[0]] != "shutoff": + if DOM_STATE_MAP[dom.info()[0]] != 'shutoff': flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE dom.attachDeviceFlags(xml, flags) @@ -124,14 +124,18 @@ def get_vmifaces(vm, conn): xml = dom.XMLDesc(0) root = objectify.fromstring(xml) - return root.devices.findall("interface") + return root.devices.findall('interface') @staticmethod def random_mac(): - mac = [0x52, 0x54, 0x00, - random.randint(0x00, 0x7f), - random.randint(0x00, 0xff), - random.randint(0x00, 0xff)] + mac = [ + 0x52, + 0x54, + 0x00, + random.randint(0x00, 0x7F), + random.randint(0x00, 0xFF), + random.randint(0x00, 0xFF), + ] return ':'.join(map(lambda x: u'%02x' % x, mac)) @@ -152,7 +156,7 @@ def lookup(self, vm, mac): iface = self._get_vmiface(vm, mac) if iface is None: - raise NotFoundError("KCHVMIF0001E", {'name': vm, 'iface': mac}) + raise NotFoundError('KCHVMIF0001E', {'name': vm, 'iface': mac}) info['type'] = iface.attrib['type'] info['mac'] = iface.mac.get('address') @@ -164,17 +168,15 @@ def lookup(self, vm, mac): info['source'] = iface.source.get('dev') info['mode'] = iface.source.get('mode') info['type'] = 'macvtap' - elif (info['type'] == 'bridge' and - info.get('virtualport') == 'openvswitch'): + elif info['type'] == 'bridge' and info.get('virtualport') == 'openvswitch': info['source'] = iface.source.get('bridge') info['type'] = 'ovs' else: info['network'] = iface.source.get('network') - if iface.find("model") is not None: + if iface.find('model') is not None: info['model'] = iface.model.get('type') - if info['type'] == 'bridge' and \ - info.get('virtualport') != 'openvswitch': + if info['type'] == 'bridge' and info.get('virtualport') != 'openvswitch': info['bridge'] = iface.source.get('bridge') if info.get('network'): info['ips'] = self._get_ips(vm, info['mac'], info['network']) @@ -188,14 +190,14 @@ def _get_ips(self, vm, mac, network): # cache has entries for this MAC. conn = self.conn.get() dom = VMModel.get_vm(vm, self.conn) - if DOM_STATE_MAP[dom.info()[0]] == "shutoff": + if DOM_STATE_MAP[dom.info()[0]] == 'shutoff': return ips # An iface may have multiple IPs # An IP could have been assigned without libvirt. # First check the ARP cache. with open('/proc/net/arp') as f: - ips = [line.split()[0] for line in f.xreadlines() if mac in line] + ips = [line.split()[0] for line in f.readlines() if mac in line] # Some ifaces may be inactive, so if the ARP cache didn't have them, # and they happen to be assigned via DHCP, we can check there too. 
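Aside (editorial note, not part of the patch): `file.xreadlines()` was removed in Python 3, hence the switch to `readlines()` in the ARP-cache scan above. Since a Python 3 file object is itself an iterator, the same scan can avoid materialising the whole file; a minimal sketch under that assumption, with the path and helper name chosen only for illustration:

    def ips_for_mac(mac, arp_path='/proc/net/arp'):
        # Iterating the file object yields lines lazily, whereas readlines()
        # builds the complete list in memory before filtering.
        with open(arp_path) as f:
            return [line.split()[0] for line in f if mac in line]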
@@ -217,25 +219,25 @@ def delete(self, vm, mac): iface = self._get_vmiface(vm, mac) if iface is None: - raise NotFoundError("KCHVMIF0001E", {'name': vm, 'iface': mac}) + raise NotFoundError('KCHVMIF0001E', {'name': vm, 'iface': mac}) flags = 0 if dom.isPersistent(): flags |= libvirt.VIR_DOMAIN_AFFECT_CONFIG - if DOM_STATE_MAP[dom.info()[0]] != "shutoff": + if DOM_STATE_MAP[dom.info()[0]] != 'shutoff': flags |= libvirt.VIR_DOMAIN_AFFECT_LIVE - dom.detachDeviceFlags(etree.tostring(iface), flags) + dom.detachDeviceFlags(etree.tostring(iface).decode('utf-8'), flags) def update(self, vm, mac, params): dom = VMModel.get_vm(vm, self.conn) iface = self._get_vmiface(vm, mac) if iface is None: - raise NotFoundError("KCHVMIF0001E", {'name': vm, 'iface': mac}) + raise NotFoundError('KCHVMIF0001E', {'name': vm, 'iface': mac}) # cannot change mac address in a running system - if DOM_STATE_MAP[dom.info()[0]] != "shutoff": + if DOM_STATE_MAP[dom.info()[0]] != 'shutoff': raise InvalidOperation('KCHVMIF0011E') # mac address is a required parameter @@ -244,20 +246,20 @@ def update(self, vm, mac, params): # new mac address must be unique if self._get_vmiface(vm, params['mac']) is not None: - raise InvalidParameter('KCHVMIF0009E', - {'name': vm, 'mac': params['mac']}) + raise InvalidParameter( + 'KCHVMIF0009E', {'name': vm, 'mac': params['mac']}) flags = 0 if dom.isPersistent(): flags |= libvirt.VIR_DOMAIN_AFFECT_CONFIG # remove the current nic - xml = etree.tostring(iface) + xml = etree.tostring(iface).decode('utf-8') dom.detachDeviceFlags(xml, flags=flags) # add the nic with the desired mac address iface.mac.attrib['address'] = params['mac'] - xml = etree.tostring(iface) + xml = etree.tostring(iface).decode('utf-8') dom.attachDeviceFlags(xml, flags=flags) return [vm, params['mac']] diff --git a/model/vms.py b/model/vms.py index d97790c78..0c893d9b2 100644 --- a/model/vms.py +++ b/model/vms.py @@ -16,90 +16,125 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - import copy -import libvirt -import lxml.etree as ET import os -import paramiko import platform import pwd import random import signal import socket -import subprocess import string +import subprocess import threading import time import uuid -from lxml import etree, objectify -from lxml.builder import E from xml.etree import ElementTree +import libvirt +import lxml.etree as ET +import paramiko +from lxml import etree +from lxml import objectify +from lxml.builder import E from wok import websocket from wok.asynctask import AsyncTask from wok.config import config -from wok.exception import InvalidOperation, InvalidParameter -from wok.exception import NotFoundError, OperationFailed +from wok.exception import InvalidOperation +from wok.exception import InvalidParameter +from wok.exception import NotFoundError +from wok.exception import OperationFailed from wok.model.tasks import TaskModel -from wok.rollbackcontext import RollbackContext -from wok.utils import convert_data_size -from wok.utils import import_class, run_setfacl_set_attr, run_command, wok_log -from wok.xmlutils.utils import dictize, xpath_get_text, xml_item_insert -from wok.xmlutils.utils import xml_item_remove, xml_item_update - from wok.plugins.kimchi import model from wok.plugins.kimchi import serialconsole -from wok.plugins.kimchi.config import READONLY_POOL_TYPE, get_kimchi_version +from wok.plugins.kimchi.config import 
config as kimchi_config +from wok.plugins.kimchi.config import get_kimchi_version +from wok.plugins.kimchi.config import READONLY_POOL_TYPE from wok.plugins.kimchi.kvmusertests import UserTests from wok.plugins.kimchi.model.config import CapabilitiesModel from wok.plugins.kimchi.model.cpuinfo import CPUInfoModel from wok.plugins.kimchi.model.featuretests import FeatureTests from wok.plugins.kimchi.model.templates import PPC_MEM_ALIGN -from wok.plugins.kimchi.model.templates import TemplateModel, validate_memory -from wok.plugins.kimchi.model.utils import get_ascii_nonascii_name, get_vm_name +from wok.plugins.kimchi.model.templates import TemplateModel +from wok.plugins.kimchi.model.templates import validate_memory +from wok.plugins.kimchi.model.utils import get_ascii_nonascii_name from wok.plugins.kimchi.model.utils import get_metadata_node +from wok.plugins.kimchi.model.utils import get_vm_name from wok.plugins.kimchi.model.utils import remove_metadata_node from wok.plugins.kimchi.model.utils import set_metadata_node -from wok.plugins.kimchi.osinfo import defaults, MEM_DEV_SLOTS +from wok.plugins.kimchi.osinfo import defaults +from wok.plugins.kimchi.osinfo import MEM_DEV_SLOTS from wok.plugins.kimchi.screenshot import VMScreenshot -from wok.plugins.kimchi.utils import get_next_clone_name, is_s390x +from wok.plugins.kimchi.utils import get_next_clone_name +from wok.plugins.kimchi.utils import is_s390x from wok.plugins.kimchi.utils import template_name_from_uri -from wok.plugins.kimchi.xmlutils.bootorder import get_bootorder_node from wok.plugins.kimchi.xmlutils.bootorder import get_bootmenu_node +from wok.plugins.kimchi.xmlutils.bootorder import get_bootorder_node from wok.plugins.kimchi.xmlutils.cpu import get_topology_xml -from wok.plugins.kimchi.xmlutils.disk import get_vm_disk_info, get_vm_disks -from utils import has_cpu_numa, set_numa_memory - - -DOM_STATE_MAP = {0: 'nostate', - 1: 'running', - 2: 'blocked', - 3: 'paused', - 4: 'shutdown', - 5: 'shutoff', - 6: 'crashed', - 7: 'pmsuspended'} +from wok.plugins.kimchi.xmlutils.disk import get_vm_disk_info +from wok.plugins.kimchi.xmlutils.disk import get_vm_disks +from wok.rollbackcontext import RollbackContext +from wok.utils import convert_data_size +from wok.utils import import_class +from wok.utils import run_command +from wok.utils import run_setfacl_set_attr +from wok.utils import wok_log +from wok.xmlutils.utils import dictize +from wok.xmlutils.utils import xml_item_insert +from wok.xmlutils.utils import xml_item_remove +from wok.xmlutils.utils import xml_item_update +from wok.xmlutils.utils import xpath_get_text + +from .utils import has_cpu_numa +from .utils import set_numa_memory + + +DOM_STATE_MAP = { + 0: 'nostate', + 1: 'running', + 2: 'blocked', + 3: 'paused', + 4: 'shutdown', + 5: 'shutoff', + 6: 'crashed', + 7: 'pmsuspended', +} # update parameters which are updatable when the VM is online -VM_ONLINE_UPDATE_PARAMS = ['cpu_info', 'graphics', 'groups', - 'memory', 'users', 'autostart'] +VM_ONLINE_UPDATE_PARAMS = [ + 'cpu_info', + 'graphics', + 'groups', + 'memory', + 'users', + 'autostart', +] # update parameters which are updatable when the VM is offline -VM_OFFLINE_UPDATE_PARAMS = ['cpu_info', 'graphics', 'groups', 'memory', - 'name', 'users', 'bootorder', 'bootmenu', - 'description', 'title', 'console', 'autostart'] +VM_OFFLINE_UPDATE_PARAMS = [ + 'cpu_info', + 'graphics', + 'groups', + 'memory', + 'name', + 'users', + 'bootorder', + 'bootmenu', + 'description', + 'title', + 'console', + 'autostart', +] 
XPATH_DOMAIN_DISK = "/domain/devices/disk[@device='disk']/source/@file" XPATH_DOMAIN_DISK_BY_FILE = "./devices/disk[@device='disk']/source[@file='%s']" XPATH_DOMAIN_NAME = '/domain/name' -XPATH_DOMAIN_MAC = "/domain/devices/interface/mac/@address" +XPATH_DOMAIN_MAC = '/domain/devices/interface/mac/@address' XPATH_DOMAIN_MAC_BY_ADDRESS = "./devices/interface/mac[@address='%s']" XPATH_DOMAIN_MEMORY = '/domain/memory' XPATH_DOMAIN_MEMORY_UNIT = '/domain/memory/@unit' XPATH_DOMAIN_UUID = '/domain/uuid' XPATH_DOMAIN_DEV_CPU_ID = '/domain/devices/spapr-cpu-socket/@id' -XPATH_DOMAIN_CONSOLE_TARGET = "/domain/devices/console/target/@type" +XPATH_DOMAIN_CONSOLE_TARGET = '/domain/devices/console/target/@type' XPATH_BOOT = 'os/boot/@dev' XPATH_BOOTMENU = 'os/bootmenu/@enable' @@ -114,7 +149,7 @@ XPATH_TOPOLOGY = './cpu/topology' XPATH_VCPU = './vcpu' XPATH_MAX_MEMORY = './maxMemory' -XPATH_CONSOLE_TARGET = "./devices/console/target" +XPATH_CONSOLE_TARGET = './devices/console/target' # key: VM name; value: lock object vm_locks = {} @@ -133,26 +168,29 @@ def create(self, params): name = get_vm_name(params.get('name'), t_name, vm_list) # incoming text, from js json, is unicode, do not need decode if name in vm_list: - raise InvalidOperation("KCHVM0001E", {'name': name}) + raise InvalidOperation('KCHVM0001E', {'name': name}) vm_overrides = dict() pool_uri = params.get('storagepool') if pool_uri: vm_overrides['storagepool'] = pool_uri vm_overrides['fc_host_support'] = self.caps.fc_host_support - t = TemplateModel.get_template(t_name, self.objstore, self.conn, - vm_overrides) + t = TemplateModel.get_template( + t_name, self.objstore, self.conn, vm_overrides) if not self.caps.qemu_stream and t.info.get('iso_stream', False): - raise InvalidOperation("KCHVM0005E") + raise InvalidOperation('KCHVM0005E') t.validate() - data = {'name': name, 'template': t, - 'graphics': params.get('graphics', {}), - "title": params.get("title", ""), - "description": params.get("description", "")} - taskid = AsyncTask(u'/plugins/kimchi/vms/%s' % name, self._create_task, - data).id + data = { + 'name': name, + 'template': t, + 'graphics': params.get('graphics', {}), + 'title': params.get('title', ''), + 'description': params.get('description', ''), + } + taskid = AsyncTask( + f'/plugins/kimchi/vms/{name}', self._create_task, data).id return self.task.lookup(taskid) @@ -176,42 +214,49 @@ def _create_task(self, cb, params): if icon: try: with self.objstore as session: - session.store('vm', vm_uuid, {'icon': icon}, - get_kimchi_version()) + session.store('vm', vm_uuid, { + 'icon': icon}, get_kimchi_version()) except Exception as e: # It is possible to continue Kimchi executions without store # vm icon info - wok_log.error('Error trying to update database with guest ' - 'icon information due error: %s', e.message) + wok_log.error( + f'Error trying to update database with guest ' + f'icon information due error: {e}' + ) cb('Provisioning storages for new VM') vol_list = t.fork_vm_storage(vm_uuid) graphics = params.get('graphics', {}) stream_protocols = self.caps.libvirt_stream_protocols - xml = t.to_vm_xml(name, vm_uuid, - libvirt_stream_protocols=stream_protocols, - graphics=graphics, - mem_hotplug_support=self.caps.mem_hotplug_support, - title=title, description=description) + xml = t.to_vm_xml( + name, + vm_uuid, + libvirt_stream_protocols=stream_protocols, + graphics=graphics, + mem_hotplug_support=self.caps.mem_hotplug_support, + title=title, + description=description, + ) cb('Defining new VM') try: - 
conn.defineXML(xml.encode('utf-8')) + conn.defineXML(xml) except libvirt.libvirtError as e: for v in vol_list: vol = conn.storageVolLookupByPath(v['path']) vol.delete(0) - raise OperationFailed("KCHVM0007E", {'name': name, - 'err': e.get_error_message()}) + raise OperationFailed( + 'KCHVM0007E', {'name': name, 'err': e.get_error_message()} + ) cb('Updating VM metadata') meta_elements = [] - distro = t.info.get("os_distro") - version = t.info.get("os_version") + distro = t.info.get('os_distro') + version = t.info.get('os_version') if distro is not None: - meta_elements.append(E.os({"distro": distro, "version": version})) + meta_elements.append(E.os({'distro': distro, 'version': version})) if nonascii_name is not None: meta_elements.append(E.name(nonascii_name)) @@ -232,8 +277,9 @@ def get_vms(conn): nonascii_node = ET.fromstring(nonascii_xml) names.append(nonascii_node.text) else: - names.append(dom.name().decode('utf-8')) - names = sorted(names, key=unicode.lower) + names.append(dom.name()) + + names = sorted(names, key=str.lower) return names @@ -244,19 +290,20 @@ def __init__(self, **kargs): self.caps = CapabilitiesModel(**kargs) self.vmscreenshot = VMScreenshotModel(**kargs) self.users = import_class( - 'plugins.kimchi.model.users.UsersModel' - )(**kargs) - self.groups = import_class( - 'plugins.kimchi.model.groups.GroupsModel' - )(**kargs) + 'wok.plugins.kimchi.model.users.UsersModel')(**kargs) + self.groups = import_class('wok.plugins.kimchi.model.groups.GroupsModel')( + **kargs + ) self.vms = VMsModel(**kargs) self.task = TaskModel(**kargs) self.storagepool = model.storagepools.StoragePoolModel(**kargs) self.storagevolume = model.storagevolumes.StorageVolumeModel(**kargs) self.storagevolumes = model.storagevolumes.StorageVolumesModel(**kargs) - cls = import_class('plugins.kimchi.model.vmsnapshots.VMSnapshotModel') + cls = import_class( + 'wok.plugins.kimchi.model.vmsnapshots.VMSnapshotModel') self.vmsnapshot = cls(**kargs) - cls = import_class('plugins.kimchi.model.vmsnapshots.VMSnapshotsModel') + cls = import_class( + 'wok.plugins.kimchi.model.vmsnapshots.VMSnapshotsModel') self.vmsnapshots = cls(**kargs) self.stats = {} self._serial_procs = [] @@ -269,8 +316,7 @@ def has_topology(self, dom): return sockets and cores and threads def update(self, name, params): - if platform.machine() not in ['s390x', 's390'] and\ - 'console' in params: + if platform.machine() not in ['s390x', 's390'] and 'console' in params: raise InvalidParameter('KCHVM0087E') lock = vm_locks.get(name) if lock is None: @@ -279,24 +325,29 @@ def update(self, name, params): with lock: dom = self.get_vm(name, self.conn) - if "autostart" in params: + if 'autostart' in params: dom.setAutostart(1 if params['autostart'] is True else 0) # You can only change offline, updating guest XML - if ("memory" in params) and ('maxmemory' in params['memory']) and\ - (DOM_STATE_MAP[dom.info()[0]] != 'shutoff'): - raise InvalidParameter("KCHVM0080E") + if ( + ('memory' in params) and + ('maxmemory' in params['memory']) and + (DOM_STATE_MAP[dom.info()[0]] != 'shutoff') + ): + raise InvalidParameter('KCHVM0080E') if DOM_STATE_MAP[dom.info()[0]] == 'shutoff': ext_params = set(params.keys()) - set(VM_OFFLINE_UPDATE_PARAMS) if len(ext_params) > 0: - raise InvalidParameter('KCHVM0073E', - {'params': ', '.join(ext_params)}) + raise InvalidParameter( + 'KCHVM0073E', {'params': ', '.join(ext_params)} + ) else: ext_params = set(params.keys()) - set(VM_ONLINE_UPDATE_PARAMS) if len(ext_params) > 0: - raise InvalidParameter('KCHVM0074E', - 
{'params': ', '.join(ext_params)}) + raise InvalidParameter( + 'KCHVM0074E', {'params': ', '.join(ext_params)} + ) # METADATA can be updated offline or online self._vm_update_access_metadata(dom, params) @@ -307,8 +358,9 @@ def update(self, name, params): # some parameters cannot change while vm is running if DOM_STATE_MAP[dom.info()[0]] != 'shutoff': if 'type' in params['graphics']: - raise InvalidParameter('KCHVM0074E', - {'params': 'graphics type'}) + raise InvalidParameter( + 'KCHVM0074E', {'params': 'graphics type'} + ) dom = self._update_graphics(dom, params) # Live updates @@ -316,7 +368,7 @@ def update(self, name, params): self._live_vm_update(dom, params) vm_name = name - if (DOM_STATE_MAP[dom.info()[0]] == 'shutoff'): + if DOM_STATE_MAP[dom.info()[0]] == 'shutoff': vm_name, dom = self._static_vm_update(name, dom, params) return vm_name @@ -347,7 +399,7 @@ def clone(self, name): """ # VM must be shutoff in order to clone it info = self.lookup(name) - if info['state'] != u'shutoff': + if info['state'] != 'shutoff': raise InvalidParameter('KCHVM0033E', {'name': name}) # the new VM's name will be used as the Task's 'target_uri' so it needs @@ -368,9 +420,11 @@ def clone(self, name): new_name = get_next_clone_name(current_vm_names, name, ts=True) # create a task with the actual clone function - taskid = AsyncTask(u'/plugins/kimchi/vms/%s/clone' % new_name, - self._clone_task, {'name': name, - 'new_name': new_name}).id + taskid = AsyncTask( + f'/plugins/kimchi/vms/{new_name}/clone', + self._clone_task, + {'name': name, 'new_name': new_name}, + ).id return self.task.lookup(taskid) @@ -391,15 +445,14 @@ def _clone_task(self, cb, params): try: vir_dom = self.get_vm(name, self.conn) flags = libvirt.VIR_DOMAIN_XML_SECURE - xml = vir_dom.XMLDesc(flags).decode('utf-8') - except libvirt.libvirtError, e: - raise OperationFailed('KCHVM0035E', {'name': name, - 'err': e.message}) + xml = vir_dom.XMLDesc(flags) + except libvirt.libvirtError as e: + raise OperationFailed('KCHVM0035E', {'name': name, 'err': str(e)}) # update UUID cb('updating VM UUID') old_uuid = xpath_get_text(xml, XPATH_DOMAIN_UUID)[0] - new_uuid = unicode(uuid.uuid4()) + new_uuid = str(uuid.uuid4()) xml = xml_item_update(xml, './uuid', new_uuid) # update MAC addresses @@ -426,9 +479,9 @@ def _clone_task(self, cb, params): vir_conn = self.conn.get() dom = vir_conn.defineXML(xml) self._update_metadata_name(dom, nonascii_name) - except libvirt.libvirtError, e: - raise OperationFailed('KCHVM0035E', {'name': name, - 'err': e.message}) + except libvirt.libvirtError as e: + raise OperationFailed( + 'KCHVM0035E', {'name': name, 'err': str(e)}) rollback.commitAll() @@ -461,8 +514,9 @@ def _clone_update_mac_addresses(xml): new_macs.append(new_mac) break - xml = xml_item_update(xml, XPATH_DOMAIN_MAC_BY_ADDRESS % mac, - new_mac, 'address') + xml = xml_item_update( + xml, XPATH_DOMAIN_MAC_BY_ADDRESS % mac, new_mac, 'address' + ) return xml @@ -492,11 +546,12 @@ def _clone_update_disks(self, xml, rollback): vir_orig_vol = vir_conn.storageVolLookupByPath(path) vir_pool = vir_orig_vol.storagePoolLookupByVolume() - orig_pool_name = vir_pool.name().decode('utf-8') - orig_vol_name = vir_orig_vol.name().decode('utf-8') - except libvirt.libvirtError, e: - raise OperationFailed('KCHVM0035E', {'name': domain_name, - 'err': e.message}) + orig_pool_name = vir_pool.name() + orig_vol_name = vir_orig_vol.name() + except libvirt.libvirtError as e: + raise OperationFailed( + 'KCHVM0035E', {'name': domain_name, 'err': str(e)} + ) orig_pool = 
self.storagepool.lookup(orig_pool_name) orig_vol = self.storagevolume.lookup(orig_pool_name, orig_vol_name) @@ -509,27 +564,30 @@ def _clone_update_disks(self, xml, rollback): # a new volume with the same size, the pool 'default' should # be used if orig_vol['capacity'] > orig_pool['available']: - wok_log.warning('storage pool \'%s\' doesn\'t have ' - 'enough free space to store image ' - '\'%s\'; falling back to \'default\'', - orig_pool_name, path) - new_pool_name = u'default' - new_pool = self.storagepool.lookup(u'default') + wok_log.warning( + f"storage pool '{orig_pool_name}' doesn't have " + f'enough free space to store image ' + f"'{path}'; falling back to 'default'" + ) + new_pool_name = 'default' + new_pool = self.storagepool.lookup('default') # ...and if even the pool 'default' cannot hold a new # volume, raise an exception if orig_vol['capacity'] > new_pool['available']: domain_name = xpath_get_text(xml, XPATH_DOMAIN_NAME)[0] - raise InvalidOperation('KCHVM0034E', - {'name': domain_name}) + raise InvalidOperation( + 'KCHVM0034E', {'name': domain_name}) elif orig_pool['type'] in ['scsi', 'iscsi']: # SCSI and iSCSI always fall back to the storage pool 'default' - wok_log.warning('cannot create new volume for clone in ' - 'storage pool \'%s\'; falling back to ' - '\'default\'', orig_pool_name) - new_pool_name = u'default' - new_pool = self.storagepool.lookup(u'default') + wok_log.warning( + f'cannot create new volume for clone in ' + f"storage pool '{orig_pool_name}'; falling back to " + f"'default'" + ) + new_pool_name = 'default' + new_pool = self.storagepool.lookup('default') # if the pool 'default' cannot hold a new volume, raise # an exception @@ -539,25 +597,28 @@ def _clone_update_disks(self, xml, rollback): else: # unexpected storage pool type - raise InvalidOperation('KCHPOOL0014E', - {'type': orig_pool['type']}) + raise InvalidOperation( + 'KCHPOOL0014E', {'type': orig_pool['type']}) # new volume name: -. # e.g. 1234-5678-9012-3456-0.img ext = os.path.splitext(path)[1] - new_vol_name = u'%s-%d%s' % (uuid, i, ext) - task = self.storagevolume.clone(orig_pool_name, orig_vol_name, - new_name=new_vol_name) + new_vol_name = f'{uuid}-{i}{ext}' + task = self.storagevolume.clone( + orig_pool_name, orig_vol_name, new_name=new_vol_name + ) self.task.wait(task['id'], 3600) # 1 h # get the new volume path and update the XML descriptor new_vol = self.storagevolume.lookup(new_pool_name, new_vol_name) - xml = xml_item_update(xml, XPATH_DOMAIN_DISK_BY_FILE % path, - new_vol['path'], 'file') + xml = xml_item_update( + xml, XPATH_DOMAIN_DISK_BY_FILE % path, new_vol['path'], 'file' + ) # remove the new volume should an error occur later - rollback.prependDefer(self.storagevolume.delete, new_pool_name, - new_vol_name) + rollback.prependDefer( + self.storagevolume.delete, new_pool_name, new_vol_name + ) return xml @@ -574,8 +635,8 @@ def _clone_update_objstore(self, old_uuid, new_uuid, rollback): try: vm = session.get('vm', old_uuid) icon = vm['icon'] - session.store('vm', new_uuid, {'icon': icon}, - get_kimchi_version()) + session.store('vm', new_uuid, { + 'icon': icon}, get_kimchi_version()) except NotFoundError: # if we cannot find an object store entry for the original VM, # don't store one with an empty value. 
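A note on the recurring .decode('utf-8') calls added throughout these model/vms.py hunks: under Python 3, lxml's etree.tostring() returns bytes by default, while the libvirt Python bindings expect XML arguments as str, so serialized elements are decoded before being handed to calls such as attachDeviceFlags(), detachDeviceFlags() and defineXML(). A minimal standalone sketch of the pattern follows; the memory-device element built here is illustrative only, not Kimchi's actual device XML.

    from lxml import etree
    from lxml.builder import E

    # Hypothetical device element, for illustration only.
    dev = E.memory(E.target(E.size('1024', unit='MiB')), model='dimm')

    raw = etree.tostring(dev)     # bytes under Python 3
    xml = raw.decode('utf-8')     # str, as the libvirt bindings expect

    print(type(raw).__name__, type(xml).__name__)   # -> bytes str

An equivalent alternative would be etree.tostring(dev, encoding='unicode'), which returns str directly; the patch consistently opts for the explicit decode instead.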
@@ -594,8 +655,8 @@ def _rollback_objstore(): rollback.prependDefer(_rollback_objstore) def _build_access_elem(self, dom, users, groups): - auth = config.get("authentication", "method") - access_xml = get_metadata_node(dom, "access") + auth = config.get('authentication', 'method') + access_xml = get_metadata_node(dom, 'access') auth_elem = None @@ -605,7 +666,7 @@ def _build_access_elem(self, dom, users, groups): else: access_elem = ET.fromstring(access_xml) - same_auth = access_elem.xpath('./auth[@type="%s"]' % auth) + same_auth = access_elem.xpath(f'./auth[@type="{auth}"]') if len(same_auth) > 0: # there is already a sub-element 'auth' with the same type; # update it. @@ -637,18 +698,16 @@ def _build_access_elem(self, dom, users, groups): def _vm_update_access_metadata(self, dom, params): users = groups = None - if "users" in params: - users = params["users"] + if 'users' in params: + users = params['users'] for user in users: if not self.users.validate(user): - raise InvalidParameter("KCHVM0027E", - {'users': user}) - if "groups" in params: - groups = params["groups"] + raise InvalidParameter('KCHVM0027E', {'users': user}) + if 'groups' in params: + groups = params['groups'] for group in groups: if not self.groups.validate(group): - raise InvalidParameter("KCHVM0028E", - {'groups': group}) + raise InvalidParameter('KCHVM0028E', {'groups': group}) if users is None and groups is None: return @@ -658,49 +717,48 @@ def _vm_update_access_metadata(self, dom, params): def _get_access_info(self, dom): users = groups = list() - access_xml = (get_metadata_node(dom, "access") or - """""") + access_xml = get_metadata_node( + dom, 'access') or """""" access_info = dictize(access_xml) - auth = config.get("authentication", "method") - if ('auth' in access_info['access'] and - ('type' in access_info['access']['auth'] or - len(access_info['access']['auth']) > 1)): - users = xpath_get_text(access_xml, - "/access/auth[@type='%s']/user" % auth) - groups = xpath_get_text(access_xml, - "/access/auth[@type='%s']/group" % auth) + auth = config.get('authentication', 'method') + if 'auth' in access_info['access'] and ( + 'type' in access_info['access']['auth'] or + len(access_info['access']['auth']) > 1 + ): + users = xpath_get_text( + access_xml, f"/access/auth[@type='{auth}']/user") + groups = xpath_get_text( + access_xml, f"/access/auth[@type='{auth}']/group") elif auth == 'pam': # Compatible to old permission tagging - users = xpath_get_text(access_xml, "/access/user") - groups = xpath_get_text(access_xml, "/access/group") + users = xpath_get_text(access_xml, '/access/user') + groups = xpath_get_text(access_xml, '/access/group') return users, groups @staticmethod def vm_get_os_metadata(dom): - os_xml = (get_metadata_node(dom, "os") or - """""") + os_xml = get_metadata_node(dom, 'os') or """""" os_elem = ET.fromstring(os_xml) - return (os_elem.attrib.get("version"), os_elem.attrib.get("distro")) + return (os_elem.attrib.get('version'), os_elem.attrib.get('distro')) def _update_graphics(self, dom, params): root = objectify.fromstring(dom.XMLDesc(0)) - graphics = root.devices.find("graphics") + graphics = root.devices.find('graphics') if graphics is None: return dom - password = params['graphics'].get("passwd") + password = params['graphics'].get('passwd') if password is not None and len(password.strip()) == 0: - password = "".join(random.sample(string.ascii_letters + - string.digits, 8)) + password = ''.join(random.sample( + string.ascii_letters + string.digits, 8)) if password is not None: 
graphics.attrib['passwd'] = password - expire = params['graphics'].get("passwdValidTo") + expire = params['graphics'].get('passwdValidTo') to = graphics.attrib.get('passwdValidTo') if to is not None: - if (time.mktime(time.strptime(to, '%Y-%m-%dT%H:%M:%S')) - - time.time() <= 0): + if time.mktime(time.strptime(to, '%Y-%m-%dT%H:%M:%S')) - time.time() <= 0: expire = expire if expire is not None else 30 if expire is not None: @@ -714,11 +772,13 @@ def _update_graphics(self, dom, params): conn = self.conn.get() if not dom.isActive(): - return conn.defineXML(ET.tostring(root, encoding="utf-8")) + return conn.defineXML(ET.tostring(root, encoding='utf-8').decode('utf-8')) xml = dom.XMLDesc(libvirt.VIR_DOMAIN_XML_SECURE) - dom.updateDeviceFlags(etree.tostring(graphics), - libvirt.VIR_DOMAIN_AFFECT_LIVE) + dom.updateDeviceFlags( + etree.tostring(graphics).decode( + 'utf-8'), libvirt.VIR_DOMAIN_AFFECT_LIVE + ) return conn.defineXML(xml) def _backup_snapshots(self, snap, all_info): @@ -732,8 +792,8 @@ def _backup_snapshots(self, snap, all_info): all_info -- a list of dict keys: "{'xml': , 'current': '}" """ - all_info.append({'xml': snap.getXMLDesc(0), - 'current': snap.isCurrent(0)}) + all_info.append({'xml': snap.getXMLDesc( + 0), 'current': snap.isCurrent(0)}) for child in snap.listAllChildren(0): self._backup_snapshots(child, all_info) @@ -755,8 +815,8 @@ def _redefine_snapshots(self, dom, all_info): # Snapshot XML contains the VM xml from the time it was created. # Thus VM name and uuid must be updated to current ones. Otherwise, # when reverted, the vm name will be inconsistent. - name = dom.name().decode('utf-8') - uuid = dom.UUIDString().decode('utf-8') + name = dom.name() + uuid = dom.UUIDString() xml = xml_item_update(info['xml'], XPATH_SNAP_VM_NAME, name, None) xml = xml_item_update(xml, XPATH_SNAP_VM_UUID, uuid, None) @@ -773,21 +833,21 @@ def _update_bootorder(self, xml, params): et = ET.fromstring(xml) # get machine type - os = et.find("os") + os = et.find('os') # add new bootorder - if "bootorder" in params: + if 'bootorder' in params: # remove old order - [os.remove(device) for device in os.findall("boot")] + [os.remove(device) for device in os.findall('boot')] - for device in get_bootorder_node(params["bootorder"]): + for device in get_bootorder_node(params['bootorder']): os.append(device) # update bootmenu - if params.get("bootmenu") is False: - [os.remove(bm) for bm in os.findall("bootmenu")] - elif params.get("bootmenu") is True: + if params.get('bootmenu') is False: + [os.remove(bm) for bm in os.findall('bootmenu')] + elif params.get('bootmenu') is True: os.append(get_bootmenu_node()) # update @@ -796,16 +856,58 @@ def _update_bootorder(self, xml, params): def _update_s390x_console(self, xml, params): if xpath_get_text(xml, XPATH_DOMAIN_CONSOLE_TARGET): # if console is defined, update console - return xml_item_update(xml, XPATH_CONSOLE_TARGET, - params.get('console'), 'type') + return xml_item_update( + xml, XPATH_CONSOLE_TARGET, params.get('console'), 'type' + ) # if console is not defined earlier, add console - console = E.console(type="pty") + console = E.console(type='pty') console.append(E.target(type=params.get('console'), port='0')) et = ET.fromstring(xml) devices = et.find('devices') devices.append(console) return ET.tostring(et) + def _update_title(self, new_xml, title): + if len(xpath_get_text(new_xml, XPATH_TITLE)) > 0: + new_xml = xml_item_update( + new_xml, XPATH_TITLE, title, None) + else: + et = ET.fromstring(new_xml) + et.append(E.title(title)) + new_xml = 
ET.tostring(et) + + return new_xml + + def _update_description(self, new_xml, description): + if len(xpath_get_text(new_xml, XPATH_DESCRIPTION)) > 0: + new_xml = xml_item_update( + new_xml, XPATH_DESCRIPTION, description, None + ) + else: + et = ET.fromstring(new_xml) + et.append(E.description(description)) + new_xml = ET.tostring(et) + + return new_xml + + def _update_topology(self, dom, new_xml, topology): + sockets = str(topology['sockets']) + cores = str(topology['cores']) + threads = str(topology['threads']) + + if self.has_topology(dom): + # topology is being updated + xpath = XPATH_TOPOLOGY + new_xml = xml_item_update(new_xml, xpath, sockets, 'sockets') + new_xml = xml_item_update(new_xml, xpath, cores, 'cores') + new_xml = xml_item_update(new_xml, xpath, threads, 'threads') + else: + # topology is being added + new_xml = xml_item_insert( + new_xml, XPATH_CPU, get_topology_xml(topology) + ) + return new_xml + def _static_vm_update(self, vm_name, dom, params): old_xml = new_xml = dom.XMLDesc(libvirt.VIR_DOMAIN_XML_SECURE) params = copy.deepcopy(params) @@ -819,22 +921,10 @@ def _static_vm_update(self, vm_name, dom, params): new_xml = xml_item_update(new_xml, XPATH_NAME, name, None) if 'title' in params: - if len(xpath_get_text(new_xml, XPATH_TITLE)) > 0: - new_xml = xml_item_update(new_xml, XPATH_TITLE, - params['title'], None) - else: - et = ET.fromstring(new_xml) - et.append(E.title(params["title"])) - new_xml = ET.tostring(et) + new_xml = self._update_title(new_xml, params['title']) if 'description' in params: - if len(xpath_get_text(new_xml, XPATH_DESCRIPTION)) > 0: - new_xml = xml_item_update(new_xml, XPATH_DESCRIPTION, - params['description'], None) - else: - et = ET.fromstring(new_xml) - et.append(E.description(params["description"])) - new_xml = ET.tostring(et) + new_xml = self._update_description(new_xml, params['description']) # Update CPU info cpu_info = params.get('cpu_info', {}) @@ -848,30 +938,17 @@ def _static_vm_update(self, vm_name, dom, params): topology = cpu_info['topology'] if topology: - sockets = str(topology['sockets']) - cores = str(topology['cores']) - threads = str(topology['threads']) - - if self.has_topology(dom): - # topology is being updated - xpath = XPATH_TOPOLOGY - new_xml = xml_item_update(new_xml, xpath, sockets, 'sockets') - new_xml = xml_item_update(new_xml, xpath, cores, 'cores') - new_xml = xml_item_update(new_xml, xpath, threads, 'threads') - else: - # topology is being added - new_xml = xml_item_insert(new_xml, XPATH_CPU, - get_topology_xml(topology)) + new_xml = self._update_topology(dom, new_xml, topology) elif self.has_topology(dom): # topology is being undefined: remove it new_xml = xml_item_remove(new_xml, XPATH_TOPOLOGY) # Updating memory - if ('memory' in params and params['memory'] != {}): + if 'memory' in params and params['memory'] != {}: new_xml = self._update_memory_config(new_xml, params, dom) # update bootorder or bootmenu - if "bootorder" in params or "bootmenu" in params: + if 'bootorder' in params or 'bootmenu' in params: new_xml = self._update_bootorder(new_xml, params) if platform.machine() in ['s390', 's390x'] and params.get('console'): @@ -882,12 +959,18 @@ def _static_vm_update(self, vm_name, dom, params): try: if 'name' in params: lflags = libvirt.VIR_DOMAIN_SNAPSHOT_LIST_ROOTS - dflags = (libvirt.VIR_DOMAIN_SNAPSHOT_DELETE_CHILDREN | - libvirt.VIR_DOMAIN_SNAPSHOT_DELETE_METADATA_ONLY) + dflags = ( + libvirt.VIR_DOMAIN_SNAPSHOT_DELETE_CHILDREN | + libvirt.VIR_DOMAIN_SNAPSHOT_DELETE_METADATA_ONLY + ) for virt_snap in 
dom.listAllSnapshots(lflags): - snapshots_info.append({'xml': virt_snap.getXMLDesc(0), - 'current': virt_snap.isCurrent(0)}) + snapshots_info.append( + { + 'xml': virt_snap.getXMLDesc(0), + 'current': virt_snap.isCurrent(0), + } + ) self._backup_snapshots(virt_snap, snapshots_info) virt_snap.delete(dflags) @@ -895,6 +978,8 @@ def _static_vm_update(self, vm_name, dom, params): # Undefine old vm, only if name is going to change dom.undefine() + new_xml = new_xml.decode( + 'utf-8') if isinstance(new_xml, bytes) else new_xml dom = conn.defineXML(new_xml) self._update_metadata_name(dom, nonascii_name) if 'name' in params: @@ -904,18 +989,65 @@ def _static_vm_update(self, vm_name, dom, params): if 'name' in params: self._redefine_snapshots(dom, snapshots_info) - raise OperationFailed("KCHVM0008E", {'name': vm_name, - 'err': e.get_error_message()}) + raise OperationFailed( + 'KCHVM0008E', {'name': vm_name, 'err': e.get_error_message()} + ) if name is not None: vm_name = name - return (nonascii_name if nonascii_name is not None else vm_name, dom) + return nonascii_name if nonascii_name is not None else vm_name, dom + + def _get_new_memory(self, root, newMem, oldMem, memDevs): + memDevsAmount = self._get_mem_dev_total_size(ET.tostring(root)) + + if newMem > (oldMem << 10): + return newMem - memDevsAmount + + if newMem < (oldMem << 10): + memDevs.reverse() + totRemoved = 0 + for dev in memDevs: + size = dev.find('./target/size') + totRemoved += int( + convert_data_size(size.text, size.get('unit'), 'KiB') + ) + root.find('./devices').remove(dev) + if ((oldMem << 10) - totRemoved) <= newMem: + return newMem - self._get_mem_dev_total_size( + ET.tostring(root) + ) + + if newMem == (oldMem << 10): + return newMem - memDevsAmount + + def _set_max_memory(self, root, newMem, newMaxMem, maxMemTag): + # Conditions: + if (maxMemTag is None) and (newMem != newMaxMem): + # Creates the maxMemory tag + max_mem_xml = E.maxMemory( + str(newMaxMem), unit='Kib', slots=str(defaults['mem_dev_slots']) + ) + root.insert(0, max_mem_xml) + elif (maxMemTag is None) and (newMem == newMaxMem): + # Nothing to do + pass + elif (maxMemTag is not None) and (newMem != newMaxMem): + # Just update value in max memory tag + maxMemTag.text = str(newMaxMem) + elif (maxMemTag is not None) and (newMem == newMaxMem): + if self._get_mem_dev_total_size(ET.tostring(root)) == 0: + # Remove the tag + root.remove(maxMemTag) + else: + maxMemTag.text = str(newMaxMem) + + return root, maxMemTag def _update_memory_config(self, xml, params, dom): # Cannot pass max memory if there is not support to memory hotplug # Then set max memory as memory, just to continue with the update if not self.caps.mem_hotplug_support: if 'maxmemory' in params['memory']: - raise InvalidOperation("KCHVM0046E") + raise InvalidOperation('KCHVM0046E') else: params['memory']['maxmemory'] = params['memory']['current'] @@ -932,57 +1064,21 @@ def _update_memory_config(self, xml, params, dom): newMem = (params['memory'].get('current', oldMem)) << 10 newMaxMem = (params['memory'].get('maxmemory', oldMaxMem)) << 10 - validate_memory({'current': newMem >> 10, - 'maxmemory': newMaxMem >> 10}) + validate_memory( + {'current': newMem >> 10, 'maxmemory': newMaxMem >> 10}) # Adjust memory devices to new memory, if necessary memDevs = root.findall('./devices/memory') - memDevsAmount = self._get_mem_dev_total_size(ET.tostring(root)) - if len(memDevs) != 0 and hasMem: - if newMem > (oldMem << 10): - newMem = newMem - memDevsAmount - elif newMem < (oldMem << 10): - memDevs.reverse() - 
totRemoved = 0 - for dev in memDevs: - size = dev.find('./target/size') - totRemoved += int(convert_data_size(size.text, - size.get('unit'), - 'KiB')) - root.find('./devices').remove(dev) - if ((oldMem << 10) - totRemoved) <= newMem: - newMem = newMem - self._get_mem_dev_total_size( - ET.tostring(root)) - break - elif newMem == (oldMem << 10): - newMem = newMem - memDevsAmount + newMem = self._get_new_memory(root, newMem, oldMem, memDevs) # There is an issue in Libvirt/Qemu, where Guest does not start if # memory and max memory are the same. So we decided to remove max # memory and only add it if user explicitly provides it, willing to # do memory hotplug if hasMaxMem: - # Conditions: - if (maxMemTag is None) and (newMem != newMaxMem): - # Creates the maxMemory tag - max_mem_xml = E.maxMemory( - str(newMaxMem), - unit='Kib', - slots=str(defaults['mem_dev_slots'])) - root.insert(0, max_mem_xml) - elif (maxMemTag is None) and (newMem == newMaxMem): - # Nothing to do - pass - elif (maxMemTag is not None) and (newMem != newMaxMem): - # Just update value in max memory tag - maxMemTag.text = str(newMaxMem) - elif (maxMemTag is not None) and (newMem == newMaxMem): - if self._get_mem_dev_total_size(ET.tostring(root)) == 0: - # Remove the tag - root.remove(maxMemTag) - else: - maxMemTag.text = str(newMaxMem) + root, maxMemTag = self._set_max_memory( + root, newMem, newMaxMem, maxMemTag) # Update memory, if necessary if hasMem: @@ -1007,8 +1103,9 @@ def _update_memory_config(self, xml, params, dom): memory.text = str(newMem) if (maxMemTag is not None) and (not hasMaxMem): - if (newMem == newMaxMem and - (self._get_mem_dev_total_size(ET.tostring(root)) == 0)): + if newMem == newMaxMem and ( + self._get_mem_dev_total_size(ET.tostring(root)) == 0 + ): root.remove(maxMemTag) # Setting memory hard limit to max_memory + 1GiB @@ -1017,9 +1114,9 @@ def _update_memory_config(self, xml, params, dom): hl = memtune.find('hard_limit') if hl is not None: memtune.remove(hl) - memtune.insert(0, E.hard_limit(str(newMaxMem + 1048576), - unit='Kib')) - return ET.tostring(root, encoding="utf-8") + memtune.insert(0, E.hard_limit( + str(newMaxMem + 1048576), unit='Kib')) + return ET.tostring(root, encoding='utf-8') def get_vm_cpu_cores(self, vm_xml): return xpath_get_text(vm_xml, XPATH_TOPOLOGY + '/@cores')[0] @@ -1037,11 +1134,7 @@ def get_vm_cpu_topology(self, dom): cores = int(self.get_vm_cpu_cores(dom.XMLDesc(0))) threads = int(self.get_vm_cpu_threads(dom.XMLDesc(0))) - topology = { - 'sockets': sockets, - 'cores': cores, - 'threads': threads, - } + topology = {'sockets': sockets, 'cores': cores, 'threads': threads} return topology @@ -1054,11 +1147,7 @@ def _update_cpu_info(self, new_xml, dom, new_info): xml_vcpus = xpath_get_text(new_xml, './vcpu/@current') vcpus = int(xml_vcpus[0]) if xml_vcpus else maxvcpus - cpu_info = { - 'maxvcpus': maxvcpus, - 'vcpus': vcpus, - 'topology': topology, - } + cpu_info = {'maxvcpus': maxvcpus, 'vcpus': vcpus, 'topology': topology} cpu_info.update(new_info) # Revalidate cpu info - may raise CPUInfo exceptions @@ -1069,7 +1158,7 @@ def _update_cpu_info(self, new_xml, dom, new_info): def _live_vm_update(self, dom, params): # Memory Hotplug/Unplug - if (('memory' in params) and ('current' in params['memory'])): + if ('memory' in params) and ('current' in params['memory']): self._update_memory_live(dom, params) if 'vcpus' in params.get('cpu_info', {}): @@ -1079,8 +1168,7 @@ def _live_vm_update(self, dom, params): def cpu_hotplug_precheck(self, dom, params): - if (('maxvcpus' in 
params['cpu_info']) or - ('topology' in params['cpu_info'])): + if ('maxvcpus' in params['cpu_info']) or ('topology' in params['cpu_info']): raise InvalidParameter('KCHCPUHOTP0001E') topology = self.get_vm_cpu_topology(dom) @@ -1089,36 +1177,29 @@ cpu_hotplug_precheck(self, dom, params): maxvcpus = int(xml_maxvcpus[0]) vcpus = params['cpu_info'].get('vcpus') - cpu_info = { - 'maxvcpus': maxvcpus, - 'vcpus': vcpus, - 'topology': topology, - } + cpu_info = {'maxvcpus': maxvcpus, 'vcpus': vcpus, 'topology': topology} cpu_model = CPUInfoModel(conn=self.conn) cpu_model.check_cpu_info(cpu_info) def update_cpu_live(self, dom, vcpus): - flags = libvirt.VIR_DOMAIN_AFFECT_LIVE | \ - libvirt.VIR_DOMAIN_AFFECT_CONFIG + flags = libvirt.VIR_DOMAIN_AFFECT_LIVE | libvirt.VIR_DOMAIN_AFFECT_CONFIG try: dom.setVcpusFlags(vcpus, flags) except libvirt.libvirtError as e: - raise OperationFailed('KCHCPUHOTP0002E', {'err': e.message}) + raise OperationFailed('KCHCPUHOTP0002E', {'err': str(e)}) def _get_mem_dev_total_size(self, xml): root = ET.fromstring(xml) totMemDevs = 0 for size in root.findall('./devices/memory/target/size'): - totMemDevs += convert_data_size(size.text, - size.get('unit'), - 'KiB') + totMemDevs += convert_data_size(size.text, size.get('unit'), 'KiB') return int(totMemDevs) def _update_memory_live(self, dom, params): # Check if host supports memory device if not self.caps.mem_hotplug_support: - raise InvalidOperation("KCHVM0046E") + raise InvalidOperation('KCHVM0046E') xml = dom.XMLDesc(0) max_mem = xpath_get_text(xml, './maxMemory') @@ -1132,14 +1213,20 @@ def _update_memory_live(self, dom, params): if platform.machine().startswith('ppc'): # make sure memory is alingned in 256MiB in PowerPC - if (new_mem % PPC_MEM_ALIGN != 0): - raise InvalidParameter('KCHVM0071E', - {'param': "Memory", - 'mem': str(new_mem), - 'alignment': str(PPC_MEM_ALIGN)}) + if new_mem % PPC_MEM_ALIGN != 0: + raise InvalidParameter( + 'KCHVM0071E', + { + 'param': 'Memory', + 'mem': str(new_mem), + 'alignment': str(PPC_MEM_ALIGN), + }, + ) # Check number of slots supported - if len(xpath_get_text(xml, './devices/memory')) == \ - MEM_DEV_SLOTS[os.uname()[4]]: + if ( + len(xpath_get_text(xml, './devices/memory')) == + MEM_DEV_SLOTS[os.uname()[4]] + ): raise InvalidOperation('KCHVM0045E') if memory == 0: @@ -1151,13 +1238,14 @@ def _update_memory_live(self, dom, params): # Finally HotPlug operation ( memory > 0 ) try: # Create memory device xml - tmp_xml = E.memory(E.target(E.size(str(memory), - unit='MiB')), model='dimm') + tmp_xml = E.memory( + E.target(E.size(str(memory), unit='MiB')), model='dimm') if has_cpu_numa(dom): tmp_xml.find('target').append(E.node('0')) - dom.attachDeviceFlags(etree.tostring(tmp_xml), flags) + dom.attachDeviceFlags( + etree.tostring(tmp_xml).decode('utf-8'), flags) except Exception as e: - raise OperationFailed("KCHVM0047E", {'error': e.message}) + raise OperationFailed('KCHVM0047E', {'error': str(e)}) def _has_video(self, dom): dom = ElementTree.fromstring(dom.XMLDesc(0)) @@ -1190,7 +1278,7 @@ def _update_guest_stats(self, name): except Exception as e: # VM might be deleted just after we get the list. # This is OK, just skip.
- wok_log.debug('Error processing VM stats: %s', e.message) + wok_log.debug(f'Error processing VM stats: {e}') def _get_percentage_cpu_usage(self, vm_uuid, info, seconds): prevCpuTime = self.stats[vm_uuid].get('cputime', 0) @@ -1198,7 +1286,7 @@ def _get_percentage_cpu_usage(self, vm_uuid, info, seconds): cpus = info[3] cpuTime = info[4] - prevCpuTime - base = (((cpuTime) * 100.0) / (seconds * 1000.0 * 1000.0 * 1000.0)) + base = ((cpuTime) * 100.0) / (seconds * 1000.0 * 1000.0 * 1000.0) percentage = max(0.0, min(100.0, base / cpus)) self.stats[vm_uuid].update({'cputime': info[4], 'cpu': percentage}) @@ -1208,7 +1296,7 @@ def _get_percentage_mem_usage(self, vm_uuid, dom, seconds): memStats = dom.memoryStats() if ('available' in memStats) and ('unused' in memStats): memUsed = memStats.get('available') - memStats.get('unused') - percentage = ((memUsed * 100.0) / memStats.get('available')) + percentage = (memUsed * 100.0) / memStats.get('available') elif ('rss' in memStats) and ('actual' in memStats): percentage = memStats.get('rss') * 100.0 / memStats.get('actual') else: @@ -1242,8 +1330,14 @@ def _get_network_io_rate(self, vm_uuid, dom, seconds): rate = rx_stats + tx_stats max_net_io = round(max(currentMaxNetRate, int(rate)), 1) - self.stats[vm_uuid].update({'net_io': rate, 'max_net_io': max_net_io, - 'netRxKB': netRxKB, 'netTxKB': netTxKB}) + self.stats[vm_uuid].update( + { + 'net_io': rate, + 'max_net_io': max_net_io, + 'netRxKB': netRxKB, + 'netTxKB': netTxKB, + } + ) def _get_disk_io_rate(self, vm_uuid, dom, seconds): prevDiskRdKB = self.stats[vm_uuid].get('diskRdKB', 0) @@ -1254,8 +1348,8 @@ def _get_disk_io_rate(self, vm_uuid, dom, seconds): wr_bytes = 0 tree = ElementTree.fromstring(dom.XMLDesc(0)) - for target in tree.findall("devices/disk/target"): - dev = target.get("dev") + for target in tree.findall('devices/disk/target'): + dev = target.get('dev') io = dom.blockStats(dev) rd_bytes += io[1] wr_bytes += io[3] @@ -1269,10 +1363,14 @@ def _get_disk_io_rate(self, vm_uuid, dom, seconds): rate = rd_stats + wr_stats max_disk_io = round(max(currentMaxDiskRate, int(rate)), 1) - self.stats[vm_uuid].update({'disk_io': rate, - 'max_disk_io': max_disk_io, - 'diskRdKB': diskRdKB, - 'diskWrKB': diskWrKB}) + self.stats[vm_uuid].update( + { + 'disk_io': rate, + 'max_disk_io': max_disk_io, + 'diskRdKB': diskRdKB, + 'diskWrKB': diskWrKB, + } + ) def lookup(self, name): dom = self.get_vm(name, self.conn) @@ -1281,18 +1379,21 @@ def lookup(self, name): # command. 
info = dom.info() except libvirt.libvirtError as e: - wok_log.error('Operation error while retrieving virtual machine ' - '"%s" information: %s', name, e.message) - raise OperationFailed('KCHVM0009E', {'name': name, - 'err': e.message}) + wok_log.error( + f'Operation error while retrieving virtual machine ' + f'"{name}" information: {e}' + ) + raise OperationFailed('KCHVM0009E', {'name': name, 'err': str(e)}) state = DOM_STATE_MAP[info[0]] screenshot = None # (type, listen, port, passwd, passwdValidTo) graphics = self.get_graphics(name, self.conn) graphics_port = graphics[2] graphics_port = graphics_port if state == 'running' else None + # only take a screenshot if configured to do so + take_screenshot = kimchi_config.get('kimchi', {}).get('take_screenshot', True) try: - if state == 'running' and self._has_video(dom): + if take_screenshot and state == 'running' and self._has_video(dom): screenshot = self.vmscreenshot.lookup(name) elif state == 'shutoff': # reset vm stats when it is powered off to avoid sending @@ -1322,11 +1423,7 @@ def lookup(self, name): xml = dom.XMLDesc(0) maxvcpus = int(xpath_get_text(xml, XPATH_VCPU)[0]) - cpu_info = { - 'vcpus': info[3], - 'maxvcpus': maxvcpus, - 'topology': {}, - } + cpu_info = {'vcpus': info[3], 'maxvcpus': maxvcpus, 'topology': {}} if self.has_topology(dom): sockets = int(xpath_get_text(xml, XPATH_TOPOLOGY + '/@sockets')[0]) @@ -1345,7 +1442,7 @@ def lookup(self, name): # from Libvirt API maxMemory() function, regardeless of the VM state # Case VM changed currentMemory outside Kimchi, sum mem devs memory = dom.maxMemory() >> 10 - curr_mem = (info[2] >> 10) + curr_mem = info[2] >> 10 # On CentOS, dom.info does not retrieve memory. So, if machine does # not have memory hotplug, parse memory from xml @@ -1370,34 +1467,36 @@ def lookup(self, name): # get boot order and bootmenu boot = xpath_get_text(xml, XPATH_BOOT) - bootmenu = "yes" if "yes" in xpath_get_text(xml, XPATH_BOOTMENU) \ - else "no" - - vm_info = {'name': name, - 'title': "".join(xpath_get_text(xml, XPATH_TITLE)), - 'description': - "".join(xpath_get_text(xml, XPATH_DESCRIPTION)), - 'state': state, - 'stats': res, - 'uuid': dom.UUIDString(), - 'memory': {'current': memory, 'maxmemory': maxmemory}, - 'cpu_info': cpu_info, - 'screenshot': screenshot, - 'icon': icon, - # (type, listen, port, passwd, passwdValidTo) - 'graphics': {"type": graphics[0], - "listen": graphics[1], - "port": graphics_port, - "passwd": graphics[3], - "passwdValidTo": graphics[4]}, - 'users': users, - 'groups': groups, - 'access': 'full', - 'persistent': True if dom.isPersistent() else False, - 'bootorder': boot, - 'bootmenu': bootmenu, - 'autostart': dom.autostart() - } + bootmenu = 'yes' if 'yes' in xpath_get_text( + xml, XPATH_BOOTMENU) else 'no' + + vm_info = { + 'name': name, + 'title': ''.join(xpath_get_text(xml, XPATH_TITLE)), + 'description': ''.join(xpath_get_text(xml, XPATH_DESCRIPTION)), + 'state': state, + 'stats': res, + 'uuid': dom.UUIDString(), + 'memory': {'current': memory, 'maxmemory': maxmemory}, + 'cpu_info': cpu_info, + 'screenshot': screenshot, + 'icon': icon, + # (type, listen, port, passwd, passwdValidTo) + 'graphics': { + 'type': graphics[0], + 'listen': graphics[1], + 'port': graphics_port, + 'passwd': graphics[3], + 'passwdValidTo': graphics[4], + }, + 'users': users, + 'groups': groups, + 'access': 'full', + 'persistent': True if dom.isPersistent() else False, + 'bootorder': boot, + 'bootmenu': bootmenu, + 'autostart': dom.autostart(), + } if platform.machine() in ['s390', 's390x']: 
vm_console = xpath_get_text(xml, XPATH_DOMAIN_CONSOLE_TARGET) vm_info['console'] = vm_console[0] if vm_console else '' @@ -1413,15 +1512,15 @@ def _vm_get_disk_paths(self, dom): def get_vm(name, conn): def raise_exception(error_code): if error_code == libvirt.VIR_ERR_NO_DOMAIN: - raise NotFoundError("KCHVM0002E", {'name': name}) + raise NotFoundError('KCHVM0002E', {'name': name}) else: - raise OperationFailed("KCHVM0009E", {'name': name, - 'err': e.message}) + raise OperationFailed( + 'KCHVM0009E', {'name': name, 'err': error_code}) + conn = conn.get() FeatureTests.disable_libvirt_error_logging() try: - # outgoing text to libvirt, encode('utf-8') - return conn.lookupByName(name.encode("utf-8")) + return conn.lookupByName(name) except libvirt.libvirtError as e: name, nonascii_name = get_ascii_nonascii_name(name) if nonascii_name is None: @@ -1438,7 +1537,7 @@ def delete(self, name): conn = self.conn.get() dom = self.get_vm(name, self.conn) if not dom.isPersistent(): - raise InvalidOperation("KCHVM0036E", {'name': name}) + raise InvalidOperation('KCHVM0036E', {'name': name}) self._vmscreenshot_delete(dom.UUIDString()) paths = self._vm_get_disk_paths(dom) @@ -1454,9 +1553,10 @@ def delete(self, name): # "OperationFailed" in that case. try: snapshot_names = self.vmsnapshots.get_list(name) - except OperationFailed, e: - wok_log.error('cannot list snapshots: %s; ' - 'skipping snapshot deleting...' % e.message) + except OperationFailed as e: + wok_log.error( + f'cannot list snapshots: {e}; ' f'skipping snapshot deleting...' + ) else: for s in snapshot_names: self.vmsnapshot.delete(name, s) @@ -1464,37 +1564,35 @@ def delete(self, name): try: dom.undefine() except libvirt.libvirtError as e: - raise OperationFailed("KCHVM0021E", - {'name': name, 'err': e.get_error_message()}) + raise OperationFailed( + 'KCHVM0021E', {'name': name, 'err': e.get_error_message()} + ) for path in paths: try: vol = conn.storageVolLookupByPath(path) pool = vol.storagePoolLookupByVolume() xml = pool.XMLDesc(0) - pool_type = xpath_get_text(xml, "/pool/@type")[0] + pool_type = xpath_get_text(xml, '/pool/@type')[0] if pool_type not in READONLY_POOL_TYPE: vol.delete(0) except libvirt.libvirtError as e: - wok_log.error('Unable to get storage volume by path: %s' % - e.message) + wok_log.error(f'Unable to get storage volume by path: {e}') try: if is_s390x() and os.path.exists(path): os.remove(path) except Exception as e: - wok_log.error('Unable to delete storage path: %s' % - e.message) + wok_log.error(f'Unable to delete storage path: {e}') except Exception as e: - raise OperationFailed('KCHVOL0017E', {'err': e.message}) + raise OperationFailed('KCHVOL0017E', {'err': str(e)}) try: with self.objstore as session: session.delete('vm', dom.UUIDString(), ignore_missing=True) except Exception as e: # It is possible to delete vm without delete its database info - wok_log.error('Error deleting vm information from database: ' - '%s', e.message) + wok_log.error(f'Error deleting vm information from database: {e}') websocket.remove_proxy_token(name) @@ -1512,64 +1610,68 @@ def start(self, name): dom = self.get_vm(name, self.conn) # vm already running: return error 400 - if DOM_STATE_MAP[dom.info()[0]] == "running": - raise InvalidOperation("KCHVM0048E", {'name': name}) + if DOM_STATE_MAP[dom.info()[0]] == 'running': + raise InvalidOperation('KCHVM0048E', {'name': name}) try: dom.create() except libvirt.libvirtError as e: - raise OperationFailed("KCHVM0019E", - {'name': name, 'err': e.get_error_message()}) + raise OperationFailed( + 
'KCHVM0019E', {'name': name, 'err': e.get_error_message()} + ) def poweroff(self, name): dom = self.get_vm(name, self.conn) # vm already powered off: return error 400 - if DOM_STATE_MAP[dom.info()[0]] == "shutoff": - raise InvalidOperation("KCHVM0049E", {'name': name}) + if DOM_STATE_MAP[dom.info()[0]] == 'shutoff': + raise InvalidOperation('KCHVM0049E', {'name': name}) try: dom.destroy() except libvirt.libvirtError as e: - raise OperationFailed("KCHVM0020E", - {'name': name, 'err': e.get_error_message()}) + raise OperationFailed( + 'KCHVM0020E', {'name': name, 'err': e.get_error_message()} + ) def shutdown(self, name): dom = self.get_vm(name, self.conn) # vm already powered off: return error 400 - if DOM_STATE_MAP[dom.info()[0]] == "shutoff": - raise InvalidOperation("KCHVM0050E", {'name': name}) + if DOM_STATE_MAP[dom.info()[0]] == 'shutoff': + raise InvalidOperation('KCHVM0050E', {'name': name}) try: dom.shutdown() except libvirt.libvirtError as e: - raise OperationFailed("KCHVM0029E", - {'name': name, 'err': e.get_error_message()}) + raise OperationFailed( + 'KCHVM0029E', {'name': name, 'err': e.get_error_message()} + ) def reset(self, name): dom = self.get_vm(name, self.conn) # vm already powered off: return error 400 - if DOM_STATE_MAP[dom.info()[0]] == "shutoff": - raise InvalidOperation("KCHVM0051E", {'name': name}) + if DOM_STATE_MAP[dom.info()[0]] == 'shutoff': + raise InvalidOperation('KCHVM0051E', {'name': name}) try: dom.reset(flags=0) except libvirt.libvirtError as e: - raise OperationFailed("KCHVM0022E", - {'name': name, 'err': e.get_error_message()}) + raise OperationFailed( + 'KCHVM0022E', {'name': name, 'err': e.get_error_message()} + ) def _vm_check_serial(self, name): dom = self.get_vm(name, self.conn) xml = dom.XMLDesc(libvirt.VIR_DOMAIN_XML_SECURE) - expr = "/domain/devices/serial/@type" + expr = '/domain/devices/serial/@type' # on s390x serial is not supported if platform.machine() != 's390x' and not xpath_get_text(xml, expr): return False - expr = "/domain/devices/console/@type" + expr = '/domain/devices/console/@type' if not xpath_get_text(xml, expr): return False @@ -1580,11 +1682,11 @@ def get_graphics(name, conn): dom = VMModel.get_vm(name, conn) xml = dom.XMLDesc(libvirt.VIR_DOMAIN_XML_SECURE) - expr = "/domain/devices/graphics/@type" + expr = '/domain/devices/graphics/@type' res = xpath_get_text(xml, expr) graphics_type = res[0] if res else None - expr = "/domain/devices/graphics/@listen" + expr = '/domain/devices/graphics/@listen' res = xpath_get_text(xml, expr) graphics_listen = res[0] if res else None @@ -1604,32 +1706,38 @@ def get_graphics(name, conn): to = time.mktime(time.strptime(res[0], '%Y-%m-%dT%H:%M:%S')) graphics_passwdValidTo = to - time.mktime(time.gmtime()) - return (graphics_type, graphics_listen, graphics_port, - graphics_passwd, graphics_passwdValidTo) + return ( + graphics_type, + graphics_listen, + graphics_port, + graphics_passwd, + graphics_passwdValidTo, + ) def serial(self, name): if not self._vm_check_serial(name): - raise OperationFailed("KCHVM0076E", {'name': name}) + raise OperationFailed('KCHVM0076E', {'name': name}) if not os.path.isdir(serialconsole.BASE_DIRECTORY): try: os.mkdir(serialconsole.BASE_DIRECTORY) - except OSError as e: - raise OperationFailed("KCHVM0081E", - {'dir': serialconsole.BASE_DIRECTORY}) + except OSError: + raise OperationFailed( + 'KCHVM0081E', {'dir': serialconsole.BASE_DIRECTORY} + ) - websocket.add_proxy_token(name.encode('utf-8')+'-console', - os.path.join(serialconsole.BASE_DIRECTORY, - 
name.encode('utf-8')), True) + websocket.add_proxy_token( + name + + '-console', os.path.join(serialconsole.BASE_DIRECTORY, name), True + ) try: - proc = serialconsole.main(name.encode('utf-8'), - self.conn.get().getURI()) + proc = serialconsole.main(name, self.conn.get().getURI()) proc.join(2) if not proc.is_alive(): - raise OperationFailed("KCHVM0082E", {'name': name}) + raise OperationFailed('KCHVM0082E', {'name': name}) self._serial_procs.append(proc) @@ -1637,20 +1745,20 @@ def serial(self, name): raise except Exception as e: - wok_log.error(e.message) - raise OperationFailed("KCHVM0077E", {'name': name}) + wok_log.error(str(e)) + raise OperationFailed('KCHVM0077E', {'name': name}) def connect(self, name): # (type, listen, port, passwd, passwdValidTo) graphics_port = self.get_graphics(name, self.conn)[2] if graphics_port is not None: - websocket.add_proxy_token(name.encode('utf-8'), graphics_port) + websocket.add_proxy_token(name, graphics_port) else: - raise OperationFailed("KCHVM0010E", {'name': name}) + raise OperationFailed('KCHVM0010E', {'name': name}) def _vmscreenshot_delete(self, vm_uuid): - screenshot = VMScreenshotModel.get_screenshot(vm_uuid, self.objstore, - self.conn) + screenshot = VMScreenshotModel.get_screenshot( + vm_uuid, self.objstore, self.conn) screenshot.delete() try: with self.objstore as session: @@ -1658,8 +1766,9 @@ def _vmscreenshot_delete(self, vm_uuid): except Exception as e: # It is possible to continue Kimchi executions without delete # screenshots - wok_log.error('Error trying to delete vm screenshot from ' - 'database due error: %s', e.message) + wok_log.error( + f'Error trying to delete vm screenshot from ' f'database due error: {e}' + ) def suspend(self, name): """Suspend the virtual machine's execution and puts it in the @@ -1677,9 +1786,8 @@ def suspend(self, name): try: vir_dom.suspend() - except libvirt.libvirtError, e: - raise OperationFailed('KCHVM0038E', {'name': name, - 'err': e.message}) + except libvirt.libvirtError as e: + raise OperationFailed('KCHVM0038E', {'name': name, 'err': str(e)}) def resume(self, name): """Resume the virtual machine's execution and puts it in the @@ -1698,48 +1806,39 @@ def resume(self, name): try: vir_dom.resume() - except libvirt.libvirtError, e: - raise OperationFailed('KCHVM0040E', {'name': name, - 'err': e.message}) + except libvirt.libvirtError as e: + raise OperationFailed('KCHVM0040E', {'name': name, 'err': str(e)}) def _check_if_host_not_localhost(self, remote_host): hostname = socket.gethostname() if remote_host in ['localhost', '127.0.0.1', hostname]: - raise OperationFailed("KCHVM0055E", {'host': remote_host}) + raise OperationFailed('KCHVM0055E', {'host': remote_host}) - def _check_if_migrating_same_arch_hypervisor(self, remote_host, - user='root'): + def _check_if_migrating_same_arch_hypervisor(self, remote_host, user='root'): remote_conn = None try: - remote_conn = self._get_remote_libvirt_conn( - remote_host, - user - ) + remote_conn = self._get_remote_libvirt_conn(remote_host, user) source_hyp = self.conn.get().getType() dest_hyp = remote_conn.getType() if source_hyp != dest_hyp: raise OperationFailed( - "KCHVM0065E", - { - 'host': remote_host, - 'srchyp': source_hyp, - 'desthyp': dest_hyp - } + 'KCHVM0065E', + {'host': remote_host, 'srchyp': source_hyp, 'desthyp': dest_hyp}, ) source_arch = self.conn.get().getInfo()[0] dest_arch = remote_conn.getInfo()[0] if source_arch != dest_arch: raise OperationFailed( - "KCHVM0064E", + 'KCHVM0064E', { 'host': remote_host, 'srcarch': source_arch, - 
'destarch': dest_arch - } + 'destarch': dest_arch, + }, ) - except Exception, e: - raise OperationFailed("KCHVM0066E", {'error': e.message}) + except Exception as e: + raise OperationFailed('KCHVM0066E', {'error': str(e)}) finally: if remote_conn: @@ -1762,10 +1861,15 @@ def _get_local_ppc64_subpercore(): return local_sub_per_core def _get_remote_ppc64_subpercore(remote_host, user): - username_host = "%s@%s" % (user, remote_host) - ssh_cmd = ['ssh', '-oNumberOfPasswordPrompts=0', - '-oStrictHostKeyChecking=no', username_host, - 'ppc64_cpu', '--subcores-per-core'] + username_host = f'{user}@{remote_host}' + ssh_cmd = [ + 'ssh', + '-oNumberOfPasswordPrompts=0', + '-oStrictHostKeyChecking=no', + username_host, + 'ppc64_cpu', + '--subcores-per-core', + ] out, err, returncode = run_command(ssh_cmd, 5, silent=True) if returncode != 0: return None @@ -1779,33 +1883,37 @@ def _get_remote_ppc64_subpercore(remote_host, user): remote_sub_per_core = _get_remote_ppc64_subpercore(remote_host, user) if local_sub_per_core != remote_sub_per_core: - raise OperationFailed("KCHVM0067E", {'host': remote_host}) - - def _check_if_password_less_login_enabled(self, remote_host, - user, password): - username_host = "%s@%s" % (user, remote_host) - ssh_cmd = ['ssh', '-oNumberOfPasswordPrompts=0', - '-oStrictHostKeyChecking=no', username_host, - 'echo', 'hello'] + raise OperationFailed('KCHVM0067E', {'host': remote_host}) + + def _check_if_password_less_login_enabled(self, remote_host, user, password): + username_host = f'{user}@{remote_host}' + ssh_cmd = [ + 'ssh', + '-oNumberOfPasswordPrompts=0', + '-oStrictHostKeyChecking=no', + username_host, + 'echo', + 'hello', + ] stdout, stderr, returncode = run_command(ssh_cmd, 5, silent=True) if returncode != 0: if password is None: - raise OperationFailed("KCHVM0056E", - {'host': remote_host, 'user': user}) + raise OperationFailed( + 'KCHVM0056E', {'host': remote_host, 'user': user}) else: self._set_password_less_login(remote_host, user, password) def _set_password_less_login(self, remote_host, user, passwd): - home_dir = '/root' if user is 'root' else '/home/%s' % user + home_dir = '/root' if user == 'root' else f'/home/{user}' - id_rsa_file = "%s/.ssh/id_rsa" % home_dir + id_rsa_file = f'{home_dir}/.ssh/id_rsa' id_rsa_pub_file = id_rsa_file + '.pub' ssh_port = 22 ssh_client = None def read_id_rsa_pub_file(): data = None - with open(id_rsa_pub_file, "r") as id_file: + with open(id_rsa_pub_file, 'r') as id_file: data = id_file.read() return data @@ -1813,23 +1921,20 @@ def create_root_ssh_key_if_required(): if os.path.isfile(id_rsa_pub_file): return - with open("/dev/zero") as zero_input: + with open('/dev/zero') as zero_input: cmd = ['ssh-keygen', '-q', '-N', '', '-f', id_rsa_file] proc = subprocess.Popen( - cmd, - stdin=zero_input, - stdout=open(os.devnull, 'wb') + cmd, stdin=zero_input, stdout=open(os.devnull, 'wb') ) out, err = proc.communicate() if not os.path.isfile(id_rsa_pub_file): - raise OperationFailed("KCHVM0070E") + raise OperationFailed('KCHVM0070E') - if user is not 'root': + if user != 'root': id_rsa_content = read_id_rsa_pub_file() updated_content = id_rsa_content.replace( - ' root@', ' %s@' % user - ) + ' root@', f' {user}@') with open(id_rsa_pub_file, 'w+') as f: f.write(updated_content) @@ -1841,26 +1946,25 @@ def create_root_ssh_key_if_required(): def get_ssh_client(remote_host, user, passwd): ssh_client = paramiko.SSHClient() ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) - ssh_client.connect(remote_host, ssh_port, username=user, - 
password=passwd, timeout=4) + ssh_client.connect( + remote_host, ssh_port, username=user, password=passwd, timeout=4 + ) return ssh_client def append_id_rsa_to_remote_authorized_keys(ssh_client, id_rsa_data): sftp_client = ssh_client.open_sftp() - ssh_dir = '%s/.ssh' % home_dir + ssh_dir = f'{home_dir}/.ssh' try: sftp_client.chdir(ssh_dir) except IOError: raise OperationFailed( - "KCHVM0089E", - {'host': remote_host, 'user': user, 'sshdir': ssh_dir} + 'KCHVM0089E', {'host': remote_host, + 'user': user, 'sshdir': ssh_dir} ) file_handler = sftp_client.file( - '%s/.ssh/authorized_keys' % home_dir, - mode='a', - bufsize=1 + f'{home_dir}/.ssh/authorized_keys', mode='a', bufsize=1 ) file_handler.write(id_rsa_data) file_handler.flush() @@ -1872,49 +1976,41 @@ def append_id_rsa_to_remote_authorized_keys(ssh_client, id_rsa_data): create_root_ssh_key_if_required() id_rsa_data = read_id_rsa_pub_file() ssh_client = get_ssh_client(remote_host, user, passwd) - append_id_rsa_to_remote_authorized_keys( - ssh_client, - id_rsa_data - ) - except Exception, e: + append_id_rsa_to_remote_authorized_keys(ssh_client, id_rsa_data) + except Exception as e: raise OperationFailed( - "KCHVM0068E", - {'host': remote_host, 'user': user, 'error': e.message} + 'KCHVM0068E', {'host': remote_host, + 'user': user, 'error': str(e)} ) finally: if ssh_client: ssh_client.close() - def _check_remote_libvirt_conn(self, remote_host, - user='root', transport='ssh'): + def _check_remote_libvirt_conn(self, remote_host, user='root', transport='ssh'): - dest_uri = 'qemu+%s://%s@%s/system' % (transport, user, remote_host) + dest_uri = f'qemu+{transport}://{user}@{remote_host}/system' cmd = ['virsh', '-c', dest_uri, 'list'] - proc = subprocess.Popen(cmd, stdout=subprocess.PIPE, - shell=True, preexec_fn=os.setsid) + proc = subprocess.Popen( + cmd, stdout=subprocess.PIPE, shell=True, preexec_fn=os.setsid + ) timeout = 0 while proc.poll() is None: time.sleep(1) timeout += 1 if timeout == 5: os.killpg(os.getpgid(proc.pid), signal.SIGTERM) - raise OperationFailed("KCHVM0090E", - {'host': remote_host, 'user': user}) + raise OperationFailed( + 'KCHVM0090E', {'host': remote_host, 'user': user}) - def _get_remote_libvirt_conn(self, remote_host, - user='root', transport='ssh'): - dest_uri = 'qemu+%s://%s@%s/system' % (transport, user, remote_host) + def _get_remote_libvirt_conn(self, remote_host, user='root', transport='ssh'): + dest_uri = f'qemu+{transport}://{user}@{remote_host}/system' # TODO: verify why LibvirtConnection(dest_uri) does not work here return libvirt.open(dest_uri) def migration_pre_check(self, remote_host, user, password): self._check_if_host_not_localhost(remote_host) - self._check_if_password_less_login_enabled( - remote_host, - user, - password - ) + self._check_if_password_less_login_enabled(remote_host, user, password) self._check_remote_libvirt_conn(remote_host, user) self._check_if_migrating_same_arch_hypervisor(remote_host, user) @@ -1922,35 +2018,36 @@ def migration_pre_check(self, remote_host, user, password): self._check_ppc64_subcores_per_core(remote_host, user) def _check_if_path_exists_in_remote_host(self, path, remote_host, user): - username_host = "%s@%s" % (user, remote_host) - cmd = ['ssh', '-oStrictHostKeyChecking=no', username_host, - 'test', '-e', path] + username_host = f'{user}@{remote_host}' + cmd = ['ssh', '-oStrictHostKeyChecking=no', + username_host, 'test', '-e', path] _, _, returncode = run_command(cmd, 5, silent=True) return returncode == 0 def _get_vm_devices_infos(self, vm_name): dom = 
VMModel.get_vm(vm_name, self.conn) - infos = [get_vm_disk_info(dom, dev_name) - for dev_name in get_vm_disks(dom).keys()] + infos = [ + get_vm_disk_info(dom, dev_name) for dev_name in get_vm_disks(dom).keys() + ] return infos def _check_if_nonshared_migration(self, vm_name, remote_host, user): for dev_info in self._get_vm_devices_infos(vm_name): dev_path = dev_info.get('path') if not self._check_if_path_exists_in_remote_host( - dev_path, remote_host, user): + dev_path, remote_host, user + ): return True return False def _create_remote_path(self, path, remote_host, user): - username_host = "%s@%s" % (user, remote_host) - cmd = ['ssh', '-oStrictHostKeyChecking=no', username_host, - 'touch', path] + username_host = f'{user}@{remote_host}' + cmd = ['ssh', '-oStrictHostKeyChecking=no', + username_host, 'touch', path] _, _, returncode = run_command(cmd, 5, silent=True) if returncode != 0: raise OperationFailed( - "KCHVM0061E", - {'path': path, 'host': remote_host, 'user': user} + 'KCHVM0061E', {'path': path, 'host': remote_host, 'user': user} ) def _get_img_size(self, disk_path): @@ -1958,52 +2055,46 @@ def _get_img_size(self, disk_path): conn = self.conn.get() vol_obj = conn.storageVolLookupByPath(disk_path) return vol_obj.info()[1] - except Exception, e: + except Exception as e: raise OperationFailed( - "KCHVM0062E", - {'path': disk_path, 'error': e.message} - ) + 'KCHVM0062E', {'path': disk_path, 'error': str(e)}) def _create_remote_disk(self, disk_info, remote_host, user): - username_host = "%s@%s" % (user, remote_host) + username_host = f'{user}@{remote_host}' disk_fmt = disk_info.get('format') disk_path = disk_info.get('path') disk_size = self._get_img_size(disk_path) - cmd = ['ssh', '-oStrictHostKeyChecking=no', username_host, - 'qemu-img', 'create', '-f', disk_fmt, - disk_path, str(disk_size)] + cmd = [ + 'ssh', + '-oStrictHostKeyChecking=no', + username_host, + 'qemu-img', + 'create', + '-f', + disk_fmt, + disk_path, + str(disk_size), + ] out, err, returncode = run_command(cmd, silent=True) if returncode != 0: raise OperationFailed( - "KCHVM0063E", - { - 'error': err, - 'path': disk_path, - 'host': remote_host, - 'user': user - } + 'KCHVM0063E', + {'error': err, 'path': disk_path, + 'host': remote_host, 'user': user}, ) def _create_vm_remote_paths(self, vm_name, remote_host, user): for dev_info in self._get_vm_devices_infos(vm_name): dev_path = dev_info.get('path') if not self._check_if_path_exists_in_remote_host( - dev_path, remote_host, user): + dev_path, remote_host, user + ): if dev_info.get('type') == 'cdrom': - self._create_remote_path( - dev_path, - remote_host, - user - ) + self._create_remote_path(dev_path, remote_host, user) else: - self._create_remote_disk( - dev_info, - remote_host, - user - ) + self._create_remote_disk(dev_info, remote_host, user) - def migrate(self, name, remote_host, user=None, password=None, - enable_rdma=None): + def migrate(self, name, remote_host, user=None, password=None, enable_rdma=None): name = name.decode('utf-8') remote_host = remote_host.decode('utf-8') @@ -2017,19 +2108,19 @@ def migrate(self, name, remote_host, user=None, password=None, dest_conn = self._get_remote_libvirt_conn(remote_host, user) non_shared = self._check_if_nonshared_migration( - name, - remote_host, - user - ) - - params = {'name': name, - 'dest_conn': dest_conn, - 'non_shared': non_shared, - 'remote_host': remote_host, - 'user': user, - 'enable_rdma': enable_rdma} - task_id = AsyncTask('/plugins/kimchi/vms/%s/migrate' % name, - self._migrate_task, params).id + name, 
remote_host, user) + + params = { + 'name': name, + 'dest_conn': dest_conn, + 'non_shared': non_shared, + 'remote_host': remote_host, + 'user': user, + 'enable_rdma': enable_rdma, + } + task_id = AsyncTask( + f'/plugins/kimchi/vms/{name}/migrate', self._migrate_task, params + ).id return self.task.lookup(task_id) @@ -2048,23 +2139,17 @@ def _migrate_task(self, cb, params): flags = libvirt.VIR_MIGRATE_PEER2PEER if state == 'shutoff': - flags |= (libvirt.VIR_MIGRATE_OFFLINE | - libvirt.VIR_MIGRATE_PERSIST_DEST) + flags |= libvirt.VIR_MIGRATE_OFFLINE | libvirt.VIR_MIGRATE_PERSIST_DEST elif state in ['running', 'paused']: flags |= libvirt.VIR_MIGRATE_LIVE | libvirt.VIR_MIGRATE_TUNNELLED if dom.isPersistent(): flags |= libvirt.VIR_MIGRATE_PERSIST_DEST else: dest_conn.close() - raise OperationFailed("KCHVM0057E", {'name': name, - 'state': state}) + raise OperationFailed('KCHVM0057E', {'name': name, 'state': state}) if non_shared: flags |= libvirt.VIR_MIGRATE_NON_SHARED_DISK - self._create_vm_remote_paths( - name, - remote_host, - user - ) + self._create_vm_remote_paths(name, remote_host, user) try: if enable_rdma: @@ -2074,8 +2159,7 @@ def _migrate_task(self, cb, params): dom.migrate(dest_conn, flags) except libvirt.libvirtError as e: cb('Migrate failed', False) - raise OperationFailed('KCHVM0058E', {'err': e.message, - 'name': name}) + raise OperationFailed('KCHVM0058E', {'err': str(e), 'name': name}) finally: dest_conn.close() @@ -2092,20 +2176,23 @@ def lookup(self, name): d_info = dom.info() vm_uuid = dom.UUIDString() if DOM_STATE_MAP[d_info[0]] != 'running': - raise NotFoundError("KCHVM0004E", {'name': name}) + raise NotFoundError('KCHVM0004E', {'name': name}) screenshot = self.get_screenshot(vm_uuid, self.objstore, self.conn) img_path = screenshot.lookup() # screenshot info changed after scratch generation try: with self.objstore as session: - session.store('screenshot', vm_uuid, screenshot.info, - get_kimchi_version()) + session.store( + 'screenshot', vm_uuid, screenshot.info, get_kimchi_version() + ) except Exception as e: # It is possible to continue Kimchi executions without store # screenshots - wok_log.error('Error trying to update database with guest ' - 'screenshot information due error: %s', e.message) + wok_log.error( + f'Error trying to update database with guest ' + f'screenshot information due error: {e}' + ) return img_path @staticmethod @@ -2116,15 +2203,17 @@ def get_screenshot(vm_uuid, objstore, conn): params = session.get('screenshot', vm_uuid) except NotFoundError: params = {'uuid': vm_uuid} - session.store('screenshot', vm_uuid, params, - get_kimchi_version()) + session.store('screenshot', vm_uuid, + params, get_kimchi_version()) except Exception as e: # The 'except' outside of 'with' is necessary to catch possible # exception from '__exit__' when calling 'session.store' # It is possible to continue Kimchi vm executions without # screenshots - wok_log.error('Error trying to update database with guest ' - 'screenshot information due error: %s', e.message) + wok_log.error( + f'Error trying to update database with guest ' + f'screenshot information due error:{e}' + ) return LibvirtVMScreenshot(params, conn) @@ -2138,7 +2227,7 @@ def handler(stream, buf, opaque): fd = opaque os.write(fd, buf) - fd = os.open(thumbnail, os.O_WRONLY | os.O_TRUNC | os.O_CREAT, 0644) + fd = os.open(thumbnail, os.O_WRONLY | os.O_TRUNC | os.O_CREAT, 0o644) try: conn = self.conn.get() dom = conn.lookupByUUIDString(self.vm_uuid) @@ -2149,9 +2238,9 @@ def handler(stream, buf, opaque): except 
libvirt.libvirtError: try: stream.abort() - except: + except Exception: pass - raise NotFoundError("KCHVM0006E", {'name': vm_name}) + raise NotFoundError('KCHVM0006E', {'name': vm_name}) else: stream.finish() finally: diff --git a/model/vmsnapshots.py b/model/vmsnapshots.py index 8c30a5370..d3ee6fa7e 100644 --- a/model/vmsnapshots.py +++ b/model/vmsnapshots.py @@ -16,20 +16,21 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA +import time import libvirt import lxml.etree as ET -import time from lxml import objectify from lxml.builder import E - from wok.asynctask import AsyncTask -from wok.exception import InvalidOperation, NotFoundError, OperationFailed -from wok.xmlutils.utils import xpath_get_text +from wok.exception import InvalidOperation +from wok.exception import NotFoundError +from wok.exception import OperationFailed from wok.model.tasks import TaskModel - from wok.plugins.kimchi.model.vms import VMModel -from wok.plugins.kimchi.model.vmstorages import VMStorageModel, VMStoragesModel +from wok.plugins.kimchi.model.vmstorages import VMStorageModel +from wok.plugins.kimchi.model.vmstorages import VMStoragesModel +from wok.xmlutils.utils import xpath_get_text class VMSnapshotsModel(object): @@ -65,14 +66,18 @@ def create(self, vm_name, params=None): format = storage['format'] if type != u'cdrom' and format != u'qcow2': - raise InvalidOperation('KCHSNAP0010E', {'vm': vm_name, - 'format': format}) + raise InvalidOperation( + 'KCHSNAP0010E', {'vm': vm_name, 'format': format} + ) - name = params.get('name', unicode(int(time.time()))) + name = params.get('name', str(int(time.time()))) task_params = {'vm_name': vm_name, 'name': name} - taskid = AsyncTask(u'/plugins/kimchi/vms/%s/snapshots/%s' % (vm_name, - name), self._create_task, task_params).id + taskid = AsyncTask( + u'/plugins/kimchi/vms/%s/snapshots/%s' % (vm_name, name), + self._create_task, + task_params, + ).id return self.task.lookup(taskid) def _create_task(self, cb, params): @@ -90,17 +95,17 @@ def _create_task(self, cb, params): cb('building snapshot XML') root_elem = E.domainsnapshot() root_elem.append(E.name(name)) - xml = ET.tostring(root_elem, encoding='utf-8') + xml = ET.tostring(root_elem, encoding='utf-8').decode('utf-8') try: cb('fetching snapshot domain') vir_dom = VMModel.get_vm(vm_name, self.conn) cb('creating snapshot') vir_dom.snapshotCreateXML(xml, 0) - except (NotFoundError, OperationFailed, libvirt.libvirtError), e: - raise OperationFailed('KCHSNAP0002E', - {'name': name, 'vm': vm_name, - 'err': e.message}) + except (NotFoundError, OperationFailed, libvirt.libvirtError) as e: + raise OperationFailed( + 'KCHSNAP0002E', {'name': name, 'vm': vm_name, 'err': str(e)} + ) cb('OK', True) @@ -109,11 +114,10 @@ def get_list(self, vm_name): try: vir_snaps = vir_dom.listAllSnapshots(0) - return sorted([s.getName().decode('utf-8') for s in vir_snaps], - key=unicode.lower) - except libvirt.libvirtError, e: - raise OperationFailed('KCHSNAP0005E', - {'vm': vm_name, 'err': e.message}) + return sorted([s.getName() for s in vir_snaps], key=str.lower) + except libvirt.libvirtError as e: + raise OperationFailed( + 'KCHSNAP0005E', {'vm': vm_name, 'err': str(e)}) class VMSnapshotModel(object): @@ -124,32 +128,34 @@ def lookup(self, vm_name, name): vir_snap = self.get_vmsnapshot(vm_name, name) try: - snap_xml_str = vir_snap.getXMLDesc(0).decode('utf-8') - except 
libvirt.libvirtError, e: - raise OperationFailed('KCHSNAP0004E', {'name': name, - 'vm': vm_name, - 'err': e.message}) + snap_xml_str = vir_snap.getXMLDesc(0) + except libvirt.libvirtError as e: + raise OperationFailed( + 'KCHSNAP0004E', {'name': name, 'vm': vm_name, 'err': str(e)} + ) snap_xml = objectify.fromstring(snap_xml_str) try: - parent = unicode(snap_xml.parent.name) + parent = str(snap_xml.parent.name) except AttributeError: parent = u'' - return {'created': unicode(snap_xml.creationTime), - 'name': unicode(snap_xml.name), - 'parent': parent, - 'state': unicode(snap_xml.state)} + return { + 'created': str(snap_xml.creationTime), + 'name': str(snap_xml.name), + 'parent': parent, + 'state': str(snap_xml.state), + } def delete(self, vm_name, name): try: vir_snap = self.get_vmsnapshot(vm_name, name) vir_snap.delete(0) - except libvirt.libvirtError, e: - raise OperationFailed('KCHSNAP0006E', {'name': name, - 'vm': vm_name, - 'err': e.message}) + except libvirt.libvirtError as e: + raise OperationFailed( + 'KCHSNAP0006E', {'name': name, 'vm': vm_name, 'err': str(e)} + ) def revert(self, vm_name, name): try: @@ -158,28 +164,29 @@ def revert(self, vm_name, name): vir_dom.revertToSnapshot(vir_snap, 0) # get vm name recorded in the snapshot and return new uri params - vm_new_name = xpath_get_text(vir_snap.getXMLDesc(0), - 'domain/name')[0] + vm_new_name = xpath_get_text( + vir_snap.getXMLDesc(0), 'domain/name')[0] return [vm_new_name, name] - except libvirt.libvirtError, e: - raise OperationFailed('KCHSNAP0009E', {'name': name, - 'vm': vm_name, - 'err': e.message}) + except libvirt.libvirtError as e: + raise OperationFailed( + 'KCHSNAP0009E', {'name': name, 'vm': vm_name, 'err': str(e)} + ) def get_vmsnapshot(self, vm_name, name): vir_dom = VMModel.get_vm(vm_name, self.conn) try: return vir_dom.snapshotLookupByName(name, 0) - except libvirt.libvirtError, e: + except libvirt.libvirtError as e: code = e.get_error_code() if code == libvirt.VIR_ERR_NO_DOMAIN_SNAPSHOT: - raise NotFoundError('KCHSNAP0003E', {'name': name, - 'vm': vm_name}) + raise NotFoundError( + 'KCHSNAP0003E', {'name': name, 'vm': vm_name}) else: - raise OperationFailed('KCHSNAP0004E', {'name': name, - 'vm': vm_name, - 'err': e.message}) + raise OperationFailed( + 'KCHSNAP0004E', {'name': name, + 'vm': vm_name, 'err': str(e)} + ) class CurrentVMSnapshotModel(object): @@ -192,12 +199,12 @@ def lookup(self, vm_name): try: vir_snap = vir_dom.snapshotCurrent(0) - snap_name = vir_snap.getName().decode('utf-8') - except libvirt.libvirtError, e: + snap_name = vir_snap.getName() + except libvirt.libvirtError as e: if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN_SNAPSHOT: return {} - raise OperationFailed('KCHSNAP0008E', - {'vm': vm_name, 'err': e.message}) + raise OperationFailed( + 'KCHSNAP0008E', {'vm': vm_name, 'err': str(e)}) return self.vmsnapshot.lookup(vm_name, snap_name) diff --git a/model/vmstorages.py b/model/vmstorages.py index db6812193..0ebb43b91 100644 --- a/model/vmstorages.py +++ b/model/vmstorages.py @@ -16,24 +16,28 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - import os import string -from lxml import etree -from wok.exception import InvalidOperation, InvalidParameter, NotFoundError +from lxml import etree +from wok.exception import InvalidOperation +from wok.exception import InvalidParameter +from wok.exception import NotFoundError from 
wok.exception import OperationFailed -from wok.utils import wok_log - from wok.plugins.kimchi.model.config import CapabilitiesModel from wok.plugins.kimchi.model.diskutils import get_disk_used_by from wok.plugins.kimchi.model.storagevolumes import StorageVolumeModel from wok.plugins.kimchi.model.utils import get_vm_config_flag -from wok.plugins.kimchi.model.vms import DOM_STATE_MAP, VMModel +from wok.plugins.kimchi.model.vms import DOM_STATE_MAP +from wok.plugins.kimchi.model.vms import VMModel from wok.plugins.kimchi.osinfo import lookup -from wok.plugins.kimchi.utils import create_disk_image, is_s390x -from wok.plugins.kimchi.xmlutils.disk import get_device_node, get_disk_xml -from wok.plugins.kimchi.xmlutils.disk import get_vm_disk_info, get_vm_disks +from wok.plugins.kimchi.utils import create_disk_image +from wok.plugins.kimchi.utils import is_s390x +from wok.plugins.kimchi.xmlutils.disk import get_device_node +from wok.plugins.kimchi.xmlutils.disk import get_disk_xml +from wok.plugins.kimchi.xmlutils.disk import get_vm_disk_info +from wok.plugins.kimchi.xmlutils.disk import get_vm_disks +from wok.utils import wok_log HOTPLUG_TYPE = ['scsi', 'virtio'] @@ -42,9 +46,9 @@ def _get_device_bus(dev_type, dom): try: version, distro = VMModel.vm_get_os_metadata(dom) - except: + except Exception: version, distro = ('unknown', 'unknown') - return lookup(distro, version)[dev_type+'_bus'] + return lookup(distro, version)[dev_type + '_bus'] class VMStoragesModel(object): @@ -72,45 +76,49 @@ def _get_available_bus_address(self, bus_type, vm_name): valid_id.remove((bus_id, unit_id)) continue if not valid_id: - raise OperationFailed('KCHVMSTOR0014E', - {'type': 'ide', 'limit': 4}) + raise OperationFailed( + 'KCHVMSTOR0014E', {'type': 'ide', 'limit': 4}) else: - address = {'controller': controller_id, - 'bus': valid_id[0][0], 'unit': valid_id[0][1]} + address = { + 'controller': controller_id, + 'bus': valid_id[0][0], + 'unit': valid_id[0][1], + } return dict(address=address) def create(self, vm_name, params): - vol_model = None # Path will never be blank due to API.json verification. # There is no need to cover this case here. 
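A note on the check that opens create() just below: the handler insists that callers pass exactly one of 'vol' or 'path', and expresses that with a boolean exclusive-or. A minimal standalone sketch of the idiom, using a hypothetical helper name rather than the handler itself:

# Sketch only: mirrors the `('vol' in params) ^ ('path' in params)` test.
# bool ^ bool is True exactly when the two membership tests disagree.
def has_exactly_one_source(params):
    return ('vol' in params) ^ ('path' in params)

assert has_exactly_one_source({'vol': 'disk0.img'})
assert has_exactly_one_source({'path': '/var/lib/kimchi/images/a.qcow2'})
assert not has_exactly_one_source({})
assert not has_exactly_one_source({'vol': 'disk0.img', 'path': '/tmp/a.img'})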
if not ('vol' in params) ^ ('path' in params): if not is_s390x(): - raise InvalidParameter("KCHVMSTOR0017E") + raise InvalidParameter('KCHVMSTOR0017E') if 'dir_path' not in params: - raise InvalidParameter("KCHVMSTOR0019E") + raise InvalidParameter('KCHVMSTOR0019E') dom = VMModel.get_vm(vm_name, self.conn) params['bus'] = _get_device_bus(params['type'], dom) if is_s390x() and params['type'] == 'disk' and 'dir_path' in params: if 'format' not in params: - raise InvalidParameter("KCHVMSTOR0020E") + raise InvalidParameter('KCHVMSTOR0020E') size = params['size'] name = params['name'] dir_path = params.get('dir_path') - params['path'] = dir_path + "/" + name + params['path'] = dir_path + '/' + name if os.path.exists(params['path']): - raise InvalidParameter("KCHVMSTOR0021E", - {'disk_path': params['path']}) - create_disk_image(format_type=params['format'], - path=params['path'], capacity=size) + raise InvalidParameter( + 'KCHVMSTOR0021E', {'disk_path': params['path']}) + create_disk_image( + format_type=params['format'], path=params['path'], capacity=size + ) else: params['format'] = 'raw' - dev_list = [dev for dev, bus in get_vm_disks(dom).iteritems() - if bus == params['bus']] + dev_list = [ + dev for dev, bus in get_vm_disks(dom).items() if bus == params['bus'] + ] dev_list.sort() if len(dev_list) == 0: params['index'] = 0 @@ -118,39 +126,14 @@ def create(self, vm_name, params): char = dev_list.pop()[2] params['index'] = string.ascii_lowercase.index(char) + 1 - if (params['bus'] not in HOTPLUG_TYPE and - DOM_STATE_MAP[dom.info()[0]] != 'shutoff'): + if ( + params['bus'] not in HOTPLUG_TYPE and + DOM_STATE_MAP[dom.info()[0]] != 'shutoff' + ): raise InvalidOperation('KCHVMSTOR0011E') if params.get('vol'): - try: - pool = params['pool'] - vol_model = StorageVolumeModel(conn=self.conn, - objstore=self.objstore) - vol_info = vol_model.lookup(pool, params['vol']) - except KeyError: - raise InvalidParameter("KCHVMSTOR0012E") - except Exception as e: - raise InvalidParameter("KCHVMSTOR0015E", {'error': e}) - if len(vol_info['used_by']) != 0: - raise InvalidParameter("KCHVMSTOR0016E") - - valid_format = { - "disk": ["raw", "qcow", "qcow2", "qed", "vmdk", "vpc"], - "cdrom": "iso"} - - if vol_info['type'] == 'file': - if (params['type'] == 'disk' and - vol_info['format'] in valid_format[params['type']]): - params['format'] = vol_info['format'] - else: - raise InvalidParameter("KCHVMSTOR0018E", - {"format": vol_info['format'], - "type": params['type']}) - - if (params['format'] == 'raw' and not vol_info['isvalid']): - message = 'This is not a valid RAW disk image.' - raise OperationFailed('KCHVMSTOR0008E', {'error': message}) + vol_info = self._get_vol_info(params) params['path'] = vol_info['path'] params['disk'] = vol_info['type'] @@ -163,11 +146,11 @@ def create(self, vm_name, params): dom = VMModel.get_vm(vm_name, self.conn) dom.attachDeviceFlags(xml, get_vm_config_flag(dom, 'all')) except Exception as e: - raise OperationFailed("KCHVMSTOR0008E", {'error': e.message}) + raise OperationFailed('KCHVMSTOR0008E', {'error': str(e)}) # Don't put a try-block here. Let the exception be raised. If we # allow disks used_by to be out of sync, data corruption could - # occour if a disk is added to two guests unknowingly. + # occur if a disk is added to two guests unknowingly. 
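The comment above is the rationale for the bookkeeping that follows: each volume carries a used_by list, a volume already claimed by another guest is rejected up front, and the attaching VM is appended only after the device attach succeeds. A rough sketch of that life cycle with a hypothetical helper (the real checks live in _get_vol_info() and create()):

# Rough sketch of the used_by guard; not the model code itself.
def claim_volume(vol_info, vm_name):
    if vol_info['used_by']:
        raise RuntimeError(f"volume already used by {vol_info['used_by']}")
    # ... attach the disk to the guest here ...
    vol_info['used_by'].append(vm_name)

vol = {'used_by': []}
claim_volume(vol, 'guest-a')      # first attachment succeeds
# claim_volume(vol, 'guest-b')    # would raise and prevent a double attachment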
if params.get('vol'): used_by = vol_info['used_by'] used_by.append(vm_name) @@ -178,6 +161,42 @@ def get_list(self, vm_name): dom = VMModel.get_vm(vm_name, self.conn) return get_vm_disks(dom).keys() + def _get_vol_info(self, params): + try: + pool = params['pool'] + vol_model = StorageVolumeModel( + conn=self.conn, objstore=self.objstore) + vol_info = vol_model.lookup(pool, params['vol']) + except KeyError: + raise InvalidParameter('KCHVMSTOR0012E') + except Exception as e: + raise InvalidParameter('KCHVMSTOR0015E', {'error': e}) + if len(vol_info['used_by']) != 0: + raise InvalidParameter('KCHVMSTOR0016E') + + valid_format = { + 'disk': ['raw', 'qcow', 'qcow2', 'qed', 'vmdk', 'vpc'], + 'cdrom': 'iso', + } + + if vol_info['type'] == 'file': + if ( + params['type'] == 'disk' and + vol_info['format'] in valid_format[params['type']] + ): + params['format'] = vol_info['format'] + else: + raise InvalidParameter( + 'KCHVMSTOR0018E', + {'format': vol_info['format'], 'type': params['type']}, + ) + + if params['format'] == 'raw' and not vol_info['isvalid']: + message = 'This is not a valid RAW disk image.' + raise OperationFailed('KCHVMSTOR0008E', {'error': message}) + + return vol_info + class VMStorageModel(object): def __init__(self, **kargs): @@ -197,8 +216,7 @@ def delete(self, vm_name, dev_name): except NotFoundError: raise - if (bus_type not in HOTPLUG_TYPE and - DOM_STATE_MAP[dom.info()[0]] != 'shutoff'): + if bus_type not in HOTPLUG_TYPE and DOM_STATE_MAP[dom.info()[0]] != 'shutoff': raise InvalidOperation('KCHVMSTOR0011E') try: @@ -212,18 +230,24 @@ def delete(self, vm_name, dev_name): if path is not None: used_by = get_disk_used_by(self.conn, path) else: - wok_log.error("Unable to decrement volume used_by on" - " delete because no path could be found.") - dom.detachDeviceFlags(etree.tostring(disk), - get_vm_config_flag(dom, 'all')) + wok_log.error( + 'Unable to decrement volume used_by on' + ' delete because no path could be found.' + ) + dom.detachDeviceFlags( + etree.tostring(disk).decode( + 'utf-8'), get_vm_config_flag(dom, 'all') + ) except Exception as e: - raise OperationFailed("KCHVMSTOR0010E", {'error': e.message}) + raise OperationFailed('KCHVMSTOR0010E', {'error': str(e)}) if used_by is not None and vm_name in used_by: used_by.remove(vm_name) else: - wok_log.error("Unable to update %s:%s used_by on delete." - % (vm_name, dev_name)) + wok_log.error( + 'Unable to update %s:%s used_by on delete.' 
% ( + vm_name, dev_name) + ) def update(self, vm_name, dev_name, params): old_disk_used_by = None @@ -233,33 +257,36 @@ def update(self, vm_name, dev_name, params): dev_info = self.lookup(vm_name, dev_name) if dev_info['type'] != 'cdrom': - raise InvalidOperation("KCHVMSTOR0006E") + raise InvalidOperation('KCHVMSTOR0006E') params['path'] = params.get('path', '') old_disk_path = dev_info['path'] new_disk_path = params['path'] if new_disk_path != old_disk_path: # An empty path means a CD-ROM was empty or ejected: - if old_disk_path is not '': + if old_disk_path != '': old_disk_used_by = get_disk_used_by(self.conn, old_disk_path) - if new_disk_path is not '': + if new_disk_path != '': new_disk_used_by = get_disk_used_by(self.conn, new_disk_path) dev_info.update(params) dev, xml = get_disk_xml(dev_info) try: + # FIXME: when updating from local file to remote file (http) + # libvirt adds a new device with same name instead of replacing + # the existing one dom.updateDeviceFlags(xml, get_vm_config_flag(dom, 'all')) except Exception as e: - raise OperationFailed("KCHVMSTOR0009E", {'error': e.message}) + raise OperationFailed('KCHVMSTOR0009E', {'error': str(e)}) try: - if old_disk_used_by is not None and \ - vm_name in old_disk_used_by: + if old_disk_used_by is not None and vm_name in old_disk_used_by: old_disk_used_by.remove(vm_name) if new_disk_used_by is not None: new_disk_used_by.append(vm_name) except Exception as e: - wok_log.error("Unable to update dev used_by on update due to" - " %s:" % e.message) + wok_log.error( + 'Unable to update dev used_by on update due to' ' %s:' % str(e) + ) return dev diff --git a/network.py b/network.py index 69944e7a7..eedaaff2e 100644 --- a/network.py +++ b/network.py @@ -17,19 +17,18 @@ # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # - -import ethtool import glob -import ipaddr import os from distutils.spawn import find_executable +import ethtool +import ipaddr from wok.stringutils import encode_value from wok.utils import run_command -APrivateNets = ipaddr.IPNetwork("10.0.0.0/8") -BPrivateNets = ipaddr.IPNetwork("172.16.0.0/12") +APrivateNets = ipaddr.IPNetwork('10.0.0.0/8') +BPrivateNets = ipaddr.IPNetwork('172.16.0.0/12') CPrivateNets = ipaddr.IPNetwork('192.168.0.0/16') PrivateNets = [CPrivateNets, BPrivateNets, APrivateNets] DefaultNetsPool = [ipaddr.IPNetwork('192.168.122.0/23'), @@ -115,10 +114,10 @@ def vlans(): List[str]: a list with the vlans found. """ - return list(set([b.split('/')[-1] - for b in glob.glob(NET_PATH + '/*')]) & - set([b.split('/')[-1] - for b in glob.glob(PROC_NET_VLAN + '*')])) + return list( + set([b.split('/')[-1] for b in glob.glob(NET_PATH + '/*')]) & + set([b.split('/')[-1] for b in glob.glob(PROC_NET_VLAN + '*')]) + ) def is_vlan(iface): @@ -185,7 +184,7 @@ def ovs_bridges(): if not is_openvswitch_running(): return [] - ovs_cmd = find_executable("ovs-vsctl") + ovs_cmd = find_executable('ovs-vsctl') # openvswitch not installed: there is no OVS bridge configured if ovs_cmd is None: @@ -234,7 +233,7 @@ def ovs_bridge_ports(ovsbr): if not is_openvswitch_running(): return [] - ovs_cmd = find_executable("ovs-vsctl") + ovs_cmd = find_executable('ovs-vsctl') # openvswitch not installed: there is no OVS bridge configured if ovs_cmd is None: @@ -254,7 +253,7 @@ def all_interfaces(): List[str]: a list with all interfaces of the host. 
""" - return [d.rsplit("/", 1)[-1] for d in glob.glob(NET_PATH + '/*')] + return [d.rsplit('/', 1)[-1] for d in glob.glob(NET_PATH + '/*')] def slaves(bonding): @@ -348,7 +347,7 @@ def get_vlan_device(vlan): if os.path.exists(PROC_NET_VLAN + vlan): with open(PROC_NET_VLAN + vlan) as vlan_file: for line in vlan_file: - if "Device:" in line: + if 'Device:' in line: dummy, dev = line.split() break return dev @@ -451,13 +450,13 @@ def get_interface_type(iface): """ try: if is_nic(iface): - return "nic" + return 'nic' if is_bonding(iface): - return "bonding" + return 'bonding' if is_bridge(iface): - return "bridge" + return 'bridge' if is_vlan(iface): - return "vlan" + return 'vlan' return 'unknown' except IOError: return 'unknown' @@ -471,7 +470,7 @@ def get_dev_macaddr(dev): def get_dev_netaddr(dev): info = ethtool.get_interfaces_info(dev)[0] return (info.ipv4_address and - "%s/%s" % (info.ipv4_address, info.ipv4_netmask) or '') + '%s/%s' % (info.ipv4_address, info.ipv4_netmask) or '') def get_dev_netaddrs(): diff --git a/osinfo.py b/osinfo.py index 8de917f19..d8735191b 100644 --- a/osinfo.py +++ b/osinfo.py @@ -16,25 +16,26 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - import copy import glob import os import platform -import psutil from collections import defaultdict -from configobj import ConfigObj from distutils.version import LooseVersion +import psutil +from configobj import ConfigObj from wok.config import PluginPaths -from wok.utils import wok_log from wok.exception import InvalidParameter from wok.plugins.kimchi.config import kimchiPaths +from wok.utils import wok_log -SUPPORTED_ARCHS = {'x86': ('i386', 'i686', 'x86_64'), - 'power': ('ppc', 'ppc64'), - 'ppc64le': ('ppc64le'), - 's390x': ('s390x')} +SUPPORTED_ARCHS = { + 'x86': ('i386', 'i686', 'x86_64'), + 'power': ('ppc', 'ppc64'), + 'ppc64le': ('ppc64le'), + 's390x': ('s390x'), +} # Memory devices slot limits by architecture HOST_DISTRO = platform.linux_distribution() @@ -48,68 +49,103 @@ } -template_specs = {'x86': {'old': dict(disk_bus='ide', - nic_model='e1000', sound_model='ich6'), - 'modern': dict(disk_bus='virtio', - nic_model='virtio', - sound_model='ich6', - tablet_bus='usb')}, - 'power': {'old': dict(disk_bus='scsi', - nic_model='spapr-vlan', - cdrom_bus='scsi', - kbd_type="kbd", - kbd_bus='usb', mouse_bus='usb', - tablet_bus='usb'), - 'modern': dict(disk_bus='virtio', - nic_model='virtio', - cdrom_bus='scsi', - kbd_bus='usb', - kbd_type="kbd", - mouse_bus='usb', tablet_bus='usb')}, - 'ppc64le': {'old': dict(disk_bus='virtio', - nic_model='virtio', - cdrom_bus='scsi', - kbd_bus='usb', - kbd_type="keyboard", - mouse_bus='usb', tablet_bus='usb'), - 'modern': dict(disk_bus='virtio', - nic_model='virtio', - cdrom_bus='scsi', - kbd_bus='usb', - kbd_type="keyboard", - mouse_bus='usb', - tablet_bus='usb')}, - 's390x': {'old': dict(disk_bus='virtio', - nic_model='virtio', cdrom_bus='scsi'), - 'modern': dict(disk_bus='virtio', - nic_model='virtio', - cdrom_bus='scsi')}} - - -custom_specs = {'fedora': {'22': {'x86': dict(video_model='qxl')}}, - 'windows': {'xp': {'x86': dict(nic_model='pcnet')}}} - - -modern_version_bases = {'x86': {'debian': '6.0', 'ubuntu': '7.10', - 'opensuse': '10.3', 'centos': '5.3', - 'rhel': '6.0', 'fedora': '16', 'gentoo': '0', - 'sles': '11', 'arch': '0'}, - 'power': {'rhel': '6.5', 'fedora': '19', - 'ubuntu': '14.04', - 'opensuse': 
'13.1', - 'sles': '11sp3'}, - 'ppc64le': {'rhel': '6.5', 'fedora': '19', - 'ubuntu': '14.04', - 'opensuse': '13.1', - 'sles': '11sp3'}} - - -icon_available_distros = [icon[5:-4] for icon in glob.glob1('%s/images/' - % PluginPaths('kimchi').ui_dir, 'icon-*.png')] +template_specs = { + 'x86': { + 'old': dict(disk_bus='ide', nic_model='e1000', sound_model='ich6'), + 'modern': dict( + disk_bus='virtio', nic_model='virtio', sound_model='ich6', tablet_bus='usb' + ), + }, + 'power': { + 'old': dict( + disk_bus='scsi', + nic_model='spapr-vlan', + cdrom_bus='scsi', + kbd_type='kbd', + kbd_bus='usb', + mouse_bus='usb', + tablet_bus='usb', + ), + 'modern': dict( + disk_bus='virtio', + nic_model='virtio', + cdrom_bus='scsi', + kbd_bus='usb', + kbd_type='kbd', + mouse_bus='usb', + tablet_bus='usb', + ), + }, + 'ppc64le': { + 'old': dict( + disk_bus='virtio', + nic_model='virtio', + cdrom_bus='scsi', + kbd_bus='usb', + kbd_type='keyboard', + mouse_bus='usb', + tablet_bus='usb', + ), + 'modern': dict( + disk_bus='virtio', + nic_model='virtio', + cdrom_bus='scsi', + kbd_bus='usb', + kbd_type='keyboard', + mouse_bus='usb', + tablet_bus='usb', + ), + }, + 's390x': { + 'old': dict(disk_bus='virtio', nic_model='virtio', cdrom_bus='scsi'), + 'modern': dict(disk_bus='virtio', nic_model='virtio', cdrom_bus='scsi'), + }, +} + + +custom_specs = { + 'fedora': {'22': {'x86': dict(video_model='qxl')}}, + 'windows': {'xp': {'x86': dict(nic_model='pcnet')}}, +} + + +modern_version_bases = { + 'x86': { + 'debian': '6.0', + 'ubuntu': '7.10', + 'opensuse': '10.3', + 'centos': '5.3', + 'rhel': '6.0', + 'fedora': '16', + 'gentoo': '0', + 'sles': '11', + 'arch': '0', + }, + 'power': { + 'rhel': '6.5', + 'fedora': '19', + 'ubuntu': '14.04', + 'opensuse': '13.1', + 'sles': '11sp3', + }, + 'ppc64le': { + 'rhel': '6.5', + 'fedora': '19', + 'ubuntu': '14.04', + 'opensuse': '13.1', + 'sles': '11sp3', + }, +} + + +icon_available_distros = [ + icon[5:-4] + for icon in glob.glob1('%s/images/' % PluginPaths('kimchi').ui_dir, 'icon-*.png') +] def _get_arch(): - for arch, sub_archs in SUPPORTED_ARCHS.iteritems(): + for arch, sub_archs in SUPPORTED_ARCHS.items(): if os.uname()[4] in sub_archs: return arch @@ -160,10 +196,15 @@ def _get_tmpl_defaults(): if host_arch in ['s390x', 's390']: tmpl_defaults['main']['networks'] = [] - tmpl_defaults['memory'] = {'current': _get_default_template_mem(), - 'maxmemory': _get_default_template_mem()} - tmpl_defaults['storage']['disk.0'] = {'size': 10, 'format': 'qcow2', - 'pool': 'default'} + tmpl_defaults['memory'] = { + 'current': _get_default_template_mem(), + 'maxmemory': _get_default_template_mem(), + } + tmpl_defaults['storage']['disk.0'] = { + 'size': 10, + 'format': 'qcow2', + 'pool': 'default', + } is_on_s390x = True if _get_arch() == 's390x' else False if is_on_s390x: @@ -209,9 +250,11 @@ def _get_tmpl_defaults(): # On s390x if config file has both path and pool uncommented # then path should take preference. if config_pool and config_path: - wok_log.warning("Both default pool and path are specified in" + - " template.conf. Hence default pool is being" + - " ignored and only default path will be used") + wok_log.warning( + 'Both default pool and path are specified in' + + ' template.conf. 
Hence default pool is being' + + ' ignored and only default path will be used' + ) config.get('storage').get('disk.0').pop('pool') # Merge default configuration with file configuration @@ -219,8 +262,13 @@ def _get_tmpl_defaults(): # Create a dict with default values according to data structure # expected by VMTemplate - defaults = {'domain': 'kvm', 'arch': os.uname()[4], - 'cdrom_bus': 'ide', 'cdrom_index': 2, 'mouse_bus': 'ps2'} + defaults = { + 'domain': 'kvm', + 'arch': os.uname()[4], + 'cdrom_bus': 'ide', + 'cdrom_index': 2, + 'mouse_bus': 'ps2', + } # Parse main section to get networks and memory values defaults.update(default_config.pop('main')) defaults['memory'] = default_config.pop('memory') @@ -252,15 +300,19 @@ def _get_tmpl_defaults(): else: data['format'] = storage_section[disk].pop('format') else: - data['pool'] = {"name": '/plugins/kimchi/storagepools/' + - storage_section[disk].pop('pool')} + data['pool'] = { + 'name': '/plugins/kimchi/storagepools/' + + storage_section[disk].pop('pool') + } defaults['disks'].append(data) # Parse processor section to get vcpus and cpu_topology values processor_section = default_config.pop('processor') - defaults['cpu_info'] = {'vcpus': processor_section.pop('vcpus'), - 'maxvcpus': processor_section.pop('maxvcpus')} + defaults['cpu_info'] = { + 'vcpus': processor_section.pop('vcpus'), + 'maxvcpus': processor_section.pop('maxvcpus'), + } if len(processor_section.keys()) > 0: defaults['cpu_info']['topology'] = processor_section @@ -300,26 +352,25 @@ def lookup(distro, version): arch = _get_arch() # set up arch to ppc64 instead of ppc64le due to libvirt compatibility - if params["arch"] == "ppc64le": - params["arch"] = "ppc64" + if params['arch'] == 'ppc64le': + params['arch'] = 'ppc64' # On s390x, template spec does not change based on version. 
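For context on the lookup() hunks around here: the choice between the 'old' and 'modern' device sets hinges on a LooseVersion comparison against modern_version_bases. A condensed sketch of that decision with a hypothetical function name (the real logic sits inline in lookup()):

# Condensed sketch of the old/modern selection performed by lookup().
# `bases` is expected to look like modern_version_bases above.
from distutils.version import LooseVersion

def pick_spec_generation(arch, distro, version, bases):
    base = bases.get(arch, {}).get(distro)
    if base is None:
        return 'old'       # unknown distro: fall back to conservative defaults
    if LooseVersion(version) >= LooseVersion(base):
        return 'modern'    # virtio disk/nic, usb tablet, etc.
    return 'old'

# e.g. pick_spec_generation('x86', 'fedora', '22', modern_version_bases) == 'modern'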
- if params["arch"] == "s390x" or arch == "s390x": + if params['arch'] == 's390x' or arch == 's390x': params.update(template_specs[arch]['old']) if not distro: - params['os_distro'] = params['os_version'] = "unknown" + params['os_distro'] = params['os_version'] = 'unknown' elif distro in modern_version_bases[arch]: - if LooseVersion(version) >= LooseVersion( - modern_version_bases[arch][distro]): + if LooseVersion(version) >= LooseVersion(modern_version_bases[arch][distro]): params.update(template_specs[arch]['modern']) else: params.update(template_specs[arch]['old']) else: - params['os_distro'] = params['os_version'] = "unknown" + params['os_distro'] = params['os_version'] = 'unknown' params.update(template_specs[arch]['old']) # Get custom specifications specs = custom_specs.get(distro, {}) - for v, config in specs.iteritems(): + for v, config in specs.items(): if LooseVersion(version) >= LooseVersion(v): params.update(config.get(arch, {})) diff --git a/requirements-dev.txt b/requirements-dev.txt new file mode 100644 index 000000000..416634f52 --- /dev/null +++ b/requirements-dev.txt @@ -0,0 +1 @@ +pre-commit diff --git a/root.py b/root.py index d42e78701..a78e2248b 100644 --- a/root.py +++ b/root.py @@ -16,15 +16,15 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - -import cherrypy import json import os import tempfile -from wok.plugins.kimchi import config, mockmodel -from wok.plugins.kimchi.i18n import messages +import cherrypy +from wok.plugins.kimchi import config +from wok.plugins.kimchi import mockmodel from wok.plugins.kimchi.control import sub_nodes +from wok.plugins.kimchi.i18n import messages from wok.plugins.kimchi.model import model as kimchiModel from wok.plugins.kimchi.utils import upgrade_objectstore_data from wok.plugins.kimchi.utils import upgrade_objectstore_memory @@ -39,7 +39,7 @@ def __init__(self, wok_options): os.path.dirname(os.path.abspath(config.get_object_store())), os.path.abspath(config.get_distros_store()), os.path.abspath(config.get_screenshot_path()), - os.path.abspath(config.get_virtviewerfiles_path()) + os.path.abspath(config.get_virtviewerfiles_path()), ] for directory in make_dirs: if not os.path.isdir(directory): @@ -48,14 +48,16 @@ def __init__(self, wok_options): # When running on test mode, specify the objectstore location to # remove the file on server shutting down. 
That way, the system will # not suffer any change while running on test mode - if wok_options.test and (wok_options.test is True or - wok_options.test.lower() == 'true'): + if wok_options.test and ( + wok_options.test is True or wok_options.test.lower() == 'true' + ): self.objectstore_loc = tempfile.mktemp() self.model = mockmodel.MockModel(self.objectstore_loc) def remove_objectstore(): if os.path.exists(self.objectstore_loc): os.unlink(self.objectstore_loc) + cherrypy.engine.subscribe('exit', remove_objectstore) else: self.model = kimchiModel.Model() @@ -66,8 +68,12 @@ def remove_objectstore(): for ident, node in sub_nodes.items(): setattr(self, ident, node(self.model)) - self.api_schema = json.load(open(os.path.join(os.path.dirname( - os.path.abspath(__file__)), 'API.json'))) + with open( + os.path.join(os.path.dirname( + os.path.abspath(__file__)), 'API.json') + ) as fd: + self.api_schema = json.load(fd) + self.paths = config.kimchiPaths self.domain = 'kimchi' self.messages = messages @@ -77,8 +83,8 @@ def remove_objectstore(): # are necessary. if upgrade_objectstore_schema(config.get_object_store(), 'version'): upgrade_objectstore_data('icon', 'images', 'plugins/kimchi/') - upgrade_objectstore_data('storagepool', '/storagepools', - '/plugins/kimchi') + upgrade_objectstore_data( + 'storagepool', '/storagepools', '/plugins/kimchi') upgrade_objectstore_template_disks(self.model.conn) # Upgrade memory data, if necessary diff --git a/scan.py b/scan.py index b9d9e12d1..7b47e7ffe 100644 --- a/scan.py +++ b/scan.py @@ -17,7 +17,6 @@ # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # - import glob import hashlib import os.path @@ -25,10 +24,10 @@ import tempfile import time +from wok.plugins.kimchi.isoinfo import IsoImage +from wok.plugins.kimchi.isoinfo import probe_iso from wok.utils import wok_log -from wok.plugins.kimchi.isoinfo import IsoImage, probe_iso - SCAN_IGNORE = ['/tmp/kimchi-scan-*'] @@ -49,16 +48,16 @@ def clean_stale(self, window=SCAN_TTL): """ try: now = time.time() - clean_list = glob.glob("/tmp/kimchi-scan-*") + clean_list = glob.glob('/tmp/kimchi-scan-*') for d in clean_list: - transient_pool = \ - os.path.basename(d).replace('kimchi-scan-', '')[0: -6] + transient_pool = os.path.basename( + d).replace('kimchi-scan-', '')[0:-6] if now - os.path.getmtime(d) > window: shutil.rmtree(d) self.clean_cb(transient_pool) except OSError as e: - msg = "Exception %s occured when cleaning stale pool, ignore" - wok_log.debug(msg % e.message) + msg = f'Exception {e} occured when cleaning stale pool, ignore' + wok_log.debug(msg) def scan_dir_prepare(self, name): # clean stale scan storage pools @@ -69,21 +68,26 @@ def start_scan(self, cb, params): def updater(iso_info): iso_name = os.path.basename(iso_info['path'])[:-3] - duplicates = "%s/%s*" % (params['pool_path'], iso_name) + duplicates = '%s/%s*' % (params['pool_path'], iso_name) for f in glob.glob(duplicates): iso_img = IsoImage(f) - if (iso_info['distro'], iso_info['version']) == \ - iso_img.probe(): + if (iso_info['distro'], iso_info['version']) == iso_img.probe(): return - iso_path = iso_name + hashlib.md5(iso_info['path']).hexdigest() + \ + iso_path = ( + iso_name + + hashlib.md5(iso_info['path'].encode('utf-8')).hexdigest() + '.iso' - link_name = os.path.join(params['pool_path'], - os.path.basename(iso_path)) + ) + link_name = os.path.join( + params['pool_path'], os.path.basename(iso_path)) os.symlink(iso_info['path'], link_name) 
ignore_paths = params.get('ignore_list', []) - scan_params = dict(path=params['scan_path'], updater=updater, - ignore_list=ignore_paths + SCAN_IGNORE) + scan_params = dict( + path=params['scan_path'], + updater=updater, + ignore_list=ignore_paths + SCAN_IGNORE, + ) probe_iso(None, scan_params) cb('', True) diff --git a/screenshot.py b/screenshot.py index 81122288a..be3f63979 100644 --- a/screenshot.py +++ b/screenshot.py @@ -17,7 +17,6 @@ # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # - import glob import os import signal @@ -78,7 +77,7 @@ def _clean_extra(self, window=-1): """ try: now = time.time() - clear_list = glob.glob("%s/%s-*.png" % + clear_list = glob.glob('%s/%s-*.png' % (config.get_screenshot_path(), self.vm_uuid)) for f in clear_list: @@ -98,7 +97,7 @@ def _generate_scratch(self, thumbnail): pass def _create_black_image(self, thumbnail): - image = Image.new("RGB", self.THUMBNAIL_SIZE, 'black') + image = Image.new('RGB', self.THUMBNAIL_SIZE, 'black') image.save(thumbnail) def _watch_stream_creation(self, thumbnail): @@ -120,7 +119,7 @@ def _watch_stream_creation(self, thumbnail): try: self._generate_scratch(thumbnail) os._exit(0) - except: + except Exception: os._exit(1) else: counter = 0 @@ -130,21 +129,21 @@ def _watch_stream_creation(self, thumbnail): time.sleep(1) ret = os.waitpid(pid, os.WNOHANG) - fd = open(pipe, "a") + fd = open(pipe, 'a') if ret != (pid, 0): - fd.write("-") + fd.write('-') if ret[0] != pid: os.kill(int(pid), signal.SIGKILL) os.waitpid(pid, 0) else: - fd.write("+") + fd.write('+') fd.close() def _get_test_result(self): if not os.path.exists(pipe): return - fd = open(pipe, "r") + fd = open(pipe, 'r') data = fd.read() fd.close() @@ -163,9 +162,9 @@ def _generate_thumbnail(self): elif stream_test_result: try: self._generate_scratch(thumbnail) - except: - wok_log.error("screenshot_creation: Unable to create " - "screenshot image %s." % thumbnail) + except Exception: + wok_log.error('screenshot_creation: Unable to create ' + 'screenshot image %s.' % thumbnail) else: self._create_black_image(thumbnail) @@ -178,7 +177,7 @@ def _generate_thumbnail(self): # work around pic truncate validation in thumbnail generation im.thumbnail(self.THUMBNAIL_SIZE) except Exception as e: - wok_log.warning("Image load with warning: %s." % e) - im.save(thumbnail, "PNG") + wok_log.warning('Image load with warning: %s.' 
% e) + im.save(thumbnail, 'PNG') self.info['thumbnail'] = thumbnail diff --git a/serialconsole.py b/serialconsole.py index 13ef3313b..e2fc6cd19 100644 --- a/serialconsole.py +++ b/serialconsole.py @@ -17,20 +17,17 @@ # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # - -import libvirt import os import socket import sys import threading import time - from multiprocessing import Process +import libvirt from wok.config import config as wok_config -from wok.utils import wok_log - from wok.plugins.kimchi import model +from wok.utils import wok_log SOCKET_QUEUE_BACKLOG = 0 @@ -70,18 +67,15 @@ def __init__(self, guest_name, URI): self._uri = URI self._server_addr = os.path.join(BASE_DIRECTORY, guest_name) if os.path.exists(self._server_addr): - raise RuntimeError('There is an existing connection to %s' % - guest_name) - - self._socket = socket.socket(socket.AF_UNIX, - socket.SOCK_STREAM) - self._socket.setsockopt(socket.SOL_SOCKET, - socket.SO_REUSEADDR, - 1) + raise RuntimeError( + 'There is an existing connection to %s' % guest_name) + + self._socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) self._socket.bind(self._server_addr) self._socket.listen(SOCKET_QUEUE_BACKLOG) - wok_log.info('[%s] socket server to guest %s created', self.name, - guest_name) + wok_log.info('[%s] socket server to guest %s created', + self.name, guest_name) def run(self): """Implements customized run method from Process. @@ -101,13 +95,12 @@ def _event_loop(): while not is_listening: libvirt.virEventRunDefaultImpl() - console.eventAddCallback(libvirt.VIR_STREAM_EVENT_READABLE, - _test_output, - None) + console.eventAddCallback( + libvirt.VIR_STREAM_EVENT_READABLE, _test_output, None) libvirt_loop = threading.Thread(target=_event_loop) libvirt_loop.start() - console.send("\n") + console.send(b'\n') libvirt_loop.join(1) if not libvirt_loop.is_alive(): @@ -126,8 +119,8 @@ def _send_to_client(self, stream, event, opaque): data = stream.recv(1024) except Exception as e: - wok_log.info('[%s] Error when reading from console: %s', - self.name, e.message) + wok_log.info( + '[%s] Error when reading from console: %s', self.name, str(e)) return # return if no data received or client socket(opaque) is not valid @@ -157,8 +150,12 @@ def listen(self): guest = LibvirtGuest(self._guest_name, self._uri, self.name) except Exception as e: - wok_log.error('[%s] Cannot open the guest %s due to %s', - self.name, self._guest_name, e.message) + wok_log.error( + '[%s] Cannot open the guest %s due to %s', + self.name, + self._guest_name, + str(e), + ) self._socket.close() sys.exit(1) @@ -170,8 +167,9 @@ def listen(self): try: console = guest.get_console() if console is None: - wok_log.error('[%s] Cannot get the console to %s', - self.name, self._guest_name) + wok_log.error( + '[%s] Cannot get the console to %s', self.name, self._guest_name + ) return if not self._is_vm_listening_serial(console): @@ -184,8 +182,11 @@ def listen(self): pass finally: - wok_log.info("[%s] Shutting down the socket server to %s console", - self.name, self._guest_name) + wok_log.info( + '[%s] Shutting down the socket server to %s console', + self.name, + self._guest_name, + ) self._socket.close() if os.path.exists(self._server_addr): os.unlink(self._server_addr) @@ -194,8 +195,8 @@ def listen(self): console.eventRemoveCallback() except Exception as e: - wok_log.info('[%s] Callback is probably 
removed: %s', - self.name, e.message) + wok_log.info( + '[%s] Callback is probably removed: %s', self.name, str(e)) guest.close() @@ -211,17 +212,18 @@ def _listen(self, guest, console): session_timeout = wok_config.get('server', 'session_timeout') client.settimeout(int(session_timeout) * 60) - wok_log.info('[%s] Client connected to %s', self.name, - self._guest_name) + wok_log.info('[%s] Client connected to %s', + self.name, self._guest_name) # register the callback to receive any data from the console - console.eventAddCallback(libvirt.VIR_STREAM_EVENT_READABLE, - self._send_to_client, - client) + console.eventAddCallback( + libvirt.VIR_STREAM_EVENT_READABLE, self._send_to_client, client + ) # start the libvirt event loop in a python thread - libvirt_loop = threading.Thread(target=self.libvirt_event_loop, - args=(guest, client)) + libvirt_loop = threading.Thread( + target=self.libvirt_event_loop, args=(guest, client) + ) libvirt_loop.start() while True: @@ -230,8 +232,12 @@ def _listen(self, guest, console): data = client.recv(1024) except Exception as e: - wok_log.info('[%s] Client disconnected from %s: %s', - self.name, self._guest_name, e.message) + wok_log.info( + '[%s] Client disconnected from %s: %s', + self.name, + self._guest_name, + str(e), + ) break if not data or data == CTRL_Q: @@ -242,23 +248,25 @@ def _listen(self, guest, console): try: console.send(data) - except: - wok_log.info('[%s] Console of %s is not accessible', - self.name, self._guest_name) + except Exception: + wok_log.info( + '[%s] Console of %s is not accessible', self.name, self._guest_name + ) break # clear used resources when the connection is closed and, if possible, # tell the client the connection was lost. try: - client.send('\r\n\r\nClient disconnected\r\n') + client.send(b'\\r\\n\\r\\nClient disconnected\\r\\n') - except: + except Exception: pass + + # socket_server class LibvirtGuest(object): - def __init__(self, guest_name, uri, process_name): """ Constructs a guest object that opens a connection to libvirt and @@ -270,8 +278,10 @@ def __init__(self, guest_name, uri, process_name): self._guest = model.vms.VMModel.get_vm(guest_name, libvirt) except Exception as e: - wok_log.error('[%s] Cannot open guest %s: %s', self._proc_name, - guest_name, e.message) + wok_log.error( + '[%s] Cannot open guest %s: %s', self._proc_name, guest_name, str( + e) + ) raise self._libvirt = libvirt.get() @@ -282,8 +292,10 @@ def is_running(self): """ Checks if this guest is currently in a running state. 
""" - return self._guest.state(0)[0] == libvirt.VIR_DOMAIN_RUNNING or \ + return ( + self._guest.state(0)[0] == libvirt.VIR_DOMAIN_RUNNING or self._guest.state(0)[0] == libvirt.VIR_DOMAIN_PAUSED + ) def get_console(self): """ @@ -294,8 +306,11 @@ def get_console(self): # guest must be in a running state to get its console counter = 10 while not self.is_running(): - wok_log.info('[%s] Guest %s is not running, waiting for it', - self._proc_name, self._name) + wok_log.info( + '[%s] Guest %s is not running, waiting for it', + self._proc_name, + self._name, + ) counter -= 1 if counter <= 0: @@ -305,19 +320,23 @@ def get_console(self): # attach a stream in the guest console so we can read from/write to it if self._stream is None: - wok_log.info('[%s] Opening the console for guest %s', - self._proc_name, self._name) + wok_log.info( + '[%s] Opening the console for guest %s', self._proc_name, self._name + ) self._stream = self._libvirt.newStream(libvirt.VIR_STREAM_NONBLOCK) - self._guest.openConsole(None, - self._stream, - libvirt.VIR_DOMAIN_CONSOLE_FORCE | - libvirt.VIR_DOMAIN_CONSOLE_SAFE) + self._guest.openConsole( + None, + self._stream, + libvirt.VIR_DOMAIN_CONSOLE_FORCE | libvirt.VIR_DOMAIN_CONSOLE_SAFE, + ) return self._stream def close(self): """Closes the libvirt connection. """ self._libvirt.close() + + # guest @@ -331,7 +350,7 @@ def main(guest_name, URI='qemu:///system'): server = SocketServer(guest_name, URI) except Exception as e: - wok_log.error('Cannot create the socket server: %s', e.message) + wok_log.error('Cannot create the socket server: %s', str(e)) raise server.start() @@ -354,7 +373,7 @@ def main(guest_name, URI='qemu:///system'): """ argc = len(sys.argv) if argc != 2: - print 'usage: ./%s ' % sys.argv[0] + print(f'usage: ./{sys.argv[0]} ') sys.exit(1) main(sys.argv[1]) diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 000000000..3c57cbacd --- /dev/null +++ b/setup.cfg @@ -0,0 +1,3 @@ +[pycodestyle] +ignore = E226,E302,E41,W503,W504,E741 +max-line-length = 88 diff --git a/tests/iso_gen.py b/tests/iso_gen.py index 8c8fb123e..c28424643 100644 --- a/tests/iso_gen.py +++ b/tests/iso_gen.py @@ -16,7 +16,6 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - import platform import struct @@ -24,8 +23,7 @@ iso_des = [ - ('openbsd', lambda v: True, - lambda v: 'OpenBSD/i386 %s Install CD' % v), + ('openbsd', lambda v: True, lambda v: 'OpenBSD/i386 %s Install CD' % v), ('centos', lambda v: True, lambda v: 'CentOS_%s_Final' % v), ('windows', '2000', 'W2AFPP'), ('windows', 'xp', 'WXPFPP'), @@ -47,14 +45,28 @@ ('rhel', '4.8', 'RHEL/4-U8'), ('rhel', lambda v: v.startswith('6.'), lambda v: 'RHEL_%s' % v), ('debian', lambda v: True, lambda v: 'Debian %s' % v), - ('ubuntu', - lambda v: v in ('7.10', '8.04', '8.10', '9.04', '9.10', '10.04', '10.10', - '11.04', '11.10', '12.04', '12.10', '13.04', '13.10', - '14.04'), - lambda v: 'Ubuntu %s' % v), - ('fedora', - lambda v: v in ('16', '17', '18', '19'), - lambda v: 'Fedora %s' % v) + ( + 'ubuntu', + lambda v: v + in ( + '7.10', + '8.04', + '8.10', + '9.04', + '9.10', + '10.04', + '10.10', + '11.04', + '11.10', + '12.04', + '12.10', + '13.04', + '13.10', + '14.04', + ), + lambda v: 'Ubuntu %s' % v, + ), + ('fedora', lambda v: v in ('16', '17', '18', '19'), lambda v: 'Fedora %s' % v), ] @@ -79,11 +91,11 @@ def _build_prim_vol(self, fd, iso_volid): fd.seek(16 * 
IsoImage.SECTOR_SIZE) fmt = IsoImage.VOL_DESC vd_type = 1 - vd_ident = 'CD001' + vd_ident = b'CD001' vd_ver = 1 pad0 = 1 - sys_id = 'fake os' - vol_id = iso_volid + sys_id = b'fake os' + vol_id = iso_volid.encode('utf-8') data = (vd_type, vd_ident, vd_ver, pad0, sys_id, vol_id) s = fmt.pack(*data) fd.write(s) @@ -92,16 +104,16 @@ def _build_prim_vol(self, fd, iso_volid): def _add_sector_padding(self, fd, s): padding_len = IsoImage.SECTOR_SIZE - len(s) fmt = struct.Struct('=%ss' % padding_len) - s = fmt.pack('a' * padding_len) + s = fmt.pack(b'a' * padding_len) fd.write(s) def _build_el_torito(self, fd): fmt = IsoImage.EL_TORITO_BOOT_RECORD vd_type = 0 - vd_ident = 'CD001' + vd_ident = b'CD001' vd_ver = 1 - et_ident = "EL TORITO SPECIFICATION:" - pad0 = 'a' * 32 + et_ident = b'EL TORITO SPECIFICATION:' + pad0 = b'a' * 32 boot_cat = 0 data = (vd_type, vd_ident, vd_ver, et_ident, pad0, boot_cat) s = fmt.pack(*data) @@ -113,10 +125,10 @@ def _build_el_boot(self, fd, bootable): hdr_id = 0 platform_id = 0 pad0 = 1 - ident = 'c' * 24 + ident = b'c' * 24 csum = 1 key55 = 0x55 - keyAA = 0xaa + keyAA = 0xAA data = (hdr_id, platform_id, pad0, ident, csum, key55, keyAA) s = fmt.pack(*data) fd.write(s) @@ -136,23 +148,23 @@ def _build_el_boot(self, fd, bootable): s = fmt.pack(*data) fd.write(s) - s = 'a' * IsoImage.SECTOR_SIZE + s = b'a' * IsoImage.SECTOR_SIZE fd.write(s) def _build_bootable_ppc_path_table(self, fd): # write path table locator PATH_TABLE_LOC_OFFSET = 16 * IsoImage.SECTOR_SIZE + 132 - PATH_TABLE_SIZE_LOC = struct.Struct(" 0: iface = interfaces[0]['name'] - _do_network_test(self, model, {'name': u'vlan-tagged-bridge', - 'connection': 'bridge', - 'interface': iface, 'vlan_id': 987}) + _do_network_test( + self, + model, + { + 'name': u'vlan-tagged-bridge', + 'connection': 'bridge', + 'interface': iface, + 'vlan_id': 987, + }, + ) diff --git a/tests/test_mock_storagepool.py b/tests/test_mock_storagepool.py index f584382db..7ed170a40 100644 --- a/tests/test_mock_storagepool.py +++ b/tests/test_mock_storagepool.py @@ -17,13 +17,16 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - -import cherrypy import json import unittest +import urllib from functools import partial -from tests.utils import patch_auth, request, run_server +import cherrypy + +from tests.utils import patch_auth +from tests.utils import request +from tests.utils import run_server model = None test_server = None @@ -47,9 +50,7 @@ def setUp(self): model.reset() def _task_lookup(self, taskid): - return json.loads( - self.request('/plugins/kimchi/tasks/%s' % taskid).read() - ) + return json.loads(self.request('/plugins/kimchi/tasks/%s' % taskid).read()) def test_storagepool(self): # MockModel always returns 2 VGs (hostVG, kimchiVG) @@ -57,9 +58,8 @@ def test_storagepool(self): vg_names = [vg['name'] for vg in vgs] # MockModel always returns 2 partitions (vdx, vdz) - partitions = json.loads( - self.request('/plugins/kimchi/host/partitions').read() - ) + partitions = json.loads(self.request( + '/plugins/kimchi/host/partitions').read()) devs = [dev['path'] for dev in partitions] # MockModel always returns 3 FC devices @@ -69,72 +69,91 @@ def test_storagepool(self): fc_devs = [dev['name'] for dev in fc_devs] poolDefs = [ - {'type': 'dir', 'name': u'kīмсhīUnitTestDirPool', - 'path': '/tmp/kimchi-images'}, - {'type': 'netfs', 'name': u'kīмсhīUnitTestNSFPool', - 
'source': {'host': 'localhost', - 'path': '/var/lib/kimchi/nfs-pool'}}, - {'type': 'scsi', 'name': u'kīмсhīUnitTestSCSIFCPool', - 'source': {'adapter_name': fc_devs[0]}}, - {'type': 'iscsi', 'name': u'kīмсhīUnitTestISCSIPool', - 'source': {'host': '127.0.0.1', - 'target': 'iqn.2015-01.localhost.kimchiUnitTest'}}, - {'type': 'logical', 'name': u'kīмсhīUnitTestLogicalPool', - 'source': {'devices': [devs[0]]}}, - {'type': 'logical', 'name': vg_names[0], - 'source': {'from_vg': True}}] + { + 'type': 'dir', + 'name': 'kīмсhīUnitTestDirPool', + 'path': '/tmp/kimchi-images', + }, + { + 'type': 'netfs', + 'name': 'kīмсhīUnitTestNSFPool', + 'source': {'host': 'localhost', 'path': '/var/lib/kimchi/nfs-pool'}, + }, + { + 'type': 'scsi', + 'name': 'kīмсhīUnitTestSCSIFCPool', + 'source': {'adapter_name': fc_devs[0]}, + }, + { + 'type': 'iscsi', + 'name': 'kīмсhīUnitTestISCSIPool', + 'source': { + 'host': '127.0.0.1', + 'target': 'iqn.2015-01.localhost.kimchiUnitTest', + }, + }, + { + 'type': 'logical', + 'name': 'kīмсhīUnitTestLogicalPool', + 'source': {'devices': [devs[0]]}, + }, + {'type': 'logical', 'name': vg_names[0], 'source': { + 'from_vg': True}}, + ] def _do_test(params): name = params['name'] - uri = '/plugins/kimchi/storagepools/%s' % name.encode('utf-8') + uri = urllib.parse.quote(f'/plugins/kimchi/storagepools/{name}') req = json.dumps(params) resp = self.request('/plugins/kimchi/storagepools', req, 'POST') - self.assertEquals(201, resp.status) + self.assertEqual(201, resp.status) # activate the storage pool resp = self.request(uri + '/activate', '{}', 'POST') - storagepool = json.loads(self.request(uri).read()) - self.assertEquals('active', storagepool['state']) + storagepool = json.loads(self.request(uri).read().decode('utf-8')) + self.assertEqual('active', storagepool['state']) # Set autostart flag of an active storage pool for autostart in [True, False]: t = {'autostart': autostart} req = json.dumps(t) resp = self.request(uri, req, 'PUT') - storagepool = json.loads(self.request(uri).read()) - self.assertEquals(autostart, storagepool['autostart']) + storagepool = json.loads( + self.request(uri).read().decode('utf-8')) + self.assertEqual(autostart, storagepool['autostart']) # Extend an active logical pool if params['type'] == 'logical': t = {'disks': [devs[1]]} req = json.dumps(t) resp = self.request(uri, req, 'PUT') - self.assertEquals(200, resp.status) + self.assertEqual(200, resp.status) # Deactivate the storage pool resp = self.request(uri + '/deactivate', '{}', 'POST') - storagepool = json.loads(self.request(uri).read()) - self.assertEquals('inactive', storagepool['state']) + storagepool = json.loads(self.request(uri).read().decode('utf-8')) + self.assertEqual('inactive', storagepool['state']) # Set autostart flag of an inactive storage pool for autostart in [True, False]: t = {'autostart': autostart} req = json.dumps(t) resp = self.request(uri, req, 'PUT') - storagepool = json.loads(self.request(uri).read()) - self.assertEquals(autostart, storagepool['autostart']) + storagepool = json.loads( + self.request(uri).read().decode('utf-8')) + self.assertEqual(autostart, storagepool['autostart']) # Extend an inactive logical pool if params['type'] == 'logical': t = {'disks': [devs[1]]} req = json.dumps(t) resp = self.request(uri, req, 'PUT') - self.assertEquals(200, resp.status) + self.assertEqual(200, resp.status) # Delete the storage pool resp = self.request(uri, '{}', 'DELETE') - self.assertEquals(204, resp.status) + self.assertEqual(204, resp.status) for pool in poolDefs: 
_do_test(pool) diff --git a/tests/test_mock_storagevolume.py b/tests/test_mock_storagevolume.py index 4b3306b33..1c8c3a1c1 100644 --- a/tests/test_mock_storagevolume.py +++ b/tests/test_mock_storagevolume.py @@ -17,16 +17,18 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - -import cherrypy import json import unittest +import urllib from functools import partial -from tests.utils import patch_auth, request, run_server - +import cherrypy from test_model_storagevolume import _do_volume_test +from tests.utils import patch_auth +from tests.utils import request +from tests.utils import run_server + model = None test_server = None @@ -51,37 +53,58 @@ def setUp(self): def test_storagevolume(self): # MockModel always returns 2 partitions (vdx, vdz) partitions = json.loads( - self.request('/plugins/kimchi/host/partitions').read() + self.request( + '/plugins/kimchi/host/partitions').read().decode('utf-8') ) devs = [dev['path'] for dev in partitions] # MockModel always returns 3 FC devices fc_devs = json.loads( - self.request('/plugins/kimchi/host/devices?_cap=fc_host').read() + self.request('/plugins/kimchi/host/devices?_cap=fc_host') + .read() + .decode('utf-8') ) fc_devs = [dev['name'] for dev in fc_devs] poolDefs = [ - {'type': 'dir', 'name': u'kīмсhīUnitTestDirPool', - 'path': '/tmp/kimchi-images'}, - {'type': 'netfs', 'name': u'kīмсhīUnitTestNSFPool', - 'source': {'host': 'localhost', - 'path': '/var/lib/kimchi/nfs-pool'}}, - {'type': 'scsi', 'name': u'kīмсhīUnitTestSCSIFCPool', - 'source': {'adapter_name': fc_devs[0]}}, - {'type': 'iscsi', 'name': u'kīмсhīUnitTestISCSIPool', - 'source': {'host': '127.0.0.1', - 'target': 'iqn.2015-01.localhost.kimchiUnitTest'}}, - {'type': 'logical', 'name': u'kīмсhīUnitTestLogicalPool', - 'source': {'devices': [devs[0]]}}] + { + 'type': 'dir', + 'name': 'kīмсhīUnitTestDirPool', + 'path': '/tmp/kimchi-images', + }, + { + 'type': 'netfs', + 'name': 'kīмсhīUnitTestNSFPool', + 'source': {'host': 'localhost', 'path': '/var/lib/kimchi/nfs-pool'}, + }, + { + 'type': 'scsi', + 'name': 'kīмсhīUnitTestSCSIFCPool', + 'source': {'adapter_name': fc_devs[0]}, + }, + { + 'type': 'iscsi', + 'name': 'kīмсhīUnitTestISCSIPool', + 'source': { + 'host': '127.0.0.1', + 'target': 'iqn.2015-01.localhost.kimchiUnitTest', + }, + }, + { + 'type': 'logical', + 'name': 'kīмсhīUnitTestLogicalPool', + 'source': {'devices': [devs[0]]}, + }, + ] for pool in poolDefs: pool_name = pool['name'] - uri = '/plugins/kimchi/storagepools/%s' % pool_name.encode('utf-8') + uri = urllib.parse.quote( + f'/plugins/kimchi/storagepools/{pool_name}') req = json.dumps(pool) resp = self.request('/plugins/kimchi/storagepools', req, 'POST') - self.assertEquals(201, resp.status) + self.assertEqual(201, resp.status) # activate the storage pool resp = self.request(uri + '/activate', '{}', 'POST') - self.assertEquals(200, resp.status) + self.assertEqual(200, resp.status) _do_volume_test(self, model, pool_name) diff --git a/tests/test_mockmodel.py b/tests/test_mockmodel.py index 8d478cf22..4057b62a9 100644 --- a/tests/test_mockmodel.py +++ b/tests/test_mockmodel.py @@ -16,20 +16,20 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - -import cherrypy import json import os import time 
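# A minimal sketch (not part of the patch) of the URI-quoting pattern adopted in the
# storage pool/volume tests above: under Python 3 the non-ASCII pool names are
# percent-encoded with urllib.parse.quote instead of interpolating the bytes from
# name.encode('utf-8') into the request path.
import urllib.parse

pool_name = 'kīмсhīUnitTestDirPool'          # same name the tests above use
uri = urllib.parse.quote(f'/plugins/kimchi/storagepools/{pool_name}')
# quote() keeps '/' unescaped by default and UTF-8 percent-encodes the rest,
# e.g. 'ī' becomes '%C4%AB', so the result is a plain-ASCII request path.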
import unittest -from tests.utils import patch_auth, request, run_server -from tests.utils import wait_task - +import cherrypy +import iso_gen from wok.exception import InvalidOperation from wok.plugins.kimchi.osinfo import get_template_default -import iso_gen +from tests.utils import patch_auth +from tests.utils import request +from tests.utils import run_server +from tests.utils import wait_task test_server = None model = None @@ -57,46 +57,54 @@ def setUp(self): def test_screenshot_refresh(self): # Create a VM - req = json.dumps({'name': 'test', - 'source_media': {'type': 'disk', 'path': fake_iso}}) + req = json.dumps( + {'name': 'test', 'source_media': {'type': 'disk', 'path': fake_iso}} + ) request('/plugins/kimchi/templates', req, 'POST') - req = json.dumps({'name': 'test-vm', - 'template': '/plugins/kimchi/templates/test'}) + req = json.dumps( + {'name': 'test-vm', 'template': '/plugins/kimchi/templates/test'} + ) resp = request('/plugins/kimchi/vms', req, 'POST') - task = json.loads(resp.read()) + task = json.loads(resp.read().decode('utf-8')) wait_task(model.task_lookup, task['id']) # Test screenshot refresh for running vm - request('/plugins/kimchi/vms/test-vm/start', '{}', - 'POST') + request('/plugins/kimchi/vms/test-vm/start', '{}', 'POST') resp = request('/plugins/kimchi/vms/test-vm/screenshot') - self.assertEquals(200, resp.status) - self.assertEquals('image/png', resp.getheader('content-type')) + self.assertEqual(200, resp.status) + self.assertEqual('image/png', resp.getheader('content-type')) resp1 = request('/plugins/kimchi/vms/test-vm') - rspBody = resp1.read() + rspBody = resp1.read().decode('utf-8') testvm_Data = json.loads(rspBody) screenshotURL = '/' + testvm_Data['screenshot'] time.sleep(5) resp2 = request(screenshotURL) - self.assertEquals(200, resp2.status) - self.assertEquals(resp2.getheader('content-type'), - resp.getheader('content-type')) - self.assertEquals(resp2.getheader('content-length'), - resp.getheader('content-length')) - self.assertEquals(resp2.getheader('last-modified'), - resp.getheader('last-modified')) + self.assertEqual(200, resp2.status) + self.assertEqual( + resp2.getheader('content-type'), resp.getheader('content-type') + ) + self.assertEqual( + resp2.getheader('content-length'), resp.getheader('content-length') + ) + self.assertEqual( + resp2.getheader('last-modified'), resp.getheader('last-modified') + ) def test_vm_list_sorted(self): - req = json.dumps({'name': 'test', - 'source_media': {'type': 'disk', 'path': fake_iso}}) + req = json.dumps( + {'name': 'test', 'source_media': {'type': 'disk', 'path': fake_iso}} + ) request('/plugins/kimchi/templates', req, 'POST') def add_vm(name): # Create a VM - req = json.dumps({'name': name, - 'template': '/plugins/kimchi/templates/test'}) - task = json.loads(request('/plugins/kimchi/vms', - req, 'POST').read()) + req = json.dumps( + {'name': name, 'template': '/plugins/kimchi/templates/test'} + ) + task = json.loads( + request('/plugins/kimchi/vms', req, + 'POST').read().decode('utf-8') + ) wait_task(model.task_lookup, task['id']) vms = [u'abc', u'bca', u'cab', u'xba'] @@ -107,24 +115,26 @@ def add_vm(name): self.assertEqual(model.vms_get_list(), sorted(vms)) def test_memory_window_changes(self): - model.templates_create({'name': u'test', - 'source_media': {'type': 'disk', - 'path': fake_iso}}) - task = model.vms_create({'name': u'test-vm', - 'template': '/plugins/kimchi/templates/test'}) + model.templates_create( + {'name': u'test', 'source_media': {'type': 'disk', 'path': fake_iso}} + ) + task = 
model.vms_create( + {'name': u'test-vm', 'template': '/plugins/kimchi/templates/test'} + ) wait_task(model.task_lookup, task['id']) info = model.device_lookup('pci_0000_1a_00_0') model.vmhostdevs_update_mmio_guest(u'test-vm', True) - model._attach_device(u'test-vm', - model._get_pci_device_xml(info, 0, False)) + model._attach_device( + u'test-vm', model._get_pci_device_xml(info, 0, False)) def test_hotplug_3D_card(self): - model.templates_create({'name': u'test', - 'source_media': {'type': 'disk', - 'path': fake_iso}}) - task = model.vms_create({'name': u'test-vm', - 'template': '/plugins/kimchi/templates/test'}) + model.templates_create( + {'name': u'test', 'source_media': {'type': 'disk', 'path': fake_iso}} + ) + task = model.vms_create( + {'name': u'test-vm', 'template': '/plugins/kimchi/templates/test'} + ) wait_task(model.task_lookup, task['id']) model.vm_start(u'test-vm') @@ -141,34 +151,59 @@ def test_hotplug_3D_card(self): self.assertEqual(e.message[:14], u'KCHVMHDEV0006E') def test_vm_info(self): - model.templates_create({'name': u'test', - 'source_media': {'type': 'disk', - 'path': fake_iso}}) - task = model.vms_create({'name': u'test-vm', - 'template': '/plugins/kimchi/templates/test'}) + model.templates_create( + {'name': u'test', 'source_media': {'type': 'disk', 'path': fake_iso}} + ) + task = model.vms_create( + {'name': u'test-vm', 'template': '/plugins/kimchi/templates/test'} + ) wait_task(model.task_lookup, task['id']) vms = model.vms_get_list() - self.assertEquals(2, len(vms)) + self.assertEqual(2, len(vms)) self.assertIn(u'test-vm', vms) - keys = set(('name', 'state', 'stats', 'uuid', 'memory', 'cpu_info', - 'screenshot', 'icon', 'graphics', 'users', 'groups', - 'access', 'persistent', 'bootorder', 'bootmenu', 'title', - 'description', 'autostart')) - - stats_keys = set(('cpu_utilization', 'mem_utilization', - 'net_throughput', 'net_throughput_peak', - 'io_throughput', 'io_throughput_peak')) + keys = set( + ( + 'name', + 'state', + 'stats', + 'uuid', + 'memory', + 'cpu_info', + 'screenshot', + 'icon', + 'graphics', + 'users', + 'groups', + 'access', + 'persistent', + 'bootorder', + 'bootmenu', + 'title', + 'description', + 'autostart', + ) + ) + + stats_keys = set( + ( + 'cpu_utilization', + 'mem_utilization', + 'net_throughput', + 'net_throughput_peak', + 'io_throughput', + 'io_throughput_peak', + ) + ) info = model.vm_lookup(u'test-vm') - self.assertEquals(keys, set(info.keys())) - self.assertEquals('shutoff', info['state']) - self.assertEquals('test-vm', info['name']) - self.assertEquals(get_template_default('old', 'memory'), - info['memory']) - self.assertEquals(1, info['cpu_info']['vcpus']) - self.assertEquals(1, info['cpu_info']['maxvcpus']) - self.assertEquals('plugins/kimchi/images/icon-vm.png', info['icon']) - self.assertEquals(stats_keys, set(info['stats'].keys())) - self.assertEquals('vnc', info['graphics']['type']) - self.assertEquals('127.0.0.1', info['graphics']['listen']) + self.assertEqual(keys, set(info.keys())) + self.assertEqual('shutoff', info['state']) + self.assertEqual('test-vm', info['name']) + self.assertEqual(get_template_default('old', 'memory'), info['memory']) + self.assertEqual(1, info['cpu_info']['vcpus']) + self.assertEqual(1, info['cpu_info']['maxvcpus']) + self.assertEqual('plugins/kimchi/images/icon-vm.png', info['icon']) + self.assertEqual(stats_keys, set(info['stats'].keys())) + self.assertEqual('vnc', info['graphics']['type']) + self.assertEqual('127.0.0.1', info['graphics']['listen']) diff --git a/tests/test_model.py 
b/tests/test_model.py index 32b5ba8f0..815ece485 100644 --- a/tests/test_model.py +++ b/tests/test_model.py @@ -17,15 +17,10 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - -import __builtin__ as builtins - import base64 +import builtins import grp -import libvirt import json -import lxml.etree as ET -import mock import os import platform import pwd @@ -34,20 +29,20 @@ import time import unittest -from lxml import objectify - -import tests.utils as utils - +import libvirt +import lxml.etree as ET +import mock import wok.objectstore +from iso_gen import construct_fake_iso +from lxml import objectify from wok.asynctask import AsyncTask from wok.basemodel import Singleton -from wok.config import config, PluginPaths +from wok.config import config +from wok.config import PluginPaths from wok.exception import InvalidOperation -from wok.exception import InvalidParameter, NotFoundError, OperationFailed -from wok.rollbackcontext import RollbackContext -from wok.utils import convert_data_size -from wok.xmlutils.utils import xpath_get_text - +from wok.exception import InvalidParameter +from wok.exception import NotFoundError +from wok.exception import OperationFailed from wok.plugins.kimchi import network as netinfo from wok.plugins.kimchi import osinfo from wok.plugins.kimchi.config import kimchiPaths as paths @@ -56,14 +51,19 @@ from wok.plugins.kimchi.model.virtviewerfile import FirewallManager from wok.plugins.kimchi.model.virtviewerfile import VMVirtViewerFileModel from wok.plugins.kimchi.model.vms import VMModel +from wok.rollbackcontext import RollbackContext +from wok.utils import convert_data_size +from wok.xmlutils.utils import xpath_get_text -import iso_gen +import tests.utils as utils -invalid_repository_urls = ['www.fedora.org', # missing protocol - '://www.fedora.org', # missing protocol - 'http://www.fedora', # invalid domain name - 'file:///home/foobar'] # invalid path +invalid_repository_urls = [ + 'www.fedora.org', # missing protocol + '://www.fedora.org', # missing protocol + 'http://www.fedora', # invalid domain name + 'file:///home/foobar', +] # invalid path TMP_DIR = '/var/lib/kimchi/tests/' UBUNTU_ISO = TMP_DIR + 'ubuntu14.04.iso' @@ -86,7 +86,7 @@ def setUpModule(): if not os.path.exists(TMP_DIR): os.makedirs(TMP_DIR) - iso_gen.construct_fake_iso(UBUNTU_ISO, True, '14.04', 'ubuntu') + construct_fake_iso(UBUNTU_ISO, True, '14.04', 'ubuntu') # Some FeatureTests functions depend on server to validate their result. 
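# A minimal sketch (not part of the patch) of the module rename behind the
# '__builtin__' -> 'builtins' change in the import block above: Python 3 exposes
# the built-in namespace as 'builtins', so a version-agnostic import would be:
try:
    import builtins                      # Python 3
except ImportError:
    import __builtin__ as builtins       # Python 2 fallback (the old spelling)

# With the rename, patching a built-in such as open() in a test targets
# 'builtins.open'; the exact patch target used by this suite is not shown here.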
# As CapabilitiesModel is a Singleton class it will get the first result @@ -108,8 +108,10 @@ def get_remote_iso_path(): """ host_arch = os.uname()[4] remote_path = '' - with open(os.path.join(PluginPaths('kimchi').conf_dir, 'distros.d', - 'fedora.json')) as fedora_isos: + with open( + os.path.join(PluginPaths('kimchi').conf_dir, + 'distros.d', 'fedora.json') + ) as fedora_isos: # Get a list of dicts json_isos_list = json.load(fedora_isos) for iso in json_isos_list: @@ -122,12 +124,14 @@ def get_remote_iso_path(): def _setDiskPoolDefault(): osinfo.defaults['disks'][0]['pool'] = { - 'name': '/plugins/kimchi/storagepools/default'} + 'name': '/plugins/kimchi/storagepools/default' + } def _setDiskPoolDefaultTest(): osinfo.defaults['disks'][0]['pool'] = { - 'name': '/plugins/kimchi/storagepools/default-pool'} + 'name': '/plugins/kimchi/storagepools/default-pool' + } class ModelTests(unittest.TestCase): @@ -145,47 +149,77 @@ def tearDown(self): def test_vm_info(self): inst = model.Model('test:///default', self.tmp_store) vms = inst.vms_get_list() - self.assertEquals(1, len(vms)) - self.assertEquals('test', vms[0]) - - keys = set(('name', 'state', 'stats', 'uuid', 'memory', 'cpu_info', - 'screenshot', 'icon', 'graphics', 'users', 'groups', - 'access', 'persistent', 'bootorder', 'bootmenu', 'title', - 'description', 'autostart')) + self.assertEqual(1, len(vms)) + self.assertEqual('test', vms[0]) + + keys = set( + ( + 'name', + 'state', + 'stats', + 'uuid', + 'memory', + 'cpu_info', + 'screenshot', + 'icon', + 'graphics', + 'users', + 'groups', + 'access', + 'persistent', + 'bootorder', + 'bootmenu', + 'title', + 'description', + 'autostart', + ) + ) - stats_keys = set(('cpu_utilization', 'mem_utilization', - 'net_throughput', 'net_throughput_peak', - 'io_throughput', 'io_throughput_peak')) + stats_keys = set( + ( + 'cpu_utilization', + 'mem_utilization', + 'net_throughput', + 'net_throughput_peak', + 'io_throughput', + 'io_throughput_peak', + ) + ) info = inst.vm_lookup('test') - self.assertEquals(keys, set(info.keys())) - self.assertEquals('running', info['state']) - self.assertEquals('test', info['name']) - self.assertEquals(2048, info['memory']['current']) - self.assertEquals(2, info['cpu_info']['vcpus']) - self.assertEquals(2, info['cpu_info']['maxvcpus']) - self.assertEquals(None, info['icon']) - self.assertEquals(stats_keys, set(info['stats'].keys())) + self.assertEqual(keys, set(info.keys())) + self.assertEqual('running', info['state']) + self.assertEqual('test', info['name']) + self.assertEqual(2048, info['memory']['current']) + self.assertEqual(2, info['cpu_info']['vcpus']) + self.assertEqual(2, info['cpu_info']['maxvcpus']) + self.assertEqual(None, info['icon']) + self.assertEqual(stats_keys, set(info['stats'].keys())) self.assertRaises(NotFoundError, inst.vm_lookup, 'nosuchvm') - self.assertEquals([], info['users']) - self.assertEquals([], info['groups']) + self.assertEqual([], info['users']) + self.assertEqual([], info['groups']) self.assertTrue(info['persistent']) - @unittest.skipUnless(utils.running_as_root() and - os.uname()[4] != "s390x", 'Must be run as root') + @unittest.skipUnless( + utils.running_as_root() and os.uname()[ + 4] != 's390x', 'Must be run as root' + ) def test_vm_lifecycle(self): inst = model.Model(objstore_loc=self.tmp_store) with RollbackContext() as rollback: vol_params = {'name': u'test-vol', 'capacity': 1024} task = inst.storagevolumes_create(u'default', vol_params) - rollback.prependDefer(inst.storagevolume_delete, u'default', - vol_params['name']) + 
rollback.prependDefer( + inst.storagevolume_delete, u'default', vol_params['name'] + ) inst.task_wait(task['id']) task = inst.task_lookup(task['id']) - self.assertEquals('finished', task['status']) + self.assertEqual('finished', task['status']) - params = {'name': 'test', - 'source_media': {'type': 'disk', 'path': UBUNTU_ISO}} + params = { + 'name': 'test', + 'source_media': {'type': 'disk', 'path': UBUNTU_ISO}, + } inst.templates_create(params) rollback.prependDefer(inst.template_delete, 'test') @@ -196,7 +230,7 @@ def test_vm_lifecycle(self): rollback.prependDefer(inst.vm_delete, 'kimchi-vm-new') inst.task_wait(task['id'], 10) task = inst.task_lookup(task['id']) - self.assertEquals('finished', task['status']) + self.assertEqual('finished', task['status']) vms = inst.vms_get_list() self.assertTrue('kimchi-vm' in vms) @@ -204,12 +238,12 @@ def test_vm_lifecycle(self): inst.vm_start('kimchi-vm') info = inst.vm_lookup('kimchi-vm') - self.assertEquals('running', info['state']) + self.assertEqual('running', info['state']) task = inst.vmsnapshots_create(u'kimchi-vm') inst.task_wait(task['id']) task = inst.task_lookup(task['id']) - self.assertEquals('finished', task['status']) + self.assertEqual('finished', task['status']) snap_name = task['target_uri'].split('/')[-1] created_snaps = [snap_name] @@ -217,30 +251,31 @@ def test_vm_lifecycle(self): vm = inst.vm_lookup(u'kimchi-vm') current_snap = inst.currentvmsnapshot_lookup(u'kimchi-vm') - self.assertEquals(created_snaps[0], current_snap['name']) + self.assertEqual(created_snaps[0], current_snap['name']) # this snapshot should be deleted when its VM is deleted params = {'name': u'mysnap'} task = inst.vmsnapshots_create(u'kimchi-vm', params) inst.task_wait(task['id']) task = inst.task_lookup(task['id']) - self.assertEquals('finished', task['status']) + self.assertEqual('finished', task['status']) created_snaps.append(params['name']) - self.assertRaises(NotFoundError, inst.vmsnapshot_lookup, - u'kimchi-vm', u'foobar') + self.assertRaises( + NotFoundError, inst.vmsnapshot_lookup, u'kimchi-vm', u'foobar' + ) snap = inst.vmsnapshot_lookup(u'kimchi-vm', params['name']) self.assertTrue(int(time.time()) >= int(snap['created'])) - self.assertEquals(vm['state'], snap['state']) - self.assertEquals(params['name'], snap['name']) - self.assertEquals(created_snaps[0], snap['parent']) + self.assertEqual(vm['state'], snap['state']) + self.assertEqual(params['name'], snap['name']) + self.assertEqual(created_snaps[0], snap['parent']) snaps = inst.vmsnapshots_get_list(u'kimchi-vm') - self.assertEquals(created_snaps, snaps) + self.assertEqual(created_snaps, snaps) current_snap = inst.currentvmsnapshot_lookup(u'kimchi-vm') - self.assertEquals(snap, current_snap) + self.assertEqual(snap, current_snap) task = inst.vmsnapshots_create(u'kimchi-vm') snap_name = task['target_uri'].split('/')[-1] @@ -248,15 +283,15 @@ def test_vm_lifecycle(self): u'kimchi-vm-new', snap_name) inst.task_wait(task['id']) task = inst.task_lookup(task['id']) - self.assertEquals('finished', task['status']) + self.assertEqual('finished', task['status']) created_snaps.append(snap_name) snaps = inst.vmsnapshots_get_list(u'kimchi-vm') - self.assertEquals(sorted(created_snaps, key=unicode.lower), snaps) + self.assertEqual(sorted(created_snaps, key=str.lower), snaps) snap = inst.vmsnapshot_lookup(u'kimchi-vm', snap_name) current_snap = inst.currentvmsnapshot_lookup(u'kimchi-vm') - self.assertEquals(snap, current_snap) + self.assertEqual(snap, current_snap) # update vm name inst.vm_update('kimchi-vm', 
{'name': u'kimchi-vm-new'}) @@ -266,32 +301,34 @@ def test_vm_lifecycle(self): # snapshot revert to the first created vm result = inst.vmsnapshot_revert(u'kimchi-vm-new', params['name']) - self.assertEquals(result, ['kimchi-vm-new', snap['name']]) + self.assertEqual(result, ['kimchi-vm-new', snap['name']]) vm = inst.vm_lookup(u'kimchi-vm-new') - self.assertEquals(vm['state'], snap['state']) + self.assertEqual(vm['state'], snap['state']) current_snap = inst.currentvmsnapshot_lookup(u'kimchi-vm-new') - self.assertEquals(params['name'], current_snap['name']) + self.assertEqual(params['name'], current_snap['name']) # suspend and resume the VM info = inst.vm_lookup(u'kimchi-vm-new') - self.assertEquals(info['state'], 'shutoff') - self.assertRaises(InvalidOperation, inst.vm_suspend, - u'kimchi-vm-new') + self.assertEqual(info['state'], 'shutoff') + self.assertRaises(InvalidOperation, + inst.vm_suspend, u'kimchi-vm-new') inst.vm_start(u'kimchi-vm-new') info = inst.vm_lookup(u'kimchi-vm-new') - self.assertEquals(info['state'], 'running') + self.assertEqual(info['state'], 'running') inst.vm_suspend(u'kimchi-vm-new') info = inst.vm_lookup(u'kimchi-vm-new') - self.assertEquals(info['state'], 'paused') - self.assertRaises(InvalidParameter, inst.vm_update, - u'kimchi-vm-new', {'name': 'foo'}) + self.assertEqual(info['state'], 'paused') + self.assertRaises( + InvalidParameter, inst.vm_update, u'kimchi-vm-new', { + 'name': 'foo'} + ) inst.vm_resume(u'kimchi-vm-new') info = inst.vm_lookup(u'kimchi-vm-new') - self.assertEquals(info['state'], 'running') - self.assertRaises(InvalidOperation, inst.vm_resume, - u'kimchi-vm-new') + self.assertEqual(info['state'], 'running') + self.assertRaises(InvalidOperation, + inst.vm_resume, u'kimchi-vm-new') # leave the VM suspended to make sure a paused VM can be # deleted correctly inst.vm_suspend('kimchi-vm-new') @@ -299,41 +336,53 @@ def test_vm_lifecycle(self): vms = inst.vms_get_list() self.assertFalse('kimchi-vm-new' in vms) - @unittest.skipUnless(utils.running_as_root() and - os.uname()[4] != "s390x", 'Must be run as root') + @unittest.skipUnless( + utils.running_as_root() and os.uname()[ + 4] != 's390x', 'Must be run as root' + ) def test_image_based_template(self): inst = model.Model(objstore_loc=self.tmp_store) with RollbackContext() as rollback: vol = 'base-vol.img' - params = {'name': vol, - 'capacity': 1073741824, # 1 GiB - 'allocation': 1048576, # 1 MiB - 'format': 'qcow2'} + params = { + 'name': vol, + 'capacity': 1073741824, # 1 GiB + 'allocation': 1048576, # 1 MiB + 'format': 'qcow2', + } task_id = inst.storagevolumes_create('default', params)['id'] rollback.prependDefer(inst.storagevolume_delete, 'default', vol) inst.task_wait(task_id) - self.assertEquals('finished', inst.task_lookup(task_id)['status']) + self.assertEqual('finished', inst.task_lookup(task_id)['status']) vol_path = inst.storagevolume_lookup('default', vol)['path'] # Create template based on IMG file - tmpl_name = "img-tmpl" - tmpl_info = {"cpu_info": {"vcpus": 1}, "name": tmpl_name, - "graphics": {"type": "vnc", "listen": "127.0.0.1"}, - "networks": ["default"], "memory": {'current': 1024}, - "folder": [], "icon": "images/icon-vm.png", - "os_distro": "unknown", "os_version": "unknown", - "source_media": {'type': 'disk', 'path': vol_path}} + tmpl_name = 'img-tmpl' + tmpl_info = { + 'cpu_info': {'vcpus': 1}, + 'name': tmpl_name, + 'graphics': {'type': 'vnc', 'listen': '127.0.0.1'}, + 'networks': ['default'], + 'memory': {'current': 1024}, + 'folder': [], + 'icon': 'images/icon-vm.png', + 
'os_distro': 'unknown', + 'os_version': 'unknown', + 'source_media': {'type': 'disk', 'path': vol_path}, + } inst.templates_create(tmpl_info) rollback.prependDefer(inst.template_delete, tmpl_name) # verify disk tmpl = inst.template_lookup(tmpl_name) - self.assertEquals(vol_path, tmpl["disks"][0]["base"]) + self.assertEqual(vol_path, tmpl['disks'][0]['base']) - params = {'name': 'kimchi-vm', - 'template': '/plugins/kimchi/templates/img-tmpl'} + params = { + 'name': 'kimchi-vm', + 'template': '/plugins/kimchi/templates/img-tmpl', + } task = inst.vms_create(params) inst.task_wait(task['id']) rollback.prependDefer(inst.vm_delete, 'kimchi-vm') @@ -345,71 +394,85 @@ def test_image_based_template(self): rollback.prependDefer(inst.vm_poweroff, 'kimchi-vm') info = inst.vm_lookup('kimchi-vm') - self.assertEquals('running', info['state']) + self.assertEqual('running', info['state']) - @unittest.skipUnless(utils.running_as_root() and - os.uname()[4] != "s390x", 'Must be run as root') + @unittest.skipUnless( + utils.running_as_root() and os.uname()[ + 4] != 's390x', 'Must be run as root' + ) def test_vm_graphics(self): inst = model.Model(objstore_loc=self.tmp_store) - params = {'name': 'test', - 'source_media': {'type': 'disk', 'path': UBUNTU_ISO}} + params = {'name': 'test', 'source_media': { + 'type': 'disk', 'path': UBUNTU_ISO}} inst.templates_create(params) with RollbackContext() as rollback: - params = {'name': 'kimchi-graphics', - 'template': '/plugins/kimchi/templates/test'} + params = { + 'name': 'kimchi-graphics', + 'template': '/plugins/kimchi/templates/test', + } task1 = inst.vms_create(params) inst.task_wait(task1['id']) rollback.prependDefer(inst.vm_delete, 'kimchi-graphics') info = inst.vm_lookup('kimchi-graphics') - self.assertEquals('vnc', info['graphics']['type']) - self.assertEquals('127.0.0.1', info['graphics']['listen']) + self.assertEqual('vnc', info['graphics']['type']) + self.assertEqual('127.0.0.1', info['graphics']['listen']) graphics = {'type': 'spice'} params = {'graphics': graphics} inst.vm_update('kimchi-graphics', params) info = inst.vm_lookup('kimchi-graphics') - self.assertEquals('spice', info['graphics']['type']) - self.assertEquals('127.0.0.1', info['graphics']['listen']) + self.assertEqual('spice', info['graphics']['type']) + self.assertEqual('127.0.0.1', info['graphics']['listen']) inst.template_delete('test') @unittest.skipUnless(utils.running_as_root(), 'Must be run as root') def test_vm_virtviewerfile_vmnotrunning(self): inst = model.Model(objstore_loc=self.tmp_store) - params = {'name': 'test', - 'source_media': {'type': 'disk', 'path': UBUNTU_ISO}} + params = {'name': 'test', 'source_media': { + 'type': 'disk', 'path': UBUNTU_ISO}} inst.templates_create(params) vm_name = 'kìmchí-vñç' with RollbackContext() as rollback: - params = {'name': vm_name.decode('utf-8'), + params = {'name': vm_name, 'template': '/plugins/kimchi/templates/test'} task1 = inst.vms_create(params) inst.task_wait(task1['id']) - rollback.prependDefer(inst.vm_delete, vm_name.decode('utf-8')) + rollback.prependDefer(inst.vm_delete, vm_name) - error_msg = "KCHVM0083E" + error_msg = 'KCHVM0083E' with self.assertRaisesRegexp(InvalidOperation, error_msg): vvmodel = VMVirtViewerFileModel(conn=inst.conn) - vvmodel.lookup(vm_name.decode('utf-8')) + vvmodel.lookup(vm_name) inst.template_delete('test') @mock.patch('wok.plugins.kimchi.model.virtviewerfile._get_request_host') - @mock.patch('wok.plugins.kimchi.model.virtviewerfile.' 
- 'VMModel.get_graphics') - @mock.patch('wok.plugins.kimchi.model.virtviewerfile.' - 'FirewallManager.add_vm_graphics_port') - @mock.patch('wok.plugins.kimchi.model.virtviewerfile.' - 'VMVirtViewerFileModel.handleVMShutdownPowerOff') - @mock.patch('wok.plugins.kimchi.model.virtviewerfile.' - 'VMVirtViewerFileModel._check_if_vm_running') - def test_vm_virtviewerfile_vnc(self, mock_vm_running, mock_handleVMOff, - mock_add_port, mock_get_graphics, - mock_get_host): + @mock.patch('wok.plugins.kimchi.model.virtviewerfile.' 'VMModel.get_graphics') + @mock.patch( + 'wok.plugins.kimchi.model.virtviewerfile.' + 'FirewallManager.add_vm_graphics_port' + ) + @mock.patch( + 'wok.plugins.kimchi.model.virtviewerfile.' + 'VMVirtViewerFileModel.handleVMShutdownPowerOff' + ) + @mock.patch( + 'wok.plugins.kimchi.model.virtviewerfile.' + 'VMVirtViewerFileModel._check_if_vm_running' + ) + def test_vm_virtviewerfile_vnc( + self, + mock_vm_running, + mock_handleVMOff, + mock_add_port, + mock_get_graphics, + mock_get_host, + ): mock_get_host.return_value = 'kimchi-test-host' mock_get_graphics.return_value = ['vnc', 'listen', '5999', None] @@ -422,15 +485,14 @@ def test_vm_virtviewerfile_vnc(self, mock_vm_running, mock_handleVMOff, vvfilepath = vvmodel.lookup('kimchi-vm') self.assertEqual( - vvfilepath, - 'plugins/kimchi/data/virtviewerfiles/kimchi-vm-access.vv' + vvfilepath, 'plugins/kimchi/data/virtviewerfiles/kimchi-vm-access.vv' ) - expected_write_content = "[virt-viewer]\ntype=vnc\n"\ - "host=kimchi-test-host\nport=5999\n" - self.assertEqual( - open_().write.mock_calls, [mock.call(expected_write_content)] + expected_write_content = ( + '[virt-viewer]\ntype=vnc\n' 'host=kimchi-test-host\nport=5999\n' ) + self.assertEqual(open_().write.mock_calls, [ + mock.call(expected_write_content)]) mock_get_graphics.assert_called_once_with('kimchi-vm', None) mock_vm_running.assert_called_once_with('kimchi-vm') @@ -438,24 +500,31 @@ def test_vm_virtviewerfile_vnc(self, mock_vm_running, mock_handleVMOff, mock_add_port.assert_called_once_with('kimchi-vm', '5999') @mock.patch('wok.plugins.kimchi.model.virtviewerfile._get_request_host') - @mock.patch('wok.plugins.kimchi.model.virtviewerfile.' - 'VMModel.get_graphics') - @mock.patch('wok.plugins.kimchi.model.virtviewerfile.' - 'FirewallManager.add_vm_graphics_port') - @mock.patch('wok.plugins.kimchi.model.virtviewerfile.' - 'VMVirtViewerFileModel.handleVMShutdownPowerOff') - @mock.patch('wok.plugins.kimchi.model.virtviewerfile.' - 'VMVirtViewerFileModel._check_if_vm_running') - def test_vm_virtviewerfile_spice_passwd(self, mock_vm_running, - mock_handleVMOff, - mock_add_port, - mock_get_graphics, - mock_get_host): + @mock.patch('wok.plugins.kimchi.model.virtviewerfile.' 'VMModel.get_graphics') + @mock.patch( + 'wok.plugins.kimchi.model.virtviewerfile.' + 'FirewallManager.add_vm_graphics_port' + ) + @mock.patch( + 'wok.plugins.kimchi.model.virtviewerfile.' + 'VMVirtViewerFileModel.handleVMShutdownPowerOff' + ) + @mock.patch( + 'wok.plugins.kimchi.model.virtviewerfile.' 
+ 'VMVirtViewerFileModel._check_if_vm_running' + ) + def test_vm_virtviewerfile_spice_passwd( + self, + mock_vm_running, + mock_handleVMOff, + mock_add_port, + mock_get_graphics, + mock_get_host, + ): mock_get_host.return_value = 'kimchi-test-host' mock_get_graphics.return_value = [ - 'spice', 'listen', '6660', 'spicepasswd' - ] + 'spice', 'listen', '6660', 'spicepasswd'] mock_vm_running.return_value = True vvmodel = VMVirtViewerFileModel(conn=None) @@ -465,15 +534,15 @@ def test_vm_virtviewerfile_spice_passwd(self, mock_vm_running, vvfilepath = vvmodel.lookup('kimchi-vm') self.assertEqual( - vvfilepath, - 'plugins/kimchi/data/virtviewerfiles/kimchi-vm-access.vv' + vvfilepath, 'plugins/kimchi/data/virtviewerfiles/kimchi-vm-access.vv' ) - expected_write_content = "[virt-viewer]\ntype=spice\n"\ - "host=kimchi-test-host\nport=6660\npassword=spicepasswd\n" - self.assertEqual( - open_().write.mock_calls, [mock.call(expected_write_content)] + expected_write_content = ( + '[virt-viewer]\ntype=spice\n' + 'host=kimchi-test-host\nport=6660\npassword=spicepasswd\n' ) + self.assertEqual(open_().write.mock_calls, [ + mock.call(expected_write_content)]) mock_get_graphics.assert_called_once_with('kimchi-vm', None) mock_vm_running.assert_called_once_with('kimchi-vm') @@ -482,9 +551,7 @@ def test_vm_virtviewerfile_spice_passwd(self, mock_vm_running, @mock.patch('wok.plugins.kimchi.model.virtviewerfile.run_command') def test_firewall_provider_firewallcmd(self, mock_run_cmd): - mock_run_cmd.side_effect = [ - ['', '', 0], ['', '', 0], ['', '', 0] - ] + mock_run_cmd.side_effect = [['', '', 0], ['', '', 0], ['', '', 0]] fw_manager = FirewallManager() fw_manager.add_vm_graphics_port('vm-name', 5905) @@ -494,15 +561,17 @@ def test_firewall_provider_firewallcmd(self, mock_run_cmd): self.assertEqual(fw_manager.opened_ports, {}) mock_run_cmd.assert_has_calls( - [mock.call(['firewall-cmd', '--state', '-q']), - mock.call(['firewall-cmd', '--add-port=5905/tcp']), - mock.call(['firewall-cmd', '--remove-port=5905/tcp'])]) + [ + mock.call(['firewall-cmd', '--state', '-q']), + mock.call(['firewall-cmd', '--add-port=5905/tcp']), + mock.call(['firewall-cmd', '--remove-port=5905/tcp']), + ] + ) @mock.patch('wok.plugins.kimchi.model.virtviewerfile.run_command') def test_firewall_provider_ufw(self, mock_run_cmd): - mock_run_cmd.side_effect = [ - ['', '', 1], ['', '', 0], ['', '', 0], ['', '', 0] - ] + mock_run_cmd.side_effect = [['', '', 1], [ + '', '', 0], ['', '', 0], ['', '', 0]] fw_manager = FirewallManager() fw_manager.add_vm_graphics_port('vm-name', 5905) @@ -512,16 +581,18 @@ def test_firewall_provider_ufw(self, mock_run_cmd): self.assertEqual(fw_manager.opened_ports, {}) mock_run_cmd.assert_has_calls( - [mock.call(['firewall-cmd', '--state', '-q']), - mock.call(['ufw', 'status']), - mock.call(['ufw', 'allow', '5905/tcp']), - mock.call(['ufw', 'deny', '5905/tcp'])]) + [ + mock.call(['firewall-cmd', '--state', '-q']), + mock.call(['ufw', 'status']), + mock.call(['ufw', 'allow', '5905/tcp']), + mock.call(['ufw', 'deny', '5905/tcp']), + ] + ) @mock.patch('wok.plugins.kimchi.model.virtviewerfile.run_command') def test_firewall_provider_iptables(self, mock_run_cmd): - mock_run_cmd.side_effect = [ - ['', '', 1], ['', '', 1], ['', '', 0], ['', '', 0] - ] + mock_run_cmd.side_effect = [['', '', 1], [ + '', '', 1], ['', '', 0], ['', '', 0]] fw_manager = FirewallManager() fw_manager.add_vm_graphics_port('vm-name', 5905) @@ -530,70 +601,99 @@ def test_firewall_provider_iptables(self, mock_run_cmd): 
fw_manager.remove_vm_graphics_port('vm-name') self.assertEqual(fw_manager.opened_ports, {}) - iptables_add = ['iptables', '-I', 'INPUT', '-p', 'tcp', '--dport', - 5905, '-j', 'ACCEPT'] + iptables_add = [ + 'iptables', + '-I', + 'INPUT', + '-p', + 'tcp', + '--dport', + 5905, + '-j', + 'ACCEPT', + ] - iptables_del = ['iptables', '-D', 'INPUT', '-p', 'tcp', '--dport', - 5905, '-j', 'ACCEPT'] + iptables_del = [ + 'iptables', + '-D', + 'INPUT', + '-p', + 'tcp', + '--dport', + 5905, + '-j', + 'ACCEPT', + ] mock_run_cmd.assert_has_calls( - [mock.call(['firewall-cmd', '--state', '-q']), - mock.call(['ufw', 'status']), - mock.call(iptables_add), mock.call(iptables_del)]) - - @unittest.skipUnless(utils.running_as_root() and - os.uname()[4] != "s390x", 'Must be run as root') - @mock.patch('wok.plugins.kimchi.model.virtviewerfile.' - 'FirewallManager.remove_vm_graphics_port') - @mock.patch('wok.plugins.kimchi.model.virtviewerfile.' - 'FirewallManager.add_vm_graphics_port') - def test_vm_virtviewerfile_vmlifecycle(self, mock_add_port, - mock_remove_port): + [ + mock.call(['firewall-cmd', '--state', '-q']), + mock.call(['ufw', 'status']), + mock.call(iptables_add), + mock.call(iptables_del), + ] + ) + + @unittest.skipUnless( + utils.running_as_root() and os.uname()[ + 4] != 's390x', 'Must be run as root' + ) + @mock.patch( + 'wok.plugins.kimchi.model.virtviewerfile.' + 'FirewallManager.remove_vm_graphics_port' + ) + @mock.patch( + 'wok.plugins.kimchi.model.virtviewerfile.' + 'FirewallManager.add_vm_graphics_port' + ) + def test_vm_virtviewerfile_vmlifecycle(self, mock_add_port, mock_remove_port): inst = model.Model(objstore_loc=self.tmp_store) - params = {'name': 'test', - 'source_media': {'type': 'disk', 'path': UBUNTU_ISO}} + params = {'name': 'test', 'source_media': { + 'type': 'disk', 'path': UBUNTU_ISO}} inst.templates_create(params) vm_name = 'kìmçhí-vñç' with RollbackContext() as rollback: - params = {'name': u'%s' % vm_name.decode('utf-8'), + params = {'name': vm_name, 'template': '/plugins/kimchi/templates/test'} task1 = inst.vms_create(params) inst.task_wait(task1['id']) - rollback.prependDefer(inst.vm_delete, vm_name.decode('utf-8')) + rollback.prependDefer(inst.vm_delete, vm_name) - inst.vm_start(vm_name.decode('utf-8')) + inst.vm_start(vm_name) - graphics_info = VMModel.get_graphics(vm_name.decode('utf-8'), - inst.conn) + graphics_info = VMModel.get_graphics(vm_name, inst.conn) graphics_port = graphics_info[2] vvmodel = VMVirtViewerFileModel(conn=inst.conn) - vvmodel.lookup(vm_name.decode('utf-8')) + vvmodel.lookup(vm_name) - inst.vm_poweroff(vm_name.decode('utf-8')) + inst.vm_poweroff(vm_name) time.sleep(5) - mock_add_port.assert_called_once_with(vm_name.decode('utf-8'), - graphics_port) + mock_add_port.assert_called_once_with(vm_name, graphics_port) mock_remove_port.assert_called_once_with( - base64.b64encode(vm_name) + base64.b64encode(vm_name.encode('utf-8')).decode('utf-8') ) inst.template_delete('test') - @unittest.skipUnless(utils.running_as_root() and - os.uname()[4] != "s390x", "Must be run as root") + @unittest.skipUnless( + utils.running_as_root() and os.uname()[ + 4] != 's390x', 'Must be run as root' + ) def test_vm_serial(self): inst = model.Model(objstore_loc=self.tmp_store) - params = {'name': 'test', - 'source_media': {'type': 'disk', 'path': UBUNTU_ISO}} + params = {'name': 'test', 'source_media': { + 'type': 'disk', 'path': UBUNTU_ISO}} inst.templates_create(params) with RollbackContext() as rollback: - params = {'name': 'kimchi-serial', - 'template': 
'/plugins/kimchi/templates/test'} + params = { + 'name': 'kimchi-serial', + 'template': '/plugins/kimchi/templates/test', + } task1 = inst.vms_create(params) inst.task_wait(task1['id']) rollback.prependDefer(inst.vm_delete, 'kimchi-serial') @@ -610,16 +710,20 @@ def test_vm_serial(self): def test_vm_ifaces(self): inst = model.Model(objstore_loc=self.tmp_store) with RollbackContext() as rollback: - params = {'name': 'test', - 'source_media': {'type': 'disk', 'path': UBUNTU_ISO}} + params = { + 'name': 'test', + 'source_media': {'type': 'disk', 'path': UBUNTU_ISO}, + } inst.templates_create(params) rollback.prependDefer(inst.template_delete, 'test') # Create a network net_name = 'test-network' - net_args = {'name': net_name, - 'connection': 'nat', - 'subnet': '127.0.100.0/24'} + net_args = { + 'name': net_name, + 'connection': 'nat', + 'subnet': '127.0.100.0/24', + } inst.networks_create(net_args) rollback.prependDefer(inst.network_delete, net_name) inst.network_activate(net_name) @@ -633,74 +737,76 @@ def test_vm_ifaces(self): rollback.prependDefer(inst.vm_delete, vm_name) ifaces = inst.vmifaces_get_list(vm_name) - if not os.uname()[4] == "s390x": - self.assertEquals(1, len(ifaces)) + if not os.uname()[4] == 's390x': + self.assertEqual(1, len(ifaces)) iface = inst.vmiface_lookup(vm_name, ifaces[0]) - self.assertEquals(17, len(iface['mac'])) - self.assertEquals("default", iface['network']) - self.assertIn("model", iface) + self.assertEqual(17, len(iface['mac'])) + self.assertEqual('default', iface['network']) + self.assertIn('model', iface) # attach network interface to vm - iface_args = {"type": "network", - "network": "test-network", - "model": "virtio"} + iface_args = { + 'type': 'network', + 'network': 'test-network', + 'model': 'virtio', + } mac = inst.vmifaces_create(vm_name, iface_args) # detach network interface from vm rollback.prependDefer(inst.vmiface_delete, vm_name, mac) - self.assertEquals(17, len(mac)) + self.assertEqual(17, len(mac)) iface = inst.vmiface_lookup(vm_name, mac) - self.assertEquals("network", iface["type"]) - self.assertEquals("test-network", iface['network']) - self.assertEquals("virtio", iface["model"]) + self.assertEqual('network', iface['type']) + self.assertEqual('test-network', iface['network']) + self.assertEqual('virtio', iface['model']) # attach network interface to vm without providing model - iface_args = {"type": "network", - "network": "test-network"} + iface_args = {'type': 'network', 'network': 'test-network'} mac = inst.vmifaces_create(vm_name, iface_args) rollback.prependDefer(inst.vmiface_delete, vm_name, mac) iface = inst.vmiface_lookup(vm_name, mac) - self.assertEquals("network", iface["type"]) - self.assertEquals("test-network", iface['network']) + self.assertEqual('network', iface['type']) + self.assertEqual('test-network', iface['network']) # update vm interface newMacAddr = '54:50:e3:44:8a:af' - iface_args = {"mac": newMacAddr} + iface_args = {'mac': newMacAddr} inst.vmiface_update(vm_name, mac, iface_args) iface = inst.vmiface_lookup(vm_name, newMacAddr) - self.assertEquals(newMacAddr, iface['mac']) + self.assertEqual(newMacAddr, iface['mac']) # undo mac address change - iface_args = {"mac": mac} + iface_args = {'mac': mac} inst.vmiface_update(vm_name, newMacAddr, iface_args) iface = inst.vmiface_lookup(vm_name, mac) - self.assertEquals(mac, iface['mac']) + self.assertEqual(mac, iface['mac']) - if os.uname()[4] == "s390x": + if os.uname()[4] == 's390x': # attach macvtap interface to vm - iface_args = {"type": "macvtap", - "source": 
"test-network", - "mode": "vepa"} + iface_args = { + 'type': 'macvtap', + 'source': 'test-network', + 'mode': 'vepa', + } mac = inst.vmifaces_create(vm_name, iface_args) rollback.prependDefer(inst.vmiface_delete, vm_name, mac) iface = inst.vmiface_lookup(vm_name, mac) - self.assertEquals("macvtap", iface["type"]) - self.assertEquals("test-network", iface['source']) - self.assertEquals("vepa", iface['mode']) + self.assertEqual('macvtap', iface['type']) + self.assertEqual('test-network', iface['source']) + self.assertEqual('vepa', iface['mode']) # attach ovs interface to vm - iface_args = {"type": "ovs", - "source": "test-network"} + iface_args = {'type': 'ovs', 'source': 'test-network'} mac = inst.vmifaces_create(vm_name, iface_args) rollback.prependDefer(inst.vmiface_delete, vm_name, mac) iface = inst.vmiface_lookup(vm_name, mac) - self.assertEquals("ovs", iface["type"]) - self.assertEquals("test-network", iface['source']) + self.assertEqual('ovs', iface['type']) + self.assertEqual('test-network', iface['source']) @unittest.skipUnless(utils.running_as_root(), 'Must be run as root') def test_vm_netboot(self): @@ -711,13 +817,15 @@ def test_vm_netboot(self): inst.templates_create(params) rollback.prependDefer(inst.template_delete, 'test-netboot') - params = {'name': 'kimchi-netboot-vm', - 'template': '/plugins/kimchi/templates/test-netboot'} + params = { + 'name': 'kimchi-netboot-vm', + 'template': '/plugins/kimchi/templates/test-netboot', + } task = inst.vms_create(params) rollback.prependDefer(inst.vm_delete, 'kimchi-netboot-vm') inst.task_wait(task['id'], 10) task = inst.task_lookup(task['id']) - self.assertEquals('finished', task['status']) + self.assertEqual('finished', task['status']) vms = inst.vms_get_list() self.assertTrue('kimchi-netboot-vm' in vms) @@ -725,33 +833,33 @@ def test_vm_netboot(self): inst.vm_start('kimchi-netboot-vm') info = inst.vm_lookup('kimchi-netboot-vm') - self.assertEquals('running', info['state']) + self.assertEqual('running', info['state']) inst.vm_poweroff(u'kimchi-netboot-vm') vms = inst.vms_get_list() self.assertFalse('kimchi-netboot-vm' in vms) - @unittest.skipUnless(utils.running_as_root() and - os.uname()[4] != "s390x", 'Must be run as root') + @unittest.skipUnless( + utils.running_as_root() and os.uname()[ + 4] != 's390x', 'Must be run as root' + ) def test_vm_disk(self): disk_path = os.path.join(TMP_DIR, 'existent2.iso') open(disk_path, 'w').close() modern_disk_bus = osinfo.get_template_default('modern', 'disk_bus') def _attach_disk(expect_bus=modern_disk_bus): - disk_args = {"type": "disk", - "pool": pool, - "vol": vol} + disk_args = {'type': 'disk', 'pool': pool, 'vol': vol} disk = inst.vmstorages_create(vm_name, disk_args) storage_list = inst.vmstorages_get_list(vm_name) - self.assertEquals(prev_count + 1, len(storage_list)) + self.assertEqual(prev_count + 1, len(storage_list)) # Check the bus type to be 'virtio' disk_info = inst.vmstorage_lookup(vm_name, disk) - self.assertEquals(u'disk', disk_info['type']) - self.assertEquals(vol_path, disk_info['path']) - self.assertEquals(expect_bus, disk_info['bus']) + self.assertEqual(u'disk', disk_info['type']) + self.assertEqual(vol_path, disk_info['path']) + self.assertEqual(expect_bus, disk_info['bus']) return disk inst = model.Model(objstore_loc=self.tmp_store) @@ -759,14 +867,12 @@ def _attach_disk(expect_bus=modern_disk_bus): path = os.path.join(TMP_DIR, 'kimchi-images') pool = 'test-pool' vol = 'test-volume.img' - vol_path = "%s/%s" % (path, vol) + vol_path = '%s/%s' % (path, vol) if not 
os.path.exists(path): os.mkdir(path) rollback.prependDefer(shutil.rmtree, path) - args = {'name': pool, - 'path': path, - 'type': 'dir'} + args = {'name': pool, 'path': path, 'type': 'dir'} inst.storagepools_create(args) rollback.prependDefer(inst.storagepool_delete, pool) @@ -774,17 +880,22 @@ def _attach_disk(expect_bus=modern_disk_bus): inst.storagepool_activate(pool) rollback.prependDefer(inst.storagepool_deactivate, pool) - params = {'name': vol, - 'capacity': 1073741824, # 1 GiB - 'allocation': 536870912, # 512 MiB - 'format': 'qcow2'} + params = { + 'name': vol, + 'capacity': 1073741824, # 1 GiB + 'allocation': 536870912, # 512 MiB + 'format': 'qcow2', + } task_id = inst.storagevolumes_create(pool, params)['id'] rollback.prependDefer(inst.storagevolume_delete, pool, vol) inst.task_wait(task_id) vm_name = 'kimchi-cdrom' - params = {'name': 'test', 'disks': [], - 'source_media': {'type': 'disk', 'path': UBUNTU_ISO}} + params = { + 'name': 'test', + 'disks': [], + 'source_media': {'type': 'disk', 'path': UBUNTU_ISO}, + } inst.templates_create(params) rollback.prependDefer(inst.template_delete, 'test') params = {'name': vm_name, @@ -794,12 +905,13 @@ def _attach_disk(expect_bus=modern_disk_bus): rollback.prependDefer(inst.vm_delete, vm_name) prev_count = len(inst.vmstorages_get_list(vm_name)) - self.assertEquals(1, prev_count) + self.assertEqual(1, prev_count) # Volume format with mismatched type raise error - cdrom_args = {"type": "cdrom", "pool": pool, "vol": vol} - self.assertRaises(InvalidParameter, inst.vmstorages_create, - vm_name, cdrom_args) + cdrom_args = {'type': 'cdrom', 'pool': pool, 'vol': vol} + self.assertRaises( + InvalidParameter, inst.vmstorages_create, vm_name, cdrom_args + ) # Cold plug and unplug a disk disk = _attach_disk() @@ -812,28 +924,30 @@ def _attach_disk(expect_bus=modern_disk_bus): # VM disk still there after powered off inst.vm_poweroff(vm_name) disk_info = inst.vmstorage_lookup(vm_name, disk) - self.assertEquals(u'disk', disk_info['type']) + self.assertEqual(u'disk', disk_info['type']) inst.vmstorage_delete(vm_name, disk) # Specifying pool and path at same time will fail - disk_args = {"type": "disk", - "pool": pool, - "vol": vol, - "path": disk_path} + disk_args = {'type': 'disk', 'pool': pool, + 'vol': vol, 'path': disk_path} self.assertRaises( - InvalidParameter, inst.vmstorages_create, vm_name, disk_args) + InvalidParameter, inst.vmstorages_create, vm_name, disk_args + ) old_distro_iso = TMP_DIR + 'rhel4_8.iso' - iso_gen.construct_fake_iso(old_distro_iso, True, '4.8', 'rhel') + construct_fake_iso(old_distro_iso, True, '4.8', 'rhel') vm_name = 'kimchi-ide-bus-vm' - params = {'name': 'old_distro_template', 'disks': [], - 'source_media': {'type': 'disk', 'path': old_distro_iso}} + params = { + 'name': 'old_distro_template', + 'disks': [], + 'source_media': {'type': 'disk', 'path': old_distro_iso}, + } inst.templates_create(params) rollback.prependDefer(inst.template_delete, 'old_distro_template') params = { 'name': vm_name, - 'template': '/plugins/kimchi/templates/old_distro_template' + 'template': '/plugins/kimchi/templates/old_distro_template', } task2 = inst.vms_create(params) inst.task_wait(task2['id']) @@ -853,8 +967,11 @@ def test_vm_cdrom(self): inst = model.Model(objstore_loc=self.tmp_store) with RollbackContext() as rollback: vm_name = 'kimchi-cdrom' - params = {'name': 'test', 'disks': [], - 'source_media': {'type': 'disk', 'path': UBUNTU_ISO}} + params = { + 'name': 'test', + 'disks': [], + 'source_media': {'type': 'disk', 'path': 
UBUNTU_ISO}, + } inst.templates_create(params) rollback.prependDefer(inst.template_delete, 'test') params = {'name': vm_name, @@ -864,7 +981,7 @@ def test_vm_cdrom(self): rollback.prependDefer(inst.vm_delete, vm_name) prev_count = len(inst.vmstorages_get_list(vm_name)) - self.assertEquals(1, prev_count) + self.assertEqual(1, prev_count) # dummy .iso files iso_path = os.path.join(TMP_DIR, 'existent.iso') @@ -876,76 +993,86 @@ def test_vm_cdrom(self): wrong_iso_path = '/nonexistent.iso' # Create a cdrom - cdrom_args = {"type": "cdrom", - "path": iso_path} + cdrom_args = {'type': 'cdrom', 'path': iso_path} cdrom_dev = inst.vmstorages_create(vm_name, cdrom_args) storage_list = inst.vmstorages_get_list(vm_name) - self.assertEquals(prev_count + 1, len(storage_list)) + self.assertEqual(prev_count + 1, len(storage_list)) # Get cdrom info cd_info = inst.vmstorage_lookup(vm_name, cdrom_dev) - self.assertEquals(u'cdrom', cd_info['type']) - self.assertEquals(iso_path, cd_info['path']) + self.assertEqual(u'cdrom', cd_info['type']) + self.assertEqual(iso_path, cd_info['path']) # update path of existing cd with # non existent iso - self.assertRaises(InvalidParameter, inst.vmstorage_update, - vm_name, cdrom_dev, {'path': wrong_iso_path}) + self.assertRaises( + InvalidParameter, + inst.vmstorage_update, + vm_name, + cdrom_dev, + {'path': wrong_iso_path}, + ) # Make sure CD ROM still exists after failure cd_info = inst.vmstorage_lookup(vm_name, cdrom_dev) - self.assertEquals(u'cdrom', cd_info['type']) - self.assertEquals(iso_path, cd_info['path']) + self.assertEqual(u'cdrom', cd_info['type']) + self.assertEqual(iso_path, cd_info['path']) # update path of existing cd with existent iso of shutoff vm inst.vmstorage_update(vm_name, cdrom_dev, {'path': iso_path2}) cdrom_info = inst.vmstorage_lookup(vm_name, cdrom_dev) - self.assertEquals(iso_path2, cdrom_info['path']) + self.assertEqual(iso_path2, cdrom_info['path']) # update path of existing cd with existent iso of running vm inst.vm_start(vm_name) inst.vmstorage_update(vm_name, cdrom_dev, {'path': iso_path}) cdrom_info = inst.vmstorage_lookup(vm_name, cdrom_dev) - self.assertEquals(iso_path, cdrom_info['path']) + self.assertEqual(iso_path, cdrom_info['path']) # eject cdrom cdrom_dev = inst.vmstorage_update(vm_name, cdrom_dev, {'path': ''}) cdrom_info = inst.vmstorage_lookup(vm_name, cdrom_dev) - self.assertEquals('', cdrom_info['path']) + self.assertEqual('', cdrom_info['path']) inst.vm_poweroff(vm_name) # removing non existent cdrom - self.assertRaises(NotFoundError, inst.vmstorage_delete, vm_name, - "fakedev") + self.assertRaises( + NotFoundError, inst.vmstorage_delete, vm_name, 'fakedev') # removing valid cdrom inst.vmstorage_delete(vm_name, cdrom_dev) storage_list = inst.vmstorages_get_list(vm_name) - self.assertEquals(prev_count, len(storage_list)) + self.assertEqual(prev_count, len(storage_list)) # Create a new cdrom using a remote iso valid_remote_iso_path = get_remote_iso_path() - cdrom_args = {"type": "cdrom", - "path": valid_remote_iso_path} + cdrom_args = {'type': 'cdrom', 'path': valid_remote_iso_path} cdrom_dev = inst.vmstorages_create(vm_name, cdrom_args) storage_list = inst.vmstorages_get_list(vm_name) - self.assertEquals(prev_count + 1, len(storage_list)) + self.assertEqual(prev_count + 1, len(storage_list)) # Update remote-backed cdrom with the same ISO - inst.vmstorage_update(vm_name, cdrom_dev, - {'path': valid_remote_iso_path}) + inst.vmstorage_update(vm_name, cdrom_dev, { + 'path': valid_remote_iso_path}) cdrom_info = 
inst.vmstorage_lookup(vm_name, cdrom_dev) - cur_cdrom_path = re.sub(":80/", '/', cdrom_info['path']) - self.assertEquals(valid_remote_iso_path, cur_cdrom_path) + cur_cdrom_path = re.sub(':80/', '/', cdrom_info['path']) + self.assertEqual(valid_remote_iso_path, cur_cdrom_path) @unittest.skipUnless(utils.running_as_root(), 'Must be run as root') def test_vm_storage_provisioning(self): inst = model.Model(objstore_loc=self.tmp_store) with RollbackContext() as rollback: - params = {'name': 'test', 'disks': [{'size': 1, 'pool': { - 'name': '/plugins/kimchi/storagepools/default'}}], - 'source_media': {'type': 'disk', 'path': UBUNTU_ISO}} + params = { + 'name': 'test', + 'disks': [ + { + 'size': 1, + 'pool': {'name': '/plugins/kimchi/storagepools/default'}, + } + ], + 'source_media': {'type': 'disk', 'path': UBUNTU_ISO}, + } inst.templates_create(params) rollback.prependDefer(inst.template_delete, 'test') @@ -958,22 +1085,27 @@ def test_vm_storage_provisioning(self): vm_info = inst.vm_lookup(params['name']) disk_path = '%s/%s-0.img' % ( - inst.storagepool_lookup('default')['path'], vm_info['uuid']) + inst.storagepool_lookup('default')['path'], + vm_info['uuid'], + ) self.assertTrue(os.access(disk_path, os.F_OK)) self.assertFalse(os.access(disk_path, os.F_OK)) def _create_template_conf_with_disk_format(self, vol_format): if vol_format is None: - conf_file_data = "[main]\n\n[storage]\n\n[[disk.0]]\n" \ - "#format = \n\n[graphics]\n\n[processor]\n" + conf_file_data = ( + '[main]\n\n[storage]\n\n[[disk.0]]\n' + '#format = \n\n[graphics]\n\n[processor]\n' + ) else: - conf_file_data = "[main]\n\n[storage]\n\n[[disk.0]]\n" \ - "format = %s\n\n[graphics]\n\n[processor]\n"\ - % vol_format + conf_file_data = ( + '[main]\n\n[storage]\n\n[[disk.0]]\n' + 'format = %s\n\n[graphics]\n\n[processor]\n' % vol_format + ) config_file = os.path.join(paths.sysconf_dir, 'template.conf') - config_bkp_file = \ - os.path.join(paths.sysconf_dir, 'template.conf-unit_test_bkp') + config_bkp_file = os.path.join( + paths.sysconf_dir, 'template.conf-unit_test_bkp') os.rename(config_file, config_bkp_file) @@ -984,8 +1116,8 @@ def _create_template_conf_with_disk_format(self, vol_format): def _restore_template_conf_file(self): config_file = os.path.join(paths.sysconf_dir, 'template.conf') - config_bkp_file = \ - os.path.join(paths.sysconf_dir, 'template.conf-unit_test_bkp') + config_bkp_file = os.path.join( + paths.sysconf_dir, 'template.conf-unit_test_bkp') os.rename(config_bkp_file, config_file) osinfo.defaults = osinfo._get_tmpl_defaults() @@ -1003,9 +1135,16 @@ def test_template_get_default_vol_format_from_conf(self): self._create_template_conf_with_disk_format('vmdk') rollback.prependDefer(self._restore_template_conf_file) - params = {'name': 'test', 'disks': [{'size': 1, 'pool': { - 'name': '/plugins/kimchi/storagepools/default'}}], - 'source_media': {'type': 'disk', 'path': UBUNTU_ISO}} + params = { + 'name': 'test', + 'disks': [ + { + 'size': 1, + 'pool': {'name': '/plugins/kimchi/storagepools/default'}, + } + ], + 'source_media': {'type': 'disk', 'path': UBUNTU_ISO}, + } inst.templates_create(params) rollback.prependDefer(inst.template_delete, 'test') @@ -1016,8 +1155,7 @@ def test_template_get_default_vol_format_from_conf(self): rollback.prependDefer(inst.vm_delete, 'test-vm-1') created_disk_format = self._get_disk_format_from_vm( - 'test-vm-1', inst.conn - ) + 'test-vm-1', inst.conn) self.assertEqual(created_disk_format, 'vmdk') @unittest.skipUnless(utils.running_as_root(), 'Must be run as root') @@ -1030,10 +1168,17 @@ 
def test_template_creates_user_defined_vol_format_instead_default(self): self._create_template_conf_with_disk_format(default_vol) rollback.prependDefer(self._restore_template_conf_file) - params = {'name': 'test', 'disks': [{ - 'size': 1, 'format': user_vol, - 'pool': {'name': '/plugins/kimchi/storagepools/default'}}], - 'source_media': {'type': 'disk', 'path': UBUNTU_ISO}} + params = { + 'name': 'test', + 'disks': [ + { + 'size': 1, + 'format': user_vol, + 'pool': {'name': '/plugins/kimchi/storagepools/default'}, + } + ], + 'source_media': {'type': 'disk', 'path': UBUNTU_ISO}, + } inst.templates_create(params) rollback.prependDefer(inst.template_delete, 'test') @@ -1045,8 +1190,7 @@ def test_template_creates_user_defined_vol_format_instead_default(self): rollback.prependDefer(inst.vm_delete, 'test-vm-1') created_disk_format = self._get_disk_format_from_vm( - 'test-vm-1', inst.conn - ) + 'test-vm-1', inst.conn) self.assertEqual(created_disk_format, user_vol) @unittest.skipUnless(utils.running_as_root(), 'Must be run as root') @@ -1057,9 +1201,16 @@ def test_template_uses_qcow2_format_if_no_user_or_default_defined(self): self._create_template_conf_with_disk_format(None) rollback.prependDefer(self._restore_template_conf_file) - params = {'name': 'test', 'disks': [{'size': 1, 'pool': { - 'name': '/plugins/kimchi/storagepools/default'}}], - 'source_media': {'type': 'disk', 'path': UBUNTU_ISO}} + params = { + 'name': 'test', + 'disks': [ + { + 'size': 1, + 'pool': {'name': '/plugins/kimchi/storagepools/default'}, + } + ], + 'source_media': {'type': 'disk', 'path': UBUNTU_ISO}, + } inst.templates_create(params) rollback.prependDefer(inst.template_delete, 'test') @@ -1070,60 +1221,66 @@ def test_template_uses_qcow2_format_if_no_user_or_default_defined(self): rollback.prependDefer(inst.vm_delete, 'test-vm-1') created_disk_format = self._get_disk_format_from_vm( - 'test-vm-1', inst.conn - ) + 'test-vm-1', inst.conn) self.assertEqual(created_disk_format, 'qcow2') def test_vm_memory_hotplug(self): - config.set("authentication", "method", "pam") + config.set('authentication', 'method', 'pam') inst = model.Model(None, objstore_loc=self.tmp_store) - orig_params = {'name': 'test', - 'memory': {'current': 1024, - 'maxmemory': 4096 - if os.uname()[4] != "s390x" else 2048}, - 'source_media': {'type': 'disk', 'path': UBUNTU_ISO}} + orig_params = { + 'name': 'test', + 'memory': { + 'current': 1024, + 'maxmemory': 4096 if os.uname()[4] != 's390x' else 2048, + }, + 'source_media': {'type': 'disk', 'path': UBUNTU_ISO}, + } inst.templates_create(orig_params) with RollbackContext() as rollback: - params = {'name': 'kimchi-vm1', - 'template': '/plugins/kimchi/templates/test'} + params = { + 'name': 'kimchi-vm1', + 'template': '/plugins/kimchi/templates/test', + } task1 = inst.vms_create(params) inst.task_wait(task1['id']) - rollback.prependDefer(utils.rollback_wrapper, inst.vm_delete, - 'kimchi-vm1') + rollback.prependDefer(utils.rollback_wrapper, + inst.vm_delete, 'kimchi-vm1') # Start vm inst.vm_start('kimchi-vm1') - rollback.prependDefer(utils.rollback_wrapper, inst.vm_poweroff, - 'kimchi-vm1') + rollback.prependDefer( + utils.rollback_wrapper, inst.vm_poweroff, 'kimchi-vm1' + ) # Hotplug memory, only available in Libvirt >= 1.2.14 params = {'memory': {'current': 2048}} if inst.capabilities_lookup()['mem_hotplug_support']: inst.vm_update('kimchi-vm1', params) - rollback.prependDefer(utils.rollback_wrapper, inst.vm_delete, - 'kimchi-vm1') + rollback.prependDefer( + utils.rollback_wrapper, inst.vm_delete, 
'kimchi-vm1' + ) params['memory']['maxmemory'] = 4096 - self.assertEquals(params['memory'], - inst.vm_lookup('kimchi-vm1')['memory']) + self.assertEqual( + params['memory'], inst.vm_lookup('kimchi-vm1')['memory'] + ) params['memory']['current'] = 4096 del params['memory']['maxmemory'] inst.vm_update('kimchi-vm1', params) vm = inst.vm_lookup('kimchi-vm1') - self.assertEquals(4096, vm['memory']['current']) + self.assertEqual(4096, vm['memory']['current']) # Test memory devices conn = inst.conn.get() xml = conn.lookupByName('kimchi-vm1').XMLDesc() root = ET.fromstring(xml) devs = root.findall('./devices/memory/target/size') - self.assertEquals(2, len(devs)) + self.assertEqual(2, len(devs)) totMemDevs = 0 for size in devs: totMemDevs += convert_data_size(size.text, - size.get('unit'), - 'MiB') - self.assertEquals(3072, totMemDevs) + size.get('unit'), 'MiB') + self.assertEqual(3072, totMemDevs) inst.vm_poweroff('kimchi-vm1') # Remove all devs: @@ -1132,7 +1289,7 @@ def test_vm_memory_hotplug(self): xml = conn.lookupByName('kimchi-vm1').XMLDesc() root = ET.fromstring(xml) devs = root.findall('./devices/memory') - self.assertEquals(0, len(devs)) + self.assertEqual(0, len(devs)) # Hotplug 1G DIMM , 512M , 256M and 256M inst.vm_start('kimchi-vm1') @@ -1146,18 +1303,17 @@ def test_vm_memory_hotplug(self): inst.vm_update('kimchi-vm1', params) vm = inst.vm_lookup('kimchi-vm1') - self.assertEquals(3072, vm['memory']['current']) + self.assertEqual(3072, vm['memory']['current']) xml = conn.lookupByName('kimchi-vm1').XMLDesc() root = ET.fromstring(xml) devs = root.findall('./devices/memory/target/size') - self.assertEquals(4, len(devs)) + self.assertEqual(4, len(devs)) totMemDevs = 0 for size in devs: totMemDevs += convert_data_size(size.text, - size.get('unit'), - 'MiB') - self.assertEquals(2048, totMemDevs) + size.get('unit'), 'MiB') + self.assertEqual(2048, totMemDevs) inst.vm_poweroff('kimchi-vm1') # Remove 2x256M + 1x512M ... then sum 256M to virtual memory @@ -1166,22 +1322,22 @@ def test_vm_memory_hotplug(self): xml = conn.lookupByName('kimchi-vm1').XMLDesc() root = ET.fromstring(xml) devs = root.findall('./devices/memory/target/size') - self.assertEquals(1, len(devs)) + self.assertEqual(1, len(devs)) totMemDevs = 0 for size in devs: totMemDevs += convert_data_size(size.text, - size.get('unit'), - 'MiB') - self.assertEquals(1024, totMemDevs) + size.get('unit'), 'MiB') + self.assertEqual(1024, totMemDevs) else: - self.assertRaises(InvalidOperation, inst.vm_update, - 'kimchi-vm1', params) + self.assertRaises( + InvalidOperation, inst.vm_update, 'kimchi-vm1', params + ) - msg = "Memory hotplug in non-numa guests only for PowerPC arch." + msg = 'Memory hotplug in non-numa guests only for PowerPC arch.' 
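# Illustrative sketch (not part of this patch): the hotplug assertions above
# walk the live domain XML and sum the <memory>/<target>/<size> devices with
# convert_data_size. The same check, reduced to a self-contained helper
# (assumes an open libvirt connection `conn` and an existing guest; unit
# handling is simplified):
import xml.etree.ElementTree as ET

def hotplugged_memory_mib(conn, guest):
    """Return the total size, in MiB, of a guest's hotplugged memory devices."""
    root = ET.fromstring(conn.lookupByName(guest).XMLDesc())
    to_mib = {'KiB': 1.0 / 1024, 'MiB': 1, 'GiB': 1024}
    return sum(int(s.text) * to_mib.get(s.get('unit', 'KiB'), 1)
               for s in root.findall('./devices/memory/target/size'))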
@unittest.skipUnless(('ppc64' in os.uname()[4]), msg) def test_non_numa_vm_memory_hotplug(self): - config.set("authentication", "method", "pam") + config.set('authentication', 'method', 'pam') inst = model.Model(None, objstore_loc=self.tmp_store) conn = inst.conn.get() vm = 'non-numa-kimchi-test' @@ -1196,61 +1352,76 @@ def test_non_numa_vm_memory_hotplug(self): # Hotplug memory params = {'memory': {'current': 3072}} inst.vm_update(vm, params) - self.assertEquals(params['memory']['current'], - inst.vm_lookup(vm)['memory']['current']) + self.assertEqual( + params['memory']['current'], inst.vm_lookup( + vm)['memory']['current'] + ) # Test number and size of memory device added root = ET.fromstring(conn.lookupByName(vm).XMLDesc()) devs = root.findall('./devices/memory/target/size') - self.assertEquals(1, len(devs)) - self.assertEquals(2048 << 10, int(devs[0].text)) + self.assertEqual(1, len(devs)) + self.assertEqual(2048 << 10, int(devs[0].text)) params = {'memory': {'current': 4096}} inst.vm_update(vm, params) - self.assertEquals(params['memory']['current'], - inst.vm_lookup(vm)['memory']['current']) + self.assertEqual( + params['memory']['current'], inst.vm_lookup( + vm)['memory']['current'] + ) # Test number and size of memory device added root = ET.fromstring(conn.lookupByName(vm).XMLDesc()) devs = root.findall('./devices/memory/target/size') - self.assertEquals(2, len(devs)) - self.assertEquals(1024 << 10, int(devs[1].text)) - self.assertEquals(3072 << 10, - int(devs[0].text) + int(devs[1].text)) + self.assertEqual(2, len(devs)) + self.assertEqual(1024 << 10, int(devs[1].text)) + self.assertEqual(3072 << 10, int(devs[0].text) + int(devs[1].text)) # Stop vm and test persistence inst.vm_poweroff(vm) - self.assertEquals(params['memory']['current'], - inst.vm_lookup(vm)['memory']['current']) + self.assertEqual( + params['memory']['current'], inst.vm_lookup( + vm)['memory']['current'] + ) def test_vm_edit(self): - config.set("authentication", "method", "pam") - inst = model.Model(None, - objstore_loc=self.tmp_store) + config.set('authentication', 'method', 'pam') + inst = model.Model(None, objstore_loc=self.tmp_store) # template disk format must be qcow2 because vmsnapshot # only supports this format orig_params = { - 'name': 'test', 'memory': {'current': 1024, 'maxmemory': 2048}, + 'name': 'test', + 'memory': {'current': 1024, 'maxmemory': 2048}, 'cpu_info': {'vcpus': 1}, 'source_media': {'type': 'disk', 'path': UBUNTU_ISO}, - 'disks': [{'size': 1, 'format': 'qcow2', 'pool': { - 'name': '/plugins/kimchi/storagepools/default'}}]} + 'disks': [ + { + 'size': 1, + 'format': 'qcow2', + 'pool': {'name': '/plugins/kimchi/storagepools/default'}, + } + ], + } inst.templates_create(orig_params) with RollbackContext() as rollback: - params_1 = {'name': 'kimchi-vm1', - 'template': '/plugins/kimchi/templates/test'} - params_2 = {'name': 'kimchi-vm2', - 'template': '/plugins/kimchi/templates/test'} + params_1 = { + 'name': 'kimchi-vm1', + 'template': '/plugins/kimchi/templates/test', + } + params_2 = { + 'name': 'kimchi-vm2', + 'template': '/plugins/kimchi/templates/test', + } task1 = inst.vms_create(params_1) inst.task_wait(task1['id']) - rollback.prependDefer(utils.rollback_wrapper, inst.vm_delete, - 'kimchi-vm1') + rollback.prependDefer(utils.rollback_wrapper, + inst.vm_delete, 'kimchi-vm1') task2 = inst.vms_create(params_2) inst.task_wait(task2['id']) - rollback.prependDefer(utils.rollback_wrapper, inst.vm_delete, - 'kimchi-vm2') + rollback.prependDefer(utils.rollback_wrapper, + inst.vm_delete, 
'kimchi-vm2') vms = inst.vms_get_list() self.assertTrue('kimchi-vm1' in vms) @@ -1258,170 +1429,204 @@ def test_vm_edit(self): # make sure "vm_update" works when the domain has a snapshot inst.vmsnapshots_create(u'kimchi-vm1') - if os.uname()[4] != "s390x": + if os.uname()[4] != 's390x': # update vm graphics when vm is not running - inst.vm_update(u'kimchi-vm1', - {"graphics": {"passwd": "123456"}}) + inst.vm_update( + u'kimchi-vm1', {'graphics': {'passwd': '123456'}}) inst.vm_start('kimchi-vm1') - rollback.prependDefer(utils.rollback_wrapper, inst.vm_poweroff, - 'kimchi-vm1') + rollback.prependDefer( + utils.rollback_wrapper, inst.vm_poweroff, 'kimchi-vm1' + ) - vm_info = inst.vm_lookup(u'kimchi-vm1') - self.assertEquals('123456', vm_info['graphics']["passwd"]) - self.assertEquals(None, vm_info['graphics']["passwdValidTo"]) + vm_info = inst.vm_lookup('kimchi-vm1') + self.assertEqual('123456', vm_info['graphics']['passwd']) + self.assertEqual(None, vm_info['graphics']['passwdValidTo']) # update vm graphics when vm is running - inst.vm_update(u'kimchi-vm1', - {"graphics": {"passwd": "abcdef", - "passwdValidTo": 20}}) - vm_info = inst.vm_lookup(u'kimchi-vm1') - self.assertEquals('abcdef', vm_info['graphics']["passwd"]) - self.assertGreaterEqual(20, - vm_info['graphics']['passwdValidTo']) + inst.vm_update( + 'kimchi-vm1', + {'graphics': {'passwd': 'abcdef', 'passwdValidTo': 20}}, + ) + vm_info = inst.vm_lookup('kimchi-vm1') + self.assertEqual('abcdef', vm_info['graphics']['passwd']) + self.assertGreaterEqual( + 20, vm_info['graphics']['passwdValidTo']) info = inst.vm_lookup('kimchi-vm1') - self.assertEquals('running', info['state']) + self.assertEqual('running', info['state']) params = {'name': 'new-vm'} - self.assertRaises(InvalidParameter, inst.vm_update, - 'kimchi-vm1', params) + self.assertRaises( + InvalidParameter, inst.vm_update, 'kimchi-vm1', params + ) else: inst.vm_start('kimchi-vm1') # change VM users and groups, when wm is running. - inst.vm_update(u'kimchi-vm1', - {'users': ['root'], 'groups': ['root']}) + inst.vm_update( + u'kimchi-vm1', {'users': ['root'], 'groups': ['root']}) vm_info = inst.vm_lookup(u'kimchi-vm1') - self.assertEquals(['root'], vm_info['users']) - self.assertEquals(['root'], vm_info['groups']) + self.assertEqual(['root'], vm_info['users']) + self.assertEqual(['root'], vm_info['groups']) # change VM users and groups by removing all elements, # when vm is running. 
inst.vm_update(u'kimchi-vm1', {'users': [], 'groups': []}) vm_info = inst.vm_lookup(u'kimchi-vm1') - self.assertEquals([], vm_info['users']) - self.assertEquals([], vm_info['groups']) + self.assertEqual([], vm_info['users']) + self.assertEqual([], vm_info['groups']) # power off vm inst.vm_poweroff('kimchi-vm1') - self.assertRaises(OperationFailed, inst.vm_update, - 'kimchi-vm1', {'name': 'kimchi-vm2'}) + self.assertRaises( + OperationFailed, inst.vm_update, 'kimchi-vm1', { + 'name': 'kimchi-vm2'} + ) # update maxvcpus only inst.vm_update(u'kimchi-vm1', {'cpu_info': {'maxvcpus': 8}}) vm_info = inst.vm_lookup(u'kimchi-vm1') - self.assertEquals(8, vm_info['cpu_info']['maxvcpus']) + self.assertEqual(8, vm_info['cpu_info']['maxvcpus']) # update vcpus only inst.vm_update(u'kimchi-vm1', {'cpu_info': {'vcpus': 4}}) vm_info = inst.vm_lookup(u'kimchi-vm1') - self.assertEquals(4, vm_info['cpu_info']['vcpus']) + self.assertEqual(4, vm_info['cpu_info']['vcpus']) # vcpus > maxvcpus: failure - self.assertRaises(InvalidParameter, inst.vm_update, u'kimchi-vm1', - {'cpu_info': {'vcpus': 10}}) + self.assertRaises( + InvalidParameter, + inst.vm_update, + u'kimchi-vm1', + {'cpu_info': {'vcpus': 10}}, + ) # define CPU topology - inst.vm_update(u'kimchi-vm1', {'cpu_info': {'topology': { - 'sockets': 2, 'cores': 2, 'threads': 2}}}) + inst.vm_update( + u'kimchi-vm1', + {'cpu_info': {'topology': {'sockets': 2, 'cores': 2, 'threads': 2}}}, + ) vm_info = inst.vm_lookup(u'kimchi-vm1') - self.assertEquals({'sockets': 2, 'cores': 2, 'threads': 2}, - vm_info['cpu_info']['topology']) + self.assertEqual( + {'sockets': 2, 'cores': 2, 'threads': 2}, + vm_info['cpu_info']['topology'], + ) # vcpus not a multiple of threads - self.assertRaises(InvalidParameter, inst.vm_update, u'kimchi-vm1', - {'cpu_info': {'vcpus': 5}}) + self.assertRaises( + InvalidParameter, + inst.vm_update, + u'kimchi-vm1', + {'cpu_info': {'vcpus': 5}}, + ) # maxvcpus different of (sockets * cores * threads) - self.assertRaises(InvalidParameter, inst.vm_update, u'kimchi-vm1', - {'cpu_info': {'maxvcpus': 4}}) + self.assertRaises( + InvalidParameter, + inst.vm_update, + u'kimchi-vm1', + {'cpu_info': {'maxvcpus': 4}}, + ) # topology does not match maxvcpus (8 != 3 * 2 * 2) - self.assertRaises(InvalidParameter, inst.vm_update, u'kimchi-vm1', - {'cpu_info': {'topology': { - 'sockets': 3, 'cores': 2, 'threads': 2}}}) + self.assertRaises( + InvalidParameter, + inst.vm_update, + u'kimchi-vm1', + {'cpu_info': {'topology': {'sockets': 3, 'cores': 2, 'threads': 2}}}, + ) # undefine CPU topology inst.vm_update(u'kimchi-vm1', {'cpu_info': {'topology': {}}}) vm_info = inst.vm_lookup(u'kimchi-vm1') - self.assertEquals({}, vm_info['cpu_info']['topology']) + self.assertEqual({}, vm_info['cpu_info']['topology']) # reduce maxvcpus to same as vcpus inst.vm_update(u'kimchi-vm1', {'cpu_info': {'maxvcpus': 4}}) vm_info = inst.vm_lookup(u'kimchi-vm1') - self.assertEquals(4, vm_info['cpu_info']['maxvcpus']) + self.assertEqual(4, vm_info['cpu_info']['maxvcpus']) # rename and increase memory when vm is not running - params = {'name': u'пeω-∨м', - 'memory': {'current': 2048}} + params = {'name': u'пeω-∨м', 'memory': {'current': 2048}} inst.vm_update('kimchi-vm1', params) - rollback.prependDefer(utils.rollback_wrapper, inst.vm_delete, - u'пeω-∨м') - self.assertEquals(vm_info['uuid'], - inst.vm_lookup(u'пeω-∨м')['uuid']) + rollback.prependDefer(utils.rollback_wrapper, + inst.vm_delete, u'пeω-∨м') + self.assertEqual( + vm_info['uuid'], inst.vm_lookup(u'пeω-∨м')['uuid']) info = 
inst.vm_lookup(u'пeω-∨м') # Max memory is returned, add to test params['memory']['maxmemory'] = 2048 for key in params.keys(): - self.assertEquals(params[key], info[key]) + self.assertEqual(params[key], info[key]) # change only VM users - groups are not changed (default is empty) users = inst.users_get_list()[:3] inst.vm_update(u'пeω-∨м', {'users': users}) - self.assertEquals(users, inst.vm_lookup(u'пeω-∨м')['users']) - self.assertEquals([], inst.vm_lookup(u'пeω-∨м')['groups']) + self.assertEqual(users, inst.vm_lookup(u'пeω-∨м')['users']) + self.assertEqual([], inst.vm_lookup(u'пeω-∨м')['groups']) # change only VM groups - users are not changed (default is empty) groups = inst.groups_get_list()[:2] inst.vm_update(u'пeω-∨м', {'groups': groups}) - self.assertEquals(users, inst.vm_lookup(u'пeω-∨м')['users']) - self.assertEquals(groups, inst.vm_lookup(u'пeω-∨м')['groups']) + self.assertEqual(users, inst.vm_lookup(u'пeω-∨м')['users']) + self.assertEqual(groups, inst.vm_lookup(u'пeω-∨м')['groups']) # change VM users and groups by adding a new element to each one users.append(pwd.getpwuid(os.getuid()).pw_name) groups.append(grp.getgrgid(os.getgid()).gr_name) inst.vm_update(u'пeω-∨м', {'users': users, 'groups': groups}) - self.assertEquals(users, inst.vm_lookup(u'пeω-∨м')['users']) - self.assertEquals(groups, inst.vm_lookup(u'пeω-∨м')['groups']) + self.assertEqual(users, inst.vm_lookup(u'пeω-∨м')['users']) + self.assertEqual(groups, inst.vm_lookup(u'пeω-∨м')['groups']) # change VM users (wrong value) and groups # when an error occurs, everything fails and nothing is changed - self.assertRaises(InvalidParameter, inst.vm_update, u'пeω-∨м', - {'users': ['userdoesnotexist'], 'groups': []}) - self.assertEquals(users, inst.vm_lookup(u'пeω-∨м')['users']) - self.assertEquals(groups, inst.vm_lookup(u'пeω-∨м')['groups']) + self.assertRaises( + InvalidParameter, + inst.vm_update, + u'пeω-∨м', + {'users': ['userdoesnotexist'], 'groups': []}, + ) + self.assertEqual(users, inst.vm_lookup(u'пeω-∨м')['users']) + self.assertEqual(groups, inst.vm_lookup(u'пeω-∨м')['groups']) # change VM users and groups (wrong value) # when an error occurs, everything fails and nothing is changed - self.assertRaises(InvalidParameter, inst.vm_update, u'пeω-∨м', - {'users': [], 'groups': ['groupdoesnotexist']}) - self.assertEquals(users, inst.vm_lookup(u'пeω-∨м')['users']) - self.assertEquals(groups, inst.vm_lookup(u'пeω-∨м')['groups']) + self.assertRaises( + InvalidParameter, + inst.vm_update, + u'пeω-∨м', + {'users': [], 'groups': ['groupdoesnotexist']}, + ) + self.assertEqual(users, inst.vm_lookup(u'пeω-∨м')['users']) + self.assertEqual(groups, inst.vm_lookup(u'пeω-∨м')['groups']) # change VM users and groups by removing all elements inst.vm_update(u'пeω-∨м', {'users': [], 'groups': []}) - self.assertEquals([], inst.vm_lookup(u'пeω-∨м')['users']) - self.assertEquals([], inst.vm_lookup(u'пeω-∨м')['groups']) + self.assertEqual([], inst.vm_lookup(u'пeω-∨м')['users']) + self.assertEqual([], inst.vm_lookup(u'пeω-∨м')['groups']) # change bootorder - b_order = ["hd", "network", "cdrom"] - inst.vm_update(u'пeω-∨м', {"bootorder": b_order}) - self.assertEquals(b_order, inst.vm_lookup(u'пeω-∨м')['bootorder']) + b_order = ['hd', 'network', 'cdrom'] + inst.vm_update(u'пeω-∨м', {'bootorder': b_order}) + self.assertEqual(b_order, inst.vm_lookup(u'пeω-∨м')['bootorder']) # try to add empty list - self.assertRaises(OperationFailed, inst.vm_update, u'пeω-∨м', - {"bootorder": [""]}) + self.assertRaises( + OperationFailed, inst.vm_update, u'пeω-∨м', 
{'bootorder': ['']} + ) # try to pass invalid parameter - self.assertRaises(OperationFailed, inst.vm_update, u'пeω-∨м', - {"bootorder": ["bla"]}) + self.assertRaises( + OperationFailed, inst.vm_update, u'пeω-∨м', { + 'bootorder': ['bla']} + ) # enable/disable bootmenu - inst.vm_update(u'пeω-∨м', {"bootmenu": True}) - self.assertEquals("yes", inst.vm_lookup(u'пeω-∨м')['bootmenu']) - inst.vm_update(u'пeω-∨м', {"bootmenu": False}) - self.assertEquals("no", inst.vm_lookup(u'пeω-∨м')['bootmenu']) + inst.vm_update(u'пeω-∨м', {'bootmenu': True}) + self.assertEqual('yes', inst.vm_lookup(u'пeω-∨м')['bootmenu']) + inst.vm_update(u'пeω-∨м', {'bootmenu': False}) + self.assertEqual('no', inst.vm_lookup(u'пeω-∨м')['bootmenu']) def test_get_vm_cpu_cores(self): xml = """\ @@ -1446,7 +1651,7 @@ def test_get_vm_cpu_threads(self): @mock.patch('wok.plugins.kimchi.model.vms.VMModel.has_topology') def test_get_vm_cpu_topology(self, mock_has_topology): - class FakeDom(): + class FakeDom: def XMLDesc(self, flag): return """\ \ @@ -1464,7 +1669,7 @@ def name(self): @mock.patch('wok.plugins.kimchi.model.vms.VMModel.has_topology') def test_get_vm_cpu_topology_blank(self, mock_has_topology): - class FakeDom(): + class FakeDom: def XMLDesc(self, flag): return """""" @@ -1482,12 +1687,12 @@ def test_vm_cpu_hotplug_invalidparam_fail(self): inst = model.Model(None, objstore_loc=self.tmp_store) with self.assertRaisesRegexp(InvalidParameter, 'KCHCPUHOTP0001E'): - params = {"cpu_info": {"vcpus": 1, 'maxvcpus': 4}} + params = {'cpu_info': {'vcpus': 1, 'maxvcpus': 4}} inst.vm_cpu_hotplug_precheck('', params) @mock.patch('wok.plugins.kimchi.model.vms.VMModel.has_topology') def test_vm_cpu_hotplug_abovemax_fail(self, mock_has_topology): - class FakeDom(): + class FakeDom: def XMLDesc(self, flag): return """\ 8<\ @@ -1500,14 +1705,15 @@ def name(self): inst = model.Model(None, objstore_loc=self.tmp_store) with self.assertRaisesRegexp(InvalidParameter, 'KCHCPUINF0001E'): - params = {"cpu_info": {"vcpus": 16}} + params = {'cpu_info': {'vcpus': 16}} inst.vm_cpu_hotplug_precheck(FakeDom(), params) @mock.patch('wok.plugins.kimchi.model.vms.VMModel.has_topology') @mock.patch('wok.plugins.kimchi.model.vms.VMModel.get_vm_cpu_topology') - def test_vm_cpu_hotplug_topology_mismatch_fail(self, mock_topology, - mock_has_topology): - class FakeDom(): + def test_vm_cpu_hotplug_topology_mismatch_fail( + self, mock_topology, mock_has_topology + ): + class FakeDom: def XMLDesc(self, flag): return """\ 48<\ @@ -1522,11 +1728,11 @@ def name(self): inst = model.Model(None, objstore_loc=self.tmp_store) with self.assertRaisesRegexp(InvalidParameter, 'KCHCPUINF0005E'): - params = {"cpu_info": {"vcpus": 10}} + params = {'cpu_info': {'vcpus': 10}} inst.vm_cpu_hotplug_precheck(FakeDom(), params) def test_vm_cpu_hotplug_error(self): - class FakeDom(): + class FakeDom: def setVcpusFlags(self, vcpu, flags): raise libvirt.libvirtError('') @@ -1535,20 +1741,19 @@ def setVcpusFlags(self, vcpu, flags): inst.vm_update_cpu_live(FakeDom(), '') # enable/disable autostart - inst.vm_update(u'пeω-∨м', {"autostart": True}) - self.assertEquals(1, inst.vm_lookup(u'пeω-∨м')['autostart']) - inst.vm_update(u'пeω-∨м', {"autostart": False}) - self.assertEquals(0, inst.vm_lookup(u'пeω-∨м')['autostart']) + inst.vm_update(u'пeω-∨м', {'autostart': True}) + self.assertEqual(1, inst.vm_lookup(u'пeω-∨м')['autostart']) + inst.vm_update(u'пeω-∨м', {'autostart': False}) + self.assertEqual(0, inst.vm_lookup(u'пeω-∨м')['autostart']) def test_get_interfaces(self): - inst = 
model.Model('test:///default', - objstore_loc=self.tmp_store) + inst = model.Model('test:///default', objstore_loc=self.tmp_store) expected_ifaces = netinfo.all_favored_interfaces() ifaces = inst.interfaces_get_list() - self.assertEquals(len(expected_ifaces), len(ifaces)) + self.assertEqual(len(expected_ifaces), len(ifaces)) for name in expected_ifaces: iface = inst.interface_lookup(name) - self.assertEquals(iface['name'], name) + self.assertEqual(iface['name'], name) self.assertIn('type', iface) self.assertIn('status', iface) self.assertIn('ipaddr', iface) @@ -1568,65 +1773,72 @@ def long_op(cb, params): def abnormal_op(cb, params): try: raise task_except - except: - cb("Exception raised", False) + except Exception: + cb('Exception raised', False) def continuous_ops(cb, params): - cb("step 1 OK") + cb('step 1 OK') time.sleep(2) - cb("step 2 OK") + cb('step 2 OK') time.sleep(2) - cb("step 3 OK", params.get('result', True)) + cb('step 3 OK', params.get('result', True)) - inst = model.Model('test:///default', - objstore_loc=self.tmp_store) + inst = model.Model('test:///default', objstore_loc=self.tmp_store) taskid = AsyncTask('', quick_op, 'Hello').id inst.task_wait(taskid) - self.assertEquals('finished', inst.task_lookup(taskid)['status']) - self.assertEquals('Hello', inst.task_lookup(taskid)['message']) + self.assertEqual('finished', inst.task_lookup(taskid)['status']) + self.assertEqual('Hello', inst.task_lookup(taskid)['message']) params = {'delay': 3, 'result': False, 'message': 'It was not meant to be'} taskid = AsyncTask('', long_op, params).id - self.assertEquals('running', inst.task_lookup(taskid)['status']) - self.assertEquals('The request is being processing.', - inst.task_lookup(taskid)['message']) + self.assertEqual('running', inst.task_lookup(taskid)['status']) + self.assertEqual( + 'The request is being processing.', inst.task_lookup(taskid)[ + 'message'] + ) inst.task_wait(taskid) - self.assertEquals('failed', inst.task_lookup(taskid)['status']) - self.assertEquals('It was not meant to be', - inst.task_lookup(taskid)['message']) + self.assertEqual('failed', inst.task_lookup(taskid)['status']) + self.assertEqual('It was not meant to be', + inst.task_lookup(taskid)['message']) taskid = AsyncTask('', abnormal_op, {}).id inst.task_wait(taskid) - self.assertEquals('Exception raised', - inst.task_lookup(taskid)['message']) - self.assertEquals('failed', inst.task_lookup(taskid)['status']) + self.assertEqual('Exception raised', + inst.task_lookup(taskid)['message']) + self.assertEqual('failed', inst.task_lookup(taskid)['status']) taskid = AsyncTask('', continuous_ops, {'result': True}).id - self.assertEquals('running', inst.task_lookup(taskid)['status']) + self.assertEqual('running', inst.task_lookup(taskid)['status']) inst.task_wait(taskid, timeout=10) - self.assertEquals('finished', inst.task_lookup(taskid)['status']) + self.assertEqual('finished', inst.task_lookup(taskid)['status']) @unittest.skipUnless(utils.running_as_root(), 'Must be run as root') def test_delete_running_vm(self): inst = model.Model(objstore_loc=self.tmp_store) with RollbackContext() as rollback: - params = {'name': u'test', 'disks': [], - 'source_media': {'type': 'disk', 'path': UBUNTU_ISO}} + params = { + 'name': u'test', + 'disks': [], + 'source_media': {'type': 'disk', 'path': UBUNTU_ISO}, + } inst.templates_create(params) rollback.prependDefer(inst.template_delete, 'test') - params = {'name': u'kīмсhī-∨м', - 'template': u'/plugins/kimchi/templates/test'} + params = { + 'name': u'kīмсhī-∨м', + 'template': 
u'/plugins/kimchi/templates/test', + } task = inst.vms_create(params) inst.task_wait(task['id']) - rollback.prependDefer(utils.rollback_wrapper, inst.vm_delete, - u'kīмсhī-∨м') + rollback.prependDefer(utils.rollback_wrapper, + inst.vm_delete, u'kīмсhī-∨м') inst.vm_start(u'kīмсhī-∨м') - self.assertEquals(inst.vm_lookup(u'kīмсhī-∨м')['state'], 'running') - rollback.prependDefer(utils.rollback_wrapper, inst.vm_poweroff, - u'kīмсhī-∨м') + self.assertEqual(inst.vm_lookup(u'kīмсhī-∨м')['state'], 'running') + rollback.prependDefer( + utils.rollback_wrapper, inst.vm_poweroff, u'kīмсhī-∨м' + ) inst.vm_delete(u'kīмсhī-∨м') @@ -1638,8 +1850,11 @@ def test_vm_list_sorted(self): inst = model.Model(objstore_loc=self.tmp_store) with RollbackContext() as rollback: - params = {'name': 'test', 'disks': [], - 'source_media': {'type': 'disk', 'path': UBUNTU_ISO}} + params = { + 'name': 'test', + 'disks': [], + 'source_media': {'type': 'disk', 'path': UBUNTU_ISO}, + } inst.templates_create(params) rollback.prependDefer(inst.template_delete, 'test') @@ -1651,7 +1866,7 @@ def test_vm_list_sorted(self): vms = inst.vms_get_list() - self.assertEquals(vms, sorted(vms, key=unicode.lower)) + self.assertEqual(vms, sorted(vms, key=str.lower)) def test_vm_clone(self): inst = model.Model('test:///default', objstore_loc=self.tmp_store) @@ -1680,18 +1895,20 @@ def test_vm_clone(self): rollback.prependDefer(inst.vm_delete, clone2_name) inst.task_wait(task1['id']) task1 = inst.task_lookup(task1['id']) - self.assertEquals('finished', task1['status']) + self.assertEqual('finished', task1['status']) inst.task_wait(task2['id']) task2 = inst.task_lookup(task2['id']) - self.assertEquals('finished', task2['status']) + self.assertEqual('finished', task2['status']) # update the original VM info because its state has changed original_vm = inst.vm_lookup(name) clone_vm = inst.vm_lookup(clone1_name) self.assertNotEqual(original_vm['name'], clone_vm['name']) - self.assertTrue(re.match(u'%s-clone-\d+' % original_vm['name'], - clone_vm['name'])) + self.assertTrue( + re.match(u'%s-clone-\\d+' % + original_vm['name'], clone_vm['name']) + ) del original_vm['name'] del clone_vm['name'] @@ -1701,11 +1918,10 @@ def test_vm_clone(self): # compare all VM settings except the ones already compared # (and removed) above (i.e. 
'name' and 'uuid') - self.assertEquals(original_vm, clone_vm) + self.assertEqual(original_vm, clone_vm) def test_use_test_host(self): - inst = model.Model('test:///default', - objstore_loc=self.tmp_store) + inst = model.Model('test:///default', objstore_loc=self.tmp_store) with RollbackContext() as rollback: params = { @@ -1713,7 +1929,7 @@ def test_use_test_host(self): 'source_media': {'type': 'disk', 'path': UBUNTU_ISO}, 'domain': 'test', 'arch': 'i686', - 'disks': [] + 'disks': [], } _setDiskPoolDefaultTest() @@ -1733,8 +1949,7 @@ def test_use_test_host(self): self.assertTrue('kimchi-vm' in vms) def test_get_distros(self): - inst = model.Model('test:///default', - objstore_loc=self.tmp_store) + inst = model.Model('test:///default', objstore_loc=self.tmp_store) distros = inst.distros_get_list() for d in distros: distro = inst.distro_lookup(d) @@ -1746,8 +1961,7 @@ def test_get_distros(self): @unittest.skipUnless(utils.running_as_root(), 'Must be run as root') def test_deep_scan(self): - inst = model.Model(None, - objstore_loc=self.tmp_store) + inst = model.Model(None, objstore_loc=self.tmp_store) with RollbackContext() as rollback: deep_path = os.path.join(TMP_DIR, 'deep-scan') subdir_path = os.path.join(deep_path, 'isos') @@ -1755,12 +1969,14 @@ def test_deep_scan(self): os.makedirs(subdir_path) ubuntu_iso = os.path.join(deep_path, 'ubuntu12.04.iso') sles_iso = os.path.join(subdir_path, 'sles10.iso') - iso_gen.construct_fake_iso(ubuntu_iso, True, '12.04', 'ubuntu') - iso_gen.construct_fake_iso(sles_iso, True, '10', 'sles') + construct_fake_iso(ubuntu_iso, True, '12.04', 'ubuntu') + construct_fake_iso(sles_iso, True, '10', 'sles') - args = {'name': 'kimchi-scanning-pool', - 'path': deep_path, - 'type': 'kimchi-iso'} + args = { + 'name': 'kimchi-scanning-pool', + 'path': deep_path, + 'type': 'kimchi-iso', + } inst.storagepools_create(args) rollback.prependDefer(shutil.rmtree, deep_path) rollback.prependDefer(shutil.rmtree, args['path']) @@ -1768,14 +1984,14 @@ def test_deep_scan(self): time.sleep(1) volumes = inst.storagevolumes_get_list(args['name']) - self.assertEquals(len(volumes), 2) + self.assertEqual(len(volumes), 2) def _host_is_power(): return platform.machine().startswith('ppc') @unittest.skipUnless(_host_is_power(), 'Only required for Power hosts') def test_pci_hotplug_requires_usb_controller(self): - config.set("authentication", "method", "pam") + config.set('authentication', 'method', 'pam') inst = model.Model(None, objstore_loc=self.tmp_store) tpl_params = {'name': 'test', 'memory': 1024, 'cdrom': UBUNTU_ISO} inst.templates_create(tpl_params) @@ -1784,15 +2000,15 @@ def test_pci_hotplug_requires_usb_controller(self): vm_params = {'name': 'kimchi-vm1', 'template': '/templates/test'} task1 = inst.vms_create(vm_params) inst.task_wait(task1['id']) - rollback.prependDefer(utils.rollback_wrapper, inst.vm_delete, - 'kimchi-vm1') + rollback.prependDefer(utils.rollback_wrapper, + inst.vm_delete, 'kimchi-vm1') # Start vm inst.vm_start('kimchi-vm1') - rollback.prependDefer(utils.rollback_wrapper, inst.vm_poweroff, - 'kimchi-vm1') + rollback.prependDefer( + utils.rollback_wrapper, inst.vm_poweroff, 'kimchi-vm1' + ) # check if create VM has USB controller - self.assertTrue( - inst.vmhostdevs_have_usb_controller('kimchi-vm1')) + self.assertTrue(inst.vmhostdevs_have_usb_controller('kimchi-vm1')) def get_hostdevs_xml(self): return """\ @@ -1878,18 +2094,15 @@ def test_vmhostdev_is_hostdev_multifunction(self): inst = model.Model(None, objstore_loc=self.tmp_store) hostdev_multi_elem = 
objectify.fromstring( - self.get_hostdev_multifunction_xml() - ) + self.get_hostdev_multifunction_xml()) self.assertTrue( - inst.vmhostdev_is_hostdev_multifunction(hostdev_multi_elem) - ) + inst.vmhostdev_is_hostdev_multifunction(hostdev_multi_elem)) hostdev_nomulti_elem = objectify.fromstring( self.get_hostdev_nomultifunction_xml() ) self.assertFalse( - inst.vmhostdev_is_hostdev_multifunction(hostdev_nomulti_elem) - ) + inst.vmhostdev_is_hostdev_multifunction(hostdev_nomulti_elem)) def test_vmhostdev_get_devices_same_addr(self): inst = model.Model(None, objstore_loc=self.tmp_store) @@ -1898,8 +2111,7 @@ def test_vmhostdev_get_devices_same_addr(self): hostdevs = root.devices.hostdev hostdev_multi_elem = objectify.fromstring( - self.get_hostdev_multifunction_xml() - ) + self.get_hostdev_multifunction_xml()) hostdev_same_addr_str = """\ \ @@ -1908,26 +2120,27 @@ def test_vmhostdev_get_devices_same_addr(self):
\ """ same_addr_devices = [ - ET.tostring(hostdev_multi_elem), hostdev_same_addr_str + ET.tostring(hostdev_multi_elem).decode('utf-8'), + hostdev_same_addr_str, ] - self.assertItemsEqual( - same_addr_devices, - inst.vmhostdev_get_devices_same_addr(hostdevs, hostdev_multi_elem) + self.assertEqual( + set(same_addr_devices) + - set(inst.vmhostdev_get_devices_same_addr(hostdevs, hostdev_multi_elem)), + set(), ) nomatch_elem = objectify.fromstring( - self.get_hostdev_nomultifunction_xml() - ) + self.get_hostdev_nomultifunction_xml()) self.assertEqual( inst.vmhostdev_get_devices_same_addr(hostdevs, nomatch_elem), - [ET.tostring(nomatch_elem)] + [ET.tostring(nomatch_elem).decode('utf-8')], ) @mock.patch('wok.plugins.kimchi.model.vmhostdevs.get_vm_config_flag') def test_vmhostdev_unplug_multifunction_pci(self, mock_conf_flag): - class FakeDom(): + class FakeDom: def detachDeviceFlags(self, xml, config_flag): pass @@ -1939,21 +2152,20 @@ def detachDeviceFlags(self, xml, config_flag): hostdevs = root.devices.hostdev hostdev_multi_elem = objectify.fromstring( - self.get_hostdev_multifunction_xml() - ) + self.get_hostdev_multifunction_xml()) self.assertTrue( - inst.vmhostdev_unplug_multifunction_pci(FakeDom(), hostdevs, - hostdev_multi_elem) + inst.vmhostdev_unplug_multifunction_pci( + FakeDom(), hostdevs, hostdev_multi_elem + ) ) nomatch_elem = objectify.fromstring( - self.get_hostdev_nomultifunction_xml() - ) + self.get_hostdev_nomultifunction_xml()) self.assertFalse( - inst.vmhostdev_unplug_multifunction_pci(FakeDom(), hostdevs, - nomatch_elem) + inst.vmhostdev_unplug_multifunction_pci( + FakeDom(), hostdevs, nomatch_elem) ) @@ -1976,4 +2188,4 @@ def __init__(self): def test_root_model(self): t = BaseModelTests.TestModel() t.foos_create({'item1': 10}) - self.assertEquals(t.foos_get_list(), ['item1']) + self.assertEqual(t.foos_get_list(), ['item1']) diff --git a/tests/test_model_libvirtevents.py b/tests/test_model_libvirtevents.py index 177bbae8d..4f9d524a6 100644 --- a/tests/test_model_libvirtevents.py +++ b/tests/test_model_libvirtevents.py @@ -17,9 +17,7 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - import json -import libvirt import os import shutil import tempfile @@ -27,11 +25,11 @@ import unittest import iso_gen -import tests.utils as utils - +import libvirt +from wok.plugins.kimchi.model import model from wok.rollbackcontext import RollbackContext -from wok.plugins.kimchi.model import model +import tests.utils as utils TMP_DIR = '/var/lib/kimchi/tests/' @@ -72,16 +70,16 @@ def _get_event_id(): def _store_event(data): global TMP_EVENT - with open(TMP_EVENT, "a") as file: - file.write("%s\n" % data) + with open(TMP_EVENT, 'a') as file: + file.write('%s\n' % data) def _get_event(id): global TMP_EVENT - with open(TMP_EVENT, "r") as file: + with open(TMP_EVENT, 'r') as file: for event in [line.rstrip('\n') for line in file.readlines()]: fields = event.split('|') - if fields[0] == id: + if fields[0] == str(id): return fields[1] @@ -96,33 +94,56 @@ def domain_event_lifecycle_cb(self, conn, dom, event, detail, *args): """ Callback to handle Domain (VMs) events - VM Livecycle. 
""" - evStrings = ("Defined", "Undefined", "Started", "Suspended", "Resumed", - "Stopped", "Shutdown", "PMSuspended", "Crashed") - evDetails = (("Added", "Updated"), - ("Removed", ), - ("Booted", "Migrated", "Restored", "Snapshot", "Wakeup"), - ("Paused", "Migrated", "IOError", "Watchdog", "Restored", - "Snapshot", "API error"), - ("Unpaused", "Migrated", "Snapshot"), - ("Shutdown", "Destroyed", "Crashed", "Migrated", "Saved", - "Failed", "Snapshot"), - ("Finished", ), - ("Memory", "Disk"), - ("Panicked")) - - data = {'domain': dom.name(), 'event': evStrings[event], - 'event_detail': evDetails[event][detail]} + evStrings = ( + 'Defined', + 'Undefined', + 'Started', + 'Suspended', + 'Resumed', + 'Stopped', + 'Shutdown', + 'PMSuspended', + 'Crashed', + ) + evDetails = ( + ('Added', 'Updated'), + ('Removed',), + ('Booted', 'Migrated', 'Restored', 'Snapshot', 'Wakeup'), + ( + 'Paused', + 'Migrated', + 'IOError', + 'Watchdog', + 'Restored', + 'Snapshot', + 'API error', + ), + ('Unpaused', 'Migrated', 'Snapshot'), + ( + 'Shutdown', + 'Destroyed', + 'Crashed', + 'Migrated', + 'Saved', + 'Failed', + 'Snapshot', + ), + ('Finished',), + ('Memory', 'Disk'), + ('Panicked'), + ) + + data = { + 'domain': dom.name(), + 'event': evStrings[event], + 'event_detail': evDetails[event][detail], + } _store_event('%s|%s' % (_get_next_event_id(), json.dumps(data))) - def domain_event_reboot_cb(self, conn, dom, *args): - """ - Callback to handle Domain (VMs) events - VM Reboot. - """ - data = {'domain': dom.name(), 'event': 'Rebooted'} - _store_event('%s|%s' % (_get_next_event_id(), json.dumps(data))) - - @unittest.skipUnless(utils.running_as_root() and - os.uname()[4] != "s390x", 'Must be run as root') + @unittest.skipUnless( + utils.running_as_root() and os.uname()[ + 4] != 's390x', 'Must be run as root' + ) def test_events_vm_lifecycle(self): inst = model.Model(objstore_loc=self.tmp_store) self.objstore = inst.objstore @@ -131,81 +152,75 @@ def test_events_vm_lifecycle(self): # Create a template and VM to test, and start lifecycle tests with RollbackContext() as rollback: # Register the most common Libvirt domain events to be handled. 
- event_map = [(libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE, - self.domain_event_lifecycle_cb), - (libvirt.VIR_DOMAIN_EVENT_ID_REBOOT, - self.domain_event_reboot_cb)] + event_map = [ + (libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE, self.domain_event_lifecycle_cb) + ] for event, event_cb in event_map: - ev_id = conn.domainEventRegisterAny(None, event, event_cb, - None) - rollback.prependDefer(conn.domainEventDeregisterAny, ev_id) + conn.domainEventRegister(event_cb, None) + rollback.prependDefer(conn.domainEventDeregister, event_cb) # Create a template - template_params = {'name': 'ttest', - 'source_media': {'type': 'disk', - 'path': UBUNTU_ISO}} + template_params = { + 'name': 'ttest', + 'source_media': {'type': 'disk', 'path': UBUNTU_ISO}, + } inst.templates_create(template_params) rollback.prependDefer(inst.template_delete, 'ttest') # Create a VM (guest) - vm_params = {'name': 'kimchi-vm1', - 'template': '/plugins/kimchi/templates/ttest'} + vm_params = { + 'name': 'kimchi-vm1', + 'template': '/plugins/kimchi/templates/ttest', + } task = inst.vms_create(vm_params) inst.task_wait(task['id'], 10) task = inst.task_lookup(task['id']) - self.assertEquals('finished', task['status']) + self.assertEqual('finished', task['status']) time.sleep(5) # Check event of domain definition (addition) res = json.loads(_get_event(str(_get_event_id()))) - self.assertEquals('kimchi-vm1', res['domain']) - self.assertEquals('Defined', res['event']) - self.assertEquals('Added', res['event_detail']) + self.assertEqual('kimchi-vm1', res['domain']) + self.assertEqual('Defined', res['event']) + self.assertEqual('Added', res['event_detail']) # Start the VM and check the event inst.vm_start('kimchi-vm1') time.sleep(5) res = json.loads(_get_event(str(_get_event_id()))) - self.assertEquals('kimchi-vm1', res['domain']) - self.assertEquals('Started', res['event']) - self.assertEquals('Booted', res['event_detail']) + self.assertEqual('kimchi-vm1', res['domain']) + self.assertEqual('Started', res['event']) + self.assertEqual('Booted', res['event_detail']) # Suspend the VM and check the event inst.vm_suspend('kimchi-vm1') time.sleep(5) res = json.loads(_get_event(str(_get_event_id()))) - self.assertEquals('kimchi-vm1', res['domain']) - self.assertEquals('Suspended', res['event']) - self.assertEquals('Paused', res['event_detail']) + self.assertEqual('kimchi-vm1', res['domain']) + self.assertEqual('Suspended', res['event']) + self.assertEqual('Paused', res['event_detail']) # Resume the VM and check the event inst.vm_resume('kimchi-vm1') time.sleep(5) res = json.loads(_get_event(str(_get_event_id()))) - self.assertEquals('kimchi-vm1', res['domain']) - self.assertEquals('Resumed', res['event']) - self.assertEquals('Unpaused', res['event_detail']) - - # Reboot the VM and check the event - inst.vm_reset('kimchi-vm1') - time.sleep(5) - res = json.loads(_get_event(str(_get_event_id()))) - self.assertEquals('kimchi-vm1', res['domain']) - self.assertEquals('Rebooted', res['event']) + self.assertEqual('kimchi-vm1', res['domain']) + self.assertEqual('Resumed', res['event']) + self.assertEqual('Unpaused', res['event_detail']) # PowerOff (hard stop) the VM and check the event inst.vm_poweroff('kimchi-vm1') time.sleep(5) res = json.loads(_get_event(str(_get_event_id()))) - self.assertEquals('kimchi-vm1', res['domain']) - self.assertEquals('Stopped', res['event']) - self.assertEquals('Destroyed', res['event_detail']) + self.assertEqual('kimchi-vm1', res['domain']) + self.assertEqual('Stopped', res['event']) + self.assertEqual('Destroyed', 
res['event_detail']) # Delete the VM and check the event inst.vm_delete('kimchi-vm1') time.sleep(5) res = json.loads(_get_event(str(_get_event_id()))) - self.assertEquals('kimchi-vm1', res['domain']) - self.assertEquals('Undefined', res['event']) - self.assertEquals('Removed', res['event_detail']) + self.assertEqual('kimchi-vm1', res['domain']) + self.assertEqual('Undefined', res['event']) + self.assertEqual('Removed', res['event_detail']) diff --git a/tests/test_model_network.py b/tests/test_model_network.py index e83c5159a..47771c4a3 100644 --- a/tests/test_model_network.py +++ b/tests/test_model_network.py @@ -17,21 +17,22 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - -import cherrypy import json -import mock import os import tempfile import unittest +import urllib from functools import partial -from tests.utils import patch_auth, request, rollback_wrapper -from tests.utils import run_server - +import cherrypy +import mock +from wok.plugins.kimchi.model.featuretests import FeatureTests from wok.rollbackcontext import RollbackContext -from wok.plugins.kimchi.model.featuretests import FeatureTests +from tests.utils import patch_auth +from tests.utils import request +from tests.utils import rollback_wrapper +from tests.utils import run_server model = None objectstore_loc = tempfile.mktemp() @@ -56,39 +57,38 @@ def tearDownModule(): def _do_network_test(self, model, params): with RollbackContext() as rollback: net_name = params['name'] - uri = '/plugins/kimchi/networks/%s' % net_name.encode('utf-8') + uri = urllib.parse.quote(f'/plugins/kimchi/networks/{net_name}') # Create a network req = json.dumps(params) resp = self.request('/plugins/kimchi/networks', req, 'POST') - rollback.prependDefer(rollback_wrapper, model.network_delete, - net_name) - self.assertEquals(201, resp.status) + rollback.prependDefer(rollback_wrapper, model.network_delete, net_name) + self.assertEqual(201, resp.status) # Verify the network resp = self.request(uri) - network = json.loads(resp.read()) - self.assertEquals('inactive', network['state']) + network = json.loads(resp.read().decode('utf-8')) + self.assertEqual('inactive', network['state']) self.assertTrue(network['persistent']) # activate the network resp = self.request(uri + '/activate', '{}', 'POST') - rollback.prependDefer(rollback_wrapper, - model.network_deactivate, net_name) - self.assertEquals(200, resp.status) + rollback.prependDefer( + rollback_wrapper, model.network_deactivate, net_name) + self.assertEqual(200, resp.status) resp = self.request(uri) - network = json.loads(resp.read()) - self.assertEquals('active', network['state']) + network = json.loads(resp.read().decode('utf-8')) + self.assertEqual('active', network['state']) # Deactivate the network resp = self.request(uri + '/deactivate', '{}', 'POST') - self.assertEquals(200, resp.status) + self.assertEqual(200, resp.status) resp = self.request(uri) - network = json.loads(resp.read()) - self.assertEquals('inactive', network['state']) + network = json.loads(resp.read().decode('utf-8')) + self.assertEqual('inactive', network['state']) # Define network update parameters - updateParams = {'name': net_name + u'renamed'} + updateParams = {'name': net_name + 'renamed'} connection = params.get('connection') if connection in ['isolated', 'nat'] and 'subnet' in params: updateParams['subnet'] = '127.0.200.0/24' @@ -98,15 +98,15 @@ def 
_do_network_test(self, model, params): # Test network update req = json.dumps(updateParams) resp = self.request(uri, req, 'PUT') - self.assertEquals(303, resp.status) + self.assertEqual(303, resp.status) # Assert old name does not exist anymore resp = self.request(uri, '{}', 'GET') - self.assertEquals(404, resp.status) + self.assertEqual(404, resp.status) # Delete the network - resp = self.request(uri + 'renamed'.encode('utf-8'), '{}', 'DELETE') - self.assertEquals(204, resp.status) + resp = self.request(uri + 'renamed', '{}', 'DELETE') + self.assertEqual(204, resp.status) class NetworkTests(unittest.TestCase): @@ -114,78 +114,108 @@ def setUp(self): self.request = partial(request) def test_get_networks(self): - networks = json.loads(self.request('/plugins/kimchi/networks').read()) + networks = json.loads( + self.request('/plugins/kimchi/networks').read().decode('utf-8') + ) self.assertIn('default', [net['name'] for net in networks]) with RollbackContext() as rollback: # Now add a couple of Networks to the mock model - for i in xrange(5): + for i in range(5): name = 'network-%i' % i - req = json.dumps({'name': name, - 'connection': 'nat', - 'subnet': '127.0.10%i.0/24' % i}) + req = json.dumps( + {'name': name, 'connection': 'nat', + 'subnet': '127.0.10%i.0/24' % i} + ) resp = self.request('/plugins/kimchi/networks', req, 'POST') rollback.prependDefer(model.network_delete, name) - self.assertEquals(201, resp.status) - network = json.loads(resp.read()) - self.assertEquals([], network["vms"]) + self.assertEqual(201, resp.status) + network = json.loads(resp.read().decode('utf-8')) + self.assertEqual([], network['vms']) - nets = json.loads(self.request('/plugins/kimchi/networks').read()) - self.assertEquals(len(networks) + 5, len(nets)) + nets = json.loads( + self.request('/plugins/kimchi/networks').read().decode('utf-8') + ) + self.assertEqual(len(networks) + 5, len(nets)) network = json.loads( - self.request('/plugins/kimchi/networks/network-1').read() + self.request('/plugins/kimchi/networks/network-1') + .read() + .decode('utf-8') ) - keys = [u'name', u'connection', u'interfaces', u'subnet', u'dhcp', - u'vms', u'in_use', u'autostart', u'state', u'persistent'] - self.assertEquals(sorted(keys), sorted(network.keys())) + keys = [ + 'name', + 'connection', + 'interfaces', + 'subnet', + 'dhcp', + 'vms', + 'in_use', + 'autostart', + 'state', + 'persistent', + ] + self.assertEqual(sorted(keys), sorted(network.keys())) def test_network_lifecycle(self): # Verify all the supported network type - networks = [{'name': u'kīмсhī-пet', 'connection': 'isolated'}, - {'name': u'&', 'connection': 'nat'}, - {'name': u'subnet-network', 'connection': 'nat', - 'subnet': '127.0.100.0/24'}] + networks = [ + {'name': 'kīмсhī-пet', 'connection': 'isolated'}, + {'name': '&', 'connection': 'nat'}, + {'name': 'subnet-network', 'connection': 'nat', + 'subnet': '127.0.100.0/24'}, + ] # Verify the current system has at least one interface to create a # bridged network interfaces = json.loads( - self.request( - '/plugins/kimchi/interfaces?_inuse=false&type=nic').read()) + self.request('/plugins/kimchi/interfaces?_inuse=false&type=nic') + .read() + .decode('utf-8') + ) if len(interfaces) > 0: iface = interfaces[0]['name'] - networks.append({'name': u'macvtap-network', - 'connection': 'macvtap', 'interfaces': [iface]}) + networks.append( + { + 'name': 'macvtap-network', + 'connection': 'macvtap', + 'interfaces': [iface], + } + ) if not FeatureTests.is_nm_running(): - networks.append({'name': u'bridge-network', - 'connection': 
'bridge', - 'interfaces': [iface]}) + networks.append( + { + 'name': 'bridge-network', + 'connection': 'bridge', + 'interfaces': [iface], + } + ) for net in networks: _do_network_test(self, model, net) def test_macvtap_network_create_fails_more_than_one_interface(self): network = { - 'name': u'macvtap-network', + 'name': 'macvtap-network', 'connection': 'macvtap', - 'interfaces': ['fake_iface1', 'fake_iface2', 'fake_iface3'] + 'interfaces': ['fake_iface1', 'fake_iface2', 'fake_iface3'], } - expected_error_msg = "KCHNET0030E" + expected_error_msg = 'KCHNET0030E' req = json.dumps(network) resp = self.request('/plugins/kimchi/networks', req, 'POST') - self.assertEquals(400, resp.status) - self.assertIn(expected_error_msg, resp.read()) + self.assertEqual(400, resp.status) + self.assertIn(expected_error_msg, resp.read().decode('utf-8')) def test_bridge_network_create_fails_more_than_one_interface(self): network = { - 'name': u'bridge-network', + 'name': 'bridge-network', 'connection': 'bridge', - 'interfaces': ['fake_iface1', 'fake_iface2', 'fake_iface3'] + 'interfaces': ['fake_iface1', 'fake_iface2', 'fake_iface3'], } - expected_error_msg = "KCHNET0030E" + expected_error_msg = 'KCHNET0030E' req = json.dumps(network) resp = self.request('/plugins/kimchi/networks', req, 'POST') - self.assertEquals(400, resp.status) - self.assertIn(expected_error_msg, resp.read()) + self.assertEqual(400, resp.status) + self.assertIn(expected_error_msg, resp.read().decode('utf-8')) diff --git a/tests/test_model_storagepool.py b/tests/test_model_storagepool.py index a1a5d95a2..e15f8e092 100644 --- a/tests/test_model_storagepool.py +++ b/tests/test_model_storagepool.py @@ -17,19 +17,20 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - -import cherrypy import json -import mock import os import shutil import tempfile import unittest +import urllib from functools import partial +import cherrypy +import mock from wok.rollbackcontext import RollbackContext -from tests.utils import patch_auth, request +from tests.utils import patch_auth +from tests.utils import request from tests.utils import run_server model = None @@ -57,61 +58,81 @@ def setUp(self): self.request = partial(request) def test_get_storagepools(self): - storagepools = json.loads( - self.request('/plugins/kimchi/storagepools').read() - ) + storagepools = json.loads(self.request( + '/plugins/kimchi/storagepools').read()) self.assertIn('default', [pool['name'] for pool in storagepools]) with RollbackContext() as rollback: # Now add a couple of storage pools - for i in xrange(3): - name = u'kīмсhī-storagepool-%i' % i - path = '/var/lib/libvirt/images/%i' % i + for i in range(3): + name = f'kīмсhī-storagepool-{i}' + path = f'/var/lib/libvirt/images/{i}' req = json.dumps({'name': name, 'type': 'dir', 'path': path}) - resp = self.request('/plugins/kimchi/storagepools', req, - 'POST') + resp = self.request( + '/plugins/kimchi/storagepools', req, 'POST') rollback.prependDefer(model.storagepool_delete, name) rollback.prependDefer(shutil.rmtree, path) - self.assertEquals(201, resp.status) + self.assertEqual(201, resp.status) # Pool name must be unique - req = json.dumps({'name': name, 'type': 'dir', - 'path': '/var/lib/libvirt/images/%i' % i}) - resp = self.request('/plugins/kimchi/storagepools', req, - 'POST') - self.assertEquals(400, resp.status) + req = json.dumps( + { + 'name': name, + 'type': 
'dir', + 'path': f'/var/lib/libvirt/images/{i}', + } + ) + resp = self.request( + '/plugins/kimchi/storagepools', req, 'POST') + self.assertEqual(400, resp.status) # Verify pool information - resp = self.request('/plugins/kimchi/storagepools/%s' % - name.encode("utf-8")) + quote_uri = urllib.parse.quote( + f'/plugins/kimchi/storagepools/{name}') + resp = self.request(quote_uri) p = json.loads(resp.read()) - keys = [u'name', u'state', u'capacity', u'allocated', - u'available', u'path', u'source', u'type', - u'nr_volumes', u'autostart', u'persistent', 'in_use'] - self.assertEquals(sorted(keys), sorted(p.keys())) - self.assertEquals(name, p['name']) - self.assertEquals('inactive', p['state']) - self.assertEquals(True, p['persistent']) - self.assertEquals(True, p['autostart']) - self.assertEquals(0, p['nr_volumes']) - - pools = json.loads( - self.request('/plugins/kimchi/storagepools').read() - ) - self.assertEquals(len(storagepools) + 3, len(pools)) + keys = [ + 'name', + 'state', + 'capacity', + 'allocated', + 'available', + 'path', + 'source', + 'type', + 'nr_volumes', + 'autostart', + 'persistent', + 'in_use', + ] + self.assertEqual(sorted(keys), sorted(p.keys())) + self.assertEqual(name, p['name']) + self.assertEqual('inactive', p['state']) + self.assertEqual(True, p['persistent']) + self.assertEqual(True, p['autostart']) + self.assertEqual(0, p['nr_volumes']) + + pools = json.loads(self.request( + '/plugins/kimchi/storagepools').read()) + self.assertEqual(len(storagepools) + 3, len(pools)) # Create a pool with an existing path tmp_path = tempfile.mkdtemp(dir='/var/lib/kimchi') rollback.prependDefer(os.rmdir, tmp_path) - req = json.dumps({'name': 'existing_path', 'type': 'dir', - 'path': tmp_path}) + req = json.dumps( + {'name': 'existing_path', 'type': 'dir', 'path': tmp_path}) resp = self.request('/plugins/kimchi/storagepools', req, 'POST') rollback.prependDefer(model.storagepool_delete, 'existing_path') - self.assertEquals(201, resp.status) + self.assertEqual(201, resp.status) # Reserved pool return 400 - req = json.dumps({'name': 'kimchi_isos', 'type': 'dir', - 'path': '/var/lib/libvirt/images/%i' % i}) + req = json.dumps( + { + 'name': 'kimchi_isos', + 'type': 'dir', + 'path': '/var/lib/libvirt/images/%i' % i, + } + ) resp = request('/plugins/kimchi/storagepools', req, 'POST') - self.assertEquals(400, resp.status) + self.assertEqual(400, resp.status) diff --git a/tests/test_model_storagevolume.py b/tests/test_model_storagevolume.py index 36be9613d..c9e036bf2 100644 --- a/tests/test_model_storagevolume.py +++ b/tests/test_model_storagevolume.py @@ -17,25 +17,29 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - -import cherrypy import json -import mock import os -import requests import tempfile import unittest +import urllib from functools import partial -from requests.exceptions import ConnectionError - -from tests.utils import fake_auth_header, HOST -from tests.utils import patch_auth, PORT, request -from tests.utils import rollback_wrapper, run_server, wait_task +import cherrypy +import mock +import requests +from requests.exceptions import ConnectionError from wok.config import paths +from wok.plugins.kimchi.config import READONLY_POOL_TYPE from wok.rollbackcontext import RollbackContext -from wok.plugins.kimchi.config import READONLY_POOL_TYPE +from tests.utils import fake_auth_header +from tests.utils import 
HOST +from tests.utils import patch_auth +from tests.utils import PORT +from tests.utils import request +from tests.utils import rollback_wrapper +from tests.utils import run_server +from tests.utils import wait_task model = None objectstore_loc = tempfile.mktemp() @@ -60,37 +64,41 @@ def tearDownModule(): def _do_volume_test(self, model, pool_name): def _task_lookup(taskid): return json.loads( - self.request('/plugins/kimchi/tasks/%s' % taskid).read() + self.request( + f'/plugins/kimchi/tasks/{taskid}').read().decode('utf-8') ) - uri = '/plugins/kimchi/storagepools/%s/storagevolumes' \ - % pool_name.encode('utf-8') + uri = urllib.parse.quote( + f'/plugins/kimchi/storagepools/{pool_name}/storagevolumes') resp = self.request(uri) - self.assertEquals(200, resp.status) + self.assertEqual(200, resp.status) - resp = self.request('/plugins/kimchi/storagepools/%s' % - pool_name.encode('utf-8')) - pool_info = json.loads(resp.read()) + resp = self.request(urllib.parse.quote( + f'/plugins/kimchi/storagepools/{pool_name}')) + pool_info = json.loads(resp.read().decode('utf-8')) with RollbackContext() as rollback: # Create storage volume with 'capacity' vol = 'test-volume' vol_uri = uri + '/' + vol - req = json.dumps({'name': vol, 'format': 'raw', - 'capacity': 1073741824}) # 1 GiB + req = json.dumps( + {'name': vol, 'format': 'raw', 'capacity': 1073741824} + ) # 1 GiB resp = self.request(uri, req, 'POST') if pool_info['type'] in READONLY_POOL_TYPE: - self.assertEquals(400, resp.status) + self.assertEqual(400, resp.status) else: - rollback.prependDefer(rollback_wrapper, model.storagevolume_delete, - pool_name, vol) - self.assertEquals(202, resp.status) - task_id = json.loads(resp.read())['id'] + rollback.prependDefer( + rollback_wrapper, model.storagevolume_delete, pool_name, vol + ) + self.assertEqual(202, resp.status) + task_id = json.loads(resp.read().decode('utf-8'))['id'] wait_task(_task_lookup, task_id) status = json.loads( - self.request('/plugins/kimchi/tasks/%s' % task_id).read() + self.request( + f'/plugins/kimchi/tasks/{task_id}').read().decode('utf-8') ) - self.assertEquals('finished', status['status']) - vol_info = json.loads(self.request(vol_uri).read()) + self.assertEqual('finished', status['status']) + vol_info = json.loads(self.request(vol_uri).read().decode('utf-8')) vol_info['name'] = vol vol_info['format'] = 'raw' vol_info['capacity'] = 1073741824 @@ -98,42 +106,48 @@ def _task_lookup(taskid): # Resize the storage volume: increase its capacity to 2 GiB req = json.dumps({'size': 2147483648}) # 2 GiB resp = self.request(vol_uri + '/resize', req, 'POST') - self.assertEquals(200, resp.status) - storagevolume = json.loads(self.request(vol_uri).read()) - self.assertEquals(2147483648, storagevolume['capacity']) + self.assertEqual(200, resp.status) + storagevolume = json.loads( + self.request(vol_uri).read().decode('utf-8')) + self.assertEqual(2147483648, storagevolume['capacity']) # Resize the storage volume: decrease its capacity to 512 MiB # This test case may fail if libvirt does not include the fix for # https://bugzilla.redhat.com/show_bug.cgi?id=1021802 req = json.dumps({'size': 536870912}) # 512 MiB resp = self.request(vol_uri + '/resize', req, 'POST') - self.assertEquals(200, resp.status) - storagevolume = json.loads(self.request(vol_uri).read()) - self.assertEquals(536870912, storagevolume['capacity']) + self.assertEqual(200, resp.status) + storagevolume = json.loads( + self.request(vol_uri).read().decode('utf-8')) + self.assertEqual(536870912, storagevolume['capacity']) # Wipe 
the storage volume resp = self.request(vol_uri + '/wipe', '{}', 'POST') - self.assertEquals(200, resp.status) - storagevolume = json.loads(self.request(vol_uri).read()) - self.assertEquals(0, storagevolume['allocation']) + self.assertEqual(200, resp.status) + storagevolume = json.loads( + self.request(vol_uri).read().decode('utf-8')) + self.assertEqual(0, storagevolume['allocation']) # Clone the storage volume - vol_info = json.loads(self.request(vol_uri).read()) + vol_info = json.loads(self.request(vol_uri).read().decode('utf-8')) resp = self.request(vol_uri + '/clone', '{}', 'POST') - self.assertEquals(202, resp.status) + self.assertEqual(202, resp.status) task = json.loads(resp.read()) cloned_vol_name = task['target_uri'].split('/')[-2] - rollback.prependDefer(model.storagevolume_delete, pool_name, - cloned_vol_name) + rollback.prependDefer( + model.storagevolume_delete, pool_name, cloned_vol_name + ) wait_task(_task_lookup, task['id']) task = json.loads( - self.request('/plugins/kimchi/tasks/%s' % task['id']).read() + self.request('/plugins/kimchi/tasks/%s' % task['id']) + .read() + .decode('utf-8') ) - self.assertEquals('finished', task['status']) - resp = self.request(uri + '/' + cloned_vol_name.encode('utf-8')) + self.assertEqual('finished', task['status']) + resp = self.request(uri + '/' + cloned_vol_name) - self.assertEquals(200, resp.status) - cloned_vol = json.loads(resp.read()) + self.assertEqual(200, resp.status) + cloned_vol = json.loads(resp.read().decode('utf-8')) self.assertNotEquals(vol_info['name'], cloned_vol['name']) self.assertNotEquals(vol_info['path'], cloned_vol['path']) @@ -141,13 +155,13 @@ def _task_lookup(taskid): del vol_info[key] del cloned_vol[key] - self.assertEquals(vol_info, cloned_vol) + self.assertEqual(vol_info, cloned_vol) # Delete the storage volume resp = self.request(vol_uri, '{}', 'DELETE') - self.assertEquals(204, resp.status) + self.assertEqual(204, resp.status) resp = self.request(vol_uri) - self.assertEquals(404, resp.status) + self.assertEqual(404, resp.status) # Storage volume upload # It is done through a sequence of POST and several PUT requests @@ -156,20 +170,27 @@ def _task_lookup(taskid): filesize = os.stat(filepath).st_size # Create storage volume for upload - req = json.dumps({'name': filename, 'format': 'raw', - 'capacity': filesize, 'upload': True}) + req = json.dumps( + {'name': filename, 'format': 'raw', + 'capacity': filesize, 'upload': True} + ) resp = self.request(uri, req, 'POST') if pool_info['type'] in READONLY_POOL_TYPE: - self.assertEquals(400, resp.status) + self.assertEqual(400, resp.status) else: - rollback.prependDefer(rollback_wrapper, model.storagevolume_delete, - pool_name, filename) - self.assertEquals(202, resp.status) - task_id = json.loads(resp.read())['id'] + rollback.prependDefer( + rollback_wrapper, model.storagevolume_delete, pool_name, filename + ) + self.assertEqual(202, resp.status) + task = json.loads(resp.read().decode('utf-8')) + task_id = task['id'] wait_task(_task_lookup, task_id) - status = json.loads(self.request('/plugins/kimchi/tasks/%s' % - task_id).read()) - self.assertEquals('ready for upload', status['message']) + status = json.loads( + self.request('/plugins/kimchi/tasks/%s' % task_id) + .read() + .decode('utf-8') + ) + self.assertEqual('ready for upload', status['message']) # Upload volume content url = 'http://%s:%s' % (HOST, PORT) + uri + '/' + filename @@ -181,8 +202,8 @@ def _task_lookup(taskid): # test case expects for exception raised by cherrypy. 
newfile = '/tmp/5m-file' with open(newfile, 'wb') as fd: - fd.seek(5*1024*1024-1) - fd.write("\0") + fd.seek(5 * 1024 * 1024 - 1) + fd.write(b'\0') rollback.prependDefer(os.remove, newfile) with open(newfile, 'rb') as fd: @@ -191,12 +212,15 @@ def _task_lookup(taskid): tmp_fd.write(data) with open(newfile + '.tmp', 'rb') as tmp_fd: - error_msg = "Connection aborted" + error_msg = 'Connection aborted' with self.assertRaisesRegexp(ConnectionError, error_msg): - requests.put(url, data={'chunk_size': len(data)}, - files={'chunk': tmp_fd}, - verify=False, - headers=fake_auth_header()) + requests.put( + url, + data={'chunk_size': len(data)}, + files={'chunk': tmp_fd}, + verify=False, + headers=fake_auth_header(), + ) # Do upload index = 0 @@ -206,17 +230,20 @@ def _task_lookup(taskid): with open(filepath, 'rb') as fd: while True: with open(filepath + '.tmp', 'wb') as tmp_fd: - fd.seek(index*chunk_size) + fd.seek(index * chunk_size) data = fd.read(chunk_size) tmp_fd.write(data) with open(filepath + '.tmp', 'rb') as tmp_fd: - r = requests.put(url, data={'chunk_size': len(data)}, - files={'chunk': tmp_fd}, - verify=False, - headers=fake_auth_header()) - self.assertEquals(r.status_code, 200) - content += data + r = requests.put( + url, + data={'chunk_size': len(data)}, + files={'chunk': tmp_fd}, + verify=False, + headers=fake_auth_header(), + ) + self.assertEqual(r.status_code, 200) + content += data.decode('utf-8') index = index + 1 if len(data) < chunk_size: @@ -224,12 +251,12 @@ def _task_lookup(taskid): rollback.prependDefer(os.remove, filepath + '.tmp') resp = self.request(uri + '/' + filename) - self.assertEquals(200, resp.status) - uploaded_path = json.loads(resp.read())['path'] + self.assertEqual(200, resp.status) + uploaded_path = json.loads(resp.read().decode('utf-8'))['path'] with open(uploaded_path) as fd: uploaded_content = fd.read() - self.assertEquals(content, uploaded_content) + self.assertEqual(content, uploaded_content) # Create storage volume with 'url' url = 'https://github.com/kimchi-project/kimchi/raw/master/COPYING' @@ -237,15 +264,15 @@ def _task_lookup(taskid): resp = self.request(uri, req, 'POST') if pool_info['type'] in READONLY_POOL_TYPE: - self.assertEquals(400, resp.status) + self.assertEqual(400, resp.status) else: - rollback.prependDefer(model.storagevolume_delete, pool_name, - 'COPYING') - self.assertEquals(202, resp.status) - task = json.loads(resp.read()) + rollback.prependDefer( + model.storagevolume_delete, pool_name, 'COPYING') + self.assertEqual(202, resp.status) + task = json.loads(resp.read().decode('utf-8')) wait_task(_task_lookup, task['id']) resp = self.request(uri + '/COPYING') - self.assertEquals(200, resp.status) + self.assertEqual(200, resp.status) class StorageVolumeTests(unittest.TestCase): @@ -255,20 +282,29 @@ def setUp(self): def test_get_storagevolume(self): uri = '/plugins/kimchi/storagepools/default/storagevolumes' resp = self.request(uri) - self.assertEquals(200, resp.status) - - keys = [u'name', u'type', u'capacity', u'allocation', u'path', - u'used_by', u'format', u'isvalid', u'has_permission'] - for vol in json.loads(resp.read()): + self.assertEqual(200, resp.status) + + keys = [ + 'name', + 'type', + 'capacity', + 'allocation', + 'path', + 'used_by', + 'format', + 'isvalid', + 'has_permission', + ] + for vol in json.loads(resp.read().decode('utf-8')): resp = self.request(uri + '/' + vol['name']) - self.assertEquals(200, resp.status) + self.assertEqual(200, resp.status) all_keys = keys[:] - vol_info = json.loads(resp.read()) + vol_info = 
json.loads(resp.read().decode('utf-8')) if vol_info['format'] == 'iso': - all_keys.extend([u'os_distro', u'os_version', u'bootable']) + all_keys.extend(['os_distro', 'os_version', 'bootable']) - self.assertEquals(sorted(all_keys), sorted(vol_info.keys())) + self.assertEqual(sorted(all_keys), sorted(vol_info.keys())) def test_storagevolume_action(self): _do_volume_test(self, model, 'default') diff --git a/tests/test_networkxml.py b/tests/test_networkxml.py index 6c133c56e..27b6460a8 100644 --- a/tests/test_networkxml.py +++ b/tests/test_networkxml.py @@ -16,20 +16,16 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA +import unittest import ipaddr import lxml.etree as ET -import unittest - - -from wok.xmlutils.utils import xpath_get_text - from wok.plugins.kimchi.xmlutils import network as nxml +from wok.xmlutils.utils import xpath_get_text def normalize_xml(xml_str): - return ET.tostring(ET.fromstring(xml_str, - ET.XMLParser(remove_blank_text=True))) + return ET.tostring(ET.fromstring(xml_str, ET.XMLParser(remove_blank_text=True))) class NetworkXmlTests(unittest.TestCase): @@ -37,124 +33,128 @@ def test_dhcp_xml(self): """ Test network dhcp xml """ - dhcp_range = {"start": "192.168.122.100", "end": "192.168.122.254"} - host1 = {"mac": "00:16:3e:77:e2:ed", - "name": "foo.example.com", - "ip": "192.168.122.10"} - host2 = {"mac": "00:16:3e:3e:a9:1a", - "name": "bar.example.com", - "ip": "192.168.122.11"} + dhcp_range = {'start': '192.168.122.100', 'end': '192.168.122.254'} + host1 = { + 'mac': '00:16:3e:77:e2:ed', + 'name': 'foo.example.com', + 'ip': '192.168.122.10', + } + host2 = { + 'mac': '00:16:3e:3e:a9:1a', + 'name': 'bar.example.com', + 'ip': '192.168.122.11', + } params = {} dhcp = nxml._get_dhcp_elem(**params) - self.assertEquals(None, dhcp) + self.assertEqual(None, dhcp) - params["range"] = dhcp_range + params['range'] = dhcp_range xml = ET.tostring(nxml._get_dhcp_elem(**params)) - start = xpath_get_text(xml, "/dhcp/range/@start") - end = xpath_get_text(xml, "/dhcp/range/@end") - self.assertEquals(dhcp_range['start'], start[0]) - self.assertEquals(dhcp_range['end'], end[0]) + start = xpath_get_text(xml, '/dhcp/range/@start') + end = xpath_get_text(xml, '/dhcp/range/@end') + self.assertEqual(dhcp_range['start'], start[0]) + self.assertEqual(dhcp_range['end'], end[0]) - params["hosts"] = [host1, host2] + params['hosts'] = [host1, host2] xml = ET.tostring(nxml._get_dhcp_elem(**params)) - ip = xpath_get_text(xml, "/dhcp/host/@ip") - self.assertEquals(ip, [host1['ip'], host2['ip']]) + ip = xpath_get_text(xml, '/dhcp/host/@ip') + self.assertEqual(ip, [host1['ip'], host2['ip']]) def test_ip_xml(self): """ Test network ip xml """ - dhcp_range = {"start": "192.168.122.100", "end": "192.168.122.254"} + dhcp_range = {'start': '192.168.122.100', 'end': '192.168.122.254'} params = {} dhcp = nxml._get_dhcp_elem(**params) - self.assertEquals(None, dhcp) + self.assertEqual(None, dhcp) - params["net"] = "192.168.122.0/255.255.255.0" - params["dhcp"] = {'range': dhcp_range} + params['net'] = '192.168.122.0/255.255.255.0' + params['dhcp'] = {'range': dhcp_range} xml = ET.tostring(nxml._get_ip_elem(**params)) - start = xpath_get_text(xml, "/ip/dhcp/range/@start")[0] - end = xpath_get_text(xml, "/ip/dhcp/range/@end")[0] - self.assertEquals(dhcp_range['start'], start) - self.assertEquals(dhcp_range['end'], end) + start = 
xpath_get_text(xml, '/ip/dhcp/range/@start')[0] + end = xpath_get_text(xml, '/ip/dhcp/range/@end')[0] + self.assertEqual(dhcp_range['start'], start) + self.assertEqual(dhcp_range['end'], end) - address = xpath_get_text(xml, "/ip/@address")[0] - netmask = xpath_get_text(xml, "/ip/@netmask")[0] - self.assertEquals(address, params["net"].split("/")[0]) - self.assertEquals(netmask, params["net"].split("/")[1]) + address = xpath_get_text(xml, '/ip/@address')[0] + netmask = xpath_get_text(xml, '/ip/@netmask')[0] + self.assertEqual(address, params['net'].split('/')[0]) + self.assertEqual(netmask, params['net'].split('/')[1]) # test _get_ip_xml can accepts strings: '192.168.122.0/24', # which is same as "192.168.122.0/255.255.255.0" - params["net"] = "192.168.122.0/24" + params['net'] = '192.168.122.0/24' xml = ET.tostring(nxml._get_ip_elem(**params)) - netmask = xpath_get_text(xml, "/ip/@netmask")[0] - self.assertEquals(netmask, - str(ipaddr.IPNetwork(params["net"]).netmask)) + netmask = xpath_get_text(xml, '/ip/@netmask')[0] + self.assertEqual(netmask, str(ipaddr.IPNetwork(params['net']).netmask)) def test_forward_xml(self): """ Test network forward xml """ - params = {"mode": None} + params = {'mode': None} forward = nxml._get_forward_elem(**params) - self.assertEquals(None, forward) + self.assertEqual(None, forward) - params["mode"] = 'nat' - params["dev"] = 'eth0' + params['mode'] = 'nat' + params['dev'] = 'eth0' xml = ET.tostring(nxml._get_forward_elem(**params)) - mode = xpath_get_text(xml, "/forward/@mode")[0] - dev = xpath_get_text(xml, "/forward/@dev")[0] - self.assertEquals(params['mode'], mode) - self.assertEquals(params['dev'], dev) + mode = xpath_get_text(xml, '/forward/@mode')[0] + dev = xpath_get_text(xml, '/forward/@dev')[0] + self.assertEqual(params['mode'], mode) + self.assertEqual(params['dev'], dev) def test_network_xml(self): """ Test network xml """ - params = {"name": "test", - "forward": {"mode": "nat", "dev": ""}, - "net": "192.168.0.0/255.255.255.0"} + params = { + 'name': 'test', + 'forward': {'mode': 'nat', 'dev': ''}, + 'net': '192.168.0.0/255.255.255.0', + } xml = nxml.to_network_xml(**params) - name = xpath_get_text(xml, "/network/name")[0] - self.assertEquals(name, params['name']) + name = xpath_get_text(xml, '/network/name')[0] + self.assertEqual(name, params['name']) - forward_mode = xpath_get_text(xml, "/network/forward/@mode")[0] - self.assertEquals(forward_mode, params['forward']['mode']) - forward_dev = xpath_get_text(xml, "/network/forward/@dev")[0] - self.assertEquals(forward_dev, '') + forward_mode = xpath_get_text(xml, '/network/forward/@mode')[0] + self.assertEqual(forward_mode, params['forward']['mode']) + forward_dev = xpath_get_text(xml, '/network/forward/@dev')[0] + self.assertEqual(forward_dev, '') - address = xpath_get_text(xml, "/network/ip/@address")[0] - self.assertEquals(address, params["net"].split("/")[0]) - netmask = xpath_get_text(xml, "/network/ip/@netmask")[0] - self.assertEquals(netmask, params["net"].split("/")[1]) + address = xpath_get_text(xml, '/network/ip/@address')[0] + self.assertEqual(address, params['net'].split('/')[0]) + netmask = xpath_get_text(xml, '/network/ip/@netmask')[0] + self.assertEqual(netmask, params['net'].split('/')[1]) - dhcp_start = xpath_get_text(xml, "/network/ip/dhcp/range/@start") - self.assertEquals(dhcp_start, []) - dhcp_end = xpath_get_text(xml, "/network/ip/dhcp/range/@end") - self.assertEquals(dhcp_end, []) + dhcp_start = xpath_get_text(xml, '/network/ip/dhcp/range/@start') + 
self.assertEqual(dhcp_start, []) + dhcp_end = xpath_get_text(xml, '/network/ip/dhcp/range/@end') + self.assertEqual(dhcp_end, []) # test optional params - params['forward']['dev'] = "eth0" - params['dhcp'] = {"range": {'start': '192.168.0.1', - 'end': '192.168.0.254'}} + params['forward']['dev'] = 'eth0' + params['dhcp'] = { + 'range': {'start': '192.168.0.1', 'end': '192.168.0.254'}} xml = nxml.to_network_xml(**params) - forward_dev = xpath_get_text(xml, "/network/forward/@dev")[0] - self.assertEquals(forward_dev, params['forward']['dev']) + forward_dev = xpath_get_text(xml, '/network/forward/@dev')[0] + self.assertEqual(forward_dev, params['forward']['dev']) - dhcp_start = xpath_get_text(xml, "/network/ip/dhcp/range/@start")[0] - self.assertEquals(dhcp_start, params['dhcp']['range']['start']) - dhcp_end = xpath_get_text(xml, "/network/ip/dhcp/range/@end")[0] - self.assertEquals(dhcp_end, params['dhcp']['range']['end']) + dhcp_start = xpath_get_text(xml, '/network/ip/dhcp/range/@start')[0] + self.assertEqual(dhcp_start, params['dhcp']['range']['start']) + dhcp_end = xpath_get_text(xml, '/network/ip/dhcp/range/@end')[0] + self.assertEqual(dhcp_end, params['dhcp']['range']['end']) # test _get_ip_xml can accepts strings: '192.168.122.0/24', # which is same as "192.168.122.0/255.255.255.0" - params["net"] = "192.168.0.0/24" + params['net'] = '192.168.0.0/24' xml = nxml.to_network_xml(**params) - netmask = xpath_get_text(xml, "/network/ip/@netmask")[0] - self.assertEquals(netmask, - str(ipaddr.IPNetwork(params["net"]).netmask)) + netmask = xpath_get_text(xml, '/network/ip/@netmask')[0] + self.assertEqual(netmask, str(ipaddr.IPNetwork(params['net']).netmask)) def test_vepa_network_singledev_xml(self): expected_xml = """\ @@ -165,11 +165,8 @@ def test_vepa_network_singledev_xml(self): """ params = { - "name": "test_vepa", - "forward": { - "mode": "vepa", - "devs": ["vepa_switch_interface"] - } + 'name': 'test_vepa', + 'forward': {'mode': 'vepa', 'devs': ['vepa_switch_interface']}, } xml_str = nxml.to_network_xml(**params) self.assertEqual(xml_str, expected_xml) @@ -185,15 +182,15 @@ def test_vepa_network_multipledevs_xml(self): """ params = { - "name": "test_vepa", - "forward": { - "mode": "vepa", - "devs": [ - "vepa_switch_interface1", - "vepa_switch_interface2", - "vepa_switch_interface3" - ] - } + 'name': 'test_vepa', + 'forward': { + 'mode': 'vepa', + 'devs': [ + 'vepa_switch_interface1', + 'vepa_switch_interface2', + 'vepa_switch_interface3', + ], + }, } xml_str = nxml.to_network_xml(**params) self.assertEqual(xml_str, expected_xml) @@ -207,11 +204,8 @@ def test_passthrough_network_singledev_xml(self): """ params = { - "name": "test_passthrough", - "forward": { - "mode": "passthrough", - "devs": ["passthrough_interface"] - } + 'name': 'test_passthrough', + 'forward': {'mode': 'passthrough', 'devs': ['passthrough_interface']}, } xml_str = nxml.to_network_xml(**params) self.assertEqual(xml_str, expected_xml) @@ -227,22 +221,21 @@ def test_passthrough_network_multipledevs_xml(self): """ params = { - "name": "test_passthrough", - "forward": { - "mode": "passthrough", - "devs": [ - "passthrough_interface1", - "passthrough_interface2", - "passthrough_interface3" - ] - } + 'name': 'test_passthrough', + 'forward': { + 'mode': 'passthrough', + 'devs': [ + 'passthrough_interface1', + 'passthrough_interface2', + 'passthrough_interface3', + ], + }, } xml_str = nxml.to_network_xml(**params) self.assertEqual(xml_str, expected_xml) class InterfaceXmlTests(unittest.TestCase): - def 
test_vlan_tagged_bridge_no_ip(self): expected_xml = """ @@ -257,7 +250,7 @@ def test_vlan_tagged_bridge_no_ip(self): """ actual_xml = nxml.create_vlan_tagged_bridge_xml('br10', 'em1', '10') - self.assertEquals(actual_xml, normalize_xml(expected_xml)) + self.assertEqual(actual_xml, normalize_xml(expected_xml)) def test_linux_bridge_no_ip(self): em1_xml = """ @@ -279,6 +272,6 @@ def test_linux_bridge_no_ip(self): """ - actual_xml = nxml.create_linux_bridge_xml('br10', 'em1', - normalize_xml(em1_xml)) - self.assertEquals(actual_xml, normalize_xml(expected_xml)) + actual_xml = nxml.create_linux_bridge_xml( + 'br10', 'em1', normalize_xml(em1_xml)) + self.assertEqual(actual_xml, normalize_xml(expected_xml)) diff --git a/tests/test_osinfo.py b/tests/test_osinfo.py index e78e1c034..4ac04cd69 100644 --- a/tests/test_osinfo.py +++ b/tests/test_osinfo.py @@ -16,58 +16,76 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - import os import unittest -from wok.plugins.kimchi.osinfo import _get_arch, get_template_default, lookup +from wok.plugins.kimchi.osinfo import _get_arch +from wok.plugins.kimchi.osinfo import get_template_default +from wok.plugins.kimchi.osinfo import lookup from wok.plugins.kimchi.osinfo import modern_version_bases class OSInfoTests(unittest.TestCase): def test_default_lookup(self): entry = lookup(None, None) - self.assertEquals('unknown', entry['os_distro']) - self.assertEquals('unknown', entry['os_version']) - if not os.uname()[4] == "s390x": - self.assertEquals(['default'], entry['networks']) + self.assertEqual('unknown', entry['os_distro']) + self.assertEqual('unknown', entry['os_version']) + if not os.uname()[4] == 's390x': + self.assertEqual(['default'], entry['networks']) def test_old_distros(self): - old_versions = {'debian': '5.0', 'ubuntu': '7.04', 'opensuse': '10.1', - 'centos': '5.1', 'rhel': '5.1', 'fedora': '15'} - for distro, version in old_versions.iteritems(): + old_versions = { + 'debian': '5.0', + 'ubuntu': '7.04', + 'opensuse': '10.1', + 'centos': '5.1', + 'rhel': '5.1', + 'fedora': '15', + } + for distro, version in old_versions.items(): entry = lookup(distro, version) - self.assertEquals(entry['disk_bus'], - get_template_default('old', 'disk_bus')) - self.assertEquals(entry['nic_model'], - get_template_default('old', 'nic_model')) + self.assertEqual(entry['disk_bus'], + get_template_default('old', 'disk_bus')) + self.assertEqual( + entry['nic_model'], get_template_default('old', 'nic_model') + ) def test_modern_bases(self): - if not os.uname()[4] == "s390x": - for distro, version in\ - modern_version_bases[_get_arch()].iteritems(): + if not os.uname()[4] == 's390x': + for distro, version in modern_version_bases[_get_arch()].items(): entry = lookup(distro, version) - self.assertEquals(entry['disk_bus'], - get_template_default('modern', 'disk_bus')) - self.assertEquals(entry['nic_model'], - get_template_default('modern', 'nic_model')) + self.assertEqual( + entry['disk_bus'], get_template_default( + 'modern', 'disk_bus') + ) + self.assertEqual( + entry['nic_model'], get_template_default( + 'modern', 'nic_model') + ) def test_modern_distros(self): # versions based on ppc64 modern distros - modern_versions = {'ubuntu': '14.04', 'opensuse': '13.1', - 'rhel': '6.5', 'fedora': '19', 'sles': '11sp3'} - for distro, version in modern_versions.iteritems(): + modern_versions = { + 'ubuntu': '14.04', + 
'opensuse': '13.1', + 'rhel': '6.5', + 'fedora': '19', + 'sles': '11sp3', + } + for distro, version in modern_versions.items(): entry = lookup(distro, version) - self.assertEquals(entry['disk_bus'], - get_template_default('modern', 'disk_bus')) - self.assertEquals(entry['nic_model'], - get_template_default('modern', 'nic_model')) + self.assertEqual( + entry['disk_bus'], get_template_default('modern', 'disk_bus') + ) + self.assertEqual( + entry['nic_model'], get_template_default('modern', 'nic_model') + ) def test_lookup_unknown_distro_version_returns_old_distro(self): distro = 'unknown_distro' version = 'unknown_version' entry = lookup(distro, version) - self.assertEquals(entry['disk_bus'], - get_template_default('old', 'disk_bus')) - self.assertEquals(entry['nic_model'], - get_template_default('old', 'nic_model')) + self.assertEqual(entry['disk_bus'], + get_template_default('old', 'disk_bus')) + self.assertEqual(entry['nic_model'], + get_template_default('old', 'nic_model')) diff --git a/tests/test_rest.py b/tests/test_rest.py index 331cf9b84..cfccae853 100644 --- a/tests/test_rest.py +++ b/tests/test_rest.py @@ -17,33 +17,38 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - -import cherrypy import json import os import re import time import unittest -import urllib2 -import urlparse +import urllib.parse from functools import partial -from tests.utils import patch_auth, request, run_server, wait_task - +import cherrypy +import iso_gen from wok.asynctask import AsyncTask -from wok.rollbackcontext import RollbackContext - from wok.plugins.kimchi.osinfo import get_template_default +from wok.rollbackcontext import RollbackContext -import iso_gen +from tests.utils import patch_auth +from tests.utils import request +from tests.utils import run_server +from tests.utils import wait_task test_server = None model = None fake_iso = '/tmp/fake.iso' -DISKS = [{'size': 10, 'format': 'qcow2', 'index': 0, 'pool': { - 'name': '/plugins/kimchi/storagepools/default-pool'}}] +DISKS = [ + { + 'size': 10, + 'format': 'qcow2', + 'index': 0, + 'pool': {'name': '/plugins/kimchi/storagepools/default-pool'}, + } +] def setUpModule(): @@ -55,14 +60,15 @@ def setUpModule(): # Create fake ISO to do the tests iso_gen.construct_fake_iso(fake_iso, True, '12.04', 'ubuntu') - iso_gen.construct_fake_iso("/var/lib/libvirt/images/fedora.iso", True, - "17", "fedora") + iso_gen.construct_fake_iso( + '/var/lib/libvirt/images/fedora.iso', True, '17', 'fedora' + ) def tearDownModule(): test_server.stop() os.unlink(fake_iso) - os.unlink("/var/lib/libvirt/images/fedora.iso") + os.unlink('/var/lib/libvirt/images/fedora.iso') class RestTests(unittest.TestCase): @@ -72,8 +78,9 @@ def _async_op(self, cb, opaque): def _except_op(self, cb, opaque): time.sleep(1) - raise Exception("Oops, this is an exception handle test." - " You can ignore it safely") + raise Exception( + 'Oops, this is an exception handle test.' ' You can ignore it safely' + ) cb('success', True) def _intermid_op(self, cb, opaque): @@ -86,392 +93,401 @@ def setUp(self): def assertHTTPStatus(self, code, *args): resp = self.request(*args) - self.assertEquals(code, resp.status) + self.assertEqual(code, resp.status) def test_get_vms(self): vms = json.loads(self.request('/plugins/kimchi/vms').read()) # test_rest.py uses MockModel() which connects to libvirt URI # test:///default. 
By default this driver already has one VM created - self.assertEquals(1, len(vms)) + self.assertEqual(1, len(vms)) # Create a template as a base for our VMs - req = json.dumps({'name': 'test', - 'source_media': {'type': 'disk', 'path': fake_iso}}) + req = json.dumps( + {'name': 'test', 'source_media': {'type': 'disk', 'path': fake_iso}} + ) resp = self.request('/plugins/kimchi/templates', req, 'POST') - self.assertEquals(201, resp.status) + self.assertEqual(201, resp.status) test_users = ['root'] test_groups = ['wheel'] # Now add a couple of VMs to the mock model - for i in xrange(10): + for i in range(10): name = 'vm-%i' % i - req = json.dumps({'name': name, - 'template': '/plugins/kimchi/templates/test', - 'users': test_users, 'groups': test_groups}) + req = json.dumps( + { + 'name': name, + 'template': '/plugins/kimchi/templates/test', + 'users': test_users, + 'groups': test_groups, + } + ) resp = self.request('/plugins/kimchi/vms', req, 'POST') - self.assertEquals(202, resp.status) + self.assertEqual(202, resp.status) task = json.loads(resp.read()) wait_task(self._task_lookup, task['id']) vms = json.loads(self.request('/plugins/kimchi/vms').read()) - self.assertEquals(11, len(vms)) + self.assertEqual(11, len(vms)) vm = json.loads(self.request('/plugins/kimchi/vms/vm-1').read()) - self.assertEquals('vm-1', vm['name']) - self.assertEquals('shutoff', vm['state']) - self.assertEquals([], vm['users']) - self.assertEquals([], vm['groups']) + self.assertEqual('vm-1', vm['name']) + self.assertEqual('shutoff', vm['state']) + self.assertEqual([], vm['users']) + self.assertEqual([], vm['groups']) def test_edit_vm_cpuhotplug(self): - req = json.dumps({'name': 'template_cpuhotplug', - 'source_media': {'type': 'disk', 'path': fake_iso}}) + req = json.dumps( + { + 'name': 'template_cpuhotplug', + 'source_media': {'type': 'disk', 'path': fake_iso}, + } + ) resp = self.request('/plugins/kimchi/templates', req, 'POST') - self.assertEquals(201, resp.status) + self.assertEqual(201, resp.status) req = json.dumps( - {'name': 'vm-cpuhotplug', - 'template': '/plugins/kimchi/templates/template_cpuhotplug'} + { + 'name': 'vm-cpuhotplug', + 'template': '/plugins/kimchi/templates/template_cpuhotplug', + } ) resp = self.request('/plugins/kimchi/vms', req, 'POST') - self.assertEquals(202, resp.status) + self.assertEqual(202, resp.status) task = json.loads(resp.read()) wait_task(self._task_lookup, task['id']) req = json.dumps({'cpu_info': {'maxvcpus': 5, 'vcpus': 1}}) - resp = self.request('/plugins/kimchi/vms/vm-cpuhotplug', - req, 'PUT') - self.assertEquals(200, resp.status) + resp = self.request('/plugins/kimchi/vms/vm-cpuhotplug', req, 'PUT') + self.assertEqual(200, resp.status) - resp = self.request('/plugins/kimchi/vms/vm-cpuhotplug/start', - '{}', 'POST') - self.assertEquals(200, resp.status) + resp = self.request( + '/plugins/kimchi/vms/vm-cpuhotplug/start', '{}', 'POST') + self.assertEqual(200, resp.status) req = json.dumps({'cpu_info': {'vcpus': 5}}) resp = self.request('/plugins/kimchi/vms/vm-cpuhotplug', req, 'PUT') - self.assertEquals(200, resp.status) + self.assertEqual(200, resp.status) def test_edit_vm(self): - req = json.dumps({'name': 'test', - 'source_media': {'type': 'disk', 'path': fake_iso}}) + req = json.dumps( + {'name': 'test', 'source_media': {'type': 'disk', 'path': fake_iso}} + ) resp = self.request('/plugins/kimchi/templates', req, 'POST') - self.assertEquals(201, resp.status) + self.assertEqual(201, resp.status) - req = json.dumps({'name': 'vm-1', - 'template': 
'/plugins/kimchi/templates/test'}) + req = json.dumps( + {'name': 'vm-1', 'template': '/plugins/kimchi/templates/test'}) resp = self.request('/plugins/kimchi/vms', req, 'POST') - self.assertEquals(202, resp.status) + self.assertEqual(202, resp.status) task = json.loads(resp.read()) wait_task(self._task_lookup, task['id']) vm = json.loads(self.request('/plugins/kimchi/vms/vm-1').read()) - self.assertEquals('vm-1', vm['name']) + self.assertEqual('vm-1', vm['name']) req = json.dumps({'cpu_info': {'maxvcpus': 5, 'vcpus': 3}}) resp = self.request('/plugins/kimchi/vms/vm-1', req, 'PUT') - self.assertEquals(200, resp.status) + self.assertEqual(200, resp.status) # Test max memory req = json.dumps({'memory': {'maxmemory': 23}}) resp = self.request('/plugins/kimchi/vms/vm-1', req, 'PUT') - self.assertEquals(400, resp.status) + self.assertEqual(400, resp.status) req = json.dumps({'memory': {'maxmemory': 'maxmem 80'}}) resp = self.request('/plugins/kimchi/vms/vm-1', req, 'PUT') - self.assertEquals(400, resp.status) + self.assertEqual(400, resp.status) # Check if there is support to memory hotplug resp = self.request('/plugins/kimchi/config/capabilities').read() conf = json.loads(resp) - if os.uname()[4] != "s390x" and conf['mem_hotplug_support']: + if os.uname()[4] != 's390x' and conf['mem_hotplug_support']: req = json.dumps({'memory': {'maxmemory': 3072}}) resp = self.request('/plugins/kimchi/vms/vm-1', req, 'PUT') - self.assertEquals(200, resp.status) + self.assertEqual(200, resp.status) resp = self.request('/plugins/kimchi/vms/vm-1/start', '{}', 'POST') - self.assertEquals(200, resp.status) + self.assertEqual(200, resp.status) req = json.dumps({'unsupported-attr': 'attr'}) resp = self.request('/plugins/kimchi/vms/vm-1', req, 'PUT') - self.assertEquals(400, resp.status) + self.assertEqual(400, resp.status) req = json.dumps({'name': 'new-vm'}) resp = self.request('/plugins/kimchi/vms/vm-1', req, 'PUT') - self.assertEquals(400, resp.status) + self.assertEqual(400, resp.status) # Test memory hotplug req = json.dumps({'memory': {'current': 2048}}) resp = self.request('/plugins/kimchi/vms/vm-1', req, 'PUT') if conf['mem_hotplug_support']: - self.assertEquals(200, resp.status) + self.assertEqual(200, resp.status) else: - self.assertEquals(400, resp.status) + self.assertEqual(400, resp.status) - req = json.dumps({"graphics": {'passwd': "abcdef"}}) + req = json.dumps({'graphics': {'passwd': 'abcdef'}}) resp = self.request('/plugins/kimchi/vms/vm-1', req, 'PUT') - self.assertEquals(200, resp.status) + self.assertEqual(200, resp.status) info = json.loads(resp.read()) - self.assertEquals('abcdef', info["graphics"]["passwd"]) - self.assertEquals(None, info["graphics"]["passwdValidTo"]) + self.assertEqual('abcdef', info['graphics']['passwd']) + self.assertEqual(None, info['graphics']['passwdValidTo']) resp = self.request('/plugins/kimchi/vms/vm-1/poweroff', '{}', 'POST') - self.assertEquals(200, resp.status) + self.assertEqual(200, resp.status) - req = json.dumps({"graphics": {'passwd': "123456", - 'passwdValidTo': 20}}) + req = json.dumps( + {'graphics': {'passwd': '123456', 'passwdValidTo': 20}}) resp = self.request('/plugins/kimchi/vms/vm-1', req, 'PUT') info = json.loads(resp.read()) - self.assertEquals('123456', info["graphics"]["passwd"]) - self.assertGreaterEqual(20, info["graphics"]["passwdValidTo"]) + self.assertEqual('123456', info['graphics']['passwd']) + self.assertGreaterEqual(20, info['graphics']['passwdValidTo']) req = json.dumps({'name': 12}) resp = self.request('/plugins/kimchi/vms/vm-1', req, 
'PUT') - self.assertEquals(400, resp.status) + self.assertEqual(400, resp.status) req = json.dumps({'name': ''}) resp = self.request('/plugins/kimchi/vms/vm-1', req, 'PUT') - self.assertEquals(400, resp.status) + self.assertEqual(400, resp.status) req = json.dumps({'cpu_info': {'vcpus': -2}}) resp = self.request('/plugins/kimchi/vms/vm-1', req, 'PUT') - self.assertEquals(400, resp.status) + self.assertEqual(400, resp.status) req = json.dumps({'cpu_info': {'vcpus': 'four'}}) resp = self.request('/plugins/kimchi/vms/vm-1', req, 'PUT') - self.assertEquals(400, resp.status) + self.assertEqual(400, resp.status) req = json.dumps({'memory': {'current': 100}}) resp = self.request('/plugins/kimchi/vms/vm-1', req, 'PUT') - self.assertEquals(400, resp.status) + self.assertEqual(400, resp.status) req = json.dumps({'memory': {'current': 'ten gigas'}}) resp = self.request('/plugins/kimchi/vms/vm-1', req, 'PUT') - self.assertEquals(400, resp.status) + self.assertEqual(400, resp.status) - req = json.dumps({'name': 'new-name', 'cpu_info': {'vcpus': 5}, - 'UUID': 'notallowed'}) + req = json.dumps( + {'name': 'new-name', 'cpu_info': {'vcpus': 5}, 'UUID': 'notallowed'} + ) resp = self.request('/plugins/kimchi/vms/vm-1', req, 'PUT') - self.assertEquals(400, resp.status) + self.assertEqual(400, resp.status) - vm = json.loads( - self.request('/plugins/kimchi/vms/vm-1', req).read() - ) + vm = json.loads(self.request('/plugins/kimchi/vms/vm-1', req).read()) # The maxmemory will be automatically increased when the amount of # memory value is greater than the current maxmemory value - params = {'name': u'∨м-црdαtеd', 'cpu_info': {'vcpus': 5}, - 'memory': {'current': 3072}} + params = { + 'name': '∨м-црdαtеd', + 'cpu_info': {'vcpus': 5}, + 'memory': {'current': 3072}, + } req = json.dumps(params) resp = self.request('/plugins/kimchi/vms/vm-1', req, 'PUT') - self.assertEquals(303, resp.status) - vm_updated = json.loads( - self.request('/plugins/kimchi/vms/∨м-црdαtеd', req).read() - ) + self.assertEqual(303, resp.status) + quoted_uri = urllib.parse.quote('/plugins/kimchi/vms/∨м-црdαtеd') + vm_updated = json.loads(self.request(quoted_uri, req).read()) # Memory was hot plugged - vm['name'] = u'∨м-црdαtеd' + vm['name'] = '∨м-црdαtеd' vm['cpu_info'].update(params['cpu_info']) vm['memory']['current'] = 3072 vm['memory']['maxmemory'] = 3072 for key in params.keys(): - self.assertEquals(vm[key], vm_updated[key]) + self.assertEqual(vm[key], vm_updated[key]) # change only VM users - groups are not changed (default is empty) resp = self.request('/plugins/kimchi/users', '{}', 'GET') users = json.loads(resp.read()) req = json.dumps({'users': users}) - resp = self.request('/plugins/kimchi/vms/∨м-црdαtеd', req, 'PUT') - self.assertEquals(200, resp.status) - info = json.loads( - self.request('/plugins/kimchi/vms/∨м-црdαtеd', '{}').read() - ) - self.assertEquals(users, info['users']) + resp = self.request(quoted_uri, req, 'PUT') + self.assertEqual(200, resp.status) + info = json.loads(self.request(quoted_uri, '{}').read()) + self.assertEqual(users, info['users']) # change only VM groups - users are not changed (default is empty) resp = self.request('/plugins/kimchi/groups', '{}', 'GET') groups = json.loads(resp.read()) req = json.dumps({'groups': groups}) - resp = self.request('/plugins/kimchi/vms/∨м-црdαtеd', req, 'PUT') - self.assertEquals(200, resp.status) - info = json.loads( - self.request('/plugins/kimchi/vms/∨м-црdαtеd', '{}').read() - ) - self.assertEquals(groups, info['groups']) + resp = self.request(quoted_uri, req, 'PUT') + 
self.assertEqual(200, resp.status) + info = json.loads(self.request(quoted_uri, '{}').read()) + self.assertEqual(groups, info['groups']) # change VM users (wrong value) and groups # when an error occurs, everything fails and nothing is changed req = json.dumps({'users': ['userdoesnotexist'], 'groups': []}) - resp = self.request('/plugins/kimchi/vms/∨м-црdαtеd', req, 'PUT') - self.assertEquals(400, resp.status) + resp = self.request(quoted_uri, req, 'PUT') + self.assertEqual(400, resp.status) # change VM users and groups (wrong value) # when an error occurs, everything fails and nothing is changed req = json.dumps({'users': [], 'groups': ['groupdoesnotexist']}) - resp = self.request('/plugins/kimchi/vms/∨м-црdαtеd', req, 'PUT') - self.assertEquals(400, resp.status) + resp = self.request(quoted_uri, req, 'PUT') + self.assertEqual(400, resp.status) # change bootorder - b_order = ["hd", "network", "cdrom"] - req = json.dumps({"bootorder": b_order}) - resp = self.request('/plugins/kimchi/vms/∨м-црdαtеd', req, 'PUT') - self.assertEquals(200, resp.status) - self.assertEquals(json.loads(resp.read())["bootorder"], b_order) + b_order = ['hd', 'network', 'cdrom'] + req = json.dumps({'bootorder': b_order}) + resp = self.request(quoted_uri, req, 'PUT') + self.assertEqual(200, resp.status) + self.assertEqual(json.loads(resp.read())['bootorder'], b_order) - req = json.dumps({"bootorder": ["bla"]}) - resp = self.request('/plugins/kimchi/vms/∨м-црdαtеd', req, 'PUT') - self.assertEquals(400, resp.status) + req = json.dumps({'bootorder': ['bla']}) + resp = self.request(quoted_uri, req, 'PUT') + self.assertEqual(400, resp.status) # change vm graphics type - req = json.dumps({"graphics": {"type": "spice"}}) - resp = self.request('/plugins/kimchi/vms/∨м-црdαtеd', req, 'PUT') - self.assertEquals(json.loads(resp.read())["graphics"]["type"], "spice") + req = json.dumps({'graphics': {'type': 'spice'}}) + resp = self.request(quoted_uri, req, 'PUT') + self.assertEqual(json.loads(resp.read())['graphics']['type'], 'spice') # try to add a invalid type - req = json.dumps({"graphics": {"type": "test"}}) - resp = self.request('/plugins/kimchi/vms/∨м-црdαtеd', req, 'PUT') - self.assertEquals(400, resp.status) + req = json.dumps({'graphics': {'type': 'test'}}) + resp = self.request(quoted_uri, req, 'PUT') + self.assertEqual(400, resp.status) # set vm autostart tests (powered off) - resp = self.request('/plugins/kimchi/vms/∨м-црdαtеd/start', - '{}', 'POST') - self.assertEquals(200, resp.status) - req = json.dumps({"autostart": True}) - resp = self.request('/plugins/kimchi/vms/∨м-црdαtеd', req, 'PUT') - self.assertEquals(200, resp.status) - resp = self.request('/plugins/kimchi/vms/∨м-црdαtеd', - '{}', 'GET').read() - self.assertEquals(json.loads(resp)["autostart"], True) + resp = self.request(f'{quoted_uri}/start', '{}', 'POST') + self.assertEqual(200, resp.status) + req = json.dumps({'autostart': True}) + resp = self.request(quoted_uri, req, 'PUT') + self.assertEqual(200, resp.status) + resp = self.request(quoted_uri, '{}', 'GET').read() + self.assertEqual(json.loads(resp)['autostart'], True) # set vm autostart tests (running) - resp = self.request('/plugins/kimchi/vms/∨м-црdαtеd/poweroff', - '{}', 'POST') - self.assertEquals(200, resp.status) - req = json.dumps({"autostart": False}) - resp = self.request('/plugins/kimchi/vms/∨м-црdαtеd', req, 'PUT') - self.assertEquals(200, resp.status) - resp = self.request('/plugins/kimchi/vms/∨м-црdαtеd', - '{}', 'GET').read() - self.assertEquals(json.loads(resp)["autostart"], False) + 
resp = self.request(f'{quoted_uri}/poweroff', '{}', 'POST') + self.assertEqual(200, resp.status) + req = json.dumps({'autostart': False}) + resp = self.request(quoted_uri, req, 'PUT') + self.assertEqual(200, resp.status) + resp = self.request(quoted_uri, '{}', 'GET').read() + self.assertEqual(json.loads(resp)['autostart'], False) def test_vm_lifecycle(self): # Create a Template - req = json.dumps({'name': 'test', - 'source_media': {'type': 'disk', 'path': fake_iso}, - 'disks': DISKS, - 'icon': 'plugins/kimchi/images/icon-debian.png'}) + req = json.dumps( + { + 'name': 'test', + 'source_media': {'type': 'disk', 'path': fake_iso}, + 'disks': DISKS, + 'icon': 'plugins/kimchi/images/icon-debian.png', + } + ) resp = self.request('/plugins/kimchi/templates', req, 'POST') - self.assertEquals(201, resp.status) + self.assertEqual(201, resp.status) # Create a VM - req = json.dumps({'name': 'test-vm', - 'template': '/plugins/kimchi/templates/test'}) + req = json.dumps( + {'name': 'test-vm', 'template': '/plugins/kimchi/templates/test'} + ) resp = self.request('/plugins/kimchi/vms', req, 'POST') task = json.loads(resp.read()) wait_task(self._task_lookup, task['id']) - self.assertEquals(202, resp.status) + self.assertEqual(202, resp.status) # Verify the VM vm = json.loads(self.request('/plugins/kimchi/vms/test-vm').read()) - self.assertEquals('shutoff', vm['state']) - self.assertEquals('plugins/kimchi/images/icon-debian.png', vm['icon']) + self.assertEqual('shutoff', vm['state']) + self.assertEqual('plugins/kimchi/images/icon-debian.png', vm['icon']) # Verify the volume was created - vol_uri = '/plugins/kimchi/storagepools/default-pool/storagevolumes/' \ - + '%s-0.img' + vol_uri = ( + '/plugins/kimchi/storagepools/default-pool/storagevolumes/' + '%s-0.img' + ) resp = self.request(vol_uri % vm['uuid']) vol = json.loads(resp.read()) - self.assertEquals(10 << 30, vol['capacity']) - self.assertEquals(['test-vm'], vol['used_by']) + self.assertEqual(10 << 30, vol['capacity']) + self.assertEqual(['test-vm'], vol['used_by']) # verify if poweroff command returns correct status - resp = self.request('/plugins/kimchi/vms/test-vm/poweroff', '{}', - 'POST') - self.assertEquals(400, resp.status) + resp = self.request( + '/plugins/kimchi/vms/test-vm/poweroff', '{}', 'POST') + self.assertEqual(400, resp.status) # verify if shutdown command returns correct status - resp = self.request('/plugins/kimchi/vms/test-vm/shutdown', '{}', - 'POST') - self.assertEquals(400, resp.status) + resp = self.request( + '/plugins/kimchi/vms/test-vm/shutdown', '{}', 'POST') + self.assertEqual(400, resp.status) # verify if reset command returns correct status resp = self.request('/plugins/kimchi/vms/test-vm/reset', '{}', 'POST') - self.assertEquals(400, resp.status) + self.assertEqual(400, resp.status) # Start the VM resp = self.request('/plugins/kimchi/vms/test-vm/start', '{}', 'POST') vm = json.loads(self.request('/plugins/kimchi/vms/test-vm').read()) - self.assertEquals('running', vm['state']) + self.assertEqual('running', vm['state']) # verify if start command returns correct status resp = self.request('/plugins/kimchi/vms/test-vm/start', '{}', 'POST') - self.assertEquals(400, resp.status) + self.assertEqual(400, resp.status) # Test screenshot resp = self.request('/' + vm['screenshot'], method='HEAD') - self.assertEquals(200, resp.status) + self.assertEqual(200, resp.status) self.assertTrue(resp.getheader('Content-type').startswith('image')) # Test Virt Viewer file resp = self.request( - '/plugins/kimchi/vms/test-vm/virtviewerfile', - 
'{}', - 'GET') - self.assertEquals(200, resp.status) - vvfilecontent = resp.read() + '/plugins/kimchi/vms/test-vm/virtviewerfile', '{}', 'GET') + self.assertEqual(200, resp.status) + vvfilecontent = resp.read().decode('utf-8') self.assertEqual( - vvfilecontent, - "[virt-viewer]\ntype=vnc\nhost=127.0.0.1\nport=5999\n" + vvfilecontent, '[virt-viewer]\ntype=vnc\nhost=127.0.0.1\nport=5999\n' ) # Clone a running VM resp = self.request('/plugins/kimchi/vms/test-vm/clone', '{}', 'POST') - self.assertEquals(400, resp.status) + self.assertEqual(400, resp.status) # Create a snapshot on running vm VM params = {'name': 'test-snap2'} - resp = self.request('/plugins/kimchi/vms/test-vm/snapshots', - json.dumps(params), - 'POST') - self.assertEquals(202, resp.status) + resp = self.request( + '/plugins/kimchi/vms/test-vm/snapshots', json.dumps(params), 'POST' + ) + self.assertEqual(202, resp.status) task = json.loads(resp.read()) wait_task(self._task_lookup, task['id']) - task = json.loads( - self.request('/plugins/kimchi/tasks/%s' % task['id']).read() - ) - self.assertEquals('finished', task['status']) + task = json.loads(self.request( + '/plugins/kimchi/tasks/%s' % task['id']).read()) + self.assertEqual('finished', task['status']) # Delete a snapshot - resp = self.request('/plugins/kimchi/vms/test-vm/snapshots/%s' % - params['name'], '{}', 'DELETE') - self.assertEquals(204, resp.status) + resp = self.request( + '/plugins/kimchi/vms/test-vm/snapshots/%s' % params['name'], '{}', 'DELETE' + ) + self.assertEqual(204, resp.status) # Force poweroff the VM - resp = self.request('/plugins/kimchi/vms/test-vm/poweroff', '{}', - 'POST') + resp = self.request( + '/plugins/kimchi/vms/test-vm/poweroff', '{}', 'POST') vm = json.loads(self.request('/plugins/kimchi/vms/test-vm').read()) - self.assertEquals('shutoff', vm['state']) + self.assertEqual('shutoff', vm['state']) # Test create VM with same name fails with 400 - req = json.dumps({'name': 'test-vm', - 'template': '/plugins/kimchi/templates/test'}) + req = json.dumps( + {'name': 'test-vm', 'template': '/plugins/kimchi/templates/test'} + ) resp = self.request('/plugins/kimchi/vms', req, 'POST') - self.assertEquals(400, resp.status) + self.assertEqual(400, resp.status) # Clone a VM resp = self.request('/plugins/kimchi/vms/test-vm/clone', '{}', 'POST') - self.assertEquals(202, resp.status) + self.assertEqual(202, resp.status) task = json.loads(resp.read()) wait_task(self._task_lookup, task['id']) task = json.loads( self.request('/plugins/kimchi/tasks/%s' % task['id'], '{}').read() ) - self.assertEquals('finished', task['status']) + self.assertEqual('finished', task['status']) clone_vm_name = task['target_uri'].split('/')[-2] - self.assertTrue(re.match(u'test-vm-clone-\d+', clone_vm_name)) + self.assertTrue(re.match('test-vm-clone-\\d+', clone_vm_name)) resp = self.request('/plugins/kimchi/vms/test-vm', '{}') original_vm_info = json.loads(resp.read()) resp = self.request('/plugins/kimchi/vms/%s' % clone_vm_name, '{}') - self.assertEquals(200, resp.status) + self.assertEqual(200, resp.status) clone_vm_info = json.loads(resp.read()) self.assertNotEqual(original_vm_info['name'], clone_vm_info['name']) @@ -482,695 +498,779 @@ def test_vm_lifecycle(self): del original_vm_info['uuid'] del clone_vm_info['uuid'] - self.assertEquals(original_vm_info, clone_vm_info) + self.assertEqual(original_vm_info, clone_vm_info) # Create a snapshot on a stopped VM params = {'name': 'test-snap'} - resp = self.request('/plugins/kimchi/vms/test-vm/snapshots', - json.dumps(params), - 'POST') - 
self.assertEquals(202, resp.status) + resp = self.request( + '/plugins/kimchi/vms/test-vm/snapshots', json.dumps(params), 'POST' + ) + self.assertEqual(202, resp.status) task = json.loads(resp.read()) wait_task(self._task_lookup, task['id']) - task = json.loads( - self.request('/plugins/kimchi/tasks/%s' % task['id']).read() - ) - self.assertEquals('finished', task['status']) + task = json.loads(self.request( + '/plugins/kimchi/tasks/%s' % task['id']).read()) + self.assertEqual('finished', task['status']) # Look up a non-existing snapshot - resp = self.request('/plugins/kimchi/vms/test-vm/snapshots/snap404', - '{}', 'GET') - self.assertEquals(404, resp.status) + resp = self.request( + '/plugins/kimchi/vms/test-vm/snapshots/snap404', '{}', 'GET' + ) + self.assertEqual(404, resp.status) # Look up a snapshot - resp = self.request('/plugins/kimchi/vms/test-vm/snapshots/%s' % - params['name'], '{}', 'GET') - self.assertEquals(200, resp.status) + resp = self.request( + '/plugins/kimchi/vms/test-vm/snapshots/%s' % params['name'], '{}', 'GET' + ) + self.assertEqual(200, resp.status) snap = json.loads(resp.read()) self.assertTrue(int(time.time()) >= int(snap['created'])) - self.assertEquals(params['name'], snap['name']) - self.assertEquals(u'', snap['parent']) - self.assertEquals(u'shutoff', snap['state']) + self.assertEqual(params['name'], snap['name']) + self.assertEqual('', snap['parent']) + self.assertEqual('shutoff', snap['state']) - resp = self.request('/plugins/kimchi/vms/test-vm/snapshots', '{}', - 'GET') - self.assertEquals(200, resp.status) + resp = self.request( + '/plugins/kimchi/vms/test-vm/snapshots', '{}', 'GET') + self.assertEqual(200, resp.status) snaps = json.loads(resp.read()) - self.assertEquals(1, len(snaps)) + self.assertEqual(1, len(snaps)) # Look up current snapshot (the one created above) - resp = self.request('/plugins/kimchi/vms/test-vm/snapshots/current', - '{}', 'GET') - self.assertEquals(200, resp.status) + resp = self.request( + '/plugins/kimchi/vms/test-vm/snapshots/current', '{}', 'GET' + ) + self.assertEqual(200, resp.status) snap = json.loads(resp.read()) - self.assertEquals(params['name'], snap['name']) + self.assertEqual(params['name'], snap['name']) - resp = self.request('/plugins/kimchi/vms/test-vm/snapshots', '{}', - 'POST') - self.assertEquals(202, resp.status) + resp = self.request( + '/plugins/kimchi/vms/test-vm/snapshots', '{}', 'POST') + self.assertEqual(202, resp.status) task = json.loads(resp.read()) snap_name = task['target_uri'].split('/')[-1] wait_task(self._task_lookup, task['id']) - resp = self.request('/plugins/kimchi/tasks/%s' % task['id'], '{}', - 'GET') + resp = self.request('/plugins/kimchi/tasks/%s' % + task['id'], '{}', 'GET') task = json.loads(resp.read()) - self.assertEquals('finished', task['status']) + self.assertEqual('finished', task['status']) - resp = self.request('/plugins/kimchi/vms/test-vm/snapshots', '{}', - 'GET') - self.assertEquals(200, resp.status) + resp = self.request( + '/plugins/kimchi/vms/test-vm/snapshots', '{}', 'GET') + self.assertEqual(200, resp.status) snaps = json.loads(resp.read()) - self.assertEquals(2, len(snaps)) + self.assertEqual(2, len(snaps)) # Look up current snapshot (the one created above) - resp = self.request('/plugins/kimchi/vms/test-vm/snapshots/current', - '{}', 'GET') - self.assertEquals(200, resp.status) + resp = self.request( + '/plugins/kimchi/vms/test-vm/snapshots/current', '{}', 'GET' + ) + self.assertEqual(200, resp.status) snap = json.loads(resp.read()) - self.assertEquals(snap_name, 
snap['name']) + self.assertEqual(snap_name, snap['name']) # Revert to snapshot - resp = self.request('/plugins/kimchi/vms/test-vm/snapshots/%s/revert' % - params['name'], '{}', 'POST') - self.assertEquals(200, resp.status) + resp = self.request( + '/plugins/kimchi/vms/test-vm/snapshots/%s/revert' % params['name'], + '{}', + 'POST', + ) + self.assertEqual(200, resp.status) snap = json.loads(resp.read()) resp = self.request('/plugins/kimchi/vms/test-vm', '{}', 'GET') - self.assertEquals(200, resp.status) + self.assertEqual(200, resp.status) vm = json.loads(resp.read()) - self.assertEquals(vm['state'], snap['state']) - resp = self.request('/plugins/kimchi/vms/test-vm/snapshots/current', - '{}', 'GET') - self.assertEquals(200, resp.status) + self.assertEqual(vm['state'], snap['state']) + resp = self.request( + '/plugins/kimchi/vms/test-vm/snapshots/current', '{}', 'GET' + ) + self.assertEqual(200, resp.status) current_snap = json.loads(resp.read()) - self.assertEquals(snap, current_snap) + self.assertEqual(snap, current_snap) # Delete a snapshot - resp = self.request('/plugins/kimchi/vms/test-vm/snapshots/%s' % - params['name'], '{}', 'DELETE') - self.assertEquals(204, resp.status) + resp = self.request( + '/plugins/kimchi/vms/test-vm/snapshots/%s' % params['name'], '{}', 'DELETE' + ) + self.assertEqual(204, resp.status) # Suspend the VM resp = self.request('/plugins/kimchi/vms/test-vm', '{}', 'GET') - self.assertEquals(200, resp.status) + self.assertEqual(200, resp.status) vm = json.loads(resp.read()) - self.assertEquals(vm['state'], 'shutoff') - resp = self.request('/plugins/kimchi/vms/test-vm/suspend', '{}', - 'POST') - self.assertEquals(400, resp.status) + self.assertEqual(vm['state'], 'shutoff') + resp = self.request( + '/plugins/kimchi/vms/test-vm/suspend', '{}', 'POST') + self.assertEqual(400, resp.status) resp = self.request('/plugins/kimchi/vms/test-vm/start', '{}', 'POST') - self.assertEquals(200, resp.status) + self.assertEqual(200, resp.status) resp = self.request('/plugins/kimchi/vms/test-vm', '{}', 'GET') - self.assertEquals(200, resp.status) + self.assertEqual(200, resp.status) vm = json.loads(resp.read()) - self.assertEquals(vm['state'], 'running') - resp = self.request('/plugins/kimchi/vms/test-vm/suspend', '{}', - 'POST') - self.assertEquals(200, resp.status) + self.assertEqual(vm['state'], 'running') + resp = self.request( + '/plugins/kimchi/vms/test-vm/suspend', '{}', 'POST') + self.assertEqual(200, resp.status) resp = self.request('/plugins/kimchi/vms/test-vm', '{}', 'GET') - self.assertEquals(200, resp.status) + self.assertEqual(200, resp.status) vm = json.loads(resp.read()) - self.assertEquals(vm['state'], 'paused') + self.assertEqual(vm['state'], 'paused') # Resume the VM resp = self.request('/plugins/kimchi/vms/test-vm/resume', '{}', 'POST') - self.assertEquals(200, resp.status) + self.assertEqual(200, resp.status) resp = self.request('/plugins/kimchi/vms/test-vm', '{}', 'GET') - self.assertEquals(200, resp.status) + self.assertEqual(200, resp.status) vm = json.loads(resp.read()) - self.assertEquals(vm['state'], 'running') + self.assertEqual(vm['state'], 'running') # Delete the VM resp = self.request('/plugins/kimchi/vms/test-vm', '{}', 'DELETE') - self.assertEquals(204, resp.status) + self.assertEqual(204, resp.status) # Delete the Template resp = self.request('/plugins/kimchi/templates/test', '{}', 'DELETE') - self.assertEquals(204, resp.status) + self.assertEqual(204, resp.status) # Verify the volume was deleted self.assertHTTPStatus(404, vol_uri % vm['uuid']) def 
test_vm_netboot(self): # Create a Template - req = json.dumps({'name': 'tnetboot', - 'source_media': {'type': 'netboot'}}) + req = json.dumps( + {'name': 'tnetboot', 'source_media': {'type': 'netboot'}}) resp = self.request('/plugins/kimchi/templates', req, 'POST') - self.assertEquals(201, resp.status) + self.assertEqual(201, resp.status) # Create a VM - req = json.dumps({'name': 'test-vm', - 'template': '/plugins/kimchi/templates/tnetboot'}) + req = json.dumps( + {'name': 'test-vm', 'template': '/plugins/kimchi/templates/tnetboot'} + ) resp = self.request('/plugins/kimchi/vms', req, 'POST') task = json.loads(resp.read()) wait_task(self._task_lookup, task['id']) - self.assertEquals(202, resp.status) + self.assertEqual(202, resp.status) # Verify the VM vm = json.loads(self.request('/plugins/kimchi/vms/test-vm').read()) - self.assertEquals('shutoff', vm['state']) - self.assertEquals('plugins/kimchi/images/icon-vm.png', vm['icon']) + self.assertEqual('shutoff', vm['state']) + self.assertEqual('plugins/kimchi/images/icon-vm.png', vm['icon']) # verify if poweroff command returns correct status - resp = self.request('/plugins/kimchi/vms/test-vm/poweroff', '{}', - 'POST') - self.assertEquals(400, resp.status) + resp = self.request( + '/plugins/kimchi/vms/test-vm/poweroff', '{}', 'POST') + self.assertEqual(400, resp.status) # verify if shutdown command returns correct status - resp = self.request('/plugins/kimchi/vms/test-vm/shutdown', '{}', - 'POST') - self.assertEquals(400, resp.status) + resp = self.request( + '/plugins/kimchi/vms/test-vm/shutdown', '{}', 'POST') + self.assertEqual(400, resp.status) # verify if reset command returns correct status resp = self.request('/plugins/kimchi/vms/test-vm/reset', '{}', 'POST') - self.assertEquals(400, resp.status) + self.assertEqual(400, resp.status) # Start the VM resp = self.request('/plugins/kimchi/vms/test-vm/start', '{}', 'POST') vm = json.loads(self.request('/plugins/kimchi/vms/test-vm').read()) - self.assertEquals('running', vm['state']) + self.assertEqual('running', vm['state']) # verify if start command returns correct status resp = self.request('/plugins/kimchi/vms/test-vm/start', '{}', 'POST') - self.assertEquals(400, resp.status) + self.assertEqual(400, resp.status) # Force poweroff the VM - resp = self.request('/plugins/kimchi/vms/test-vm/poweroff', '{}', - 'POST') + resp = self.request( + '/plugins/kimchi/vms/test-vm/poweroff', '{}', 'POST') vm = json.loads(self.request('/plugins/kimchi/vms/test-vm').read()) - self.assertEquals('shutoff', vm['state']) + self.assertEqual('shutoff', vm['state']) # Delete the VM resp = self.request('/plugins/kimchi/vms/test-vm', '{}', 'DELETE') - self.assertEquals(204, resp.status) + self.assertEqual(204, resp.status) # Delete the Template - resp = self.request('/plugins/kimchi/templates/tnetboot', '{}', - 'DELETE') - self.assertEquals(204, resp.status) + resp = self.request( + '/plugins/kimchi/templates/tnetboot', '{}', 'DELETE') + self.assertEqual(204, resp.status) def test_vm_graphics(self): # Create a Template - req = json.dumps({'name': 'test', - 'source_media': {'type': 'disk', 'path': fake_iso}}) + req = json.dumps( + {'name': 'test', 'source_media': {'type': 'disk', 'path': fake_iso}} + ) resp = self.request('/plugins/kimchi/templates', req, 'POST') - self.assertEquals(201, resp.status) + self.assertEqual(201, resp.status) # Create a VM with default args - req = json.dumps({'name': 'test-vm', - 'template': '/plugins/kimchi/templates/test'}) + req = json.dumps( + {'name': 'test-vm', 'template': 
'/plugins/kimchi/templates/test'} + ) resp = self.request('/plugins/kimchi/vms', req, 'POST') - self.assertEquals(202, resp.status) + self.assertEqual(202, resp.status) task = json.loads(resp.read()) wait_task(self._task_lookup, task['id']) # Verify the VM vm = json.loads(self.request('/plugins/kimchi/vms/test-vm').read()) - self.assertEquals('127.0.0.1', vm['graphics']['listen']) - self.assertEquals('vnc', vm['graphics']['type']) + self.assertEqual('127.0.0.1', vm['graphics']['listen']) + self.assertEqual('vnc', vm['graphics']['type']) # Delete the VM resp = self.request('/plugins/kimchi/vms/test-vm', '{}', 'DELETE') - self.assertEquals(204, resp.status) + self.assertEqual(204, resp.status) # Create a VM with specified graphics type and listen graphics = {'type': 'vnc', 'listen': '127.0.0.1'} - req = json.dumps({'name': 'test-vm', - 'template': '/plugins/kimchi/templates/test', - 'graphics': graphics}) + req = json.dumps( + { + 'name': 'test-vm', + 'template': '/plugins/kimchi/templates/test', + 'graphics': graphics, + } + ) resp = self.request('/plugins/kimchi/vms', req, 'POST') - self.assertEquals(202, resp.status) + self.assertEqual(202, resp.status) task = json.loads(resp.read()) wait_task(self._task_lookup, task['id']) # Verify the VM vm = json.loads(self.request('/plugins/kimchi/vms/test-vm').read()) - self.assertEquals('127.0.0.1', vm['graphics']['listen']) - self.assertEquals('vnc', vm['graphics']['type']) + self.assertEqual('127.0.0.1', vm['graphics']['listen']) + self.assertEqual('vnc', vm['graphics']['type']) # Delete the VM resp = self.request('/plugins/kimchi/vms/test-vm', '{}', 'DELETE') - self.assertEquals(204, resp.status) + self.assertEqual(204, resp.status) # Create a VM with listen as ipv6 address graphics = {'type': 'spice', 'listen': 'fe00::0'} - req = json.dumps({'name': 'test-vm', - 'template': '/plugins/kimchi/templates/test', - 'graphics': graphics}) + req = json.dumps( + { + 'name': 'test-vm', + 'template': '/plugins/kimchi/templates/test', + 'graphics': graphics, + } + ) resp = self.request('/plugins/kimchi/vms', req, 'POST') - self.assertEquals(202, resp.status) + self.assertEqual(202, resp.status) task = json.loads(resp.read()) wait_task(self._task_lookup, task['id']) # Verify the VM vm = json.loads(self.request('/plugins/kimchi/vms/test-vm').read()) - self.assertEquals('fe00::0', vm['graphics']['listen']) - self.assertEquals('spice', vm['graphics']['type']) + self.assertEqual('fe00::0', vm['graphics']['listen']) + self.assertEqual('spice', vm['graphics']['type']) # Delete the VM resp = self.request('/plugins/kimchi/vms/test-vm', '{}', 'DELETE') - self.assertEquals(204, resp.status) + self.assertEqual(204, resp.status) # Create a VM with specified graphics type and default listen graphics = {'type': 'spice'} - req = json.dumps({'name': 'test-vm', - 'template': '/plugins/kimchi/templates/test', - 'graphics': graphics}) + req = json.dumps( + { + 'name': 'test-vm', + 'template': '/plugins/kimchi/templates/test', + 'graphics': graphics, + } + ) resp = self.request('/plugins/kimchi/vms', req, 'POST') - self.assertEquals(202, resp.status) + self.assertEqual(202, resp.status) task = json.loads(resp.read()) wait_task(self._task_lookup, task['id']) # Verify the VM vm = json.loads(self.request('/plugins/kimchi/vms/test-vm').read()) - self.assertEquals('127.0.0.1', vm['graphics']['listen']) - self.assertEquals('spice', vm['graphics']['type']) + self.assertEqual('127.0.0.1', vm['graphics']['listen']) + self.assertEqual('spice', vm['graphics']['type']) # Delete the VM resp 
= self.request('/plugins/kimchi/vms/test-vm', '{}', 'DELETE') - self.assertEquals(204, resp.status) + self.assertEqual(204, resp.status) # Try to create a VM with invalid graphics type graphics = {'type': 'invalid'} - req = json.dumps({'name': 'test-vm', - 'template': '/plugins/kimchi/templates/test', - 'graphics': graphics}) + req = json.dumps( + { + 'name': 'test-vm', + 'template': '/plugins/kimchi/templates/test', + 'graphics': graphics, + } + ) resp = self.request('/plugins/kimchi/vms', req, 'POST') - self.assertEquals(400, resp.status) + self.assertEqual(400, resp.status) # Try to create a VM with invalid graphics listen graphics = {'type': 'spice', 'listen': 'invalid'} - req = json.dumps({'name': 'test-vm', - 'template': '/plugins/kimchi/templates/test', - 'graphics': graphics}) + req = json.dumps( + { + 'name': 'test-vm', + 'template': '/plugins/kimchi/templates/test', + 'graphics': graphics, + } + ) resp = self.request('/plugins/kimchi/vms', req, 'POST') - self.assertEquals(400, resp.status) + self.assertEqual(400, resp.status) # Delete the Template resp = self.request('/plugins/kimchi/templates/test', '{}', 'DELETE') - self.assertEquals(204, resp.status) + self.assertEqual(204, resp.status) def test_vm_storage_devices(self): with RollbackContext() as rollback: # Create a template as a base for our VMs - req = json.dumps({'name': 'test', - 'source_media': {'type': 'disk', - 'path': fake_iso}}) + req = json.dumps( + {'name': 'test', 'source_media': {'type': 'disk', 'path': fake_iso}} + ) resp = self.request('/plugins/kimchi/templates', req, 'POST') - self.assertEquals(201, resp.status) + self.assertEqual(201, resp.status) # Delete the template - rollback.prependDefer(self.request, - '/plugins/kimchi/templates/test', '{}', - 'DELETE') + rollback.prependDefer( + self.request, '/plugins/kimchi/templates/test', '{}', 'DELETE' + ) # Create a VM with default args - req = json.dumps({'name': 'test-vm', - 'template': '/plugins/kimchi/templates/test'}) + req = json.dumps( + {'name': 'test-vm', 'template': '/plugins/kimchi/templates/test'} + ) resp = self.request('/plugins/kimchi/vms', req, 'POST') - self.assertEquals(202, resp.status) + self.assertEqual(202, resp.status) task = json.loads(resp.read()) wait_task(self._task_lookup, task['id']) # Delete the VM - rollback.prependDefer(self.request, '/plugins/kimchi/vms/test-vm', - '{}', 'DELETE') + rollback.prependDefer( + self.request, '/plugins/kimchi/vms/test-vm', '{}', 'DELETE' + ) # Check storage devices - resp = self.request('/plugins/kimchi/vms/test-vm/storages', '{}', - 'GET') + resp = self.request( + '/plugins/kimchi/vms/test-vm/storages', '{}', 'GET') devices = json.loads(resp.read()) - self.assertEquals(2, len(devices)) + self.assertEqual(2, len(devices)) dev_types = [] for d in devices: - self.assertIn(u'type', d.keys()) - self.assertIn(u'dev', d.keys()) - self.assertIn(u'path', d.keys()) + self.assertIn('type', d.keys()) + self.assertIn('dev', d.keys()) + self.assertIn('path', d.keys()) dev_types.append(d['type']) - self.assertEquals(['cdrom', 'disk'], sorted(dev_types)) + self.assertEqual(['cdrom', 'disk'], sorted(dev_types)) # Attach cdrom with nonexistent iso - req = json.dumps({'dev': 'hdx', - 'type': 'cdrom', - 'path': '/tmp/nonexistent.iso'}) - resp = self.request('/plugins/kimchi/vms/test-vm/storages', req, - 'POST') - self.assertEquals(400, resp.status) + req = json.dumps( + {'dev': 'hdx', 'type': 'cdrom', 'path': '/tmp/nonexistent.iso'} + ) + resp = self.request( + '/plugins/kimchi/vms/test-vm/storages', req, 'POST') + 
self.assertEqual(400, resp.status) # Create temp storage pool - req = json.dumps({'name': 'tmp', - 'capacity': 1024, - 'allocated': 512, - 'path': '/tmp', - 'type': 'dir'}) + req = json.dumps( + { + 'name': 'tmp', + 'capacity': 1024, + 'allocated': 512, + 'path': '/tmp', + 'type': 'dir', + } + ) resp = self.request('/plugins/kimchi/storagepools', req, 'POST') - self.assertEquals(201, resp.status) - resp = self.request('/plugins/kimchi/storagepools/tmp/activate', - req, 'POST') - self.assertEquals(200, resp.status) + self.assertEqual(201, resp.status) + resp = self.request( + '/plugins/kimchi/storagepools/tmp/activate', req, 'POST' + ) + self.assertEqual(200, resp.status) # 'name' is required for this type of volume open('/tmp/attach-volume', 'w').close() - req = json.dumps({'capacity': 1024, - 'allocation': 512, - 'type': 'disk', - 'format': 'raw'}) + req = json.dumps( + {'capacity': 1024, 'allocation': 512, + 'type': 'disk', 'format': 'raw'} + ) resp = self.request( '/plugins/kimchi/storagepools/tmp/storagevolumes', req, 'POST' ) - self.assertEquals(400, resp.status) - req = json.dumps({'name': "attach-volume", - 'capacity': 1024, - 'allocation': 512, - 'type': 'disk', - 'format': 'raw'}) + self.assertEqual(400, resp.status) + req = json.dumps( + { + 'name': 'attach-volume', + 'capacity': 1024, + 'allocation': 512, + 'type': 'disk', + 'format': 'raw', + } + ) resp = self.request( '/plugins/kimchi/storagepools/tmp/storagevolumes', req, 'POST' ) - self.assertEquals(202, resp.status) + self.assertEqual(202, resp.status) time.sleep(1) # Attach cdrom with both path and volume specified open('/tmp/existent.iso', 'w').close() - req = json.dumps({'dev': 'hdx', - 'type': 'cdrom', - 'pool': 'tmp', - 'vol': 'attach-volume', - 'path': '/tmp/existent.iso'}) - resp = self.request( - '/plugins/kimchi/vms/test-vm/storages', req, 'POST' + req = json.dumps( + { + 'dev': 'hdx', + 'type': 'cdrom', + 'pool': 'tmp', + 'vol': 'attach-volume', + 'path': '/tmp/existent.iso', + } ) - self.assertEquals(400, resp.status) + resp = self.request( + '/plugins/kimchi/vms/test-vm/storages', req, 'POST') + self.assertEqual(400, resp.status) # Attach disk with both path and volume specified - req = json.dumps({'dev': 'hdx', - 'type': 'disk', - 'pool': 'tmp', - 'vol': 'attach-volume', - 'path': '/tmp/existent.iso'}) - resp = self.request( - '/plugins/kimchi/vms/test-vm/storages', req, 'POST' + req = json.dumps( + { + 'dev': 'hdx', + 'type': 'disk', + 'pool': 'tmp', + 'vol': 'attach-volume', + 'path': '/tmp/existent.iso', + } ) - self.assertEquals(400, resp.status) + resp = self.request( + '/plugins/kimchi/vms/test-vm/storages', req, 'POST') + self.assertEqual(400, resp.status) # Attach disk with only pool specified - req = json.dumps({'dev': 'hdx', - 'type': 'cdrom', - 'pool': 'tmp'}) + req = json.dumps({'dev': 'hdx', 'type': 'cdrom', 'pool': 'tmp'}) resp = self.request( - '/plugins/kimchi/vms/test-vm/storages', req, 'POST' - ) - self.assertEquals(400, resp.status) + '/plugins/kimchi/vms/test-vm/storages', req, 'POST') + self.assertEqual(400, resp.status) # Attach disk with pool and vol specified - req = json.dumps({'type': 'disk', - 'pool': 'tmp', - 'vol': 'attach-volume'}) + req = json.dumps( + {'type': 'disk', 'pool': 'tmp', 'vol': 'attach-volume'}) resp = self.request( - '/plugins/kimchi/vms/test-vm/storages', req, 'POST' - ) - self.assertEquals(201, resp.status) + '/plugins/kimchi/vms/test-vm/storages', req, 'POST') + self.assertEqual(201, resp.status) cd_info = json.loads(resp.read()) - self.assertEquals('disk', 
cd_info['type']) + self.assertEqual('disk', cd_info['type']) # Attach a cdrom with existent dev name - req = json.dumps({'type': 'cdrom', - 'path': '/tmp/existent.iso'}) + req = json.dumps({'type': 'cdrom', 'path': '/tmp/existent.iso'}) resp = self.request( - '/plugins/kimchi/vms/test-vm/storages', req, 'POST' - ) - self.assertEquals(201, resp.status) + '/plugins/kimchi/vms/test-vm/storages', req, 'POST') + self.assertEqual(201, resp.status) cd_info = json.loads(resp.read()) cd_dev = cd_info['dev'] - self.assertEquals('cdrom', cd_info['type']) - self.assertEquals('/tmp/existent.iso', cd_info['path']) + self.assertEqual('cdrom', cd_info['type']) + self.assertEqual('/tmp/existent.iso', cd_info['path']) # Delete the file and cdrom - rollback.prependDefer(self.request, - '/plugins/kimchi/vms/test-vm/storages/hdx', - '{}', 'DELETE') + rollback.prependDefer( + self.request, + '/plugins/kimchi/vms/test-vm/storages/hdx', + '{}', + 'DELETE' + ) os.remove('/tmp/existent.iso') os.remove('/tmp/attach-volume') # Change path of storage cdrom - cdrom = u'http://mirrors.kernel.org/fedora/releases/23/Live/'\ - 'x86_64/Fedora-Live-KDE-x86_64-23-10.iso' + cdrom = 'http://mirrors.kernel.org/fedora/releases/29/Everything' \ + '/x86_64/iso/Fedora-Everything-netinst-x86_64-29-1.2.iso' req = json.dumps({'path': cdrom}) - resp = self.request('/plugins/kimchi/vms/test-vm/storages/' + - cd_dev, req, 'PUT') - if not os.uname()[4] == "s390x": - self.assertEquals(200, resp.status) + resp = self.request( + '/plugins/kimchi/vms/test-vm/storages/' + cd_dev, req, 'PUT' + ) + + if not os.uname()[4] == 's390x': + self.assertEqual(200, resp.status) cd_info = json.loads(resp.read()) - self.assertEquals(urlparse.urlparse(cdrom).path, - urlparse.urlparse(cd_info['path']).path) + self.assertEqual( + urllib.parse.urlparse(cdrom).path, + urllib.parse.urlparse(cd_info['path']).path, + ) # Test GET devs = json.loads( self.request('/plugins/kimchi/vms/test-vm/storages').read() ) - self.assertEquals(4, len(devs)) + self.assertEqual(4, len(devs)) # Detach storage cdrom - resp = self.request('/plugins/kimchi/vms/test-vm/storages/' + - cd_dev, '{}', 'DELETE') - self.assertEquals(204, resp.status) + resp = self.request( + '/plugins/kimchi/vms/test-vm/storages/' + cd_dev, '{}', 'DELETE' + ) + self.assertEqual(204, resp.status) # Test GET devs = json.loads( self.request('/plugins/kimchi/vms/test-vm/storages').read() ) - self.assertEquals(3, len(devs)) - resp = self.request('/plugins/kimchi/storagepools/tmp/deactivate', - '{}', 'POST') - self.assertEquals(200, resp.status) + + self.assertEqual(3, len(devs)) + resp = self.request( + '/plugins/kimchi/storagepools/tmp/deactivate', '{}', 'POST' + ) + self.assertEqual(200, resp.status) # cannot delete storagepool with volumes associate to guests - resp = self.request('/plugins/kimchi/storagepools/tmp', '{}', - 'DELETE') - self.assertEquals(400, resp.status) + resp = self.request( + '/plugins/kimchi/storagepools/tmp', '{}', 'DELETE') + self.assertEqual(400, resp.status) # activate pool - resp = self.request('/plugins/kimchi/storagepools/tmp/activate', - '{}', 'POST') - self.assertEquals(200, resp.status) + resp = self.request( + '/plugins/kimchi/storagepools/tmp/activate', '{}', 'POST' + ) + self.assertEqual(200, resp.status) # delete volumes - if not os.uname()[4] == "s390x": - l = '/plugins/kimchi/vms/test-vm/storages/hdd' + if not os.uname()[4] == 's390x': + uri = '/plugins/kimchi/vms/test-vm/storages/hdd' else: - l = '/plugins/kimchi/vms/test-vm/storages/vdb' - resp = self.request(l, '{}', 
'DELETE') - self.assertEquals(204, resp.status) + uri = '/plugins/kimchi/vms/test-vm/storages/vdb' + resp = self.request(uri, '{}', 'DELETE') + self.assertEqual(204, resp.status) # deactive and delete storage pool - resp = self.request('/plugins/kimchi/storagepools/tmp/deactivate', - '{}', 'POST') - self.assertEquals(200, resp.status) - resp = self.request('/plugins/kimchi/storagepools/tmp', '{}', - 'DELETE') - - self.assertEquals(204, resp.status) + resp = self.request( + '/plugins/kimchi/storagepools/tmp/deactivate', '{}', 'POST' + ) + self.assertEqual(200, resp.status) + # Pool is associated with VM test (create above) + resp = self.request( + '/plugins/kimchi/storagepools/tmp', '{}', 'DELETE') + self.assertEqual(204, resp.status) def test_vm_iface(self): with RollbackContext() as rollback: # Create a template as a base for our VMs - req = json.dumps({'name': 'test', - 'source_media': {'type': 'disk', - 'path': fake_iso}}) + req = json.dumps( + {'name': 'test', 'source_media': {'type': 'disk', 'path': fake_iso}} + ) resp = self.request('/plugins/kimchi/templates', req, 'POST') - self.assertEquals(201, resp.status) + self.assertEqual(201, resp.status) # Delete the template - rollback.prependDefer(self.request, - '/plugins/kimchi/templates/test', '{}', - 'DELETE') + rollback.prependDefer( + self.request, '/plugins/kimchi/templates/test', '{}', 'DELETE' + ) # Create a VM with default args - req = json.dumps({'name': 'test-vm', - 'template': '/plugins/kimchi/templates/test'}) + req = json.dumps( + {'name': 'test-vm', 'template': '/plugins/kimchi/templates/test'} + ) resp = self.request('/plugins/kimchi/vms', req, 'POST') - self.assertEquals(202, resp.status) + self.assertEqual(202, resp.status) task = json.loads(resp.read()) wait_task(self._task_lookup, task['id']) # Delete the VM - rollback.prependDefer(self.request, - '/plugins/kimchi/vms/test-vm', '{}', - 'DELETE') + rollback.prependDefer( + self.request, '/plugins/kimchi/vms/test-vm', '{}', 'DELETE' + ) # Create a network - req = json.dumps({'name': 'test-network', - 'connection': 'nat', - 'net': '127.0.1.0/24'}) + req = json.dumps( + {'name': 'test-network', 'connection': 'nat', 'net': '127.0.1.0/24'} + ) resp = self.request('/plugins/kimchi/networks', req, 'POST') - self.assertEquals(201, resp.status) + self.assertEqual(201, resp.status) # Delete the network - rollback.prependDefer(self.request, - '/plugins/kimchi/networks/test-network', - '{}', 'DELETE') + rollback.prependDefer( + self.request, '/plugins/kimchi/networks/test-network', '{}', 'DELETE' + ) ifaces = json.loads( self.request('/plugins/kimchi/vms/test-vm/ifaces').read() ) - if not os.uname()[4] == "s390x": - self.assertEquals(1, len(ifaces)) + if not os.uname()[4] == 's390x': + self.assertEqual(1, len(ifaces)) for iface in ifaces: res = json.loads( - self.request('/plugins/kimchi/vms/test-vm/ifaces/%s' % - iface['mac']).read() + self.request( + '/plugins/kimchi/vms/test-vm/ifaces/%s' % iface['mac'] + ).read() ) - self.assertEquals('default', res['network']) - self.assertEquals(17, len(res['mac'])) - self.assertEquals(get_template_default('old', 'nic_model'), - res['model']) + self.assertEqual('default', res['network']) + self.assertEqual(17, len(res['mac'])) + self.assertEqual(get_template_default( + 'old', 'nic_model'), res['model']) self.assertTrue('ips' in res) # try to attach an interface without specifying 'model' req = json.dumps({'type': 'network'}) - resp = self.request('/plugins/kimchi/vms/test-vm/ifaces', req, - 'POST') - self.assertEquals(400, resp.status) + 
resp = self.request( + '/plugins/kimchi/vms/test-vm/ifaces', req, 'POST') + self.assertEqual(400, resp.status) # try to attach an interface of type "macvtap" without source - if os.uname()[4] == "s390x": + if os.uname()[4] == 's390x': req = json.dumps({'type': 'macvtap'}) - resp = self.request('/plugins/kimchi/vms/test-vm/ifaces', req, - 'POST') + resp = self.request( + '/plugins/kimchi/vms/test-vm/ifaces', req, 'POST') self.assertEqual(400, resp.status) # try to attach an interface of type "ovs" without source req = json.dumps({'type': 'ovs'}) - resp = self.request('/plugins/kimchi/vms/test-vm/ifaces', req, - 'POST') + resp = self.request( + '/plugins/kimchi/vms/test-vm/ifaces', req, 'POST') self.assertEqual(400, resp.status) # attach network interface to vm - req = json.dumps({"type": "network", - "network": "test-network", - "model": "virtio"}) - resp = self.request('/plugins/kimchi/vms/test-vm/ifaces', req, - 'POST') - self.assertEquals(201, resp.status) + req = json.dumps( + {'type': 'network', 'network': 'test-network', 'model': 'virtio'} + ) + resp = self.request( + '/plugins/kimchi/vms/test-vm/ifaces', req, 'POST') + self.assertEqual(201, resp.status) iface = json.loads(resp.read()) - self.assertEquals('test-network', iface['network']) - self.assertEquals(17, len(iface['mac'])) - self.assertEquals('virtio', iface['model']) - self.assertEquals('network', iface['type']) + self.assertEqual('test-network', iface['network']) + self.assertEqual(17, len(iface['mac'])) + self.assertEqual('virtio', iface['model']) + self.assertEqual('network', iface['type']) # update vm interface newMacAddr = '54:50:e3:44:8a:af' - req = json.dumps({"network": "default", "model": "virtio", - "type": "network", "mac": newMacAddr}) - resp = self.request('/plugins/kimchi/vms/test-vm/ifaces/%s' % - iface['mac'], req, 'PUT') - self.assertEquals(303, resp.status) + req = json.dumps( + { + 'network': 'default', + 'model': 'virtio', + 'type': 'network', + 'mac': newMacAddr, + } + ) + resp = self.request( + '/plugins/kimchi/vms/test-vm/ifaces/%s' % iface['mac'], req, 'PUT' + ) + self.assertEqual(303, resp.status) iface = json.loads( - self.request('/plugins/kimchi/vms/test-vm/ifaces/%s' % - newMacAddr).read() + self.request( + '/plugins/kimchi/vms/test-vm/ifaces/%s' % newMacAddr + ).read() ) - self.assertEquals(newMacAddr, iface['mac']) + self.assertEqual(newMacAddr, iface['mac']) # Start the VM - resp = self.request('/plugins/kimchi/vms/test-vm/start', '{}', - 'POST') + resp = self.request( + '/plugins/kimchi/vms/test-vm/start', '{}', 'POST') vm = json.loads(self.request('/plugins/kimchi/vms/test-vm').read()) - self.assertEquals('running', vm['state']) + self.assertEqual('running', vm['state']) # Check for an IP address iface = json.loads( - self.request('/plugins/kimchi/vms/test-vm/ifaces/%s' % - newMacAddr).read() + self.request( + '/plugins/kimchi/vms/test-vm/ifaces/%s' % newMacAddr + ).read() ) self.assertTrue(len(iface['ips']) > 0) # Force poweroff the VM - resp = self.request('/plugins/kimchi/vms/test-vm/poweroff', '{}', - 'POST') + resp = self.request( + '/plugins/kimchi/vms/test-vm/poweroff', '{}', 'POST') vm = json.loads(self.request('/plugins/kimchi/vms/test-vm').read()) - self.assertEquals('shutoff', vm['state']) + self.assertEqual('shutoff', vm['state']) # detach network interface from vm - resp = self.request('/plugins/kimchi/vms/test-vm/ifaces/%s' % - iface['mac'], '{}', 'DELETE') - self.assertEquals(204, resp.status) + resp = self.request( + '/plugins/kimchi/vms/test-vm/ifaces/%s' % iface['mac'], 
'{}', 'DELETE' + ) + self.assertEqual(204, resp.status) - if os.uname()[4] == "s390x": + if os.uname()[4] == 's390x': # attach macvtap interface to vm - req = json.dumps({"type": "macvtap", - "source": "test-network"}) - resp = self.request('/plugins/kimchi/vms/test-vm/ifaces', req, - 'POST') - self.assertEquals(201, resp.status) + req = json.dumps({'type': 'macvtap', 'source': 'test-network'}) + resp = self.request( + '/plugins/kimchi/vms/test-vm/ifaces', req, 'POST') + self.assertEqual(201, resp.status) iface = json.loads(resp.read()) - self.assertEquals('test-network', iface['source']) - self.assertEquals('macvtap', iface['type']) + self.assertEqual('test-network', iface['source']) + self.assertEqual('macvtap', iface['type']) # Start the VM - resp = self.request('/plugins/kimchi/vms/test-vm/start', '{}', - 'POST') - vm = json.loads( - self.request('/plugins/kimchi/vms/test-vm').read()) - self.assertEquals('running', vm['state']) + resp = self.request( + '/plugins/kimchi/vms/test-vm/start', '{}', 'POST') + vm = json.loads(self.request( + '/plugins/kimchi/vms/test-vm').read()) + self.assertEqual('running', vm['state']) # Force poweroff the VM - resp = self.request('/plugins/kimchi/vms/test-vm/poweroff', - '{}', 'POST') - vm = json.loads( - self.request('/plugins/kimchi/vms/test-vm').read()) - self.assertEquals('shutoff', vm['state']) + resp = self.request( + '/plugins/kimchi/vms/test-vm/poweroff', '{}', 'POST' + ) + vm = json.loads(self.request( + '/plugins/kimchi/vms/test-vm').read()) + self.assertEqual('shutoff', vm['state']) # detach network interface from vm - resp = self.request('/plugins/kimchi/vms/test-vm/ifaces/%s' % - iface['mac'], '{}', 'DELETE') - self.assertEquals(204, resp.status) + resp = self.request( + '/plugins/kimchi/vms/test-vm/ifaces/%s' % iface['mac'], + '{}', + 'DELETE', + ) + self.assertEqual(204, resp.status) # attach ovs interface to vm - req = json.dumps({"type": "ovs", - "source": "test-network"}) - resp = self.request('/plugins/kimchi/vms/test-vm/ifaces', req, - 'POST') - self.assertEquals(201, resp.status) + req = json.dumps({'type': 'ovs', 'source': 'test-network'}) + resp = self.request( + '/plugins/kimchi/vms/test-vm/ifaces', req, 'POST') + self.assertEqual(201, resp.status) iface = json.loads(resp.read()) - self.assertEquals('test-network', iface['source']) - self.assertEquals('ovs', iface['type']) + self.assertEqual('test-network', iface['source']) + self.assertEqual('ovs', iface['type']) # Start the VM - resp = self.request('/plugins/kimchi/vms/test-vm/start', '{}', - 'POST') - vm = json.loads( - self.request('/plugins/kimchi/vms/test-vm').read()) - self.assertEquals('running', vm['state']) + resp = self.request( + '/plugins/kimchi/vms/test-vm/start', '{}', 'POST') + vm = json.loads(self.request( + '/plugins/kimchi/vms/test-vm').read()) + self.assertEqual('running', vm['state']) # Force poweroff the VM - resp = self.request('/plugins/kimchi/vms/test-vm/poweroff', - '{}', 'POST') - vm = json.loads( - self.request('/plugins/kimchi/vms/test-vm').read()) - self.assertEquals('shutoff', vm['state']) + resp = self.request( + '/plugins/kimchi/vms/test-vm/poweroff', '{}', 'POST' + ) + vm = json.loads(self.request( + '/plugins/kimchi/vms/test-vm').read()) + self.assertEqual('shutoff', vm['state']) # detach ovs interface from vm - resp = self.request('/plugins/kimchi/vms/test-vm/ifaces/%s' % - iface['mac'], '{}', 'DELETE') - self.assertEquals(204, resp.status) + resp = self.request( + '/plugins/kimchi/vms/test-vm/ifaces/%s' % iface['mac'], + '{}', + 'DELETE', + ) + 
self.assertEqual(204, resp.status) def test_vm_customise_storage(self): # Create a Template - req = json.dumps({'name': 'test', 'disks': DISKS, - 'source_media': {'type': 'disk', 'path': fake_iso}}) + req = json.dumps( + { + 'name': 'test', + 'disks': DISKS, + 'source_media': {'type': 'disk', 'path': fake_iso}, + } + ) resp = self.request('/plugins/kimchi/templates', req, 'POST') - self.assertEquals(201, resp.status) + self.assertEqual(201, resp.status) # Create alternate storage - req = json.dumps({'name': 'alt', - 'capacity': 1024, - 'allocated': 512, - 'path': '/tmp', - 'type': 'dir'}) + req = json.dumps( + { + 'name': 'alt', + 'capacity': 1024, + 'allocated': 512, + 'path': '/tmp', + 'type': 'dir', + } + ) resp = self.request('/plugins/kimchi/storagepools', req, 'POST') - self.assertEquals(201, resp.status) - resp = self.request('/plugins/kimchi/storagepools/alt/activate', req, - 'POST') - self.assertEquals(200, resp.status) + self.assertEqual(201, resp.status) + resp = self.request( + '/plugins/kimchi/storagepools/alt/activate', req, 'POST') + self.assertEqual(200, resp.status) # Create a VM - req = json.dumps({'name': 'test-vm', - 'template': '/plugins/kimchi/templates/test', - 'storagepool': '/plugins/kimchi/storagepools/alt'}) + req = json.dumps( + { + 'name': 'test-vm', + 'template': '/plugins/kimchi/templates/test', + 'storagepool': '/plugins/kimchi/storagepools/alt', + } + ) resp = self.request('/plugins/kimchi/vms', req, 'POST') - self.assertEquals(202, resp.status) + self.assertEqual(202, resp.status) task = json.loads(resp.read()) wait_task(self._task_lookup, task['id']) resp = self.request('/plugins/kimchi/vms/test-vm', '{}', 'GET') @@ -1178,30 +1278,36 @@ def test_vm_customise_storage(self): # Test template not changed after vm customise its pool t = json.loads(self.request('/plugins/kimchi/templates/test').read()) - self.assertEquals(t['disks'][0]['pool']['name'], - '/plugins/kimchi/storagepools/default-pool') + self.assertEqual( + t['disks'][0]['pool']['name'], '/plugins/kimchi/storagepools/default-pool' + ) # Verify the volume was created - vol_uri = '/plugins/kimchi/storagepools/alt/storagevolumes/%s-0.img' \ - % vm_info['uuid'] + vol_uri = ( + '/plugins/kimchi/storagepools/alt/storagevolumes/%s-0.img' % vm_info['uuid'] + ) resp = self.request(vol_uri) vol = json.loads(resp.read()) - self.assertEquals(10 << 30, vol['capacity']) + self.assertEqual(10 << 30, vol['capacity']) # Delete the VM resp = self.request('/plugins/kimchi/vms/test-vm', '{}', 'DELETE') - self.assertEquals(204, resp.status) + self.assertEqual(204, resp.status) # Verify the volume was deleted self.assertHTTPStatus(404, vol_uri) def test_scsi_fc_storage(self): # Create scsi fc pool - req = json.dumps({'name': 'scsi_fc_pool', - 'type': 'scsi', - 'source': {'adapter_name': 'scsi_host2'}}) + req = json.dumps( + { + 'name': 'scsi_fc_pool', + 'type': 'scsi', + 'source': {'adapter_name': 'scsi_host2'}, + } + ) resp = self.request('/plugins/kimchi/storagepools', req, 'POST') - self.assertEquals(201, resp.status) + self.assertEqual(201, resp.status) # Test create vms using lun of this pool # activate the storage pool @@ -1213,127 +1319,144 @@ def test_scsi_fc_storage(self): tmpl_params = { 'name': 'test_fc_pool', 'source_media': {'type': 'disk', 'path': fake_iso}, - 'disks': [{'pool': - {'name': '/plugins/kimchi/storagepools/scsi_fc_pool'}}]} + 'disks': [{'pool': {'name': '/plugins/kimchi/storagepools/scsi_fc_pool'}}], + } req = json.dumps(tmpl_params) resp = self.request('/plugins/kimchi/templates', req, 
'POST') - self.assertEquals(400, resp.status) + self.assertEqual(400, resp.status) # Choose SCSI volume to create template resp = self.request( - '/plugins/kimchi/storagepools/scsi_fc_pool/storagevolumes' - ) + '/plugins/kimchi/storagepools/scsi_fc_pool/storagevolumes') lun_name = json.loads(resp.read())[0]['name'] pool_name = tmpl_params['disks'][0]['pool']['name'] - tmpl_params['disks'] = [{'index': 0, 'volume': lun_name, 'pool': { - 'name': pool_name}, 'format': 'raw'}] + tmpl_params['disks'] = [ + { + 'index': 0, + 'volume': lun_name, + 'pool': {'name': pool_name}, + 'format': 'raw', + } + ] req = json.dumps(tmpl_params) resp = self.request('/plugins/kimchi/templates', req, 'POST') - self.assertEquals(201, resp.status) + self.assertEqual(201, resp.status) # Create vm in scsi pool - req = json.dumps({'name': 'test-vm', - 'template': '/plugins/kimchi/templates/test_fc_pool'}) + req = json.dumps( + {'name': 'test-vm', 'template': '/plugins/kimchi/templates/test_fc_pool'} + ) resp = self.request('/plugins/kimchi/vms', req, 'POST') - self.assertEquals(202, resp.status) + self.assertEqual(202, resp.status) task = json.loads(resp.read()) wait_task(self._task_lookup, task['id']) # Start the VM resp = self.request('/plugins/kimchi/vms/test-vm/start', '{}', 'POST') vm = json.loads(self.request('/plugins/kimchi/vms/test-vm').read()) - self.assertEquals('running', vm['state']) + self.assertEqual('running', vm['state']) # Force poweroff the VM - resp = self.request('/plugins/kimchi/vms/test-vm/poweroff', '{}', - 'POST') + resp = self.request( + '/plugins/kimchi/vms/test-vm/poweroff', '{}', 'POST') vm = json.loads(self.request('/plugins/kimchi/vms/test-vm').read()) - self.assertEquals('shutoff', vm['state']) + self.assertEqual('shutoff', vm['state']) # Delete the VM resp = self.request('/plugins/kimchi/vms/test-vm', '{}', 'DELETE') - self.assertEquals(204, resp.status) + self.assertEqual(204, resp.status) def test_unnamed_vms(self): # Create a Template - req = json.dumps({'name': 'test', - 'source_media': {'type': 'disk', 'path': fake_iso}}) + req = json.dumps( + {'name': 'test', 'source_media': {'type': 'disk', 'path': fake_iso}} + ) resp = self.request('/plugins/kimchi/templates', req, 'POST') - self.assertEquals(201, resp.status) + self.assertEqual(201, resp.status) # Create 5 unnamed vms from this template - for i in xrange(1, 6): + for i in range(1, 6): req = json.dumps({'template': '/plugins/kimchi/templates/test'}) - task = json.loads(self.request('/plugins/kimchi/vms', - req, 'POST').read()) + task = json.loads(self.request( + '/plugins/kimchi/vms', req, 'POST').read()) wait_task(self._task_lookup, task['id']) - resp = self.request('/plugins/kimchi/vms/test-vm-%i' % i, '{}', - 'GET') - self.assertEquals(resp.status, 200) + resp = self.request( + '/plugins/kimchi/vms/test-vm-%i' % i, '{}', 'GET') + self.assertEqual(resp.status, 200) count = len(json.loads(self.request('/plugins/kimchi/vms').read())) - self.assertEquals(6, count) + self.assertEqual(6, count) def test_create_vm_without_template(self): req = json.dumps({'name': 'vm-without-template'}) resp = self.request('/plugins/kimchi/vms', req, 'POST') - self.assertEquals(400, resp.status) + self.assertEqual(400, resp.status) resp = json.loads(resp.read()) - self.assertIn(u"KCHVM0016E:", resp['reason']) + self.assertIn('KCHVM0016E:', resp['reason']) def test_create_vm_with_bad_template_uri(self): req = json.dumps({'name': 'vm-bad-template', 'template': '/mytemplate'}) resp = self.request('/plugins/kimchi/vms', req, 'POST') - 
self.assertEquals(400, resp.status) + self.assertEqual(400, resp.status) resp = json.loads(resp.read()) - self.assertIn(u"KCHVM0012E", resp['reason']) + self.assertIn('KCHVM0012E', resp['reason']) def test_vm_migrate(self): with RollbackContext() as rollback: - req = json.dumps({'name': 'test-migrate', - 'source_media': {'type': 'disk', - 'path': fake_iso}}) + req = json.dumps( + { + 'name': 'test-migrate', + 'source_media': {'type': 'disk', 'path': fake_iso}, + } + ) resp = self.request('/plugins/kimchi/templates', req, 'POST') - self.assertEquals(201, resp.status) - rollback.prependDefer(self.request, - '/plugins/kimchi/templates/test-migrate', - '{}', 'DELETE') + self.assertEqual(201, resp.status) + rollback.prependDefer( + self.request, '/plugins/kimchi/templates/test-migrate', '{}', 'DELETE' + ) req = json.dumps( - {'name': 'test-vm-migrate', - 'template': '/plugins/kimchi/templates/test-migrate'} - ) + { + 'name': 'test-vm-migrate', + 'template': '/plugins/kimchi/templates/test-migrate', + } + ) resp = self.request('/plugins/kimchi/vms', req, 'POST') - self.assertEquals(202, resp.status) + self.assertEqual(202, resp.status) task = json.loads(resp.read()) wait_task(self._task_lookup, task['id']) - rollback.prependDefer(self.request, '/plugins/kimchi/vms/test-vm', - '{}', 'DELETE') + rollback.prependDefer( + self.request, '/plugins/kimchi/vms/test-vm', '{}', 'DELETE' + ) params = {'remote_host': 'destination_host'} resp = self.request( '/plugins/kimchi/vms/test-vm-migrate/migrate', - json.dumps(params), 'POST') - self.assertEquals(202, resp.status) + json.dumps(params), + 'POST', + ) + self.assertEqual(202, resp.status) task = json.loads(resp.read()) wait_task(self._task_lookup, task['id']) task = json.loads( self.request('/plugins/kimchi/tasks/%s' % task['id']).read() ) - self.assertEquals('finished', task['status']) + self.assertEqual('finished', task['status']) params = {'remote_host': 'rdma_host', 'enable_rdma': True} resp = self.request( '/plugins/kimchi/vms/test-vm-migrate/migrate', - json.dumps(params), 'POST') - self.assertEquals(202, resp.status) + json.dumps(params), + 'POST', + ) + self.assertEqual(202, resp.status) task = json.loads(resp.read()) wait_task(self._task_lookup, task['id']) task = json.loads( self.request('/plugins/kimchi/tasks/%s' % task['id']).read() ) - self.assertEquals('finished', task['status']) + self.assertEqual('finished', task['status']) def test_create_vm_with_img_based_template(self): resp = json.loads( @@ -1341,19 +1464,20 @@ def test_create_vm_with_img_based_template(self): '/plugins/kimchi/storagepools/default-pool/storagevolumes' ).read() ) - self.assertEquals(0, len(resp)) + self.assertEqual(0, len(resp)) # Create a Template mock_base = '/tmp/mock.img' - os.system("qemu-img create -f qcow2 %s 10M" % mock_base) - req = json.dumps({'name': 'test', - 'source_media': {'type': 'disk', 'path': mock_base}}) + os.system('qemu-img create -f qcow2 %s 10M' % mock_base) + req = json.dumps( + {'name': 'test', 'source_media': {'type': 'disk', 'path': mock_base}} + ) resp = self.request('/plugins/kimchi/templates', req, 'POST') - self.assertEquals(201, resp.status) + self.assertEqual(201, resp.status) req = json.dumps({'template': '/plugins/kimchi/templates/test'}) resp = self.request('/plugins/kimchi/vms', req, 'POST') - self.assertEquals(202, resp.status) + self.assertEqual(202, resp.status) task = json.loads(resp.read()) wait_task(self._task_lookup, task['id']) @@ -1363,38 +1487,45 @@ def test_create_vm_with_img_based_template(self): 
'/plugins/kimchi/storagepools/default-pool/storagevolumes' ).read() ) - self.assertEquals(1, len(resp)) + self.assertEqual(1, len(resp)) def _create_pool(self, name): - req = json.dumps({'name': name, - 'capacity': 10240, - 'allocated': 5120, - 'path': '/var/lib/libvirt/images/', - 'type': 'dir'}) + req = json.dumps( + { + 'name': name, + 'capacity': 10240, + 'allocated': 5120, + 'path': '/var/lib/libvirt/images/', + 'type': 'dir', + } + ) resp = self.request('/plugins/kimchi/storagepools', req, 'POST') - self.assertEquals(201, resp.status) + self.assertEqual(201, resp.status) # Verify the storage pool - storagepool = json.loads(self.request('/plugins/kimchi/storagepools/%s' - % name).read()) - self.assertEquals('inactive', storagepool['state']) + storagepool = json.loads( + self.request('/plugins/kimchi/storagepools/%s' % name).read() + ) + self.assertEqual('inactive', storagepool['state']) return name def _delete_pool(self, name): # Delete the storage pool - resp = self.request('/plugins/kimchi/storagepools/%s' % name, '{}', - 'DELETE') - self.assertEquals(204, resp.status) + resp = self.request('/plugins/kimchi/storagepools/%s' % + name, '{}', 'DELETE') + self.assertEqual(204, resp.status) def test_iso_scan_shallow(self): # fake environment preparation self._create_pool('pool-3') - self.request('/plugins/kimchi/storagepools/pool-3/activate', '{}', - 'POST') - params = {'name': 'fedora.iso', - 'capacity': 1073741824, # 1 GiB - 'type': 'file', - 'format': 'iso'} + self.request( + '/plugins/kimchi/storagepools/pool-3/activate', '{}', 'POST') + params = { + 'name': 'fedora.iso', + 'capacity': 1073741824, # 1 GiB + 'type': 'file', + 'format': 'iso', + } task_info = model.storagevolumes_create('pool-3', params) wait_task(self._task_lookup, task_info['id']) @@ -1403,100 +1534,106 @@ def test_iso_scan_shallow(self): '/plugins/kimchi/storagepools/kimchi_isos/storagevolumes/' ).read() )[0] - self.assertEquals('fedora.iso', storagevolume['name']) - self.assertEquals('iso', storagevolume['format']) - self.assertEquals('/var/lib/libvirt/images/fedora.iso', - storagevolume['path']) - self.assertEquals(1073741824, storagevolume['capacity']) # 1 GiB - self.assertEquals(0, storagevolume['allocation']) - self.assertEquals('17', storagevolume['os_version']) - self.assertEquals('fedora', storagevolume['os_distro']) - self.assertEquals(True, storagevolume['bootable']) - self.assertEquals(True, storagevolume['has_permission']) + self.assertEqual('fedora.iso', storagevolume['name']) + self.assertEqual('iso', storagevolume['format']) + self.assertEqual('/var/lib/libvirt/images/fedora.iso', + storagevolume['path']) + self.assertEqual(1073741824, storagevolume['capacity']) # 1 GiB + self.assertEqual(0, storagevolume['allocation']) + self.assertEqual('17', storagevolume['os_version']) + self.assertEqual('fedora', storagevolume['os_distro']) + self.assertEqual(True, storagevolume['bootable']) + self.assertEqual(True, storagevolume['has_permission']) # Create a template # In real model os distro/version can be omitted # as we will scan the iso - req = json.dumps({'name': 'test', - 'source_media': {'type': 'disk', - 'path': storagevolume['path']}, - 'os_distro': storagevolume['os_distro'], - 'os_version': storagevolume['os_version']}) + req = json.dumps( + { + 'name': 'test', + 'source_media': {'type': 'disk', 'path': storagevolume['path']}, + 'os_distro': storagevolume['os_distro'], + 'os_version': storagevolume['os_version'], + } + ) resp = self.request('/plugins/kimchi/templates', req, 'POST') - 
self.assertEquals(201, resp.status) + self.assertEqual(201, resp.status) # Verify the template t = json.loads(self.request('/plugins/kimchi/templates/test').read()) - self.assertEquals('test', t['name']) - self.assertEquals('fedora', t['os_distro']) - self.assertEquals('17', t['os_version']) - self.assertEquals(get_template_default('old', 'memory'), t['memory']) + self.assertEqual('test', t['name']) + self.assertEqual('fedora', t['os_distro']) + self.assertEqual('17', t['os_version']) + self.assertEqual(get_template_default('old', 'memory'), t['memory']) # Deactivate or destroy scan pool return 405 resp = self.request( - '/plugins/kimchi/storagepools/kimchi_isos/storagevolumes' - '/deactivate', '{}', 'POST' + '/plugins/kimchi/storagepools/kimchi_isos/storagevolumes' '/deactivate', + '{}', + 'POST', ) - self.assertEquals(405, resp.status) + self.assertEqual(405, resp.status) resp = self.request( - '/plugins/kimchi/storagepools/kimchi_isos/storagevolumes', - '{}', 'DELETE' + '/plugins/kimchi/storagepools/kimchi_isos/storagevolumes', '{}', 'DELETE' ) - self.assertEquals(405, resp.status) + self.assertEqual(405, resp.status) # Delete the template - resp = self.request('/plugins/kimchi/templates/%s' % t['name'], '{}', - 'DELETE') - self.assertEquals(204, resp.status) + resp = self.request('/plugins/kimchi/templates/%s' % + t['name'], '{}', 'DELETE') + self.assertEqual(204, resp.status) - resp = self.request('/plugins/kimchi/storagepools/pool-3/deactivate', - '{}', 'POST') - self.assertEquals(200, resp.status) + resp = self.request( + '/plugins/kimchi/storagepools/pool-3/deactivate', '{}', 'POST' + ) + self.assertEqual(200, resp.status) self._delete_pool('pool-3') def test_screenshot_refresh(self): # Create a VM - req = json.dumps({'name': 'test', - 'source_media': {'type': 'disk', 'path': fake_iso}}) + req = json.dumps( + {'name': 'test', 'source_media': {'type': 'disk', 'path': fake_iso}} + ) resp = self.request('/plugins/kimchi/templates', req, 'POST') - req = json.dumps({'name': 'test-vm', - 'template': '/plugins/kimchi/templates/test'}) + req = json.dumps( + {'name': 'test-vm', 'template': '/plugins/kimchi/templates/test'} + ) resp = self.request('/plugins/kimchi/vms', req, 'POST') task = json.loads(resp.read()) wait_task(self._task_lookup, task['id']) # Test screenshot for shut-off state vm resp = self.request('/plugins/kimchi/vms/test-vm/screenshot') - self.assertEquals(404, resp.status) + self.assertEqual(404, resp.status) # Test screenshot for running vm resp = self.request('/plugins/kimchi/vms/test-vm/start', '{}', 'POST') vm = json.loads(self.request('/plugins/kimchi/vms/test-vm').read()) resp = self.request('/' + vm['screenshot'], method='HEAD') - self.assertEquals(200, resp.status) + self.assertEqual(200, resp.status) self.assertTrue(resp.getheader('Content-type').startswith('image')) # Test screenshot sub-resource redirect resp = self.request('/plugins/kimchi/vms/test-vm/screenshot') - self.assertEquals(200, resp.status) - self.assertEquals('image/png', resp.getheader('content-type')) + self.assertEqual(200, resp.status) + self.assertEqual('image/png', resp.getheader('content-type')) lastMod1 = resp.getheader('last-modified') # Take another screenshot instantly and compare the last Modified date resp = self.request('/plugins/kimchi/vms/test-vm/screenshot') lastMod2 = resp.getheader('last-modified') - self.assertEquals(lastMod2, lastMod1) + self.assertEqual(lastMod2, lastMod1) - resp = self.request('/plugins/kimchi/vms/test-vm/screenshot', '{}', - 'DELETE') - self.assertEquals(405, 
resp.status) + resp = self.request( + '/plugins/kimchi/vms/test-vm/screenshot', '{}', 'DELETE') + self.assertEqual(405, resp.status) # No screenshot after stopped the VM self.request('/plugins/kimchi/vms/test-vm/poweroff', '{}', 'POST') resp = self.request('/plugins/kimchi/vms/test-vm/screenshot') - self.assertEquals(404, resp.status) + self.assertEqual(404, resp.status) # Picture link not available after VM deleted self.request('/plugins/kimchi/vms/test-vm/start', '{}', 'POST') @@ -1504,27 +1641,24 @@ def test_screenshot_refresh(self): img_lnk = vm['screenshot'] self.request('/plugins/kimchi/vms/test-vm', '{}', 'DELETE') resp = self.request('/' + img_lnk) - self.assertEquals(404, resp.status) + self.assertEqual(404, resp.status) def test_interfaces(self): resp = self.request('/plugins/kimchi/interfaces').read() - self.assertIn('name', resp) - interfaces = json.loads(resp) + interfaces = json.loads(resp.decode('utf-8')) keys = ['name', 'type', 'ipaddr', 'netmask', 'status', 'module'] for interface in interfaces: - self.assertEquals(sorted(keys), sorted(interface.keys())) + self.assertEqual(sorted(keys), sorted(interface.keys())) def _task_lookup(self, taskid): - return json.loads( - self.request('/plugins/kimchi/tasks/%s' % taskid).read() - ) + return json.loads(self.request('/plugins/kimchi/tasks/%s' % taskid).read()) def test_tasks(self): id1 = AsyncTask('/plugins/kimchi/tasks/1', self._async_op).id id2 = AsyncTask('/plugins/kimchi/tasks/2', self._except_op).id id3 = AsyncTask('/plugins/kimchi/tasks/3', self._intermid_op).id - target_uri = urllib2.quote('^/plugins/kimchi/tasks/*', safe="") + target_uri = urllib.parse.quote('^/plugins/kimchi/tasks/*', safe='') filter_data = 'status=running&target_uri=%s' % target_uri tasks = json.loads( self.request('/plugins/kimchi/tasks?%s' % filter_data).read() @@ -1533,35 +1667,40 @@ def test_tasks(self): tasks = json.loads(self.request('/plugins/kimchi/tasks').read()) tasks_ids = [t['id'] for t in tasks] - self.assertEquals(set([id1, id2, id3]) - set(tasks_ids), set([])) + self.assertEqual(set([id1, id2, id3]) - set(tasks_ids), set([])) wait_task(self._task_lookup, id2) - foo2 = json.loads( - self.request('/plugins/kimchi/tasks/%s' % id2).read() - ) + foo2 = json.loads(self.request( + '/plugins/kimchi/tasks/%s' % id2).read()) keys = ['id', 'status', 'message', 'target_uri'] - self.assertEquals(sorted(keys), sorted(foo2.keys())) - self.assertEquals('failed', foo2['status']) + self.assertEqual(sorted(keys), sorted(foo2.keys())) + self.assertEqual('failed', foo2['status']) wait_task(self._task_lookup, id3) - foo3 = json.loads( - self.request('/plugins/kimchi/tasks/%s' % id3).read() - ) - self.assertEquals('in progress', foo3['message']) - self.assertEquals('running', foo3['status']) + foo3 = json.loads(self.request( + '/plugins/kimchi/tasks/%s' % id3).read()) + self.assertEqual('in progress', foo3['message']) + self.assertEqual('running', foo3['status']) def test_config(self): resp = self.request('/plugins/kimchi/config').read() conf = json.loads(resp) keys = ["version", "with_spice_web_client"] - self.assertEquals(keys, sorted(conf.keys())) + self.assertEqual(keys, sorted(conf.keys())) def test_capabilities(self): resp = self.request('/plugins/kimchi/config/capabilities').read() conf = json.loads(resp) - keys = [u'libvirt_stream_protocols', u'qemu_stream', u'qemu_spice', - u'screenshot', u'kernel_vfio', u'nm_running', - u'mem_hotplug_support', u'libvirtd_running'] - self.assertEquals(sorted(keys), sorted(conf.keys())) + keys = [ + 
'libvirt_stream_protocols', + 'qemu_stream', + 'qemu_spice', + 'screenshot', + 'kernel_vfio', + 'nm_running', + 'mem_hotplug_support', + 'libvirtd_running', + ] + self.assertEqual(sorted(keys), sorted(conf.keys())) def test_distros(self): resp = self.request('/plugins/kimchi/config/distros').read() @@ -1573,15 +1712,16 @@ def test_distros(self): self.assertIn('path', distro) # Test in X86 - ident = "Fedora 24" - resp = self.request('/plugins/kimchi/config/distros/%s' % - urllib2.quote(ident)).read() + ident = 'Fedora 29' + resp = self.request( + '/plugins/kimchi/config/distros/%s' % urllib.parse.quote(ident) + ).read() distro = json.loads(resp) if os.uname()[4] in ['x86_64', 'amd64']: - self.assertEquals(distro['name'], ident) - self.assertEquals(distro['os_distro'], "fedora") - self.assertEquals(distro['os_version'], "24") - self.assertEquals(distro['os_arch'], "x86_64") + self.assertEqual(distro['name'], ident) + self.assertEqual(distro['os_distro'], 'fedora') + self.assertEqual(distro['os_version'], '29') + self.assertEqual(distro['os_arch'], 'x86_64') self.assertIn('path', distro) else: # Distro not found error @@ -1589,15 +1729,16 @@ def test_distros(self): self.assertIn('KCHDISTRO0001E', distro.get('reason')) # Test in PPC - ident = "Fedora 24 LE" - resp = self.request('/plugins/kimchi/config/distros/%s' % - urllib2.quote(ident)).read() + ident = 'Fedora 24 LE' + resp = self.request( + '/plugins/kimchi/config/distros/%s' % urllib.parse.quote(ident) + ).read() distro = json.loads(resp) if os.uname()[4] == 'ppc64': - self.assertEquals(distro['name'], ident) - self.assertEquals(distro['os_distro'], "fedora") - self.assertEquals(distro['os_version'], "24") - self.assertEquals(distro['os_arch'], "ppc64le") + self.assertEqual(distro['name'], ident) + self.assertEqual(distro['os_distro'], 'fedora') + self.assertEqual(distro['os_version'], '24') + self.assertEqual(distro['os_arch'], 'ppc64le') self.assertIn('path', distro) else: # Distro not found error @@ -1606,13 +1747,14 @@ def test_distros(self): def test_ovsbridges(self): resp = self.request('/plugins/kimchi/ovsbridges') - self.assertEquals(200, resp.status) + self.assertEqual(200, resp.status) class HttpsRestTests(RestTests): """ Run all of the same tests as above, but use https instead """ + def setUp(self): self.request = partial(request) model.reset() diff --git a/tests/test_storagepoolxml.py b/tests/test_storagepoolxml.py index 968ceb650..095f63e39 100644 --- a/tests/test_storagepoolxml.py +++ b/tests/test_storagepoolxml.py @@ -16,36 +16,37 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - -import lxml.etree as ET import unittest +import lxml.etree as ET from wok.plugins.kimchi.model.libvirtstoragepool import StoragePoolDef class StoragepoolXMLTests(unittest.TestCase): def test_get_storagepool_xml(self): poolDefs = [ - {'def': - {'type': 'dir', - 'name': 'unitTestDirPool', - 'path': '/var/temp/images'}, - 'xml': - """ + { + 'def': { + 'type': 'dir', + 'name': 'unitTestDirPool', + 'path': '/var/temp/images', + }, + 'xml': """ unitTestDirPool /var/temp/images - """}, - {'def': - {'type': 'netfs', - 'name': 'unitTestNFSPool', - 'source': {'host': '127.0.0.1', - 'path': '/var/export'}}, - 'xml': - """ + """, + }, + { + 'def': { + 'type': 'netfs', + 'name': 'unitTestNFSPool', + 'source': {'host': '127.0.0.1', 'path': '/var/export'}, + }, + 'xml': """ unitTestNFSPool @@ 
-56,13 +57,15 @@ def test_get_storagepool_xml(self): /var/lib/kimchi/nfs_mount/unitTestNFSPool - """}, - {'def': - {'type': 'logical', - 'name': 'unitTestLogicalPool', - 'source': {'devices': ['/dev/hda', '/dev/hdb']}}, - 'xml': - """ + """, + }, + { + 'def': { + 'type': 'logical', + 'name': 'unitTestLogicalPool', + 'source': {'devices': ['/dev/hda', '/dev/hdb']}, + }, + 'xml': """ unitTestLogicalPool @@ -73,15 +76,18 @@ def test_get_storagepool_xml(self): /dev/unitTestLogicalPool - """}, - {'def': - {'type': 'iscsi', - 'name': 'unitTestISCSIPool', - 'source': { - 'host': '127.0.0.1', - 'target': 'iqn.2003-01.org.linux-iscsi.localhost'}}, - 'xml': - """ + """, + }, + { + 'def': { + 'type': 'iscsi', + 'name': 'unitTestISCSIPool', + 'source': { + 'host': '127.0.0.1', + 'target': 'iqn.2003-01.org.linux-iscsi.localhost', + }, + }, + 'xml': """ unitTestISCSIPool @@ -92,16 +98,19 @@ def test_get_storagepool_xml(self): /dev/disk/by-id - """}, - {'def': - {'type': 'iscsi', - 'name': 'unitTestISCSIPoolPort', - 'source': { - 'host': '127.0.0.1', - 'port': 3266, - 'target': 'iqn.2003-01.org.linux-iscsi.localhost'}}, - 'xml': - """ + """, + }, + { + 'def': { + 'type': 'iscsi', + 'name': 'unitTestISCSIPoolPort', + 'source': { + 'host': '127.0.0.1', + 'port': 3266, + 'target': 'iqn.2003-01.org.linux-iscsi.localhost', + }, + }, + 'xml': """ unitTestISCSIPoolPort @@ -112,17 +121,22 @@ def test_get_storagepool_xml(self): /dev/disk/by-id - """}, - {'def': - {'type': 'iscsi', - 'name': 'unitTestISCSIPoolAuth', - 'source': { - 'host': '127.0.0.1', - 'target': 'iqn.2003-01.org.linux-iscsi.localhost', - 'auth': {'username': 'testUser', - 'password': 'ActuallyNotUsedInPoolXML'}}}, - 'xml': - """ + """, + }, + { + 'def': { + 'type': 'iscsi', + 'name': 'unitTestISCSIPoolAuth', + 'source': { + 'host': '127.0.0.1', + 'target': 'iqn.2003-01.org.linux-iscsi.localhost', + 'auth': { + 'username': 'testUser', + 'password': 'ActuallyNotUsedInPoolXML', + }, + }, + }, + 'xml': """ unitTestISCSIPoolAuth @@ -136,19 +150,23 @@ def test_get_storagepool_xml(self): /dev/disk/by-id - """}, - {'def': - {'type': 'scsi', - 'name': 'unitTestSCSIFCPool', - 'path': '/dev/disk/by-path', - 'source': { - 'name': 'scsi_host3', - 'adapter': { - 'type': 'fc_host', - 'wwpn': '0123456789abcdef', - 'wwnn': 'abcdef0123456789'}}}, - 'xml': - """ + """, + }, + { + 'def': { + 'type': 'scsi', + 'name': 'unitTestSCSIFCPool', + 'path': '/dev/disk/by-path', + 'source': { + 'name': 'scsi_host3', + 'adapter': { + 'type': 'fc_host', + 'wwpn': '0123456789abcdef', + 'wwnn': 'abcdef0123456789', + }, + }, + }, + 'xml': """ unitTestSCSIFCPool @@ -159,7 +177,9 @@ def test_get_storagepool_xml(self): /dev/disk/by-path - """}] + """, + }, + ] for poolDef in poolDefs: defObj = StoragePoolDef.create(poolDef['def']) @@ -168,4 +188,4 @@ def test_get_storagepool_xml(self): parser = ET.XMLParser(remove_blank_text=True) t1 = ET.fromstring(xmlStr, parser) t2 = ET.fromstring(poolDef['xml'], parser) - self.assertEquals(ET.tostring(t1), ET.tostring(t2)) + self.assertEqual(ET.tostring(t1), ET.tostring(t2)) diff --git a/tests/test_template.py b/tests/test_template.py index bf63acf1a..63a3fa4a1 100644 --- a/tests/test_template.py +++ b/tests/test_template.py @@ -17,26 +17,27 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - -import cherrypy -import iso_gen import json import os -import psutil import unittest 
+import urllib from functools import partial -from tests.utils import patch_auth, request, run_server - +import cherrypy +import iso_gen +import psutil from wok.plugins.kimchi.config import READONLY_POOL_TYPE from wok.plugins.kimchi.model.featuretests import FeatureTests from wok.plugins.kimchi.model.templates import MAX_MEM_LIM +from tests.utils import patch_auth +from tests.utils import request +from tests.utils import run_server model = None test_server = None -MOCK_ISO = "/tmp/mock.iso" -DEFAULT_POOL = u'/plugins/kimchi/storagepools/default-pool' +MOCK_ISO = '/tmp/mock.iso' +DEFAULT_POOL = '/plugins/kimchi/storagepools/default-pool' def setUpModule(): @@ -59,61 +60,76 @@ def setUp(self): def test_tmpl_lifecycle(self): resp = self.request('/plugins/kimchi/templates') - self.assertEquals(200, resp.status) - self.assertEquals(0, len(json.loads(resp.read()))) + self.assertEqual(200, resp.status) + self.assertEqual(0, len(json.loads(resp.read()))) # Create a template without cdrom and disk specified fails with 400 - t = {'name': 'test', 'os_distro': 'ImagineOS', - 'os_version': '1.0', 'memory': {'current': 1024}, - 'cpu_info': {'vcpus': 1}} + t = { + 'name': 'test', + 'os_distro': 'ImagineOS', + 'os_version': '1.0', + 'memory': {'current': 1024}, + 'cpu_info': {'vcpus': 1}, + } req = json.dumps(t) resp = self.request('/plugins/kimchi/templates', req, 'POST') - self.assertEquals(400, resp.status) + self.assertEqual(400, resp.status) # Create a netboot template t = {'name': 'test-netboot', 'source_media': {'type': 'netboot'}} req = json.dumps(t) resp = self.request('/plugins/kimchi/templates', req, 'POST') - self.assertEquals(201, resp.status) + self.assertEqual(201, resp.status) # Verify the netboot template - tmpl = json.loads( - self.request('/plugins/kimchi/templates/test-netboot').read() - ) + tmpl = json.loads(self.request( + '/plugins/kimchi/templates/test-netboot').read()) self.assertIsNone(tmpl['cdrom']) # Delete the netboot template - resp = self.request('/plugins/kimchi/templates/test-netboot', '{}', - 'DELETE') - self.assertEquals(204, resp.status) + resp = self.request( + '/plugins/kimchi/templates/test-netboot', '{}', 'DELETE') + self.assertEqual(204, resp.status) # Create a template - t = {'name': 'test', - 'source_media': {'type': 'disk', 'path': MOCK_ISO}} + t = {'name': 'test', 'source_media': { + 'type': 'disk', 'path': MOCK_ISO}} req = json.dumps(t) resp = self.request('/plugins/kimchi/templates', req, 'POST') - self.assertEquals(201, resp.status) + self.assertEqual(201, resp.status) # Verify the template - keys = ['name', 'icon', 'invalid', 'os_distro', 'os_version', 'memory', - 'cdrom', 'disks', 'networks', 'folder', 'graphics', 'cpu_info'] - tmpl = json.loads( - self.request('/plugins/kimchi/templates/test').read() - ) - if os.uname()[4] == "s390x": - keys.append("interfaces") - self.assertEquals(sorted(tmpl.keys()), sorted(keys)) - self.assertEquals(t['source_media']['path'], tmpl["cdrom"]) + keys = [ + 'name', + 'icon', + 'invalid', + 'os_distro', + 'os_version', + 'memory', + 'cdrom', + 'disks', + 'networks', + 'folder', + 'graphics', + 'cpu_info', + ] + tmpl = json.loads(self.request( + '/plugins/kimchi/templates/test').read()) + if os.uname()[4] == 's390x': + keys.append('interfaces') + self.assertEqual(sorted(tmpl.keys()), sorted(keys)) + self.assertEqual(t['source_media']['path'], tmpl['cdrom']) disk_keys = ['index', 'pool', 'size', 'format'] disk_pool_keys = ['name', 'type'] - self.assertEquals(sorted(tmpl['disks'][0].keys()), sorted(disk_keys)) - 
self.assertEquals(sorted(tmpl['disks'][0]['pool'].keys()), - sorted(disk_pool_keys)) + self.assertEqual(sorted(tmpl['disks'][0].keys()), sorted(disk_keys)) + self.assertEqual( + sorted(tmpl['disks'][0]['pool'].keys()), sorted(disk_pool_keys) + ) # Clone a template - resp = self.request('/plugins/kimchi/templates/test/clone', '{}', - 'POST') - self.assertEquals(303, resp.status) + resp = self.request( + '/plugins/kimchi/templates/test/clone', '{}', 'POST') + self.assertEqual(303, resp.status) # Verify the cloned template tmpl_cloned = json.loads( @@ -121,278 +137,313 @@ def test_tmpl_lifecycle(self): ) del tmpl['name'] del tmpl_cloned['name'] - self.assertEquals(tmpl, tmpl_cloned) + self.assertEqual(tmpl, tmpl_cloned) # Delete the cloned template - resp = self.request('/plugins/kimchi/templates/test-clone1', '{}', - 'DELETE') - self.assertEquals(204, resp.status) + resp = self.request( + '/plugins/kimchi/templates/test-clone1', '{}', 'DELETE') + self.assertEqual(204, resp.status) # Create a template with same name fails with 400 - req = json.dumps({'name': 'test', - 'source_media': {'type': 'disk', 'path': MOCK_ISO}}) + req = json.dumps( + {'name': 'test', 'source_media': {'type': 'disk', 'path': MOCK_ISO}} + ) resp = self.request('/plugins/kimchi/templates', req, 'POST') - self.assertEquals(400, resp.status) + self.assertEqual(400, resp.status) # Create an image based template - os.system("qemu-img create -f qcow2 %s 10G" % '/tmp/mock.img') - t = {'name': 'test_img_template', - 'source_media': {'type': 'disk', 'path': '/tmp/mock.img'}} + os.system('qemu-img create -f qcow2 %s 10G' % '/tmp/mock.img') + t = { + 'name': 'test_img_template', + 'source_media': {'type': 'disk', 'path': '/tmp/mock.img'}, + } req = json.dumps(t) resp = self.request('/plugins/kimchi/templates', req, 'POST') - self.assertEquals(201, resp.status) + self.assertEqual(201, resp.status) os.remove('/tmp/mock.img') # Test disk format - t = {'name': 'test-format', - 'source_media': {'type': 'disk', 'path': MOCK_ISO}, - 'disks': [{'size': 10, 'format': 'vmdk', - 'pool': {'name': DEFAULT_POOL}}]} + t = { + 'name': 'test-format', + 'source_media': {'type': 'disk', 'path': MOCK_ISO}, + 'disks': [{'size': 10, 'format': 'vmdk', 'pool': {'name': DEFAULT_POOL}}], + } req = json.dumps(t) resp = self.request('/plugins/kimchi/templates', req, 'POST') - self.assertEquals(201, resp.status) - tmpl = json.loads( - self.request('/plugins/kimchi/templates/test-format').read() - ) - self.assertEquals(tmpl['disks'][0]['format'], 'vmdk') + self.assertEqual(201, resp.status) + tmpl = json.loads(self.request( + '/plugins/kimchi/templates/test-format').read()) + self.assertEqual(tmpl['disks'][0]['format'], 'vmdk') # Create template with memory higher than host max if hasattr(psutil, 'virtual_memory'): - max_mem = (psutil.virtual_memory().total >> 10 >> 10) + max_mem = psutil.virtual_memory().total >> 10 >> 10 else: - max_mem = (psutil.TOTAL_PHYMEM >> 10 >> 10) + max_mem = psutil.TOTAL_PHYMEM >> 10 >> 10 memory = max_mem + 1024 - t = {'name': 'test-maxmem', - 'source_media': {'type': 'disk', 'path': MOCK_ISO}, - 'memory': {'current': memory}} + t = { + 'name': 'test-maxmem', + 'source_media': {'type': 'disk', 'path': MOCK_ISO}, + 'memory': {'current': memory}, + } req = json.dumps(t) resp = self.request('/plugins/kimchi/templates', req, 'POST') - self.assertEquals(400, resp.status) - self.assertTrue(str(max_mem) in resp.read()) + self.assertEqual(400, resp.status) + self.assertTrue(str(max_mem) in resp.read().decode('utf-8')) def 
test_customized_tmpl(self): # Create a template - t = {'name': 'test', - 'source_media': {'type': 'disk', 'path': MOCK_ISO}} + t = {'name': 'test', 'source_media': { + 'type': 'disk', 'path': MOCK_ISO}} req = json.dumps(t) resp = self.request('/plugins/kimchi/templates', req, 'POST') - self.assertEquals(201, resp.status) - tmpl = json.loads( - self.request('/plugins/kimchi/templates/test').read() - ) + self.assertEqual(201, resp.status) + tmpl = json.loads(self.request( + '/plugins/kimchi/templates/test').read()) # Create another template to test update template name with one of # existing template name - req = json.dumps({'name': 'test_new', - 'source_media': {'type': 'disk', 'path': MOCK_ISO}}) + req = json.dumps( + {'name': 'test_new', 'source_media': { + 'type': 'disk', 'path': MOCK_ISO}} + ) resp = self.request('/plugins/kimchi/templates', req, 'POST') - self.assertEquals(201, resp.status) + self.assertEqual(201, resp.status) # Update name with one of existing name should fail with 400 req = json.dumps({'name': 'test_new'}) resp = self.request('/plugins/kimchi/templates/test', req, 'PUT') - self.assertEquals(400, resp.status) + self.assertEqual(400, resp.status) # Delete the test1 template - resp = self.request('/plugins/kimchi/templates/test_new', '{}', - 'DELETE') - self.assertEquals(204, resp.status) + resp = self.request( + '/plugins/kimchi/templates/test_new', '{}', 'DELETE') + self.assertEqual(204, resp.status) # Update name - new_name = u'kīмсhīTmpl' - new_tmpl_uri = '/plugins/kimchi/templates/%s' \ - % new_name.encode('utf-8') + new_name = 'kīмсhīTmpl' + new_tmpl_uri = urllib.parse.quote( + f'/plugins/kimchi/templates/{new_name}') req = json.dumps({'name': new_name}) resp = self.request('/plugins/kimchi/templates/test', req, 'PUT') - self.assertEquals(303, resp.status) + self.assertEqual(303, resp.status) resp = self.request(new_tmpl_uri) update_tmpl = json.loads(resp.read()) - self.assertEquals(new_name, update_tmpl['name']) + self.assertEqual(new_name, update_tmpl['name']) del tmpl['name'] del update_tmpl['name'] - self.assertEquals(tmpl, update_tmpl) + self.assertEqual(tmpl, update_tmpl) # Update icon req = json.dumps({'icon': 'plugins/kimchi/images/icon-fedora.png'}) resp = self.request(new_tmpl_uri, req, 'PUT') - self.assertEquals(200, resp.status) + self.assertEqual(200, resp.status) update_tmpl = json.loads(resp.read()) - self.assertEquals('plugins/kimchi/images/icon-fedora.png', - update_tmpl['icon']) + self.assertEqual( + 'plugins/kimchi/images/icon-fedora.png', update_tmpl['icon']) # Update os_distro and os_version req = json.dumps({'os_distro': 'fedora', 'os_version': '21'}) resp = self.request(new_tmpl_uri, req, 'PUT') - self.assertEquals(200, resp.status) + self.assertEqual(200, resp.status) update_tmpl = json.loads(resp.read()) - self.assertEquals('fedora', update_tmpl['os_distro']) - self.assertEquals('21', update_tmpl['os_version']) + self.assertEqual('fedora', update_tmpl['os_distro']) + self.assertEqual('21', update_tmpl['os_version']) # Update maxvcpus only req = json.dumps({'cpu_info': {'maxvcpus': 2}}) resp = self.request(new_tmpl_uri, req, 'PUT') - self.assertEquals(200, resp.status) + self.assertEqual(200, resp.status) update_tmpl = json.loads(resp.read()) - self.assertEquals(2, update_tmpl['cpu_info']['maxvcpus']) + self.assertEqual(2, update_tmpl['cpu_info']['maxvcpus']) # Update vcpus only req = json.dumps({'cpu_info': {'vcpus': 2}}) resp = self.request(new_tmpl_uri, req, 'PUT') - self.assertEquals(200, resp.status) + self.assertEqual(200, 
resp.status) update_tmpl = json.loads(resp.read()) - self.assertEquals(2, update_tmpl['cpu_info']['vcpus']) + self.assertEqual(2, update_tmpl['cpu_info']['vcpus']) # Update cpu_info cpu_info_data = { 'cpu_info': { 'maxvcpus': 2, 'vcpus': 2, - 'topology': {'sockets': 1, 'cores': 2, 'threads': 1} + 'topology': {'sockets': 1, 'cores': 2, 'threads': 1}, } } resp = self.request(new_tmpl_uri, json.dumps(cpu_info_data), 'PUT') - self.assertEquals(200, resp.status) + self.assertEqual(200, resp.status) update_tmpl = json.loads(resp.read()) - self.assertEquals(update_tmpl['cpu_info'], cpu_info_data['cpu_info']) + self.assertEqual(update_tmpl['cpu_info'], cpu_info_data['cpu_info']) # Test memory and max memory # - memory greated than max memory (1024 default on x86 # otherwise 2048) - if os.uname()[4] == "s390x": + if os.uname()[4] == 's390x': req = json.dumps({'memory': {'current': 4096}}) else: req = json.dumps({'memory': {'current': 2048}}) resp = self.request(new_tmpl_uri, req, 'PUT') - self.assertEquals(400, resp.status) + self.assertEqual(400, resp.status) # - max memory greater than limit: 16TiB to PPC and 4TiB to x86 req = json.dumps({'memory': {'maxmemory': MAX_MEM_LIM + 1024}}) resp = self.request(new_tmpl_uri, req, 'PUT') - self.assertEquals(400, resp.status) - self.assertTrue('KCHVM0079E' in resp.read()) + self.assertEqual(400, resp.status) + self.assertTrue('KCHVM0079E' in resp.read().decode('utf-8')) # - change only max memory req = json.dumps({'memory': {'maxmemory': 3072}}) resp = self.request(new_tmpl_uri, req, 'PUT') - self.assertEquals(200, resp.status) + self.assertEqual(200, resp.status) update_tmpl = json.loads(resp.read()) - self.assertEquals(3072, update_tmpl['memory']['maxmemory']) + self.assertEqual(3072, update_tmpl['memory']['maxmemory']) # - change only memory req = json.dumps({'memory': {'current': 2048}}) resp = self.request(new_tmpl_uri, req, 'PUT') - self.assertEquals(200, resp.status) + self.assertEqual(200, resp.status) update_tmpl = json.loads(resp.read()) - self.assertEquals(2048, update_tmpl['memory']['current']) - self.assertEquals(3072, update_tmpl['memory']['maxmemory']) + self.assertEqual(2048, update_tmpl['memory']['current']) + self.assertEqual(3072, update_tmpl['memory']['maxmemory']) # - change both values req = json.dumps({'memory': {'current': 1024, 'maxmemory': 1024}}) resp = self.request(new_tmpl_uri, req, 'PUT') - self.assertEquals(200, resp.status) + self.assertEqual(200, resp.status) update_tmpl = json.loads(resp.read()) - self.assertEquals(1024, update_tmpl['memory']['current']) - self.assertEquals(1024, update_tmpl['memory']['maxmemory']) + self.assertEqual(1024, update_tmpl['memory']['current']) + self.assertEqual(1024, update_tmpl['memory']['maxmemory']) # Update cdrom cdrom_data = {'cdrom': 'inexistent.iso'} resp = self.request(new_tmpl_uri, json.dumps(cdrom_data), 'PUT') - self.assertEquals(400, resp.status) + self.assertEqual(400, resp.status) cdrom_data = {'cdrom': '/tmp/existent.iso'} resp = self.request(new_tmpl_uri, json.dumps(cdrom_data), 'PUT') - self.assertEquals(200, resp.status) + self.assertEqual(200, resp.status) update_tmpl = json.loads(resp.read()) - self.assertEquals(update_tmpl['cdrom'], cdrom_data['cdrom']) + self.assertEqual(update_tmpl['cdrom'], cdrom_data['cdrom']) # Update disks - disk_data = {'disks': [{'index': 0, 'size': 10, - 'format': 'raw', 'pool': { - 'name': DEFAULT_POOL}}, - {'index': 1, 'size': 20, 'format': 'qcow2', - 'pool': {'name': DEFAULT_POOL}}]} + disk_data = { + 'disks': [ + { + 'index': 0, + 'size': 10, 
+ 'format': 'raw', + 'pool': {'name': DEFAULT_POOL}, + }, + { + 'index': 1, + 'size': 20, + 'format': 'qcow2', + 'pool': {'name': DEFAULT_POOL}, + }, + ] + } resp = self.request(new_tmpl_uri, json.dumps(disk_data), 'PUT') - self.assertEquals(200, resp.status) + self.assertEqual(200, resp.status) resp = self.request(new_tmpl_uri) - self.assertEquals(200, resp.status) + self.assertEqual(200, resp.status) updated_tmpl = json.loads(resp.read()) - disk_data['disks'][0]['pool'] = {'name': DEFAULT_POOL, - 'type': 'dir'} + disk_data['disks'][0]['pool'] = {'name': DEFAULT_POOL, 'type': 'dir'} - disk_data['disks'][1]['pool'] = {'name': DEFAULT_POOL, - 'type': 'dir'} - self.assertEquals(updated_tmpl['disks'], disk_data['disks']) + disk_data['disks'][1]['pool'] = {'name': DEFAULT_POOL, 'type': 'dir'} + self.assertEqual(updated_tmpl['disks'], disk_data['disks']) # For all supported types, edit the template and check if # the change was made. disk_types = ['qcow', 'qcow2', 'qed', 'raw', 'vmdk', 'vpc'] for disk_type in disk_types: - disk_data = {'disks': [{'index': 0, 'format': disk_type, - 'size': 10, 'pool': {'name': DEFAULT_POOL}}]} + disk_data = { + 'disks': [ + { + 'index': 0, + 'format': disk_type, + 'size': 10, + 'pool': {'name': DEFAULT_POOL}, + } + ] + } resp = self.request(new_tmpl_uri, json.dumps(disk_data), 'PUT') - self.assertEquals(200, resp.status) + self.assertEqual(200, resp.status) resp = self.request(new_tmpl_uri) - self.assertEquals(200, resp.status) + self.assertEqual(200, resp.status) updated_tmpl = json.loads(resp.read()) - disk_data['disks'][0]['pool'] = {u'name': DEFAULT_POOL, - u'type': u'dir'} - self.assertEquals(updated_tmpl['disks'], disk_data['disks']) + disk_data['disks'][0]['pool'] = { + 'name': DEFAULT_POOL, 'type': 'dir'} + self.assertEqual(updated_tmpl['disks'], disk_data['disks']) # Update folder folder_data = {'folder': ['mock', 'isos']} resp = self.request(new_tmpl_uri, json.dumps(folder_data), 'PUT') - self.assertEquals(200, resp.status) + self.assertEqual(200, resp.status) update_tmpl = json.loads(resp.read()) - self.assertEquals(update_tmpl['folder'], folder_data['folder']) + self.assertEqual(update_tmpl['folder'], folder_data['folder']) # Test graphics merge req = json.dumps({'graphics': {'type': 'spice'}}) resp = self.request(new_tmpl_uri, req, 'PUT') - self.assertEquals(200, resp.status) + self.assertEqual(200, resp.status) update_tmpl = json.loads(resp.read()) - self.assertEquals('spice', update_tmpl['graphics']['type']) + self.assertEqual('spice', update_tmpl['graphics']['type']) # update only listen (type does not reset to default 'vnc') req = json.dumps({'graphics': {'listen': 'fe00::0'}}) resp = self.request(new_tmpl_uri, req, 'PUT') - self.assertEquals(200, resp.status) + self.assertEqual(200, resp.status) update_tmpl = json.loads(resp.read()) - self.assertEquals('spice', update_tmpl['graphics']['type']) - self.assertEquals('fe00::0', update_tmpl['graphics']['listen']) + self.assertEqual('spice', update_tmpl['graphics']['type']) + self.assertEqual('fe00::0', update_tmpl['graphics']['listen']) # update only type (listen does not reset to default '127.0.0.1') req = json.dumps({'graphics': {'type': 'vnc'}}) resp = self.request(new_tmpl_uri, req, 'PUT') - self.assertEquals(200, resp.status) + self.assertEqual(200, resp.status) update_tmpl = json.loads(resp.read()) - self.assertEquals('vnc', update_tmpl['graphics']['type']) - self.assertEquals('fe00::0', update_tmpl['graphics']['listen']) + self.assertEqual('vnc', update_tmpl['graphics']['type']) + 
self.assertEqual('fe00::0', update_tmpl['graphics']['listen']) def test_customized_network(self): # Create a template - t = {'name': 'test', - 'source_media': {'type': 'disk', 'path': MOCK_ISO}} + t = {'name': 'test', 'source_media': { + 'type': 'disk', 'path': MOCK_ISO}} req = json.dumps(t) resp = self.request('/plugins/kimchi/templates', req, 'POST') - self.assertEquals(201, resp.status) + self.assertEqual(201, resp.status) # Create networks to be used for testing - networks = [{'name': u'kīмсhī-пet', 'connection': 'isolated'}, - {'name': u'nat-network', 'connection': 'nat'}, - {'name': u'subnet-network', 'connection': 'nat', - 'subnet': '127.0.100.0/24'}] + networks = [ + {'name': 'kīмсhī-пet', 'connection': 'isolated'}, + {'name': 'nat-network', 'connection': 'nat'}, + {'name': 'subnet-network', 'connection': 'nat', + 'subnet': '127.0.100.0/24'}, + ] # Verify the current system has at least one interface to create a # bridged network interfaces = json.loads( self.request( - '/plugins/kimchi/interfaces?_inuse=false&type=nic').read()) + '/plugins/kimchi/interfaces?_inuse=false&type=nic').read() + ) if len(interfaces) > 0: iface = interfaces[0]['name'] - networks.append({'name': u'bridge-network', - 'connection': 'macvtap', - 'interfaces': [iface]}) + networks.append( + { + 'name': 'bridge-network', + 'connection': 'macvtap', + 'interfaces': [iface], + } + ) if not FeatureTests.is_nm_running(): - networks.append({'name': u'bridge-network-with-vlan', - 'connection': 'bridge', - 'interfaces': [iface], 'vlan_id': 987}) + networks.append( + { + 'name': 'bridge-network-with-vlan', + 'connection': 'bridge', + 'interfaces': [iface], + 'vlan_id': 987, + } + ) tmpl_nets = [] for net in networks: @@ -400,20 +451,19 @@ def test_customized_network(self): tmpl_nets.append(net['name']) req = json.dumps({'networks': tmpl_nets}) resp = self.request('/plugins/kimchi/templates/test', req, 'PUT') - self.assertEquals(200, resp.status) + self.assertEqual(200, resp.status) def test_customized_storagepool(self): # Create a template - t = {'name': 'test', - 'source_media': {'type': 'disk', 'path': MOCK_ISO}} + t = {'name': 'test', 'source_media': { + 'type': 'disk', 'path': MOCK_ISO}} req = json.dumps(t) resp = self.request('/plugins/kimchi/templates', req, 'POST') - self.assertEquals(201, resp.status) + self.assertEqual(201, resp.status) # MockModel always returns 2 partitions (vdx, vdz) - partitions = json.loads( - self.request('/plugins/kimchi/host/partitions').read() - ) + partitions = json.loads(self.request( + '/plugins/kimchi/host/partitions').read()) devs = [dev['path'] for dev in partitions] # MockModel always returns 3 FC devices @@ -423,111 +473,173 @@ def test_customized_storagepool(self): fc_devs = [dev['name'] for dev in fc_devs] poolDefs = [ - {'type': 'dir', 'name': u'kīмсhīUnitTestDirPool', - 'path': '/tmp/kimchi-images'}, - {'type': 'netfs', 'name': u'kīмсhīUnitTestNSFPool', - 'source': {'host': 'localhost', - 'path': '/var/lib/kimchi/nfs-pool'}}, - {'type': 'scsi', 'name': u'kīмсhīUnitTestSCSIFCPool', - 'source': {'adapter_name': fc_devs[0]}}, - {'type': 'iscsi', 'name': u'kīмсhīUnitTestISCSIPool', - 'source': {'host': '127.0.0.1', - 'target': 'iqn.2015-01.localhost.kimchiUnitTest'}}, - {'type': 'logical', 'name': u'kīмсhīUnitTestLogicalPool', - 'source': {'devices': [devs[0]]}}] + { + 'type': 'dir', + 'name': 'kīмсhīUnitTestDirPool', + 'path': '/tmp/kimchi-images', + }, + { + 'type': 'netfs', + 'name': 'kīмсhīUnitTestNSFPool', + 'source': {'host': 'localhost', 'path': 
'/var/lib/kimchi/nfs-pool'}, + }, + { + 'type': 'scsi', + 'name': 'kīмсhīUnitTestSCSIFCPool', + 'source': {'adapter_name': fc_devs[0]}, + }, + { + 'type': 'iscsi', + 'name': 'kīмсhīUnitTestISCSIPool', + 'source': { + 'host': '127.0.0.1', + 'target': 'iqn.2015-01.localhost.kimchiUnitTest', + }, + }, + { + 'type': 'logical', + 'name': 'kīмсhīUnitTestLogicalPool', + 'source': {'devices': [devs[0]]}, + }, + ] for pool in poolDefs: - self.request('/plugins/kimchi/storagepools', json.dumps(pool), - 'POST') - pool_uri = '/plugins/kimchi/storagepools/%s' \ - % pool['name'].encode('utf-8') - self.request(pool_uri + '/activate', '{}', 'POST') + resp = self.request( + '/plugins/kimchi/storagepools', json.dumps(pool), 'POST' + ) + self.assertEqual(201, resp.status) + pool_uri = urllib.parse.quote( + f"/plugins/kimchi/storagepools/{pool['name']}" + ) + resp = self.request(pool_uri + '/activate', '{}', 'POST') + self.assertEqual(200, resp.status) req = None + unquoted_pool_uri = urllib.parse.unquote(pool_uri) if pool['type'] in READONLY_POOL_TYPE: resp = self.request(pool_uri + '/storagevolumes') vols = json.loads(resp.read()) if len(vols) > 0: + vol = vols[0]['name'] - req = json.dumps({'disks': [{'volume': vol, - 'pool': {'name': pool_uri}, - 'format': 'raw'}]}) + req = json.dumps( + { + 'disks': [ + { + 'volume': vol, + 'pool': {'name': unquoted_pool_uri}, + 'format': 'raw', + } + ] + } + ) elif pool['type'] == 'logical': - req = json.dumps({'disks': [{'pool': {'name': pool_uri}, - 'format': 'raw', 'size': 10}]}) + req = json.dumps( + { + 'disks': [ + { + 'pool': {'name': unquoted_pool_uri}, + 'format': 'raw', + 'size': 10, + } + ] + } + ) else: - req = json.dumps({'disks': [{'pool': {'name': pool_uri}, - 'format': 'qcow2', 'size': 10}]}) + req = json.dumps( + { + 'disks': [ + { + 'pool': {'name': unquoted_pool_uri}, + 'format': 'qcow2', + 'size': 10, + } + ] + } + ) if req is not None: - resp = self.request('/plugins/kimchi/templates/test', req, - 'PUT') - self.assertEquals(200, resp.status) + resp = self.request( + '/plugins/kimchi/templates/test', req, 'PUT') + self.assertEqual(200, resp.status) # Test disk template update with different pool - pool_uri = u'/plugins/kimchi/storagepools/kīмсhīUnitTestDirPool' - disk_data = {'disks': [{'size': 5, 'format': 'qcow2', - 'pool': {'name': pool_uri}}]} + pool_uri = '/plugins/kimchi/storagepools/kīмсhīUnitTestDirPool' + disk_data = { + 'disks': [{'size': 5, 'format': 'qcow2', 'pool': {'name': pool_uri}}] + } req = json.dumps(disk_data) resp = self.request('/plugins/kimchi/templates/test', req, 'PUT') - self.assertEquals(200, resp.status) - del(disk_data['disks'][0]['pool']) + self.assertEqual(200, resp.status) + del disk_data['disks'][0]['pool'] disk_data['disks'][0]['index'] = 0 - disk_data['disks'][0]['pool'] = {u'name': pool_uri, - u'type': u'dir'} - tmpl = json.loads( - self.request('/plugins/kimchi/templates/test').read()) - self.assertEquals(sorted(disk_data['disks'][0].keys()), - sorted(tmpl['disks'][0].keys())) - self.assertEquals(sorted(disk_data['disks'][0].values()), - sorted(tmpl['disks'][0].values())) + disk_data['disks'][0]['pool'] = {'name': pool_uri, 'type': 'dir'} + tmpl = json.loads(self.request( + '/plugins/kimchi/templates/test').read()) + self.assertListEqual( + sorted(disk_data['disks'][0].keys()), sorted( + tmpl['disks'][0].keys()) + ) + self.assertListEqual( + list(disk_data['disks'][0].values()), list( + tmpl['disks'][0].values()) + ) def test_tmpl_integrity(self): - mock_iso2 = "/tmp/mock2.iso" + mock_iso2 = '/tmp/mock2.iso' 
iso_gen.construct_fake_iso(mock_iso2, True, '14.04', 'ubuntu') # Create a network and a pool for testing template integrity - net = {'name': u'nat-network', 'connection': 'nat'} - self.request('/plugins/kimchi/networks', json.dumps(net), 'POST') + net = {'name': 'nat-network', 'connection': 'nat'} + resp = self.request('/plugins/kimchi/networks', + json.dumps(net), 'POST') + self.assertEqual(201, resp.status) pool = {'type': 'dir', 'name': 'dir-pool', 'path': '/tmp/dir-pool'} - self.request('/plugins/kimchi/storagepools', json.dumps(pool), 'POST') - pool_uri = '/plugins/kimchi/storagepools/%s' \ - % pool['name'].encode('utf-8') - self.request(pool_uri + '/activate', '{}', 'POST') + resp = self.request('/plugins/kimchi/storagepools', + json.dumps(pool), 'POST') + self.assertEqual(201, resp.status) + pool_uri = f"/plugins/kimchi/storagepools/{pool['name']}" + resp = self.request(pool_uri + '/activate', '{}', 'POST') + self.assertEqual(200, resp.status) # Create a template using the custom network and pool - t = {'name': 'test', - 'source_media': {'type': 'disk', 'path': mock_iso2}, - 'networks': ['nat-network'], - 'disks': [{'pool': { - 'name': '/plugins/kimchi/storagepools/dir-pool'}, - 'size': 2, - 'format': 'qcow2'}]} + t = { + 'name': 'test', + 'source_media': {'type': 'disk', 'path': mock_iso2}, + 'networks': ['nat-network'], + 'disks': [ + { + 'pool': {'name': '/plugins/kimchi/storagepools/dir-pool'}, + 'size': 2, + 'format': 'qcow2', + } + ], + } req = json.dumps(t) resp = self.request('/plugins/kimchi/templates', req, 'POST') - self.assertEquals(201, resp.status) + self.assertEqual(201, resp.status) # Try to delete network # It should fail as it is associated to a template - resp = self.request('/plugins/kimchi/networks/nat-network', '{}', - 'DELETE') - self.assertIn("KCHNET0017E", json.loads(resp.read())["reason"]) + resp = self.request( + '/plugins/kimchi/networks/nat-network', '{}', 'DELETE') + self.assertIn('KCHNET0017E', json.loads(resp.read())['reason']) # Update template to release network and then delete it params = {'networks': []} req = json.dumps(params) self.request('/plugins/kimchi/templates/test', req, 'PUT') - resp = self.request('/plugins/kimchi/networks/nat-network', '{}', - 'DELETE') - self.assertEquals(204, resp.status) + resp = self.request( + '/plugins/kimchi/networks/nat-network', '{}', 'DELETE') + self.assertEqual(204, resp.status) # Try to delete the storagepool # It should fail as it is associated to a template - resp = self.request('/plugins/kimchi/storagepools/dir-pool', '{}', - 'DELETE') - self.assertEquals(400, resp.status) + resp = self.request( + '/plugins/kimchi/storagepools/dir-pool', '{}', 'DELETE') + self.assertEqual(400, resp.status) # Verify the template os.remove(mock_iso2) res = json.loads(self.request('/plugins/kimchi/templates/test').read()) - self.assertEquals(res['invalid']['cdrom'], [mock_iso2]) + self.assertEqual(res['invalid']['cdrom'], [mock_iso2]) diff --git a/tests/test_vmtemplate.py b/tests/test_vmtemplate.py index ca4eae551..00a388bbe 100644 --- a/tests/test_vmtemplate.py +++ b/tests/test_vmtemplate.py @@ -16,22 +16,31 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - -import iso_gen import os -import psutil import unittest import uuid -from wok.xmlutils.utils import xpath_get_text - -from wok.plugins.kimchi.osinfo import get_template_default, MEM_DEV_SLOTS +import 
iso_gen +import psutil +from wok.plugins.kimchi.osinfo import get_template_default +from wok.plugins.kimchi.osinfo import MEM_DEV_SLOTS from wok.plugins.kimchi.vmtemplate import VMTemplate +from wok.xmlutils.utils import xpath_get_text -DISKS = [{'size': 10, 'format': 'raw', 'index': 0, 'pool': {'name': - '/plugins/kimchi/storagepools/default-pool'}}, - {'size': 5, 'format': 'qcow2', 'index': 1, 'pool': {'name': - '/plugins/kimchi/storagepools/default-pool'}}] +DISKS = [ + { + 'size': 10, + 'format': 'raw', + 'index': 0, + 'pool': {'name': '/plugins/kimchi/storagepools/default-pool'}, + }, + { + 'size': 5, + 'format': 'qcow2', + 'index': 1, + 'pool': {'name': '/plugins/kimchi/storagepools/default-pool'}, + }, +] class VMTemplateTests(unittest.TestCase): @@ -46,27 +55,33 @@ def test_minimal_construct(self): disk_bus = get_template_default('old', 'disk_bus') memory = get_template_default('old', 'memory') nic_model = get_template_default('old', 'nic_model') - fields = (('name', 'test'), ('cdrom', self.iso), - ('os_distro', 'unknown'), ('os_version', 'unknown'), - ('cpu_info', {'vcpus': 1, 'maxvcpus': 1}), - ('memory', memory), ('networks', ['default']), - ('disk_bus', disk_bus), ('nic_model', nic_model), - ('graphics', {'type': 'vnc', 'listen': '127.0.0.1'})) + fields = ( + ('name', 'test'), + ('cdrom', self.iso), + ('os_distro', 'unknown'), + ('os_version', 'unknown'), + ('cpu_info', {'vcpus': 1, 'maxvcpus': 1}), + ('memory', memory), + ('networks', ['default']), + ('disk_bus', disk_bus), + ('nic_model', nic_model), + ('graphics', {'type': 'vnc', 'listen': '127.0.0.1'}), + ) args = {'name': 'test', 'cdrom': self.iso} t = VMTemplate(args) for name, val in fields: - if os.uname()[4] == "s390x" and name == 'networks': + if os.uname()[4] == 's390x' and name == 'networks': continue - self.assertEquals(val, t.info.get(name)) + self.assertEqual(val, t.info.get(name)) def test_construct_overrides(self): graphics = {'type': 'spice', 'listen': '127.0.0.1'} args = {'name': 'test', 'disks': DISKS, - 'graphics': graphics, "cdrom": self.iso} + 'graphics': graphics, 'cdrom': self.iso} t = VMTemplate(args) - self.assertEquals(2, len(t.info['disks'])) - self.assertEquals(graphics, t.info['graphics']) + self.assertEqual(2, len(t.info['disks'])) + self.assertEqual(graphics, t.info['graphics']) def test_specified_graphics(self): # Test specified listen @@ -74,66 +89,76 @@ def test_specified_graphics(self): args = {'name': 'test', 'disks': DISKS, 'graphics': graphics, 'cdrom': self.iso} t = VMTemplate(args) - self.assertEquals(graphics, t.info['graphics']) + self.assertEqual(graphics, t.info['graphics']) # Test specified type graphics = {'type': 'spice', 'listen': '127.0.0.1'} args['graphics'] = graphics t = VMTemplate(args) - self.assertEquals(graphics, t.info['graphics']) + self.assertEqual(graphics, t.info['graphics']) # If no listen specified, test the default listen graphics = {'type': 'vnc'} args['graphics'] = graphics t = VMTemplate(args) - self.assertEquals(graphics['type'], t.info['graphics']['type']) - self.assertEquals('127.0.0.1', t.info['graphics']['listen']) + self.assertEqual(graphics['type'], t.info['graphics']['type']) + self.assertEqual('127.0.0.1', t.info['graphics']['listen']) def test_mem_dev_slots(self): vm_uuid = str(uuid.uuid4()).replace('-', '') - t = VMTemplate({'name': 'test-template', 'cdrom': self.iso, - 'memory': {'current': 2048, 'maxmemory': 3072}}) + t = VMTemplate( + { + 'name': 'test-template', + 'cdrom': self.iso, + 'memory': {'current': 2048, 'maxmemory': 3072}, + } + ) xml = 
t.to_vm_xml('test-vm', vm_uuid) - expr = "/domain/maxMemory/@slots" + expr = '/domain/maxMemory/@slots' slots = str(MEM_DEV_SLOTS[os.uname()[4]]) - self.assertEquals(slots, xpath_get_text(xml, expr)[0]) + self.assertEqual(slots, xpath_get_text(xml, expr)[0]) def test_to_xml(self): - if not os.uname()[4] == "s390x": + if not os.uname()[4] == 's390x': graphics = {'type': 'spice', 'listen': '127.0.0.1'} else: graphics = {'type': 'vnc', 'listen': '127.0.0.1'} vm_uuid = str(uuid.uuid4()).replace('-', '') t = VMTemplate({'name': 'test-template', 'cdrom': self.iso}) xml = t.to_vm_xml('test-vm', vm_uuid, graphics=graphics) - self.assertEquals(vm_uuid, xpath_get_text(xml, "/domain/uuid")[0]) - self.assertEquals('test-vm', xpath_get_text(xml, "/domain/name")[0]) - if not os.uname()[4] == "s390x": - expr = "/domain/devices/graphics/@type" - self.assertEquals(graphics['type'], xpath_get_text(xml, expr)[0]) - expr = "/domain/devices/graphics/@listen" - self.assertEquals(graphics['listen'], xpath_get_text(xml, expr)[0]) - expr = "/domain/maxMemory/@slots" + self.assertEqual(vm_uuid, xpath_get_text(xml, '/domain/uuid')[0]) + self.assertEqual('test-vm', xpath_get_text(xml, '/domain/name')[0]) + if not os.uname()[4] == 's390x': + expr = '/domain/devices/graphics/@type' + self.assertEqual(graphics['type'], xpath_get_text(xml, expr)[0]) + expr = '/domain/devices/graphics/@listen' + self.assertEqual(graphics['listen'], xpath_get_text(xml, expr)[0]) + expr = '/domain/maxMemory/@slots' # The default is memory and maxmemory have the same value, so # max memory tag is not set - self.assertEquals(0, len(xpath_get_text(xml, expr))) - expr = "/domain/memory" - if os.uname()[4] == "s390x": - self.assertEquals(str(2048), xpath_get_text(xml, expr)[0]) + self.assertEqual(0, len(xpath_get_text(xml, expr))) + expr = '/domain/memory' + if os.uname()[4] == 's390x': + self.assertEqual(str(2048), xpath_get_text(xml, expr)[0]) else: - self.assertEquals(str(1024), xpath_get_text(xml, expr)[0]) + self.assertEqual(str(1024), xpath_get_text(xml, expr)[0]) if hasattr(psutil, 'virtual_memory'): host_memory = psutil.virtual_memory().total >> 10 else: host_memory = psutil.TOTAL_PHYMEM >> 10 - t = VMTemplate({'name': 'test-template', 'cdrom': self.iso, - 'memory': {'current': (host_memory >> 10) - 512}}) + t = VMTemplate( + { + 'name': 'test-template', + 'cdrom': self.iso, + 'memory': {'current': (host_memory >> 10) - 512}, + } + ) try: xml = t.to_vm_xml('test-vm', vm_uuid, graphics=graphics) except Exception as e: # Test current memory greater than maxmemory (1024/default) - self.assertTrue('KCHVM0041E' in e.message) + self.assertTrue('KCHVM0041E' in str(e)) def test_arg_merging(self): """ @@ -141,34 +166,45 @@ def test_arg_merging(self): provided parameters. 
""" graphics = {'type': 'vnc', 'listen': '127.0.0.1'} - args = {'name': 'test', 'os_distro': 'opensuse', 'os_version': '12.3', - 'cpu_info': {'vcpus': 2, 'maxvcpus': 4}, - 'memory': {'current': 2048, 'maxmemory': 3072}, - 'networks': ['foo'], 'cdrom': self.iso, 'graphics': graphics} + args = { + 'name': 'test', + 'os_distro': 'opensuse', + 'os_version': '12.3', + 'cpu_info': {'vcpus': 2, 'maxvcpus': 4}, + 'memory': {'current': 2048, 'maxmemory': 3072}, + 'networks': ['foo'], + 'cdrom': self.iso, + 'graphics': graphics, + } t = VMTemplate(args) - self.assertEquals(2, t.info.get('cpu_info', {}).get('vcpus')) - self.assertEquals(4, t.info.get('cpu_info', {}).get('maxvcpus')) - self.assertEquals(2048, t.info.get('memory').get('current')) - self.assertEquals(3072, t.info.get('memory').get('maxmemory')) - self.assertEquals(['foo'], t.info.get('networks')) - self.assertEquals(self.iso, t.info.get('cdrom')) - self.assertEquals(graphics, t.info.get('graphics')) + self.assertEqual(2, t.info.get('cpu_info', {}).get('vcpus')) + self.assertEqual(4, t.info.get('cpu_info', {}).get('maxvcpus')) + self.assertEqual(2048, t.info.get('memory').get('current')) + self.assertEqual(3072, t.info.get('memory').get('maxmemory')) + self.assertEqual(['foo'], t.info.get('networks')) + self.assertEqual(self.iso, t.info.get('cdrom')) + self.assertEqual(graphics, t.info.get('graphics')) def test_netboot_vmtemplate(self): disk_bus = get_template_default('old', 'disk_bus') memory = get_template_default('old', 'memory') nic_model = get_template_default('old', 'nic_model') - fields = (('name', 'test'), ('os_distro', 'unknown'), - ('os_version', 'unknown'), - ('cpu_info', {'vcpus': 1, 'maxvcpus': 1}), - ('memory', memory), ('networks', ['default']), - ('disk_bus', disk_bus), ('nic_model', nic_model), - ('graphics', {'type': 'vnc', 'listen': '127.0.0.1'})) + fields = ( + ('name', 'test'), + ('os_distro', 'unknown'), + ('os_version', 'unknown'), + ('cpu_info', {'vcpus': 1, 'maxvcpus': 1}), + ('memory', memory), + ('networks', ['default']), + ('disk_bus', disk_bus), + ('nic_model', nic_model), + ('graphics', {'type': 'vnc', 'listen': '127.0.0.1'}), + ) t = VMTemplate({'name': 'test'}, netboot=True) for name, val in fields: - if os.uname()[4] == "s390x" and name == 'networks': + if os.uname()[4] == 's390x' and name == 'networks': continue - self.assertEquals(val, t.info.get(name)) + self.assertEqual(val, t.info.get(name)) self.assertNotIn('cdrom', t.info.keys()) diff --git a/utils.py b/utils.py index 22430ce03..b2f5bbf0d 100644 --- a/utils.py +++ b/utils.py @@ -17,22 +17,23 @@ # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA # - import contextlib import json +import os import re import sqlite3 import time -import os -import urllib2 -from httplib import HTTPConnection, HTTPException -from urlparse import urlparse +import urllib +from http.client import HTTPConnection +from http.client import HTTPException -from wok.exception import InvalidParameter, OperationFailed +from wok.exception import InvalidParameter +from wok.exception import OperationFailed from wok.plugins.kimchi import config from wok.plugins.kimchi.osinfo import get_template_default from wok.stringutils import encode_value -from wok.utils import run_command, wok_log +from wok.utils import run_command +from wok.utils import wok_log from wok.xmlutils.utils import xpath_get_text MAX_REDIRECTION_ALLOWED = 5 @@ -42,7 +43,7 @@ def _uri_to_name(collection, uri): expr = 
'/plugins/kimchi/%s/(.*?)$' % collection m = re.match(expr, uri) if not m: - raise InvalidParameter("WOKUTILS0001E", {'uri': uri}) + raise InvalidParameter('WOKUTILS0001E', {'uri': uri}) return m.group(1) @@ -58,13 +59,12 @@ def check_url_path(path, redirected=0): if redirected > MAX_REDIRECTION_ALLOWED: return False try: - code = '' - parse_result = urlparse(path) + parse_result = urllib.parse.urlparse(path) server_name = parse_result.netloc urlpath = parse_result.path if not urlpath: # Just a server, as with a repo. - with contextlib.closing(urllib2.urlopen(path)) as res: + with contextlib.closing(urllib.request.urlopen(path)) as res: code = res.getcode() else: # socket.gaierror could be raised, @@ -80,10 +80,10 @@ def check_url_path(path, redirected=0): elif code == 301 or code == 302: for header in response.getheaders(): if header[0] == 'location': - return check_url_path(header[1], redirected+1) + return check_url_path(header[1], redirected + 1) else: return False - except (urllib2.URLError, HTTPException, IOError, ValueError): + except (urllib.error.URLError, HTTPException, IOError, ValueError): return False return True @@ -102,19 +102,21 @@ def upgrade_objectstore_data(item, old_uri, new_uri): for row in cursor.fetchall(): # execute update here template = json.loads(row[1]) - path = (template[item] if item in template else 'none') + path = template[item] if item in template else 'none' if path.startswith(old_uri): template[item] = new_uri + path - sql = "UPDATE objects SET json=?, version=? WHERE id=?" - cursor.execute(sql, (json.dumps(template), - config.get_kimchi_version(), row[0])) + sql = 'UPDATE objects SET json=?, version=? WHERE id=?' + cursor.execute( + sql, (json.dumps(template), + config.get_kimchi_version(), row[0]) + ) conn.commit() total += 1 - except sqlite3.Error, e: + except sqlite3.Error as e: if conn: conn.rollback() - wok_log.error("Error while upgrading objectstore data: %s", e.args[0]) - raise OperationFailed("KCHUTILS0006E") + wok_log.error('Error while upgrading objectstore data: %s', e.args[0]) + raise OperationFailed('KCHUTILS0006E') finally: if conn: conn.close() @@ -139,28 +141,26 @@ def upgrade_objectstore_template_disks(libv_conn): # Get pool info pool_uri = template['storagepool'] pool_name = pool_name_from_uri(pool_uri) - pool = libv_conn.get().storagePoolLookupByName( - pool_name.encode("utf-8")) - pool_type = xpath_get_text(pool.XMLDesc(0), "/pool/@type")[0] + pool = libv_conn.get().storagePoolLookupByName(pool_name.encode('utf-8')) + pool_type = xpath_get_text(pool.XMLDesc(0), '/pool/@type')[0] # Update json new_disks = [] for disk in template['disks']: - disk['pool'] = {'name': pool_uri, - 'type': pool_type} + disk['pool'] = {'name': pool_uri, 'type': pool_type} new_disks.append(disk) template['disks'] = new_disks del template['storagepool'] - sql = "UPDATE objects SET json=? WHERE id=?" + sql = 'UPDATE objects SET json=? WHERE id=?' 
cursor.execute(sql, (json.dumps(template), row[0])) conn.commit() total += 1 - except sqlite3.Error, e: + except sqlite3.Error as e: if conn: conn.rollback() - wok_log.error("Error while upgrading objectstore data: %s", e.args[0]) - raise OperationFailed("KCHUTILS0006E") + wok_log.error('Error while upgrading objectstore data: %s', e.args[0]) + raise OperationFailed('KCHUTILS0006E') finally: if conn: conn.close() @@ -186,24 +186,23 @@ def upgrade_objectstore_memory(): memory = template['memory'] # New memory is a dictionary with 'current' and 'maxmemory' if type(memory) is not dict: - maxmem = get_template_default('modern', - 'memory').get('maxmemory') + maxmem = get_template_default( + 'modern', 'memory').get('maxmemory') if maxmem < memory: maxmem = memory - template['memory'] = {'current': memory, - 'maxmemory': maxmem} + template['memory'] = {'current': memory, 'maxmemory': maxmem} else: continue - sql = "UPDATE objects SET json=? WHERE id=?" + sql = 'UPDATE objects SET json=? WHERE id=?' cursor.execute(sql, (json.dumps(template), row[0])) conn.commit() total += 1 - except sqlite3.Error, e: + except sqlite3.Error as e: if conn: conn.rollback() - wok_log.error("Error while upgrading objectstore data: %s", e.args[0]) - raise OperationFailed("KCHUTILS0006E") + wok_log.error('Error while upgrading objectstore data: %s', e.args[0]) + raise OperationFailed('KCHUTILS0006E') finally: if conn: conn.close() @@ -240,7 +239,7 @@ def get_next_clone_name(all_names, basename, name_suffix='', ts=False): ts_suffix = int(time.time() * 1000000) new_name = u'%s-clone-%d' % (basename, ts_suffix) else: - re_expr = u'%s-clone-(?P<%s>\d+)' % (basename, re_group_num) + re_expr = u'%s-clone-(?P<%s>\\d+)' % (basename, re_group_num) if name_suffix != '': re_expr = u'%s%s' % (re_expr, name_suffix) @@ -293,10 +292,19 @@ def create_disk_image(format_type, path, capacity): """ out, err, rc = run_command( - ["/usr/bin/qemu-img", "create", "-f", format_type, "-o", - "preallocation=metadata", path, encode_value(capacity) + "G"]) + [ + '/usr/bin/qemu-img', + 'create', + '-f', + format_type, + '-o', + 'preallocation=metadata', + path, + encode_value(capacity) + 'G', + ] + ) if rc != 0: - raise OperationFailed("KCHTMPL0041E", {'err': err}) + raise OperationFailed('KCHTMPL0041E', {'err': err}) return diff --git a/vmtemplate.py b/vmtemplate.py index 1acd4dbee..21245b2c6 100644 --- a/vmtemplate.py +++ b/vmtemplate.py @@ -16,23 +16,25 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - import os import platform import stat import time -import urlparse +import urllib import uuid + from lxml import etree from lxml.builder import E - -from wok.exception import InvalidParameter, ImageFormatError, IsoFormatError -from wok.exception import MissingParameter, OperationFailed - +from wok.exception import ImageFormatError +from wok.exception import InvalidParameter +from wok.exception import IsoFormatError +from wok.exception import MissingParameter +from wok.exception import OperationFailed from wok.plugins.kimchi import imageinfo from wok.plugins.kimchi import osinfo from wok.plugins.kimchi.isoinfo import IsoImage -from wok.plugins.kimchi.utils import check_url_path, is_s390x +from wok.plugins.kimchi.utils import check_url_path +from wok.plugins.kimchi.utils import is_s390x from wok.plugins.kimchi.utils import pool_name_from_uri from 
wok.plugins.kimchi.xmlutils.bootorder import get_bootorder_xml from wok.plugins.kimchi.xmlutils.cpu import get_cpu_xml @@ -45,7 +47,6 @@ class VMTemplate(object): - def __init__(self, args, scan=False, netboot=False): """ Construct a VM Template from a widely variable amount of information. @@ -144,9 +145,11 @@ def __init__(self, args, scan=False, netboot=False): keys = sorted(disk_info.keys()) - if ((keys != sorted(basic_disk)) and - (keys != sorted(ro_disk)) and - (keys != sorted(base_disk))): + if ( + (keys != sorted(basic_disk)) + and (keys != sorted(ro_disk)) + and (keys != sorted(base_disk)) + ): # Addition check required only on s390x if not is_s390x() or (keys != sorted(basic_path_disk)): raise MissingParameter('KCHTMPL0028E') @@ -167,8 +170,9 @@ def __init__(self, args, scan=False, netboot=False): disk_info.update(disk) keys = sorted(disk_info.keys()) - if ((keys != sorted(basic_path_disk)) and - (keys != sorted(base_path_disk))): + if (keys != sorted(basic_path_disk)) and ( + keys != sorted(base_path_disk) + ): raise MissingParameter('KCHTMPL0042E') disk_info['path'] = path @@ -202,7 +206,7 @@ def _get_os_info(self, args, scan): d['size'] = d_info['virtual-size'] if len(base_imgs) == 0: - raise MissingParameter("KCHTMPL0016E") + raise MissingParameter('KCHTMPL0016E') return distro, version @@ -215,13 +219,13 @@ def _gen_name(self, distro, version): def get_iso_info(self, iso): iso_prefixes = ['/', 'http', 'https', 'ftp', 'ftps', 'tftp'] - if len(filter(iso.startswith, iso_prefixes)) == 0: - raise InvalidParameter("KCHTMPL0006E", {'param': iso}) + if len(list(filter(iso.startswith, iso_prefixes))) == 0: + raise InvalidParameter('KCHTMPL0006E', {'param': iso}) try: iso_img = IsoImage(iso) return iso_img.probe() except IsoFormatError: - raise InvalidParameter("KCHISO0001E", {'filename': iso}) + raise InvalidParameter('KCHISO0001E', {'filename': iso}) def _get_cdrom_xml(self, libvirt_stream_protocols): if 'cdrom' not in self.info: @@ -235,7 +239,7 @@ def _get_cdrom_xml(self, libvirt_stream_protocols): params['path'] = self.info['cdrom'] if self.info.get('iso_stream', False): - protocol = urlparse.urlparse(params['path']).scheme + protocol = urllib.parse.urlparse(params['path']).scheme if protocol not in libvirt_stream_protocols: driveOpt = 'file=%(path)s,if=none,id=drive-%(bus)s0-1-0,' driveOpt += 'readonly=on,format=%(format)s' @@ -253,14 +257,21 @@ def _get_cdrom_xml(self, libvirt_stream_protocols): return xml def _get_disks_xml(self, vm_uuid): - base_disk_params = {'type': 'disk', 'disk': 'file', - 'bus': self.info['disk_bus']} + base_disk_params = { + 'type': 'disk', + 'disk': 'file', + 'bus': self.info['disk_bus'], + } logical_disk_params = {'format': 'raw'} iscsi_disk_params = {'disk': 'block', 'format': 'raw'} scsi_disk = 'volume' if self.fc_host_support else 'block' - scsi_disk_params = {'disk': scsi_disk, 'type': 'lun', - 'format': 'raw', 'bus': 'scsi'} + scsi_disk_params = { + 'disk': scsi_disk, + 'type': 'lun', + 'format': 'raw', + 'bus': 'scsi', + } disks_xml = '' for index, disk in enumerate(self.info['disks']): @@ -273,10 +284,10 @@ def _get_disks_xml(self, vm_uuid): volume = disk.get('volume') if volume is not None: - params['path'] = self._get_volume_path(disk['pool']['name'], - volume) + params['path'] = self._get_volume_path( + disk['pool']['name'], volume) else: - img = "%s-%s.img" % (vm_uuid, params['index']) + img = '%s-%s.img' % (vm_uuid, params['index']) if disk.get('pool'): storage_path = self._get_storage_path(disk['pool']['name']) params['pool_type'] = 
disk['pool']['type'] @@ -286,31 +297,34 @@ def _get_disks_xml(self, vm_uuid): params['path'] = os.path.join(storage_path, img) disks_xml += get_disk_xml(params)[1] - return unicode(disks_xml, 'utf-8') + return disks_xml def to_volume_list(self, vm_uuid): ret = [] for i, d in enumerate(self.info['disks']): # Create only .img. If storagepool is (i)SCSI, volumes will be LUNs - if 'pool' in d and d['pool']['type'] in ["iscsi", "scsi"]: + if 'pool' in d and d['pool']['type'] in ['iscsi', 'scsi']: continue index = d.get('index', i) - volume = "%s-%s.img" % (vm_uuid, index) + volume = '%s-%s.img' % (vm_uuid, index) if 'path' in d: storage_path = d['path'] else: storage_path = self._get_storage_path(d['pool']['name']) - info = {'name': volume, - 'capacity': d['size'], - 'format': d['format'], - 'path': '%s/%s' % (storage_path, volume), - 'pool': d['pool']['name'] if 'pool' in d else None} - - if ('pool' in d and 'logical' == d['pool']['type']) or \ - info['format'] not in ['qcow2', 'raw']: + info = { + 'name': volume, + 'capacity': d['size'], + 'format': d['format'], + 'path': '%s/%s' % (storage_path, volume), + 'pool': d['pool']['name'] if 'pool' in d else None, + } + + if ('pool' in d and 'logical' == d['pool']['type']) or info[ + 'format' + ] not in ['qcow2', 'raw']: info['allocation'] = info['capacity'] else: info['allocation'] = 0 @@ -319,7 +333,7 @@ def to_volume_list(self, vm_uuid): info['base'] = dict() base_fmt = imageinfo.probe_img_info(d['base'])['format'] if base_fmt is None: - raise InvalidParameter("KCHTMPL0024E", {'path': d['base']}) + raise InvalidParameter('KCHTMPL0024E', {'path': d['base']}) info['base']['path'] = d['base'] info['base']['format'] = base_fmt @@ -332,33 +346,37 @@ def to_volume_list(self, vm_uuid): # target must be qcow2 in order to use a backing file target_fmt = 'qcow2' - v_tree.append(E.backingStore( - E.path(info['base']['path']), - E.format(type=info['base']['format']))) + v_tree.append( + E.backingStore( + E.path(info['base']['path']), + E.format(type=info['base']['format']), + ) + ) - target = E.target( - E.format(type=target_fmt), E.path(info['path'])) + target = E.target(E.format(type=target_fmt), E.path(info['path'])) v_tree.append(target) info['xml'] = etree.tostring(v_tree) ret.append(info) return ret def _get_networks_xml(self): - networks = "" - params = {'type': 'network', - 'model': self.info['nic_model']} + networks = '' + params = {'type': 'network', 'model': self.info['nic_model']} info_networks = self.info.get('networks', []) for nw in info_networks: params['network'] = nw - networks += get_iface_xml(params, self.info['arch'], - self.info['os_distro'], - self.info['os_version']) - return unicode(networks, 'utf-8') + networks += get_iface_xml( + params, + self.info['arch'], + self.info['os_distro'], + self.info['os_version'], + ) + return networks def _get_interfaces_xml(self): - interfaces = "" + interfaces = '' params = {'model': self.info['nic_model']} for interface in self.info.get('interfaces', []): typ = interface['type'] @@ -370,10 +388,13 @@ def _get_interfaces_xml(self): params['virtualport_type'] = 'openvswitch' params['name'] = interface['name'] - interfaces += get_iface_xml(params, self.info['arch'], - self.info['os_distro'], - self.info['os_version']) - return unicode(interfaces, 'utf-8') + interfaces += get_iface_xml( + params, + self.info['arch'], + self.info['os_distro'], + self.info['os_version'], + ) + return interfaces def _get_usb_controller(self): # Power systems must include USB controller model @@ -404,7 +425,7 @@ def 
_get_input_output_xml(self): """ - input_output = "" + input_output = '' if 'mouse_bus' in self.info.keys(): input_output += mouse % self.info if 'kbd_bus' in self.info.keys(): @@ -420,8 +441,7 @@ def _get_input_output_xml(self): def _get_cpu_xml(self): # Include CPU topology, if provided cpu_topo = self.info.get('cpu_info', {}).get('topology', {}) - return get_cpu_xml(0, (self.info.get('memory').get('current')) << 10, - cpu_topo) + return get_cpu_xml(0, (self.info.get('memory').get('current')) << 10, cpu_topo) def to_vm_xml(self, vm_name, vm_uuid, **kwargs): params = dict(self.info) @@ -449,9 +469,9 @@ def to_vm_xml(self, vm_name, vm_uuid, **kwargs): # Add information of CD-ROM device only if template have info about it. if cdrom_xml is not None: - if not urlparse.urlparse(self.info.get('cdrom', "")).scheme in \ - libvirt_stream_protocols and \ - params.get('iso_stream', False): + if not urllib.parse.urlparse( + self.info.get('cdrom', '') + ).scheme in libvirt_stream_protocols and params.get('iso_stream', False): params['qemu-stream-cmdline'] = cdrom_xml else: params['cdroms'] = cdrom_xml @@ -459,8 +479,8 @@ def to_vm_xml(self, vm_name, vm_uuid, **kwargs): # Set the boot order of VM # TODO: need modify this when boot order edition feature came upstream. if cdrom_xml and params.get('arch') == 's390x': - params['boot_order'] = get_bootorder_xml(['cdrom', 'hd', - 'network']) + params['boot_order'] = get_bootorder_xml( + ['cdrom', 'hd', 'network']) else: params['boot_order'] = get_bootorder_xml() @@ -471,11 +491,10 @@ def to_vm_xml(self, vm_name, vm_uuid, **kwargs): memory = self.info['memory'].get('current') maxmemory = self.info['memory'].get('maxmemory') if maxmemory < memory: - raise OperationFailed("KCHVM0041E", - {'maxmem': str(maxmemory)}) + raise OperationFailed('KCHVM0041E', {'maxmem': str(maxmemory)}) params['memory'] = self.info['memory'].get('current') - params['max_memory'] = "" + params['max_memory'] = '' # if there is not support to memory hotplug in Libvirt or qemu, we # cannot add the tag maxMemory if memory != maxmemory and kwargs.get('mem_hotplug_support', True): @@ -496,7 +515,8 @@ def to_vm_xml(self, vm_name, vm_uuid, **kwargs): # usb controller params['usb_controller'] = self._get_usb_controller() - xml = """ + xml = ( + """ %(qemu-stream-cmdline)s %(name)s @@ -535,7 +555,9 @@ def to_vm_xml(self, vm_name, vm_uuid, **kwargs): - """ % params + """ + % params + ) return xml @@ -587,8 +609,8 @@ def validate_integrity(self): # validate networks integrity networks = self.info.get('networks', []) - invalid_networks = list(set(networks) - - set(self._get_all_networks_name())) + invalid_networks = list( + set(networks) - set(self._get_all_networks_name())) if invalid_networks: invalid['networks'] = invalid_networks @@ -600,11 +622,11 @@ def validate_integrity(self): if pool_name not in self._get_active_storagepools_name(): invalid['storagepools'] = [pool_name] - if disk.get("base") is None: + if disk.get('base') is None: continue - if os.path.exists(disk.get("base")) is False: - invalid['vm-image'] = disk["base"] + if os.path.exists(disk.get('base')) is False: + invalid['vm-image'] = disk['base'] # validate iso integrity # FIXME when we support multiples cdrom devices diff --git a/xmlutils/bootorder.py b/xmlutils/bootorder.py index f8bc6ea98..19acb0fca 100644 --- a/xmlutils/bootorder.py +++ b/xmlutils/bootorder.py @@ -16,7 +16,6 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, 
Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - import lxml.etree as ET from lxml.builder import E @@ -44,10 +43,12 @@ def get_bootorder_xml(boot_order=None): """ boot_xml = '' for device in get_bootorder_node(boot_order): - boot_xml += ET.tostring(device, encoding='utf-8', pretty_print=True) + boot_xml += ET.tostring(device, encoding='utf-8', pretty_print=True).decode( + 'utf-8' + ) return boot_xml def get_bootmenu_node(): - return E.bootmenu(enable="yes", timeout="5000") + return E.bootmenu(enable='yes', timeout='5000') diff --git a/xmlutils/cpu.py b/xmlutils/cpu.py index 7548be29f..7c958d821 100644 --- a/xmlutils/cpu.py +++ b/xmlutils/cpu.py @@ -16,7 +16,6 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - import lxml.etree as ET from lxml.builder import E diff --git a/xmlutils/disk.py b/xmlutils/disk.py index 97298c5bc..4b3121817 100644 --- a/xmlutils/disk.py +++ b/xmlutils/disk.py @@ -16,18 +16,18 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - import errno -import lxml.etree as ET import os import socket import stat import string -import urlparse +import urllib + +import lxml.etree as ET from lxml import objectify from lxml.builder import E - -from wok.exception import InvalidParameter, NotFoundError +from wok.exception import InvalidParameter +from wok.exception import NotFoundError from wok.plugins.kimchi.utils import check_url_path from wok.utils import wok_log @@ -55,36 +55,53 @@ def get_disk_xml(params): try: fd = os.open(path, os.O_RDONLY | os.O_DIRECT) os.close(fd) - wok_log.debug("Disk '%s' supports direct I/O. Setting cache=none" - "to enable live migration" % path) - except OSError, e: + wok_log.debug( + "Disk '%s' supports direct I/O. Setting cache=none" + 'to enable live migration' % path + ) + except OSError as e: if e.errno == errno.EINVAL: - wok_log.debug("Disk '%s' does not support direct I/O: " - "'%s'. Let libvirt sets the default cache mode." % - (path, e.message)) + wok_log.debug( + "Disk '%s' does not support direct I/O: " + "'%s'. Let libvirt sets the default cache mode." 
% (path, e) + ) else: if params['type'] != 'cdrom': driver.set('cache', 'none') - if params.get('pool_type') == "netfs": - driver.set("io", "native") + if params.get('pool_type') == 'netfs': + driver.set('io', 'native') disk.append(driver) # Get device name according to bus and index values - dev = params.get('dev', (BUS_TO_DEV_MAP[params['bus']] + - string.lowercase[params.get('index', 0)])) + dev = params.get( + 'dev', + ( + BUS_TO_DEV_MAP[params['bus']] + + string.ascii_lowercase[params.get('index', 0)] + ), + ) disk.append(E.target(dev=dev, bus=params['bus'])) if params.get('address'): # ide disk target id is always '0' - disk.append(E.address( - type='drive', controller=params['address']['controller'], - bus=params['address']['bus'], target='0', - unit=params['address']['unit'])) + disk.append( + E.address( + type='drive', + controller=params['address']['controller'], + bus=params['address']['bus'], + target='0', + unit=params['address']['unit'], + ) + ) if len(params['path']) == 0: - return (dev, ET.tostring(disk, encoding='utf-8', pretty_print=True)) + return ( + dev, + ET.tostring(disk, encoding='utf-8', + pretty_print=True).decode('utf-8'), + ) if disk_type == 'network': """ @@ -92,7 +109,7 @@ def get_disk_xml(params): """ - output = urlparse.urlparse(params['path']) + output = urllib.parse.urlparse(params['path']) port = str(output.port or socket.getservbyname(output.scheme)) source = E.source(protocol=output.scheme, name=output.path) @@ -105,7 +122,7 @@ def get_disk_xml(params): source.set(DEV_TYPE_SRC_ATTR_MAP[disk_type], params['path']) disk.append(source) - return (dev, ET.tostring(disk, encoding='utf-8', pretty_print=True)) + return dev, ET.tostring(disk, encoding='utf-8', pretty_print=True).decode('utf-8') def _get_disk_type(path): @@ -113,7 +130,7 @@ def _get_disk_type(path): return 'network' if not os.path.exists(path): - raise InvalidParameter("KCHVMSTOR0003E", {'value': path}) + raise InvalidParameter('KCHVMSTOR0003E', {'value': path}) # Check if path is a valid local path if os.path.isfile(path): @@ -123,18 +140,19 @@ def _get_disk_type(path): if stat.S_ISBLK(os.stat(r_path).st_mode): return 'block' - raise InvalidParameter("KCHVMSTOR0003E", {'value': path}) + raise InvalidParameter('KCHVMSTOR0003E', {'value': path}) def get_device_node(dom, dev_name): - xml = dom.XMLDesc(0) + import libvirt + + xml = dom.XMLDesc(libvirt.VIR_DOMAIN_XML_INACTIVE) devices = objectify.fromstring(xml).devices disk = devices.xpath("./disk/target[@dev='%s']/.." 
% dev_name) - if not disk: - raise NotFoundError("KCHVMSTOR0007E", - {'dev_name': dev_name, - 'vm_name': dom.name()}) + raise NotFoundError( + 'KCHVMSTOR0007E', {'dev_name': dev_name, 'vm_name': dom.name()} + ) return disk[0] @@ -151,19 +169,26 @@ def get_vm_disk_info(dom, dev_name): src_type = disk.attrib['type'] if src_type == 'network': host = source.host - path = (source.attrib['protocol'] + '://' + - host.attrib['name'] + ':' + - host.attrib['port'] + source.attrib['name']) + path = ( + source.attrib['protocol'] + + '://' + + host.attrib['name'] + + ':' + + host.attrib['port'] + + source.attrib['name'] + ) else: path = source.attrib[DEV_TYPE_SRC_ATTR_MAP[src_type]] - except: - path = "" - - return {'dev': dev_name, - 'path': path, - 'type': disk.attrib['device'], - 'format': disk.driver.attrib['type'], - 'bus': disk.target.attrib['bus']} + except Exception: + path = '' + + return { + 'dev': dev_name, + 'path': path, + 'type': disk.attrib['device'], + 'format': disk.driver.attrib['type'], + 'bus': disk.target.attrib['bus'], + } def get_vm_disks(dom): diff --git a/xmlutils/graphics.py b/xmlutils/graphics.py index 49609ffc5..b461ba791 100644 --- a/xmlutils/graphics.py +++ b/xmlutils/graphics.py @@ -16,7 +16,6 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - import lxml.etree as ET from lxml.builder import E diff --git a/xmlutils/interface.py b/xmlutils/interface.py index af33ea0cd..ca0199737 100644 --- a/xmlutils/interface.py +++ b/xmlutils/interface.py @@ -16,10 +16,8 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - import lxml.etree as ET from lxml.builder import E - from wok.plugins.kimchi import osinfo @@ -70,7 +68,7 @@ def get_iface_network_xml(params, arch=None, os_distro=None, os_version=None): if mac is not None: interface.append(E.mac(address=mac)) - return ET.tostring(interface, encoding='utf-8', pretty_print=True) + return ET.tostring(interface, encoding='utf-8', pretty_print=True).decode('utf-8') def get_iface_macvtap_xml(params, arch=None): @@ -99,7 +97,7 @@ def get_iface_macvtap_xml(params, arch=None): if mac is not None: interface.append(E.mac(address=mac)) - return ET.tostring(interface, encoding='utf-8', pretty_print=True) + return ET.tostring(interface, encoding='utf-8', pretty_print=True).decode('utf-8') def get_iface_ovs_xml(params, arch=None): @@ -127,4 +125,4 @@ def get_iface_ovs_xml(params, arch=None): if mac is not None: interface.append(E.mac(address=mac)) - return ET.tostring(interface, encoding='utf-8', pretty_print=True) + return ET.tostring(interface, encoding='utf-8', pretty_print=True).decode('utf-8') diff --git a/xmlutils/network.py b/xmlutils/network.py index 2e2966bc1..1f63bda6a 100644 --- a/xmlutils/network.py +++ b/xmlutils/network.py @@ -16,7 +16,6 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - import ipaddr import lxml.etree as ET from lxml.builder import E @@ -33,15 +32,14 @@ def _get_dhcp_elem(**kwargs): """ dhcp = E.dhcp() if 'range' in kwargs.keys(): - dhcp_range = E.range(start=kwargs['range']['start'], - end=kwargs['range']['end']) + 
dhcp_range = E.range( + start=kwargs['range']['start'], end=kwargs['range']['end']) dhcp.append(dhcp_range) if 'hosts' in kwargs.keys(): for host in kwargs['hosts']: - dhcp.append(E.host(mac=host['mac'], - name=host['name'], - ip=host['ip'])) + dhcp.append( + E.host(mac=host['mac'], name=host['name'], ip=host['ip'])) return dhcp if len(dhcp) > 0 else None @@ -73,7 +71,7 @@ def _get_forward_elem(**kwargs): """ - if "mode" in kwargs.keys() and kwargs['mode'] is None: + if 'mode' in kwargs.keys() and kwargs['mode'] is None: return None forward = E.forward() @@ -109,7 +107,7 @@ def to_network_xml(**kwargs): network.append(E.virtualport(type='openvswitch')) # None means is Isolated network, {} means default mode nat - params = kwargs.get('forward', {"mode": None}) + params = kwargs.get('forward', {'mode': None}) forward = _get_forward_elem(**params) if forward is not None: network.append(forward) @@ -117,7 +115,7 @@ def to_network_xml(**kwargs): if 'net' in kwargs: network.append(_get_ip_elem(**kwargs)) - return ET.tostring(network) + return ET.tostring(network).decode('utf-8') def create_vlan_tagged_bridge_xml(bridge, interface, vlan_id): @@ -125,29 +123,25 @@ def create_vlan_tagged_bridge_xml(bridge, interface, vlan_id): vlan.set('tag', vlan_id) m = E.interface( E.start(mode='onboot'), - E.bridge( - E.interface( - vlan, - type='vlan', - name='.'.join([interface, vlan_id]))), + E.bridge(E.interface(vlan, type='vlan', + name='.'.join([interface, vlan_id]))), type='bridge', - name=bridge) + name=bridge, + ) return ET.tostring(m) def create_linux_bridge_xml(bridge, interface, iface_xml): m = E.interface( E.start(mode='onboot'), - E.bridge( - E.interface( - type='ethernet', - name=interface)), + E.bridge(E.interface(type='ethernet', name=interface)), type='bridge', - name=bridge) + name=bridge, + ) # use same network configuration of lower interface iface = ET.fromstring(iface_xml) - for element in iface.iter("protocol"): + for element in iface.iter('protocol'): m.append(element) return ET.tostring(m) @@ -156,7 +150,7 @@ def create_linux_bridge_xml(bridge, interface, iface_xml): def get_no_network_config_xml(iface_xml): # remove all protocol elements from interface xml xml = ET.fromstring(iface_xml) - for element in xml.iter("protocol"): + for element in xml.iter('protocol'): element.getparent().remove(element) return ET.tostring(xml) diff --git a/xmlutils/qemucmdline.py b/xmlutils/qemucmdline.py index 50575b2bb..1871c471a 100644 --- a/xmlutils/qemucmdline.py +++ b/xmlutils/qemucmdline.py @@ -16,11 +16,10 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - import lxml.etree as ET from lxml.builder import ElementMaker -QEMU_NAMESPACE = "http://libvirt.org/schemas/domain/qemu/1.0" +QEMU_NAMESPACE = 'http://libvirt.org/schemas/domain/qemu/1.0' def get_qemucmdline_xml(args): @@ -34,11 +33,10 @@ def get_qemucmdline_xml(args): drive=drive-%(bus)s0-1-0,id=%(bus)s0-1-0'/> """ - EM = ElementMaker(namespace=QEMU_NAMESPACE, - nsmap={'qemu': QEMU_NAMESPACE}) + EM = ElementMaker(namespace=QEMU_NAMESPACE, nsmap={'qemu': QEMU_NAMESPACE}) root = EM.commandline() - for opt, value in args.iteritems(): + for opt, value in args.items(): root.append(EM.arg(value=opt)) root.append(EM.arg(value=value)) diff --git a/xmlutils/serial.py b/xmlutils/serial.py index f61eefa44..46898b7bd 100644 --- a/xmlutils/serial.py +++ b/xmlutils/serial.py @@ -16,7 +16,6 @@ 
# You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - import lxml.etree as ET from lxml.builder import E @@ -44,23 +43,23 @@ def get_serial_xml(params): """ # pcc serial console - if params["arch"] in ["ppc", "ppc64"]: - console = E.console(type="pty") - console.append(E.target(type="serial", port='1')) - console.append(E.address(type="spapr-vio", reg="0x30001000")) + if params['arch'] in ['ppc', 'ppc64']: + console = E.console(type='pty') + console.append(E.target(type='serial', port='1')) + console.append(E.address(type='spapr-vio', reg='0x30001000')) return ET.tostring(console, encoding='utf-8', pretty_print=True) # for s390x - elif params["arch"] in ["s390x"]: + elif params['arch'] in ['s390x']: # if params doesn't have console parameter, use virtio as default console_type = params.get('console', 'virtio') - console = E.console(type="pty") + console = E.console(type='pty') console.append(E.target(type=console_type, port='0')) return ET.tostring(console, encoding='utf-8', pretty_print=True) # for x else: - serial = E.serial(type="pty") + serial = E.serial(type='pty') serial.append(E.target(port='0')) - console = E.console(type="pty") - console.append(E.target(type="serial", port='0')) + console = E.console(type='pty') + console.append(E.target(type='serial', port='0')) return ET.tostring(serial, encoding='utf-8', pretty_print=True) + \ ET.tostring(console, encoding='utf-8', pretty_print=True) diff --git a/xmlutils/usb.py b/xmlutils/usb.py index 84d2aebf2..e54051903 100644 --- a/xmlutils/usb.py +++ b/xmlutils/usb.py @@ -16,7 +16,6 @@ # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - import lxml.etree as ET from lxml.builder import E