From a17bd531836758e4f6bbd64543518360d8e45492 Mon Sep 17 00:00:00 2001
From: vanzhiganov
Date: Sat, 7 May 2016 18:29:28 +0300
Subject: [PATCH] new version with QEMU

---
 .gitignore                       |   3 +
 README.md                        |   7 +
 SWSCloudNode/common.py           | 101 ++++++++
 SWSCloudNode/libvirt/__init__.py |   0
 SWSCloudNode/lxc/__init__.py     | 407 +++++++++++++++++++++++++++++++
 SWSCloudNode/qemu/__init__.py    | 213 ++++++++++++++++
 cloud_node_agent.py              |  22 +-
 cloud_node_statistics.py         |   3 +-
 cloudcli.py                      |   4 +
 setup.py                         |   4 +-
 test.py                          |   6 +
 11 files changed, 766 insertions(+), 4 deletions(-)
 create mode 100644 SWSCloudNode/common.py
 delete mode 100644 SWSCloudNode/libvirt/__init__.py
 create mode 100644 SWSCloudNode/lxc/__init__.py
 create mode 100644 SWSCloudNode/qemu/__init__.py
 create mode 100644 cloudcli.py
 create mode 100644 test.py

diff --git a/.gitignore b/.gitignore
index 0d20b64..c185202 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,4 @@
 *.pyc
+build/
+dist/
+.idea/
\ No newline at end of file
diff --git a/README.md b/README.md
index b7901b3..575119f 100644
--- a/README.md
+++ b/README.md
@@ -128,3 +128,10 @@ Check exists container with name 'test'
 Start container
 
 ```lxc().start("control.container.ly")```
+
+
+## Develop
+
+### Publish to PyPI server
+
+`python setup.py sdist register -r local upload -r local`
diff --git a/SWSCloudNode/common.py b/SWSCloudNode/common.py
new file mode 100644
index 0000000..fa20c2f
--- /dev/null
+++ b/SWSCloudNode/common.py
@@ -0,0 +1,101 @@
+# -*- coding: utf-8 -*-
+
+import datetime
+import json
+import os
+import platform
+import sys
+
+# for network
+import socket
+import fcntl
+import struct
+import array
+import urllib
+
+
+class Common:
+    def load_config(self):
+        self.settings_node = self.settings_node()
+        self.settings_vm = self.settings_vm()
+
+    def time(self):
+        """
+        returns the current time as a string, for console output
+        """
+        return str(datetime.datetime.now())
+
+    def error(self, code):
+        """
+        prints and returns an error structure for the given error code
+        """
+        if code == 1:
+            message = "Invalid JSON-RPC. Unknown RPC version"
+            print self.time() + " - " + message
+            return {"error": {"code": code, "message": message}}
+
+    def settings_node(self):
+        """
+        node configuration
+        """
+        if os.path.isfile('/etc/gocloud/node/node.json'):
+            with open('/etc/gocloud/node/node.json') as nodesettings_file:
+                conf = json.load(nodesettings_file)
+        else:
+            print "[Errno 2] No such file or directory: '/etc/gocloud/node/node.json'"
+            sys.exit(2)
+
+        return conf
+
+    def settings_vm(self):
+        """
+        configuration parameters for virtual machines
+        """
+        if os.path.isfile('/etc/gocloud/node/vmsettings.json'):
+            with open('/etc/gocloud/node/vmsettings.json') as vmsettings_file:
+                conf = json.load(vmsettings_file)
+        else:
+            print "[Errno 2] No such file or directory: '/etc/gocloud/node/vmsettings.json'"
+            sys.exit(2)
+
+        return conf
+
+    def linux_distribution(self):
+        try:
+            return platform.linux_distribution()
+        except Exception:
+            return 0
+
+    # network
+    def all_interfaces(self):
+        # arbitrary. raise if needed.
+        max_possible = 128
+        bytes = max_possible * 32
+        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+        names = array.array('B', '\0' * bytes)
+        outbytes = struct.unpack('iL', fcntl.ioctl(
+            s.fileno(),
+            0x8912,  # SIOCGIFCONF
+            struct.pack('iL', bytes, names.buffer_info()[0])
+        ))[0]
+        namestr = names.tostring()
+        lst = []
+        for i in range(0, outbytes, 40):
+            name = namestr[i:i+16].split('\0', 1)[0]
+            ip = namestr[i+20:i+24]
+            lst.append((name, ip))
+        return lst
+
+    def format_ip(self, addr):
+        return str(ord(addr[0])) + '.' + str(ord(addr[1])) + '.' + str(ord(addr[2])) + '.' + str(ord(addr[3]))
+
+    def check_file_exists(self, filename):
+        if os.path.isfile(filename):
+            return True
+        else:
+            return False
+
+    def server_request(self, server_url, request):
+        params = {
+            "request": json.dumps(request)
+        }
+        return urllib.urlopen(server_url, urllib.urlencode(params)).read()
diff --git a/SWSCloudNode/libvirt/__init__.py b/SWSCloudNode/libvirt/__init__.py
deleted file mode 100644
index e69de29..0000000
diff --git a/SWSCloudNode/lxc/__init__.py b/SWSCloudNode/lxc/__init__.py
new file mode 100644
index 0000000..288fc29
--- /dev/null
+++ b/SWSCloudNode/lxc/__init__.py
@@ -0,0 +1,407 @@
+# coding: utf-8
+
+import subprocess
+import logging
+import threading
+import select
+import pty
+import os
+import signal
+
+
+class ContainerAlreadyExists(Exception):
+    pass
+
+
+class ContainerAlreadyRunning(Exception):
+    pass
+
+
+class ContainerNotExists(Exception):
+    pass
+
+
+_logger = logging.getLogger("pylxc")
+_monitor = None
+
+
+class _LXCMonitor(threading.Thread):
+    def __init__(self):
+        threading.Thread.__init__(self)
+        self._process = None
+        self._monitors = {}
+
+    def run(self):
+        master, slave = pty.openpty()
+        cmd = ['/usr/bin/lxc-monitor', '-n', '.*']
+        self._process = subprocess.Popen(cmd, stdout=slave, bufsize=1)
+        stdout = os.fdopen(master)
+        while self._process.poll() is None:
+            ready, _, _ = select.select([stdout], [], [], 0.1)
+            if ready:
+                logging.debug("Waiting for state change")
+                state = stdout.readline()
+                inf = state.strip().split()
+                container = inf[0].strip("'")
+                state = inf[-1].strip('[]')
+                if container in self._monitors:
+                    logging.debug("State of container '%s' changed to '%s'", container, state)
+                    self._monitors[container](state)
+        _logger.info("LXC Monitor stopped!")
+
+    def add_monitor(self, name, callback):
+        self._monitors[name] = callback
+
+    def rm_monitor(self, name):
+        self._monitors.pop(name)
+
+    def is_monitored(self, name):
+        return name in self._monitors
+
+    def kill(self):
+        try:
+            self._process.terminate()
+            self._process.wait()
+        except:
+            pass
+        self.join()
+
+
+class lxc():
+    def __init__(self):
+        logging.debug("")
+
+    def list(self, status=None):
+        """
+        :return: ['container_first', 'container_second']
+        """
+        if status in ['active', 'frozen', 'running', 'stopped', 'nesting']:
+            path = "--%s" % status
+        else:
+            path = ""
+
+        cmd = ['/usr/bin/lxc-ls', path]
+        out = subprocess.check_output(cmd).splitlines()
+        # print out
+        return out
+
+    def exists(self, name):
+        """
+        checks if a given container is defined or not
+        """
+        if name in self.list():
+            return True
+        return False
+
+    def start(self, name, config_file=None):
+        """
+        starts a container in daemon mode
+        """
+        if not self.exists(name):
+            raise ContainerNotExists("The container (%s) does not exist!" % name)
+
+        if name in self.list("running"):
+            raise ContainerAlreadyRunning('The container %s is already started!' % name)
+
+        cmd = ['lxc-start', '-n', name, '-d']
+        if config_file:
+            cmd += ['-f', config_file]
+
+        return subprocess.check_call(cmd)
+
+    def stop(self, name):
+        """
+        stops a container
+        """
+        if not self.exists(name):
+            raise ContainerNotExists("The container (%s) does not exist!" % name)
+
+        cmd = ['/usr/bin/lxc-stop', '-n', name]
+
+        try:
+            subprocess.check_call(cmd)
+            return True
+        except Exception:
+            return False
+
+    def destroy(self, name):
+        """
+        removes a container (stopping it first if it is running);
+        raises ContainerNotExists exception if the specified name is not created
+        """
+        if not self.exists(name):
+            raise ContainerNotExists("The container (%s) does not exist!" % name)
+
+        # todo: check status. If status not STOPPED - run method self.stop(name)
+        # todo: add condition
+        self.stop(name)
+
+        cmd = ['/usr/bin/lxc-destroy', '-f', '-n', name]
+
+        return subprocess.check_call(cmd)
+
+    def info(self, name):
+        """
+        returns info dict about the specified container
+        """
+        #
+        if not self.exists(name):
+            raise ContainerNotExists("The container (%s) does not exist!" % name)
+        #
+        cmd = ['/usr/bin/lxc-info', '-n', name, "-H"]
+        out = subprocess.check_output(cmd).splitlines()
+        clean = []
+        info = {}
+        #
+        for line in out:
+            if line not in clean:
+                clean.append(line)
+        #
+        for line in clean:
+            key, value = line.split(":")
+
+            # strip
+            key = key.lstrip()
+            value = value.lstrip()
+            #
+            key = key.replace(" ", "_")
+
+            info[key.lower()] = value
+
+        # get container size
+        info['size'] = self.__get_container_size(name)
+        return info
+
+    def __get_container_size(self, name):
+        cmd = ['/usr/bin/du', '--total', '-s', '/var/lib/lxc/%s' % name]
+        out = subprocess.check_output(cmd).splitlines()
+        size = 0
+        for l in out:
+            key, value = l.split('\t')
+            if value == 'total':
+                size = key
+        # return the accumulated "total" value, not the last parsed key
+        return int(size)
+
+    def freeze(self, name):
+        """
+        freezes the container
+        """
+        if not self.exists(name):
+            raise ContainerNotExists("The container (%s) does not exist!" % name)
+        cmd = ['/usr/bin/lxc-freeze', '-n', name]
+        subprocess.check_call(cmd)
+
+    def unfreeze(self, name):
+        """
+        unfreezes the container
+        """
+        if not self.exists(name):
+            raise ContainerNotExists("The container (%s) does not exist!" % name)
+        cmd = ['lxc-unfreeze', '-n', name]
+        subprocess.check_call(cmd)
+
+    def notify(self, name, states, callback):
+        """
+        executes the callback function with no parameters when the container reaches the specified state or states
+        states can be or-ed or and-ed
+        notify('test', 'STOPPED', letmeknow)
+
+        notify('test', 'STOPPED|RUNNING', letmeknow)
+        """
+        if not self.exists(name):
+            raise ContainerNotExists("The container (%s) does not exist!" % name)
+
+        cmd = ['lxc-wait', '-n', name, '-s', states]
+        def th():
+            subprocess.check_call(cmd)
+            callback()
+        _logger.info("Waiting on states %s for container %s", states, name)
+        threading.Thread(target=th).start()
+
+    def checkconfig(self):
+        """
+        returns the output of lxc-checkconfig
+        """
+        cmd = ['lxc-checkconfig']
+        return subprocess.check_output(cmd).replace('[1;32m', '').replace('[1;33m', '').replace('[0;39m', '').replace('[1;32m', '').replace(' ', '').split('\n')
+
+    def create(self, name, config_file=None, template=None, backing_store=None, template_options=None):
+        """
+        Create a new container
+        raises ContainerAlreadyExists exception if the container name is reserved already.
+
+        :param template_options: Options passed to the specified template
+        :type template_options: list or None
+        """
+        if self.exists(name):
+            raise ContainerAlreadyExists("The Container %s is already created!" % name)
+
+        command = list()
+        command.append("lxc-create -n %s" % name)
+
+        if config_file:
+            command.append(' -f %s' % config_file)
+        if template:
+            command.append(' -t %s' % template)
+        if backing_store:
+            command.append(' -B %s' % backing_store)
+        if template_options:
+            command.append(' -- %s' % template_options)
+
+        print " ".join(command)
+        print
+        # create = subprocess.check_call(command, shell=True)
+        create = subprocess.check_call(" ".join(command), shell=True)
+        print
+        print create
+        print
+
+        # if create == 0:
+        #     if not self.exists(name):
+        #         _logger.critical("The Container %s doesn't seem to be created! (options: %s)", name, command[3:])
+        #         raise ContainerNotExists("The container (%s) does not exist!" % name)
+        #
+        #     _logger.info("Container %s has been created with options %s", name, command[3:])
+        #     return False
+        return True
+
+    def reset_password(self, container_name, username, password):
+        call = [
+            'echo',
+            '"%s:${PASSWORD:-%s}"' % (username, password),
+            "|",
+            "chroot",
+            "/var/lib/lxc/%s/rootfs/ chpasswd" % container_name
+        ]
+        # shell=True expects a single command string, so join the pieces first
+        subprocess.check_call(" ".join(call), shell=True)
+        # subprocess.call("echo \"ubuntu:${PASSWORD:-%(password)s}\" | chroot /var/lib/lxc/%(hostname)s/rootfs/ chpasswd" % task['parameters'], shell=True)
+        return True
+
+# def running():
+#     '''
+#     returns a list of the currently running containers
+#     '''
+#     return all_as_dict()['Running']
+
+
+# def stopped():
+#     '''
+#     returns a list of the stopped containers
+#     '''
+#     return all_as_dict()['Stopped']
+
+
+# def all_as_dict():
+#     '''
+#     returns a dict {'Running': ['cont1', 'cont2'],
+#                     'Stopped': ['cont3', 'cont4']
+#                    }
+#
+#     '''
+#     cmd = ['lxc-ls']
+#     out = subprocess.check_output(cmd).splitlines()
+#     print out
+#     stopped = []
+#     running = []
+#     frozen = []
+#     current = None
+#     for c in out:
+#         c = c.strip()
+#         if c == 'RUNNING':
+#             current = running
+#             continue
+#         if c == 'STOPPED':
+#             current = stopped
+#             continue
+#         if c == 'FROZEN':
+#             current = frozen
+#             continue
+#         if not len(c):
+#             continue
+#         current.append(c)
+#     return {'Running': running,
+#             'Stopped': stopped,
+#             'Frozen': frozen}
+
+
+# def all_as_list():
+#     '''
+#     returns a list of all defined containers
+#     '''
+#     as_dict = all_as_dict()
+#     containers = as_dict['Running'] + as_dict['Frozen'] + as_dict['Stopped']
+#     containers_list = []
+#     for i in containers:
+#         i = i.replace(' (auto)', '')
+#         containers_list.append(i)
+#     return containers_list
+
+
+# def kill(name, signal):
+#     '''
+#     sends a kill signal to process 1 of the container
+#     :param signal: numeric signal
+#     '''
+#     if not exists(name):
+#         raise ContainerNotExists("The container (%s) does not exist!" % name)
+#     cmd = ['lxc-kill', '--name=%s' % name, signal]
+#     subprocess.check_call(cmd)
+
+
+# def shutdown(name, wait=False, reboot=False):
+#     '''
+#     graceful shutdown sent to the container
+#     :param wait: should we wait for the shutdown to complete?
+#     :param reboot: reboot a container, ignores wait
+#     '''
+#     if not exists(name):
+#         raise ContainerNotExists("The container (%s) does not exist!" % name)
+#     cmd = ['lxc-shutdown', '-n', name]
+#     if wait:
+#         cmd += ['-w']
+#     if reboot:
+#         cmd += ['-r']
+#
+#     subprocess.check_call(cmd)
+
+
+# def monitor(name, callback):
+#     '''
+#     monitors actions on the specified container,
+#     callback is a function to be called on
+#     '''
+#     global _monitor
+#     if not exists(name):
+#         raise ContainerNotExists("The container (%s) does not exist!" % name)
+#     if _monitor:
+#         if _monitor.is_monitored(name):
+#             raise Exception("You are already monitoring this container (%s)" % name)
+#     else:
+#         _monitor = _LXCMonitor()
+#         logging.info("Starting LXC Monitor")
+#         _monitor.start()
+#         def kill_handler(sg, fr):
+#             stop_monitor()
+#         signal.signal(signal.SIGTERM, kill_handler)
+#         signal.signal(signal.SIGINT, kill_handler)
+#     _monitor.add_monitor(name, callback)
+
+
+# def unmonitor(name):
+#     if not exists(name):
+#         raise ContainerNotExists("The container (%s) does not exist!" % name)
+#     if not _monitor:
+#         raise Exception("LXC Monitor is not started!")
+#     if not _monitor.is_monitored(name):
+#         raise Exception("This container (%s) is not monitored!" % name)
+#     _monitor.rm_monitor(name)
+
+
+# def stop_monitor():
+#     global _monitor
+#     if _monitor:
+#         logging.info("Killing LXC Monitor")
+#         _monitor.kill()
+#         _monitor = None
+#         signal.signal(signal.SIGTERM, signal.SIG_DFL)
+#         signal.signal(signal.SIGINT, signal.SIG_DFL)
diff --git a/SWSCloudNode/qemu/__init__.py b/SWSCloudNode/qemu/__init__.py
new file mode 100644
index 0000000..16a9546
--- /dev/null
+++ b/SWSCloudNode/qemu/__init__.py
@@ -0,0 +1,213 @@
+# -*- coding: utf-8 -*-
+
+import os
+import libvirt
+import subprocess
+from SWSCloudNode.common import Common
+
+
+class QEMU:
+    def __init__(self):
+        # qemu+ssh://root@laforge.usersys.redhat.com/system
+        self.conn = libvirt.open("qemu:///system")
+
+    def list(self):
+        names = self.conn.listDefinedDomains()
+        domains = map(self.conn.lookupByName, names)
+
+        ids = self.conn.listDomainsID()
+        running = map(self.conn.lookupByID, ids)
+
+        columns = 3
+
+        c = 0
+        n = 0
+        online = {}
+        offline = {}
+
+        for row in map(None, *[iter(domains)] * columns):
+            for domain in row:
+                if domain:
+                    # print str(info(domain))
+                    c += 1
+                    offline[c] = self._info(domain)
+
+        for row in map(None, *[iter(running)] * columns):
+            for domain in row:
+                if domain:
+                    n += 1
+                    online[n] = self._info(domain)
+
+        return {
+            "online": online,
+            "offline": offline
+        }
+
+    def _info(self, dom):
+        states = {
+            libvirt.VIR_DOMAIN_NOSTATE: 'no state',
+            libvirt.VIR_DOMAIN_RUNNING: 'running',
+            libvirt.VIR_DOMAIN_BLOCKED: 'blocked on resource',
+            libvirt.VIR_DOMAIN_PAUSED: 'paused by user',
+            libvirt.VIR_DOMAIN_SHUTDOWN: 'being shut down',
+            libvirt.VIR_DOMAIN_SHUTOFF: 'shut off',
+            libvirt.VIR_DOMAIN_CRASHED: 'crashed',
+        }
+
+        [state, maxmem, mem, ncpu, cputime] = dom.info()
+        # return '%s is %s,' % (dom.name(), states.get(state, state))
+        return {
+            "hostname": dom.name(),
+            "info": {
+                "state": state,
+                "maxmem": maxmem,
+                "memory": mem,
+                "ncpu": ncpu,
+                "cputime": cputime
+            }
+        }
+
+    def start(self, hostname):
+        """
+        Starts a VM through the native libvirt bindings.
+
+        Params
+            :hostname: str
+        Return:
+            :bool:
+        """
+        conn = libvirt.open("qemu:///system")
+        action = conn.lookupByName(hostname)
+
+        try:
+            action.create()
+        except libvirt.libvirtError as error:
+            print error
+            return False
+
+        return True
+
+    def stop(self, hostname):
+        """
+        Stops a virtual machine through the native libvirt bindings.
+        """
+        conn = libvirt.open("qemu:///system")
+        action = conn.lookupByName(hostname)
+
+        try:
+            action.destroy()
+        except libvirt.libvirtError as error:
+            print error
+            return False
+
+        return True
+
+    def restart(self, hostname):
+        """
+        Restarts a virtual machine, using the stop and start methods.
+        """
+        self.stop(hostname)
+
+        return self.start(hostname)
+
+    def delete(self, hostname):
+        """
+        Deletes a virtual machine.
+        """
+        os.popen("/usr/bin/virsh shutdown %s" % hostname, "r")
"r") + os.popen("/usr/bin/virsh undefine %s" % hostname, "r") + os.popen("/bin/rm -r /var/lib/qemu/images/%s/" % hostname, "r") + + return True + + def create(self, cores, memory, storage, hostname, ip, dns1, dns2, password, os_name, os_suite): + """ + функция создания виртуальной машины + """ + comm = Common() + comm.load_config() + + #print os_suite + + #return {} + #load plan list + #with open('../config/plan.json') as plandata_file: + # plan = json.load(plandata_file) + + # Create directory for new VM + #subprocess.Popen(['mkdir', '-p', '/var/lib/qemu/images/%s/templates/qemu' % hostname], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + os.popen('mkdir -p /var/lib/qemu/images/%s/templates/qemu' % hostname, "r") + #subprocess.Popen(['cp', '/etc/vmbuilder/qemu/*', '/var/lib/qemu/images/%s/templates/qemu' % hostname], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + os.popen("cp /etc/vmbuilder/qemu/* /var/lib/qemu/images/%s/templates/qemu" % hostname) + + # generate partition file + os.popen("cp vm/storage/%s.partition /var/lib/qemu/images/%s/partition" % (storage, hostname), "r") + os.popen("cp vm/firstboot.sh /var/lib/qemu/images/%s/boot.sh" % hostname, "r") + + values = { + "hostname": hostname, + "os_name": os_name, + "os_suite": os_suite, + "mirror": comm.settings_vm['mirror'], + "ip": ip, + "gw": comm.settings_vm['gw'], + "password": password, + "memory": memory, + "cores": cores + } + print "------" + print values + print "------" + + #qqq = [ + # "cd", + # "/var/lib/qemu/images/%s;" % values['hostname'], + # "/usr/bin/vmbuilder", "kvm", values['os_name'], "--suite=%s" % values['os_suite']] + #aaa = subprocess.Popen(qqq, stdout = subprocess.PIPE, stderr = subprocess.PIPE) + + #print "cd /var/lib/qemu/images/%(hostname)s;/usr/bin/vmbuilder kvm %(os_name)s --suite=%(os_suite)s + # --flavour=virtual --arch=amd64 --mirror=%(mirror)s -o --qemu=qemu:///system --ip=%(ip)s + # --gw=%(gw)s --part=/var/lib/qemu/images/%(hostname)s/partition --templates=templates --user=administrator + # --name=administrator --pass=%(password)s --addpkg=linux-image-generic --addpkg=vim-nox --addpkg=nano + # --addpkg=unattended-upgrades --addpkg=acpid --firstboot=/var/lib/qemu/images/%(hostname)s/boot.sh + # --mem=%(memory)s --cpus=%(cores)s --hostname=%(hostname)s --bridge=br0" % values + + c = [ + "cd", + "/var/lib/qemu/images/%s;" % values['hostname'], + "/usr/bin/vmbuilder", + "kvm", + values['os_name'], + "--suite=%s" % values['os_suite'], + "--flavour=virtual", + "--arch=amd64", + "--mirror=%s" % values['mirror'], + "-o", + "--qemu=qemu:///system", + "--ip=%s" % values['ip'], + "--gw=%s" % values['gw'], + "--part=/var/lib/qemu/images/%s/partition" % values['hostname'], + "--templates=templates", + "--user=administrator", + "--name=administrator", + "--pass=%s" % values['password'], + "--addpkg=linux-image-generic", + "--addpkg=vim-nox", + "--addpkg=nano", + "--addpkg=unattended-upgrades", + "--addpkg=acpid", + "--firstboot=/var/lib/qemu/images/%s/boot.sh" % values['hostname'], + "--mem=%s" % values['memory'], + "--cpus=%s" % values['cores'], + "--hostname=%s" % values['hostname'], + "--bridge=br0" + ] + #subprocess.Popen(c, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + + os.popen("cd /var/lib/qemu/images/%(hostname)s;/usr/bin/vmbuilder kvm %(os_name)s --suite=%(os_suite)s --flavour=virtual --arch=amd64 --mirror=%(mirror)s -o --qemu=qemu:///system --ip=%(ip)s --gw=%(gw)s --part=/var/lib/qemu/images/%(hostname)s/partition --templates=templates --user=administrator --name=administrator 
+        # sys.exit(2)
+        print "-----"
+        return True
diff --git a/cloud_node_agent.py b/cloud_node_agent.py
index 4e7ec99..363cd0e 100644
--- a/cloud_node_agent.py
+++ b/cloud_node_agent.py
@@ -1,3 +1,4 @@
+#!/usr/bin/env python
 # coding: utf-8
 
 import time
@@ -12,7 +13,13 @@ allowed_actions = [
     'container_create',
     'container_start',
     'container_stop',
-    'container_restart'
+    'container_restart',
+
+    'vm_create',
+    'vm_delete',
+    'vm_start',
+    'vm_stop',
+    'vm_restart',
 ]
 
 logging.debug("Application started")
@@ -121,3 +128,16 @@ while True:
                 logging.warning(e)
                 pass
             nodeclient.task_status_update(task['id'], 2)
+
+        if task.get('task') == 'vm_create':
+            pass
+        if task.get('task') == 'vm_start':
+            pass
+        if task.get('task') == 'vm_restart':
+            pass
+        if task.get('task') == 'vm_stop':
+            pass
+        if task.get('task') == 'vm_delete':
+            pass
+
+logging.debug("Application ended")
diff --git a/cloud_node_statistics.py b/cloud_node_statistics.py
index 85900c1..a452b52 100644
--- a/cloud_node_statistics.py
+++ b/cloud_node_statistics.py
@@ -1,4 +1,5 @@
-# coding: utf-8
+#!/usr/bin/env python
+# coding: utf-8
 
 from SWSCloudNode import Node
 from SWSCloudNode import lxc
diff --git a/cloudcli.py b/cloudcli.py
new file mode 100644
index 0000000..e3ea131
--- /dev/null
+++ b/cloudcli.py
@@ -0,0 +1,4 @@
+#!/usr/bin/env python
+
+from SWSCloudNode.settings import settings
+
diff --git a/setup.py b/setup.py
index 26ab51f..72066ee 100644
--- a/setup.py
+++ b/setup.py
@@ -4,18 +4,18 @@ from setuptools import setup
 
 setup(
     name='SWSCloudNode',
-    version='2.0.1',
+    version='3.0.1-beta',
     author='Vyacheslav Anzhiganov',
     author_email='vanzhiganov@ya.ru',
     packages=[
         'SWSCloudNode',
         'SWSCloudNode.lxc',
+        'SWSCloudNode.qemu',
     ],
     scripts=[
         'cloud_node_agent.py',
         'cloud_node_statistics.py',
     ],
-    package_data=[],
     install_requires=[
         'requests'
     ],
diff --git a/test.py b/test.py
new file mode 100644
index 0000000..c860caa
--- /dev/null
+++ b/test.py
@@ -0,0 +1,6 @@
+#!/usr/bin/env python
+# coding: utf-8
+
+from SWSCloudNode.qemu import QEMU
+
+print QEMU().list()
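
Usage sketch (not part of the patch): a minimal example of how the new `SWSCloudNode.qemu.QEMU` wrapper might be driven, in the spirit of `test.py`. It assumes a local libvirt daemon reachable at `qemu:///system`; the domain name `example-vm` is a hypothetical placeholder.

```
#!/usr/bin/env python
# coding: utf-8
# Minimal sketch, assuming libvirt runs locally and a domain named
# "example-vm" (hypothetical) is already defined.

from SWSCloudNode.qemu import QEMU

qemu = QEMU()

# list() returns a dict with "online" and "offline" domain descriptions
print qemu.list()

# start() returns True on success and False if libvirt raised an error
if qemu.start("example-vm"):
    print "example-vm started"
```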