zVMCloudConnector-1.6.3/0000775000175000017510000000000014315232035014520 5ustar ruirui00000000000000zVMCloudConnector-1.6.3/tox.ini0000664000175000017510000000335514266177632016061 0ustar ruirui00000000000000[tox] minversion = 1.6 envlist = pep8,py36,docs skipsdist = True [testenv] usedevelop = True install_command = pip install -U {opts} {packages} setenv = VIRTUAL_ENV={envdir} deps = -r{toxinidir}/requirements.txt -r{toxinidir}/test-requirements.txt commands-zvmsdk = coverage run -m unittest discover -v -s {toxinidir}/zvmsdk/tests/unit commands-smt = coverage run -m unittest discover -v -s {toxinidir}/smtLayer/tests/unit [testenv:pep8] deps = flake8 commands = flake8 flake8 {toxinidir}/scripts/sdkserver [flake8] max-line-length = 120 ignore = E121,E122,E123,E124,E125,E126,E127,E128,E129,E131,E251,H405,W504,W605 exclude = .venv,.git,.tox,dist,doc,*openstack/common*,sample,*lib/python*,*egg,build,tools,*.py.*.py [testenv:docs] whitelist_externals = rm commands = rm -rf doc/build python tools/generate_conf.py python tools/generate_errcode_csv.py rm -fr doc/source/zvmsdk.conf.sample rm -fr doc/source/errcode.csv /bin/mv ./zvmsdk.conf.sample doc/source/zvmsdk.conf.sample /bin/mv ./errcode.csv doc/source/errcode.csv sphinx-build -W -b html -d doc/build/doctrees doc/source doc/build/html [testenv:py36] whitelist_externals = rm coverage mkdir commands = rm -f /tmp/sdk_guest.sqlite rm -f /tmp/sdk_image.sqlite rm -f /tmp/sdk_fcp.sqlite rm -f /tmp/sdk_network.sqlite rm -f /tmp/FakeID rm -rf test-results {[testenv]commands-smt} {[testenv]commands-zvmsdk} mkdir test-results coverage html -d test-results/cover [testenv:py36-zvmsdk] whitelist_externals = rm commands = rm -f /tmp/sdk_guest.sqlite rm -f /tmp/sdk_image.sqlite rm -f /tmp/sdk_fcp.sqlite rm -f /tmp/sdk_network.sqlite rm -f /tmp/FakeID {[testenv]commands-zvmsdk} [testenv:py36-smt] commands = {[testenv]commands-smt} zVMCloudConnector-1.6.3/requirements.txt0000664000175000017510000000024013732306202020000 0ustar 
ruirui00000000000000jsonschema>=2.3.0 # MIT netaddr>=0.7.5 # BSD PyJWT>=1.0.1 # MIT requests>=2.6.0 # Apache-2.0 Routes>=2.2 # MIT six>=1.9.0 # MIT WebOb>=1.2.3 # MIT PyYAML>=3.10 zVMCloudConnector-1.6.3/smtLayer/0000775000175000017510000000000014315232035016320 5ustar ruirui00000000000000zVMCloudConnector-1.6.3/smtLayer/smt.py0000664000175000017510000000714413672563714017523 0ustar ruirui00000000000000# Daemon for Systems Management Ultra Thin Layer # # Copyright 2017 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from time import time from smtLayer.ReqHandle import ReqHandle from zvmsdk import config from zvmsdk import log version = '1.0.0' # Version of this function. class SMT(object): """ Systems Management Ultra Thin daemon. """ def __init__(self, **kwArgs): """ Constructor Input: cmdName= - Specifies the name of the command that drives SMT. captureLogs= Enables or disables log capture for all requests. """ self.reqIdPrefix = int(time() * 100) self.reqCnt = 0 # Number of requests so far logger = log.Logger('SMT') logger.setup(log_dir=config.CONF.logging.log_dir, log_level='logging.DEBUG', log_file_name='smt.log') self.logger = logger.getlog() # Initialize the command name associated with this SMT instance. if 'cmdName' in kwArgs.keys(): self.cmdName = kwArgs['cmdName'] else: self.cmdName = "" # Setup the log capture flag. 
if 'captureLogs' in kwArgs.keys(): self.captureLogs = kwArgs['captureLogs'] else: self.captureLogs = False # Don't capture & return Syslog entries def disableLogCapture(self): """ Disable capturing of log entries for all requests. """ self.captureLogs = False # Don't capture Syslog entries def enableLogCapture(self): """ Enable capturing of log entries for all requests. """ self.captureLogs = True # Begin capturing & returning Syslog entries def request(self, requestData, **kwArgs): """ Process a request. Input: Request as either a string or a list. captureLogs= Enables or disables log capture per request. This overrides the value from SMT. requestId= to pass a value for the request Id instead of using one generated by SMT. Output: Dictionary containing the results. See ReqHandle.buildReturnDict() for information on the contents of the dictionary. """ self.reqCnt = self.reqCnt + 1 # Determine whether the request will be capturing logs if 'captureLogs' in kwArgs.keys(): logFlag = kwArgs['captureLogs'] else: logFlag = self.captureLogs # Pass along or generate a request Id if 'requestId' in kwArgs.keys(): requestId = kwArgs['requestId'] else: requestId = str(self.reqIdPrefix) + str(self.reqCnt) rh = ReqHandle( requestId=requestId, captureLogs=logFlag, smt=self) rh.parseCmdline(requestData) if rh.results['overallRC'] == 0: rh.printSysLog("Processing: " + rh.requestString) rh.driveFunction() return rh.results zVMCloudConnector-1.6.3/smtLayer/smtCmd.py0000664000175000017510000000405613672563714020146 0ustar ruirui00000000000000#!/usr/bin/env python # Command line processor for Systems Management Ultra Thin Layer # # Copyright 2017 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys from smtLayer.smt import SMT from smtLayer.ReqHandle import ReqHandle version = '1.0.0' # Version of this script """ ****************************************************************************** main routine ****************************************************************************** """ useSMT = True if useSMT: results = SMT(cmdName=sys.argv[0]).request(sys.argv[1:], captureLogs=True) else: reqHandle = ReqHandle(cmdName=sys.argv[0], captureLogs=True) results = reqHandle.parseCmdline(sys.argv[1:]) if results['overallRC'] == 0: results = reqHandle.driveFunction() # On error, show the result codes (overall rc, rc, rs, ...) if results['overallRC'] != 0: print("overall rc: " + str(results['overallRC'])) print(" rc: " + str(results['rc'])) print(" rs: " + str(results['rs'])) print(" errno: " + str(results['errno'])) print(" strError: " + str(results['strError'])) print("") print("Response:") # Show the response lines if len(results['response']) != 0: for line in results['response']: print(line) elif results['overallRC'] == 0: print("Command succeeded.") # On error, show the trace log. if results['overallRC'] != 0: print("") print("Trace Log:") for line in results['logEntries']: print(line) if results['overallRC'] != 0: exit(results['overallRC']) zVMCloudConnector-1.6.3/smtLayer/smapi.py0000664000175000017510000001707314263437505020026 0ustar ruirui00000000000000# SMAPI functions for Systems Management Ultra Thin Layer # # Copyright 2017 IBM Corp. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from smtLayer import generalUtils from smtLayer import msgs from smtLayer.vmUtils import invokeSMCLI modId = 'SMP' version = "1.0.0" """ List of subfunction handlers. Each subfunction contains a list that has: Readable name of the routine that handles the subfunction, Code for the function call. """ subfuncHandler = { 'API': ['invokeSmapiApi', lambda rh: invokeSmapiApi(rh)], 'HELP': ['help', lambda rh: help(rh)], 'VERSION': ['getVersion', lambda rh: getVersion(rh)]} """ List of positional operands based on subfunction. Each subfunction contains a list which has a dictionary with the following information for the positional operands: - Human readable name of the operand, - Property in the parms dictionary to hold the value, - Is it required (True) or optional (False), - Type of data (1: int, 2: string). """ posOpsList = { 'API': [ ['API Name', 'apiName', True, 2]] } """ List of additional operands/options supported by the various subfunctions. The dictionary following the subfunction name uses the keyword from the command as a key. Each keyword has a dictionary that lists: - the related parms item that stores the value, - how many values follow the keyword, and - the type of data for those values (1: int, 2: string) """ keyOpsList = { 'API': { '--operands': ['operands', -1, 2], '--showparms': ['showParms', 0, 0]} } def doIt(rh): """ Perform the requested function by invoking the subfunction handler. 
Input: Request Handle Output: Request Handle updated with parsed input. Return code - 0: ok, non-zero: error """ rh.printSysLog("Enter smapi.doIt") # Show the invocation parameters, if requested. if 'showParms' in rh.parms and rh.parms['showParms']: rh.printLn("N", "Invocation parameters: ") rh.printLn("N", " Routine: smapi." + str(subfuncHandler[rh.subfunction][0]) + "(reqHandle)") rh.printLn("N", " function: " + rh.function) rh.printLn("N", " userid: " + rh.userid) rh.printLn("N", " subfunction: " + rh.subfunction) rh.printLn("N", " parms{}: ") for key in rh.parms: if key != 'showParms': rh.printLn("N", " " + key + ": " + str(rh.parms[key])) rh.printLn("N", " ") # Call the subfunction handler subfuncHandler[rh.subfunction][1](rh) rh.printSysLog("Exit smapi.doIt, rc: " + str(rh.results['overallRC'])) return rh.results['overallRC'] def getVersion(rh): """ Get the version of this function. Input: Request Handle Output: Request Handle updated with the results. Return code - 0: ok, non-zero: error """ rh.printLn("N", "Version: " + version) return 0 def help(rh): """ Produce help output specifically for SMAPI functions. Input: Request Handle Output: Request Handle updated with the results. Return code - 0: ok, non-zero: error """ showInvLines(rh) showOperandLines(rh) return 0 def invokeSmapiApi(rh): """ Invoke a SMAPI API. Input: Request Handle with the following properties: function - 'SMAPI' subfunction - 'API' userid - 'HYPERVISOR' parms['apiName'] - Name of API as defined by SMCLI parms['operands'] - List (array) of operands to send or an empty list. Output: Request Handle updated with the results. 
Return code - 0: ok, non-zero: error """ rh.printSysLog("Enter smapi.invokeSmapiApi") if rh.userid != 'HYPERVISOR': userid = rh.userid else: userid = 'dummy' parms = ["-T", userid] if 'operands' in rh.parms: parms.extend(rh.parms['operands']) # SSI_Query does not need any param if rh.parms['apiName'] == 'SSI_Query': parms = [] results = invokeSMCLI(rh, rh.parms['apiName'], parms) if results['overallRC'] == 0: rh.printLn("N", results['response']) else: # SMAPI API failed. rh.printLn("ES", results['response']) rh.updateResults(results) # Use results from invokeSMCLI rh.printSysLog("Exit smapi.invokeCmd, rc: " + str(rh.results['overallRC'])) return rh.results['overallRC'] def parseCmdline(rh): """ Parse the request command input. Input: Request Handle Output: Request Handle updated with parsed input. Return code - 0: ok, non-zero: error """ rh.printSysLog("Enter smapi.parseCmdline") if rh.totalParms >= 2: rh.userid = rh.request[1].upper() else: # Userid is missing. msg = msgs.msg['0010'][1] % modId rh.printLn("ES", msg) rh.updateResults(msgs.msg['0010'][0]) rh.printSysLog("Exit smapi.parseCmdLine, rc: " + rh.results['overallRC']) return rh.results['overallRC'] if rh.totalParms == 2: rh.subfunction = rh.userid rh.userid = '' if rh.totalParms >= 3: rh.subfunction = rh.request[2].upper() # Verify the subfunction is valid. if rh.subfunction not in subfuncHandler: # Subfunction is missing. subList = ', '.join(sorted(subfuncHandler.keys())) msg = msgs.msg['0011'][1] % (modId, subList) rh.printLn("ES", msg) rh.updateResults(msgs.msg['0011'][0]) # Parse the rest of the command line. 
if rh.results['overallRC'] == 0: rh.argPos = 3 # Begin Parsing at 4th operand generalUtils.parseCmdline(rh, posOpsList, keyOpsList) rh.printSysLog("Exit smapi.parseCmdLine, rc: " + str(rh.results['overallRC'])) return rh.results['overallRC'] def showInvLines(rh): """ Produce help output related to command synopsis Input: Request Handle """ if rh.subfunction != '': rh.printLn("N", "Usage:") rh.printLn("N", " python " + rh.cmdName + " SMAPI " + "api [--operands ]") rh.printLn("N", " python " + rh.cmdName + " SMAPI help") rh.printLn("N", " python " + rh.cmdName + " SMAPI version") return def showOperandLines(rh): """ Produce help output related to operands. Input: Request Handle """ if rh.function == 'HELP': rh.printLn("N", " For the " + rh.function + " function:") else: rh.printLn("N", "Sub-Functions(s):") rh.printLn("N", " api - Invoke a SMAPI API.") rh.printLn("N", " help - Displays this help information.") rh.printLn("N", " version - " + "show the version of the power function") if rh.subfunction != '': rh.printLn("N", "Operand(s):") rh.printLn("N", " - " + "Userid of the target virtual machine") rh.printLn("N", " - Name of the API to invoke") rh.printLn("N", " --operands - Additional API operands") return zVMCloudConnector-1.6.3/smtLayer/migrateVM.py0000664000175000017510000005057614263437505020615 0ustar ruirui00000000000000# MigrateVM functions for Systems Management Ultra Thin Layer # # Copyright 2017 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from smtLayer import generalUtils from smtLayer import msgs from smtLayer.vmUtils import invokeSMCLI modId = 'MIG' version = "1.0.0" """ List of subfunction handlers. Each subfunction contains a list that has: Readable name of the routine that handles the subfunction, Code for the function call. """ subfuncHandler = { 'CANCEL': ['cancel', lambda rh: cancelMigrate(rh)], 'HELP': ['help', lambda rh: help(rh)], 'MODIFY': ['modify', lambda rh: modifyMigrate(rh)], 'MOVE': ['move', lambda rh: moveVM(rh)], 'STATUS': ['status', lambda rh: getStatus(rh)], 'TEST': ['test', lambda rh: testMigrate(rh)], 'VERSION': ['getVersion', lambda rh: getVersion(rh)], } """ List of positional operands based on subfunction. Each subfunction contains a list which has a dictionary with the following information for the positional operands: - Human readable name of the operand, - Property in the parms dictionary to hold the value, - Is it required (True) or optional (False), - Type of data (1: int, 2: string). """ posOpsList = {} """ List of additional operands/options supported by the various subfunctions. The dictionary followng the subfunction name uses the keyword from the command as a key. Each keyword has a dictionary that lists: - the related parms item that stores the value, - how many values follow the keyword, and - the type of data for those values (1: int, 2: string) For example, the 'WAITFOR' subfunction has two keyword operands 'poll' and 'maxwait', and each of them take one additional operand (time in seconds) which is an int. 
""" keyOpsList = { 'CANCEL': {'--showparms': ['showParms', 0, 0]}, 'HELP': {}, 'MODIFY': { '--maxquiesce': ['maxQuiesce', 1, 1], '--maxtotal': ['maxTotal', 1, 1], '--showparms': ['showParms', 0, 0]}, 'MOVE': { '--destination': ['dest', 1, 2], '--forcearch': ['forcearch', 0, 0], '--forcedomain': ['forcedomain', 0, 0], '--forcestorage': ['forcestorage', 0, 0], '--immediate': ['immediate', 0, 0], '--maxquiesce': ['maxQuiesce', 1, 1], '--maxtotal': ['maxTotal', 1, 1], '--showparms': ['showParms', 0, 0]}, 'STATUS': { '--all': ['all', 0, 0], '--incoming': ['incoming', 0, 0], '--outgoing': ['outgoing', 0, 0], '--showparms': ['showParms', 0, 0]}, 'TEST': { '--destination': ['dest', 1, 2], '--showparms': ['showParms', 0, 0]}, 'VERSION': {}, } def cancelMigrate(rh): """ Cancel an existing VMRelocate request. Input: Request Handle with the following properties: function - 'MIGRATEVM' subfunction - 'CANCEL' userid - userid of the virtual machine Output: Request Handle updated with the results. Return code - 0: ok, non-zero: error """ rh.printSysLog("Enter migrateVM.cancelMigrate") parms = ["-T", rh.userid, "-k", "action=CANCEL"] results = invokeSMCLI(rh, "VMRELOCATE", parms) if results['overallRC'] != 0: # SMAPI API failed. rh.printLn("ES", results['response']) rh.updateResults(results) # Use results from invokeSMCLI if results['rc'] == 8 and results['rs'] == 3000: if "1926" in results['response']: # No relocation in progress msg = msgs.msg['0419'][1] % (modId, rh.userid) rh.printLn("ES", msg) rh.updateResults(msgs.msg['0419'][0]) else: # More details in message codes lines = results['response'].split("\n") for line in lines: if "Details:" in line: codes = line.split(' ', 1)[1] msg = msgs.msg['420'][1] % (modId, "VMRELOCATE Cancel", rh.userid, codes) rh.printLn("ES", msg) rh.printSysLog("Exit migrateVM.cancelMigrate, rc: " + str(rh.results['overallRC'])) return rh.results['overallRC'] def doIt(rh): """ Perform the requested function by invoking the subfunction handler. 
Input: Request Handle Output: Request Handle updated with parsed input. Return code - 0: ok, non-zero: error """ rh.printSysLog("Enter migrateVM.doIt") # Show the invocation parameters, if requested. if 'showParms' in rh.parms and rh.parms['showParms'] is True: rh.printLn("N", "Invocation parameters: ") rh.printLn("N", " Routine: migrateVM." + str(subfuncHandler[rh.subfunction][0]) + "(reqHandle)") rh.printLn("N", " function: " + rh.function) rh.printLn("N", " userid: " + rh.userid) rh.printLn("N", " subfunction: " + rh.subfunction) rh.printLn("N", " parms{}: ") for key in rh.parms: if key != 'showParms': rh.printLn("N", " " + key + ": " + str(rh.parms[key])) rh.printLn("N", " ") # Call the subfunction handler subfuncHandler[rh.subfunction][1](rh) rh.printSysLog("Exit migrateVM.doIt, rc: " + str(rh.results['overallRC'])) return rh.results['overallRC'] def getStatus(rh): """ Get status of a VMRelocate request. Input: Request Handle with the following properties: function - 'MIGRATEVM' subfunction - 'STATUS' userid - userid of the virtual machine parms['all'] - If present, set status_target to ALL. parms['incoming'] - If present, set status_target to INCOMING. parms['outgoing'] - If present, set status_target to OUTGOING. if parms does not contain 'all', 'incoming' or 'outgoing', the status_target is set to 'USER '. Output: Request Handle updated with the results. Return code - 0: ok, non-zero: error """ rh.printSysLog("Enter migrateVM.getStatus") parms = ["-T", rh.userid] if 'all' in rh.parms: parms.extend(["-k", "status_target=ALL"]) elif 'incoming' in rh.parms: parms.extend(["-k", "status_target=INCOMING"]) elif 'outgoing' in rh.parms: parms.extend(["-k", "status_target=OUTGOING"]) else: parms.extend(["-k", "status_target=USER " + rh.userid + ""]) results = invokeSMCLI(rh, "VMRELOCATE_Status", parms) if results['overallRC'] != 0: # SMAPI API failed. 
rh.printLn("ES", results['response']) rh.updateResults(results) # Use results from invokeSMCLI if results['rc'] == 4 and results['rs'] == 3001: # No relocation in progress msg = msgs.msg['0419'][1] % (modId, rh.userid) rh.printLn("ES", msg) rh.updateResults(msgs.msg['0419'][0]) else: rh.printLn("N", results['response']) rh.printSysLog("Exit migrateVM.getStatus, rc: " + str(rh.results['overallRC'])) return rh.results['overallRC'] def getVersion(rh): """ Get the version of this function. Input: Request Handle Output: Request Handle updated with the results. Return code - 0: ok, non-zero: error """ rh.printLn("N", "Version: " + version) return 0 def help(rh): """ Produce help output specifically for MigrateVM functions. Input: Request Handle Output: Request Handle updated with the results. Return code - 0: ok, non-zero: error """ showInvLines(rh) showOperandLines(rh) return 0 def modifyMigrate(rh): """ Modify an existing VMRelocate request. Input: Request Handle with the following properties: function - 'MIGRATEVM' subfunction - 'MODIFY' userid - userid of the virtual machine parms['maxQuiesce'] - maximum quiesce time in seconds, or -1 to indicate no limit. parms['maxTotal'] - maximum total time in seconds, or -1 to indicate no limit. Output: Request Handle updated with the results. Return code - 0: ok, non-zero: error """ rh.printSysLog("Enter migrateVM.modifyMigrate") parms = ["-T", rh.userid] if 'maxQuiesce' in rh.parms: if rh.parms['maxQuiesce'] == -1: parms.extend(["-k", "max_quiesce=NOLIMIT"]) else: parms.extend(["-k", "max_quiesce=" + str(rh.parms['maxQuiesce'])]) if 'maxTotal' in rh.parms: if rh.parms['maxTotal'] == -1: parms.extend(["-k", "max_total=NOLIMIT"]) else: parms.extend(["-k", "max_total=" + str(rh.parms['maxTotal'])]) results = invokeSMCLI(rh, "VMRELOCATE_Modify", parms) if results['overallRC'] != 0: # SMAPI API failed. 
rh.printLn("ES", results['response']) rh.updateResults(results) # Use results from invokeSMCLI if results['rc'] == 8 and results['rs'] == 3010: if "1926" in results['response']: # No relocations in progress msg = msgs.msg['0419'][1] % (modId, rh.userid) rh.printLn("ES", msg) rh.updateResults(msgs.msg['0419'][0]) else: # More details in message codes lines = results['response'].split("\n") for line in lines: if "Details:" in line: codes = line.split(' ', 1)[1] msg = msgs.msg['0420'][1] % (modId, "VMRELOCATE Modify", rh.userid, codes) rh.printLn("ES", msg) rh.printSysLog("Exit migrateVM.modifyMigrate, rc: " + str(rh.results['overallRC'])) return rh.results['overallRC'] def moveVM(rh): """ Initiate a VMRelocate request to move a userid. Input: Request Handle with the following properties: function - 'MIGRATEVM' subfunction - 'MOVE' userid - userid of the virtual machine parms['destination'] - target SSI member parms['forcearch'] - if present, force=architecture is set. parms['forcedomain'] - if present, force=domain is set. parms['forcestorage'] - if present, force=storage is set. parms['immediate'] - if present, immediate=YES is set. parms['maxquiesce'] - maximum quiesce time in seconds, or -1 to indicate no limit. parms['maxTotal'] - maximum total time in seconds, or -1 to indicate no limit. Output: Request Handle updated with the results. 
Return code - 0: ok, non-zero: error """ rh.printSysLog("Enter migrateVM.moveVM") parms = ["-T", rh.userid, "-k", "action=MOVE"] if 'dest' in rh.parms: parms.extend(["-k", "destination=" + rh.parms['dest']]) forceOption = '' if 'forcearch' in rh.parms: forceOption = "ARCHITECTURE " if 'forcedomain' in rh.parms: forceOption = forceOption + "DOMAIN " if 'forcestorage' in rh.parms: forceOption = forceOption + "STORAGE " if forceOption != '': parms.extend(["-k", "force=" + forceOption]) if 'immediate' in rh.parms: parms.extend(["-k", "immediate=YES"]) if 'maxQuiesce' in rh.parms: if rh.parms['maxQuiesce'] == -1: parms.extend(["-k", "max_quiesce=NOLIMIT"]) else: parms.extend(["-k", "max_quiesce=" + str(rh.parms['maxQuiesce'])]) if 'maxTotal' in rh.parms: if rh.parms['maxTotal'] == -1: parms.extend(["-k", "max_total=NOLIMIT"]) else: parms.extend(["-k", "max_total=" + str(rh.parms['maxTotal'])]) results = invokeSMCLI(rh, "VMRELOCATE", parms) if results['overallRC'] != 0: # SMAPI API failed. rh.printLn("ES", results['response']) rh.updateResults(results) # Use results from invokeSMCLI if results['rc'] == 8 and results['rs'] == 3000: if "0045" in results['response']: # User not logged on msg = msgs.msg['0418'][1] % (modId, rh.userid) rh.printLn("ES", msg) rh.updateResults(msgs.msg['0418'][0]) else: # More details in message codes lines = results['response'].split("\n") for line in lines: if "Details:" in line: codes = line.split(' ', 1)[1] msg = msgs.msg['0420'][1] % (modId, "VMRELOCATE Move", rh.userid, codes) rh.printLn("ES", msg) rh.printSysLog("Exit migrateVM.moveVM, rc: " + str(rh.results['overallRC'])) return rh.results['overallRC'] def parseCmdline(rh): """ Parse the request command input. Input: Request Handle Output: Request Handle updated with parsed input. Return code - 0: ok, non-zero: error """ rh.printSysLog("Enter migrateVM.parseCmdline") if rh.totalParms >= 2: rh.userid = rh.request[1].upper() else: # Userid is missing. 
msg = msgs.msg['0010'][1] % modId rh.printLn("ES", msg) rh.updateResults(msgs.msg['0010'][0]) rh.printSysLog("Exit migrateVM.parseCmdLine, rc: " + rh.results['overallRC']) return rh.results['overallRC'] if rh.totalParms == 2: rh.subfunction = rh.userid rh.userid = '' if rh.totalParms >= 3: rh.subfunction = rh.request[2].upper() # Verify the subfunction is valid. if rh.subfunction not in subfuncHandler: # Subfunction is missing. subList = ', '.join(sorted(subfuncHandler.keys())) msg = msgs.msg['0011'][1] % (modId, subList) rh.printLn("ES", msg) rh.updateResults(msgs.msg['0011'][0]) # Parse the rest of the command line. if rh.results['overallRC'] == 0: rh.argPos = 3 # Begin Parsing at 4th operand generalUtils.parseCmdline(rh, posOpsList, keyOpsList) rh.printSysLog("Exit migrateVM.parseCmdLine, rc: " + str(rh.results['overallRC'])) return rh.results['overallRC'] def showInvLines(rh): """ Produce help output related to command synopsis Input: Request Handle """ if rh.subfunction != '': rh.printLn("N", "Usage:") rh.printLn("N", " python " + rh.cmdName + " MigrateVM cancel") rh.printLn("N", " python " + rh.cmdName + " MigrateVM help") rh.printLn("N", " python " + rh.cmdName + " MigrateVM modify [--maxtotal ]") rh.printLn("N", " [--maxquiesce ]") rh.printLn("N", " python " + rh.cmdName + " MigrateVM move --destination ") rh.printLn("N", " [--immediate] [--forcearch] " + "[--forcedomain] [--forcestorage]") rh.printLn("N", " [--maxtotal ] " + "[--maxquiesce ]") rh.printLn("N", " python " + rh.cmdName + " MigrateVM status " + "[--all | --incoming | --outgoing]") rh.printLn("N", " python " + rh.cmdName + " MigrateVM test --destination ") rh.printLn("N", " python " + rh.cmdName + " MigrateVM version") return def showOperandLines(rh): """ Produce help output related to operands. 
Input: Request Handle """ if rh.function == 'HELP': rh.printLn("N", " For the MigrateVM function:") else: rh.printLn("N", "Sub-Functions(s):") rh.printLn("N", " cancel - " + "cancels the relocation of the specified virtual machine.") rh.printLn("N", " help - Displays this help information.") rh.printLn("N", " modify - " + "modifies the time limits associated with the relocation already") rh.printLn("N", " in progress .") rh.printLn("N", " move - " + "moves the specified virtual machine, while it continues to run,") rh.printLn("N", " " + "to the specified system within the SSI cluster.") rh.printLn("N", " status - requests information about " + "relocations currently in progress.") rh.printLn("N", " test - tests the specified virtual machine " + "and reports whether or not") rh.printLn("N", " " + "it is eligible to be relocated to the specified system.") rh.printLn("N", " version - show the version of the power function") if rh.subfunction != '': rh.printLn("N", "Operand(s):") rh.printLn("N", " --all - All relocations") rh.printLn("N", " --destination - " + "Specifies the SSI name of the target destination") rh.printLn("N", " z/VM system ") rh.printLn("N", " --forcearch - " + "force relocations past architecture checks.") rh.printLn("N", " --forcedomain - " + "force relocations past domain checks.") rh.printLn("N", " --forcestorage - " + "force relocations past storage checks.") rh.printLn("N", " --immediate - " + "causes the VMRELOCATE command to do one early") rh.printLn("N", " " + "pass through virtual machine storage and then go") rh.printLn("N", " " + "directly to the quiesce stage.") rh.printLn("N", " --incoming - Incoming relocations") rh.printLn("N", " --maxquiesce - " + "indicates the maximum quiesce time (in seconds)") rh.printLn("N", " for this relocation.") rh.printLn("N", " --maxtotal - " + "indicates the maximum total time (in seconds)") rh.printLn("N", " " + "for relocation to complete.") rh.printLn("N", " --outgoing - Out-going relocations") 
rh.printLn("N", " - " + "Userid of the target virtual machine") return def testMigrate(rh): """ Test the ability to use VMRelocate on the target userid. Input: Request Handle with the following properties: function - 'MIGRATEVM' subfunction - 'TEST' userid - userid of the virtual machine parms['dest'] - Target SSI system. Output: Request Handle updated with the results. Return code - 0: ok, non-zero: error """ rh.printSysLog("Enter migrateVM.testMigrate") parms = ["-T", rh.userid, "-k", "action=TEST"] if 'dest' in rh.parms: parms.extend(["-k", "destination=" + rh.parms['dest']]) results = invokeSMCLI(rh, "VMRELOCATE", parms) if results['overallRC'] != 0: # SMAPI API failed. rh.printLn("ES", results['response']) rh.updateResults(results) # Use results from invokeSMCLI if results['rc'] == 4 and results['rs'] == 3000: if "0045" in results['response']: # User not logged on msg = msgs.msg['0418'][1] % (modId, rh.userid) rh.printLn("ES", msg) rh.updateResults(msgs.msg['0418'][0]) else: # More details in message codes lines = results['response'].split("\n") for line in lines: if "Details:" in line: codes = line.split(' ', 1)[1] msg = msgs.msg['0420'][1] % (modId, "VMRELOCATE Move", rh.userid, codes) rh.printSysLog("Exit migrateVM.testMigrate, rc: " + str(rh.results['overallRC'])) return rh.results['overallRC'] zVMCloudConnector-1.6.3/smtLayer/generalUtils.py0000664000175000017510000003113413672563714021352 0ustar ruirui00000000000000# General Utilities for Systems Management Ultra Thin Layer # # Copyright 2017 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import math from smtLayer import msgs fiveGigSize = (1024 * 5) modId = 'GUT' def cvtToBlocks(rh, diskSize): """ Convert a disk storage value to a number of blocks. Input: Request Handle Size of disk in bytes Output: Results structure: overallRC - Overall return code for the function: 0 - Everything went ok 4 - Input validation error rc - Return code causing the return. Same as overallRC. rs - Reason code causing the return. errno - Errno value causing the return. Always zero. Converted value in blocks """ rh.printSysLog("Enter generalUtils.cvtToBlocks") blocks = 0 results = {'overallRC': 0, 'rc': 0, 'rs': 0, 'errno': 0} blocks = diskSize.strip().upper() lastChar = blocks[-1] if lastChar == 'G' or lastChar == 'M': # Convert the bytes to blocks byteSize = blocks[:-1] if byteSize == '': # The size of the disk is not valid. msg = msgs.msg['0200'][1] % (modId, blocks) rh.printLn("ES", msg) results = msgs.msg['0200'][0] else: try: if lastChar == 'M': blocks = (float(byteSize) * 1024 * 1024) / 512 elif lastChar == 'G': blocks = (float(byteSize) * 1024 * 1024 * 1024) / 512 blocks = str(int(math.ceil(blocks))) except Exception: # Failed to convert to a number of blocks. msg = msgs.msg['0201'][1] % (modId, byteSize) rh.printLn("ES", msg) results = msgs.msg['0201'][0] elif blocks.strip('1234567890'): # Size is not an integer size of blocks. msg = msgs.msg['0202'][1] % (modId, blocks) rh.printLn("ES", msg) results = msgs.msg['0202'][0] rh.printSysLog("Exit generalUtils.cvtToBlocks, rc: " + str(results['overallRC'])) return results, blocks def cvtToCyl(rh, diskSize): """ Convert a disk storage value to a number of cylinders. Input: Request Handle Size of disk in bytes Output: Results structure: overallRC - Overall return code for the function: 0 - Everything went ok 4 - Input validation error rc - Return code causing the return. Same as overallRC. 
def cvtToCyl(rh, diskSize):
    """ Convert a disk storage size specification to a number of cylinders.

    Input:
       Request Handle
       Size of the disk as a string, e.g. '1G', '500M', or a plain
       number of cylinders

    Output:
       Results structure:
          overallRC - Overall return code for the function:
                      0 - Everything went ok
                      4 - Input validation error
          rc - Return code causing the return. Same as overallRC.
          rs - Reason code causing the return.
          errno - Errno value causing the return. Always zero.
       Converted value in cylinders (string)
    """
    rh.printSysLog("Enter generalUtils.cvtToCyl")

    results = {'overallRC': 0, 'rc': 0, 'rs': 0, 'errno': 0}
    cyl = diskSize.strip().upper()
    lastChar = cyl[-1]
    if lastChar == 'G' or lastChar == 'M':
        # Convert the bytes to cylinders; 737280 bytes per 3390 cylinder.
        byteSize = cyl[:-1]
        if byteSize == '':
            # The size of the disk is not valid.
            # Bug fix: report the full size operand (cyl), not just the
            # magnitude character, consistent with cvtToBlocks.
            msg = msgs.msg['0200'][1] % (modId, cyl)
            rh.printLn("ES", msg)
            results = msgs.msg['0200'][0]
        else:
            try:
                if lastChar == 'M':
                    cyl = (float(byteSize) * 1024 * 1024) / 737280
                elif lastChar == 'G':
                    cyl = (float(byteSize) * 1024 * 1024 * 1024) / 737280
                cyl = str(int(math.ceil(cyl)))
            except Exception:
                # Failed to convert to a number of cylinders.
                msg = msgs.msg['0203'][1] % (modId, byteSize)
                rh.printLn("ES", msg)
                results = msgs.msg['0203'][0]
    elif cyl.strip('1234567890'):
        # Size is not an integer value.
        msg = msgs.msg['0204'][1] % (modId, cyl)
        rh.printLn("ES", msg)
        # Bug fix: previously returned the 0202 results structure with the
        # 0204 message text; return the matching 0204 structure instead.
        results = msgs.msg['0204'][0]

    rh.printSysLog("Exit generalUtils.cvtToCyl, rc: " +
                   str(results['overallRC']))
    return results, cyl


def cvtToMag(rh, size):
    """ Convert a size value to a number with a magnitude appended.

    Input:
       Request Handle
       Size in bytes (numeric)

    Output:
       Converted value with a magnitude, e.g. '512.0M' or '6.0G'
    """
    rh.printSysLog("Enter generalUtils.cvtToMag")

    mSize = ''
    size = size / (1024 * 1024)          # bytes -> megabytes

    if size > (1024 * 5):
        # Size is greater than 5G. Using "G" magnitude.
        size = size / 1024
        mSize = "%.1fG" % size
    else:
        # Size is less than or equal 5G. Using "M" magnitude.
        mSize = "%.1fM" % size

    rh.printSysLog("Exit generalUtils.cvtToMag, magSize: " + mSize)
    return mSize


def getSizeFromPage(rh, page):
    """ Convert a size value from page to a number with a magnitude appended.

    Input:
       Request Handle
       Size in pages (4096-byte pages; string or numeric)

    Output:
       Converted value with a magnitude
    """
    rh.printSysLog("Enter generalUtils.getSizeFromPage")

    bSize = float(page) * 4096           # pages -> bytes
    mSize = cvtToMag(rh, bSize)

    rh.printSysLog("Exit generalUtils.getSizeFromPage, magSize: " + mSize)
    return mSize
def parseCmdline(rh, posOpsList, keyOpsList):
    """ Parse the request command input.

    Input:
       Request Handle
       Positional Operands List.  This is a dictionary that contains
       an array for each subfunction.  The array contains a entry
       (itself an array) for each positional operand.
       That array contains:
          - Human readable name of the operand,
          - Property in the parms dictionary to hold the value,
          - Is it required (True) or optional (False),
          - Type of data (1: int, 2: string).
       Keyword Operands List.  This is a dictionary that contains
       an item for each subfunction.  The value for the subfunction is a
       dictionary that contains a key for each recognized operand.
       The value associated with the key is an array that contains
       the following:
          - the related ReqHandle.parms item that stores the value,
          - how many values follow the keyword, and
          - the type of data for those values (1: int, 2: string)

    Output:
       Request Handle updated with parsed input.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter generalUtils.parseCmdline")

    # Handle any positional operands on the line.
    # Positional operands are consumed in order; parsing stops at the
    # first operand that fails integer conversion.
    if rh.results['overallRC'] == 0 and rh.subfunction in posOpsList:
        ops = posOpsList[rh.subfunction]
        currOp = 0
        # While we have operands on the command line AND
        # we have more operands in the positional operand list.
        while rh.argPos < rh.totalParms and currOp < len(ops):
            key = ops[currOp][1]        # key for rh.parms[]
            opType = ops[currOp][3]     # data type
            if opType == 1:
                # Handle an integer data type
                try:
                    rh.parms[key] = int(rh.request[rh.argPos])
                except ValueError:
                    # keyword is not an integer
                    msg = msgs.msg['0001'][1] % (modId, rh.function,
                        rh.subfunction, (currOp + 1),
                        ops[currOp][0], rh.request[rh.argPos])
                    rh.printLn("ES", msg)
                    rh.updateResults(msgs.msg['0001'][0])
                    break
            else:
                # String (or other) data type: store as-is.
                rh.parms[key] = rh.request[rh.argPos]
            currOp += 1
            rh.argPos += 1

        if (rh.argPos >= rh.totalParms and currOp < len(ops) and
            ops[currOp][2] is True):
            # Check for missing required operands.
            msg = msgs.msg['0002'][1] % (modId, rh.function,
                rh.subfunction, ops[currOp][0], (currOp + 1))
            rh.printLn("ES", msg)
            rh.updateResults(msgs.msg['0002'][0])

    # Handle any keyword operands on the line.
    # Each keyword may be a flag (opCnt == 0), take a fixed number of
    # values (opCnt > 0), or consume all remaining parms (opCnt < 0).
    if rh.results['overallRC'] == 0 and rh.subfunction in keyOpsList:
        while rh.argPos < rh.totalParms:
            if rh.request[rh.argPos] in keyOpsList[rh.subfunction]:
                keyword = rh.request[rh.argPos]
                rh.argPos += 1
                ops = keyOpsList[rh.subfunction]
                if keyword in ops:
                    key = ops[keyword][0]
                    opCnt = ops[keyword][1]
                    opType = ops[keyword][2]
                    if opCnt == 0:
                        # Keyword has no additional value
                        rh.parms[key] = True
                    else:
                        # Keyword has values following it.
                        storeIntoArray = False    # Assume single word
                        if opCnt < 0:
                            storeIntoArray = True
                            # Property is a list all of the rest of the parms.
                            opCnt = rh.totalParms - rh.argPos
                            if opCnt == 0:
                                # Need at least 1 operand value
                                opCnt = 1
                        elif opCnt > 1:
                            storeIntoArray = True
                        if opCnt + rh.argPos > rh.totalParms:
                            # keyword is missing its related value operand
                            msg = msgs.msg['0003'][1] % (modId, rh.function,
                                rh.subfunction, keyword)
                            rh.printLn("ES", msg)
                            rh.updateResults(msgs.msg['0003'][0])
                            break

                        """
                        Add the expected value to the property.
                        Take into account if there are more than 1.
                        """
                        if storeIntoArray:
                            # Initialize the list.
                            rh.parms[key] = []
                        for i in range(0, opCnt):
                            if opType == 1:
                                # convert from string to int and save it.
                                try:
                                    if not storeIntoArray:
                                        rh.parms[key] = (
                                            int(rh.request[rh.argPos]))
                                    else:
                                        rh.parms[key].append(int(
                                            rh.request[rh.argPos]))
                                except ValueError:
                                    # keyword is not an integer
                                    msg = (msgs.msg['0004'][1] %
                                        (modId, rh.function, rh.subfunction,
                                         keyword, rh.request[rh.argPos]))
                                    rh.printLn("ES", msg)
                                    rh.updateResults(msgs.msg['0004'][0])
                                    break
                            else:
                                # Value is a string, save it.
                                if not storeIntoArray:
                                    rh.parms[key] = rh.request[rh.argPos]
                                else:
                                    rh.parms[key].append(
                                        rh.request[rh.argPos])
                            rh.argPos += 1
                        if rh.results['overallRC'] != 0:
                            # Upper loop had an error break from loops.
                            break
                else:
                    # keyword is not in the subfunction's keyword list
                    msg = msgs.msg['0005'][1] % (modId, rh.function,
                        rh.subfunction, keyword)
                    rh.printLn("ES", msg)
                    rh.updateResults(msgs.msg['0005'][0])
                    break
            else:
                # Subfunction does not support keywords
                msg = (msgs.msg['0006'][1] % (modId, rh.function,
                    rh.subfunction, rh.request[rh.argPos]))
                rh.printLn("ES", msg)
                rh.updateResults(msgs.msg['0006'][0])
                break

    rh.printSysLog("Exit generalUtils.parseCmdLine, rc: " +
                   str(rh.results['overallRC']))
    return rh.results['overallRC']
def disableEnableDisk(rh, userid, vaddr, option):
    """ Disable or enable a disk.

    Input:
       Request Handle:
       owning userid
       virtual address
       option ('-e': enable, '-d': disable)

    Output:
       Dictionary containing the following:
          overallRC - overall return code, 0: success, non-zero: failure
          rc        - rc from the chccwdev command or IUCV transmission.
          rs        - rs from the chccwdev command or IUCV transmission.
          results   - possible error message from the IUCV transmission.
    """
    rh.printSysLog("Enter vmUtils.disableEnableDisk, userid: " + userid +
        " addr: " + vaddr + " option: " + option)

    results = {
              'overallRC': 0,
              'rc': 0,
              'rs': 0,
              'response': ''
             }

    """
    Can't guarantee the success of online/offline disk, need to wait
    Until it's done because we may detach the disk after -d option
    or use the disk after the -e option
    """
    # Retry with a growing backoff; the schedule below totals roughly
    # 7 minutes of waiting before giving up.
    # NOTE(review): the final sleep is wasted — it runs even after the
    # last attempt fails; confirm whether that is intended.
    for secs in [0.1, 0.4, 1, 1.5, 3, 7, 15, 32, 30, 30,
                 60, 60, 60, 60, 60]:
        strCmd = "sudo /sbin/chccwdev " + option + " " + vaddr + " 2>&1"
        results = execCmdThruIUCV(rh, userid, strCmd)
        if results['overallRC'] == 0:
            break
        elif (results['overallRC'] == 2 and results['rc'] == 8 and
            results['rs'] == 1 and option == '-d'):
            # Linux does not know about the disk being disabled.
            # Ok, nothing to do.  Treat this as a success.
            results = {'overallRC': 0, 'rc': 0, 'rs': 0, 'response': ''}
            break
        time.sleep(secs)

    rh.printSysLog("Exit vmUtils.disableEnableDisk, rc: " +
                   str(results['overallRC']))
    return results
def execCmdThruIUCV(rh, userid, strCmd, hideInLog=[], timeout=None):
    """ Send a command to a virtual machine using IUCV.

    Input:
       Request Handle
       Userid of the target virtual machine
       Command string to send
       (Optional) List of strCmd words (by index) to hide in
         sysLog by replacing the word with "".
       (Optional) timeout value in seconds for executing this command.

    Output:
       Dictionary containing the following:
          overallRC - overall return code, 0: success, 2: failure
          rc - RC returned from iucvclnt if overallRC != 0.
          rs - RS returned from iucvclnt if overallRC != 0.
          errno - Errno returned from iucvclnt if overallRC != 0.
          response - Output of the iucvclnt command or this routine.

    Notes:
       1) This routine does not use the Request Handle printLn function.
          This is because an error might be expected and we might desire
          to suppress it.  Instead, any error messages are put in the
          response dictionary element that is returned.
    """
    if len(hideInLog) == 0:
        rh.printSysLog("Enter vmUtils.execCmdThruIUCV, userid: " +
                       userid + " cmd: " + strCmd +
                       " timeout: " + str(timeout))
    else:
        # Redact the sensitive words (by index) before logging.
        logCmd = strCmd.split(' ')
        for i in hideInLog:
            logCmd[i] = ''
        rh.printSysLog("Enter vmUtils.execCmdThruIUCV, userid: " +
                       userid + " cmd: " + ' '.join(logCmd) +
                       " timeout: " + str(timeout))

    iucvpath = '/opt/zthin/bin/IUCV/'
    results = {
              'overallRC': 0,
              'rc': 0,
              'rs': 0,
              'errno': 0,
              'response': [],
             }

    cmd = ['sudo', iucvpath + "iucvclnt", userid, strCmd]

    try:
        results['response'] = subprocess.check_output(
            cmd,
            stderr=subprocess.STDOUT,
            close_fds=True,
            timeout=timeout)
        if isinstance(results['response'], bytes):
            results['response'] = bytes.decode(results['response'])
    except subprocess.TimeoutExpired as e:
        # Timeout exceptions from this system
        rh.printSysLog("Timeout exception in vmUtils.execCmdThruIUCV")
        results = msgs.msg['0501'][0]
        msg = msgs.msg['0501'][1] % (modId, strCmd,
                                     type(e).__name__, str(e))
        results['response'] = msg
    except CalledProcessError as e:
        # iucvclnt ended with a non-zero exit code.  Parse the rc/rs out
        # of its output and map the rc to a diagnostic message.
        msg = []
        results['overallRC'] = 2
        results['rc'] = e.returncode

        output = bytes.decode(e.output)

        # Pull the return code out of "Return code N," in the output.
        match = re.search(r'Return code (.+?),', output)
        if match:
            try:
                results['rc'] = int(match.group(1))
            except ValueError:
                # Return code in response from IUCVCLNT is not an int.
                msg = msgs.msg['0311'][1] % (modId, userid, strCmd,
                    results['rc'], match.group(1), output)

        if not msg:
            # We got the rc.  Now, get the rs.
            match = re.search(r'Reason code (.+?)\.', output)
            if match:
                try:
                    results['rs'] = int(match.group(1))
                except ValueError:
                    # Reason code in response from IUCVCLNT is not an int.
                    msg = msgs.msg['0312'][1] % (modId, userid, strCmd,
                        results['rc'], match.group(1), output)

        if msg:
            # Already produced an error message.
            pass
        elif results['rc'] == 1:
            # Command was not authorized or a generic Linux error.
            msg = msgs.msg['0313'][1] % (modId, userid, strCmd,
                results['rc'], results['rs'], output)
        elif results['rc'] == 2:
            # IUCV client parameter error.
            msg = msgs.msg['0314'][1] % (modId, userid, strCmd,
                results['rc'], results['rs'], output)
        elif results['rc'] == 4:
            # IUCV socket error
            msg = msgs.msg['0315'][1] % (modId, userid, strCmd,
                results['rc'], results['rs'], output)
        elif results['rc'] == 8:
            # Executed command failed
            msg = msgs.msg['0316'][1] % (modId, userid, strCmd,
                results['rc'], results['rs'], output)
        elif results['rc'] == 16:
            # File Transport failed
            msg = msgs.msg['0317'][1] % (modId, userid, strCmd,
                results['rc'], results['rs'], output)
        elif results['rc'] == 32:
            # IUCV server file was not found on this system.
            # Bug fix: 'msg' starts out as a list ([]); the original
            # 'msg +=' extended that list with the message's individual
            # characters instead of assigning the message string.
            msg = msgs.msg['0318'][1] % (modId, userid, strCmd,
                results['rc'], results['rs'], output)
        else:
            # Unrecognized IUCV client error
            msg = msgs.msg['0319'][1] % (modId, userid, strCmd,
                results['rc'], results['rs'], output)
        results['response'] = msg
    except (subprocess.TimeoutExpired, PermissionError) as e:
        # NOTE: TimeoutExpired is already handled by the earlier clause,
        # so only PermissionError can actually reach this handler.
        results['overallRC'] = 3    # return code
        results['rc'] = 64          # reason code
        results['rs'] = 408
        output = str(e)
        msg = msgs.msg['0320'][1] % (modId, userid, strCmd,
            results['rc'], results['rs'], output)
        results['response'] = msg
    except Exception as e:
        # Other exceptions from this system (i.e. not the managed system).
        results = msgs.msg['0421'][0]
        msg = msgs.msg['0421'][1] % (modId, strCmd,
            type(e).__name__, str(e))
        results['response'] = msg

    rh.printSysLog("Exit vmUtils.execCmdThruIUCV, rc: " +
                   str(results['rc']))
    return results
def getPerfInfo(rh, useridlist):
    """ Get the performance information for a userid

    Input:
       Request Handle
       Userid to query <- may change this to a list later.

    Output:
       Dictionary containing the following:
          overallRC - overall return code, 0: success, non-zero: failure
          rc - RC returned from SMCLI if overallRC = 0.
          rs - RS returned from SMCLI if overallRC = 0.
          errno - Errno returned from SMCLI if overallRC = 0.
          response - Stripped and reformatted output of the SMCLI command.
    """
    rh.printSysLog("Enter vmUtils.getPerfInfo, userid: " + useridlist)
    parms = ["-T", rh.userid, "-c", "1"]
    results = invokeSMCLI(rh, "Image_Performance_Query", parms)
    if results['overallRC'] != 0:
        # SMCLI failed.
        rh.printLn("ES", results['response'])
        rh.printSysLog("Exit vmUtils.getPerfInfo, rc: " +
                       str(results['overallRC']))
        return results

    # Scrape the metrics of interest out of the query response, converting
    # each to a more readable unit.
    lines = results['response'].split("\n")
    usedTime = 0
    totalCpu = 0
    totalMem = 0
    usedMem = 0
    try:
        for line in lines:
            if "Used CPU time:" in line:
                usedTime = line.split()[3].strip('"')
                # Value is in us, need make it seconds
                usedTime = int(usedTime) / 1000000
            if "Guest CPUs:" in line:
                totalCpu = line.split()[2].strip('"')
            if "Max memory:" in line:
                totalMem = line.split()[2].strip('"')
                # Value is in Kb, need to make it Mb
                totalMem = int(totalMem) / 1024
            if "Used memory:" in line:
                usedMem = line.split()[2].strip('"')
                usedMem = int(usedMem) / 1024
    except Exception as e:
        # Response did not have the expected shape; report message 0412.
        msg = msgs.msg['0412'][1] % (modId, type(e).__name__,
            str(e), results['response'])
        rh.printLn("ES", msg)
        results['overallRC'] = 4
        results['rc'] = 4
        results['rs'] = 412

    if results['overallRC'] == 0:
        # Reassemble the parsed values into a human-readable summary.
        memstr = "Total Memory: %iM\n" % totalMem
        usedmemstr = "Used Memory: %iM\n" % usedMem
        procstr = "Processors: %s\n" % totalCpu
        timestr = "CPU Used Time: %i sec\n" % usedTime
        results['response'] = memstr + usedmemstr + procstr + timestr

    rh.printSysLog("Exit vmUtils.getPerfInfo, rc: " +
                   str(results['rc']))
    return results
def installFS(rh, vaddr, mode, fileSystem, diskType):
    """ Install a filesystem on a virtual machine's dasd.

    Input:
       Request Handle:
          userid - Userid that owns the disk
       Virtual address as known to the owning system.
       Access mode to use to get the disk.
       File system to install (e.g. 'swap', 'xfs', 'ext4', ...)
       Disk Type - 3390 or 9336

    Output:
       Dictionary containing the following:
          overallRC - overall return code, 0: success, non-zero: failure
          rc - RC returned from SMCLI if overallRC = 0.
          rs - RS returned from SMCLI if overallRC = 0.
          errno - Errno returned from SMCLI if overallRC = 0.
          response - Output of the SMCLI command.
    """
    rh.printSysLog("Enter vmUtils.installFS, userid: " + rh.userid +
        ", vaddr: " + str(vaddr) + ", mode: " + mode + ", file system: " +
        fileSystem + ", disk type: " + diskType)

    results = {
              'overallRC': 0,
              'rc': 0,
              'rs': 0,
              'errno': 0,
             }

    out = ''
    diskAccessed = False

    # Get access to the disk.
    cmd = ["sudo",
           "/opt/zthin/bin/linkdiskandbringonline",
           rh.userid,
           vaddr,
           mode]
    strCmd = ' '.join(cmd)
    rh.printSysLog("Invoking: " + strCmd)
    try:
        # Sometimes the disk is not ready: sleep and retry
        # (-1 terminates the schedule: the last failure is re-raised).
        try_num = 0
        for sleep_secs in [0.1, 0.2, 0.3, 0.5, 1, 2, -1]:
            try_num += 1
            try:
                out = subprocess.check_output(cmd, close_fds=True)
                rh.printSysLog("Run `%s` successfully." % strCmd)
                diskAccessed = True
                break
            except CalledProcessError as e:
                if sleep_secs > 0:
                    rh.printSysLog("Num %d try `%s` failed ("
                                   "retry after %s seconds): "
                                   "rc=%d msg=%s" % (
                                       try_num, strCmd, sleep_secs,
                                       e.returncode, e.output))
                    time.sleep(sleep_secs)
                else:
                    raise
        if isinstance(out, bytes):
            out = bytes.decode(out)
    except CalledProcessError as e:
        rh.printLn("ES", msgs.msg['0415'][1] % (modId, strCmd,
            e.returncode, e.output))
        results = msgs.msg['0415'][0]
        results['rs'] = e.returncode
        rh.updateResults(results)
    except Exception as e:
        # All other exceptions.
        results = msgs.msg['0421'][0]
        rh.printLn("ES", msgs.msg['0421'][1] % (modId, strCmd,
            type(e).__name__, str(e)))

    if results['overallRC'] == 0:
        """
        sample output:
        linkdiskandbringonline maint start time: 2017-03-03-16:20:48.011
        Success: Userid maint vdev 193 linked at ad35 device name dasdh
        linkdiskandbringonline exit time: 2017-03-03-16:20:52.150
        """
        # The device node name is word 10 of the "Success:" line.
        match = re.search('Success:(.+?)\n', out)
        if match:
            parts = match.group(1).split()
            if len(parts) > 9:
                device = "/dev/" + parts[9]
            else:
                strCmd = ' '.join(cmd)
                rh.printLn("ES", msgs.msg['0416'][1] % (modId,
                    'Success:', 10, strCmd, out))
                results = msgs.msg['0416'][0]
                rh.updateResults(results)
        else:
            strCmd = ' '.join(cmd)
            rh.printLn("ES", msgs.msg['0417'][1] % (modId,
                'Success:', strCmd, out))
            results = msgs.msg['0417'][0]
            rh.updateResults(results)

    if results['overallRC'] == 0 and diskType == "3390":
        # dasdfmt the disk
        cmd = ["sudo",
               "/sbin/dasdfmt",
               "-y",
               "-b", "4096",
               "-d", "cdl",
               "-v", device]
        strCmd = ' '.join(cmd)
        rh.printSysLog("Invoking: " + strCmd)
        try:
            out = subprocess.check_output(cmd, close_fds=True)
            if isinstance(out, bytes):
                out = bytes.decode(out)
        except CalledProcessError as e:
            rh.printLn("ES", msgs.msg['0415'][1] % (modId, strCmd,
                e.returncode, e.output))
            results = msgs.msg['0415'][0]
            results['rs'] = e.returncode
            rh.updateResults(results)
        except Exception as e:
            # All other exceptions.
            strCmd = " ".join(cmd)
            rh.printLn("ES", msgs.msg['0421'][1] % (modId, strCmd,
                type(e).__name__, str(e)))
            results = msgs.msg['0421'][0]
            rh.updateResults(results)

    if results['overallRC'] == 0 and diskType == "3390":
        # Settle the devices so we can do the partition.
        strCmd = ("which udevadm &> /dev/null && " +
            "udevadm settle || udevsettle")
        rh.printSysLog("Invoking: " + strCmd)
        try:
            subprocess.check_output(
                strCmd,
                stderr=subprocess.STDOUT,
                close_fds=True,
                shell=True)
            if isinstance(out, bytes):
                out = bytes.decode(out)
        except CalledProcessError as e:
            rh.printLn("ES", msgs.msg['0415'][1] % (modId, strCmd,
                e.returncode, e.output))
            results = msgs.msg['0415'][0]
            results['rs'] = e.returncode
            rh.updateResults(results)
        except Exception as e:
            # All other exceptions.
            strCmd = " ".join(cmd)
            rh.printLn("ES", msgs.msg['0421'][1] % (modId, strCmd,
                type(e).__name__, str(e)))
            results = msgs.msg['0421'][0]
            rh.updateResults(results)

    if results['overallRC'] == 0 and diskType == "3390":
        # Prepare the partition with fdasd
        cmd = ["sudo", "/sbin/fdasd", "-a", device]
        strCmd = ' '.join(cmd)
        rh.printSysLog("Invoking: " + strCmd)
        try:
            out = subprocess.check_output(cmd,
                stderr=subprocess.STDOUT, close_fds=True)
            if isinstance(out, bytes):
                out = bytes.decode(out)
        except CalledProcessError as e:
            rh.printLn("ES", msgs.msg['0415'][1] % (modId, strCmd,
                e.returncode, e.output))
            results = msgs.msg['0415'][0]
            results['rs'] = e.returncode
            rh.updateResults(results)
        except Exception as e:
            # All other exceptions.
            rh.printLn("ES", msgs.msg['0421'][1] % (modId, strCmd,
                type(e).__name__, str(e)))
            results = msgs.msg['0421'][0]
            rh.updateResults(results)

    if results['overallRC'] == 0 and diskType == "9336":
        # Delete the existing partition in case the disk already
        # has a partition in it.
        # NOTE(review): the command feeds 'g' (create GPT label) to fdisk
        # while the log text below says 'd' (delete) — confirm which is
        # intended.
        cmd = "sudo /sbin/fdisk " + device + " << EOF\ng\nw\nEOF"
        rh.printSysLog("Invoking: sudo /sbin/fdisk " + device +
                       " << EOF\\nd\\nw\\nEOF ")
        try:
            out = subprocess.check_output(cmd,
                stderr=subprocess.STDOUT,
                close_fds=True,
                shell=True)
            rh.printSysLog("Run `%s` success with output: %s"
                           % (cmd, out))
            if isinstance(out, bytes):
                out = bytes.decode(out)
        except CalledProcessError as e:
            rh.printLn("ES", msgs.msg['0415'][1] % (modId, cmd,
                e.returncode, e.output))
            results = msgs.msg['0415'][0]
            results['rs'] = e.returncode
            rh.updateResults(results)
        except Exception as e:
            # All other exceptions.
            rh.printLn("ES", msgs.msg['0421'][1] % (modId, cmd,
                type(e).__name__, str(e)))
            results = msgs.msg['0421'][0]
            rh.updateResults(results)

    if results['overallRC'] == 0 and diskType == "9336":
        # Prepare the partition with fdisk
        cmd = "sudo /sbin/fdisk " + device + " << EOF\nn\np\n1\n\n\nw\nEOF"
        rh.printSysLog("Invoking: sudo /sbin/fdisk " + device +
                       " << EOF\\nn\\np\\n1\\n\\n\\nw\\nEOF")
        try:
            # Sometimes the table is not ready: sleep and retry
            try_num = 0
            for sleep_secs in [0.1, 0.2, 0.3, 0.5, 1, 2, -1]:
                try_num += 1
                try:
                    out = subprocess.check_output(cmd,
                        stderr=subprocess.STDOUT,
                        close_fds=True,
                        shell=True)
                    rh.printSysLog("Run `%s` success with output: %s"
                                   % (cmd, out))
                    break
                except CalledProcessError as e:
                    if sleep_secs > 0:
                        rh.printSysLog("Num %d try `%s` failed ("
                                       "retry after %s seconds): "
                                       "rc=%d msg=%s" % (
                                           try_num, cmd, sleep_secs,
                                           e.returncode, e.output))
                        time.sleep(sleep_secs)
                    else:
                        raise
            if isinstance(out, bytes):
                out = bytes.decode(out)
        except CalledProcessError as e:
            rh.printLn("ES", msgs.msg['0415'][1] % (modId, cmd,
                e.returncode, e.output))
            results = msgs.msg['0415'][0]
            results['rs'] = e.returncode
            rh.updateResults(results)
        except Exception as e:
            # All other exceptions.
            rh.printLn("ES", msgs.msg['0421'][1] % (modId, cmd,
                type(e).__name__, str(e)))
            results = msgs.msg['0421'][0]
            rh.updateResults(results)

    if results['overallRC'] == 0:
        # Settle the devices so we can do the partition.
        strCmd = ("which udevadm &> /dev/null && " +
            "udevadm settle || udevsettle")
        rh.printSysLog("Invoking: " + strCmd)
        try:
            subprocess.check_output(
                strCmd,
                stderr=subprocess.STDOUT,
                close_fds=True,
                shell=True)
            if isinstance(out, bytes):
                out = bytes.decode(out)
        except CalledProcessError as e:
            rh.printLn("ES", msgs.msg['0415'][1] % (modId, strCmd,
                e.returncode, e.output))
            results = msgs.msg['0415'][0]
            results['rs'] = e.returncode
            rh.updateResults(results)
        except Exception as e:
            # All other exceptions.
            strCmd = " ".join(cmd)
            rh.printLn("ES", msgs.msg['0421'][1] % (modId, strCmd,
                type(e).__name__, str(e)))
            results = msgs.msg['0421'][0]
            rh.updateResults(results)

    if results['overallRC'] == 0:
        # Install the file system into the disk.
        device = device + "1"        # Point to first partition
        if fileSystem == 'swap':
            cmd = ["sudo", "mkswap", device]
        elif fileSystem == 'xfs':
            cmd = ["sudo", "mkfs.xfs", "-f", device]
        else:
            cmd = ["sudo", "mkfs", "-F", "-t", fileSystem, device]
        strCmd = ' '.join(cmd)
        rh.printSysLog("Invoking: " + strCmd)
        try:
            # Sometimes the device is not ready: sleep and retry
            try_num = 0
            for sleep_secs in [0.1, 0.2, 0.3, 0.5, 1, 2, -1]:
                try_num += 1
                try:
                    out = subprocess.check_output(cmd,
                        stderr=subprocess.STDOUT,
                        close_fds=True)
                    rh.printSysLog("Run `%s` successfully."
                                   % strCmd)
                    break
                except CalledProcessError as e:
                    if sleep_secs > 0:
                        rh.printSysLog("Num %d try `%s` failed ("
                                       "retry after %s seconds): "
                                       "rc=%d msg=%s" % (
                                           try_num, strCmd, sleep_secs,
                                           e.returncode, e.output))
                        time.sleep(sleep_secs)
                    else:
                        raise
            if isinstance(out, bytes):
                out = bytes.decode(out)
            rh.printLn("N", "File system: " + fileSystem +
                       " is installed.")
        except CalledProcessError as e:
            rh.printLn("ES", msgs.msg['0415'][1] % (modId, strCmd,
                e.returncode, e.output))
            results = msgs.msg['0415'][0]
            results['rs'] = e.returncode
            rh.updateResults(results)
        except Exception as e:
            # All other exceptions.
            rh.printLn("ES", msgs.msg['0421'][1] % (modId, strCmd,
                type(e).__name__, str(e)))
            results = msgs.msg['0421'][0]
            rh.updateResults(results)

    # TODO: diskAccessed hard code to True, because if linkdiskandbringonline
    # failed, can not set diskAccessed. will leave DASD undetached.
    # So always try to disconnect the disk. If this fixed in the future, need
    # remove this.
    diskAccessed = True
    if diskAccessed:
        # flush disk buffer before offline the disk.
        cmd = ["sudo", "/usr/sbin/blockdev", "--flushbufs", device]
        strCmd = ' '.join(cmd)
        rh.printSysLog("Invoking: " + strCmd)
        try:
            out = subprocess.check_output(cmd, close_fds=True)
            if isinstance(out, bytes):
                out = bytes.decode(out)
        except Exception as e:
            # log worning and ignore the exception
            wmesg = "Executing %(cmd)s failed: %(exp)s" % {'cmd': strCmd,
                                                           'exp': str(e)}
            rh.printLn("WS", wmesg)

        # Give up the disk.
        cmd = ["sudo", "/opt/zthin/bin/offlinediskanddetach",
               rh.userid,
               vaddr]
        strCmd = ' '.join(cmd)
        rh.printSysLog("Invoking: " + strCmd)
        try:
            out = subprocess.check_output(cmd, close_fds=True)
            if isinstance(out, bytes):
                out = bytes.decode(out)
        except CalledProcessError as e:
            rh.printLn("ES", msgs.msg['0415'][1] % (modId, strCmd,
                e.returncode, e.output))
            results = msgs.msg['0415'][0]
            results['rs'] = e.returncode
            rh.updateResults(results)
        except Exception as e:
            # All other exceptions.
            rh.printLn("ES", msgs.msg['0421'][1] % (modId, strCmd,
                type(e).__name__, str(e)))
            results = msgs.msg['0421'][0]
            rh.updateResults(results)

    rh.printSysLog("Exit vmUtils.installFS, rc: " + str(results['rc']))
    return results
def invokeSMCLI(rh, api, parms, hideInLog=[]):
    """ Invoke SMCLI and parse the results.

    Input:
       Request Handle
       API name,
       SMCLI parms as an array
       (Optional) List of parms (by index) to hide in
         sysLog by replacing the parm with "".

    Output:
       Dictionary containing the following:
          overallRC - overall return code, 0: success, non-zero: failure
          rc - RC returned from SMCLI if overallRC = 0.
          rs - RS returned from SMCLI if overallRC = 0.
          errno - Errno returned from SMCLI if overallRC = 0.
          response - String output of the SMCLI command.

    Note:
       - If the first three words of the header returned from smcli
         do not do not contain words that represent valid integer
         values or contain too few words then one or more error
         messages are generated. THIS SHOULD NEVER OCCUR !!!!
    """
    if len(hideInLog) == 0:
        rh.printSysLog("Enter vmUtils.invokeSMCLI, userid: " +
                       rh.userid + ", function: " + api +
                       ", parms: " + str(parms))
    else:
        # NOTE(review): logParms aliases the caller's parms list, so the
        # redaction below also blanks the caller's copy — confirm whether
        # a shallow copy (parms[:]) was intended.
        logParms = parms
        for i in hideInLog:
            logParms[i] = ''
        rh.printSysLog("Enter vmUtils.invokeSMCLI, userid: " +
                       rh.userid + ", function: " + api +
                       ", parms: " + str(logParms))

    goodHeader = False

    results = {
              'overallRC': 0,
              'rc': 0,
              'rs': 0,
              'errno': 0,
              'response': [],
              'strError': '',
             }

    cmd = []
    cmd.append('sudo')
    cmd.append('/opt/zthin/bin/smcli')
    cmd.append(api)
    cmd.append('--addRCheader')

    status = vmStatus.GetSMAPIStatus()

    try:
        # On success the first line is the RC header; return the rest.
        smcliResp = subprocess.check_output(cmd + parms,
                                            close_fds=True)
        if isinstance(smcliResp, bytes):
            smcliResp = bytes.decode(smcliResp, errors='replace')
        smcliResp = smcliResp.split('\n', 1)
        results['response'] = smcliResp[1]
        results['overallRC'] = 0
        results['rc'] = 0

        status.RecordSuccess()

    except CalledProcessError as e:
        status.RecordFail()
        strCmd = " ".join(cmd + parms)

        # Break up the RC header into its component parts.
        if e.output == '':
            smcliResp = ['']
        else:
            smcliResp = bytes.decode(e.output).split('\n', 1)

        # Split the header into its component pieces.
        # Expected form: "<orc> <rc> <word3> (details) <text>"
        rcHeader = smcliResp[0].split('(details)', 1)
        if len(rcHeader) == 0:
            rcHeader = ['', '']
        elif len(rcHeader) == 1:
            # No data after the details tag.  Add empty [1] value.
            rcHeader.append('')
        codes = rcHeader[0].split(' ')

        # Validate the rc, rs, and errno.
        if len(codes) < 3:
            # Unexpected number of codes.  Need at least 3.
            results = msgs.msg['0301'][0]
            results['response'] = msgs.msg['0301'][1] % (modId, api,
                strCmd, rcHeader[0], rcHeader[1])
        else:
            goodHeader = True
            # Convert the first word (overall rc from SMAPI) to an int
            # and set the SMT overall rc based on this value.
            orcError = False
            try:
                results['overallRC'] = int(codes[0])
                if results['overallRC'] not in [8, 24, 25]:
                    orcError = True
            except ValueError:
                goodHeader = False
                orcError = True
            if orcError:
                results['overallRC'] = 25    # SMCLI Internal Error
                results = msgs.msg['0302'][0]
                results['response'] = msgs.msg['0302'][1] % (modId,
                    api, codes[0], strCmd, rcHeader[0], rcHeader[1])

            # Convert the second word to an int and save as rc.
            try:
                results['rc'] = int(codes[1])
            except ValueError:
                goodHeader = False
                results = msgs.msg['0303'][0]
                results['response'] = msgs.msg['0303'][1] % (modId,
                    api, codes[1], strCmd, rcHeader[0], rcHeader[1])

            # Convert the third word to an int and save it as either
            # the rs or errno, depending on the overall rc.
            try:
                word3 = int(codes[2])
                if results['overallRC'] == 8:
                    results['rs'] = word3    # Must be an rs
                elif results['overallRC'] == 25:
                    results['errno'] = word3    # Must be the errno
                # We ignore word 3 for everyone else and default to 0.
            except ValueError:
                goodHeader = False
                results = msgs.msg['0304'][0]
                # NOTE(review): this message formats codes[1] although the
                # failing word is codes[2] — confirm against upstream.
                results['response'] = msgs.msg['0304'][1] % (modId,
                    api, codes[1], strCmd, rcHeader[0], rcHeader[1])

        results['strError'] = rcHeader[1].lstrip()

        if goodHeader:
            # Produce a message that provides the error info.
            results['response'] = msgs.msg['0300'][1] % (modId,
                api, results['overallRC'], results['rc'],
                results['rs'], results['errno'],
                strCmd, smcliResp[1])

    except Exception as e:
        # All other exceptions.
        strCmd = " ".join(cmd + parms)
        results = msgs.msg['0305'][0]
        results['response'] = msgs.msg['0305'][1] % (modId, strCmd,
            type(e).__name__, str(e))

    rh.printSysLog("Exit vmUtils.invokeSMCLI, rc: " +
                   str(results['overallRC']))
    return results
def isLoggedOn(rh, userid):
    """ Determine whether a virtual machine is logged on.

    Input:
       Request Handle:
          userid being queried

    Output:
       Dictionary containing the following:
          overallRC - overall return code, 0: success, non-zero: failure
          rc - 0: if we got status.  Otherwise, it is the
               error return code from the commands issued.
          rs - Based on rc value.  For rc==0, rs is:
               0: if we determined it is logged on.
               1: if we determined it is logged off.
    """
    rh.printSysLog("Enter vmUtils.isLoggedOn, userid: " + userid)

    results = {'overallRC': 0, 'rc': 0, 'rs': 0}

    cmd = ["sudo", "/sbin/vmcp", "query", "user", userid]
    strCmd = ' '.join(cmd)
    rh.printSysLog("Invoking: " + strCmd)
    try:
        # A zero exit code means CP knows the user and it is logged on.
        subprocess.check_output(cmd,
                                close_fds=True,
                                stderr=subprocess.STDOUT)
    except CalledProcessError as e:
        # HCPxxx045E / HCPxxx361E in the output mean "not logged on";
        # anything else is a genuine failure.
        offPattern = '(^HCP\w\w\w045E|^HCP\w\w\w361E)'.encode()
        if re.search(offPattern, e.output):
            # Not logged on
            results['rs'] = 1
        else:
            # Abnormal failure
            rh.printLn("ES", msgs.msg['0415'][1] % (modId, strCmd,
                e.returncode, e.output))
            results = msgs.msg['0415'][0]
            results['rs'] = e.returncode
    except Exception as e:
        # All other exceptions.
        results = msgs.msg['0421'][0]
        rh.printLn("ES", msgs.msg['0421'][1] % (modId, strCmd,
            type(e).__name__, str(e)))

    rh.printSysLog("Exit vmUtils.isLoggedOn, overallRC: " +
                   str(results['overallRC']) +
                   " rc: " + str(results['rc']) +
                   " rs: " + str(results['rs']))
    return results
    Input:
       Request Handle - for general use and to hold the results
       userid         - userid of the virtual machine
       fileLoc        - File to send
       spoolClass     - Spool class

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter punch2reader.punchFile")
    results = {}

    # Setting rc to time out rc code as default and its changed during runtime
    # rc sentinel values used below: 9 = vmur timed out (never succeeded),
    # 7 = vmur punch failed outright, 0 = punch succeeded.
    results['rc'] = 9

    # Punch to the current user initially and then change the spool class.
    # Retries with increasing back-off while another vmur instance holds
    # the device.
    cmd = ["sudo", "/usr/sbin/vmur", "punch", "-r", fileLoc]
    strCmd = ' '.join(cmd)
    for secs in [1, 2, 3, 5, 10]:
        rh.printSysLog("Invoking: " + strCmd)
        try:
            results['response'] = subprocess.check_output(cmd,
                close_fds=True,
                stderr=subprocess.STDOUT)
            if isinstance(results['response'], bytes):
                # Python 3 returns bytes; normalize to str.
                results['response'] = bytes.decode(results['response'])
            results['rc'] = 0
            rh.updateResults(results)
            break
        except CalledProcessError as e:
            results['response'] = e.output
            # Check if we have concurrent instance of vmur active
            to_find = "A concurrent instance of vmur is already active"
            to_find = to_find.encode()
            if results['response'].find(to_find) == -1:
                # Failure in VMUR punch update the rc
                results['rc'] = 7
                break
            else:
                # if concurrent vmur is active try after sometime
                rh.printSysLog("Punch in use. Retrying after " +
                               str(secs) + " seconds")
                time.sleep(secs)
        except Exception as e:
            # All other exceptions.
            rh.printLn("ES", msgs.msg['0421'][1] % (modId, strCmd,
                type(e).__name__, str(e)))
            results = msgs.msg['0421'][0]
            rh.updateResults(results)

    if results['rc'] == 7:
        # Failure while issuing vmur command (For eg: invalid file given)
        msg = msgs.msg['0401'][1] % (modId, fileLoc, userid,
                                     results['response'])
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0401'][0])

    elif results['rc'] == 9:
        # Failure due to vmur timeout
        msg = msgs.msg['0406'][1] % (modId, fileLoc)
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0406'][0])

    if rh.results['overallRC'] == 0:
        # On VMUR success change the class of the spool file
        # NOTE(review): assumes the first number in the vmur response is the
        # spool id of the punched file -- confirm against vmur output format.
        spoolId = re.findall(r'\d+', str(results['response']))
        cmd = ["sudo", "vmcp", "change", "rdr", str(spoolId[0]), "class",
               spoolClass]
        strCmd = " ".join(cmd)
        rh.printSysLog("Invoking: " + strCmd)
        try:
            results['response'] = subprocess.check_output(cmd,
                close_fds=True,
                stderr=subprocess.STDOUT)
            if isinstance(results['response'], bytes):
                results['response'] = bytes.decode(results['response'])
            rh.updateResults(results)
        except CalledProcessError as e:
            msg = msgs.msg['0404'][1] % (modId,
                                         spoolClass,
                                         e.output)
            rh.printLn("ES", msg)
            rh.updateResults(msgs.msg['0404'][0])
            # Class change failed
            # Delete the punched file from current userid
            cmd = ["sudo", "vmcp", "purge", "rdr", spoolId[0]]
            strCmd = " ".join(cmd)
            rh.printSysLog("Invoking: " + strCmd)
            try:
                results['response'] = subprocess.check_output(cmd,
                    close_fds=True,
                    stderr=subprocess.STDOUT)
                if isinstance(results['response'], bytes):
                    results['response'] = bytes.decode(results['response'])
                # We only need to issue the printLn.
                # Don't need to change return/reason code values
            except CalledProcessError as e:
                msg = msgs.msg['0403'][1] % (modId,
                                             spoolId[0], e.output)
                rh.printLn("ES", msg)
            except Exception as e:
                # All other exceptions related to purge.
                # We only need to issue the printLn.
                # Don't need to change return/reason code values
                rh.printLn("ES", msgs.msg['0421'][1] % (modId, strCmd,
                    type(e).__name__, str(e)))
        except Exception as e:
            # All other exceptions related to change rdr.
            results = msgs.msg['0421'][0]
            rh.printLn("ES", msgs.msg['0421'][1] % (modId, strCmd,
                type(e).__name__, str(e)))
            rh.updateResults(msgs.msg['0421'][0])

    if rh.results['overallRC'] == 0:
        # Transfer the file from current user to specified user
        cmd = ["sudo", "vmcp", "transfer", "*", "rdr", str(spoolId[0]), "to",
               userid, "rdr"]
        strCmd = " ".join(cmd)
        rh.printSysLog("Invoking: " + strCmd)
        try:
            results['response'] = subprocess.check_output(cmd,
                close_fds=True,
                stderr=subprocess.STDOUT)
            if isinstance(results['response'], bytes):
                results['response'] = bytes.decode(results['response'])
            rh.updateResults(results)
        except CalledProcessError as e:
            msg = msgs.msg['0424'][1] % (modId,
                                         fileLoc, userid, e.output)
            rh.printLn("ES", msg)
            rh.updateResults(msgs.msg['0424'][0])
            # Transfer failed so delete the punched file from current userid
            cmd = ["sudo", "vmcp", "purge", "rdr", spoolId[0]]
            strCmd = " ".join(cmd)
            rh.printSysLog("Invoking: " + strCmd)
            try:
                results['response'] = subprocess.check_output(cmd,
                    close_fds=True,
                    stderr=subprocess.STDOUT)
                if isinstance(results['response'], bytes):
                    results['response'] = bytes.decode(results['response'])
                # We only need to issue the printLn.
                # Don't need to change return/reason code values
            except CalledProcessError as e:
                msg = msgs.msg['0403'][1] % (modId,
                                             spoolId[0], e.output)
                rh.printLn("ES", msg)
            except Exception as e:
                # All other exceptions related to purge.
                rh.printLn("ES", msgs.msg['0421'][1] % (modId, strCmd,
                    type(e).__name__, str(e)))
        except Exception as e:
            # All other exceptions related to transfer.
            results = msgs.msg['0421'][0]
            rh.printLn("ES", msgs.msg['0421'][1] % (modId, strCmd,
                type(e).__name__, str(e)))
            rh.updateResults(msgs.msg['0421'][0])

    rh.printSysLog("Exit vmUtils.punch2reader, rc: " +
                   str(rh.results['overallRC']))
    return rh.results['overallRC']


def waitForOSState(rh, userid, desiredState, maxQueries=90, sleepSecs=5):
    """ Wait for the virtual OS to go into the indicated state.

    Input:
       Request Handle
       userid whose state is to be monitored
       Desired state, 'up' or 'down', case sensitive
       Maximum attempts to wait for desired state before giving up
       Sleep duration between waits

    Output:
       Dictionary containing the following:
          overallRC - overall return code, 0: success, non-zero: failure
          rc        - RC returned from execCmdThruIUCV if overallRC = 0.
          rs        - RS returned from execCmdThruIUCV if overallRC = 0.
          errno     - Errno returned from execCmdThruIUCV if overallRC = 0.
          response  - Updated with an error message if wait times out.

    Note:

    """
    rh.printSysLog("Enter vmUtils.waitForOSState, userid: " + userid +
                   " state: " + desiredState +
                   " maxWait: " + str(maxQueries) +
                   " sleepSecs: " + str(sleepSecs))

    results = {}

    strCmd = "echo 'ping'"
    stateFnd = False

    for i in range(1, maxQueries + 1):
        # A successful IUCV 'ping' means the guest OS is up.
        # NOTE(review): this queries rh.userid rather than the userid
        # parameter passed to this function -- confirm intentional.
        results = execCmdThruIUCV(rh, rh.userid, strCmd)
        if results['overallRC'] == 0:
            if desiredState == 'up':
                stateFnd = True
                break
        else:
            if desiredState == 'down':
                stateFnd = True
                break
        if i < maxQueries:
            time.sleep(sleepSecs)

    if stateFnd is True:
        results = {
            'overallRC': 0,
            'rc': 0,
            'rs': 0,
        }
    else:
        # Timed out waiting for the desired state.
        maxWait = maxQueries * sleepSecs
        rh.printLn("ES", msgs.msg['0413'][1] % (modId, userid,
            desiredState, maxWait))
        results = msgs.msg['0413'][0]

    rh.printSysLog("Exit vmUtils.waitForOSState, rc: " +
        str(results['overallRC']))
    return results


def waitForVMState(rh, userid, desiredState, maxQueries=90, sleepSecs=5):
    """ Wait for the virtual machine to go into the indicated state.
Input: Request Handle userid whose state is to be monitored Desired state, 'on' or 'off', case sensitive Maximum attempts to wait for desired state before giving up Sleep duration between waits Output: Dictionary containing the following: overallRC - overall return code, 0: success, non-zero: failure rc - RC returned from SMCLI if overallRC = 0. rs - RS returned from SMCLI if overallRC = 0. Note: """ rh.printSysLog("Enter vmUtils.waitForVMState, userid: " + userid + " state: " + desiredState + " maxWait: " + str(maxQueries) + " sleepSecs: " + str(sleepSecs)) results = {} maxQueries = int(maxQueries) cmd = ["sudo", "/sbin/vmcp", "query", "user", userid] strCmd = " ".join(cmd) stateFnd = False for i in range(1, maxQueries + 1): rh.printSysLog("Invoking: " + strCmd) try: out = subprocess.check_output( cmd, close_fds=True, stderr=subprocess.STDOUT) if isinstance(out, bytes): out = bytes.decode(out) rh.printSysLog("Query user output: " + out) if desiredState == 'on': stateFnd = True break except CalledProcessError as e: out = e.output if isinstance(out, bytes): out = bytes.decode(out) rh.printSysLog("Query user output: " + out) match = re.search('(^HCP\w\w\w045E|^HCP\w\w\w361E)', out) if match: # Logged off if desiredState == 'off': stateFnd = True break else: # Abnormal failure rh.printLn("ES", msgs.msg['0415'][1] % (modId, strCmd, e.returncode, out)) results = msgs.msg['0415'][0] results['rs'] = e.returncode break except Exception as e: # All other exceptions. rh.printLn("ES", msgs.msg['0421'][1] % (modId, strCmd, type(e).__name__, str(e))) results = msgs.msg['0421'][0] if i < maxQueries: # Sleep a bit before looping. 
            time.sleep(sleepSecs)

    if stateFnd is True:
        results = {
            'overallRC': 0,
            'rc': 0,
            'rs': 0,
        }
    else:
        # Timed out waiting for the desired power state.
        maxWait = maxQueries * sleepSecs
        rh.printLn("ES", msgs.msg['0414'][1] % (modId, userid,
            desiredState, maxWait))
        results = msgs.msg['0414'][0]

    rh.printSysLog("Exit vmUtils.waitForVMState, rc: " +
        str(results['overallRC']))
    return results


def purgeReader(rh):
    """
    Purge reader of the specified userid.

    Input:
       Request Handle

    Output:
       Dictionary containing the following:
          overallRC - overall return code, 0: success, non-zero: failure
          rc        - RC returned from SMCLI if overallRC = 0.
          rs        - RS returned from SMCLI if overallRC = 0.
          errno     - Errno returned from SMCLI if overallRC = 0.
          response  - Updated with an error message.

    Note:

    """
    rh.printSysLog("Enter vmUtils.purgeRDR, userid: " + rh.userid)
    results = {'overallRC': 0,
               'rc': 0,
               'rs': 0,
               'response': []}

    # 'spoolids=all' purges every spool file in the target's reader.
    parms = ['-T', rh.userid, '-k', 'spoolids=all']

    results = invokeSMCLI(rh, "System_RDR_File_Manage", parms)

    if results['overallRC'] != 0:
        rh.printLn("ES", results['response'])
        rh.updateResults(results)

    rh.printSysLog("Exit vmUtils.purgeReader, rc: " +
                   str(results['overallRC']))
    return results
zVMCloudConnector-1.6.3/smtLayer/ReqHandle.py0000664000175000017510000003362613672563714020557 0ustar  ruirui00000000000000# Request Handle for Systems Management Ultra Thin Layer
#
# Copyright 2017 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import logging.handlers
import shlex
from six import string_types

from smtLayer import changeVM
from smtLayer import cmdVM
from smtLayer import deleteVM
from smtLayer import getHost
from smtLayer import getVM
from smtLayer import makeVM
from smtLayer import migrateVM
from smtLayer import msgs
from smtLayer import smapi
from smtLayer import powerVM

from zvmsdk import log as zvmsdklog

modId = "RQH"
version = '1.0.0'        # Version of this script


class ReqHandle(object):
    """
    Systems Management Ultra Thin Layer Request Handle.
    This class contains all information related to a specific request.
    All functions are passed this request handle.
    """

    # Dispatch table: maps a function name to its four handlers, in order:
    #   [0] showInvLines   - prints invocation help lines
    #   [1] showOperandLines - prints operand help lines
    #   [2] parseCmdline   - parses function-specific command operands
    #   [3] doIt           - routes to the subfunction and executes it
    funcHandler = {
        'CHANGEVM': [
            lambda rh: changeVM.showInvLines(rh),
            lambda rh: changeVM.showOperandLines(rh),
            lambda rh: changeVM.parseCmdline(rh),
            lambda rh: changeVM.doIt(rh)],
        'CMDVM': [
            lambda rh: cmdVM.showInvLines(rh),
            lambda rh: cmdVM.showOperandLines(rh),
            lambda rh: cmdVM.parseCmdline(rh),
            lambda rh: cmdVM.doIt(rh)],
        'DELETEVM': [
            lambda rh: deleteVM.showInvLines(rh),
            lambda rh: deleteVM.showOperandLines(rh),
            lambda rh: deleteVM.parseCmdline(rh),
            lambda rh: deleteVM.doIt(rh)],
        'GETHOST': [
            lambda rh: getHost.showInvLines(rh),
            lambda rh: getHost.showOperandLines(rh),
            lambda rh: getHost.parseCmdline(rh),
            lambda rh: getHost.doIt(rh)],
        'GETVM': [
            lambda rh: getVM.showInvLines(rh),
            lambda rh: getVM.showOperandLines(rh),
            lambda rh: getVM.parseCmdline(rh),
            lambda rh: getVM.doIt(rh)],
        'MAKEVM': [
            lambda rh: makeVM.showInvLines(rh),
            lambda rh: makeVM.showOperandLines(rh),
            lambda rh: makeVM.parseCmdline(rh),
            lambda rh: makeVM.doIt(rh)],
        'MIGRATEVM': [
            lambda rh: migrateVM.showInvLines(rh),
            lambda rh: migrateVM.showOperandLines(rh),
            lambda rh: migrateVM.parseCmdline(rh),
            lambda rh: migrateVM.doIt(rh)],
        'POWERVM': [
            lambda rh: powerVM.showInvLines(rh),
            lambda rh: powerVM.showOperandLines(rh),
            lambda rh: powerVM.parseCmdline(rh),
            lambda rh: powerVM.doIt(rh)],
        'SMAPI': [
            lambda rh: smapi.showInvLines(rh),
            lambda rh: smapi.showOperandLines(rh),
            lambda rh: smapi.parseCmdline(rh),
            lambda rh: smapi.doIt(rh)],
        }

    def __init__(self, **kwArgs):
        """
        Constructor

        Input:
           captureLogs=  Enables or disables log capture for all requests.
           cmdName=      Name of the command that is using ReqHandle.
                         This is only used for the function help.
                         It defaults to "smtCmd.py".
           requestId=requestId
                         Optional request Id
           smt=          SMT daemon, it it exists.
        """
        self.results = {
            'overallRC': 0,      # Overall return code for the function, e.g.
                                 #   0  - Everything went ok
                                 #   2  - Something in the IUCVCLNT failed
                                 #   3  - Something in a local vmcp failed
                                 #   4  - Input validation error
                                 #   5  - Miscellaneous processing error
                                 #   8  - SMCLI - SMAPI failure
                                 #   24 - SMCLI - Parsing failure
                                 #   25 - SMCLI - Internal Processing Error
                                 #   99 - Unexpected failure
            'rc': 0,             # Return code causing the return
            'rs': 0,             # Reason code causing the return
            'errno': 0,          # Errno value causing the return
            'strError': '',      # Error as a string value.
                                 #   Normally, this is the errno description.
            'response': [],      # Response strings
            'logEntries': [],    # Syslog entries related to this request
            }

        if 'smt' in kwArgs.keys():
            self.daemon = kwArgs['smt']    # SMT Daemon
            # Actual SysLog handling is done in SMT.
        else:
            self.daemon = ''
            # Set up SysLog handling to be done by ReqHandle
            self.logger = logging.getLogger(__name__)
            self.logger.setLevel(logging.DEBUG)
            self.handler = logging.handlers.SysLogHandler(address = '/dev/log')
            self.formatter = (
                logging.Formatter('%(module)s.%(funcName)s: %(message)s'))
            self.handler.setFormatter(self.formatter)
            self.logger.addHandler(self.handler)

        if 'cmdName' in kwArgs.keys():
            self.cmdName = kwArgs['cmdName']
        else:
            self.cmdName = 'smtCmd.py'

        if 'requestId' in kwArgs.keys():
            self.requestId = kwArgs['requestId']
        else:
            self.requestId = 'REQ_' + hex(id(self))[2:]
            # Need to generate a default request Id

        self.function = ''       # Function being processed
        self.subfunction = ''    # Subfunction be processed (optional)
        self.userid = ''         # Target userid
        self.parms = {}          # Dictionary of additional parms
        self.argPos = 0          # Prep to parse first command line arg

        # Capture & return Syslog entries
        if 'captureLogs' in kwArgs.keys():
            self.captureLogs = kwArgs['captureLogs']
        else:
            self.captureLogs = False

    def driveFunction(self):
        """
        Drive the function/subfunction call.

        Input:
           Self with request filled in.

        Output:
           Request Handle updated with the results.
           Overall return code - 0: successful, non-zero: error
        """
        if self.function == 'HELP':
            # General help for all functions.
            self.printLn("N", "")
            self.printLn("N", "Usage:")
            self.printLn("N", "  python " + self.cmdName + " --help")
            for key in sorted(ReqHandle.funcHandler):
                ReqHandle.funcHandler[key][0](self)
            self.printLn("N", "")
            self.printLn("N", "Operand(s):")
            for key in sorted(ReqHandle.funcHandler):
                ReqHandle.funcHandler[key][1](self)
            self.printLn("N", "")
            self.updateResults({}, reset=1)
        elif self.function == 'VERSION':
            # Version of ReqHandle.
            self.printLn("N", "Version: " + version)
            self.updateResults({}, reset=1)
        else:
            # Some type of function/subfunction invocation.
            if self.function in self.funcHandler:
                # Invoke the functions doIt routine to route to the
                # appropriate subfunction.
                self.funcHandler[self.function][3](self)
            else:
                # Unrecognized function
                msg = msgs.msg['0007'][1] % (modId, self.function)
                self.printLn("ES", msg)
                self.updateResults(msgs.msg['0007'][0])

        return self.results

    def parseCmdline(self, requestData):
        """
        Parse the request command string.

        Input:
           Self with request filled in.

        Output:
           Request Handle updated with the parsed information so that
           it is accessible via key/value pairs for later processing.
           Return code - 0: successful, non-zero: error
        """
        self.printSysLog("Enter ReqHandle.parseCmdline")

        # Save the request data based on the type of operand.
        if isinstance(requestData, list):
            self.requestString = ' '.join(requestData)  # Request as a string
            self.request = requestData                  # Request as a list
        elif isinstance(requestData, string_types):
            self.requestString = requestData            # Request as a string
            # shlex honors quoting so quoted operands stay as one parm.
            self.request = shlex.split(requestData)     # Request as a list
        else:
            # Request data type is not supported.
            msg = msgs.msg['0012'][1] % (modId, type(requestData))
            self.printLn("ES", msg)
            self.updateResults(msgs.msg['0012'][0])
            return self.results
        self.totalParms = len(self.request)   # Number of parms in the cmd

        # Handle the request, parse it or return an error.
        if self.totalParms == 0:
            # Too few arguments.
            msg = msgs.msg['0009'][1] % modId
            self.printLn("ES", msg)
            self.updateResults(msgs.msg['0009'][0])
        elif self.totalParms == 1:
            self.function = self.request[0].upper()
            if self.function == 'HELP' or self.function == 'VERSION':
                pass
            else:
                # Function is not HELP or VERSION.
                msg = msgs.msg['0008'][1] % (modId, self.function)
                self.printLn("ES", msg)
                self.updateResults(msgs.msg['0008'][0])
        else:
            # Process based on the function operand.
            self.function = self.request[0].upper()
            if self.request[0] == 'HELP' or self.request[0] == 'VERSION':
                pass
            else:
                # Handle the function related parms by calling the function
                # parser.
                if self.function in ReqHandle.funcHandler:
                    self.funcHandler[self.function][2](self)
                else:
                    # Unrecognized function
                    msg = msgs.msg['0007'][1] % (modId, self.function)
                    self.printLn("ES", msg)
                    self.updateResults(msgs.msg['0007'][0])

        self.printSysLog("Exit ReqHandle.parseCmdline, rc: " +
            str(self.results['overallRC']))
        return self.results

    def printLn(self, respType, respString):
        """
        Add one or lines of output to the response list.

        Input:
           Response type: One or more characters indicate type of response.
              E - Error message
              N - Normal message
              S - Output should be logged
              W - Warning message
        """
        if 'E' in respType:
            respString = '(Error) ' + respString
        if 'W' in respType:
            respString = '(Warning) ' + respString
        if 'S' in respType:
            self.printSysLog(respString)
        # splitlines lets a single multi-line string become one response
        # entry per line.
        self.results['response'] = (self.results['response'] +
                                    respString.splitlines())
        return

    def printSysLog(self, logString):
        """
        Log one or more lines.  Optionally, add them to logEntries list.

        Input:
           Strings to be logged.
        """
        if self.daemon:
            # Daemon mode: SMT owns the logger.
            self.daemon.logger.debug(self.requestId + ": " + logString)
        elif zvmsdklog.LOGGER.getloglevel() <= logging.DEBUG:
            # print log only when debug is enabled
            if self.daemon == '':
                self.logger.debug(self.requestId + ": " + logString)

        if self.captureLogs is True:
            self.results['logEntries'].append(self.requestId + ": " +
                logString)
        return

    def updateResults(self, newResults, **kwArgs):
        """
        Update the results related to this request excluding the
        'response' and 'logEntries' values.
        We specifically update (if present): overallRC, rc, rs, errno.

        Input:
           Dictionary containing the results to be updated or an empty
              dictionary the reset keyword was specified.
           Reset keyword:
              0 - Not a reset.  This is the default is reset keyword was not
                  specified.
              1 - Reset failure related items in the result dictionary.
                  This exclude responses and log entries.
              2 - Reset all result items in the result dictionary.

        Output:
           Request handle is updated with the results.
""" if 'reset' in kwArgs.keys(): reset = kwArgs['reset'] else: reset = 0 if reset == 0: # Not a reset. Set the keys from the provided dictionary. for key in newResults.keys(): if key == 'response' or key == 'logEntries': continue self.results[key] = newResults[key] elif reset == 1: # Reset all failure related items. self.results['overallRC'] = 0 self.results['rc'] = 0 self.results['rs'] = 0 self.results['errno'] = 0 self.results['strError'] = '' elif reset == 2: # Reset all results information including any responses and # log entries. self.results['overallRC'] = 0 self.results['rc'] = 0 self.results['rs'] = 0 self.results['errno'] = 0 self.results['strError'] = '' self.results['logEntries'] = '' self.results['response'] = '' return zVMCloudConnector-1.6.3/smtLayer/getHost.py0000664000175000017510000006016514315210052020312 0ustar ruirui00000000000000# GetHost functions for Systems Management Ultra Thin Layer # # Copyright 2017,2021 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import subprocess from smtLayer import generalUtils from smtLayer import msgs from smtLayer.vmUtils import invokeSMCLI from zvmsdk import config modId = 'GHO' version = "1.0.0" # maximum I knew is 60019, so make it double maximumCyl = 130000 """ List of subfunction handlers. Each subfunction contains a list that has: Readable name of the routine that handles the subfunction, Code for the function call. 
""" subfuncHandler = { 'DISKPOOLNAMES': ['help', lambda rh: getDiskPoolNames(rh)], 'DISKPOOLVOLUMES': ['help', lambda rh: getDiskPoolVolumes(rh)], 'VOLUMEINFO': ['help', lambda rh: getVolumeInfo(rh)], 'DISKPOOLSPACE': ['help', lambda rh: getDiskPoolSpace(rh)], 'FCPDEVICES': ['help', lambda rh: getFcpDevices(rh)], 'GENERAL': ['help', lambda rh: getGeneralInfo(rh)], 'HELP': ['help', lambda rh: help(rh)], 'VERSION': ['getVersion', lambda rh: getVersion(rh)], } """ List of positional operands based on subfunction. Each subfunction contains a list which has a dictionary with the following information for the positional operands: - Human readable name of the operand, - Property in the parms dictionary to hold the value, - Is it required (True) or optional (False), - Type of data (1: int, 2: string). """ posOpsList = { 'DISKPOOLSPACE': [ ['Disk Pool Name', 'poolName', False, 2] ], 'DISKPOOLVOLUMES': [ ['Disk Pool Name', 'poolName', False, 2] ] } """ List of additional operands/options supported by the various subfunctions. The dictionary followng the subfunction name uses the keyword from the command as a key. Each keyword has a dictionary that lists: - the related parms item that stores the value, - how many values follow the keyword, and - the type of data for those values (1: int, 2: string) """ keyOpsList = { 'DISKPOOLNAMES': {'--showparms': ['showParms', 0, 0]}, 'DISKPOOLSPACE': {'--showparms': ['showParms', 0, 0]}, 'VOLUMEINFO': {'--showparms': ['showParms', 0, 0]}, 'FCPDEVICES': {'--showparms': ['showParms', 0, 0]}, 'GENERAL': {'--showparms': ['showParms', 0, 0]}, 'HELP': {'--showparms': ['showParms', 0, 0]}, 'VERSION': {'--showparms': ['showParms', 0, 0]}, } def doIt(rh): """ Perform the requested function by invoking the subfunction handler. Input: Request Handle Output: Request Handle updated with parsed input. Return code - 0: ok, non-zero: error """ rh.printSysLog("Enter getHost.doIt") # Show the invocation parameters, if requested. 
    if 'showParms' in rh.parms and rh.parms['showParms'] is True:
        rh.printLn("N", "Invocation parameters: ")
        rh.printLn("N", "  Routine: getHost." +
            str(subfuncHandler[rh.subfunction][0]) + "(rh)")
        rh.printLn("N", "  function: " + rh.function)
        rh.printLn("N", "  userid: " + rh.userid)
        rh.printLn("N", "  subfunction: " + rh.subfunction)
        rh.printLn("N", "  parms{}: ")
        for key in rh.parms:
            if key != 'showParms':
                rh.printLn("N", "    " + key + ": " +
                    str(rh.parms[key]))
        rh.printLn("N", " ")

    # Call the subfunction handler
    subfuncHandler[rh.subfunction][1](rh)

    rh.printSysLog("Exit getHost.doIt, rc: " + str(rh.results['overallRC']))
    return rh.results['overallRC']


def getDiskPoolNames(rh):
    """
    Obtain the list of disk pools known to the directory manager.

    Input:
       Request Handle with the following properties:
          function    - 'GETHOST'
          subfunction - 'DISKPOOLNAMES'

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter getHost.getDiskPoolNames")

    parms = ["-q", "1", "-e", "3", "-T", "dummy"]
    results = invokeSMCLI(rh, "Image_Volume_Space_Query_DM", parms)
    if results['overallRC'] == 0:
        for line in results['response'].splitlines():
            # First token of each response line is the pool name.
            poolName = line.partition(' ')[0]
            rh.printLn("N", poolName)
    else:
        # SMAPI API failed.
        rh.printLn("ES", results['response'])
        rh.updateResults(results)    # Use results from invokeSMCLI

    rh.printSysLog("Exit getHost.getDiskPoolNames, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']


def getDiskPoolVolumes(rh):
    """
    Obtain the list of volumes for the disk_pools on the hypervisor.

    Input:
       Request Handle with the following properties:
          function    - 'GETHOST'
          subfunction - 'DISKPOOLVOLUMES'

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter getHost.getDiskPoolVolumes")

    if 'poolName' not in rh.parms:
        # No pool specified: query all pools.
        poolNames = ["*"]
    else:
        if isinstance(rh.parms['poolName'], list):
            poolNames = rh.parms['poolName']
        else:
            poolNames = [rh.parms['poolName']]

    parms = ["-q", "1", "-e", "3", "-T", "dummy",
             "-n", " ".join(poolNames)]
    results = invokeSMCLI(rh, "Image_Volume_Space_Query_DM", parms)
    if results['overallRC'] == 0:
        for line in results['response'].splitlines():
            # Drop the leading pool-name token, keep the volume list.
            poolVolumes = line.strip().split()
            poolVolumes.pop(0)
            poolVolumes = ' '.join(poolVolumes)
            # Create output string
            outstr = 'Diskpool Volumes:' + poolVolumes
            rh.printLn("N", outstr)
    else:
        # SMAPI API failed.
        rh.printLn("ES", results['response'])
        rh.updateResults(results)    # Use results from invokeSMCLI

    rh.printSysLog("Exit getHost.getDiskPoolVolumes, rc: " +
                   str(rh.results['overallRC']))
    return rh.results['overallRC']


def getVolumeInfo(rh):
    """
    Obtain the description info of the volume on the hypervisor.

    Input:
       Request Handle with the following properties:
          function    - 'GETHOST'
          subfunction - 'VOLUMEINFO'

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter getHost.getVolumeInfo")

    if 'volumeName' not in rh.parms:
        volumeName = ["*"]
    else:
        if isinstance(rh.parms['volumeName'], list):
            volumeName = rh.parms['volumeName']
        else:
            volumeName = [rh.parms['volumeName']]

    parms = ["-q", "1", "-e", "1", "-T", "dummy",
             "-n", " ".join(volumeName)]
    results = invokeSMCLI(rh, "Image_Volume_Space_Query_DM", parms)
    if results['overallRC'] == 0:
        for line in results['response'].splitlines():
            # Response line format: <name> <type> <size> ...
            volumeInfo = line.strip().split()
            volumeName = volumeInfo[0]
            volumeType = volumeInfo[1]
            volumeSize = volumeInfo[2]
            # Create output string
            outstr1 = 'volume_name:' + volumeName
            outstr2 = 'volume_type:' + volumeType
            outstr3 = 'volume_size:' + volumeSize
            rh.printLn("N", outstr1)
            rh.printLn("N", outstr2)
            rh.printLn("N", outstr3)
    else:
        # SMAPI API failed.
rh.printLn("ES", results['response']) rh.updateResults(results) # Use results from invokeSMCLI rh.printSysLog("Exit getHost.getVolumeInfo, rc: " + str(rh.results['overallRC'])) return rh.results['overallRC'] def _getDiskSize(parts): size = 0 if parts[1][:4] == "3390": size = int(parts[3]) * 737280 elif parts[1][:4] == "9336": size = int(parts[3]) * 512 else: # now we don't know the type, it might be caused # by SMAPI layer and we got a ??? type # then let's guess the type if > maximumCyl # then think it's a 9336, otherwise, take as 3390 if int(parts[3]) > maximumCyl: size = int(parts[3]) * 512 else: size = int(parts[3]) * 737280 return size def getDiskPoolSpace(rh): """ Obtain disk pool space information for all or a specific disk pool. Input: Request Handle with the following properties: function - 'GETHOST' subfunction - 'DISKPOOLSPACE' parms['poolName'] - Name of the disk pool. Optional, if not present then information for all disk pools is obtained. Output: Request Handle updated with the results. Return code - 0: ok, non-zero: error """ rh.printSysLog("Enter getHost.getDiskPoolSpace") results = {'overallRC': 0} if 'poolName' not in rh.parms: poolNames = ["*"] else: if isinstance(rh.parms['poolName'], list): poolNames = rh.parms['poolName'] else: poolNames = [rh.parms['poolName']] if results['overallRC'] == 0: # Loop thru each pool getting total. Do it for query 2 & 3 totals = {} for qType in ["2", "3"]: parms = [ "-q", qType, "-e", "3", "-T", "DUMMY", "-n", " ".join(poolNames)] results = invokeSMCLI(rh, "Image_Volume_Space_Query_DM", parms) if results['overallRC'] == 0: for line in results['response'].splitlines(): parts = line.split() if len(parts) == 9: poolName = parts[7] else: poolName = parts[4] if poolName not in totals: totals[poolName] = {"2": 0., "3": 0.} totals[poolName][qType] += _getDiskSize(parts) else: # SMAPI API failed. 
rh.printLn("ES", results['response']) rh.updateResults(results) # Use results from invokeSMCLI break if results['overallRC'] == 0: if len(totals) == 0: # No pool information found. msg = msgs.msg['0402'][1] % (modId, " ".join(poolNames)) rh.printLn("ES", msg) rh.updateResults(msgs.msg['0402'][0]) else: # Produce a summary for each pool for poolName in sorted(totals): total = totals[poolName]["2"] + totals[poolName]["3"] rh.printLn("N", poolName + " Total: " + generalUtils.cvtToMag(rh, total)) rh.printLn("N", poolName + " Used: " + generalUtils.cvtToMag(rh, totals[poolName]["3"])) rh.printLn("N", poolName + " Free: " + generalUtils.cvtToMag(rh, totals[poolName]["2"])) rh.printSysLog("Exit getHost.getDiskPoolSpace, rc: " + str(rh.results['overallRC'])) return rh.results['overallRC'] def getFcpDevices(rh): """ Lists the FCP device channels that are active, free, or offline. Input: Request Handle with the following properties: function - 'GETHOST' subfunction - 'FCPDEVICES' Output: Request Handle updated with the results. Return code - 0: ok, non-zero: error """ rh.printSysLog("Enter getHost.getFcpDevices") parms = ["-T", "dummy"] results = invokeSMCLI(rh, "System_WWPN_Query", parms) if results['overallRC'] == 0: rh.printLn("N", results['response']) else: # SMAPI API failed. rh.printLn("ES", results['response']) rh.updateResults(results) # Use results from invokeSMCLI rh.printSysLog("Exit getHost.getFcpDevices, rc: " + str(rh.results['overallRC'])) return rh.results['overallRC'] def getCPUCount(rh): """ Obtain general information about the host. Input: Request Handle Output: Request Handle updated with the results. 
       Return code - not 0: ok
       Return code - 0,0: problem getting some info by System_Processor_Query
    """
    rh.printSysLog("Enter getHost.lparCPUCount")
    rh.results['overallRC'] = 0
    # LPAR CPUs total and used is not support mixed CP + IFL
    # So get cpu num from System_Processor_Query
    # to override LPAR CPUs total and used
    parms = []
    results = invokeSMCLI(rh, "System_Processor_Query", parms)
    cpu_total = 0
    cpu_use = 0
    if results['overallRC'] == 0:
        flag = 0
        for line in results['response'].splitlines():
            # Drop the first token of the line; parse the remainder.
            line_value = line.partition(' ')[2]
            if not line_value.strip():
                continue
            else:
                type_row = line_value.split(' ')
                if len(type_row) > 1:
                    type_row = line_value.split(' ')[1]
                    # 'TYPE' marks the header row; rows after it carry
                    # per-processor status/type data.
                    if type_row == 'TYPE':
                        flag = 1
                    if flag == 1:
                        status_row = line_value.split(' ')[0]
                        # Count in-use processors (MASTER/ALTERNATE/PARKED).
                        if (status_row.find('MASTER') != -1
                                or status_row == 'ALTERNATE'
                                or status_row == 'PARKED'):
                            cpu_use = cpu_use + 1
                        # Count all CP and IFL processors toward the total.
                        if (type_row == 'CP' or type_row == 'IFL'):
                            cpu_total = cpu_total + 1
    # NOTE(review): returns without emitting an Exit sysLog line, unlike the
    # other getHost routines -- confirm intentional.
    return cpu_total, cpu_use


def getGeneralInfo(rh):
    """
    Obtain general information about the host.

    Input:
       Request Handle with the following properties:
          function    - 'GETHOST'
          subfunction - 'GENERAL'

    Output:
       Request Handle updated with the results.
       Return code - 0: ok
       Return code - 4: problem getting some info
    """
    rh.printSysLog("Enter getHost.getGeneralInfo")

    # Get host using VMCP
    rh.results['overallRC'] = 0
    cmd = ["sudo", "/sbin/vmcp", "query userid"]
    strCmd = ' '.join(cmd)
    rh.printSysLog("Invoking: " + strCmd)
    try:
        host = subprocess.check_output(
            cmd,
            close_fds=True,
            stderr=subprocess.STDOUT)
        host = bytes.decode(host)
        # QUERY USERID output: "<userid> AT <hostname>".
        userid = host.split()[0]
        host = host.split()[2]
    except subprocess.CalledProcessError as e:
        msg = msgs.msg['0405'][1] % (modId, "Hypervisor Name",
                                     strCmd, e.output)
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0405'][0])
        host = "no info"
    except Exception as e:
        # All other exceptions.
        rh.printLn("ES", msgs.msg['0421'][1] % (modId, strCmd,
            type(e).__name__, str(e)))
        rh.updateResults(msgs.msg['0421'][0])
        host = "no info"

    # Get a bunch of info from /proc/sysinfo
    lparCpuTotal = "no info"
    lparCpuUsed = "no info"
    cecModel = "no info"
    cecVendor = "no info"
    hvInfo = "no info"
    with open('/proc/sysinfo', 'r') as myFile:
        for num, line in enumerate(myFile, 1):
            # Get total physical CPU in this LPAR
            if "LPAR CPUs Total" in line:
                lparCpuTotal = line.split()[3]
            # Get used physical CPU in this LPAR
            if "LPAR CPUs Configured" in line:
                lparCpuUsed = line.split()[3]
            # Get CEC model
            if "Type:" in line:
                cecModel = line.split()[1]
            # Get vendor of CEC
            if "Manufacturer:" in line:
                cecVendor = line.split()[1]
            # Get hypervisor type and version
            if "VM00 Control Program" in line:
                hvInfo = line.split()[3] + " " + line.split()[4]
    # update cpu number from getCPUCount by call API System_Processor_Query
    cpu_total = 0
    cpu_used = 0
    cpu_total, cpu_used = getCPUCount(rh)
    if (cpu_total != 0):
        lparCpuTotal = str(cpu_total)
    if (cpu_used != 0):
        lparCpuUsed = str(cpu_used)

    if lparCpuTotal == "no info":
        msg = msgs.msg['0405'][1] % (modId, "LPAR CPUs Total",
                                     "cat /proc/sysinfo", "not found")
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0405'][0])
    if lparCpuUsed == "no info":
        msg = msgs.msg['0405'][1] % (modId, "LPAR CPUs Configured",
                                     "cat /proc/sysinfo", "not found")
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0405'][0])
    if cecModel == "no info":
        msg = msgs.msg['0405'][1] % (modId, "Type:",
                                     "cat /proc/sysinfo", "not found")
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0405'][0])
    if cecVendor == "no info":
        msg = msgs.msg['0405'][1] % (modId, "Manufacturer:",
                                     "cat /proc/sysinfo", "not found")
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0405'][0])
    if hvInfo == "no info":
        msg = msgs.msg['0405'][1] % (modId, "VM00 Control Program",
                                     "cat /proc/sysinfo", "not found")
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0405'][0])

    # Get processor architecture
    arch = str(os.uname()[4])

    # Get LPAR memory total & offline
    parm = ["-T", "dummy", "-k", "STORAGE="]
    lparMemTotal = "no info"
    lparMemStandby = "no info"
    results = invokeSMCLI(rh, "System_Information_Query", parm)
    if results['overallRC'] == 0:
        for line in results['response'].splitlines():
            if "STORAGE=" in line:
                lparMemOnline = line.split()[0]
                lparMemStandby = line.split()[4]
                lparMemTotal = lparMemOnline.split("=")[2]
                lparMemStandby = lparMemStandby.split("=")[1]
    else:
        # SMAPI API failed, so we put out messages
        # 300 and 405 for consistency
        rh.printLn("ES", results['response'])
        rh.updateResults(results)    # Use results from invokeSMCLI
        msg = msgs.msg['0405'][1] % (modId, "LPAR memory",
                                     "(see message 300)",
                                     results['response'])
        rh.printLn("ES", msg)

    # Get LPAR memory in use
    parm = ["-T", "dummy", "-k", "detailed_cpu=show=no"]
    lparMemUsed = "no info"
    results = invokeSMCLI(rh, "System_Performance_Information_Query",
                          parm)
    if results['overallRC'] == 0:
        for line in results['response'].splitlines():
            if "MEMORY_IN_USE=" in line:
                lparMemUsed = line.split("=")[1]
                lparMemUsed = generalUtils.getSizeFromPage(rh, lparMemUsed)
    else:
        if config.CONF.zvm.bypass_smapiout:
            # we bypass the check of SMAPIOUT and use 0G directly
            # This currently used for test when SMAPIOUT is not ready
            lparMemUsed = '0G'
        else:
            # SMAPI API failed, so we put out messages
            # 300 and 405 for consistency
            rh.printLn("ES", results['response'])
            rh.updateResults(results)    # Use results from invokeSMCLI
            msg = msgs.msg['0405'][1] % (modId, "LPAR memory in use",
                                         "(see message 300)",
                                         results['response'])
            rh.printLn("ES", msg)

    # Get IPL Time
    ipl = ""
    cmd = ["sudo", "/sbin/vmcp", "query cplevel"]
    strCmd = ' '.join(cmd)
    rh.printSysLog("Invoking: " + strCmd)
    try:
        ipl = subprocess.check_output(
            cmd,
            close_fds=True,
            stderr=subprocess.STDOUT)
        # Third line of QUERY CPLEVEL output carries the IPL time.
        ipl = bytes.decode(ipl).split("\n")[2]
    except subprocess.CalledProcessError as e:
        msg = msgs.msg['0405'][1] % (modId, "IPL Time", strCmd, e.output)
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0405'][0])
    except
Exception as e:
    # All other exceptions: report through the generic "command failed"
    # message 0421 and record the failure in the request results.
    rh.printLn("ES", msgs.msg['0421'][1] % (modId, strCmd,
        type(e).__name__, str(e)))
    rh.updateResults(msgs.msg['0421'][0])

# Create output string.
# Each value below was gathered earlier in getGeneralInfo; any probe that
# failed leaves the literal placeholder "no info" in its slot.
outstr = "ZCC USERID: " + userid
outstr += "\nz/VM Host: " + host
outstr += "\nArchitecture: " + arch
outstr += "\nCEC Vendor: " + cecVendor
outstr += "\nCEC Model: " + cecModel
outstr += "\nHypervisor OS: " + hvInfo
outstr += "\nHypervisor Name: " + host
outstr += "\nLPAR CPU Total: " + lparCpuTotal
outstr += "\nLPAR CPU Used: " + lparCpuUsed
outstr += "\nLPAR Memory Total: " + lparMemTotal
outstr += "\nLPAR Memory Offline: " + lparMemStandby
outstr += "\nLPAR Memory Used: " + lparMemUsed
outstr += "\nIPL Time: " + ipl

rh.printLn("N", outstr)

rh.printSysLog("Exit getHost.getGeneralInfo, rc: " +
    str(rh.results['overallRC']))
return rh.results['overallRC']


def getVersion(rh):
    """ Get the version of this function.

    Input:
       Request Handle

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """

    # 'version' is the module-level version constant of getHost.
    rh.printLn("N", "Version: " + version)
    return 0


def help(rh):
    """ Produce help output specifically for GetHost functions.

    Input:
       Request Handle

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """

    # Emit the command synopsis followed by the operand descriptions.
    showInvLines(rh)
    showOperandLines(rh)
    return 0


def parseCmdline(rh):
    """ Parse the request command input.

    Input:
       Request Handle

    Output:
       Request Handle updated with parsed input.
       Return code - 0: ok, non-zero: error
    """

    rh.printSysLog("Enter getHost.parseCmdline")

    rh.userid = ''

    if rh.totalParms >= 2:
        rh.subfunction = rh.request[1].upper()

    # Verify the subfunction is valid.
    if rh.subfunction not in subfuncHandler:
        # Subfunction is missing: record message 0011 (with the sorted
        # list of valid subfunctions) in the results; no exception is
        # raised, the non-zero overallRC gates further processing.
        subList = ', '.join(sorted(subfuncHandler.keys()))
        msg = msgs.msg['0011'][1] % (modId, subList)
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0011'][0])

    # Parse the rest of the command line.
if rh.results['overallRC'] == 0: rh.argPos = 2 # Begin Parsing at 3rd operand generalUtils.parseCmdline(rh, posOpsList, keyOpsList) rh.printSysLog("Exit getHost.parseCmdLine, rc: " + str(rh.results['overallRC'])) return rh.results['overallRC'] def showInvLines(rh): """ Produce help output related to command synopsis Input: Request Handle """ if rh.subfunction != '': rh.printLn("N", "Usage:") rh.printLn("N", " python " + rh.cmdName + " GetHost " + "diskpoolnames") rh.printLn("N", " python " + rh.cmdName + " GetHost " + "diskpoolspace ") rh.printLn("N", " python " + rh.cmdName + " GetHost fcpdevices") rh.printLn("N", " python " + rh.cmdName + " GetHost general") rh.printLn("N", " python " + rh.cmdName + " GetHost help") rh.printLn("N", " python " + rh.cmdName + " GetHost version") return def showOperandLines(rh): """ Produce help output related to operands. Input: Request Handle """ if rh.function == 'HELP': rh.printLn("N", " For the GetHost function:") else: rh.printLn("N", "Sub-Functions(s):") rh.printLn("N", " diskpoolnames - " + "Returns the names of the directory manager disk pools.") rh.printLn("N", " diskpoolspace - " + "Returns disk pool size information.") rh.printLn("N", " fcpdevices - " + "Lists the FCP device channels that are active, free, or") rh.printLn("N", " offline.") rh.printLn("N", " general - " + "Returns the general information related to the z/VM") rh.printLn("N", " hypervisor environment.") rh.printLn("N", " help - Returns this help information.") rh.printLn("N", " version - Show the version of this function") if rh.subfunction != '': rh.printLn("N", "Operand(s):") rh.printLn("N", " - Name of the disk pool.") return zVMCloudConnector-1.6.3/smtLayer/msgs.py0000664000175000017510000013613414263437505017666 0ustar ruirui00000000000000# Messages for Systems Management Ultra Thin Layer # # Copyright 2017 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ List of modules that use these messages and their module id. ModId Module ----- -------------------- CVM changeVM.py CMD cmdVM.py DVM deleteVM.py GUT generalUtils.py GHO getHost.py GVM getVM.py MVM makeVM.py MIG migrateVM.py PVM powerVM.py RQH ReqHandle.py SMC Reserved for smcli SMP smapi.py VMU vmUtils.py List of messages. Message id is the key. Messages are grouped by their message number. Each message is defined in a array that provides: - Dictionary of values for the result structure, e.g. {'overallRC': 4, 'rc': 100} The keys in the dictionary can contain: 'overallRC', 'rc', 'rs' 'errno', and 'strError'. These allow us to use the dictionary to update the ReqHandle results dictionary. - Message text (may contain substitution directives, e.g. %s, %i) - Sample text in a python tuple """ msg = { # 0001-0099: Parsing Messages '0001': [{'overallRC': 4, 'rc': 4, 'rs': 1}, "ULT%s0001E %s %s subfunction's operand at position %i (%s) " + "is not an integer: %s", ('RQH', 'FUNCTION_NAME', 'SUBFUNCTION_NAME', 'OPERAND_POSITION', 'OPERAND', 'OPERAND_VALUE')], # Explain: An error was detected while parsing the command. # The indicated operand is not an integer. # SysAct: Processing of the subfunction terminates. # UserResp: Correct the syntax so that the indicated operand is # an integer, e.g., 10 and reissue the command. 
'0002': [{'overallRC': 4, 'rc': 4, 'rs': 2}, "ULT%s0002E %s's %s subfunction is missing positional " + "operand (%s) at position %i.", ('RQH', 'FUNCTION_NAME', 'SUBFUNCTION_NAME', 'OPERAND', 'OPERAND_POSITION')], # Explain: An error was detected while parsing the command. # A positional operand is missing. # SysAct: Processing of the subfunction terminates. # UserResp: Correct the syntax by specifying the missing operand # and reissue the command. '0003': [{'overallRC': 4, 'rc': 4, 'rs': 3}, "ULT%s0003E %s's %s subfunction %s keyword operand is " + "missing a value.", ('RQH', 'FUNCTION_NAME', 'SUBFUNCTION_NAME', 'KEYWORD')], # Explain: An error was detected while parsing the command. # A keyword operand that requires a value was specified without # the value. # SysAct: Processing of the subfunction terminates. # UserResp: Correct the syntax to provide a value for the specified # keyword operand and reissue the command. '0004': [{'overallRC': 4, 'rc': 4, 'rs': 4}, "ULT%s0004E %s's %s subfunction %s keyword operand is not " + "an integer: %s", ('RQH', 'FUNCTION_NAME', 'SUBFUNCTION_NAME', 'KEYWORD', 'KEYWORD_VALUE')], # Explain: An error was detected while parsing the command. # The specified operand for a keyword is not an integer. # SysAct: Processing of the subfunction terminates. # UserResp: Correct the syntax so that the keyword operand is # an integer, e.g., 10 and reissue the command. '0005': [{'overallRC': 4, 'rc': 4, 'rs': 5}, "ULT%s0005E %s's %s subfunction does not recognize keyword: %s", ('RQH', 'FUNCTION_NAME', 'SUBFUNCTION_NAME', 'KEYWORD')], # Explain: An error was detected while parsing the command. # An unrecognized keyword was encountered. # SysAct: Processing of the subfunction terminates. # UserResp: Correct the syntax to specify a recognized keyword # and reissue the command. 
'0006': [{'overallRC': 4, 'rc': 4, 'rs': 6}, "ULT%s0006E %s's %s subfunction encountered an unknown " + "operand: %s", ('RQH', 'FUNCTION_NAME', 'SUBFUNCTION_NAME', 'OPERAND')], # Explain: An error was detected while parsing the command. # An unknown operand was encountered. # SysAct: Processing of the subfunction terminates. # UserResp: Correct the syntax and reissue the command. '0007': [{'overallRC': 4, 'rc': 4, 'rs': 7}, "ULT%s0007E Unrecognized function: %s", ('RQH', 'FUNCTION_NAME')], # Explain: An error was detected while parsing the command. # The specified function is not known. # SysAct: Processing of the subfunction terminates. # UserResp: Correct the syntax and reissue the command. '0008': [{'overallRC': 4, 'rc': 4, 'rs': 8}, "ULT%s0008E Specified function is not 'HELP' or 'VERSION': %s", ('RQH', 'SPECIFIED_FUNCTION')], # Explain: An error was detected while parsing the command. # The specified function was not 'HELP' or 'VERSION' which are the # only valid functions for a command of the specified length. # SysAct: Processing of the function terminates. # UserResp: Correct the syntax and reissue the command. '0009': [{'overallRC': 4, 'rc': 4, 'rs': 9}, "ULT%s0009E Too few arguments specified.", ('RQH')], # Explain: An error was detected while parsing the command. # The minimum number of arguments were not provided for the command. # SysAct: Processing of the subfunction terminates. # UserResp: Correct the syntax and reissue the command. '0010': [{'overallRC': 4, 'rc': 4, 'rs': 10}, "ULT%s0010E Userid is missing", ('RQH')], # Explain: An error was detected while parsing the command. # A userid operand was not specified. # SysAct: Processing of the subfunction terminates. # UserResp: Correct the syntax and specify the userid along # with any other required operands and reissue the command. '0011': [{'overallRC': 4, 'rc': 4, 'rs': 11}, "ULT%s0011E Subfunction is missing. 
It should be one of " + "the following: %s", ('RQH', 'VALID_SUBFUNCTIONS')], # Explain: An error was detected while parsing the command. # The name of the subfunction was not specified. # SysAct: Processing of the function terminates. # UserResp: Correct the syntax and specify the userid along # with any other required operands and reissue the command. '0012': [{'overallRC': 4, 'rc': 4, 'rs': 12}, "ULT%s0012E The request data is not one of the supported types " + "of list or string: %s", ('RQH', 'REQUEST_DATA_TYPE')], # Explain: The ReqHandle parseCmdline method was called with # the request passed in a variable that was not a # list or base string. Only these types of variables are # supported for passing of the request to be parsed. # SysAct: Processing of the function terminates. # UserResp: Correct the calling function to use either a # list or base string to hold the request to be processed # and reinvoke the call. '0013': [{'overallRC': 4, 'rc': 4, 'rs': 13}, "ULT%s0010E The desired state was: %s. Valid states are: %s", ('RQH', 'DESIRED_STATS', 'VALID_STATS')], # Explain: An error was detected while parsing the command. # The state operand value is not one of the accepted values. # The valid values are shown in the message. # SysAct: Processing of the subfunction terminates. # UserResp: Correct the syntax to use one of the valid states # and reissue the command. '0014': [{'overallRC': 4, 'rc': 4, 'rs': 14}, "ULT%s0014E The option %s was specified but the option %s " + "was not specified. These options must both be specified.", ('RQH', 'OPTION1', 'OPTION2')], # Explain: An error was detected while parsing the command. # An option was specified which required a related # option that was not specified. # SysAct: Processing of the subfunction terminates. # UserResp: Correct the syntax to specify both options and # reissue the command. 
'0015': [{'overallRC': 4, 'rc': 4, 'rs': 15}, "ULT%s0015E The file system was not 'ext2', 'ext3', " + "'ext4', 'xfs' or 'swap': %s", ('RQH', 'FILE_SYSTEM')], # Explain: An error was detected while parsing the command. # The type of file system does not match one of the valid # values. # SysAct: Processing of the subfunction terminates. # UserResp: Correct the syntax to use a valid file system type # and reissue the command. '0016': [{'overallRC': 4, 'rc': 4, 'rs': 16}, "ULT%s0016E The scp Data Type was not 'hex', 'ebcdic', " + "or 'delete': %s", ('RQH', 'DATA_TYPE')], # Explain: An error was detected while parsing the command. # The value specified for the scp data type is not one of the # recognized values. # SysAct: Processing of the subfunction terminates. # UserResp: Correct the syntax to use a valid scp data type and # reissue the command. '0017': [{'overallRC': 4, 'rc': 4, 'rs': 17}, # dict is not used "ULT%s0017W The maxwait time %i sec is not evenly divisible " + "by the poll interval %i sec. Maximum wait time will be %i " + "sec or %i poll intervals.", ('RQH', 'MAX_WAIT', 'POLL_INTERVAL', 'RECOMMEND_MAX_WAIT', 'RECOMMEND_POLL_INTERVAL')], # Explain: When trying to determine how many polling intervals # to wait for a desired guest power state, it was found that the # specified maximum wait time was not evenly divisible by the # number of polling interval seconds. The program instead # rounded the maximum wait time up to be evenly divisble # by the polling interval. # SysAct: Processing of the function continues with the # new wait time. # UserResp: If the wait time is unacceptably long, invoke # the function with a maximum wait time and polling # interval time which are evenly divisible and of an # acceptable duration. 
# 0200-0299: Utility Messages '0200': [{'overallRC': 4, 'rc': 4, 'rs': 200}, "ULT%s0200E The size of the disk is not valid: %s", ('GUT', 'DISK_SIZE')], # Explain: An error was encountered while attempting # to convert the size of a disk from bytes to cylinders # (for 3390 type disks) or bytes to blocks (for FBA type disks). # This error can be caused by specifying the size as only a # magnitude, (e.g., 'G' or 'M') instead of an integer # appended to a magnitude (e.g., '20G'). # SysAct: Processing of the subfunction terminates. # UserResp: Correct the disk size to specify a disk magnitude # that includes the integer portion of the size in addition # to the magnitude and reissue the command. '0201': [{'overallRC': 4, 'rc': 4, 'rs': 201}, "ULT%s0201E Failed to convert %s to a number of blocks.", ('GUT', 'DISK_SIZE')], # Explain: An error was encountered while attempting # to convert the size of a disk from bytes to blocks. # The size ended with a magnitude character and should have # had an integer value prepended to the magnitude character # (e.g. '10M' or '10G'). # The probable cause of the error is that the integer # portion of the size contains a non-numeric character. # SysAct: Processing of the subfunction terminates. # UserResp: Correct the disk size to specify a valid value # and reissue the command. '0202': [{'overallRC': 4, 'rc': 4, 'rs': 202}, "ULT%s0202E %s is not an integer size of blocks.", ('GUT', 'NUM_BLOCKS')], # Explain: An error was encountered while attempting # to convert the size of a disk from bytes to blocks. # The size did not end with a valid magnitude character # (i.e., 'M' or 'G') so it was treated as an integer # value (e.g. '100000'). The probable cause of this # error is that the size contains non-numeric # characters. # SysAct: Processing of the subfunction terminates. # UserResp: Correct the disk size to specify a valid value # and reissue the command. 
'0203': [{'overallRC': 4, 'rc': 4, 'rs': 203}, "ULT%s0203E Failed to convert %s to a number of cylinders.", ('GUT', 'DISK_SIZE')], # Explain: An error was encountered while attempting # to convert the size of a disk from bytes to cylinders. # The size ended with a magnitude character and should have # had an integer value prepended to the magnitude character # (e.g. '10M' or '10G'). # The probable cause of the error is that the integer # portion of the size contains non-numeric characters. # SysAct: Processing of the subfunction terminates. # UserResp: Correct the disk size to specify a valid value # and reissue the command. '0204': [{'overallRC': 4, 'rc': 4, 'rs': 204}, "ULT%s0204E %s is not an integer size of cylinders.", ('GUT', 'DISK_SIZE')], # Explain: An error was encountered while attempting # to convert the size of a disk from bytes to cylinders. # The size did not end with a valid magnitude character # (i.e., 'M' or 'G') so it was treated as an integer # value (e.g. '100000'). The probable cause of this # error is that the size contains non-numeric # characters. # SysAct: Processing of the subfunction terminates. # UserResp: Correct the disk size to specify a valid value # and reissue the command. '0205': [{'overallRC': 4, 'rc': 4, 'rs': 205}, "ULT%s0205E memory size did not end with suffix 'G' or 'M'.", ('MVM')], # Explain: An error was encountered while handling memory size. # The size did not end with a valid magnitude character # (i.e., 'M' or 'G'). # SysAct: Processing of the subfunction terminates. # UserResp: Correct the memory size to specify a valid value # and reissue the command. '0206': [{'overallRC': 4, 'rc': 4, 'rs': 206}, "ULT%s0206E Max memory size %s specified is less than " + "initial memory size %s.", ('MVM', 'MAX_MEM_SIZE', 'INIT_MEM_SIZE')], # Explain: An error was encountered while handling memory size. # The size did not end with a valid magnitude character # (i.e., 'M' or 'G'). # SysAct: Processing of the subfunction terminates. 
# UserResp: Correct the memory size to specify a valid value # and reissue the command. '0207': [{'overallRC': 4, 'rc': 4, 'rs': 207}, "ULT%s0207E VDISK Size (swap disk) is greater than 2G.", ('MVM')], # Explain: An error was encountered while handling swap disk # The swap disk size can't be greater than 2G # SysAct: Processing of the subfunction terminates. # UserResp: Correct the swap size to specify a valid value # and reissue the command. # 0208-0299: Available # SMCLI and SMAPI related messages. '0300': [{'overallRC': 8}, # dict is not used. "ULT%s0300E SMAPI API failed: %s, overall rc: %s, rc: %s, " + "rs: %s, errno: %s, cmd: %s, out: %s", ('SMP', 'API_NAME', 'OVERALLRC', 'RC', 'RS', 'ERRNO', 'CMD', 'OUTPUT')], # Explain: The smcli program was invoked to call z/VM SMAPI for # the indicated API. An error was encountered. The overall rc # indicates the location of the problem: # 8 - SMAPI returned the error. The rc and rs values are # the values provided by SMAPI. The z/VM Systems # Management Application Programming book provides # additional documentation related to the return code and # reason code in the API description and in the "Return # and Reason Code Summary" chapter. # 24 - The smcli program identified a parameter validation # error. A message will indicate what was detected. # It could be a missing parameter, invalid parameter, etc. # Invoke the smcli program using the -h parameter and the # API name shown in the error message to obtain additional # invocation help, e.g. "./smcli Image_Query_DM -h". # In addition, the z/VM Systems Management Application # Programming book provides additional documentation # related to the return code and reason code in the API # description. # 25 - The smcli program encountered an internal error. # The rc and errno contains information related to the # problem. The error message from the smcli invocation # and log entries in the system log provide the most # useful information to debug this error. 
# SysAct: Processing of the function terminates. # UserResp: Determine the cause of the problem using the # information described in the explanation section. Reinvoke # the function after you correct the problem. '0301': [{'overallRC': 25, 'rc': 301, 'rs': 0}, "ULT%s0301E SMAPI API failed: %s, response header does not " + "have the expected 3 values before the (details) string. " + "cmd: %s, response header: %s, out: %s", ('SMP', 'API_NAME', 'CMD', 'HEADER', 'OUTPUT')], # Explain: The smcli program was invoked to call z/VM SMAPI for # the indicated API. The expected response from the smcli # program has a header that contains 3 integers followed by # the string '(details)'. The response returned by the program # does not have the expected header. This indicates a problem # in the smcli program or a problem invoking the smcli program. # SysAct: Processing of the function terminates. # UserResp: Determine the cause of the failure. If it is not a # Linux permission problem then investigate a possible coding # error in the smcli program. Correct the problem and reinvoke # the function. '0302': [{'overallRC': 25, 'rc': 302, 'rs': 0}, "ULT%s0302E SMAPI API failed: %s, word 1 in " + "the response header is not an integer or in the range of " + "expected values. word 1: %s, cmd: %s, response " + "header: %s, out: %s", ('SMP', 'API_NAME', 'WORD1', 'CMD', 'HEADER', 'OUTPUT')], # Explain: The smcli program was invoked to call z/VM SMAPI for # the indicated API. The expected response from the smcli # program has a header that contains 3 integers followed by # the string '(details)'. The first word should provide the # overall return code of the smcli invocation that indicates # where the failure occurred. However, it does not represent # an integer value or is not the expected error values of # 8, 24 or 25. This indicates a problem in the smcli program. # SysAct: Processing of the function terminates. # UserResp: Determine the cause of the failure. 
# Investigate a possible coding error in the smcli program. # Correct the problem and reinvoke the function. '0303': [{'overallRC': 25, 'rc': 303, 'rs': 0}, "ULT%s0303E SMAPI API failed: %s, word 2 in the response " + "header is not an integer. word 2: %s, cmd: %s, response " + "header: %s, out: %s", ('SMP', 'API_NAME', 'WORD2', 'CMD', 'HEADER', 'OUTPUT')], # Explain: The smcli program was invoked to call z/VM SMAPI for # the indicated API. The expected response from the smcli # program has a header that contains 3 integers followed by # the string '(details)'. The second word should provide the # specific return code of the smcli invocation. However, it # does not represent an integer value. This indicates a # problem in the smcli program. # SysAct: Processing of the function terminates. # UserResp: Determine the cause of the failure. # You may need an update to the smcli program. # Correct the problem and reinvoke the function. '0304': [{'overallRC': 25, 'rc': 304, 'rs': 0}, "ULT%s0304E SMAPI API failed: %s, word 3 in the response " + "header is not an integer. word 3: %s, cmd: %s, response " + "header: %s, out: %s", ('SMP', 'API_NAME', 'WORD3', 'CMD', 'HEADER', 'OUTPUT')], # Explain: The smcli program was invoked to call z/VM SMAPI for # the indicated API. The expected response from the smcli # program has a header that contains 3 integers followed by # the string '(details)'. The third word should provide # the reason code or errno, depending on the error. However, # it does not represent an integer value. This indicates # a problem in the smcli program. # SysAct: Processing of the function terminates. # UserResp: Determine the cause of the failure. # You may need an update to the smcli program. # Correct the problem and reinvoke the function. 
'0305': [{'overallRC': 99, 'rc': 305, 'rs': 0}, "ULT%s0305E Exception received on an attempt to " + "communicate with SMAPI, cmd: %s, exception: %s, " + "details: %s", ('SMP', 'CMD', 'EXCEPTION', 'EXCEPTION_DETAILS')], # Explain: The function attempted to invoke the smcli # program to communicate with z/VM SMAPI. This failed # due to the exception shown in the message. # SysAct: Processing of the function terminates. # UserResp: Determine the cause of the failure using # the exception and exception details provided in the message. # Reinvoke the function after correcting the problem. # 0306-0310: Available # IUCV related messages '0311': [{'overallRC': 2, 'rc': 2, 'rs': 99}, # dict is not used. "ULT%s0311E On %s, command sent through IUCV failed, " + "rc in response string is not an integer. " + "cmd: %s, rc: %s, out: %s", ('SMP', 'USERID', 'CMD', 'RC', 'OUTPUT')], # Explain: The IUCV client returned a non-integer return # code value. # SysAct: Processing of the function terminates. # UserResp: Contact the support team with the information # included in the message. Investigate the problem in the # IUCVCLNT program, fix the code and reinvoke the function. '0312': [{'overallRC': 2, 'rc': 2, 'rs': 99}, # dict is not used. "ULT%s0312E On %s, command sent through IUCV failed, " + "reason code in response string is not an integer. " + "cmd: %s, rc: %s, rs: %s, out: %s", ('SMP', 'USERID', 'CMD', 'RC', 'RS', 'OUTPUT')], # Explain: The IUCV client returned a non-integer reason # code value. # SysAct: Processing of the function terminates. # UserResp: Contact the support team with the information # included in the message. The IUCVCLNT program is the probable # cause of the failure. This will require a code change. '0313': [{'overallRC': 2, 'rc': 1}, # dict is not used. "ULT%s0313E On %s, command sent through IUCV was not " + "authorized or a generic Linux error occurred. 
" + "cmd: %s, rc: %s, rs: %s, out: %s", ('SMP', 'USERID', 'CMD', 'RC', 'RS', 'OUTPUT')], # Explain: The command that was sent to the target system failed. # The cause of the failure is either a Linux permission problem # for the command being executed or a generic Linux error. # SysAct: Processing of the function terminates. # UserResp: Use the information included in the message to determine # the cause of the failure on the target system and correct the # problem. After correcting the problem, you should be able to # reinvoke the failing function. '0314': [{'overallRC': 2, 'rc': 2}, # dict is not used. "ULT%s0314E IUCV client parameter error sending command to %s. " + "cmd: %s, rc: %s, rs: %s, out: %s", ('SMP', 'USERID', 'CMD', 'RC', 'RS', 'OUTPUT')], # Explain: The IUCVCLNT program communicates with managed # systems using IUCV. The program detected invocation # errors. This can be caused by a problem in the level of the # IUCVCLNT program or the function that invoked it. # SysAct: Processing of the function terminates. # UserResp: Use the information included in the message to determine # the cause of the failure. This could require the support # team to provide a code change to either the IUCVCLNT program # or the code that invoked it. '0315': [{'overallRC': 2, 'rc': 4}, # dict is not used. "ULT%s0315E IUCV socket error sending command to %s. " + "cmd: %s, rc: %s, rs: %s, out: %s", ('SMP', 'USERID', 'CMD', 'RC', 'RS', 'OUTPUT')], # Explain: The IUCVCLNT program communicates with managed # systems using IUCV. The program encountered an IUCV # communication failure when it attempted to send a # command to the managed system. # This is probably caused by a failure in the managed system # that prevents the system from receiving the command. # One cause could be that the system logged off z/VM. 
# Another cause is that the managed system is not running the # related IUCV daemon or has not authorized access by # the system contacting it in the /etc/iucv_authorized_userid # file. # SysAct: Processing of the function terminates. # UserResp: Use the information included in the message to # determine the cause of the failure. Reinvoke the function # after you correct the problem. '0316': [{'overallRC': 2, 'rc': 8}, # dict is not used. "ULT%s0316E On %s, command sent through IUCV failed. " + "cmd: %s, rc: %s, rs: %s, out: %s", ('SMP', 'USERID', 'CMD', 'RC', 'RS', 'OUTPUT')], # Explain: The command that was sent to the target system failed. # SysAct: Processing of the function terminates. # UserResp: Use the information included in the message to # determine the cause of the failure. Reinvoke the function # after you correct the problem. '0317': [{'overallRC': 2, 'rc': 16}, # dict is not used. "ULT%s0317E File transport failure while processing " + "command for %s. " + "cmd: %s, rc: %s, rs: %s, out: %s", ('SMP', 'USERID', 'CMD', 'RC', 'RS', 'OUTPUT')], # Explain: The IUCVCLNT program failed to send a file to # the target system. # SysAct: Processing of the function terminates. # UserResp: Use the information included in the message to # determine the cause of the failure. Reinvoke the function # after you correct the problem. '0318': [{'overallRC': 2, 'rc': 32}, # dict is not used. "ULT%s0318E On %s, IUCV server file was not found. " + "cmd: %s, rc: %s, rs: %s, out: %s", ('SMP', 'USERID', 'CMD', 'RC', 'RS', 'OUTPUT')], # Explain: The IUCVCLNT program failed to find the IUCVSERV # file on the local system. This file is expected to exist # in the same directory as the IUCVCLNT program. # SysAct: Processing of the function terminates. # UserResp: Determine the reason that the IUCVSERV file could # not be located and correct the problem. Reinvoke the # function after you correct the problem. '0319': [{'overallRC': 2}, # dict is not used. 
"ULT%s0319E Unrecognized IUCV client error encountered " + "while sending a command through IUCV to %s. " + "cmd: %s, rc: %s, rs: %s, out: %s", ('SMP', 'USERID', 'CMD', 'RC', 'RS', 'OUTPUT')], # Explain: The IUCVCLNT program returned a non-zero return code # that does not correspond to a recognized error value. # SysAct: Processing of the function terminates. # UserResp: Determine the cause of the error using the # information in the message. Log files on the local system # and the target system may contain useful information to # identify the failure. Reinvoke the function after you # correct the problem. '0320': [{'overallRC': 3, 'rc': 64}, "ULT%s0320E On %s, command sent through IUCV failed because " + "timeout. cmd: %s, rc: %s, rs: %s, out: %s", ('SMP', 'USERID', 'CMD', 'RC', 'RS', 'OUTPUT')], # Explain: The command that was sent to the target system failed. # SysAct: Processing of the function terminates. # UserResp: Use the information included in the message to # determine the cause of the failure. Reinvoke the function # after you correct the problem. # General subfunction processing messages '0400': [{'overallRC': 4, 'rc': 4, 'rs': 400}, "ULT%s0400E The worker script %s does not exist.", ('GUT', 'SCRIPT_NAME')], # Explain: The activation engine modification script specified # for "aeScript" cannot be found. # SysAct: Processing of the function ends with no action # taken. # UserResp: Correct the function call to point to an existing script # and reinvoke the function. '0401': [{'overallRC': 4, 'rc': 7, 'rs': 401}, "ULT%s0401E Failed to punch %s file to guest: %s, out: %s", ('GUT', 'FILE_LOCATION', 'USERID', 'OUTPUT')], # Explain: The vmur punch command failed for the specified # reason. # SysAct: Processing of the function ends with no action # taken. # UserResp: Look up the reason the vmur command failed, correct # the problem and reinvoke the function. 
'0402': [{'overallRC': 4, 'rc': 5, 'rs': 402}, "ULT%s0402E No information was found for the specified " + "pool(s): %s", ('GUT', 'DISK_POOL')], # Explain: Image_Volume_Space_Query_DM returned successfully # but the list of pools of the specified names was empty. # SysAct: Processing terminates with an error. # UserResp: Correct the function call to query existing pools and # reinvoke the function. '0403': [{'overallRC': 4, 'rc': 99, 'rs': 403}, # dict is not used. "ULT%s0403E Failed to purge reader file %s, out: %s", ('GUT', 'SPOOL_ID', 'OUTPUT')], # Explain: The vmcp purge reader file command failed. # The system was already in the process of cleaning up from a # failed attempt to punch a file, so the error processing # continues. # SysAct: Error processing continues. # UserResp: Manually clean up the specified reader file using # CP commands to avoid problems with old files or spool space # filling up. '0404': [{'overallRC': 4, 'rc': 8, 'rs': 404}, "ULT%s0404E Failed to spool the punch to the specified class %s" + ", out:%s ", ('GUT', 'SPOOL_CLASS', 'OUTPUT')], # Explain: The vmcp change reader command failed with the # specified output. # SysAct: Processing of the function ends with no action # taken. # UserResp: Look up the reason the change reader command failed # in the CP messages book or vmcp help. Correct the problem # and reinvoke the function. '0405': [{'overallRC': 4, 'rc': 6, 'rs': 405}, "ULT%s0405E Unable to obtain information related to: " + "%s. Command used was: %s. Output was: %s", ('GUT', 'KEYWORD', 'CMD', 'OUTPUT')], # Explain: While gathering hypervisor information, one of the # commands used failed and that piece of information could # not be queried. # SysAct: The getHost GENERAL function returns "no info" for # the specified hypervisor information. # UserResp: If the information is needed, investigate the # failure, correct it and reinvoke the function. 
'0406': [{'overallRC': 4, 'rc': 9, 'rs': 406}, "ULT%s0406E Failed to punch %s because of VMUR timeout ", ('GUT', 'FILE_LOCATION')], # Explain: When punching a file to the reader, the vmur punch # command is issued up to 5 times with increasing timeouts. # This error comes after the 5th try if the vmur command # was still unsuccessful. # SysAct: Processing of the function ends with no action taken. # UserResp: This error could be because of another process # also issuing vmur commands at the same time. Wait a few # seconds and reinvoke the function. '0407': [{'overallRC': 4, 'rc': 4, 'rs': 407}, # dict is not used. "ULT%s0407W Unable to spool reader to all classes, " + "it is possible that there may be additional console " + "files available that are not listed in the response. " + "Response from %s is %s", ('GUT', 'CMD', 'OUTPUT')], # Explain: The vmcp spool reader class * command was not # successful. This means the reader could not be changed # to get files of all classes, and thus there could be # files that are ignored. # SysAct: Processing of the function continues. # UserResp: If missing files are suspected, investigate the # cause of the failure in the CP messages book or vmcp # help and reinvoke the function. '0408': [{'overallRC': 4, 'rc': 4, 'rs': 408}, "ULT%s0408E Error getting list of files in the reader " + "to search for logs from user %s. Response from %s is %s", ('GUT', 'USERID', 'CMD', 'OUTPUT')], # Explain: The vmur list command failed. The list of files # in the user's reader could not be determined. # SysAct: Processing of the function ends with no action taken. # UserResp: Investigate the failure in vmur and correct the # problem, then reinvoke the function. '0409': [{'overallRC': 4, 'rc': 4, 'rs': 409}, "ULT%s0409E Unable to get console log for user %s. " + "The userid is either: not logged on, not spooling " + "its console, or has not created any console output. 
" + "Error rc=rs=8 returned from " + "Image_Console_Get.", ('GUT', 'USERID')], # Explain: The Image_Console_Get SMAPI call returned that # there were no spool files available for that user. # SysAct: Processing of the function ends with no action taken. # UserResp: Check that the user is logged on, has issued a # SPOOL CONSOLE command and has done some actions that # would result in console output, then reinvoke the function. '0410': [{'overallRC': 4, 'rc': 4, 'rs': 410}, "ULT%s0410E Unable to get console log for user %s " + "no spool files were found in our reader from this " + "user, it is possible another process has already " + "received them.", ('GUT', 'USERID')], # Explain: The Image_Console_Get SMAPI call should have # put files of class T and "CON" with the userid as the # filename in our reader. However no files were found # in the vmur list output with these characteristcs. # SysAct: Processing of the function ends with no action taken. # UserResp: Likely another process in this virtual machine # has already processed the spool files. They are gone. '0411': [{'overallRC': 4, 'rc': 4, 'rs': 411}, "ULT%s0411E Unable to receive console output file. " + "Reader not online. /sys/bus/ccw/drivers/vmur/0.0.000c" + "/online = 0", ('GUT')], # Explain: The reader is typically at virtual device address # x'000C'. Linux does not believe this device is online. # SysAct: Processing of the function ends with no action taken. # UserResp: If the reader is at a different virtual device # address, update the SMT code to recognize the alternative # device address, otherwise bring the reader at x'000C' online # to Linux. Then, reinvoke the function. '0412': [{'overallRC': 4, 'rc': 4, 'rs': 412}, # dict is not used. 
"ULT%s0412E Malformed reply from SMAPI, unable to fill " + "in performance information, exception: %s, " + "details: %s, Response: %s", ('GUT', 'EXCEPTION', 'EXCEPTION_DETAILS', 'OUTPUT')], # Explain: An error was encountered while processing the # response information from the SMAPI Image_Performance_Query # API. The response is not in the expected format. # The exception that occurred during processing of the # response, its details and the response are included # in the message. # SysAct: Processing of the function terminates. # UserResp: Determine the cause of the failure. A code change # may be needed in the function or in the z/VM SMAPI code. # After correcting the code, reinvoke the function. '0413': [{'overallRC': 99, 'rc': 99, 'rs': 413}, "ULT%s0413E Userid '%s' did not enter the expected " + "operating system state of '%s' in %i seconds.", ('GUT', 'USERID', 'DESIRED_STATE', 'MAX_WAIT')], # Explain: The managed system did not enter the operating # system state that was shown in the message in the # maximum number of seconds allowed for this to happen. # The maximum number of seconds a combination of the, # specified or defaulted, polling interval and maximum # maximum number of polling attempts. # SysAct: Processing of the function terminates. # UserResp: Determine the cause of the failure and correct # the cause. '0414': [{'overallRC': 99, 'rc': 99, 'rs': 414}, "ULT%s0414E Userid '%s' did not enter the expected " + "virtual machine state of '%s' in %i seconds.", ('GUT', 'USERID', 'DESIRED_STATE', 'MAX_WAIT')], # Explain: The managed system did not enter the virtual # machine log on/off state that was shown in the message # in the maximum number of seconds allowed for this to happen. # The maximum number of seconds a combination of the, # specified or defaulted, polling interval and maximum # maximum number of polling attempts. # SysAct: Processing of the function terminates. # UserResp: Determine the cause of the failure and correct # the cause. 
'0415': [{'overallRC': 3, 'rc': 415}, # rs comes from failing rc "ULT%s0415E Command failed: '%s', rc: %i out: %s", ('GUT', 'CMD', 'RC', 'OUTPUT')], # Explain: The indicated command failed. The return code # and output from the command are shown. # SysAct: Function processing terminates. # UserResp: Use the information provided with the message # to determine the cause of the failure and correct the # problem. Reinvoke the function after you correct the # problem. '0416': [{'overallRC': 99, 'rc': 99, 'rs': 416}, "ULT%s0416E Command returned a response " + "containing '%s' but did not have at least %i words " + "following it. cmd: '%s', out: '%s'", ('GUT', 'KEYWORD', 'NUM', 'CMD', 'OUTPUT')], # Explain: A command was invoked that returned a successful # return code indication. The response contained the # expected string but did not contain the expected number # of words that follow the string. # SysAct: Processing of the function terminates. # UserResp: Use the information provided in the message # to determine the cause of the problem and correct it. # Reinvoke the function after you correct the problem. '0417': [{'overallRC': 99, 'rc': 99, 'rs': 417}, "ULT%s0417E Command did not return the expected response " + "containing '%s', cmd: '%s', out: '%s'", ('GUT', 'KEYWORD', 'CMD', 'OUTPUT')], # Explain: A command was invoked that returned a successful # return code indication. The response did not contain the # expected string. # SysAct: Processing of the function terminates. # UserResp: Use the information provided in the message # to determine the reason the identified string was not # present in the response to identify the cause. # Reinvoke the function after you correct the problem. '0418': [{'overallRC': 99, 'rc': 99, 'rs': 418}, "ULT%s0418E Userid %s is not logged on to this system.", ('GUT', 'USERID')], # Explain: A CP message HCP0045E was returned, indicating the # userid specified is not logged on to this z/VM system, # thus it cannot be relocated. 
# SysAct: Processing of the function ends with no action taken. # UserResp: Correct the function call to specify a correct userid and # reinvoke the function. '0419': [{'overallRC': 99, 'rc': 99, 'rs': 419}, "ULT%s0419E A relocation is not in progress for userid %s.", ('GUT', 'USERID')], # Explain: An attempt was made to query or cancel a relocation # for a user, but the SMAPI command indicated that no # relocation was in progress. # SysAct: Processing of the function ends with no action taken. # UserResp: Reinvoke the function for a relocation that is in # progress. '0420': [{'overallRC': 99, 'rc': 99, 'rs': 420}, # dict is not used. "ULT%s0420E An error occurred issuing a %s for userid %s. " + "Please look up message(s): %s in the CP Messages book for " + "more information.", ('GUT', 'CMD', 'USERID', 'ERROR_CODE')], # Explain: The VMRELOCATE command returns a list of messages # containing all the problems encountered when trying to issue # the command. # SysAct: Processing of the function ends with no action taken. # UserResp: Look up the codes provided in the CP messages book, # correct the problems and reinvoke the function. '0421': [{'overallRC': 99, 'rc': 421, 'rs': 0}, "ULT%s0421E Exception received on an attempt to " + "execute a cmd: %s, exception: %s, " + "details: %s", ('GUT', 'CMD', 'EXCEPTION', 'EXCEPTION_DETAILS')], # Explain: The command indicated by the message failed. # The error message contains exception name and details # contained in the exception. # SysAct: Processing of the function ends with no further # action taken. # UserResp: Use the information in the message to determine # the cause of the error and correct the problem. # Reinvoke the function after you have corrected the problem. '0422': [{'overallRC': 99, 'rc': 422, 'rs': 0}, "ULT%s0422W Exception received on an attempt to " + "execute a cmd: %s, exception: %s, " + "details: %s. 
Will attempt to continue processing.", ('GUT', 'CMD', 'EXCEPTION', 'EXCEPTION_DETAILS')], # Explain: While trying to execute a vmcp command, an error # occurred. However the vmcp command was not central # to processing the subfunction, so processing # continues. # SysAct: Function processing continues. # UserResp: If there is reason to suspect the function did # not execute completely, investigate the error. Otherwise # ignore this message. '0423': [{'overallRC': 4, 'rc': 4, 'rs': 423}, # dict is not used. "ULT%s0423W Unable to spool reader to all classes, " + "it is possible that there may be additional console " + "files available that are not listed in the response. " + "Command: %s, exception %s, details %s. Will attempt " + "to continue processing.", ('GUT', 'CMD', 'EXCEPTION', 'EXCEPTION_DETAILS')], # Explain: The vmcp spool reader class * command was not # successful. This means the reader could not be changed # to get files of all classes, and thus there could be # files that are ignored. The exception was of a different # type than in message 407. # SysAct: Processing of the function continues. # UserResp: If missing files are suspected, investigate the # cause of the failure in the CP messages book or vmcp # help and reinvoke the function. '0424': [{'overallRC': 4, 'rc': 4, 'rs': 424}, "ULT%s0424E Failed to transfer %s file to guest: %s, out: %s", ('GUT', 'FILE_LOCATION', 'USERID', 'OUTPUT')], # Explain: The vmcp transfer command failed for the specified # reason. # SysAct: Processing of the function ends with no action # taken. # UserResp: Look up the reason the vmcp transfer command failed, # correct the problem and reinvoke the function. '0501': [{'overallRC': 5, 'rc': 1, 'rs': 501}, "ULT%s0501E Timeout Exception recevied on an attempt to " + "execute a cmd: %s, exception: %s, " + "details: %s", ('GUT', 'CMD', 'EXCEPTION', 'EXCEPTION_DETAILS')], # Explain: The command indicated by the message failed of timeout. 
# The error message contains exception name and details # contained in the exception. # SysAct: Processing of the function ends with no further # action taken. # UserResp: Use the information in the message to determine # the cause of the error and correct the problem. # Reinvoke the function after you have corrected the problem. # 5000-6100: Reserved for SMCLI } zVMCloudConnector-1.6.3/smtLayer/changeVM.py0000664000175000017510000011471413672563714020412 0ustar ruirui00000000000000# ChangeVM functions for Systems Management Ultra Thin Layer # # Copyright 2017 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os.path import re import shutil import tarfile import tempfile from smtLayer import generalUtils from smtLayer import msgs from smtLayer.vmUtils import disableEnableDisk, execCmdThruIUCV, installFS from smtLayer.vmUtils import invokeSMCLI, isLoggedOn from smtLayer.vmUtils import punch2reader, purgeReader modId = "CVM" version = "1.0.0" """ List of subfunction handlers. Each subfunction contains a list that has: Readable name of the routine that handles the subfunction, Code for the function call. 
""" subfuncHandler = { 'ADD3390': ['add3390', lambda rh: add3390(rh)], 'ADD9336': ['add9336', lambda rh: add9336(rh)], 'DEDICATE': ['dedicate', lambda rh: dedicate(rh)], 'UNDEDICATE': ['undedicate', lambda rh: undedicate(rh)], 'AEMOD': ['addAEMOD', lambda rh: addAEMOD(rh)], 'IPL': ['addIPL', lambda rh: addIPL(rh)], 'LOADDEV': ['addLOADDEV', lambda rh: addLOADDEV(rh)], 'HELP': ['help', lambda rh: help(rh)], 'PUNCHFILE': ['punchFile', lambda rh: punchFile(rh)], 'PURGERDR': ['purgeRDR', lambda rh: purgeRDR(rh)], 'REMOVEDISK': ['removeDisk', lambda rh: removeDisk(rh)], 'REMOVEIPL': ['removeIPL', lambda rh: removeIPL(rh)], 'VERSION': ['getVersion', lambda rh: getVersion(rh)], } """ List of positional operands based on subfunction. Each subfunction contains a list which has a dictionary with the following information for the positional operands: - Human readable name of the operand, - Property in the parms dictionary to hold the value, - Is it required (True) or optional (False), - Type of data (1: int, 2: string). """ posOpsList = { 'ADD3390': [ ['Disk pool name', 'diskPool', True, 2], ['Virtual address', 'vaddr', True, 2], ['Disk size', 'diskSize', True, 2]], 'ADD9336': [ ['Disk pool name', 'diskPool', True, 2], ['Virtual address', 'vaddr', True, 2], ['Disk size', 'diskSize', True, 2]], 'DEDICATE': [ ['Virtual device address', 'vaddr', True, 2], ['Real device address', 'raddr', True, 2], ['Read only mode', 'mode', True, 2]], 'UNDEDICATE': [ ['Virtual device address', 'vaddr', True, 2]], 'AEMOD': [ ['Activation Engine Modification Script', 'aeScript', True, 2]], 'IPL': [ ['Virtual Address or NSS name', 'addrOrNSS', True, 2]], 'PUNCHFILE': [ ['File to punch', 'file', True, 2]], 'REMOVEDISK': [ ['Virtual address', 'vaddr', True, 2]], 'REMOVEIPL': [], } """ List of additional operands/options supported by the various subfunctions. The dictionary following the subfunction name uses the keyword from the command as a key. 
Each keyword has a dictionary that lists:
  - the related parms item that stores the value,
  - how many values follow the keyword, and
  - the type of data for those values (1: int, 2: string)
"""
# Consumed by generalUtils.parseCmdline (see parseCmdline below).
keyOpsList = {
    'ADD3390': {
        '--filesystem': ['fileSystem', 1, 2],
        '--mode': ['mode', 1, 2],
        '--multipw': ['multiPW', 1, 2],
        '--readpw': ['readPW', 1, 2],
        '--showparms': ['showParms', 0, 0],
        '--writepw': ['writePW', 1, 2]},
    'ADD9336': {
        '--filesystem': ['fileSystem', 1, 2],
        '--mode': ['mode', 1, 2],
        '--multipw': ['multiPW', 1, 2],
        '--readpw': ['readPW', 1, 2],
        '--showparms': ['showParms', 0, 0],
        '--writepw': ['writePW', 1, 2]},
    'AEMOD': {
        '--invparms': ['invParms', 1, 2],
        '--showparms': ['showParms', 0, 0]},
    'HELP': {},
    'IPL': {
        '--loadparms': ['loadParms', 1, 2],
        '--parms': ['parms', 1, 2],
        '--showparms': ['showParms', 0, 0]},
    'LOADDEV': {
        '--boot': ['boot', 1, 2],
        '--addr': ['addr', 1, 2],
        '--lun': ['lun', 1, 2],
        '--wwpn': ['wwpn', 1, 2],
        '--scpDataType': ['scpDataType', 1, 2],
        '--scpData': ['scpData', 1, 2],
        '--showparms': ['showParms', 0, 0]},
    'PUNCHFILE': {
        '--class': ['class', 1, 2],
        '--showparms': ['showParms', 0, 0],
        },
    'PURGERDR': {'--showparms': ['showParms', 0, 0]},
    'REMOVEDISK': {'--showparms': ['showParms', 0, 0]},
    'REMOVEIPL': {'--showparms': ['showParms', 0, 0]},
    'VERSION': {},
}


def add3390(rh):
    """
    Adds a 3390 (ECKD) disk to a virtual machine's directory entry.

    Input:
       Request Handle with the following properties:
          function    - 'CHANGEVM'
          subfunction - 'ADD3390'
          userid      - userid of the virtual machine
          parms['diskPool']   - Disk pool
          parms['diskSize']   - size of the disk in cylinders or bytes.
          parms['fileSystem'] - Linux filesystem to install on the disk.
          parms['mode']       - Disk access mode
          parms['multiPW']    - Multi-write password
          parms['readPW']     - Read password
          parms['vaddr']      - Virtual address
          parms['writePW']    - Write password

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """

    rh.printSysLog("Enter changeVM.add3390")

    # Normalize the requested size to cylinders (accepts byte suffixes).
    results, cyl = generalUtils.cvtToCyl(rh, rh.parms['diskSize'])
    if results['overallRC'] != 0:
        # message already sent. Only need to update the final results.
        rh.updateResults(results)

    if results['overallRC'] == 0:
        # Build the smcli Image_Disk_Create_DM argument list; AUTOG
        # lets SMAPI pick the volume from the named pool.
        parms = [
            "-T", rh.userid,
            "-v", rh.parms['vaddr'],
            "-t", "3390",
            "-a", "AUTOG",
            "-r", rh.parms['diskPool'],
            "-u", "1",
            "-z", cyl,
            "-f", "1"]
        hideList = []
        if 'mode' in rh.parms:
            parms.extend(["-m", rh.parms['mode']])
        else:
            parms.extend(["-m", 'W'])
        # Track the positions of password values so invokeSMCLI can
        # mask them out of the log.
        if 'readPW' in rh.parms:
            parms.extend(["-R", rh.parms['readPW']])
            hideList.append(len(parms) - 1)
        if 'writePW' in rh.parms:
            parms.extend(["-W", rh.parms['writePW']])
            hideList.append(len(parms) - 1)
        if 'multiPW' in rh.parms:
            parms.extend(["-M", rh.parms['multiPW']])
            hideList.append(len(parms) - 1)
        results = invokeSMCLI(rh,
                              "Image_Disk_Create_DM",
                              parms,
                              hideInLog=hideList)
        if results['overallRC'] != 0:
            # SMAPI API failed.
            rh.printLn("ES", results['response'])
            rh.updateResults(results)    # Use results returned by invokeSMCLI

    if (results['overallRC'] == 0 and 'fileSystem' in rh.parms):
        # NOTE(review): rh.parms['mode'] is accessed directly here and
        # below, while the DM call above defaults the mode to 'W' when
        # '--mode' was omitted -- confirm the parser guarantees 'mode'
        # is present, otherwise this raises KeyError.
        results = installFS(
            rh,
            rh.parms['vaddr'],
            rh.parms['mode'],
            rh.parms['fileSystem'],
            "3390")

    if results['overallRC'] == 0:
        results = isLoggedOn(rh, rh.userid)
        if results['overallRC'] != 0:
            # Cannot determine if VM is logged on or off.
            # We have partially failed. Pass back the results.
            rh.updateResults(results)
        elif results['rs'] == 0:
            # Add the disk to the active configuration.
            parms = [
                "-T", rh.userid,
                "-v", rh.parms['vaddr'],
                "-m", rh.parms['mode']]

            results = invokeSMCLI(rh, "Image_Disk_Create", parms)
            if results['overallRC'] == 0:
                rh.printLn("N", "Added dasd " + rh.parms['vaddr'] +
                    " to the active configuration.")
            else:
                # SMAPI API failed.
                rh.printLn("ES", results['response'])
                rh.updateResults(results)    # Use results from invokeSMCLI

    rh.printSysLog("Exit changeVM.add3390, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']


def add9336(rh):
    """
    Adds a 9336 (FBA) disk to virtual machine's directory entry.

    Input:
       Request Handle with the following properties:
          function    - 'CHANGEVM'
          subfunction - 'ADD9336'
          userid      - userid of the virtual machine
          parms['diskPool']   - Disk pool
          parms['diskSize']   - size of the disk in blocks or bytes.
          parms['fileSystem'] - Linux filesystem to install on the disk.
          parms['mode']       - Disk access mode
          parms['multiPW']    - Multi-write password
          parms['readPW']     - Read password
          parms['vaddr']      - Virtual address
          parms['writePW']    - Write password

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """

    rh.printSysLog("Enter changeVM.add9336")

    # Normalize the requested size to blocks (accepts byte suffixes).
    results, blocks = generalUtils.cvtToBlocks(rh, rh.parms['diskSize'])
    if results['overallRC'] != 0:
        # message already sent. Only need to update the final results.
        rh.updateResults(results)

    if results['overallRC'] == 0:
        parms = [
            "-T", rh.userid,
            "-v", rh.parms['vaddr'],
            "-t", "9336",
            "-a", "AUTOG",
            "-r", rh.parms['diskPool'],
            "-u", "1",
            "-z", blocks,
            "-f", "1"]
        hideList = []
        if 'mode' in rh.parms:
            parms.extend(["-m", rh.parms['mode']])
        else:
            parms.extend(["-m", 'W'])
        # Track password positions so invokeSMCLI masks them in the log.
        if 'readPW' in rh.parms:
            parms.extend(["-R", rh.parms['readPW']])
            hideList.append(len(parms) - 1)
        if 'writePW' in rh.parms:
            parms.extend(["-W", rh.parms['writePW']])
            hideList.append(len(parms) - 1)
        if 'multiPW' in rh.parms:
            parms.extend(["-M", rh.parms['multiPW']])
            hideList.append(len(parms) - 1)
        results = invokeSMCLI(rh,
                              "Image_Disk_Create_DM",
                              parms,
                              hideInLog=hideList)
        if results['overallRC'] != 0:
            # SMAPI API failed.
            rh.printLn("ES", results['response'])
            rh.updateResults(results)    # Use results from invokeSMCLI

    if (results['overallRC'] == 0 and 'fileSystem' in rh.parms):
        # Install the file system
        # NOTE(review): same 'mode' KeyError concern as in add3390 --
        # confirm the parser guarantees 'mode' is present.
        results = installFS(
            rh,
            rh.parms['vaddr'],
            rh.parms['mode'],
            rh.parms['fileSystem'],
            "9336")

    if results['overallRC'] == 0:
        results = isLoggedOn(rh, rh.userid)
        if (results['overallRC'] == 0 and results['rs'] == 0):
            # Add the disk to the active configuration.
            parms = [
                "-T", rh.userid,
                "-v", rh.parms['vaddr'],
                "-m", rh.parms['mode']]

            results = invokeSMCLI(rh, "Image_Disk_Create", parms)
            if results['overallRC'] == 0:
                rh.printLn("N", "Added dasd " + rh.parms['vaddr'] +
                    " to the active configuration.")
            else:
                # SMAPI API failed.
                rh.printLn("ES", results['response'])
                rh.updateResults(results)    # Use results from invokeSMCLI

    rh.printSysLog("Exit changeVM.add9336, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']


def dedicate(rh):
    """
    Dedicate device.

    Input:
       Request Handle with the following properties:
          function    - 'CHANGEVM'
          subfunction - 'DEDICATE'
          userid      - userid of the virtual machine
          parms['vaddr']      - Virtual address
          parms['raddr']      - Real address
          parms['mode']       - Read only mode or not.

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """

    rh.printSysLog("Enter changeVM.dedicate")

    parms = [
        "-T", rh.userid,
        "-v", rh.parms['vaddr'],
        "-r", rh.parms['raddr'],
        "-R", rh.parms['mode']]

    hideList = []
    results = invokeSMCLI(rh,
                          "Image_Device_Dedicate_DM",
                          parms,
                          hideInLog=hideList)

    if results['overallRC'] != 0:
        # SMAPI API failed.
        rh.printLn("ES", results['response'])
        rh.updateResults(results)    # Use results from invokeSMCLI

    if results['overallRC'] == 0:
        results = isLoggedOn(rh, rh.userid)
        if (results['overallRC'] == 0 and results['rs'] == 0):
            # Dedicate device to active configuration.
parms = [ "-T", rh.userid, "-v", rh.parms['vaddr'], "-r", rh.parms['raddr'], "-R", rh.parms['mode']] results = invokeSMCLI(rh, "Image_Device_Dedicate", parms) if results['overallRC'] == 0: rh.printLn("N", "Dedicated device " + rh.parms['vaddr'] + " to the active configuration.") else: # SMAPI API failed. rh.printLn("ES", results['response']) rh.updateResults(results) # Use results from invokeSMCLI rh.printSysLog("Exit changeVM.dedicate, rc: " + str(rh.results['overallRC'])) return rh.results['overallRC'] def undedicate(rh): """ Unedicate device. Input: Request Handle with the following properties: function - 'CHANGEVM' subfunction - 'UNDEDICATE' userid - userid of the virtual machine parms['vaddr'] - Virtual address Output: Request Handle updated with the results. Return code - 0: ok, non-zero: error """ rh.printSysLog("Enter changeVM.undedicate") parms = [ "-T", rh.userid, "-v", rh.parms['vaddr']] hideList = [] results = invokeSMCLI(rh, "Image_Device_Undedicate_DM", parms, hideInLog=hideList) if results['overallRC'] != 0: # SMAPI API failed. rh.printLn("ES", results['response']) rh.updateResults(results) # Use results from invokeSMCLI if results['overallRC'] == 0: results = isLoggedOn(rh, rh.userid) if (results['overallRC'] == 0 and results['rs'] == 0): # Dedicate device to active configuration. parms = [ "-T", rh.userid, "-v", rh.parms['vaddr']] results = invokeSMCLI(rh, "Image_Device_Undedicate", parms) if results['overallRC'] == 0: rh.printLn("N", "Dedicated device " + rh.parms['vaddr'] + " to the active configuration.") else: # SMAPI API failed. rh.printLn("ES", results['response']) rh.updateResults(results) # Use results from invokeSMCLI rh.printSysLog("Exit changeVM.undedicate, rc: " + str(rh.results['overallRC'])) return rh.results['overallRC'] def addAEMOD(rh): """ Send an Activation Modification Script to the virtual machine. 
Input: Request Handle with the following properties: function - 'CHANGEVM' subfunction - 'AEMOD' userid - userid of the virtual machine parms['aeScript'] - File specification of the AE script parms['invparms'] - invparms operand Output: Request Handle updated with the results. Return code - 0: ok Return code - 4: input error, rs - 11 AE script not found """ rh.printSysLog("Enter changeVM.addAEMOD") invokeScript = "invokeScript.sh" trunkFile = "aemod.doscript" fileClass = "X" tempDir = tempfile.mkdtemp() if os.path.isfile(rh.parms['aeScript']): # Get the short name of our activation engine modifier script if rh.parms['aeScript'].startswith("/"): s = rh.parms['aeScript'] tmpAEScript = s[s.rindex("/") + 1:] else: tmpAEScript = rh.parms['aeScript'] # Copy the mod script to our temp directory shutil.copyfile(rh.parms['aeScript'], tempDir + "/" + tmpAEScript) # Create the invocation script. conf = "#!/bin/bash \n" baseName = os.path.basename(rh.parms['aeScript']) parm = "/bin/bash %s %s \n" % (baseName, rh.parms['invParms']) fh = open(tempDir + "/" + invokeScript, "w") fh.write(conf) fh.write(parm) fh.close() # Generate the tar package for punch tar = tarfile.open(tempDir + "/" + trunkFile, "w") for file in os.listdir(tempDir): tar.add(tempDir + "/" + file, arcname=file) tar.close() # Punch file to reader punch2reader(rh, rh.userid, tempDir + "/" + trunkFile, fileClass) shutil.rmtree(tempDir) else: # Worker script does not exist. shutil.rmtree(tempDir) msg = msgs.msg['0400'][1] % (modId, rh.parms['aeScript']) rh.printLn("ES", msg) rh.updateResults(msgs.msg['0400'][0]) rh.printSysLog("Exit changeVM.addAEMOD, rc: " + str(rh.results['overallRC'])) return rh.results['overallRC'] def addIPL(rh): """ Sets the IPL statement in the virtual machine's directory entry. 
Input: Request Handle with the following properties: function - 'CHANGEVM' subfunction - 'IPL' userid - userid of the virtual machine parms['addrOrNSS'] - Address or NSS name parms['loadparms'] - Loadparms operand (optional) parms['parms'] - Parms operand (optional) Output: Request Handle updated with the results. Return code - 0: ok, non-zero: error """ rh.printSysLog("Enter changeVM.addIPL") parms = ["-T", rh.userid, "-s", rh.parms['addrOrNSS']] if 'loadparms' in rh.parms: parms.extend(["-l", rh.parms['loadparms']]) if 'parms' in rh.parms: parms.extend(["-p", rh.parms['parms']]) results = invokeSMCLI(rh, "Image_IPL_Set_DM", parms) if results['overallRC'] != 0: # SMAPI API failed. rh.printLn("ES", results['response']) rh.updateResults(results) # Use results from invokeSMCLI rh.printSysLog("Exit changeVM.addIPL, rc: " + str(rh.results['overallRC'])) return rh.results['overallRC'] def addLOADDEV(rh): """ Sets the LOADDEV statement in the virtual machine's directory entry. Input: Request Handle with the following properties: function - 'CHANGEVM' subfunction - 'ADDLOADDEV' userid - userid of the virtual machine parms['boot'] - Boot program number parms['addr'] - Logical block address of the boot record parms['lun'] - One to eight-byte logical unit number of the FCP-I/O device. parms['wwpn'] - World-Wide Port Number parms['scpDataType'] - SCP data type parms['scpData'] - Designates information to be passed to the program is loaded during guest IPL. Note that any of the parms may be left blank, in which case we will not update them. Output: Request Handle updated with the results. 
Return code - 0: ok, non-zero: error """ rh.printSysLog("Enter changeVM.addLOADDEV") # scpDataType and scpData must appear or disappear concurrently if ('scpData' in rh.parms and 'scpDataType' not in rh.parms): msg = msgs.msg['0014'][1] % (modId, "scpData", "scpDataType") rh.printLn("ES", msg) rh.updateResults(msgs.msg['0014'][0]) return if ('scpDataType' in rh.parms and 'scpData' not in rh.parms): if rh.parms['scpDataType'].lower() == "delete": scpDataType = 1 else: # scpDataType and scpData must appear or disappear # concurrently unless we're deleting data msg = msgs.msg['0014'][1] % (modId, "scpDataType", "scpData") rh.printLn("ES", msg) rh.updateResults(msgs.msg['0014'][0]) return scpData = "" if 'scpDataType' in rh.parms: if rh.parms['scpDataType'].lower() == "hex": scpData = rh.parms['scpData'] scpDataType = 3 elif rh.parms['scpDataType'].lower() == "ebcdic": scpData = rh.parms['scpData'] scpDataType = 2 # scpDataType not hex, ebcdic or delete elif rh.parms['scpDataType'].lower() != "delete": msg = msgs.msg['0016'][1] % (modId, rh.parms['scpDataType']) rh.printLn("ES", msg) rh.updateResults(msgs.msg['0016'][0]) return else: # Not specified, 0 for do nothing scpDataType = 0 scpData = "" if 'boot' not in rh.parms: boot = "" else: boot = rh.parms['boot'] if 'addr' not in rh.parms: block = "" else: block = rh.parms['addr'] if 'lun' not in rh.parms: lun = "" else: lun = rh.parms['lun'] # Make sure it doesn't have the 0x prefix lun.replace("0x", "") if 'wwpn' not in rh.parms: wwpn = "" else: wwpn = rh.parms['wwpn'] # Make sure it doesn't have the 0x prefix wwpn.replace("0x", "") parms = [ "-T", rh.userid, "-b", boot, "-k", block, "-l", lun, "-p", wwpn, "-s", str(scpDataType)] if scpData != "": parms.extend(["-d", scpData]) results = invokeSMCLI(rh, "Image_SCSI_Characteristics_Define_DM", parms) # SMAPI API failed. 
if results['overallRC'] != 0: rh.printLn("ES", results['response']) rh.updateResults(results) rh.printSysLog("Exit changeVM.addLOADDEV, rc: " + str(rh.results['overallRC'])) return rh.results['overallRC'] def doIt(rh): """ Perform the requested function by invoking the subfunction handler. Input: Request Handle Output: Request Handle updated with parsed input. Return code - 0: ok, non-zero: error """ rh.printSysLog("Enter changeVM.doIt") # Show the invocation parameters, if requested. if 'showParms' in rh.parms and rh.parms['showParms'] is True: rh.printLn("N", "Invocation parameters: ") rh.printLn("N", " Routine: changeVM." + str(subfuncHandler[rh.subfunction][0]) + "(reqHandle)") rh.printLn("N", " function: " + rh.function) rh.printLn("N", " userid: " + rh.userid) rh.printLn("N", " subfunction: " + rh.subfunction) rh.printLn("N", " parms{}: ") for key in rh.parms: if key != 'showParms': rh.printLn("N", " " + key + ": " + str(rh.parms[key])) rh.printLn("N", " ") # Call the subfunction handler subfuncHandler[rh.subfunction][1](rh) rh.printSysLog("Exit changeVM.doIt, rc: " + str(rh.results['overallRC'])) return rh.results['overallRC'] def getVersion(rh): """ Get the version of this function. Input: Request Handle Output: Request Handle updated with the results. Return code - 0: ok, non-zero: error """ rh.printLn("N", "Version: " + version) return 0 def help(rh): """ Produce help output specifically for ChangeVM functions. Input: Request Handle Output: Request Handle updated with the results. Return code - 0: ok, non-zero: error """ showInvLines(rh) showOperandLines(rh) return 0 def parseCmdline(rh): """ Parse the request command input. Input: Request Handle Output: Request Handle updated with parsed input. Return code - 0: ok, non-zero: error """ rh.printSysLog("Enter changeVM.parseCmdline") if rh.totalParms >= 2: rh.userid = rh.request[1].upper() else: # Userid is missing. 
def parseCmdline(rh):
    """ Parse the request command input.

    Input:
       Request Handle

    Output:
       Request Handle updated with parsed input.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter changeVM.parseCmdline")

    if rh.totalParms >= 2:
        rh.userid = rh.request[1].upper()
    else:
        # Userid is missing.
        msg = msgs.msg['0010'][1] % modId
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0010'][0])
        rh.printSysLog("Exit changeVM.parseCmdLine, rc: " +
            str(rh.results['overallRC']))
        return rh.results['overallRC']

    if rh.totalParms == 2:
        # Only a subfunction was given; what we parsed as the userid
        # is really the subfunction name.
        rh.subfunction = rh.userid
        rh.userid = ''

    if rh.totalParms >= 3:
        rh.subfunction = rh.request[2].upper()

    # Verify the subfunction is valid.
    if rh.subfunction not in subfuncHandler:
        # Subfunction is missing.
        validSubfuncs = ', '.join(sorted(subfuncHandler.keys()))
        msg = msgs.msg['0011'][1] % (modId, validSubfuncs)
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0011'][0])

    # Parse the rest of the command line.
    if rh.results['overallRC'] == 0:
        rh.argPos = 3    # Begin Parsing at 4th operand
        generalUtils.parseCmdline(rh, posOpsList, keyOpsList)

    if rh.results['overallRC'] == 0 and rh.subfunction in ('ADD3390',
                                                           'ADD9336'):
        if ('fileSystem' in rh.parms and rh.parms['fileSystem'] not in
                ['ext2', 'ext3', 'ext4', 'xfs', 'swap']):
            # Invalid file system specified.
            msg = msgs.msg['0015'][1] % (modId, rh.parms['fileSystem'])
            rh.printLn("ES", msg)
            rh.updateResults(msgs.msg['0015'][0])

    rh.printSysLog("Exit changeVM.parseCmdLine, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']
def punchFile(rh):
    """ Punch a file to a virtual reader of the specified virtual machine.

    Input:
       Request Handle with the following properties:
          function    - 'CHANGEVM'
          subfunction - 'PUNCHFILE'
          userid      - userid of the virtual machine
          parms['class'] - Spool class (optional)
          parms['file'] - Filespec of the file to punch.

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter changeVM.punchFile")

    # Spool class defaults to "A" unless the caller supplied one.
    spoolClass = str(rh.parms.get('class', "A"))

    punch2reader(rh, rh.userid, rh.parms['file'], spoolClass)
    rh.printSysLog("Exit changeVM.punchFile, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']


def purgeRDR(rh):
    """ Purge the reader belonging to the virtual machine.

    Input:
       Request Handle with the following properties:
          function    - 'CHANGEVM'
          subfunction - 'PURGERDR'
          userid      - userid of the virtual machine

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter changeVM.purgeRDR")

    rh.updateResults(purgeReader(rh))

    rh.printSysLog("Exit changeVM.purgeRDR, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']
def removeDisk(rh):
    """ Remove a disk from a virtual machine.

    Input:
       Request Handle with the following properties:
          function    - 'CHANGEVM'
          subfunction - 'REMOVEDISK'
          userid      - userid of the virtual machine
          parms['vaddr'] - Virtual address

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter changeVM.removeDisk")

    # (Removed a dead initial 'results' assignment that was
    # unconditionally overwritten by isLoggedOn below.)

    # Is image logged on
    loggedOn = False
    results = isLoggedOn(rh, rh.userid)
    if results['overallRC'] == 0:
        if results['rs'] == 0:
            loggedOn = True

            results = disableEnableDisk(
                rh,
                rh.userid,
                rh.parms['vaddr'],
                '-d')
            if results['overallRC'] != 0:
                rh.printLn("ES", results['response'])
                rh.updateResults(results)

    if results['overallRC'] == 0 and loggedOn:
        strCmd = "/sbin/vmcp detach " + rh.parms['vaddr']
        results = execCmdThruIUCV(rh, rh.userid, strCmd)
        if results['overallRC'] != 0:
            # Fix: raw string for the regex; '\w' in a normal string
            # literal is a deprecated invalid escape sequence.
            if re.search(r'(^HCP\w\w\w040E)', results['response']):
                # Device does not exist, ignore the error
                results = {'overallRC': 0, 'rc': 0, 'rs': 0, 'response': ''}
            else:
                rh.printLn("ES", results['response'])
                rh.updateResults(results)

    if results['overallRC'] == 0:
        # Remove the disk from the user entry.
        parms = [
            "-T", rh.userid,
            "-v", rh.parms['vaddr'],
            "-e", "0"]

        results = invokeSMCLI(rh, "Image_Disk_Delete_DM", parms)
        if results['overallRC'] != 0:
            if (results['overallRC'] == 8 and results['rc'] == 208 and
                    results['rs'] == 36):
                # Disk does not exist, ignore the error
                results = {'overallRC': 0, 'rc': 0, 'rs': 0, 'response': ''}
            else:
                # SMAPI API failed.
                rh.printLn("ES", results['response'])
                rh.updateResults(results)    # Use results from invokeSMCLI
    else:
        # Unexpected error.  Message already sent.
        rh.updateResults(results)

    rh.printSysLog("Exit changeVM.removeDisk, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']
def removeIPL(rh):
    """ Remove the IPL statement from the virtual machine's directory entry.

    Input:
       Request Handle with the following properties:
          function    - 'CHANGEVM'
          subfunction - 'REMOVEIPL'
          userid      - userid of the virtual machine

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter changeVM.removeIPL")

    parms = ["-T", rh.userid]
    results = invokeSMCLI(rh, "Image_IPL_Delete_DM", parms)

    if results['overallRC'] != 0:
        # SMAPI API failed.
        rh.printLn("ES", results['response'])
        rh.updateResults(results)    # Use results from invokeSMCLI

    rh.printSysLog("Exit changeVM.removeIPL, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']
Input: Request Handle """ if rh.function == 'HELP': rh.printLn("N", " For the ChangeVM function:") else: rh.printLn("N", "Sub-Functions(s):") rh.printLn("N", " add3390 - Add a 3390 (ECKD) disk " + "to a virtual machine's directory") rh.printLn("N", " entry.") rh.printLn("N", " add9336 - Add a 9336 (FBA) disk " + "to virtual machine's directory") rh.printLn("N", " entry.") rh.printLn("N", " aemod - Sends an activation " + "engine script to the managed virtual") rh.printLn("N", " machine.") rh.printLn("N", " help - Displays this help " + "information.") rh.printLn("N", " ipl - Sets the IPL statement in " + "the virtual machine's") rh.printLn("N", " directory entry.") rh.printLn("N", " loaddev - Sets the LOADDEV statement " + "in the virtual machine's") rh.printLn("N", " directory entry.") rh.printLn("N", " punchfile - Punch a file to a virtual " + "reader of the specified") rh.printLn("N", " virtual machine.") rh.printLn("N", " purgerdr - Purges the reader " + "belonging to the virtual machine.") rh.printLn("N", " removedisk - " + "Remove an mdisk from a virtual machine.") rh.printLn("N", " removeIPL - " + "Remove an IPL from a virtual machine's directory entry.") rh.printLn("N", " version - " + "show the version of the power function") if rh.subfunction != '': rh.printLn("N", "Operand(s):") rh.printLn("N", " -addr - " + "Specifies the logical block address of the") rh.printLn("N", " " + "boot record.") rh.printLn("N", " - " + "Specifies the virtual address or NSS name") rh.printLn("N", " to IPL.") rh.printLn("N", " - " + "aeScript is the fully qualified file") rh.printLn("N", " " + "specification of the script to be sent") rh.printLn("N", " --boot - " + "Boot program number") rh.printLn("N", " --class - " + "The class is optional and specifies the spool") rh.printLn("N", " " + "class for the reader file.") rh.printLn("N", " - " + "Specifies the directory manager disk pool to") rh.printLn("N", " " + "use to obtain the disk.") rh.printLn("N", " - " + "Specifies the 
size of the ECKD minidisk. ") rh.printLn("N", " - " + "Specifies the size of the FBA type minidisk.") rh.printLn("N", " - " + "File to punch to the target system.") rh.printLn("N", " --filesystem - " + "Specifies type of filesystem to be created on") rh.printLn("N", " the minidisk.") rh.printLn("N", " --invparms - " + "Specifies the parameters to be specified in the") rh.printLn("N", " " + "invocation script to call the aeScript.") rh.printLn("N", " --loadparms - " + "Specifies a 1 to 8-character load parameter that") rh.printLn("N", " " + "is used by the IPL'd system.") rh.printLn("N", " --lun - " + "One to eight-byte logical unit number of the") rh.printLn("N", " FCP-I/O device.") rh.printLn("N", " --mode - " + "Specifies the access mode for the minidisk.") rh.printLn("N", " --multipw - " + "Specifies the password that allows sharing the") rh.printLn("N", " " + "minidisk in multiple-write mode.") rh.printLn("N", " --parms - " + "Specifies a parameter string to pass to the") rh.printLn("N", " " + "virtual machine in general-purpose registers at") rh.printLn("N", " " + "user's the completion of the IPL.") rh.printLn("N", " --readpw - " + "Specifies the password that allows sharing the") rh.printLn("N", " " + "minidisk in read mode.") rh.printLn("N", " --scpdata - " + "Provides the SCP data information.") rh.printLn("N", " --scpdatatype - " + "Specifies whether the scp data is in hex,") rh.printLn("N", " " + "EBCDIC, or should be deleted.") rh.printLn("N", " - " + "Userid of the target virtual machine.") rh.printLn("N", " - " + "Virtual address of the device.") rh.printLn("N", " --writepw - " + "Specifies is the password that allows sharing") rh.printLn("N", " " + "the minidisk in write mode.") rh.printLn("N", " --wwpn - " + "The world-wide port number.") return zVMCloudConnector-1.6.3/smtLayer/getVM.py0000664000175000017510000005242014263437505017732 0ustar ruirui00000000000000# GetVM functions for Systems Management Ultra Thin Layer # # Copyright 2017,2021 IBM 
Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re import subprocess from smtLayer import generalUtils from smtLayer import msgs from smtLayer.vmUtils import execCmdThruIUCV, getPerfInfo, invokeSMCLI from smtLayer.vmUtils import isLoggedOn modId = 'GVM' version = "1.0.0" """ List of subfunction handlers. Each subfunction contains a list that has: Readable name of the routine that handles the subfunction, Code for the function call. """ subfuncHandler = { 'CONSOLEOUTPUT': ['getConsole', lambda rh: getConsole(rh)], 'DIRECTORY': ['getDirectory', lambda rh: getDirectory(rh)], 'ALLDIRECTORY': ['getAllDirectory', lambda rh: getAllDirectory(rh)], 'HELP': ['help', lambda rh: help(rh)], 'ISREACHABLE': ['checkIsReachable', lambda rh: checkIsReachable(rh)], 'STATUS': ['getStatus', lambda rh: getStatus(rh)], 'VERSION': ['getVersion', lambda rh: getVersion(rh)], 'FCPINFO': ['fcpinfo', lambda rh: fcpinfo(rh)], } """ List of positional operands based on subfunction. Each subfunction contains a list which has a dictionary with the following information for the positional operands: - Human readable name of the operand, - Property in the parms dictionary to hold the value, - Is it required (True) or optional (False), - Type of data (1: int, 2: string). """ posOpsList = { 'FCPINFO': [ ['Status filter', 'status', True, 2], ]} """ List of additional operands/options supported by the various subfunctions. 
def checkIsReachable(rh):
    """ Check if a virtual machine is reachable.

    Input:
       Request Handle

    Output:
       Request Handle updated with the results.
       overallRC - 0: determined the status, non-zero: some weird failure
                                             while trying to execute a
                                             command on the guest via IUCV
       rc - RC returned from execCmdThruIUCV
       rs - 0: not reachable, 1: reachable
    """
    rh.printSysLog("Enter getVM.checkIsReachable, userid: " + rh.userid)

    # Any response to a trivial command proves the guest's OS is up.
    pingCmd = "echo 'ping'"
    cmdResults = execCmdThruIUCV(rh, rh.userid, pingCmd)

    if cmdResults['overallRC'] == 0:
        reachable = 1
        rh.printLn("N", rh.userid + ": reachable")
    else:
        # A failure from execCmdThruIUCV is an acceptable way of
        # determining that the system is unreachable.  We won't pass
        # along the error message.
        reachable = 0
        rh.printLn("N", rh.userid + ": unreachable")

    rh.updateResults({"rs": reachable})
    rh.printSysLog("Exit getVM.checkIsReachable, rc: 0")
    return 0
def doIt(rh):
    """ Perform the requested function by invoking the subfunction handler.

    Input:
       Request Handle

    Output:
       Request Handle updated with parsed input.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter getVM.doIt")

    # Echo the invocation parameters when the caller asked for them.
    if rh.parms.get('showParms') is True:
        rh.printLn("N", "Invocation parameters: ")
        handlerName = str(subfuncHandler[rh.subfunction][0])
        rh.printLn("N", " Routine: getVM." + handlerName + "(reqHandle)")
        rh.printLn("N", " function: " + rh.function)
        rh.printLn("N", " userid: " + rh.userid)
        rh.printLn("N", " subfunction: " + rh.subfunction)
        rh.printLn("N", " parms{}: ")
        for parmName in rh.parms:
            if parmName == 'showParms':
                continue
            rh.printLn("N", " " + parmName + ": " + str(rh.parms[parmName]))
        rh.printLn("N", " ")

    # Dispatch to the handler registered for this subfunction.
    subfuncHandler[rh.subfunction][1](rh)

    rh.printSysLog("Exit getVM.doIt, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']
def getConsole(rh):
    """ Get the virtual machine's console output.

    Input:
       Request Handle with the following properties:
          function    - 'CMDVM'
          subfunction - 'CMD'
          userid      - userid of the virtual machine

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter getVM.getConsole")

    # Transfer the console to this virtual machine.
    parms = ["-T", rh.userid]
    results = invokeSMCLI(rh, "Image_Console_Get", parms)

    if results['overallRC'] != 0:
        if (results['overallRC'] == 8 and results['rc'] == 8 and
                results['rs'] == 8):
            # Give a more specific message.  Userid is either
            # not logged on or not spooling their console.
            msg = msgs.msg['0409'][1] % (modId, rh.userid)
        else:
            msg = results['response']
        rh.updateResults(results)    # Use results from invokeSMCLI
        rh.printLn("ES", msg)
        rh.printSysLog("Exit getVM.getConsole, rc: " +
            str(rh.results['overallRC']))
        return rh.results['overallRC']

    # Check whether the reader is online.
    # (The redundant explicit close() inside the 'with' block was removed;
    # the context manager closes the file.)
    with open('/sys/bus/ccw/drivers/vmur/0.0.000c/online', 'r') as myfile:
        out = myfile.read().replace('\n', '')

    # Nope, offline, error out and exit
    if int(out) != 1:
        msg = msgs.msg['0411'][1]
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0411'][0])
        rh.printSysLog("Exit getVM.getConsole, rc: " +
            str(rh.results['overallRC']))
        return rh.results['overallRC']

    # We should set class to *, otherwise we will get errors like:
    # vmur: Reader device class does not match spool file class
    cmd = ["sudo", "/sbin/vmcp", "spool reader class *"]
    strCmd = ' '.join(cmd)
    rh.printSysLog("Invoking: " + strCmd)
    try:
        subprocess.check_output(
            cmd,
            close_fds=True,
            stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as e:
        # If we couldn't change the class, that's not fatal
        # But we want to warn about possibly incomplete
        # results
        msg = msgs.msg['0407'][1] % (modId, strCmd, e.output)
        rh.printLn("WS", msg)
    except Exception as e:
        # All other exceptions.
        # If we couldn't change the class, that's not fatal
        # But we want to warn about possibly incomplete
        # results
        rh.printLn("ES", msgs.msg['0422'][1] % (modId, strCmd,
            type(e).__name__, str(e)))
        # Bug fix: this call previously formatted the template with
        # '% modId' alone and passed the remaining values as extra
        # positional arguments to printLn, raising TypeError whenever
        # this path was taken.  Format all values into the message,
        # matching the 0421/0422 calls.
        rh.printLn("ES", msgs.msg['0423'][1] % (modId, strCmd,
            type(e).__name__, str(e)))

    # List the spool files in the reader
    cmd = ["sudo", "/usr/sbin/vmur", "list"]
    strCmd = ' '.join(cmd)
    rh.printSysLog("Invoking: " + strCmd)
    try:
        files = subprocess.check_output(
            cmd,
            close_fds=True,
            stderr=subprocess.STDOUT)
        files = bytes.decode(files)
    except subprocess.CalledProcessError as e:
        # Uh oh, vmur list command failed for some reason
        msg = msgs.msg['0408'][1] % (modId, rh.userid, strCmd, e.output)
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0408'][0])
        rh.printSysLog("Exit getVM.getConsole, rc: " +
            str(rh.results['overallRC']))
        return rh.results['overallRC']
    except Exception as e:
        # All other exceptions.
        rh.printLn("ES", msgs.msg['0421'][1] % (modId, strCmd,
            type(e).__name__, str(e)))
        rh.updateResults(msgs.msg['0421'][0])
        rh.printSysLog("Exit getVM.getConsole, rc: " +
            str(rh.results['overallRC']))
        return rh.results['overallRC']

    # Now for each line that contains our user and is a
    # class T console file, add the spool id to our list
    spoolFiles = files.split('\n')
    outstr = ""
    for myfile in spoolFiles:
        if (myfile != "" and
                myfile.split()[0] == rh.userid and
                myfile.split()[2] == "T" and
                myfile.split()[3] == "CON"):
            fileId = myfile.split()[1]
            outstr += fileId + " "

    # No files in our list
    if outstr == "":
        msg = msgs.msg['0410'][1] % (modId, rh.userid)
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0410'][0])
        rh.printSysLog("Exit getVM.getConsole, rc: " +
            str(rh.results['overallRC']))
        return rh.results['overallRC']

    # Output the list
    rh.printLn("N", "List of spool files containing "
               "console logs from %s: %s" % (rh.userid, outstr))

    rh.results['overallRC'] = 0
    rh.printSysLog("Exit getVM.getConsole, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']
""" Get the virtual machine's directory statements. Input: Request Handle with the following properties: function - 'CMDVM' subfunction - 'CMD' userid - userid of the virtual machine Output: Request Handle updated with the results. Return code - 0: ok, non-zero: error """ rh.printSysLog("Enter getVM.getDirectory") parms = ["-T", rh.userid] results = invokeSMCLI(rh, "Image_Query_DM", parms) if results['overallRC'] == 0: results['response'] = re.sub('\*DVHOPT.*', '', results['response']) rh.printLn("N", results['response']) else: # SMAPI API failed. rh.printLn("ES", results['response']) rh.updateResults(results) # Use results from invokeSMCLI rh.printSysLog("Exit getVM.getDirectory, rc: " + str(rh.results['overallRC'])) return rh.results['overallRC'] def getAllDirectory(rh): """ Get a list of defined virtual images. Input: Request Handle with the following properties: function - 'CMDVM' subfunction - 'CMD' Output: Request Handle updated with the results. Return code - 0: ok, non-zero: error """ rh.printSysLog("Enter getVM.getAllDirectory") parms = [] results = invokeSMCLI(rh, "Image_Name_Query_DM", parms) if results['overallRC'] == 0: results['response'] = re.sub('\*DVHOPT.*', '', results['response']) rh.printLn("N", results['response']) else: # SMAPI API failed. rh.printLn("ES", results['response']) rh.updateResults(results) # Use results from invokeSMCLI rh.printSysLog("Exit getVM.getAllDirectory, rc: " + str(rh.results['overallRC'])) return rh.results['overallRC'] def getStatus(rh): """ Get the basic status of a virtual machine. Input: Request Handle with the following properties: function - 'CMDVM' subfunction - 'CMD' userid - userid of the virtual machine Output: Request Handle updated with the results. 
def getStatus(rh):
    """ Get the basic status of a virtual machine.

    Input:
       Request Handle with the following properties:
          function    - 'CMDVM'
          subfunction - 'CMD'
          userid      - userid of the virtual machine

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter getVM.getStatus, userid: " + rh.userid)

    results = isLoggedOn(rh, rh.userid)
    if results['rc'] != 0:
        # Could not determine whether the guest is logged on or not.
        rh.updateResults(results)
        rh.printSysLog("Exit getVM.getStatus, rc: " +
            str(rh.results['overallRC']))
        return rh.results['overallRC']

    loggedOff = (results['rs'] == 1)
    if loggedOff:
        # Guest is logged off; every metric reports as zero.
        powerStr = "Power state: off"
        memStr = "Total Memory: 0M"
        usedMemStr = "Used Memory: 0M"
        procStr = "Processors: 0"
        timeStr = "CPU Used Time: 0 sec"
    else:
        powerStr = "Power state: on"

    if 'power' in rh.parms:
        # Only the power state was requested, so return early.
        rh.printLn("N", powerStr)
        rh.updateResults(results)
        rh.printSysLog("Exit getVM.getStatus, rc: " +
            str(rh.results['overallRC']))
        return rh.results['overallRC']

    if not loggedOff:
        # Guest is logged on; gather performance data.
        results = getPerfInfo(rh, rh.userid)
        if results['overallRC'] != 0:
            # Something went wrong in the subroutine; exit.
            rh.updateResults(results)
            rh.printSysLog("Exit getVM.getStatus, rc: " +
                str(rh.results['overallRC']))
            return rh.results['overallRC']
        # Everything went well; response should be good.
        perfLines = results['response'].split("\n")
        memStr = perfLines[0]
        usedMemStr = perfLines[1]
        procStr = perfLines[2]
        timeStr = perfLines[3]

    # Build our output string according to what information was asked for.
    if 'memory' in rh.parms:
        outStr = memStr + "\n" + usedMemStr
    elif 'cpu' in rh.parms:
        outStr = procStr + "\n" + timeStr
    else:
        # Default to all.
        outStr = "\n".join([powerStr, memStr, usedMemStr, procStr, timeStr])

    rh.printLn("N", outStr)
    rh.printSysLog("Exit getVM.getStatus, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']
def getVersion(rh):
    """ Get the version of this function.

    Input:
       Request Handle

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printLn("N", "Version: " + version)
    return 0


def help(rh):
    """ Produce help output specifically for GetVM functions.

    Input:
       Request Handle

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    showInvLines(rh)
    showOperandLines(rh)
    return 0


def parseCmdline(rh):
    """ Parse the request command input.

    Input:
       Request Handle

    Output:
       Request Handle updated with parsed input.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter getVM.parseCmdline")

    if rh.totalParms >= 2:
        rh.userid = rh.request[1].upper()
    else:
        # Userid is missing.
        msg = msgs.msg['0010'][1] % modId
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0010'][0])
        # Bug fix: overallRC is an int; the original concatenated it to a
        # str without str(), raising TypeError on this error path
        # (changeVM.parseCmdline's twin already used str()).
        rh.printSysLog("Exit getVM.parseCmdLine, rc: " +
            str(rh.results['overallRC']))
        return rh.results['overallRC']

    if rh.totalParms == 2:
        rh.subfunction = rh.userid
        rh.userid = ''

    if rh.totalParms >= 3:
        rh.subfunction = rh.request[2].upper()

    # Verify the subfunction is valid.
    if rh.subfunction not in subfuncHandler:
        # Subfunction is missing.
        subList = ', '.join(sorted(subfuncHandler.keys()))
        msg = msgs.msg['0011'][1] % (modId, subList)
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0011'][0])

    # Parse the rest of the command line.
    if rh.results['overallRC'] == 0:
        rh.argPos = 3    # Begin Parsing at 4th operand
        generalUtils.parseCmdline(rh, posOpsList, keyOpsList)

    rh.printSysLog("Exit getVM.parseCmdLine, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']
if rh.results['overallRC'] == 0: rh.argPos = 3 # Begin Parsing at 4th operand generalUtils.parseCmdline(rh, posOpsList, keyOpsList) rh.printSysLog("Exit getVM.parseCmdLine, rc: " + str(rh.results['overallRC'])) return rh.results['overallRC'] def showInvLines(rh): """ Produce help output related to command synopsis Input: Request Handle """ if rh.subfunction != '': rh.printLn("N", "Usage:") rh.printLn("N", " python " + rh.cmdName + " GetVM [ consoleoutput | directory | isreachable ]") rh.printLn("N", " python " + rh.cmdName + " GetVM ") rh.printLn("N", " " + "status [ --all | --cpu | --memory | --power ]") rh.printLn("N", " python " + rh.cmdName + " GetVM help") rh.printLn("N", " python " + rh.cmdName + " GetVM version") return def showOperandLines(rh): """ Produce help output related to operands. Input: Request Handle """ if rh.function == 'HELP': rh.printLn("N", " For the GetVM function:") else: rh.printLn("N", "Sub-Functions(s):") rh.printLn("N", " consoleoutput - " + "Obtains the console log from the virtual machine.") rh.printLn("N", " directory - " + "Displays the user directory lines for the virtual machine.") rh.printLn("N", " help - " + "Displays this help information.") rh.printLn("N", " isreachable - " + "Determine whether the virtual OS in a virtual machine") rh.printLn("N", " is reachable") rh.printLn("N", " status - " + "show the log on/off status of the virtual machine") rh.printLn("N", " version - " + "show the version of the power function") if rh.subfunction != '': rh.printLn("N", "Operand(s):") rh.printLn("N", " - " + "Userid of the target virtual machine") rh.printLn("N", " [ --all | --cpu | " + "--memory | --power ]") rh.printLn("N", " - " + "Returns information machine related to the number") rh.printLn("N", " " + "of virtual CPUs, memory size, power status or all of the") rh.printLn("N", " information.") return def extract_fcp_data(rh, raw_data, status): """ extract data from smcli System_WWPN_Query output. we always specify OWNER=YES. 
def extract_fcp_data(rh, raw_data, status):
    """ Extract data from smcli System_WWPN_Query output.
    We always specify OWNER=YES.

    Input:
       rh       - Request Handle (used only to warn about bad input)
       raw_data - raw output returned from smcli
       status   - status to filter on ('all' returns every device)

    Output:
       data extracted would be a string like:
       'FCP device number: 1B0E\n
       Status: Active\n
       NPIV world wide port number: C05076DE330005EA\n
       Channel path ID: 27\n
       Physical world wide port number:C05076DE33002E41\n
       Owner: TEST0008\n
       FCP device number: 1B0F\n
       Status: Active\n
       NPIV world wide port number: C05076DE330005EB\n
       Channel path ID: 27\n
       Physical world wide port number:C05076DE33002E41\n
       Owner: NONE\n'
    """
    raw_data = raw_data.split('\n')

    # Clear blank lines.
    data = []
    for i in raw_data:
        i = i.strip(' \n')
        if i == '':
            continue
        else:
            data.append(i)

    # Put matched data into one list of strings.
    results = []
    lines_per_item = 6
    for i in range(0, len(data), lines_per_item):
        if (i + lines_per_item) > len(data):
            # sometimes the SMCLI output:
            #
            # FCP device number: 1B0F
            # Status: Active
            # NPIV world wide port number: C05076DE330005EA
            # Channel path ID: 27
            # Physical world wide port number:
            # Owner: turns
            #
            # which are more than 5 lines
            # we still do not know the reason, but we need handle this.
            # Bug fix: the original built a (template, data) tuple here
            # instead of a formatted string, so the tuple itself was
            # logged; interpolate the data into the message.
            msg = ("extract_fcp_data interrupt because abnormal formatted "
                   "output %s." % (data,))
            rh.printLn("WS", msg)
            break

        temp = data[i + 1].split(':')[-1].strip()
        # Only return results that match the requested status.
        if status.lower() == "all" or temp.lower() == status.lower():
            results.extend(data[i:i + lines_per_item])

    return '\n'.join(results)
rh.printLn("ES", results['response']) rh.updateResults(results) # Use results from invokeSMCLI if results['overallRC'] == 0: # extract data from smcli return ret = extract_fcp_data(rh, results['response'], rh.parms['status']) # write the ret into results['response'] rh.printLn("N", ret) else: rh.printLn("ES", results['response']) rh.updateResults(results) # Use results from invokeSMCLI return rh.results['overallRC'] zVMCloudConnector-1.6.3/smtLayer/tests/0000775000175000017510000000000014315232035017462 5ustar ruirui00000000000000zVMCloudConnector-1.6.3/smtLayer/tests/unit/0000775000175000017510000000000014315232035020441 5ustar ruirui00000000000000zVMCloudConnector-1.6.3/smtLayer/tests/unit/test_makeVM.py0000664000175000017510000003355114315210052023234 0ustar ruirui00000000000000# Copyright 2018 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from smtLayer import makeVM from smtLayer import ReqHandle from smtLayer.tests.unit import base class SMTMakeVMTestCase(base.SMTTestCase): """Test cases for makeVM.py in smtLayer.""" def test_getReservedMemSize(self): rh = mock.Mock() rh.results = {'overallRC': 0, 'rc': 0, 'rs': 0} gap = makeVM.getReservedMemSize(rh, '1024M', '128g') self.assertEqual(gap, '65536M') def test_getReservedMemSize_invalid_suffix(self): rh = ReqHandle.ReqHandle(captureLogs=False, smt=mock.Mock()) gap = makeVM.getReservedMemSize(rh, '1024M', '128T') self.assertEqual(gap, '0M') self.assertEqual(rh.results['overallRC'], 4) self.assertEqual(rh.results['rc'], 4) self.assertEqual(rh.results['rs'], 205) def test_getReservedMemSize_max_less_than_initial(self): rh = ReqHandle.ReqHandle(captureLogs=False, smt=mock.Mock()) gap = makeVM.getReservedMemSize(rh, '64G', '32G') self.assertEqual(gap, '0M') self.assertEqual(rh.results['overallRC'], 4) self.assertEqual(rh.results['rc'], 4) self.assertEqual(rh.results['rs'], 206) def test_getReservedMemSize_equal_size(self): rh = ReqHandle.ReqHandle(captureLogs=False, smt=mock.Mock()) gap = makeVM.getReservedMemSize(rh, '1024M', '1G') self.assertEqual(gap, '0M') self.assertEqual(rh.results['overallRC'], 0) # default maximum reserved memory is 128G=131072M def test_getReservedMemSize_max_reserved(self): rh = ReqHandle.ReqHandle(captureLogs=False, smt=mock.Mock()) gap = makeVM.getReservedMemSize(rh, '512m', '256G') self.assertEqual(gap, '65536M') self.assertEqual(rh.results['overallRC'], 0) # As default maximum reserved memory is 128G, # which can't exceed 9999999M, so this case will # return 131072M. If future the maximum reserved # memory limit is not there, recover this case. 
def test_getReservedMemSize_gap_G(self): rh = ReqHandle.ReqHandle(captureLogs=False, smt=mock.Mock()) gap = makeVM.getReservedMemSize(rh, '512m', '9999G') # self.assertEqual(gap, '9998G') self.assertEqual(gap, '65536M') self.assertEqual(rh.results['overallRC'], 0) @mock.patch("os.write") def test_create_VM_profile(self, write): rh = ReqHandle.ReqHandle(captureLogs=False, smt=mock.Mock()) parms = {'pw': 'pwd', 'priMemSize': '1G', 'maxMemSize': '1G', 'privClasses': 'G', 'profName': "Profile1"} rh.parms = parms makeVM.createVM(rh) write.assert_called_with(mock.ANY, b'USER pwd 1G 1G G\n' b'INCLUDE PROFILE1\n' b'COMMAND SET VCONFIG MODE LINUX\n' b'COMMAND DEFINE CPU 00 TYPE IFL\n') @mock.patch("os.write") def test_create_VM_account(self, write): rh = ReqHandle.ReqHandle(captureLogs=False, smt=mock.Mock()) parms = {'pw': 'pwd', 'priMemSize': '1G', 'maxMemSize': '1G', 'privClasses': 'G', 'account': "acc acc1 acc2 dummy"} rh.parms = parms makeVM.createVM(rh) write.assert_called_with(mock.ANY, b'USER pwd 1G 1G G\n' b'ACCOUNT ACC ACC1 ACC2 DUMMY\n' b'COMMAND SET VCONFIG MODE LINUX\n' b'COMMAND DEFINE CPU 00 TYPE IFL\n') @mock.patch("os.write") def test_create_VM_cpu_4(self, write): rh = ReqHandle.ReqHandle(captureLogs=False, smt=mock.Mock()) parms = {'pw': 'pwd', 'priMemSize': '1G', 'maxMemSize': '1G', 'privClasses': 'G', 'vdisk': '0102:1G', 'cpuCnt': 4} rh.parms = parms makeVM.createVM(rh) write.assert_called_with(mock.ANY, b'USER pwd 1G 1G G\n' b'COMMAND SET VCONFIG MODE LINUX\n' b'COMMAND DEFINE CPU 00 TYPE IFL\n' b'COMMAND DEFINE CPU 01 TYPE IFL\n' b'COMMAND DEFINE CPU 02 TYPE IFL\n' b'COMMAND DEFINE CPU 03 TYPE IFL\n' b'MDISK 0102 FB-512 V-DISK 2097152 MWV\n') @mock.patch("os.write") def test_create_VM_swap_1G(self, write): rh = ReqHandle.ReqHandle(captureLogs=False, smt=mock.Mock()) parms = {'pw': 'pwd', 'priMemSize': '1G', 'maxMemSize': '1G', 'privClasses': 'G', 'vdisk': '0102:1G'} rh.parms = parms makeVM.createVM(rh) write.assert_called_with(mock.ANY, b'USER pwd 1G 
1G G\n' b'COMMAND SET VCONFIG MODE LINUX\n' b'COMMAND DEFINE CPU 00 TYPE IFL\n' b'MDISK 0102 FB-512 V-DISK 2097152 MWV\n') @mock.patch("os.write") def test_create_VM_swap_2G(self, write): rh = ReqHandle.ReqHandle(captureLogs=False, smt=mock.Mock()) parms = {'pw': 'pwd', 'priMemSize': '1G', 'maxMemSize': '1G', 'privClasses': 'G', 'vdisk': '0102:2G'} rh.parms = parms makeVM.createVM(rh) write.assert_called_with(mock.ANY, b'USER pwd 1G 1G G\n' b'COMMAND SET VCONFIG MODE LINUX\n' b'COMMAND DEFINE CPU 00 TYPE IFL\n' b'MDISK 0102 FB-512 V-DISK 4194296 MWV\n') @mock.patch("os.write") def test_create_VM_swap_2048M(self, write): rh = ReqHandle.ReqHandle(captureLogs=False, smt=mock.Mock()) parms = {'pw': 'pwd', 'priMemSize': '1G', 'maxMemSize': '1G', 'privClasses': 'G', 'vdisk': '0102:2048M'} rh.parms = parms makeVM.createVM(rh) write.assert_called_with(mock.ANY, b'USER pwd 1G 1G G\n' b'COMMAND SET VCONFIG MODE LINUX\n' b'COMMAND DEFINE CPU 00 TYPE IFL\n' b'MDISK 0102 FB-512 V-DISK 4194296 MWV\n') @mock.patch("os.write") def test_create_VM_swap_4096M(self, write): rh = ReqHandle.ReqHandle(captureLogs=False, smt=mock.Mock()) parms = {'pw': 'pwd', 'priMemSize': '1G', 'maxMemSize': '1G', 'privClasses': 'G', 'vdisk': '0102:4096M'} rh.parms = parms rs = makeVM.createVM(rh) self.assertEqual(4, rs) @mock.patch("os.write") def test_create_VM_swap_256M(self, write): rh = ReqHandle.ReqHandle(captureLogs=False, smt=mock.Mock()) parms = {'pw': 'pwd', 'priMemSize': '1G', 'maxMemSize': '1G', 'privClasses': 'G', 'vdisk': '0102:256M'} rh.parms = parms makeVM.createVM(rh) write.assert_called_with(mock.ANY, b'USER pwd 1G 1G G\n' b'COMMAND SET VCONFIG MODE LINUX\n' b'COMMAND DEFINE CPU 00 TYPE IFL\n' b'MDISK 0102 FB-512 V-DISK 524288 MWV\n') @mock.patch("os.write") def test_create_VM_STOR_RESERVED_positive(self, write): rh = ReqHandle.ReqHandle(captureLogs=False, smt=mock.Mock()) parms = {'pw': 'pwd', 'priMemSize': '1G', 'maxMemSize': '4G', 'privClasses': 'G', 'setReservedMem': ''} rh.parms = 
parms makeVM.createVM(rh) write.assert_called_with(mock.ANY, b'USER pwd 1G 4G G\n' b'COMMAND SET VCONFIG MODE LINUX\n' b'COMMAND DEFINE CPU 00 TYPE IFL\n' b'COMMAND DEF STOR RESERVED 3072M\n') @mock.patch("os.write") def test_create_VM_STOR_RESERVED_0M(self, write): rh = ReqHandle.ReqHandle(captureLogs=False, smt=mock.Mock()) parms = {'pw': 'pwd', 'priMemSize': '1G', 'maxMemSize': '1G', 'privClasses': 'G', 'setReservedMem': ''} rh.parms = parms makeVM.createVM(rh) write.assert_called_with(mock.ANY, b'USER pwd 1G 1G G\n' b'COMMAND SET VCONFIG MODE LINUX\n' b'COMMAND DEFINE CPU 00 TYPE IFL\n' b'COMMAND DEF STOR RESERVED 0M\n') @mock.patch("os.write") def test_create_VM_STOR_RESERVED_0M_diff_unit(self, write): rh = ReqHandle.ReqHandle(captureLogs=False, smt=mock.Mock()) parms = {'pw': 'pwd', 'priMemSize': '1024M', 'maxMemSize': '1G', 'privClasses': 'G', 'setReservedMem': ''} rh.parms = parms makeVM.createVM(rh) write.assert_called_with(mock.ANY, b'USER pwd 1024M 1G G\n' b'COMMAND SET VCONFIG MODE LINUX\n' b'COMMAND DEFINE CPU 00 TYPE IFL\n' b'COMMAND DEF STOR RESERVED 0M\n') @mock.patch("os.write") def test_create_with_profile(self, write): rh = ReqHandle.ReqHandle(captureLogs=False, smt=mock.Mock()) parms = {'pw': 'pwd', 'priMemSize': '1024M', 'maxMemSize': '1G', 'privClasses': 'G', 'comment': 'comment1$@$@$this is comment2$@$@$'} rh.parms = parms makeVM.createVM(rh) write.assert_called_with(mock.ANY, b'USER pwd 1024M 1G G\n' b'COMMAND SET VCONFIG MODE LINUX\n' b'COMMAND DEFINE CPU 00 TYPE IFL\n' b'* COMMENT1\n' b'* THIS IS COMMENT2\n') @mock.patch("os.write") def test_create_with_cpupool(self, write): rh = ReqHandle.ReqHandle(captureLogs=False, smt=mock.Mock()) parms = {'pw': 'pwd', 'priMemSize': '1024M', 'maxMemSize': '1G', 'privClasses': 'G', 'commandSchedule': 'POOL1'} rh.parms = parms makeVM.createVM(rh) write.assert_called_with(mock.ANY, b'USER pwd 1024M 1G G\n' b'COMMAND SET VCONFIG MODE LINUX\n' b'COMMAND DEFINE CPU 00 TYPE IFL\n' b'COMMAND SCHEDULE * WITHIN 
POOL POOL1\n') @mock.patch("os.write") def test_create_with_share(self, write): rh = ReqHandle.ReqHandle(captureLogs=False, smt=mock.Mock()) parms = {'pw': 'pwd', 'priMemSize': '1024M', 'maxMemSize': '1G', 'privClasses': 'G', 'commandSetShare': 'RELATIVE 125'} rh.parms = parms makeVM.createVM(rh) write.assert_called_with(mock.ANY, b'USER pwd 1024M 1G G\n' b'COMMAND SET VCONFIG MODE LINUX\n' b'COMMAND DEFINE CPU 00 TYPE IFL\n' b'SHARE RELATIVE 125\n') @mock.patch("os.write") def test_create_with_rdomain(self, write): rh = ReqHandle.ReqHandle(captureLogs=False, smt=mock.Mock()) parms = {'pw': 'pwd', 'priMemSize': '1024M', 'maxMemSize': '1G', 'privClasses': 'G', 'commandRDomain': 'Z15ONLY'} rh.parms = parms makeVM.createVM(rh) write.assert_called_with(mock.ANY, b'USER pwd 1024M 1G G\n' b'COMMAND SET VCONFIG MODE LINUX\n' b'COMMAND DEFINE CPU 00 TYPE IFL\n' b'COMMAND SET VMRELOCATE * DOMAIN Z15ONLY\n') @mock.patch("os.write") def test_create_with_pcif(self, write): rh = ReqHandle.ReqHandle(captureLogs=False, smt=mock.Mock()) parms = {'pw': 'pwd', 'priMemSize': '1024M', 'maxMemSize': '1G', 'privClasses': 'G', 'commandPcif': '100:200'} rh.parms = parms makeVM.createVM(rh) write.assert_called_with(mock.ANY, b'USER pwd 1024M 1G G\n' b'COMMAND SET VCONFIG MODE LINUX\n' b'COMMAND DEFINE CPU 00 TYPE IFL\n' b'COMMAND ATTACH PCIF 100 * AS 200\n') @mock.patch("os.write") def test_create_with_logonby(self, write): rh = ReqHandle.ReqHandle(captureLogs=False, smt=mock.Mock()) parms = {'pw': 'pwd', 'priMemSize': '1024M', 'maxMemSize': '1G', 'privClasses': 'G', 'byUsers': ['USER1', 'USER2']} rh.parms = parms makeVM.createVM(rh) write.assert_called_with(mock.ANY, b'USER pwd 1024M 1G G\n' b'COMMAND SET VCONFIG MODE LINUX\n' b'COMMAND DEFINE CPU 00 TYPE IFL\n' b'LOGONBY USER1 USER2\n') zVMCloudConnector-1.6.3/smtLayer/tests/unit/test_getHost.py0000664000175000017510000001306214263437516023506 0ustar ruirui00000000000000# Copyright 2018 IBM Corp. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from unittest import mock from smtLayer import ReqHandle from smtLayer import getHost from smtLayer.tests.unit import base class SMTGetHostTestCase(base.SMTTestCase): """Test cases for getHost.py in smtLayer.""" def test_getDiskPoolSpace9336(self): parts = ['v1', '9336-32', 1, 10000000] size = getHost._getDiskSize(parts) self.assertEqual(5120000000, size) def test_getDiskPoolSpace3390(self): parts = ['v1', '3390-09', 1, 10016] size = getHost._getDiskSize(parts) self.assertEqual(7384596480, size) def test_getDiskPoolSpace9336Unknown(self): parts = ['v1', '????', 1, 10000000] size = getHost._getDiskSize(parts) self.assertEqual(5120000000, size) def test_getDiskPoolSpace3390Unknown(self): parts = ['v1', '????', 1, 10016] size = getHost._getDiskSize(parts) self.assertEqual(7384596480, size) def test_getDiskPoolSpace9336UnknownWithFlag(self): parts = ['v1', '9336-?', 1, 10000000] size = getHost._getDiskSize(parts) self.assertEqual(5120000000, size) def test_getDiskPoolSpace3390UnknownWithFlag(self): parts = ['v1', '3390-?', 1, 10016] size = getHost._getDiskSize(parts) self.assertEqual(7384596480, size) @mock.patch.object(getHost, 'invokeSMCLI') def test_getCPUCount_without_STANDBY(self, fake_smcli): fake_smcli.return_value = { 'overallRC': 0, 'rc': 0, 'rs': 0, 'errno': 0, 'response': 'Partition mode: Z/VM\n\n' 'ADDRESS STATUS TYPE CORE_ID\n' '0000 MASTER-PROCESSOR CP 0000\n' '0002 ALTERNATE IFL 0001\n' '0003 PARKED IFL 0001\n' '0004 
PARKED IFL 0002\n', 'strError': '' } rh = ReqHandle.ReqHandle(captureLogs=False, smt=mock.Mock()) ret_total, ret_used = getHost.getCPUCount(rh) print("return value1:", ret_total, ret_used) self.assertEqual(4, ret_total) self.assertEqual(4, ret_used) @mock.patch.object(getHost, 'invokeSMCLI') def test_getCPUCount_with_STANDBY(self, fake_smcli): fake_smcli.return_value = { 'overallRC': 0, 'rc': 0, 'rs': 0, 'errno': 0, 'response': 'Partition mode: Z/VM\n\n' 'ADDRESS STATUS TYPE CORE_ID\n' '0000 MASTER-PROCESSOR CP 0000\n' '0002 ALTERNATE IFL 0001\n' '0003 PARKED IFL 0001\n' '0004 STANDBY IFL 0002\n', 'strError': '' } rh = ReqHandle.ReqHandle(captureLogs=False, smt=mock.Mock()) ret_total, ret_used = getHost.getCPUCount(rh) print("return value2:", ret_total, ret_used) self.assertEqual(4, ret_total) self.assertEqual(3, ret_used) @mock.patch.object(getHost, 'invokeSMCLI') def test_getCPUCount_no_CPU(self, fake_smcli): fake_smcli.return_value = { 'overallRC': 0, 'rc': 0, 'rs': 0, 'errno': 0, 'response': 'Partition mode: Z/VM\n\n' 'ADDRESS STATUS TYPE CORE_ID\n', 'strError': '' } rh = ReqHandle.ReqHandle(captureLogs=False, smt=mock.Mock()) ret_total, ret_used = getHost.getCPUCount(rh) print("return value3:", ret_total, ret_used) self.assertEqual(0, ret_total) self.assertEqual(0, ret_used) @mock.patch.object(getHost, 'invokeSMCLI') def test_getCPUCount_noTypebutOOO(self, fake_smcli): fake_smcli.return_value = { 'overallRC': 0, 'rc': 0, 'rs': 0, 'errno': 0, 'response': 'Partition mode: Z/VM\n\n' 'ADDRESS STATUS OOO CORE_ID\n' '0000 MASTER-PROCESSOR CP 0000\n' '0002 ALTERNATE IFL 0001\n' '0003 PARKED IFL 0001\n' '0004 STANDBY IFL 0002\n', 'strError': '' } rh = ReqHandle.ReqHandle(captureLogs=False, smt=mock.Mock()) ret_total, ret_used = getHost.getCPUCount(rh) print("return value4:", ret_total, ret_used) self.assertEqual(0, ret_total) self.assertEqual(0, ret_used) @mock.patch.object(getHost, 'invokeSMCLI') def test_getCPUCount_with_overallRC_error(self, fake_smcli): 
fake_smcli.return_value = { 'overallRC': 24, 'rc': 0, 'rs': 0, 'errno': 0, 'response': 'SMAPI API failed\n', 'strError': 'Input error' } rh = ReqHandle.ReqHandle(captureLogs=False, smt=mock.Mock()) ret_total, ret_used = getHost.getCPUCount(rh) print("return value5:", ret_total, ret_used) self.assertEqual(0, ret_total) self.assertEqual(0, ret_used) zVMCloudConnector-1.6.3/smtLayer/tests/unit/test_getVM.py0000664000175000017510000000316514263437505023114 0ustar ruirui00000000000000# Copyright 2021 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock from smtLayer import getVM from smtLayer.tests.unit import base class SMTGetVMTestCase(base.SMTTestCase): """Test cases for getHost.py in smtLayer.""" def test_extract_fcp_data(self): rh = mock.Mock() fake_response = ['FCP device number: 1B0E', 'Status: Free', 'NPIV world wide port number: C05076DE330005EA', 'Channel path ID: 27', 'Physical world wide port number: C05076DE33002E41', 'Owner: NONE', 'FCP device number: 1B0F', 'Status: Free', 'NPIV world wide port number: C05076DE330005EB', 'Channel path ID: 27', 'Physical world wide port number:', 'Owner: turns', ''] raw_data = '\n'.join(fake_response) ret = getVM.extract_fcp_data(rh, raw_data, 'free') self.assertEqual(ret, '\n'.join(fake_response[0:12])) zVMCloudConnector-1.6.3/smtLayer/tests/unit/base.py0000664000175000017510000000132113672563714021742 0ustar ruirui00000000000000# Copyright 2018 IBM Corp. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import unittest class SMTTestCase(unittest.TestCase): def setUp(self): super(SMTTestCase, self).setUp() zVMCloudConnector-1.6.3/smtLayer/tests/unit/test_vmStatus.py0000664000175000017510000000413514266177632023723 0ustar ruirui00000000000000# -*- coding: utf-8 # Copyright 2022 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from smtLayer import vmStatus from smtLayer.tests.unit import base class SMTvmStatusTestCase(base.SMTTestCase): """Test cases for vmStatus.py in smtLayer.""" def test_get_success(self): s = vmStatus.SMAPIStatus() s.RecordSuccess() s.RecordSuccess() ret = s.Get()['SMAPI'] d = ret.pop('lastSuccess', None) self.assertIsNotNone(d) exp = {'continueousFail': 0, 'totalFail': 0, 'healthy': True, 'lastFail': '', 'totalSuccess': 2} self.assertDictEqual(exp, ret) def test_get_fail(self): s = vmStatus.SMAPIStatus() s.RecordFail() s.RecordFail() ret = s.Get()['SMAPI'] d = ret.pop('lastFail', None) self.assertIsNotNone(d) exp = {'continueousFail': 2, 'totalFail': 2, 'healthy': True, 'lastSuccess': '', 'totalSuccess': 0} self.assertDictEqual(exp, ret) def test_get_multiplefail(self): s = vmStatus.SMAPIStatus() s.RecordSuccess() for i in range(40): s.RecordFail() ret = s.Get()['SMAPI'] d = ret.pop('lastFail', None) self.assertIsNotNone(d) m = ret.pop('lastSuccess', None) self.assertIsNotNone(m) exp = {'continueousFail': 40, 'totalFail': 40, 'healthy': False, 'totalSuccess': 1} self.assertDictEqual(exp, ret) zVMCloudConnector-1.6.3/smtLayer/tests/unit/__init__.py0000664000175000017510000000000013672563714022560 0ustar ruirui00000000000000zVMCloudConnector-1.6.3/smtLayer/tests/unit/test_vmUtils.py0000664000175000017510000000423313672563714023537 0ustar ruirui00000000000000# -*- coding: utf-8 # Copyright 2018 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from smtLayer import vmUtils from smtLayer import ReqHandle from smtLayer.tests.unit import base class SMTvmUtilsTestCase(base.SMTTestCase): """Test cases for vmUtils.py in smtLayer.""" def test_getVM_directory_py3(self): rh = ReqHandle.ReqHandle(captureLogs=False) with mock.patch('subprocess.check_output') as exec_cmd: # subprocess.check_output returns bytes in py3 exec_cmd.return_value = ( b"0 0 0 (details) None\n" b"USER T9572493 LBYONLY 2048m 64G G\nINCLUDE ZCCDFLT\n" b"COMMAND DEF STOR RESERVED 63488M\n" b"CPU 00 BASE\nIPL 0100\nLOGONBY MAINT\nMACHINE ESA 32\n" b"MDISK 0100 3390 48697 5500 OMB1B6 MR\n" b"*DVHOPT LNK0 LOG1 RCM1 SMS0 NPW1 LNGAMENG PWC20180808 " b"CRC\xf3:\n") expected_resp = ( u"USER T9572493 LBYONLY 2048m 64G G\nINCLUDE ZCCDFLT\n" u"COMMAND DEF STOR RESERVED 63488M\nCPU 00 BASE\nIPL 0100\n" u"LOGONBY MAINT\nMACHINE ESA 32\n" u"MDISK 0100 3390 48697 5500 OMB1B6 MR\n" u"*DVHOPT LNK0 LOG1 RCM1 SMS0 NPW1 LNGAMENG PWC20180808 " u"CRC\ufffd:\n") res = vmUtils.invokeSMCLI(rh, "Image_Query_DM", ['-T', 'fakeuid']) self.assertEqual(res['response'], expected_resp) exec_cmd.assert_called_once_with( ['sudo', '/opt/zthin/bin/smcli', 'Image_Query_DM', '--addRCheader', '-T', 'fakeuid'], close_fds=True) zVMCloudConnector-1.6.3/smtLayer/tests/__init__.py0000664000175000017510000000000013672563714021601 0ustar ruirui00000000000000zVMCloudConnector-1.6.3/smtLayer/__init__.py0000664000175000017510000000000013672563714020437 0ustar ruirui00000000000000zVMCloudConnector-1.6.3/smtLayer/deleteVM.py0000664000175000017510000002101113672563714020412 0ustar ruirui00000000000000# DeleteVM functions for Systems Management Ultra Thin Layer # # Copyright 2017 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from smtLayer import generalUtils from smtLayer import msgs from smtLayer.vmUtils import invokeSMCLI, isLoggedOn, purgeReader modId = "DVM" version = "1.0.0" # List of subfunction handlers. # Each subfunction contains a list that has: # Readable name of the routine that handles the subfunction, # Code for the function call. subfuncHandler = { 'DIRECTORY': ['deleteMachine', lambda rh: deleteMachine(rh)], 'HELP': ['help', lambda rh: help(rh)], 'VERSION': ['getVersion', lambda rh: getVersion(rh)], } """ List of positional operands based on subfunction. Each subfunction contains a list which has a dictionary with the following information for the positional operands: - Human readable name of the operand, - Property in the parms dictionary to hold the value, - Is it required (True) or optional (False), - Type of data (1: int, 2: string). """ posOpsList = {} """ List of additional operands/options supported by the various subfunctions. The dictionary followng the subfunction name uses the keyword from the command as a key. Each keyword has a dictionary that lists: - the related parms item that stores the value, - how many values follow the keyword, and - the type of data for those values (1: int, 2: string) """ keyOpsList = { 'DIRECTORY': {'--showparms': ['showParms', 0, 0]}, 'HELP': {}, 'VERSION': {}, } def deleteMachine(rh): """ Delete a virtual machine from the user directory. Input: Request Handle with the following properties: function - 'DELETEVM' subfunction - 'DIRECTORY' userid - userid of the virtual machine to be deleted. 
Output: Request Handle updated with the results. Return code - 0: ok, non-zero: error """ rh.printSysLog("Enter deleteVM.deleteMachine") results = {'overallRC': 0, 'rc': 0, 'rs': 0} # Is image logged on ? state = 'on' # Assume 'on'. results = isLoggedOn(rh, rh.userid) if results['overallRC'] != 0: # Cannot determine the log on/off state. # Message already included. Act as if it is 'on'. pass elif results['rs'] == 0: # State is powered on. pass else: state = 'off' # Reset values for rest of subfunction results['overallRC'] = 0 results['rc'] = 0 results['rs'] = 0 if state == 'on': parms = ["-T", rh.userid, "-f IMMED"] results = invokeSMCLI(rh, "Image_Deactivate", parms) if results['overallRC'] == 0: pass elif (results['overallRC'] == 8 and results['rc'] == 200 and (results['rs'] == 12 or results['rs'] == 16)): # Tolerable error. Machine is already in or going into the state # that we want it to enter. rh.updateResults({}, reset=1) else: # SMAPI API failed. rh.printLn("ES", results['response']) rh.updateResults(results) # Use results returned by invokeSMCLI # Clean up the reader before delete if results['overallRC'] == 0: result = purgeReader(rh) if result['overallRC'] != 0: # Tolerable the purge failure error rh.updateResults({}, reset=1) if results['overallRC'] == 0: parms = ["-T", rh.userid, "-e", "0"] results = invokeSMCLI(rh, "Image_Delete_DM", parms) if results['overallRC'] != 0: # SMAPI API failed. rh.printLn("ES", results['response']) rh.updateResults(results) # Use results returned by invokeSMCLI rh.printSysLog("Exit deleteVM.deleteMachine, rc: " + str(rh.results['overallRC'])) return rh.results['overallRC'] def doIt(rh): """ Perform the requested function by invoking the subfunction handler. Input: Request Handle Output: Request Handle updated with parsed input. Return code - 0: ok, non-zero: error """ rh.printSysLog("Enter deleteVM.doIt") # Show the invocation parameters, if requested. 
if 'showParms' in rh.parms and rh.parms['showParms'] is True: rh.printLn("N", "Invocation parameters: ") rh.printLn("N", " Routine: deleteVM." + str(subfuncHandler[rh.subfunction][0]) + "(reqHandle)") rh.printLn("N", " function: " + rh.function) rh.printLn("N", " userid: " + rh.userid) rh.printLn("N", " subfunction: " + rh.subfunction) rh.printLn("N", " parms{}: ") for key in rh.parms: if key != 'showParms': rh.printLn("N", " " + key + ": " + str(rh.parms[key])) rh.printLn("N", " ") # Call the subfunction handler subfuncHandler[rh.subfunction][1](rh) rh.printSysLog("Exit deleteVM.doIt, rc: " + str(rh.results['overallRC'])) return rh.results['overallRC'] def getVersion(rh): """ Get the version of this function. Input: Request Handle Output: Request Handle updated with the results. Return code - 0: ok, non-zero: error """ rh.printLn("N", "Version: " + version) return 0 def help(rh): """ Produce help output specifically for DeleteVM functions. Input: Request Handle Output: Request Handle updated with the results. Return code - 0: ok, non-zero: error """ showInvLines(rh) showOperandLines(rh) return 0 def parseCmdline(rh): """ Parse the request command input. Input: Request Handle Output: Request Handle updated with parsed input. Return code - 0: ok, non-zero: error """ rh.printSysLog("Enter deleteVM.parseCmdline") if rh.totalParms >= 2: rh.userid = rh.request[1].upper() else: # Userid is missing. msg = msgs.msg['0010'][1] % modId rh.printLn("ES", msg) rh.updateResults(msgs.msg['0010'][0]) rh.printSysLog("Exit deleteVM.parseCmdLine, rc: " + rh.results['overallRC']) return rh.results['overallRC'] if rh.totalParms == 2: rh.subfunction = rh.userid rh.userid = '' if rh.totalParms >= 3: rh.subfunction = rh.request[2].upper() # Verify the subfunction is valid. if rh.subfunction not in subfuncHandler: # Subfunction is missing. 
subList = ', '.join(sorted(subfuncHandler.keys())) msg = msgs.msg['0011'][1] % (modId, subList) rh.printLn("ES", msg) rh.updateResults(msgs.msg['0011'][0]) # Parse the rest of the command line. if rh.results['overallRC'] == 0: rh.argPos = 3 # Begin Parsing at 4th operand generalUtils.parseCmdline(rh, posOpsList, keyOpsList) rh.printSysLog("Exit deleteVM.parseCmdLine, rc: " + str(rh.results['overallRC'])) return rh.results['overallRC'] def showInvLines(rh): """ Produce help output related to command synopsis Input: Request Handle """ if rh.subfunction != '': rh.printLn("N", "Usage:") rh.printLn("N", " python " + rh.cmdName + " DeleteVM directory") rh.printLn("N", " python " + rh.cmdName + " DeleteVM help") rh.printLn("N", " python " + rh.cmdName + " DeleteVM version") return def showOperandLines(rh): """ Produce help output related to operands. Input: Request Handle """ if rh.function == 'HELP': rh.printLn("N", " For the DeleteVM function:") else: rh.printLn("N", "Sub-Functions(s):") rh.printLn("N", " directory - " + "Delete a virtual machine from the user directory.") rh.printLn("N", " help - " + "Displays this help information.") rh.printLn("N", " version - " + "Show the version of the power function") if rh.subfunction != '': rh.printLn("N", "Operand(s):") rh.printLn("N", " - " + "Userid of the target virtual machine") return zVMCloudConnector-1.6.3/smtLayer/cmdVM.py0000664000175000017510000001640714263437505017723 0ustar ruirui00000000000000# CmdVM functions for Systems Management Ultra Thin Layer # # Copyright 2017 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. from smtLayer import generalUtils from smtLayer import msgs from smtLayer.vmUtils import execCmdThruIUCV modId = 'CMD' version = "1.0.0" """ List of subfunction handlers. Each subfunction contains a list that has: Readable name of the routine that handles the subfunction, Code for the function call. """ subfuncHandler = { 'CMD': ['invokeCmd', lambda rh: invokeCmd(rh)], 'HELP': ['help', lambda rh: help(rh)], 'VERSION': ['getVersion', lambda rh: getVersion(rh)], } """ List of positional operands based on subfunction. Each subfunction contains a list which has a dictionary with the following information for the positional operands: - Human readable name of the operand, - Property in the parms dictionary to hold the value, - Is it required (True) or optional (False), - Type of data (1: int, 2: string). """ posOpsList = { 'CMD': [ ['Command to send', 'cmd', True, 2], ['Timeout value', 'timeout', False, 1], ], } """ List of additional operands/options supported by the various subfunctions. The dictionary followng the subfunction name uses the keyword from the command as a key. Each keyword has a dictionary that lists: - the related parms item that stores the value, - how many values follow the keyword, and - the type of data for those values (1: int, 2: string) """ keyOpsList = { 'CMD': { '--showparms': ['showParms', 0, 0], }, } def doIt(rh): """ Perform the requested function by invoking the subfunction handler. Input: Request Handle Output: Request Handle updated with parsed input. Return code - 0: ok, non-zero: error """ rh.printSysLog("Enter cmdVM.doIt") # Show the invocation parameters, if requested. if 'showParms' in rh.parms and rh.parms['showParms'] is True: rh.printLn("N", "Invocation parameters: ") rh.printLn("N", " Routine: cmdVM." 
+ str(subfuncHandler[rh.subfunction][0]) + "(reqHandle)") rh.printLn("N", " function: " + rh.function) rh.printLn("N", " userid: " + rh.userid) rh.printLn("N", " subfunction: " + rh.subfunction) rh.printLn("N", " parms{}: ") for key in rh.parms: if key != 'showParms': rh.printLn("N", " " + key + ": " + str(rh.parms[key])) rh.printLn("N", " ") # Call the subfunction handler subfuncHandler[rh.subfunction][1](rh) rh.printSysLog("Exit cmdVM.doIt, rc: " + str(rh.results['overallRC'])) return rh.results['overallRC'] def getVersion(rh): """ Get the version of this function. Input: Request Handle Output: Request Handle updated with the results. Return code - 0: ok, non-zero: error """ rh.printLn("N", "Version: " + version) return 0 def help(rh): """ Produce help output specifically for CmdVM functions. Input: Request Handle Output: Request Handle updated with the results. Return code - 0: ok, non-zero: error """ showInvLines(rh) showOperandLines(rh) return 0 def invokeCmd(rh): """ Invoke the command in the virtual machine's operating system. Input: Request Handle with the following properties: function - 'CMDVM' subfunction - 'CMD' userid - userid of the virtual machine parms['cmd'] - Command to send Output: Request Handle updated with the results. Return code - 0: ok, non-zero: error """ rh.printSysLog("Enter cmdVM.invokeCmd, userid: " + rh.userid) timeout = rh.parms.get('timeout', None) results = execCmdThruIUCV(rh, rh.userid, rh.parms['cmd'], timeout=timeout) if results['overallRC'] == 0: rh.printLn("N", results['response']) else: rh.printLn("ES", results['response']) rh.updateResults(results) rh.printSysLog("Exit cmdVM.invokeCmd, rc: " + str(results['overallRC'])) return results['overallRC'] def parseCmdline(rh): """ Parse the request command input. Input: Request Handle Output: Request Handle updated with parsed input. 
Return code - 0: ok, non-zero: error """ rh.printSysLog("Enter cmdVM.parseCmdline") if rh.totalParms >= 2: rh.userid = rh.request[1].upper() else: # Userid is missing. msg = msgs.msg['0010'][1] % modId rh.printLn("ES", msg) rh.updateResults(msgs.msg['0010'][0]) rh.printSysLog("Exit cmdVM.parseCmdLine, rc: " + rh.results['overallRC']) return rh.results['overallRC'] if rh.totalParms == 2: rh.subfunction = rh.userid rh.userid = '' if rh.totalParms >= 3: rh.subfunction = rh.request[2].upper() # Verify the subfunction is valid. if rh.subfunction not in subfuncHandler: # Subfunction is missing. subList = ', '.join(sorted(subfuncHandler.keys())) msg = msgs.msg['0011'][1] % (modId, subList) rh.printLn("ES", msg) rh.updateResults(msgs.msg['0011'][0]) # Parse the rest of the command line. if rh.results['overallRC'] == 0: rh.argPos = 3 # Begin Parsing at 4th operand generalUtils.parseCmdline(rh, posOpsList, keyOpsList) rh.printSysLog("Exit cmdVM.parseCmdLine, rc: " + str(rh.results['overallRC'])) return rh.results['overallRC'] def showInvLines(rh): """ Produce help output related to command synopsis Input: Request Handle """ if rh.subfunction != '': rh.printLn("N", "Usage:") rh.printLn("N", " python " + rh.cmdName + " CmdVM cmd ") rh.printLn("N", " python " + rh.cmdName + " CmdVM help") rh.printLn("N", " python " + rh.cmdName + " CmdVM version") return def showOperandLines(rh): """ Produce help output related to operands. 
Input: Request Handle """ if rh.function == 'HELP': rh.printLn("N", " For the CmdVM function:") else: rh.printLn("N", "Sub-Functions(s):") rh.printLn("N", " cmd - " + "Send a command to a virtual machine's operating system.") rh.printLn("N", " help - " + "Displays this help information.") rh.printLn("N", " version - " + "show the version of the power function") if rh.subfunction != '': rh.printLn("N", "Operand(s):") rh.printLn("N", " - " + "Userid of the target virtual machine") rh.printLn("N", " - " + "Command to send to the virtual machine's OS.") return zVMCloudConnector-1.6.3/smtLayer/vmStatus.py0000664000175000017510000000451714266177632020547 0ustar ruirui00000000000000# Virtual Machine Utilities for Systems Management Ultra Thin Layer # # Copyright 2022 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# Virtual Machine status tracking for Systems Management Ultra Thin Layer.
from datetime import datetime
from threading import Lock

# Number of consecutive command-execution failures after which SMAPI is
# considered unhealthy.
CONTINUE_FAIL_THRESHOLD = 20


class SMAPIStatus(object):
    """Thread-safe success/failure counters for SMAPI command execution.

    Note: a "fail" recorded here does not mean a SMAPI API returned an
    error result; it means SMAPI failed to execute the command.
    """

    def __init__(self):
        # Timestamps of the most recent successful/failed call.
        # They stay '' until the first call of that kind is recorded.
        self.lastSuccess = ''
        self.lastFail = ''
        # Number of consecutive failures; 0 means the latest call succeeded.
        self.continueFail = 0
        # Lifetime totals.
        self.totalSuccess = 0
        self.totalFail = 0
        # The cloud connector runs multiple threads, so every mutation of
        # this shared state is serialized through one lock.
        self.lock = Lock()

    def RecordSuccess(self):
        """Record one successful call and reset the failure streak."""
        # 'with' guarantees the lock is released even if datetime.now()
        # or the arithmetic ever raised (original used acquire/release).
        with self.lock:
            self.lastSuccess = datetime.now()
            self.totalSuccess += 1
            # A success ends any run of consecutive failures.
            self.continueFail = 0

    def RecordFail(self):
        """Record one failed call and extend the failure streak."""
        with self.lock:
            self.lastFail = datetime.now()
            self.totalFail += 1
            self.continueFail += 1

    def Get(self):
        """Return a dictionary snapshot of the current SMAPI status."""
        status = {'SMAPI': {'totalSuccess': self.totalSuccess,
                            'totalFail': self.totalFail,
                            'lastSuccess': self.lastSuccess,
                            'lastFail': self.lastFail,
                            'continueousFail': self.continueFail,
                            'healthy': self.IsHealthy()}
                  }
        return status

    def IsHealthy(self):
        """Healthy while the consecutive-failure count is below threshold."""
        return self.continueFail < CONTINUE_FAIL_THRESHOLD


# Process-wide singleton instance, created lazily by GetSMAPIStatus().
_SMAPIStatus = None


def GetSMAPIStatus():
    """Return the process-wide SMAPIStatus singleton.

    Bug fix: the original assigned the new instance to a local name
    (_GetSMAPIStatus) and never set the module-level _SMAPIStatus, so the
    'is None' guard was always true and every call returned a brand-new
    SMAPIStatus object, discarding all previously recorded counters.
    """
    global _SMAPIStatus
    if _SMAPIStatus is None:
        _SMAPIStatus = SMAPIStatus()
    return _SMAPIStatus
See the # License for the specific language governing permissions and limitations # under the License. import argparse import datetime import os import re import six from six import string_types import sys import subprocess from subprocess import CalledProcessError from tempfile import NamedTemporaryFile from smtLayer.smt import SMT from smtLayer.ReqHandle import ReqHandle version = '1.0.0' # Version of this script longstring = '1' * 4096 """ The following dictionary contains keys and values used as substitution in the requests that are processed. Replaceable values are identified in the requests by '<<<' and '>>>'. The key within the '<<<' and '>>>' is in the subs dictionary. """ subs = { '<<>>': 'someid', # An existing userid that can be # started and stopped '<<>>': 'STUS', # A prefix for a userid that gets # created and destroyed. Tests # add to the prefix to get an id. '<<>>': 'g[][325$$$', # A userid that makes SMAPI cry # and beg for a swift death '<<>>': '', # An existing userid that can be # migrated or empty to bypass tests. '<<>>': '', # An existing userid that cannot be # migrated or empty to bypass tests. '<<>>': 'zvmhost', # A z/VM host for migration into it '<<>>': 'password', # password '<<>>': '2G', # Virtual machine size '<<>>': 'POOL1', # 3390 disk pool (keep this in # uppercase for smtTest ease of use) '<<>>': '1100', # Size of a 3390 for system deploys '<<>>': 'POOL4', # 9336 disk pool (keep this in # uppercase for smtTest ease of use) '<<>>': '/opt/xcat/share/xcat/scripts/setupDisk', # SetupDisk '<<>>': '/install/zvm/POC/testImages/cfgdrive.tgz', # Simple tar file for the config drive '<<>>': '/install/zvm/POC/testImages/' + 'rhel67eckd_small_1100cyl.img', # Small image file '<<>>': '/opt/zthin/bin/unpackdiskimage', # Location of unpackdiskimage '<<>>': longstring, '<<>>': '0', # Wait time for makeVM to fully # complete } # Add a substitution key for the userid of this system. 
cmd = ["sudo", "/sbin/vmcp", "query userid"] try: subs['<<>>'] = subprocess.check_output( cmd, close_fds=True, stderr=subprocess.STDOUT) subs['<<>>'] = bytes.decode(subs['<<>>']) subs['<<>>'].split()[0] except Exception: print("Could not find the userid of this system.") subs['<<>>'] = 'unknownUserid' # Add a substitution key for the name of the aemod script that # set the /etc/iucv_authorized_userid file to use our userid # and create the script. modFile = NamedTemporaryFile(delete=False) subs['<<>>'] = modFile.name file = open(modFile.name, 'w') file.write("#!/usr/bin/env bash\n") file.write("echo -n $1 > /etc/iucv_authorized_userid\n") file.close() """ A dictionary contains the elements needed to process a test. This includes the following keys: description - Discriptive information to show when running the test. request - Request to be passed to SMT. out - Input to grep to validate the output from a test. Normally, this is a reqular expression. The regular expression is input to grep which scans and validates the output. If output is an empty string then the test is assumed to have passed the output check. overallRC - A single return code or a list of return codes to compare against the overallRC property in the results. If the test returns an overallRC value that matches one of the specified values then it has passed the overallRC check. rc - A single return code or a list of return codes. If the test returns a return code that matches one of the specified return codes then it has passed the return code check. rs - A single return code or a list of return codes. If the test returns a return code that matches one of the specified return codes then it has passed the return code check. Note: A test must pass all specified tests (e.g. output, rc, etc.) in order for the test to pass. 
""" deployTests = [ { 'description': "Create a simple system: <<>>1", 'request': "MakeVM <<>>1 directory <<>> " + "<<>> g --ipl 100 --profile OSDFLT", 'out': "", 'overallRC': [0], }, { 'description': "Purge the reader", 'request': "ChangeVM <<>>1 purgerdr", 'out': "", 'overallRC': [0], }, { 'description': "Add a 3390 disk to <<>>1 as 100", 'request': "ChangeVM <<>>1 add3390 <<>> 100 " + "<<>>", 'out': "", 'overallRC': [0], }, { 'description': "Check out the user entry", 'request': "GetVM <<>>1 directory", 'out': "", 'overallRC': [0], }, { 'description': "Unpack the image into the disk.", 'request': "SHELL_TEST <<>> <<>>1 100 " + "<<>>", 'out': "", 'overallRC': [0], }, { 'description': "Punch the config drive tar file to the system.", 'request': "ChangeVM <<>>1 punchfile " + "<<>> --class x", 'out': "", 'overallRC': [0], }, { 'description': "Send an aemod to allow IUCV access by this system.", 'request': "ChangeVM <<>>1 aemod <<>> " + "--invparms <<>>", 'out': "", 'overallRC': [0], }, { 'description': "Power on the system and wait for to OS to come up.", 'request': "PowerVM <<>>1 on --wait --state up", 'out': "", 'overallRC': [0], }, { 'description': "Send a commmand to a system.", 'request': "CmdVM <<>>1 cmd pwd", 'out': "", 'overallRC': [0], }, { 'description': "Delete a system: <<>>1", 'request': "DeleteVM <<>>1 directory", 'out': "", 'overallRC': [0], }, ] generalTests = [ { 'description': "Test Help Function", 'request': "help", 'overallRC': [0], }, { 'description': "Test no operands => error", 'request': "", # Request with no parms 'overallRC': [4], 'rc': [4], 'rs': [9], }, { 'description': "Test Version", 'request': "version", 'out': "^Version:", 'overallRC': [0], }, { 'description': "Test unrecognized operands", 'request': "Steve is great", 'overallRC': [4], 'rc': [4], 'rs': [7], }, ] guestTests = [ { 'description': "Power on a system: <<>>", 'request': "PowerVM <<>> on", 'out': "", 'overallRC': [0], }, { 'description': "Get the status of the system: <<>>", 
'request': "getvm <<>> status --all", 'out': "CPU Used Time:", 'overallRC': [0], }, { 'description': "Get the power status of the system: <<>>", 'request': "getvm <<>> status --power", 'out': "Power state: on", 'overallRC': [0], }, { 'description': "Get the memory status of the system: <<>>", 'request': "getvm <<>> status --memory", 'out': "Total Memory:", 'overallRC': [0], }, { 'description': "Get the cpu status of the system: <<>>", 'request': "getvm <<>> status --cpu", 'out': "Processors:", 'overallRC': [0], }, { 'description': "Power off the system: <<>>", 'request': "PowerVM <<>> off", 'out': "", 'overallRC': [0], }, { 'description': "Get the status of the system: <<>>", 'request': "getvm <<>> status", 'out': "CPU Used Time: 0 sec", 'overallRC': [0], }, { 'description': "Get the power status of the system: <<>>", 'request': "getvm <<>> status --power", 'out': "Power state: off", 'overallRC': [0], }, { 'description': "Get the memory status of the system: <<>>", 'request': "getvm <<>> status --memory", 'out': "Total Memory: 0M", 'overallRC': [0], }, { 'description': "Get the cpu status of the system: <<>>", 'request': "getvm <<>> status --cpu", 'out': "Processors: 0", 'overallRC': [0], }, ] hostTests = [ { 'description': "Get the list of disk pools.", 'request': "GetHost diskpoolnames", 'overallRC': [0], }, { 'description': "Get the space for all disk pools.", 'request': "GetHost diskpoolspace", 'out': "Total", 'overallRC': [0], }, { 'description': "Get the space for a specific 3390 disk pool: " + "<<>>", 'request': "GetHost diskpoolspace <<>>", 'out': "^<<>> Total", 'overallRC': [0], }, { 'description': "Get the space for a specific 9336 disk pool: " + "<<>>", 'request': "GetHost diskpoolspace <<>>", 'out': "^<<>> Total", 'overallRC': [0], }, { 'description': "Get the FCP Device information.", 'request': "GetHost fcpdevices", 'out': "^FCP device number", 'overallRC': [0], }, { 'description': "Get the general information.", 'request': "GetHost general", 'out': 
"", 'overallRC': [0], }, ] iucvTests = [ { 'description': "Power on a system: <<>>", 'request': "PowerVM <<>> on --wait --state up", 'out': "", 'overallRC': [0], }, { 'description': "Send a commmand to a system.", 'request': "CmdVM <<>> cmd pwd", 'out': "", 'overallRC': [0], }, { 'description': "Send an failing commmand to a system.", 'request': "CmdVM <<>> cmd \"echo 'bob'|grep /john/\"", 'out': "", 'overallRC': [2], 'rc': [8], 'rs': [1], }, { 'description': "Send an unknown commmand to a system.", 'request': "CmdVM <<>> cmd SteveIsGreat", 'out': "", 'overallRC': [2], 'rc': [8], 'rs': [127], }, ] lifecycleTests = [ { 'description': "Create a simple system: <<>>2", 'request': "makevm <<>>2 directory smapi 2g g", 'out': "", 'overallRC': [0], }, { 'description': "Verify system exists: <<>>2", 'request': "smapi <<>>2 api Image_Query_DM", 'out': "", 'overallRC': [0], }, { 'description': "Delete a system: <<>>2", 'request': "deletevm <<>>2 directory", 'out': "", 'overallRC': [0], }, # We used to verify that system no longer exists but dirmaint was slower # and test case sometimes fails. 
] migrateTests = [ { 'description': "Get status for a specific userid that " + "cannot be migrated: <<>>", 'doIf': "'<<>>' != ''", 'request': "migrateVM <<>> status", 'overallRC': [99], 'rc': [99], 'rs': [419], }, { 'description': "Get all migration status for a host with " + "no active migrations.", 'doIf': "'<<>>' != ''", 'request': "migrateVM <<>> status --all", 'overallRC': [99], 'rc': [99], 'rs': [419], }, { 'description': ("Get incoming migration status for a host " + "with no active migrations."), 'doIf': "'<<>>' != ''", 'request': "migrateVM <<>> status --incoming", 'overallRC': [99], 'rc': [99], 'rs': [419], }, { 'description': "Get outgoing migration status for a host " + "with no active migrations.", 'doIf': "'<<>>' != ''", 'request': "migrateVM <<>> status --outgoing", 'overallRC': [99], 'rc': [99], 'rs': [419], }, { 'description': "Test a system for migration: <<>>", 'doIf': "'<<>>' != ''", 'request': "migrateVM <<>> test --destination " + "<<>>", 'overallRC': [99], 'rc': [99], 'rs': [418], }, { 'description': "Cancel a migration", 'doIf': "'<<>>' != ''", 'request': "migrateVM <<>> cancel", 'overallRC': [99], 'rc': [99], 'rs': [419], }, ] modifyTests = [ # >>>>>>>>> Create a simple system for logged off tests. 
{ 'description': "Create a simple system: <<>>3", 'request': "MakeVM <<>>3 directory <<>> " + "<<>> g --ipl 100 --profile OSDFLT", 'out': "", 'overallRC': [0], }, { 'description': "Verify no console log is available: <<>>3", 'request': "getvm <<>>3 consoleoutput", 'out': "", 'overallRC': [8], 'rc': [8], 'rs': [8] }, { 'description': "Wait <<>> seconds for source " + "directory to be updated.", 'request': "SHELL echo 'Sleeping for <<>> seconds " + "to allow source directory update to complete';sleep " + "<<>>", 'out': "", 'overallRC': [0], }, { 'description': "Add modifications to the activation engine", 'request': 'ChangeVM <<>>3 aemod <<>> ' + '--invparms "action=addMdisk vaddr=101 filesys=ext4 ' + 'mntdir=/mnt/ephemeral/0.0.0101"', 'out': "", 'overallRC': [0], }, { 'description': "Add unknown script mods to the activation engine", 'request': 'ChangeVM <<>>3 aemod BAD ' + '--invparms "action=addMdisk vaddr=101 filesys=ext4 ' + 'mntdir=/mnt/ephemeral/0.0.0101"', 'out': "", 'overallRC': [4], 'rc': [4], 'rs': [400], }, { 'description': "Add modifications to activation engine for bad id", 'request': 'ChangeVM BADID aemod <<>> ' + '--invparms "action=addMdisk vaddr=101 filesys=ext4 ' + 'mntdir=/mnt/ephemeral/0.0.0101"', 'out': "", 'overallRC': [4], }, { 'description': "Purge the reader: <<>>3", 'request': "ChangeVM <<>>3 purgerdr", 'out': "", 'overallRC': [0], }, { 'description': "Add a 3390 disk to the system with ext4: " + "<<>>3", 'request': "changevm <<>>3 add3390 <<>> " + "101 100m --mode w --filesystem ext4 " + "--readpw readpw --writepw writepw --multipw multipw", 'out': "", 'overallRC': [0], }, { 'description': "Remove the 3390 disk with ext4: <<>>3", 'request': "changevm <<>>3 removedisk 101", 'out': "", 'overallRC': [0], }, { 'description': "Add a 3390 disk to the system with xfs: " + "<<>>3", 'request': "changevm <<>>3 add3390 <<>> " + "102 100m --mode w --filesystem xfs", 'out': "", 'overallRC': [0], }, { 'description': "Remove the 3390 disk with xfs: 
<<>>3", 'request': "changevm <<>>3 removedisk 102", 'out': "", 'overallRC': [0], }, { 'description': "Add a 3390 disk to the system with swap: " + "<<>>3", 'request': "changevm <<>>3 add3390 <<>> " + "103 100m --mode w --filesystem swap", 'out': "", 'overallRC': [0], }, { 'description': "Remove the 3390 disk with swap: <<>>3", 'request': "changevm <<>>3 removedisk 103", 'out': "", 'overallRC': [0], }, { 'description': "Remove a disk that does not exist: <<>>3", 'request': "changevm <<>>3 removedisk 104", 'out': "", 'overallRC': [0], }, { 'description': "Add a 9336 disk to the system with ext4.", 'doIf': "'<<>>' != ''", 'request': "changevm <<>>3 add9336 <<>> " + "120 100m --mode w --filesystem ext4 " + "--readpw readpw --writepw writepw --multipw multipw", 'out': "", 'overallRC': [0], }, { 'description': "Remove the 9336 disk with ext4.", 'doIf': "'<<>>' != ''", 'request': "changevm <<>>3 removedisk 120", 'out': "", 'overallRC': [0], }, { 'description': "Add a 9336 disk to the system with xfs.", 'doIf': "'<<>>' != ''", 'request': "changevm <<>>3 add9336 <<>> " + "121 100m --mode w --filesystem xfs", 'out': "", 'overallRC': [0], }, { 'description': "Remove the 9336 disk with xfs.", 'doIf': "'<<>>' != ''", 'request': "changevm <<>>3 removedisk 121", 'out': "", 'overallRC': [0], }, { 'description': "Add a 9336 disk to the system with swap.", 'doIf': "'<<>>' != ''", 'request': "changevm <<>>3 add9336 <<>> " + "122 100m --mode w --filesystem swap", 'out': "", 'overallRC': [0], }, { 'description': "Remove the 9336 disk with swap.", 'doIf': "'<<>>' != ''", 'request': "changevm <<>>3 removedisk 122", 'out': "", 'overallRC': [0], }, # >>>>>>>>> Deploy an image for active system tests. 
{ 'description': "Add a 3390 disk for the root disk: <<>>3", 'request': "ChangeVM <<>>3 add3390 <<>> 100 " + "<<>>", 'out': "", 'overallRC': [0], }, { 'description': "Unpack the image into the disk: <<>>3", 'request': "SHELL_TEST <<>> <<>>3 100 " + "<<>>", 'out': "", 'overallRC': [0], }, { 'description': "Punch the config drive tar file to the system: " + "<<>>3", 'request': "ChangeVM <<>>3 punchfile " + "<<>> --class x", 'out': "", 'overallRC': [0], }, { 'description': "Send an aemod to allow IUCV access by this system.", 'request': "ChangeVM <<>>3 aemod <<>> " + "--invparms <<>>", 'out': "", 'overallRC': [0], }, { 'description': "Power on the system and wait for to OS to " + "come up: <<>>3", 'request': "PowerVM <<>>3 on --wait --state up", 'out': "", 'overallRC': [0], }, # >>>>>>>>> Tests that are related to active systems. { 'description': "Start console spooling on the system: " + "<<>>3", 'request': "CmdVM <<>>3 cmd 'vmcp spool console " + "to <<>>3 start'", 'overallRC': [0], }, { 'description': "Enable tracing so we put stuff to the " + "console of <<>>3", 'request': "CmdVM <<>>3 cmd 'vmcp trace diag run'", 'overallRC': [0], }, { 'description': "Force more to the console of " + "<<>>3", 'request': "CmdVM <<>>3 cmd 'vmcp query userid'", 'overallRC': [0], }, { 'description': "Get the console log of the system: <<>>3", 'request': "getvm <<>>3 consoleoutput", 'out': "List of spool files containing console logs " + "from <<>>3:", 'overallRC': [0], }, { 'description': "Add a 3390 disk to the system with ext4: " + "<<>>3", 'request': "changevm <<>>3 add3390 <<>> " + "110 100m --mode w --filesystem ext4", 'out': "", 'overallRC': [0], }, { 'description': "Online the 110 ECKD disk with ext4: " + "<<>>3", 'request': "CmdVM <<>>3 cmd '/sbin/cio_ignore -r 110; " + "which udevadm &> /dev/null && udevadm settle || udevsettle ;" + "/sbin/chccwdev -e 110 2>&1'", 'out': "", 'overallRC': [0], }, { 'description': "Remove the 3390 disk with ext4: <<>>3 110", 'request': "changevm 
<<>>3 removedisk 110", 'out': "", 'overallRC': [0], }, { 'description': "Add a 3390 disk to the system with xfs: " + "<<>>3", 'request': "changevm <<>>3 add3390 <<>> " + "111 100m --mode w --filesystem xfs", 'out': "", 'overallRC': [0], }, # Don't online the disk. This makes the chccwdev fail but the # failure should be ignored. { 'description': "Remove the 3390 disk with xfs: " + "<<>>3 111", 'request': "changevm <<>>3 removedisk 111", 'out': "", 'overallRC': [0], }, { 'description': "Add a 3390 disk to the system with swap: " + "<<>>3 112", 'request': "changevm <<>>3 add3390 <<>> " + "112 100m --mode w --filesystem swap", 'out': "", 'overallRC': [0], }, { 'description': "Online the 112 ECKD disk with swap: " + "<<>>3", 'request': "CmdVM <<>>3 cmd '/sbin/cio_ignore -r 112; " + "which udevadm &> /dev/null && udevadm settle || udevsettle ;" + "/sbin/chccwdev -e 112 2>&1'", 'out': "", 'overallRC': [0], }, { 'description': "Remove the 3390 disk with swap: " + "<<>>3 112", 'request': "changevm <<>>3 removedisk 112", 'out': "", 'overallRC': [0], }, { 'description': "Add a 9336 disk to an active system with ext4.", 'doIf': "'<<>>' != ''", 'request': "changevm <<>>3 add9336 <<>> " + "130 100m --mode w --filesystem ext4 " + "--readpw readpw --writepw writepw --multipw multipw", 'out': "", 'overallRC': [0], }, { 'description': "Check out the user entry", 'request': "GetVM <<>>3 directory", 'out': "", 'overallRC': [0], }, { 'description': "Online the 130 FBA disk with swap: " + "<<>>3", 'request': "CmdVM <<>>3 cmd '/sbin/cio_ignore -r 130; " + "which udevadm &> /dev/null && udevadm settle || udevsettle ;" + "/sbin/chccwdev -e 130 2>&1'", 'out': "", 'overallRC': [0], }, { 'description': "Remove the 9336 disk with ext4.", 'doIf': "'<<>>' != ''", 'request': "changevm <<>>3 removedisk 130", 'out': "", 'overallRC': [0], }, { 'description': "Add a 9336 disk to an active system with xfs.", 'doIf': "'<<>>' != ''", 'request': "changevm <<>>3 add9336 <<>> " + "131 100m --mode w 
--filesystem xfs", 'out': "", 'overallRC': [0], }, { 'description': "Online the 131 FBA disk with swap: " + "<<>>3", 'request': "CmdVM <<>>3 cmd '/sbin/cio_ignore -r 131; " + "which udevadm &> /dev/null && udevadm settle || udevsettle ;" + "/sbin/chccwdev -e 131 2>&1'", 'out': "", 'overallRC': [0], }, { 'description': "Remove the 9336 disk with xfs.", 'doIf': "'<<>>' != ''", 'request': "changevm <<>>3 removedisk 131", 'out': "", 'overallRC': [0], }, { 'description': "Add a 9336 disk to an active system with swap.", 'doIf': "'<<>>' != ''", 'request': "changevm <<>>3 add9336 <<>> " + "132 100m --mode w --filesystem swap", 'out': "", 'overallRC': [0], }, { 'description': "Online the 132 FBA disk with swap: " + "<<>>3", 'request': "CmdVM <<>>3 cmd '/sbin/cio_ignore -r 132; " + "which udevadm &> /dev/null && udevadm settle || udevsettle ;" + "/sbin/chccwdev -e 132 2>&1'", 'out': "", 'overallRC': [0], }, { 'description': "Remove the 9336 disk with swap.", 'doIf': "'<<>>' != ''", 'request': "changevm <<>>3 removedisk 132", 'out': "", 'overallRC': [0], }, { 'description': "Add/change an IPL statement", 'request': "changevm <<>>3 ipl 100", 'out': "", 'overallRC': [0], }, { 'description': "Add/change an IPL statement with loadparms", 'request': "changevm <<>>3 ipl 100 --loadparms cl", 'out': "", 'overallRC': [0], }, { 'description': "Add/change an IPL statement with loadparms", 'request': "changevm <<>>3 ipl 100 --loadparms lots", 'out': "", 'overallRC': [0], }, { 'description': "Add/change an IPL statement with parms", 'request': "changevm <<>>3 ipl cms --parms autocr", 'out': "", 'overallRC': [0], }, { 'description': "Verify IPL statement exists.", 'request': "smapi <<>>3 api Image_Query_DM", 'out': "IPL CMS PARM AUTOCR", 'overallRC': [0], }, { 'description': "Remove an IPL statement", 'request': "changevm <<>>3 removeipl", 'out': "", 'overallRC': [0], }, { 'description': "Add some loaddev statements", 'request': "changevm <<>>3 loaddev --boot 0 " + "--addr 123411 --lun 
12345678 --wwpn " + "5005076800aa0001 --scpDataType hex " "--scpData 1212", 'out': "", 'overallRC': [0], }, { 'description': "No datatype loaddev statements", 'request': "changevm <<>>3 loaddev --boot 0 " + "--addr 123411 --lun 12345678 --wwpn " + "5005076800aa0001 --scpData 1212", 'out': "", 'overallRC': [4], 'rc': [4], 'rs': [14], }, { 'description': "No data loaddev statements", 'request': "changevm <<>>3 loaddev --boot 0 " + "--addr 123411 --lun 12345678 --wwpn " + "5005076800aa0001 --scpDataType hex", 'out': "", 'overallRC': [4], 'rc': [4], 'rs': [14], }, { 'description': "Bad datatype loaddev statements", 'request': "changevm <<>>3 loaddev --boot 0 " + "--addr 123411 --lun 12345678 --wwpn " + "5005076800aa0001 --scpDataType BAD --scpData 1212", 'out': "", 'overallRC': [4], 'rc': [4], 'rs': [16], }, { 'description': "Really long scp data", 'request': "changevm <<>>3 loaddev --boot 0 " + "--addr 123411 --lun 12345678 --wwpn " + "5005076800aa0001 --scpDataType hex " + "--scpData <<>>", 'out': "", 'overallRC': [0], }, { 'description': "No boot parm (keep old boot)", 'request': "changevm <<>>3 loaddev --addr 123411 " + "--lun 12345678 --wwpn 5005076800aa0001 " + "--scpDataType hex --scpData 1212", 'out': "", 'overallRC': [0], }, { 'description': "No addr parm (keep old block address)", 'request': "changevm <<>>3 loaddev --lun " + "12345678 --wwpn 5005076800aa0001 " + "--scpDataType hex --scpData 1212", 'out': "", 'overallRC': [0], }, { 'description': "No lun parm (keep old lun)", 'request': "changevm <<>>3 loaddev --wwpn " + "5005076800aa0001 --scpDataType hex --scpData 1212", 'out': "", 'overallRC': [0], }, { 'description': "No wwpn parm (keep old wwpn)", 'request': "changevm <<>>3 loaddev --scpDataType " + "hex --scpData 1212", 'out': "", 'overallRC': [0], }, { 'description': "No parms (keep old parms)", 'request': "changevm <<>>3 loaddev", 'out': "", 'overallRC': [0], }, { 'description': "Verify loaddev boot statements exist", 'request': "smapi <<>>3 api 
Image_Query_DM", 'out': "LOADDEV BOOTPROG 0", 'overallRC': [0], }, { 'description': "Verify loaddev addr statements exist", 'request': "smapi <<>>3 api Image_Query_DM", 'out': "LOADDEV BR_LBA 0000000000123411", 'overallRC': [0], }, { 'description': "Verify loaddev lun statements exist", 'request': "smapi <<>>3 api Image_Query_DM", 'out': "LOADDEV LUN 0000000012345678", 'overallRC': [0], }, { 'description': "Verify loaddev wwpn statements exist.", 'request': "smapi <<>>3 api Image_Query_DM", 'out': "LOADDEV PORTNAME 5005076800AA0001", 'overallRC': [0], }, { 'description': "Verify loaddev wwpn statements exist", 'request': "smapi <<>>3 api Image_Query_DM", 'out': "LOADDEV SCPDATA HEX", 'overallRC': [0], }, { 'description': "Delete statements", 'request': "changevm <<>>3 loaddev --boot DELETE " + "--addr DELETE --lun DELETE --wwpn DELETE " + "--scpDataType DELETE", 'out': "", 'overallRC': [0], }, { 'description': "Verify loaddev statements are gone", 'request': "SMAPI <<>>3 API " + "Image_SCSI_Characteristics_Query_DM", 'out': "", 'overallRC': [8], 'rc': [0], 'rs': [28], }, { 'description': "Successfully purge the reader: <<>>3", 'request': "changeVM <<>>3 purgeRDR ", 'overallRC': [0], }, { 'description': "Try to purge read of a bad id: <<>>", 'request': "changeVM <<>> purgeRDR ", 'out': "Syntax error in function parameter 8", 'overallRC': [8], 'rc': [24], 'rs': [813] }, { 'description': "Punch the config drive tar file to the system.", 'request': "ChangeVM <<>>3 punchfile <<>>", 'out': "", 'overallRC': [0], }, { 'description': "Punch the config drive tar file to the system" + " with valid spool class.", 'request': "ChangeVM <<>>3 punchfile <<>>" + " --class b", 'out': "", 'overallRC': [0], }, { 'description': "Punch the config drive tar file to the system" + " with an invalid userid and file.", 'request': "ChangeVM <<>> punchfile invalid.config", 'out': "", 'overallRC': [4], 'rc': [7], 'rs': [401], }, { 'description': "Punch the config drive tar file to the system" + 
" with an invalid userid and spool class.", 'request': "ChangeVM <<>>3 punchfile invalid.config" + " --class b*", 'out': "", 'overallRC': [4], 'rc': [7], 'rs': [401], }, { 'description': "Punch the config drive tar file to the system" + " with an invalid userid.", 'request': "ChangeVM <<>> punchfile <<>>" + " --class b", 'out': "", 'overallRC': [4], 'rc': [4], 'rs': [424], }, { 'description': "Punch the config drive tar file to the system" + " with an invalid class.", 'request': "ChangeVM <<>>3 punchfile <<>>" + " --class b*", 'out': "", 'overallRC': [4], 'rc': [8], 'rs': [404], }, { 'description': "Punch the config drive tar file to the system" + " with an invalid file.", 'request': "ChangeVM <<>>3 punchfile invalid.config", 'out': "", 'overallRC': [4], 'rc': [7], 'rs': [401], }, # >>>>>>>>> Clean up by destroying the system. { 'description': "Delete the system: <<>>3", 'request': "deletevm <<>>3 directory", 'out': "", 'overallRC': [0], }, { 'description': "Clean up an reader files for <<>>3.", 'request': "CODE_SEG purgeRdr('<<>>3')", 'overallRC': [0], }, ] powerTests = [ { 'description': "Test PowerVM VERSION.", 'request': "PowerVM version", 'out': "^Version:", 'overallRC': [0], }, { 'description': "'PowerVM xxx JUNK' fails", 'request': "PowerVM xxx junk", 'out': "", 'overallRC': [4], }, { 'description': "Power off a system: <<>>", 'request': "PowerVM <<>> off --wait", 'out': "", 'overallRC': [0], }, { 'description': "Check status of powered off system.", 'request': "PowerVM <<>> status", 'out': "<<>>: off", 'overallRC': [0], 'rc': [0], 'rs': [1] }, { 'description': "Check isreachable of powered off system.", 'request': "PowerVM <<>> isreachable", 'out': "<<>>: unreachable", 'overallRC': [0], 'rs': [0] }, { 'description': "Power off an already powered off system.", 'request': "PowerVM <<>> off", 'out': "", 'overallRC': [0], }, { 'description': "Power on a system: <<>>", 'request': "PowerVM <<>> on", 'out': "", 'overallRC': [0], }, { 'description': "Power off a 
system with softOff option: " + "<<>>", 'request': "PowerVM <<>> softoff", 'out': "", 'overallRC': [0], }, { 'description': "Power on a system: <<>>", 'request': "PowerVM <<>> on", 'out': "", 'overallRC': [0], }, { 'description': "Power on a system that is on but not up: " + "<<>>", 'request': "PowerVM <<>> on --wait --state up", 'out': "<<>>: up", 'overallRC': [0], }, { 'description': "Check status of powered on system: <<>>", 'request': "PowerVM <<>> status", 'out': "<<>>: on", 'overallRC': [0], 'rc': [0], 'rs': [0] }, { 'description': "Check isreachable of powered on system: " + "<<>>", 'request': "PowerVM <<>> isreachable", 'out': "<<>>: reachable", 'overallRC': [0], 'rs': [1] }, { 'description': "Pause a system: <<>>", 'request': "PowerVM <<>> pause", 'out': "", 'overallRC': [0], }, { 'description': "Isreachable of a paused system is unreachable: " + "<<>>", 'request': "PowerVM <<>> isreachable", 'out': "<<>>: unreachable", 'overallRC': [0], 'rs': [0] }, { 'description': "Unpause a system: <<>>", 'request': "PowerVM <<>> unpause", 'out': "", 'overallRC': [0], }, { 'description': "Isreachable of an unpaused system is reachable: " + "<<>>", 'request': "PowerVM <<>> isreachable", 'out': "<<>>: reachable", 'overallRC': [0], 'rs': [1] }, { 'description': "Reset a system: <<>>", 'request': "PowerVM <<>> reset --wait --state up", 'out': "", 'overallRC': [0], }, { 'description': "Isreachable of an unpaused system is reachable: " + "<<>>", 'request': "PowerVM <<>> isreachable", 'out': "<<>>: reachable", 'overallRC': [0], 'rs': [1] }, { 'description': "Reboot a system: <<>>", 'request': "PowerVM <<>> reboot --wait", 'out': "", 'overallRC': [0], }, { 'description': "Reboot a system w/o waiting for the OS to be up: " + "<<>>", 'request': "PowerVM <<>> reboot", 'out': "", 'overallRC': [0], }, { 'description': "Wait for the OS to come up: <<>>", 'request': "PowerVM <<>> wait --state up", 'out': "<<>>: up", 'overallRC': [0], 'rs': [0] }, ] smapiTests = [ { 'description': 
"Directory related query w/o operands.", 'request': "smapi <<>> api Image_Query_DM", 'out': "", 'overallRC': [0], }, { 'description': "Disk pool query with operands.", 'request': "smapi <<>> api Image_Volume_Space_Query_DM " + "--operands '-q' 1 '-e' 1", 'out': "", 'overallRC': [0], }, { 'description': "Failing disk pool query with operands.", 'request': "smapi <<>> api Image_Volume_Space_Query_DM " + "--operands '-q' 4 '-e' 1", 'out': "", 'overallRC': [8], 'rc': [24], 'rs': [1018], }, ] testSets = { 'DEPLOY': { 'description': 'ECKD deploy image tests', 'doIf': "'<<>>' != ''", 'tests': deployTests}, 'GENERAL': { 'description': 'Tests that are not specific to a ' + 'particular function.', 'tests': generalTests}, 'GUEST': { 'description': 'Guest tests that are not covered under ' + 'other functions.', 'tests': guestTests}, 'HOST': { 'description': 'Host related tests', 'tests': hostTests}, 'IUCV': { 'description': 'Send commands to VM over IUCV', 'tests': iucvTests}, 'LIFECYCLE': { 'description': 'VM Life Cycle related tests', 'tests': lifecycleTests}, 'MIGRATE': { 'description': 'VM Migration related tests', 'tests': migrateTests}, 'MODIFY': { 'description': 'Modify a VM', 'tests': modifyTests}, 'POWER': { 'description': 'VM Power function tests', 'tests': powerTests}, 'SMAPI': { 'description': 'SMAP API invocation tests', 'tests': smapiTests}, } def localize(localFile, subs, testSets): """ Perform localization of substitution variables and test sets. This allows the invoker to extend or modify defined tests without modifying this file. Input: Name of local tailorization file (without .py) e.g. smtTestLocal for smtTestLocal.py file. Substitution dictionary to be updated. Test set dictionary to be updated. Output: None Note: - Upon exit the substitution and test set dictionary have been updated with the data from the localization file. 
""" try: smtTestLocal = __import__(localFile, fromlist=["*"]) except Exception as e: print(e) return 1 # Apply local overrides to the subs dictionary. if len(smtTestLocal.localSubs) > 0: print("Localizing localSubs dictionary.") for key in smtTestLocal.localSubs: print("Localizing " + key + ": " + smtTestLocal.localSubs[key]) subs[key] = smtTestLocal.localSubs[key] else: print("No local overrides exist for the subs dictionary.") # Apply local overrides to the testSets dictionary. if len(smtTestLocal.localTestSets) > 0: print("Localizing the test sets.") if 'clear:testSets' in smtTestLocal.localTestSets: print("Removing all original test sets.") testSets.clear() for key in smtTestLocal.localTestSets: if key == 'clear:testSets': continue print("Localizing test set: " + key) testSets[key] = smtTestLocal.localTestSets[key] else: print("No local test sets exist.") return 0 def purgeRdr(userid): """ Purge contents in this system's reader from a userid. Input: userid that originated the files we want to purge. Output: Return code - 0: no problems, 1: problem encountered. """ subRC = 0 userid = userid.upper() spoolList = [] queryCmd = ("sudo /sbin/vmcp query rdr userid '*' | " + "grep ^" + userid + " | awk '{print $2}'") try: qryRes = subprocess.check_output( queryCmd, close_fds=True, shell=True) qryRes = bytes.decode(qryRes) spoolList = qryRes.splitlines() except Exception as e: # All exceptions. print("Unable to purge reader files for in this " + "system's reader originally owned by: " + userid + ", exception: " + str(e)) subRC = 1 purgeCmd = ['sudo', '/sbin/vmcp', 'purge', 'reader', '0'] for purgeCmd[3] in spoolList: try: subprocess.check_output( purgeCmd, close_fds=True) except Exception as e: # All exceptions. print("Unable to purge reader file " + purgeCmd[3] + ", exception: " + str(e)) subRC = 1 return subRC def runTest(smt, test): """ Drive a test and validate the results. Input: SMT daemon object Dictionary element for the test to drive. 
Output: Final test score - 0: failed, 1: passed, """ global args if test['request'][0:10] != 'SHELL_TEST': reqHandle = ReqHandle(cmdName=sys.argv[0], captureLogs=True) results = reqHandle.parseCmdline(test['request']) if results['overallRC'] == 0: results = reqHandle.driveFunction() else: # Issue a function that is not considered a test. results = { 'overallRC': 0, 'rc': 0, 'rs': 0, 'errno': 0, 'strError': '', 'response': [], 'logEntries': [], } shellCmd = test['request'][11:] try: results['response'] = subprocess.check_output( shellCmd, stderr=subprocess.STDOUT, close_fds=True, shell=True) results['response'] = bytes.decode(results['response']) except CalledProcessError as e: results['response'] = e.output results['overallRC'] = e.returncode except Exception as e: # All other exceptions. if 'output' in e: results['response'] = e.output else: results['response'] = ('Exception encountered: ' + "details: %s" % six.text_type(e)) if 'returncode' in e: results['overallRC'] = e.returncode else: results['overallRC'] = -9999999 if isinstance(results['response'], string_types): results['response'] = [results['response']] print(" Overall rc: %s" % results['overallRC']) print(" rc: %s" % results['rc']) print(" rs: %s" % results['rs']) if len(results['response']) > 0: print(" Response:") for line in results['response']: print(" " + line) else: print(" Response: None returned") # Validate the response strings respScore = 1 # Assume the response tests passed. if 'out' in test.keys() and len(test['out']) > 0: # Expect a response let's test it. if len(results['response']) == 0: # No response returned when one was expected -> failed respScore = 0 else: # Test the response to see it matches an expected response # Put the response into a file. This avoids problems with # having special characters in the response that would # cause the shell to complain or get confused. 
tempFile = NamedTemporaryFile(delete=False) file = open(tempFile.name, 'w') for line in results['response']: file.write(line + '\n') file.close() cmd = ['grep', ''.join(test['out']), tempFile.name] try: junk = subprocess.check_output(cmd, close_fds=True) junk = bytes.decode(junk) if junk == '': respScore = 0 except Exception: respScore = 0 os.remove(tempFile.name) else: pass # No responses listed, treat as a match # Validate the Overall return code orcScore = 0 # Assume RC is not a desired one if 'overallRC' not in test.keys(): orcScore = 1 # No special value, assume it passed elif len(test['overallRC']) == 1: if test['overallRC'][0] == results['overallRC']: orcScore = 1 else: for wanted in test['overallRC']: if results['overallRC'] == wanted: orcScore = 1 break # Validate the failure return code rcScore = 0 # Assume RC is not a desired one if 'rc' not in test.keys(): rcScore = 1 # No special value, assume it passed elif len(test['rc']) == 1: if test['rc'][0] == results['rc']: rcScore = 1 else: for wanted in test['rc']: if results['rc'] == wanted: rcScore = 1 break # Validate the failure reason code rsScore = 0 # Assume RC is not a desired one if 'rs' not in test.keys(): rsScore = 1 # No special value, assume it passed elif len(test['rs']) == 1: if test['rs'][0] == results['rs']: rsScore = 1 else: for wanted in test['rs']: if results['rs'] == wanted: rsScore = 1 break # Determine the final score and show the success or failure of the test if respScore != 1 or orcScore != 1 or rcScore != 1 or rsScore != 1: testScore = 0 if len(results['logEntries']) != 0: print(" Log Entries:") for line in results['logEntries']: print(" " + line) print(" Test Status: FAILED") if respScore != 1: print(" Response Validation: FAILED") if orcScore != 1: print(" Overall RC Validation: FAILED") if rcScore != 1: print(" rc Validation: FAILED") if rsScore != 1: print(" rs Validation: FAILED") else: testScore = 1 if args.showLog is True and len(results['logEntries']) != 0: print(" Log 
Entries:") for line in results['logEntries']: print(" " + line) print(" Test Status: PASSED") return testScore def driveTestSet(smt, setId, setToTest): """ Drive a set of test. Input: SMT daemon object Dictionary element for the test to drive. Global: Count of tests Count of passed tests Count of failed tests List of failed Tests Output: Global values changed """ global args global cnts print(" ") print("******************************************************************") print("******************************************************************") print("Beginning Test Set: " + setToTest['description']) print("******************************************************************") print("******************************************************************") localTotal = 0 localAttempted = 0 localPassed = 0 localFailed = 0 localBypassed = 0 failInfo = [] startTime = datetime.datetime.now() for test in setToTest['tests']: if args.listParms is True: # Only want to list the requests. print(test['request']) continue # Produce Common Test/shell count info. print("") localTotal += 1 cntInfo = "%i/%i" % (localTotal, (cnts['total'] + localTotal)) if 'doIf' in test and not eval(test['doIf']): print("Bypassing %s: %s" % (cntInfo, test['description'])) localBypassed += 1 continue if test['request'][0:6] == 'SHELL ': # Issue a function that is not considered a test. print("Shell %s: %s" % (cntInfo, test['description'])) shellCmd = test['request'][6:] shellRC = 0 try: out = subprocess.check_output( shellCmd, stderr=subprocess.STDOUT, close_fds=True, shell=True) out = bytes.decode(out) out = "".join(out) except CalledProcessError as e: out = e.output shellRC = e.returncode except Exception as e: # All other exceptions. 
if 'output' in e: out = e.output else: out = ('Exception encountered: ' + "details: %s" % six.text_type(e)) if 'returncode' in e: shellRC = e.returncode else: shellRC = -9999999 if isinstance(out, string_types): out = [out] shellOk = 0 if 'overallRC' in test: for testRC in test['overallRC']: if shellRC == testRC: shellOk = 1 break if shellOk == 0: print("***Warning*** A non test related shell function " + "returned rc: " + str(shellRC) + " out: " + ''.join(out)) elif test['request'][0:9] == 'CODE_SEG ': print("Code Segment: %s: %s" % (cntInfo, test['description'])) codeSeg = test['request'][9:] exec(codeSeg) else: # Attempt the test. print("Test %s: %s" % (cntInfo, test['description'])) localAttempted += 1 testScore = runTest(smt, test) if testScore == 1: localPassed += 1 else: localFailed += 1 failInfo.append(cntInfo) endTime = datetime.datetime.now() cnts['total'] += localTotal cnts['attempted'] += localAttempted cnts['passed'] += localPassed cnts['failed'] += localFailed cnts['bypassed'] += localBypassed print(" ") print("Status of this set...") print(" Time:") print(" Started: " + str(startTime)) print(" Ended: " + str(endTime)) print(" Duration: " + str(endTime - startTime)) print(" Total Requests: %i, Bypassed Requests: %i" % (localTotal, localBypassed)) print(" Tests attempted: %i, passed: %i, failed: %i" % (localAttempted, localPassed, localFailed)) if localFailed > 0: cnts['failedTests'].append(setId + ": " + " ".join(failInfo)) """ ****************************************************************************** main routine ****************************************************************************** """ # Parse the input and assign it to the variables. 
# Parse the input and assign it to the variables.
parser = argparse.ArgumentParser()
parser.add_argument('--listareas',
    dest='listAreas',
    action='store_true',
    help='List names of the test set areas.')
parser.add_argument('--listparms',
    dest='listParms',
    action='store_true',
    help='List the command being run.')
parser.add_argument('--local',
    default='smtTestLocal',
    dest='localFile',
    help="Localization file or 'none'.")
parser.add_argument('--showlog',
    dest='showLog',
    action='store_true',
    help='Show log entries for successful tests.')
parser.add_argument('setsToRun', metavar='N', nargs='*',
    help='Test sets to run')
args = parser.parse_args()

if args.localFile != 'none':
    # Perform the localization.
    print("Localization file specified as: " + args.localFile)
    print("Importing " + args.localFile)
    rc = localize(args.localFile, subs, testSets)
    if rc != 0:
        exit(2)
else:
    print("No localization will be performed.")

# The next lines produce the code that allows the regular expressions
# to work.
# BUG FIX: dict.iteritems() does not exist in Python 3; items() behaves
# the same here.
regSubs = dict((re.escape(k), v) for k, v in subs.items())
pattern = re.compile("|".join(regSubs.keys()))

smt = SMT()
smt.enableLogCapture()    # Capture request related logs

cnts = {}
cnts['total'] = 0
cnts['passed'] = 0
cnts['failed'] = 0
cnts['failedTests'] = []
cnts['attempted'] = 0
cnts['bypassed'] = 0

# Temporary Preparation for punchFile Test.  Create a sample config file.
f = open("sample.config", "w+")
f.write("This is sample config file for punchFile Test")
f.close()

if args.listAreas is True:
    # Only list the test set areas; do not run anything.
    for key in sorted(testSets):
        print(key + ": " + testSets[key]['description'])
else:
    # Initialize the environment.  Online the punch.
    cmd = "sudo /sbin/cio_ignore -r d; sudo /sbin/chccwdev -e d"
    try:
        subprocess.check_output(
            cmd,
            stderr=subprocess.STDOUT,
            close_fds=True,
            shell=True)
    except CalledProcessError as e:
        print("Warning: Failed to enable the punch, " +
              "cmd: %s, rc: %i, out: %s" %
              (cmd, e.returncode, e.output))
    except Exception as e:
        # All other exceptions.
        # BUG FIX: "'output' in e" raises TypeError on an exception
        # object in Python 3; use hasattr() instead.
        if hasattr(e, 'output'):
            out = e.output
        else:
            out = ('Exception encountered: ' +
                   "details: %s" % six.text_type(e))
        if hasattr(e, 'returncode'):
            eRC = e.returncode
        else:
            eRC = -9999999
        print("Warning: Failed to enable the punch, " +
              "cmd: %s, rc: %i, %s" % (cmd, eRC, out))

    # Perform the substitution change to all requests and responses.
    for key in testSets:
        if 'doIf' in testSets[key]:
            testSets[key]['doIf'] = pattern.sub(lambda m:
                regSubs[re.escape(m.group(0))], testSets[key]['doIf'])
        for test in testSets[key]['tests']:
            test['description'] = pattern.sub(lambda m:
                regSubs[re.escape(m.group(0))], test['description'])
            test['request'] = pattern.sub(lambda m:
                regSubs[re.escape(m.group(0))], test['request'])
            if 'doIf' in test:
                test['doIf'] = pattern.sub(lambda m:
                    regSubs[re.escape(m.group(0))], test['doIf'])
            if 'out' in test:
                test['out'] = pattern.sub(lambda m:
                    regSubs[re.escape(m.group(0))], test['out'])

            # Apply testSet['doIf'] to the tests, if it exists.
            if 'doIf' in testSets[key]:
                if 'doIf' in test:
                    test['doIf'] = (testSets[key]['doIf'] + ' and ' +
                        test['doIf'])
                else:
                    test['doIf'] = testSets[key]['doIf']

    # Determine the tests to run based on the first argument.
    tests = []
    totalStartTime = datetime.datetime.now()
    if len(args.setsToRun) > 0:
        for key in args.setsToRun:
            key = key.upper()
            if key in testSets:
                driveTestSet(smt, key, testSets[key])
            else:
                print("The following tests set was not recognized: " + key)
    else:
        for key in sorted(testSets):
            driveTestSet(smt, key, testSets[key])
    totalEndTime = datetime.datetime.now()
if (os.path.exists("sample.config")): os.remove("sample.config") if (os.path.exists(subs['<<>>'])): os.remove(subs['<<>>']) print("") print("******************************************************************") print("Status of this run...") print(" Time:") print(" Started: " + str(totalStartTime)) print(" Ended: " + str(totalEndTime)) print(" Duration: " + str(totalEndTime - totalStartTime)) print(" Total Requests: %i, Bypassed Requests: %i" % (cnts['total'], cnts['bypassed'])) print(" Tests attempted: %i, passed: %i, failed: %i" % (cnts['attempted'], cnts['passed'], cnts['failed'])) print(" Failed Test(s): " + str(cnts['failedTests'])) print("******************************************************************") if cnts['failed'] == 0: exit(0) else: exit(1) zVMCloudConnector-1.6.3/smtLayer/powerVM.py0000664000175000017510000007460514263437505020320 0ustar ruirui00000000000000# Power functions for Systems Management Ultra Thin Layer # # Copyright 2017 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import time from smtLayer import generalUtils from smtLayer import msgs from smtLayer.vmUtils import execCmdThruIUCV, invokeSMCLI from smtLayer.vmUtils import isLoggedOn from smtLayer.vmUtils import waitForOSState, waitForVMState modId = 'PVM' vmOSUpStates = ['on', 'up'] vmOSUpDownStates = ['down', 'off', 'on', 'up'] version = "1.0.0" """ List of subfunction handlers. 
# Dispatch table for the PowerVM subfunctions.
# Each entry maps a subfunction keyword to a two-item list:
#   [0] readable name of the routine that handles the subfunction,
#   [1] callable that drives it, given the request handle.
subfuncHandler = {
    'HELP': ['help', lambda rh: help(rh)],
    'ISREACHABLE': ['checkIsReachable', lambda rh: checkIsReachable(rh)],
    'OFF': ['deactivate', lambda rh: deactivate(rh)],
    'ON': ['activate', lambda rh: activate(rh)],
    'PAUSE': ['pause', lambda rh: pause(rh)],
    'REBOOT': ['reboot', lambda rh: reboot(rh)],
    'RESET': ['reset', lambda rh: reset(rh)],
    'SOFTOFF': ['softDeactivate', lambda rh: softDeactivate(rh)],
    'STATUS': ['getStatus', lambda rh: getStatus(rh)],
    'UNPAUSE': ['unpause', lambda rh: unpause(rh)],
    'VERSION': ['getVersion', lambda rh: getVersion(rh)],
    'WAIT': ['wait', lambda rh: wait(rh)],
    }

# Positional operands per subfunction.  Each entry would be a list of
# dictionaries describing: readable operand name, parms key, required
# flag, and data type (1: int, 2: string).  PowerVM defines none.
posOpsList = {}

# Keyword operands/options supported by the various subfunctions.
# Each keyword maps to a list of:
#   [0] the related parms item that stores the value,
#   [1] how many values follow the keyword,
#   [2] the type of those values (1: int, 2: string).
# E.g. WAIT's '--poll' takes one int value; '--showparms' is bare.
keyOpsList = {
    'HELP': {},
    'ISREACHABLE': {
        '--showparms': ['showParms', 0, 0]},
    'OFF': {
        '--maxwait': ['maxWait', 1, 1],
        '--poll': ['poll', 1, 1],
        '--showparms': ['showParms', 0, 0],
        '--wait': ['wait', 0, 0]},
    'ON': {
        '--state': ['desiredState', 1, 2],
        '--maxwait': ['maxWait', 1, 1],
        '--poll': ['poll', 1, 1],
        '--showparms': ['showParms', 0, 0],
        '--wait': ['wait', 0, 0]},
    'PAUSE': {'--showparms': ['showParms', 0, 0]},
    'REBOOT': {
        '--maxwait': ['maxWait', 1, 1],
        '--poll': ['poll', 1, 1],
        '--showparms': ['showParms', 0, 0],
        '--wait': ['wait', 0, 0]},
    'RESET': {
        '--state': ['desiredState', 1, 2],
        '--maxwait': ['maxWait', 1, 1],
        '--poll': ['poll', 1, 1],
        '--showparms': ['showParms', 0, 0],
        '--wait': ['wait', 0, 0]},
    'SOFTOFF': {
        '--maxwait': ['maxWait', 1, 1],
        '--poll': ['poll', 1, 1],
        '--showparms': ['showParms', 0, 0],
        '--wait': ['wait', 0, 0]},
    'STATUS': {
        '--showparms': ['showParms', 0, 0]},
    'UNPAUSE': {
        '--showparms': ['showParms', 0, 0]},
    'VERSION': {},
    'WAIT': {
        '--maxwait': ['maxWait', 1, 1],
        '--poll': ['poll', 1, 1],
        '--showparms': ['showParms', 0, 0],
        '--state': ['desiredState', 1, 2]},
    }
def activate(rh):
    """ Activate (log on) a virtual machine.

    Input:
       Request Handle with the following properties:
          function    - 'POWERVM'
          subfunction - 'ON'
          userid      - userid of the virtual machine
          parms['desiredState'] - Desired state.  Optional,
                unless 'maxQueries' is specified.
          parms['maxQueries'] - Maximum number of queries to issue.
                Optional.
          parms['maxWait'] - Maximum time to wait in seconds.  Optional,
                unless 'maxQueries' is specified.
          parms['poll'] - Polling interval in seconds.  Optional,
                unless 'maxQueries' is specified.

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter powerVM.activate, userid: " + rh.userid)

    smcliResults = invokeSMCLI(rh, "Image_Activate", ["-T", rh.userid])

    # overallRC 8 / rc 200 / rs 8 means the machine is already logged on,
    # which is just as good as activating it here - not an error.
    alreadyActive = (smcliResults['overallRC'] == 8 and
                     smcliResults['rc'] == 200 and
                     smcliResults['rs'] == 8)
    if smcliResults['overallRC'] != 0 and not alreadyActive:
        # SMAPI API failed.
        rh.printLn("ES", smcliResults['response'])
        rh.updateResults(smcliResults)    # Use results from invokeSMCLI

    if rh.results['overallRC'] == 0 and 'maxQueries' in rh.parms:
        # Wait for the system to be in the desired state of:
        # OS is 'up' and reachable or VM is 'on'.
        if rh.parms['desiredState'] == 'up':
            waitResults = waitForOSState(
                rh,
                rh.userid,
                rh.parms['desiredState'],
                maxQueries=rh.parms['maxQueries'],
                sleepSecs=rh.parms['poll'])
        else:
            waitResults = waitForVMState(
                rh,
                rh.userid,
                rh.parms['desiredState'],
                maxQueries=rh.parms['maxQueries'],
                sleepSecs=rh.parms['poll'])
        if waitResults['overallRC'] == 0:
            rh.printLn("N", "%s: %s" %
                (rh.userid, rh.parms['desiredState']))
        else:
            rh.updateResults(waitResults)

    rh.printSysLog("Exit powerVM.activate, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']


def checkIsReachable(rh):
    """ Check if a virtual machine is reachable.

    Input:
       Request Handle with the following properties:
          function    - 'POWERVM'
          subfunction - 'ISREACHABLE'
          userid      - userid of the virtual machine

    Output:
       Request Handle updated with the results.
       overallRC - 0: determined the status, non-zero: some weird failure
                   while trying to execute a command on the guest via IUCV
       rc        - RC returned from execCmdThruIUCV
       rs        - 0: not reachable, 1: reachable
    """
    rh.printSysLog("Enter powerVM.checkIsReachable, userid: " + rh.userid)

    # Getting any answer to a trivial command proves the guest is
    # reachable.  A failure from execCmdThruIUCV is an acceptable way of
    # determining that the system is unreachable, so its error message
    # is not passed along.
    pingResults = execCmdThruIUCV(rh, rh.userid, "echo 'ping'")
    reachable = 1 if pingResults['overallRC'] == 0 else 0
    suffix = ": reachable" if reachable == 1 else ": unreachable"
    rh.printLn("N", rh.userid + suffix)

    rh.updateResults({"rs": reachable})
    rh.printSysLog("Exit powerVM.checkIsReachable, rc: 0")
    return 0
rh.printLn("ES", results['response']) rh.updateResults(results) # Use results from invokeSMCLI if results['overallRC'] == 0 and 'maxQueries' in rh.parms: results = waitForVMState( rh, rh.userid, 'off', maxQueries=rh.parms['maxQueries'], sleepSecs=rh.parms['poll']) if results['overallRC'] == 0: rh.printLn("N", rh.userid + ": off") else: rh.updateResults(results) rh.printSysLog("Exit powerVM.deactivate, rc: " + str(rh.results['overallRC'])) return rh.results['overallRC'] def doIt(rh): """ Perform the requested function by invoking the subfunction handler. Input: Request Handle Output: Request Handle updated with parsed input. Return code - 0: ok, non-zero: error """ rh.printSysLog("Enter powerVM.doIt") # Show the invocation parameters, if requested. if 'showParms' in rh.parms and rh.parms['showParms'] is True: rh.printLn("N", "Invocation parameters: ") rh.printLn("N", " Routine: powerVM." + str(subfuncHandler[rh.subfunction][0]) + "(reqHandle)") rh.printLn("N", " function: " + rh.function) rh.printLn("N", " userid: " + rh.userid) rh.printLn("N", " subfunction: " + rh.subfunction) rh.printLn("N", " parms{}: ") for key in rh.parms: if key != 'showParms': rh.printLn("N", " " + key + ": " + str(rh.parms[key])) rh.printLn("N", " ") # Call the subfunction handler subfuncHandler[rh.subfunction][1](rh) rh.printSysLog("Exit powerVM.doIt, rc: " + str(rh.results['overallRC'])) return rh.results['overallRC'] def getStatus(rh): """ Get the power (logon/off) status of a virtual machine. Input: Request Handle with the following properties: function - 'POWERVM' subfunction - 'STATUS' userid - userid of the virtual machine Output: Request Handle updated with the results. 
results['overallRC'] - 0: ok, non-zero: error if ok: results['rc'] - 0: for both on and off cases results['rs'] - 0: powered on results['rs'] - 1: powered off """ rh.printSysLog("Enter powerVM.getStatus, userid: " + rh.userid) results = isLoggedOn(rh, rh.userid) if results['overallRC'] != 0: # Unexpected error pass elif results['rs'] == 0: rh.printLn("N", rh.userid + ": on") else: rh.printLn("N", rh.userid + ": off") rh.updateResults(results) rh.printSysLog("Exit powerVM.getStatus, rc: " + str(rh.results['overallRC'])) return rh.results['overallRC'] def getVersion(rh): """ Get the version of this function. Input: Request Handle Output: Request Handle updated with the results. Return code - 0: ok, non-zero: error """ rh.printLn("N", "Version: " + version) return 0 def help(rh): """ Produce help output specifically for PowerVM functions. Input: Request Handle Output: Request Handle updated with the results. Return code - 0: ok, non-zero: error """ showInvLines(rh) showOperandLines(rh) return 0 def parseCmdline(rh): """ Parse the request command input. Input: Request Handle Output: Request Handle updated with parsed input. Return code - 0: ok, non-zero: error """ rh.printSysLog("Enter powerVM.parseCmdline") if rh.totalParms >= 2: rh.userid = rh.request[1].upper() else: # Userid is missing. msg = msgs.msg['0010'][1] % modId rh.printLn("ES", msg) rh.updateResults(msgs.msg['0010'][0]) rh.printSysLog("Exit powerVM.parseCmdLine, rc: " + rh.results['overallRC']) return rh.results['overallRC'] if rh.totalParms == 2: rh.subfunction = rh.userid rh.userid = '' if rh.totalParms >= 3: rh.subfunction = rh.request[2].upper() # Verify the subfunction is valid. if rh.subfunction not in subfuncHandler: # Subfunction is missing. subList = ', '.join(sorted(subfuncHandler.keys())) msg = msgs.msg['0011'][1] % (modId, subList) rh.printLn("ES", msg) rh.updateResults(msgs.msg['0011'][0]) # Parse the rest of the command line. 
def parseCmdline(rh):
    """ Parse the request command input.

    Input:
       Request Handle

    Output:
       Request Handle updated with parsed input.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter powerVM.parseCmdline")

    if rh.totalParms >= 2:
        rh.userid = rh.request[1].upper()
    else:
        # Userid is missing.
        msg = msgs.msg['0010'][1] % modId
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0010'][0])
        # BUG FIX: overallRC is an int; concatenating it to a str raised
        # TypeError on this early-exit path.  Wrap it in str() like the
        # other exit paths do.
        rh.printSysLog("Exit powerVM.parseCmdLine, rc: " +
            str(rh.results['overallRC']))
        return rh.results['overallRC']

    if rh.totalParms == 2:
        rh.subfunction = rh.userid
        rh.userid = ''

    if rh.totalParms >= 3:
        rh.subfunction = rh.request[2].upper()

    # Verify the subfunction is valid.
    if rh.subfunction not in subfuncHandler:
        # Subfunction is missing.
        subList = ', '.join(sorted(subfuncHandler.keys()))
        msg = msgs.msg['0011'][1] % (modId, subList)
        rh.printLn("ES", msg)
        rh.updateResults(msgs.msg['0011'][0])

    # Parse the rest of the command line.
    if rh.results['overallRC'] == 0:
        rh.argPos = 3    # Begin parsing at 4th operand
        generalUtils.parseCmdline(rh, posOpsList, keyOpsList)

    waiting = 0
    if rh.results['overallRC'] == 0:
        if rh.subfunction == 'WAIT':
            waiting = 1
            if rh.parms['desiredState'] not in vmOSUpDownStates:
                # Desired state is not: down, off, on or up.
                msg = msgs.msg['0013'][1] % (modId,
                    rh.parms['desiredState'],
                    ", ".join(vmOSUpDownStates))
                rh.printLn("ES", msg)
                rh.updateResults(msgs.msg['0013'][0])

    if (rh.results['overallRC'] == 0 and 'wait' in rh.parms):
        waiting = 1
        if 'desiredState' not in rh.parms:
            if rh.subfunction in ['ON', 'RESET', 'REBOOT']:
                rh.parms['desiredState'] = 'up'
            else:
                # OFF and SOFTOFF default to 'off'.
                rh.parms['desiredState'] = 'off'

    if rh.results['overallRC'] == 0 and waiting == 1:
        if rh.subfunction == 'ON' or rh.subfunction == 'RESET':
            if ('desiredState' not in rh.parms or
                    rh.parms['desiredState'] not in vmOSUpStates):
                # Desired state is not: on or up.
                # Use .get() so a completely missing desiredState does not
                # raise KeyError while building the error message.
                msg = msgs.msg['0013'][1] % (modId,
                    rh.parms.get('desiredState'),
                    ", ".join(vmOSUpStates))
                rh.printLn("ES", msg)
                rh.updateResults(msgs.msg['0013'][0])

        if rh.results['overallRC'] == 0:
            if 'maxWait' not in rh.parms:
                rh.parms['maxWait'] = 300
            if 'poll' not in rh.parms:
                rh.parms['poll'] = 15
            # BUG FIX: '/' is true division in Python 3 and produced a
            # float query count; use floor division for the rounded-up
            # integer number of polls.
            rh.parms['maxQueries'] = ((rh.parms['maxWait'] +
                rh.parms['poll'] - 1) // rh.parms['poll'])
            # If we had to do some rounding, give a warning
            # out to the command line user that the wait
            # won't be what they expected.
            if rh.parms['maxWait'] % rh.parms['poll'] != 0:
                msg = msgs.msg['0017'][1] % (modId,
                    rh.parms['maxWait'], rh.parms['poll'],
                    rh.parms['maxQueries'] * rh.parms['poll'],
                    rh.parms['maxQueries'])
                rh.printLn("W", msg)

    rh.printSysLog("Exit powerVM.parseCmdLine, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']


def pause(rh):
    """ Pause a virtual machine.

    Input:
       Request Handle with the following properties:
          function    - 'POWERVM'
          subfunction - 'PAUSE'
          userid      - userid of the virtual machine

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter powerVM.pause, userid: " + rh.userid)

    parms = ["-T", rh.userid, "-k", "PAUSE=YES"]
    results = invokeSMCLI(rh, "Image_Pause", parms)
    if results['overallRC'] != 0:
        # SMAPI API failed.
        rh.printLn("ES", results['response'])
        rh.updateResults(results)    # Use results from invokeSMCLI

    rh.printSysLog("Exit powerVM.pause, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']


def reboot(rh):
    """ Reboot a virtual machine.

    Input:
       Request Handle with the following properties:
          function    - 'POWERVM'
          subfunction - 'REBOOT'
          userid      - userid of the virtual machine
          parms['desiredState'] - Desired state.  Optional,
                unless 'maxQueries' is specified.
          parms['maxQueries'] - Maximum number of queries to issue.
                Optional.
          parms['maxWait'] - Maximum time to wait in seconds.  Optional,
                unless 'maxQueries' is specified.
          parms['poll'] - Polling interval in seconds.  Optional,
                unless 'maxQueries' is specified.

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter powerVM.reboot, userid: " + rh.userid)

    strCmd = "shutdown -r now"
    results = execCmdThruIUCV(rh, rh.userid, strCmd, timeout=60)
    if results['overallRC'] != 0:
        # Command failed to execute using IUCV.
        rh.printLn("ES", results['response'])
        rh.updateResults(results)

    if rh.results['overallRC'] == 0:
        # Wait for the OS to go down.
        results = waitForOSState(rh, rh.userid, "down",
            maxQueries=30, sleepSecs=10)
        if results['overallRC'] == 0:
            rh.printLn("N", rh.userid + ": down (interim state)")

    if rh.results['overallRC'] == 0 and 'maxQueries' in rh.parms:
        results = waitForOSState(rh,
                                 rh.userid,
                                 'up',
                                 maxQueries=rh.parms['maxQueries'],
                                 sleepSecs=rh.parms['poll'])
        if results['overallRC'] == 0:
            rh.printLn("N", rh.userid + ": up")
        else:
            rh.updateResults(results)

    rh.printSysLog("Exit powerVM.reboot, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']
rh.printLn("ES", results['response']) rh.updateResults(results) if rh.results['overallRC'] == 0: # Wait for the OS to go down results = waitForOSState(rh, rh.userid, "down", maxQueries=30, sleepSecs=10) if results['overallRC'] == 0: rh.printLn("N", rh.userid + ": down (interim state)") if rh.results['overallRC'] == 0 and 'maxQueries' in rh.parms: results = waitForOSState(rh, rh.userid, 'up', maxQueries=rh.parms['maxQueries'], sleepSecs=rh.parms['poll']) if results['overallRC'] == 0: rh.printLn("N", rh.userid + ": up") else: rh.updateResults(results) rh.printSysLog("Exit powerVM.reboot, rc: " + str(rh.results['overallRC'])) return rh.results['overallRC'] def reset(rh): """ Reset a virtual machine. Input: Request Handle with the following properties: function - 'POWERVM' subfunction - 'RESET' userid - userid of the virtual machine parms['maxQueries'] - Maximum number of queries to issue. Optional. parms['maxWait'] - Maximum time to wait in seconds. Optional, unless 'maxQueries' is specified. parms['poll'] - Polling interval in seconds. Optional, unless 'maxQueries' is specified. Output: Request Handle updated with the results. Return code - 0: ok, non-zero: error """ rh.printSysLog("Enter powerVM.reset, userid: " + rh.userid) # Log off the user parms = ["-T", rh.userid] results = invokeSMCLI(rh, "Image_Deactivate", parms) if results['overallRC'] != 0: if results['rc'] == 200 and results['rs'] == 12: # Tolerated error. Machine is already in the desired state. results['overallRC'] = 0 results['rc'] = 0 results['rs'] = 0 else: # SMAPI API failed. rh.printLn("ES", results['response']) rh.updateResults(results) # Use results from invokeSMCLI # Wait for the logoff to complete if results['overallRC'] == 0: results = waitForVMState(rh, rh.userid, "off", maxQueries=30, sleepSecs=10) # Log the user back on if results['overallRC'] == 0: parms = ["-T", rh.userid] results = invokeSMCLI(rh, "Image_Activate", parms) if results['overallRC'] != 0: # SMAPI API failed. 
rh.printLn("ES", results['response']) rh.updateResults(results) # Use results from invokeSMCLI if results['overallRC'] == 0 and 'maxQueries' in rh.parms: if rh.parms['desiredState'] == 'up': results = waitForOSState( rh, rh.userid, rh.parms['desiredState'], maxQueries=rh.parms['maxQueries'], sleepSecs=rh.parms['poll']) else: results = waitForVMState( rh, rh.userid, rh.parms['desiredState'], maxQueries=rh.parms['maxQueries'], sleepSecs=rh.parms['poll']) if results['overallRC'] == 0: rh.printLn("N", rh.userid + ": " + rh.parms['desiredState']) else: rh.updateResults(results) rh.printSysLog("Exit powerVM.reset, rc: " + str(rh.results['overallRC'])) return rh.results['overallRC'] def showInvLines(rh): """ Produce help output related to command synopsis Input: Request Handle """ if rh.subfunction != '': rh.printLn("N", "Usage:") rh.printLn("N", " python " + rh.cmdName + " PowerVM ") rh.printLn("N", " [isreachable | pause | " + "status | unpause]") rh.printLn("N", " python " + rh.cmdName + " PowerVM ") rh.printLn("N", " [on | reset] --wait --state " + "[on | up] --maxwait ") rh.printLn("N", " --poll ") rh.printLn("N", " python " + rh.cmdName + " PowerVM ") rh.printLn("N", " [off | reboot | softoff] " + "--maxwait --poll ") rh.printLn("N", " python " + rh.cmdName + " PowerVM " + " wait") rh.printLn("N", " --state [down | on | off | up] " + "--maxwait ") rh.printLn("N", " --poll ") rh.printLn("N", " python " + rh.cmdName + " PowerVM help") rh.printLn("N", " python " + rh.cmdName + " PowerVM version") return def showOperandLines(rh): """ Produce help output related to operands. 
Input: Request Handle """ if rh.function == 'HELP': rh.printLn("N", " For the PowerVM function:") else: rh.printLn("N", "Sub-Functions(s):") rh.printLn("N", " help - Displays this help " + "information.") rh.printLn("N", " isreachable - Determine whether the " + "virtual OS in a virtual machine") rh.printLn("N", " is reachable") rh.printLn("N", " on - Log on the virtual machine") rh.printLn("N", " off - Log off the virtual machine") rh.printLn("N", " pause - Pause a virtual machine") rh.printLn("N", " reboot - Issue a shutdown command to " + "reboot the OS in a virtual") rh.printLn("N", " machine") rh.printLn("N", " reset - Power a virtual machine off " + "and then back on") rh.printLn("N", " softoff - Issue a shutdown command to " + "shutdown the OS in a virtual") rh.printLn("N", " machine and then log the " + "virtual machine off z/VM.") rh.printLn("N", " status - show the log on/off status " + "of the virtual machine") rh.printLn("N", " unpause - Unpause a virtual machine") rh.printLn("N", " wait - Wait for the virtual machine " + "to go into the specified") rh.printLn("N", " state of either:") rh.printLn("N", " down: virtual machine's " + "OS is not reachable with IUCV") rh.printLn("N", " off: virtual machine is " + "logged off") rh.printLn("N", " on: virtual machine is " + "logged on") rh.printLn("N", " up: virtual machine's OS " + "is reachable with IUCV") rh.printLn("N", " version - show the version of the " + "power function") if rh.subfunction != '': rh.printLn("N", "Operand(s):") rh.printLn("N", " - Userid of the target " + "virtual machine") rh.printLn("N", " --maxwait - " + "Maximum time in seconds to wait") rh.printLn("N", " --poll - " + "Seconds to wait between polling") rh.printLn("N", " --state [down | off | on | up] - " + "Desired state for virtual machine") rh.printLn("N", " (on or off) or for the operating " + "system (down or up).") rh.printLn("N", " --wait - wait for the machine to go into " + "the desired state.") return def 
def softDeactivate(rh):
    """ Deactivate a virtual machine by first shutting down Linux and
    then logging it off.

    Input:
       Request Handle with the following properties:
          function    - 'POWERVM'
          subfunction - 'SOFTOFF'
          userid      - userid of the virtual machine
          parms['maxQueries'] - Maximum number of queries to issue.
                Optional.
          parms['maxWait'] - Maximum time to wait in seconds.  Optional,
                unless 'maxQueries' is specified.
          parms['poll'] - Polling interval in seconds.  Optional,
                unless 'maxQueries' is specified.

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter powerVM.softDeactivate, userid: " + rh.userid)

    strCmd = "echo 'ping'"
    iucvResults = execCmdThruIUCV(rh, rh.userid, strCmd)

    if iucvResults['overallRC'] == 0:
        # We could talk to the machine, tell it to shut down nicely.
        strCmd = "shutdown -h now"
        iucvResults = execCmdThruIUCV(rh, rh.userid, strCmd, timeout=60)
        if iucvResults['overallRC'] == 0:
            # Give the OS a chance to finish shutting down cleanly
            # before asking CP to take the machine down.
            time.sleep(15)
        else:
            # Shutdown failed.  Let CP take down the system
            # after we log the results.
            rh.printSysLog("powerVM.softDeactivate " + rh.userid +
                " is unreachable. Treating it as already shutdown.")
    else:
        # Could not ping the machine.  Treat it as a success
        # after we log the results.
        rh.printSysLog("powerVM.softDeactivate " + rh.userid +
            " is unreachable. Treating it as already shutdown.")

    # Tell z/VM to log off the system.
    parms = ["-T", rh.userid]
    smcliResults = invokeSMCLI(rh, "Image_Deactivate", parms)
    if smcliResults['overallRC'] == 0:
        pass
    elif (smcliResults['overallRC'] == 8 and smcliResults['rc'] == 200 and
            # BUG FIX: removed a stray unary '+' that preceded the second
            # comparison ("or + smcliResults['rs'] == 16").
            (smcliResults['rs'] == 12 or smcliResults['rs'] == 16)):
        # Tolerable error.
        # Machine is already logged off or is logging off.
        rh.printLn("N", rh.userid + " is already logged off.")
    else:
        # SMAPI API failed.
        rh.printLn("ES", smcliResults['response'])
        rh.updateResults(smcliResults)    # Use results from invokeSMCLI

    if rh.results['overallRC'] == 0 and 'maxQueries' in rh.parms:
        # Wait for the system to log off.
        waitResults = waitForVMState(
            rh,
            rh.userid,
            'off',
            maxQueries=rh.parms['maxQueries'],
            sleepSecs=rh.parms['poll'])
        if waitResults['overallRC'] == 0:
            # BUG FIX: closed the unbalanced quote around the userid in
            # the success message.
            rh.printLn("N", "Userid '" + rh.userid +
                "' is in the desired state: off")
        else:
            rh.updateResults(waitResults)

    rh.printSysLog("Exit powerVM.softDeactivate, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']


def unpause(rh):
    """ Unpause a virtual machine.

    Input:
       Request Handle with the following properties:
          function    - 'POWERVM'
          subfunction - 'UNPAUSE'
          userid      - userid of the virtual machine

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter powerVM.unpause, userid: " + rh.userid)

    parms = ["-T", rh.userid, "-k", "PAUSE=NO"]
    results = invokeSMCLI(rh, "Image_Pause", parms)
    if results['overallRC'] != 0:
        # SMAPI API failed.
        rh.printLn("ES", results['response'])
        rh.updateResults(results)    # Use results from invokeSMCLI

    rh.printSysLog("Exit powerVM.unpause, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']


def wait(rh):
    """ Wait for the virtual machine to go into the specified state.

    Input:
       Request Handle with the following properties:
          function    - 'POWERVM'
          subfunction - 'WAIT'
          userid      - userid of the virtual machine
          parms['desiredState'] - Desired state
          parms['maxQueries'] - Maximum number of queries to issue
          parms['maxWait'] - Maximum time to wait in seconds
          parms['poll'] - Polling interval in seconds

    Output:
       Request Handle updated with the results.
       Return code - 0: ok, non-zero: error
    """
    rh.printSysLog("Enter powerVM.wait, userid: " + rh.userid)

    # 'on'/'off' are VM logon states; 'up'/'down' are OS reachability.
    if (rh.parms['desiredState'] == 'off' or
            rh.parms['desiredState'] == 'on'):
        results = waitForVMState(
            rh,
            rh.userid,
            rh.parms['desiredState'],
            maxQueries=rh.parms['maxQueries'],
            sleepSecs=rh.parms['poll'])
    else:
        results = waitForOSState(
            rh,
            rh.userid,
            rh.parms['desiredState'],
            maxQueries=rh.parms['maxQueries'],
            sleepSecs=rh.parms['poll'])

    if results['overallRC'] == 0:
        rh.printLn("N", rh.userid + ": " + rh.parms['desiredState'])
    else:
        rh.updateResults(results)

    rh.printSysLog("Exit powerVM.wait, rc: " +
        str(rh.results['overallRC']))
    return rh.results['overallRC']
Return code - 0: ok, non-zero: error """ rh.printSysLog("Enter powerVM.wait, userid: " + rh.userid) if (rh.parms['desiredState'] == 'off' or rh.parms['desiredState'] == 'on'): results = waitForVMState( rh, rh.userid, rh.parms['desiredState'], maxQueries=rh.parms['maxQueries'], sleepSecs=rh.parms['poll']) else: results = waitForOSState( rh, rh.userid, rh.parms['desiredState'], maxQueries=rh.parms['maxQueries'], sleepSecs=rh.parms['poll']) if results['overallRC'] == 0: rh.printLn("N", rh.userid + ": " + rh.parms['desiredState']) else: rh.updateResults(results) rh.printSysLog("Exit powerVM.wait, rc: " + str(rh.results['overallRC'])) return rh.results['overallRC'] zVMCloudConnector-1.6.3/smtLayer/makeVM.py0000664000175000017510000004477314315210052020064 0ustar ruirui00000000000000# MakeVM functions for Systems Management Ultra Thin Layer # # Copyright 2017 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os from tempfile import mkstemp from smtLayer import generalUtils from smtLayer import msgs from smtLayer.vmUtils import invokeSMCLI from zvmsdk import config from zvmsdk import utils as zvmutils modId = 'MVM' version = "1.0.0" # max vidks blocks can't exceed 4194296 MAX_VDISK_BLOCKS = 4194296 """ List of subfunction handlers. Each subfunction contains a list that has: Readable name of the routine that handles the subfunction, Code for the function call. 
""" subfuncHandler = { 'DIRECTORY': ['createVM', lambda rh: createVM(rh)], 'HELP': ['help', lambda rh: help(rh)], 'VERSION': ['getVersion', lambda rh: getVersion(rh)]} """ List of positional operands based on subfunction. Each subfunction contains a list which has a dictionary with the following information for the positional operands: - Human readable name of the operand, - Property in the parms dictionary to hold the value, - Is it required (True) or optional (False), - Type of data (1: int, 2: string). """ posOpsList = { 'DIRECTORY': [ ['password', 'pw', True, 2], ['Primary Memory Size (e.g. 2G)', 'priMemSize', True, 2], ['Privilege Class(es)', 'privClasses', True, 2]], } """ List of additional operands/options supported by the various subfunctions. The dictionary followng the subfunction name uses the keyword from the command as a key. Each keyword has a dictionary that lists: - the related parms item that stores the value, - how many values follow the keyword, and - the type of data for those values (1: int, 2: string) """ keyOpsList = { 'DIRECTORY': { '--cpus': ['cpuCnt', 1, 1], '--ipl': ['ipl', 1, 2], '--logonby': ['byUsers', 1, 2], '--maxMemSize': ['maxMemSize', 1, 2], '--profile': ['profName', 1, 2], '--maxCPU': ['maxCPU', 1, 1], '--setReservedMem': ['setReservedMem', 0, 0], '--showparms': ['showParms', 0, 0], '--iplParam': ['iplParam', 1, 2], '--iplLoadparam': ['iplLoadparam', 1, 2], '--dedicate': ['dedicate', 1, 2], '--loadportname': ['loadportname', 1, 2], '--loadlun': ['loadlun', 1, 2], '--vdisk': ['vdisk', 1, 2], '--account': ['account', 1, 2], '--comment': ['comment', 1, 2], '--commandSchedule': ['commandSchedule', 1, 2], '--commandSetShare': ['commandSetShare', 1, 2], '--commandRelocationDomain': ['commandRDomain', 1, 2], '--commandPcif': ['commandSchedule', 1, 2]}, 'HELP': {}, 'VERSION': {}, } def createVM(rh): """ Create a virtual machine in z/VM. 
Input: Request Handle with the following properties: function - 'CMDVM' subfunction - 'CMD' userid - userid of the virtual machine Output: Request Handle updated with the results. Return code - 0: ok, non-zero: error """ rh.printSysLog("Enter makeVM.createVM") dirLines = [] dirLines.append("USER " + rh.userid + " " + rh.parms['pw'] + " " + rh.parms['priMemSize'] + " " + rh.parms['maxMemSize'] + " " + rh.parms['privClasses']) if 'profName' in rh.parms: dirLines.append("INCLUDE " + rh.parms['profName'].upper()) if 'maxCPU' in rh.parms: dirLines.append("MACHINE ESA %i" % rh.parms['maxCPU']) if 'account' in rh.parms: dirLines.append("ACCOUNT %s" % rh.parms['account'].upper()) dirLines.append("COMMAND SET VCONFIG MODE LINUX") dirLines.append("COMMAND DEFINE CPU 00 TYPE IFL") if 'cpuCnt' in rh.parms: for i in range(1, rh.parms['cpuCnt']): dirLines.append("COMMAND DEFINE CPU %0.2X TYPE IFL" % i) if 'commandSchedule' in rh.parms: v = rh.parms['commandSchedule'] dirLines.append("COMMAND SCHEDULE * WITHIN POOL %s" % v) if 'commandSetShare' in rh.parms: v = rh.parms['commandSetShare'] dirLines.append("SHARE %s" % v) if 'commandRDomain' in rh.parms: v = rh.parms['commandRDomain'] dirLines.append("COMMAND SET VMRELOCATE * DOMAIN %s" % v) if 'commandPcif' in rh.parms: v = rh.parms['commandPcif'] s = v.split(':') dirLines.append("COMMAND ATTACH PCIF %s * AS %s" % (s[0], s[1])) if 'ipl' in rh.parms: ipl_string = "IPL %s " % rh.parms['ipl'] if 'iplParam' in rh.parms: ipl_string += ("PARM %s " % rh.parms['iplParam']) if 'iplLoadparam' in rh.parms: ipl_string += ("LOADPARM %s " % rh.parms['iplLoadparam']) dirLines.append(ipl_string) if 'byUsers' in rh.parms: users = ' '.join(rh.parms['byUsers']) dirLines.append("LOGONBY " + users) priMem = rh.parms['priMemSize'].upper() maxMem = rh.parms['maxMemSize'].upper() if 'setReservedMem' in rh.parms: reservedSize = getReservedMemSize(rh, priMem, maxMem) if rh.results['overallRC'] != 0: rh.printSysLog("Exit makeVM.createVM, rc: " + 
str(rh.results['overallRC'])) return rh.results['overallRC'] # Even reservedSize is 0M, still write the line "COMMAND DEF # STOR RESERVED 0M" in direct entry, in case cold resize of # memory decreases the defined memory, then reserved memory # size would be > 0, this line in direct entry need be updated. # If no such line defined in user direct, resizing would report # error due to it can't get the original reserved memory value. dirLines.append("COMMAND DEF STOR RESERVED %s" % reservedSize) if 'loadportname' in rh.parms: wwpn = rh.parms['loadportname'].replace("0x", "") dirLines.append("LOADDEV PORTname %s" % wwpn) if 'loadlun' in rh.parms: lun = rh.parms['loadlun'].replace("0x", "") dirLines.append("LOADDEV LUN %s" % lun) if 'dedicate' in rh.parms: vdevs = rh.parms['dedicate'].split() # add a DEDICATE statement for each vdev for vdev in vdevs: dirLines.append("DEDICATE %s %s" % (vdev, vdev)) if 'vdisk' in rh.parms: v = rh.parms['vdisk'].split(':') sizeUpper = v[1].strip().upper() sizeUnit = sizeUpper[-1] # blocks = size / 512, as we are using M, # it means 1024*1024 / 512 = 2048 if sizeUnit == 'M': blocks = int(sizeUpper[0:len(sizeUpper) - 1]) * 2048 else: blocks = int(sizeUpper[0:len(sizeUpper) - 1]) * 2097152 if blocks > 4194304: # not support exceed 2G disk size msg = msgs.msg['0207'][1] % (modId) rh.printLn("ES", msg) rh.updateResults(msgs.msg['0207'][0]) rh.printSysLog("Exit makeVM.createVM, rc: " + str(rh.results['overallRC'])) return rh.results['overallRC'] # https://www.ibm.com/support/knowledgecenter/SSB27U_6.4.0/ # com.ibm.zvm.v640.hcpb7/defvdsk.htm#defvdsk # the maximum number of VDISK blocks is 4194296 if blocks > MAX_VDISK_BLOCKS: blocks = MAX_VDISK_BLOCKS dirLines.append("MDISK %s FB-512 V-DISK %s MWV" % (v[0], blocks)) if 'comment' in rh.parms: for comment in rh.parms['comment'].split("$@$@$"): if comment: dirLines.append("* %s" % comment.upper()) # Construct the temporary file for the USER entry. 
fd, tempFile = mkstemp() to_write = '\n'.join(dirLines) + '\n' os.write(fd, to_write.encode()) os.close(fd) parms = ["-T", rh.userid, "-f", tempFile] results = invokeSMCLI(rh, "Image_Create_DM", parms) if results['overallRC'] != 0: # SMAPI API failed. rh.printLn("ES", results['response']) rh.updateResults(results) # Use results from invokeSMCLI os.remove(tempFile) rh.printSysLog("Exit makeVM.createVM, rc: " + str(rh.results['overallRC'])) return rh.results['overallRC'] def doIt(rh): """ Perform the requested function by invoking the subfunction handler. Input: Request Handle Output: Request Handle updated with parsed input. Return code - 0: ok, non-zero: error """ rh.printSysLog("Enter makeVM.doIt") # Show the invocation parameters, if requested. if 'showParms' in rh.parms and rh.parms['showParms'] is True: rh.printLn("N", "Invocation parameters: ") rh.printLn("N", " Routine: makeVM." + str(subfuncHandler[rh.subfunction][0]) + "(reqHandle)") rh.printLn("N", " function: " + rh.function) rh.printLn("N", " userid: " + rh.userid) rh.printLn("N", " subfunction: " + rh.subfunction) rh.printLn("N", " parms{}: ") for key in rh.parms: if key != 'showParms': rh.printLn("N", " " + key + ": " + str(rh.parms[key])) rh.printLn("N", " ") # Call the subfunction handler subfuncHandler[rh.subfunction][1](rh) rh.printSysLog("Exit makeVM.doIt, rc: " + str(rh.results['overallRC'])) return rh.results['overallRC'] def getVersion(rh): """ Get the version of this function. Input: Request Handle Output: Request Handle updated with the results. Return code - 0: ok, non-zero: error """ rh.printLn("N", "Version: " + version) return 0 def help(rh): """ Produce help output specifically for MakeVM functions. Input: Request Handle Output: Request Handle updated with the results. Return code - 0: ok, non-zero: error """ showInvLines(rh) showOperandLines(rh) return 0 def parseCmdline(rh): """ Parse the request command input. Input: Request Handle Output: Request Handle updated with parsed input. 
Return code - 0: ok, non-zero: error """ rh.printSysLog("Enter makeVM.parseCmdline") if rh.totalParms >= 2: rh.userid = rh.request[1].upper() else: # Userid is missing. msg = msgs.msg['0010'][1] % modId rh.printLn("ES", msg) rh.updateResults(msgs.msg['0010'][0]) rh.printSysLog("Exit makeVM.parseCmdLine, rc: " + rh.results['overallRC']) return rh.results['overallRC'] if rh.totalParms == 2: rh.subfunction = rh.userid rh.userid = '' if rh.totalParms >= 3: rh.subfunction = rh.request[2].upper() # Verify the subfunction is valid. if rh.subfunction not in subfuncHandler: # Subfunction is missing. subList = ', '.join(sorted(subfuncHandler.keys())) msg = msgs.msg['0011'][1] % (modId, subList) rh.printLn("ES", msg) rh.updateResults(msgs.msg['0011'][0]) # Parse the rest of the command line. if rh.results['overallRC'] == 0: rh.argPos = 3 # Begin Parsing at 4th operand generalUtils.parseCmdline(rh, posOpsList, keyOpsList) if 'byUsers' in rh.parms: users = [] for user in rh.parms['byUsers'].split(':'): users.append(user) rh.parms['byUsers'] = [] rh.parms['byUsers'].extend(users) if rh.subfunction == 'DIRECTORY' and 'maxMemSize' not in rh.parms: rh.parms['maxMemSize'] = rh.parms['priMemSize'] rh.printSysLog("Exit makeVM.parseCmdLine, rc: " + str(rh.results['overallRC'])) return rh.results['overallRC'] def showInvLines(rh): """ Produce help output related to command synopsis Input: Request Handle """ if rh.subfunction != '': rh.printLn("N", "Usage:") rh.printLn("N", " python " + rh.cmdName + " MakeVM directory ") rh.printLn("N", " --cpus " + "--ipl --logonby ") rh.printLn("N", " --maxMemSize " + "--profile ") rh.printLn("N", " --maxCPU " + "--setReservedMem") rh.printLn("N", " --dedicate ") rh.printLn("N", " --loadportname " + "--loadlun ") rh.printLn("N", " python " + rh.cmdName + " MakeVM help") rh.printLn("N", " python " + rh.cmdName + " MakeVM version") return def showOperandLines(rh): """ Produce help output related to operands. 
Input: Request Handle """ if rh.function == 'HELP': rh.printLn("N", " For the MakeVM function:") else: rh.printLn("N", "Sub-Functions(s):") rh.printLn("N", " directory - " + "Create a virtual machine in the z/VM user directory.") rh.printLn("N", " help - Displays this help information.") rh.printLn("N", " version - " + "show the version of the makeVM function") if rh.subfunction != '': rh.printLn("N", "Operand(s):") rh.printLn("N", " --cpus - " + "Specifies the desired number of virtual CPUs the") rh.printLn("N", " " + "guest will have.") rh.printLn("N", " --maxcpu - " + "Specifies the maximum number of virtual CPUs the") rh.printLn("N", " " + "guest is allowed to define.") rh.printLn("N", " --ipl - " + "Specifies an IPL disk or NSS for the virtual") rh.printLn("N", " " + "machine's directory entry.") rh.printLn("N", " --dedicate - " + "Specifies a device vdev list to dedicate to the ") rh.printLn("N", " " + "virtual machine.") rh.printLn("N", " --loadportname - " + "Specifies a one- to eight-byte fibre channel port ") rh.printLn("N", " " + "name of the FCP-I/O device to define with a LOADDEV ") rh.printLn("N", " " + "statement in the virtual machine's definition") rh.printLn("N", " --loadlun - " + "Specifies a one- to eight-byte logical unit number ") rh.printLn("N", " " + "name of the FCP-I/O device to define with a LOADDEV ") rh.printLn("N", " " + "statement in the virtual machine's definition") rh.printLn("N", " --logonby - " + "Specifies a list of up to 8 z/VM userids who can log") rh.printLn("N", " " + "on to the virtual machine using their id and password.") rh.printLn("N", " --maxMemSize - " + "Specifies the maximum memory the virtual machine") rh.printLn("N", " " + "is allowed to define.") rh.printLn("N", " --setReservedMem - " + "Set the additional memory space (maxMemSize - priMemSize)") rh.printLn("N", " " + "as reserved memory of the virtual machine.") rh.printLn("N", " - " + "Specifies the password for the new virtual") rh.printLn("N", " " + 
"machine.") rh.printLn("N", " - " + "Specifies the initial memory size for the new virtual") rh.printLn("N", " " + "machine.") rh.printLn("N", " - " + "Specifies the privilege classes for the new virtual") rh.printLn("N", " " + "machine.") rh.printLn("N", " --profile - " + "Specifies the z/VM PROFILE to include in the") rh.printLn("N", " " + "virtual machine's directory entry.") rh.printLn("N", " - " + "Userid of the virtual machine to create.") return def getReservedMemSize(rh, mem, maxMem): rh.printSysLog("Enter makeVM.getReservedMemSize") gap = '0M' # Check size suffix memSuffix = mem[-1].upper() maxMemSuffix = maxMem[-1].upper() if (memSuffix not in ['M', 'G']) or (maxMemSuffix not in ['M', 'G']): # Suffix is not 'M' or 'G' msg = msgs.msg['0205'][1] % modId rh.printLn("ES", msg) rh.updateResults(msgs.msg['0205'][0]) rh.printSysLog("Exit makeVM.parseCmdLine, rc: " + str(rh.results['overallRC'])) return gap # Convert both size to 'M' memMb = int(mem[:-1]) maxMemMb = int(maxMem[:-1]) if memSuffix == 'G': memMb = memMb * 1024 if maxMemSuffix == 'G': maxMemMb = maxMemMb * 1024 # Check maxsize is greater than initial mem size if maxMemMb < memMb: msg = msgs.msg['0206'][1] % (modId, maxMem, mem) rh.printLn("ES", msg) rh.updateResults(msgs.msg['0206'][0]) rh.printSysLog("Exit makeVM.parseCmdLine, rc: " + str(rh.results['overallRC'])) return gap # The define storage command can support 1-7 digits decimal number # So we will use 'M' as suffix unless the gap size exceeds 9999999 # then convert to Gb. 
gapSize = maxMemMb - memMb # get make max reserved memory value MAX_STOR_RESERVED = int(zvmutils.convert_to_mb( config.CONF.zvm.user_default_max_reserved_memory)) if gapSize > MAX_STOR_RESERVED: gapSize = MAX_STOR_RESERVED if gapSize > 9999999: gapSize = gapSize / 1024 gap = "%iG" % gapSize else: gap = "%iM" % gapSize rh.printSysLog("Exit makeVM.getReservedMemSize, rc: " + str(rh.results['overallRC'])) return gap zVMCloudConnector-1.6.3/LICENSE0000664000175000017510000002363513575566551015562 0ustar ruirui00000000000000 Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 1. Definitions. "License" shall mean the terms and conditions for use, reproduction, and distribution as defined by Sections 1 through 9 of this document. "Licensor" shall mean the copyright owner or entity authorized by the copyright owner that is granting the License. "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity. "You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License. "Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files. "Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types. 
"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below). "Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof. "Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution." "Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work. 2. Grant of Copyright License. 
Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form. 3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed. 4. Redistribution. 
You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions: (a) You must give any other recipients of the Work or Derivative Works a copy of this License; and (b) You must cause any modified files to carry prominent notices stating that You changed the files; and (c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and (d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License. You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License. 5. 
Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions. 6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file. 7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License. 8. Limitation of Liability. 
In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages. 9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. zVMCloudConnector-1.6.3/zvmconnector/0000775000175000017510000000000014315232035017247 5ustar ruirui00000000000000zVMCloudConnector-1.6.3/zvmconnector/connector.py0000664000175000017510000000765113720612363021631 0ustar ruirui00000000000000# Copyright 2017 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from zvmconnector import socketclient from zvmconnector import restclient CONN_TYPE_SOCKET = 'socket' CONN_TYPE_REST = 'rest' class baseConnection(object): def request(self, api_name, *api_args, **api_kwargs): pass class socketConnection(baseConnection): def __init__(self, ip_addr='127.0.0.1', port=2000, timeout=3600): self.client = socketclient.SDKSocketClient(ip_addr, port, timeout) def request(self, api_name, *api_args, **api_kwargs): return self.client.call(api_name, *api_args, **api_kwargs) class restConnection(baseConnection): def __init__(self, ip_addr='127.0.0.1', port=8080, ssl_enabled=False, verify=False, token_path=None): self.client = restclient.RESTClient(ip_addr, port, ssl_enabled, verify, token_path) def request(self, api_name, *api_args, **api_kwargs): return self.client.call(api_name, *api_args, **api_kwargs) class ZVMConnector(object): def __init__(self, ip_addr=None, port=None, timeout=3600, connection_type=None, ssl_enabled=False, verify=False, token_path=None): """ :param str ip_addr: IP address of SDK server :param int port: Port of SDK server daemon :param int timeout: Wait timeout if request no response :param str connection_type: The value should be 'socket' or 'rest' :param boolean ssl_enabled: Whether SSL enabled or not. If enabled, use HTTPS instead of HTTP. The https server should enable SSL to support this. :param boolean/str verify: Either a boolean, in which case it controls whether we verify the server's TLS certificate, or a string, in which case it must be a path to a CA bundle to use. Default to False. 
:param str token_path: The path of token file. """ if (connection_type is not None and connection_type.lower() == CONN_TYPE_SOCKET): connection_type = CONN_TYPE_SOCKET else: connection_type = CONN_TYPE_REST self.conn = self._get_connection(ip_addr, port, timeout, connection_type, ssl_enabled, verify, token_path) def _get_connection(self, ip_addr, port, timeout, connection_type, ssl_enabled, verify, token_path): if connection_type == CONN_TYPE_SOCKET: return socketConnection(ip_addr or '127.0.0.1', port or 2000, timeout) else: return restConnection(ip_addr or '127.0.0.1', port or 8080, ssl_enabled=ssl_enabled, verify=verify, token_path=token_path) def send_request(self, api_name, *api_args, **api_kwargs): """Refer to SDK API documentation. :param api_name: SDK API name :param *api_args: SDK API sequence parameters :param **api_kwargs: SDK API keyword parameters """ return self.conn.request(api_name, *api_args, **api_kwargs) zVMCloudConnector-1.6.3/zvmconnector/restclient.py0000664000175000017510000011563214315210052022000 0ustar ruirui00000000000000# Copyright 2017,2022 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# TODO:set up configuration file only for RESTClient and configure this value
TOKEN_LOCK = threading.Lock()
CHUNKSIZE = 4096

REST_REQUEST_ERROR = [{'overallRC': 101, 'modID': 110, 'rc': 101},
                      {1: "Request to zVM Cloud Connector failed: %(error)s",
                       2: "Token file not found: %(error)s",
                       3: "Request to url: %(url)s got unexpected response: "
                       "status_code: %(status)s, reason: %(reason)s, "
                       "text: %(text)s",
                       4: "Get Token failed: %(error)s"},
                      "zVM Cloud Connector request failed",
                      ]
SERVICE_UNAVAILABLE_ERROR = [{'overallRC': 503, 'modID': 110, 'rc': 503},
                             {2: "Service is unavailable. reason: %(reason)s,"
                              " text: %(text)s"},
                             "Service is unavailable",
                             ]
INVALID_API_ERROR = [{'overallRC': 400, 'modID': 110, 'rc': 400},
                     {1: "Invalid API name, '%(msg)s'"},
                     "Invalid API name",
                     ]


class UnexpectedResponse(Exception):
    """Server replied with something the client cannot interpret."""
    def __init__(self, resp):
        self.resp = resp


class ServiceUnavailable(Exception):
    """Server answered 503: the SDK service is not currently usable."""
    def __init__(self, resp):
        self.resp = resp


class TokenNotFound(Exception):
    """The admin token file does not exist."""
    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return repr(self.msg)


class TokenFileOpenError(Exception):
    """The admin token file exists but could not be read."""
    def __init__(self, msg):
        self.msg = msg


class CACertNotFound(Exception):
    """A CA bundle path was given but no file exists there."""
    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return repr(self.msg)


class APINameNotFound(Exception):
    """The requested API name is not in the DATABASE table."""
    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return repr(self.msg)


class ArgsFormatError(Exception):
    """Wrong number of positional arguments for the requested API."""
    def __init__(self, msg):
        self.msg = msg

    def __str__(self):
        return repr(self.msg)


def fill_kwargs_in_body(body, **kwargs):
    """Merge every keyword argument into the request body dict."""
    body.update(kwargs)


def req_version(start_index, *args, **kwargs):
    """GET / - query the API version."""
    return '/', None


def req_guest_list(start_index, *args, **kwargs):
    """GET /guests - list all guests."""
    return '/guests', None


def req_guest_delete(start_index, *args, **kwargs):
    """DELETE /guests/<userid>; userid is substituted from the path args."""
    return '/guests/%s', None


def req_guest_get_definition_info(start_index, *args, **kwargs):
    """GET /guests/<userid> - fetch a guest's definition."""
    return '/guests/%s', None


def req_guest_create(start_index, *args, **kwargs):
    """POST /guests - create a guest from userid/vcpus/memory plus kwargs."""
    guest = {'userid': args[start_index],
             'vcpus': args[start_index + 1],
             'memory': args[start_index + 2]}
    body = {'guest': guest}
    fill_kwargs_in_body(body['guest'], **kwargs)
    return '/guests', body


def req_guest_inspect_stats(start_index, *args, **kwargs):
    """GET /guests/stats for one userid (str) or several (iterable)."""
    target = args[start_index]
    userids = target if type(target) is str else ','.join(target)
    return '/guests/stats?userid=%s' % userids, None


def req_guest_inspect_vnics(start_index, *args, **kwargs):
    """GET /guests/interfacestats for one userid (str) or several."""
    target = args[start_index]
    userids = target if type(target) is str else ','.join(target)
    return '/guests/interfacestats?userid=%s' % userids, None


def req_guests_get_nic_info(start_index, *args, **kwargs):
    """GET /guests/nics, optionally filtered by userid/nic_id/vswitch."""
    # Query parameters are appended in this fixed order.
    pairs = [(name, kwargs.get(name, None))
             for name in ('userid', 'nic_id', 'vswitch')]
    query = '&'.join('%s=%s' % (name, value)
                     for name, value in pairs if value is not None)
    url = '/guests/nics' if not query else '/guests/nics?' + query
    return url, None
if userid is not None: append += 'userid=%s&' % userid if nic_id is not None: append += 'nic_id=%s&' % nic_id if vswitch is not None: append += 'vswitch=%s&' % vswitch append = append.strip('&') url = url + append body = None return url, body # FIXME: the order of args need adjust def req_guest_start(start_index, *args, **kwargs): url = '/guests/%s/action' body = {'action': 'start'} fill_kwargs_in_body(body, **kwargs) return url, body def req_guest_stop(start_index, *args, **kwargs): url = '/guests/%s/action' body = {'action': 'stop'} fill_kwargs_in_body(body, **kwargs) return url, body def req_guest_softstop(start_index, *args, **kwargs): url = '/guests/%s/action' body = {'action': 'softstop'} fill_kwargs_in_body(body, **kwargs) return url, body def req_guest_pause(start_index, *args, **kwargs): url = '/guests/%s/action' body = {'action': 'pause'} return url, body def req_guest_unpause(start_index, *args, **kwargs): url = '/guests/%s/action' body = {'action': 'unpause'} return url, body def req_guest_reboot(start_index, *args, **kwargs): url = '/guests/%s/action' body = {'action': 'reboot'} return url, body def req_guest_reset(start_index, *args, **kwargs): url = '/guests/%s/action' body = {'action': 'reset'} return url, body def req_guest_get_console_output(start_index, *args, **kwargs): url = '/guests/%s/action' body = {'action': 'get_console_output'} return url, body def req_guest_live_migrate(start_index, *args, **kwargs): url = '/guests/%s/action' body = {'action': 'live_migrate_vm', 'dest_zcc_userid': args[start_index], 'destination': args[start_index + 1], 'parms': args[start_index + 2], 'operation': args[start_index + 3]} return url, body def req_guest_register(start_index, *args, **kwargs): url = '/guests/%s/action' body = {'action': 'register_vm', 'meta': args[start_index], 'net_set': args[start_index + 1]} if len(args) - start_index == 3: body['port_macs'] = args[start_index + 2] return url, body def req_guest_deregister(start_index, *args, **kwargs): 
url = '/guests/%s/action' body = {'action': 'deregister_vm'} return url, body def req_guest_live_resize_cpus(start_index, *args, **kwargs): url = '/guests/%s/action' body = {'action': 'live_resize_cpus', 'cpu_cnt': args[start_index]} return url, body def req_guest_resize_cpus(start_index, *args, **kwargs): url = '/guests/%s/action' body = {'action': 'resize_cpus', 'cpu_cnt': args[start_index]} return url, body def req_guest_resize_mem(start_index, *args, **kwargs): url = '/guests/%s/action' body = {'action': 'resize_mem', 'size': args[start_index]} return url, body def req_guest_live_resize_mem(start_index, *args, **kwargs): url = '/guests/%s/action' body = {'action': 'live_resize_mem', 'size': args[start_index]} return url, body def req_guest_grow_root_volume(start_index, *args, **kwargs): url = '/guests/%s/action' body = {'action': 'grow_root_volume', 'os_version': args[start_index]} return url, body def req_guest_capture(start_index, *args, **kwargs): url = '/guests/%s/action' body = {'action': 'capture', 'image': args[start_index]} fill_kwargs_in_body(body, **kwargs) return url, body def req_guest_deploy(start_index, *args, **kwargs): url = '/guests/%s/action' body = {'action': 'deploy', 'image': args[start_index]} fill_kwargs_in_body(body, **kwargs) return url, body def req_guest_get_power_state_real(start_index, *args, **kwargs): url = '/guests/%s/power_state_real' body = None return url, body def req_guest_get_info(start_index, *args, **kwargs): url = '/guests/%s/info' body = None return url, body def req_guest_get_user_direct(start_index, *args, **kwargs): url = '/guests/%s/user_direct' body = None return url, body def req_guest_get_adapters_info(start_index, *args, **kwargs): url = '/guests/%s/adapters' body = None return url, body def req_guest_create_nic(start_index, *args, **kwargs): url = '/guests/%s/nic' body = {'nic': {}} fill_kwargs_in_body(body['nic'], **kwargs) return url, body def req_guest_delete_nic(start_index, *args, **kwargs): url = 
'/guests/%s/nic/%s' body = {} fill_kwargs_in_body(body, **kwargs) return url, body def req_guest_nic_couple_to_vswitch(start_index, *args, **kwargs): url = '/guests/%s/nic/%s' body = {'info': {'couple': True, 'vswitch': args[start_index]}} fill_kwargs_in_body(body['info'], **kwargs) return url, body def req_guest_nic_uncouple_from_vswitch(start_index, *args, **kwargs): url = '/guests/%s/nic/%s' body = {'info': {'couple': False}} fill_kwargs_in_body(body['info'], **kwargs) return url, body def req_guest_create_network_interface(start_index, *args, **kwargs): url = '/guests/%s/interface' body = {'interface': {'os_version': args[start_index], 'guest_networks': args[start_index + 1]}} fill_kwargs_in_body(body['interface'], **kwargs) return url, body def req_guest_delete_network_interface(start_index, *args, **kwargs): url = '/guests/%s/interface' body = {'interface': {'os_version': args[start_index], 'vdev': args[start_index + 1]}} fill_kwargs_in_body(body['interface'], **kwargs) return url, body def req_guest_get_power_state(start_index, *args, **kwargs): url = '/guests/%s/power_state' body = None return url, body def req_guest_create_disks(start_index, *args, **kwargs): url = '/guests/%s/disks' body = {'disk_info': {'disk_list': args[start_index]}} return url, body def req_guest_delete_disks(start_index, *args, **kwargs): url = '/guests/%s/disks' body = {'vdev_info': {'vdev_list': args[start_index]}} return url, body def req_guest_config_minidisks(start_index, *args, **kwargs): url = '/guests/%s/disks' body = {'disk_info': {'disk_list': args[start_index]}} fill_kwargs_in_body(body['disk_info'], **kwargs) return url, body def req_volume_attach(start_index, *args, **kwargs): url = '/guests/volumes' body = {'info': {'connection': args[start_index]}} return url, body def req_volume_detach(start_index, *args, **kwargs): url = '/guests/volumes' body = {'info': {'connection': args[start_index]}} return url, body def req_volume_refresh_bootmap(start_index, *args, **kwargs): 
def req_volume_refresh_bootmap(start_index, *args, **kwargs):
    """PUT /volumes/volume_refresh_bootmap - rebuild a volume's bootmap."""
    info = {
        'fcpchannel': kwargs.get('fcpchannels', None),
        'wwpn': kwargs.get('wwpn', None),
        'lun': kwargs.get('lun', None),
        'wwid': kwargs.get('wwid', ''),
        'transportfiles': kwargs.get('transportfiles', ''),
        'guest_networks': kwargs.get('guest_networks', []),
        'fcp_template_id': kwargs.get('fcp_template_id', None),
    }
    body = {'info': info}
    # Any extra kwargs (including the original 'fcpchannels' key) are
    # merged on top of the defaults above.
    fill_kwargs_in_body(body['info'], **kwargs)
    return '/volumes/volume_refresh_bootmap', body


def req_get_volume_connector(start_index, *args, **kwargs):
    """GET /volumes/conn/<userid> - fetch the volume connector info."""
    info = {
        'reserve': kwargs.get('reserve', False),
        'fcp_template_id': kwargs.get('fcp_template_id', None),
        'storage_provider': kwargs.get('storage_provider', None),
    }
    body = {'info': info}
    fill_kwargs_in_body(body['info'], **kwargs)
    return '/volumes/conn/%s', body


def req_get_fcp_templates(start_index, *args, **kwargs):
    """GET /volumes/fcptemplates, optionally filtered.

    NOTE: only the first filter present, in the priority order
    template_id_list > assigner_id > storage_providers > host_default,
    is put on the query string.
    """
    template_id_list = kwargs.get('template_id_list', None)
    assigner_id = kwargs.get('assigner_id', None)
    default_sp_list = kwargs.get('storage_providers', None)
    host_default = kwargs.get('host_default', False)
    if template_id_list:
        query = "?template_id_list=%s" % template_id_list
    elif assigner_id:
        query = "?assigner_id=%s" % assigner_id
    elif default_sp_list:
        query = "?storage_providers=%s" % default_sp_list
    elif host_default:
        query = "?host_default=%s" % host_default
    else:
        query = ''
    return '/volumes/fcptemplates' + query, None


def req_get_fcp_templates_details(start_index, *args, **kwargs):
    """GET /volumes/fcptemplates/detail with raw/statistics/sync flags."""
    template_id_list = kwargs.get('template_id_list', None)
    raw = kwargs.get('raw', False)
    statistics = kwargs.get('statistics', True)
    sync_with_zvm = kwargs.get('sync_with_zvm', False)
    base = '/volumes/fcptemplates/detail?'
    if template_id_list:
        base += "template_id_list=%s&" % template_id_list
    url = base + ("raw=%s&statistics=%s&sync_with_zvm=%s"
                  % (raw, statistics, sync_with_zvm))
    return url, None


def req_delete_fcp_template(start_index, *args, **kwargs):
    """DELETE /volumes/fcptemplates/<template_id>."""
    return '/volumes/fcptemplates/%s', None


def req_get_fcp_usage(start_index, *args, **kwargs):
    """GET /volumes/fcp/<fcp_id> - query usage of one FCP device."""
    return '/volumes/fcp/%s', None


def req_set_fcp_usage(start_index, *args, **kwargs):
    """PUT /volumes/fcp/<fcp_id> - record userid/reserved/connections."""
    userid, reserved, connections, template_id = (
        args[start_index:start_index + 4])
    body = {'info': {'userid': userid,
                     'reserved': reserved,
                     'connections': connections,
                     'fcp_template_id': template_id}}
    fill_kwargs_in_body(body['info'], **kwargs)
    return '/volumes/fcp/%s', body


def req_create_fcp_template(start_index, *args, **kwargs):
    """POST /volumes/fcptemplates - create a template named args[i]."""
    body = {'name': args[start_index]}
    fill_kwargs_in_body(body, **kwargs)
    return '/volumes/fcptemplates', body


def req_edit_fcp_template(start_index, *args, **kwargs):
    # Called via _get_url_body_headers(); the only positional argument
    # (fcp_template_id) is substituted into the url path there, so the
    # body carries nothing but the keyword arguments.
    body = dict()
    fill_kwargs_in_body(body, **kwargs)
    return '/volumes/fcptemplates/%s', body
# hence, only save kwargs in body fill_kwargs_in_body(body, **kwargs) return url, body def req_host_get_info(start_index, *args, **kwargs): url = '/host' body = None return url, body def req_host_get_guest_list(start_index, *args, **kwargs): url = '/host/guests' body = None return url, body def req_host_get_diskpool_volumes(start_index, *args, **kwargs): url = '/host/diskpool_volumes' poolname = kwargs.get('disk_pool', None) append = '' if poolname is not None: append += "?poolname=%s" % poolname url += append body = None return url, body def req_host_diskpool_get_info(start_index, *args, **kwargs): url = '/host/diskpool' poolname = kwargs.get('disk_pool', None) append = '' if poolname is not None: append += "?poolname=%s" % poolname url += append body = None return url, body def req_host_get_volume_info(start_index, *args, **kwargs): url = '/host/volume' volumename = kwargs.get('volume', None) append = '' if volumename is not None: append += "?volumename=%s" % volumename url += append body = None return url, body def req_host_get_ssi_info(start_index, *args, **kwargs): url = '/host/ssi' body = None return url, body def req_image_import(start_index, *args, **kwargs): url = '/images' body = {'image': {'image_name': args[start_index], 'url': args[start_index + 1], 'image_meta': args[start_index + 2]}} fill_kwargs_in_body(body['image'], **kwargs) return url, body def req_image_query(start_index, *args, **kwargs): url = '/images' image_name = kwargs.get('imagename', None) if image_name is None: append = '' else: append = "?" 
append += "imagename=%s" % image_name url += append body = None return url, body def req_image_delete(start_index, *args, **kwargs): url = '/images/%s' body = None return url, body def req_image_export(start_index, *args, **kwargs): url = '/images/%s' body = {'location': {'dest_url': args[start_index]}} fill_kwargs_in_body(body['location'], **kwargs) return url, body def req_image_get_root_disk_size(start_index, *args, **kwargs): url = '/images/%s/root_disk_size' body = None return url, body def req_file_import(start_index, *args, **kwargs): url = '/files' file_spath = args[start_index] body = get_data_file(file_spath) return url, body def req_file_export(start_index, *args, **kwargs): url = '/files' body = {'source_file': args[start_index]} return url, body def req_token_create(start_index, *args, **kwargs): url = '/token' body = None return url, body def req_vswitch_get_list(start_index, *args, **kwargs): url = '/vswitches' body = None return url, body def req_vswitch_create(start_index, *args, **kwargs): url = '/vswitches' body = {'vswitch': {'name': args[start_index]}} fill_kwargs_in_body(body['vswitch'], **kwargs) return url, body def req_vswitch_delete(start_index, *args, **kwargs): url = '/vswitches/%s' body = None return url, body def req_vswitch_query(start_index, *args, **kwargs): url = '/vswitches/%s' body = None return url, body def req_vswitch_grant_user(start_index, *args, **kwargs): url = '/vswitches/%s' body = {'vswitch': {'grant_userid': args[start_index]}} fill_kwargs_in_body(body['vswitch'], **kwargs) return url, body def req_vswitch_revoke_user(start_index, *args, **kwargs): url = '/vswitches/%s' body = {'vswitch': {'revoke_userid': args[start_index]}} fill_kwargs_in_body(body['vswitch'], **kwargs) return url, body def req_vswitch_set_vlan_id_for_user(start_index, *args, **kwargs): url = '/vswitches/%s' body = {'vswitch': {'user_vlan_id': {'userid': args[start_index], 'vlanid': args[start_index + 1]}}} fill_kwargs_in_body(body['vswitch'], 
**kwargs) return url, body # Save data used for comprsing RESTful request # method: request type # args_required: arguments in args are required, record the count here. # if len(args) not equal to this number, raise exception # params_path: parameters amount in url path # request: function that provide url and body for comprosing a request DATABASE = { 'version': { 'method': 'GET', 'args_required': 0, 'params_path': 0, 'request': req_version}, 'guest_create': { 'method': 'POST', 'args_required': 3, 'params_path': 0, 'request': req_guest_create}, 'guest_list': { 'method': 'GET', 'args_required': 0, 'params_path': 0, 'request': req_guest_list}, 'guest_inspect_stats': { 'method': 'GET', 'args_required': 1, 'params_path': 0, 'request': req_guest_inspect_stats}, 'guest_inspect_vnics': { 'method': 'GET', 'args_required': 1, 'params_path': 0, 'request': req_guest_inspect_vnics}, 'guests_get_nic_info': { 'method': 'GET', 'args_required': 0, 'params_path': 0, 'request': req_guests_get_nic_info}, 'guest_delete': { 'method': 'DELETE', 'args_required': 1, 'params_path': 1, 'request': req_guest_delete}, 'guest_get_definition_info': { 'method': 'GET', 'args_required': 1, 'params_path': 1, 'request': req_guest_get_definition_info}, 'guest_start': { 'method': 'POST', 'args_required': 1, 'params_path': 1, 'request': req_guest_start}, 'guest_stop': { 'method': 'POST', 'args_required': 1, 'params_path': 1, 'request': req_guest_stop}, 'guest_softstop': { 'method': 'POST', 'args_required': 1, 'params_path': 1, 'request': req_guest_softstop}, 'guest_pause': { 'method': 'POST', 'args_required': 1, 'params_path': 1, 'request': req_guest_pause}, 'guest_unpause': { 'method': 'POST', 'args_required': 1, 'params_path': 1, 'request': req_guest_unpause}, 'guest_reboot': { 'method': 'POST', 'args_required': 1, 'params_path': 1, 'request': req_guest_reboot}, 'guest_reset': { 'method': 'POST', 'args_required': 1, 'params_path': 1, 'request': req_guest_reset}, 'guest_get_console_output': { 
'method': 'POST', 'args_required': 1, 'params_path': 1, 'request': req_guest_get_console_output}, 'guest_register': { 'method': 'POST', 'args_required': 3, 'args_optional': 1, 'params_path': 1, 'request': req_guest_register}, 'guest_deregister': { 'method': 'POST', 'args_required': 1, 'params_path': 1, 'request': req_guest_deregister}, 'guest_live_migrate': { 'method': 'POST', 'args_required': 5, 'params_path': 1, 'request': req_guest_live_migrate}, 'guest_live_resize_cpus': { 'method': 'POST', 'args_required': 2, 'params_path': 1, 'request': req_guest_live_resize_cpus}, 'guest_resize_cpus': { 'method': 'POST', 'args_required': 2, 'params_path': 1, 'request': req_guest_resize_cpus}, 'guest_live_resize_mem': { 'method': 'POST', 'args_required': 2, 'params_path': 1, 'request': req_guest_live_resize_mem}, 'guest_resize_mem': { 'method': 'POST', 'args_required': 2, 'params_path': 1, 'request': req_guest_resize_mem}, 'guest_grow_root_volume': { 'method': 'POST', 'args_required': 2, 'params_path': 1, 'request': req_guest_grow_root_volume}, 'guest_capture': { 'method': 'POST', 'args_required': 2, 'params_path': 1, 'request': req_guest_capture}, 'guest_deploy': { 'method': 'POST', 'args_required': 2, 'params_path': 1, 'request': req_guest_deploy}, 'guest_get_power_state_real': { 'method': 'GET', 'args_required': 1, 'params_path': 1, 'request': req_guest_get_power_state_real}, 'guest_get_info': { 'method': 'GET', 'args_required': 1, 'params_path': 1, 'request': req_guest_get_info}, 'guest_get_user_direct': { 'method': 'GET', 'args_required': 1, 'params_path': 1, 'request': req_guest_get_user_direct}, 'guest_get_adapters_info': { 'method': 'GET', 'args_required': 1, 'params_path': 1, 'request': req_guest_get_adapters_info}, 'guest_create_nic': { 'method': 'POST', 'args_required': 1, 'params_path': 1, 'request': req_guest_create_nic}, 'guest_delete_nic': { 'method': 'DELETE', 'args_required': 2, 'params_path': 2, 'request': req_guest_delete_nic}, 
'guest_nic_couple_to_vswitch': { 'method': 'PUT', 'args_required': 3, 'params_path': 2, 'request': req_guest_nic_couple_to_vswitch}, 'guest_nic_uncouple_from_vswitch': { 'method': 'PUT', 'args_required': 2, 'params_path': 2, 'request': req_guest_nic_uncouple_from_vswitch}, 'guest_create_network_interface': { 'method': 'POST', 'args_required': 3, 'params_path': 1, 'request': req_guest_create_network_interface}, 'guest_delete_network_interface': { 'method': 'DELETE', 'args_required': 3, 'params_path': 1, 'request': req_guest_delete_network_interface}, 'guest_get_power_state': { 'method': 'GET', 'args_required': 1, 'params_path': 1, 'request': req_guest_get_power_state}, 'guest_create_disks': { 'method': 'POST', 'args_required': 2, 'params_path': 1, 'request': req_guest_create_disks}, 'guest_delete_disks': { 'method': 'DELETE', 'args_required': 2, 'params_path': 1, 'request': req_guest_delete_disks}, 'guest_config_minidisks': { 'method': 'PUT', 'args_required': 2, 'params_path': 1, 'request': req_guest_config_minidisks}, 'volume_attach': { 'method': 'POST', 'args_required': 1, 'params_path': 0, 'request': req_volume_attach}, 'volume_detach': { 'method': 'DELETE', 'args_required': 1, 'params_path': 0, 'request': req_volume_detach}, 'volume_refresh_bootmap': { 'method': 'PUT', 'args_required': 0, 'params_path': 0, 'request': req_volume_refresh_bootmap}, 'get_volume_connector': { 'method': 'GET', 'args_required': 1, 'params_path': 1, 'request': req_get_volume_connector}, 'get_fcp_templates': { 'method': 'GET', 'args_required': 0, 'args_optional': 1, 'params_path': 0, 'request': req_get_fcp_templates}, 'get_fcp_templates_details': { 'method': 'GET', 'args_required': 0, 'params_path': 0, 'request': req_get_fcp_templates_details}, 'delete_fcp_template': { 'method': 'DELETE', 'args_required': 1, 'params_path': 1, 'request': req_delete_fcp_template}, 'get_fcp_usage': { 'method': 'GET', 'args_required': 1, 'params_path': 1, 'request': req_get_fcp_usage}, 'set_fcp_usage': { 
'method': 'PUT', 'args_required': 5, 'params_path': 1, 'request': req_set_fcp_usage}, 'create_fcp_template': { 'method': 'POST', 'args_required': 1, 'params_path': 0, 'request': req_create_fcp_template}, 'edit_fcp_template': { 'method': 'PUT', # args_required and args_optional are used for args rather than kwargs, # refer to 'def _check_arguments' for details. # In total, # 1 args: fcp_template_id # 5 kwargs: name, desc, fcp_devices, host_default, storage_providers # args_required : 1 # fcp_template_id 'args_required': 1, # params_path is the count of params in url path, # url path is '/volumes/fcptemplates/%s' # %s is for fcp_template_id # %s is from args 'params_path': 1, 'request': req_edit_fcp_template}, 'host_get_info': { 'method': 'GET', 'args_required': 0, 'params_path': 0, 'request': req_host_get_info}, 'host_get_guest_list': { 'method': 'GET', 'args_required': 0, 'params_path': 0, 'request': req_host_get_guest_list}, 'host_get_diskpool_volumes': { 'method': 'GET', 'args_required': 0, 'params_path': 0, 'request': req_host_get_diskpool_volumes}, 'host_diskpool_get_info': { 'method': 'GET', 'args_required': 0, 'params_path': 0, 'request': req_host_diskpool_get_info}, 'host_get_volume_info': { 'method': 'GET', 'args_required': 0, 'params_path': 0, 'request': req_host_get_volume_info}, 'host_get_ssi_info': { 'method': 'GET', 'args_required': 0, 'params_path': 0, 'request': req_host_get_ssi_info}, 'image_import': { 'method': 'POST', 'args_required': 3, 'params_path': 0, 'request': req_image_import}, 'image_query': { 'method': 'GET', 'args_required': 0, 'params_path': 0, 'request': req_image_query}, 'image_delete': { 'method': 'DELETE', 'args_required': 1, 'params_path': 1, 'request': req_image_delete}, 'image_export': { 'method': 'PUT', 'args_required': 2, 'params_path': 1, 'request': req_image_export}, 'image_get_root_disk_size': { 'method': 'GET', 'args_required': 1, 'params_path': 1, 'request': req_image_get_root_disk_size}, 'file_import': { 'method': 'PUT', 
def get_data_file(fpath):
    """Open *fpath* for binary reading (streaming upload); None if no path.

    NOTE(review): the open file object is handed to requests as the request
    body and is expected to be consumed there -- confirm it is not leaked
    on early error paths.
    """
    if fpath:
        return open(fpath, 'rb')


class RESTClient(object):
    """Client side of the z/VM Cloud Connector REST API.

    For each SDK API name, builds url/body/headers from the DATABASE
    table, sends the HTTP request, and converts either the response or a
    client-side failure into the standard SDK result dict.
    """

    def __init__(self, ip='127.0.0.1', port=8888, ssl_enabled=False,
                 verify=False, token_path=None, auth=None):
        """
        :param ip: IP address of the SDK server
        :param port: port the SDK server listens on
        :param ssl_enabled: use https:// instead of http:// when True
        :param verify: False to skip TLS verification, True to verify with
                       the default CA bundle, or a path to a CA bundle file
        :param token_path: path of the admin token file (token auth)
        :param auth: 'token' to send X-Auth-Token with every request
        :raises CACertNotFound: verify is a path but no file exists there
        """
        # SSL enabled or not
        scheme = 'https' if ssl_enabled else 'http'
        self.base_url = '%s://%s:%s' % (scheme, ip, port)
        # A string value for verify is the path of a CA certificate;
        # validate it up front instead of failing on the first request.
        if isinstance(verify, str) and not os.path.exists(verify):
            raise CACertNotFound('CA certificate file not found.')
        self.verify = verify
        self.token_path = token_path
        # This is the client side, so auth policy must come from the
        # caller and NOT from the server's zvmsdk.conf settings.
        self.auth = auth

    def _check_arguments(self, api_name, *args, **kwargs):
        """Validate the API name and the positional argument count.

        :raises APINameNotFound: api_name is not in DATABASE
        :raises ArgsFormatError: too few or too many positional args
        """
        if api_name not in DATABASE.keys():
            msg = "API name %s not exist." % api_name
            raise APINameNotFound(msg)
        count = DATABASE[api_name]['args_required']
        optional = 0
        if 'args_optional' in DATABASE[api_name].keys():
            optional = DATABASE[api_name]['args_optional']
        if len(args) < count:
            msg = "Missing some args,please check:%s." % str(args)
            raise ArgsFormatError(msg)
        if len(args) > count + optional:
            msg = "Too many args,please check:%s." % str(args)
            raise ArgsFormatError(msg)

    def _get_admin_token(self, path):
        """Read the admin token string from *path*.

        The read is serialized across threads with TOKEN_LOCK.

        :raises TokenNotFound: the file does not exist
        :raises TokenFileOpenError: the file exists but cannot be read
        """
        if not os.path.exists(path):
            raise TokenNotFound('token file not found.')
        with TOKEN_LOCK:
            try:
                with open(path, 'r') as fd:
                    token = fd.read().strip()
            except Exception:
                raise TokenFileOpenError('token file open failed.')
        return token

    def _get_token(self):
        """Exchange the admin token for a session token via POST /token.

        :raises ServiceUnavailable: server answered 503
        :raises UnexpectedResponse: no X-Auth-Token header in the reply
        """
        _headers = {'Content-Type': 'application/json'}
        admin_token = self._get_admin_token(self.token_path)
        _headers['X-Admin-Token'] = admin_token
        url = self.base_url + '/token'
        method = 'POST'
        response = requests.request(method, url, headers=_headers,
                                    verify=self.verify)
        if response.status_code == 503:
            # service unavailable
            raise ServiceUnavailable(response)
        try:
            token = response.headers['X-Auth-Token']
        except KeyError:
            raise UnexpectedResponse(response)
        return token

    def _get_url_body_headers(self, api_name, *args, **kwargs):
        """Build the full url, request body and headers for *api_name*."""
        headers = {'Content-Type': 'application/json'}
        count_params_in_path = DATABASE[api_name]['params_path']
        func = DATABASE[api_name]['request']
        url, body = func(count_params_in_path, *args, **kwargs)
        if api_name in ['file_import']:
            # File uploads stream raw bytes rather than JSON.
            headers['Content-Type'] = 'application/octet-stream'
        if count_params_in_path > 0:
            # The leading positional args fill the %s placeholders in url.
            url = url % tuple(args[0:count_params_in_path])
        full_url = '%s%s' % (self.base_url, url)
        return full_url, body, headers

    def _process_rest_response(self, response):
        """Validate the response content type; stream octet-stream bodies.

        :returns: (response, body_iter) where body_iter is a chunk
                  generator for octet-stream responses, else None
        :raises UnexpectedResponse: unknown or missing Content-Type
        """
        # A missing Content-Type (e.g. a proxy error page) must be treated
        # as unexpected, not crash with TypeError on 'in None'.
        content_type = response.headers.get('Content-Type') or ''
        if ('application/json' not in content_type) and (
                'application/octet-stream' not in content_type):
            # Currently, all the response content from zvmsdk wsgi are
            # 'application/json' or application/octet-stream type.
            # If it is not, the response may be sent by the HTTP server
            # due to an internal server error or a timeout; it is an
            # unexpected response to the rest client.
            raise UnexpectedResponse(response)
        if 'application/octet-stream' in content_type:
            # Do not read the whole response into memory when downloading.
            body_iter = self._close_after_stream(response, CHUNKSIZE)
        else:
            body_iter = None
        return response, body_iter

    def api_request(self, url, method='GET', body=None, headers=None,
                    **kwargs):
        """Send one HTTP request and return the raw requests.Response."""
        _headers = {}
        _headers.update(headers or {})
        if body is not None and not isinstance(body, six.string_types):
            try:
                body = json.dumps(body)
            except TypeError:
                # body is a file-like object (streaming upload): send as-is
                pass
        if self.auth == 'token' and self.token_path is not None:
            _headers['X-Auth-Token'] = self._get_token()
        # Stream only for octet-stream calls; .get() avoids a KeyError
        # when the caller passed no Content-Type header.
        stream = _headers.get('Content-Type') == 'application/octet-stream'
        return requests.request(method, url, data=body, headers=_headers,
                                verify=self.verify, stream=stream)

    def call(self, api_name, *args, **kwargs):
        """Invoke one SDK API and return the standard SDK result dict."""
        try:
            # check validation of arguments
            self._check_arguments(api_name, *args, **kwargs)
            method = DATABASE[api_name]['method']
            url, body, headers = self._get_url_body_headers(api_name,
                                                            *args, **kwargs)
            response = self.api_request(url, method, body=body,
                                        headers=headers)
            resp, body_iter = self._process_rest_response(response)
            if api_name == 'file_export' and resp.status_code == 200:
                # Save the file in a temporary path
                return self._save_exported_file(body_iter)
            results = json.loads(resp.content)
        # Each handler copies the shared error table before updating it:
        # mutating REST_REQUEST_ERROR[0] in place would pollute every
        # later error result returned by this module.
        except TokenFileOpenError as err:
            errmsg = REST_REQUEST_ERROR[1][4] % {'error': err.msg}
            results = dict(REST_REQUEST_ERROR[0])
            results.update({'rs': 4, 'errmsg': errmsg, 'output': ''})
        except TokenNotFound as err:
            errmsg = REST_REQUEST_ERROR[1][2] % {'error': err.msg}
            results = dict(REST_REQUEST_ERROR[0])
            results.update({'rs': 2, 'errmsg': errmsg, 'output': ''})
        except UnexpectedResponse as err:
            errmsg = REST_REQUEST_ERROR[1][3] % ({
                'url': err.resp.url, 'status': err.resp.status_code,
                'reason': err.resp.reason, 'text': err.resp.text})
            results = dict(REST_REQUEST_ERROR[0])
            results.update({'rs': 3, 'errmsg': errmsg, 'output': ''})
        except ServiceUnavailable as err:
            errmsg = SERVICE_UNAVAILABLE_ERROR[1][2] % {
                'reason': err.resp.reason, 'text': err.resp.text}
            results = dict(SERVICE_UNAVAILABLE_ERROR[0])
            results.update({'rs': 2, 'errmsg': errmsg, 'output': ''})
        except Exception as err:
            errmsg = REST_REQUEST_ERROR[1][1] % {'error': six.text_type(err)}
            results = dict(REST_REQUEST_ERROR[0])
            results.update({'rs': 1, 'errmsg': errmsg, 'output': ''})
        return results

    def _save_exported_file(self, body_iter):
        """Write a downloaded file to a fresh temp dir; return SDK result."""
        fname = str(uuid.uuid1())
        tempDir = tempfile.mkdtemp()
        os.chmod(tempDir, 0o777)
        target_file = '/'.join([tempDir, fname])
        self._save_file(body_iter, target_file)
        file_size = os.path.getsize(target_file)
        output = {'filesize_in_bytes': file_size,
                  'dest_url': target_file}
        results = {'overallRC': 0, 'modID': None, 'rc': 0,
                   'output': output, 'rs': 0, 'errmsg': ''}
        return results

    def _close_after_stream(self, response, chunk_size):
        """Iterate over the content and ensure the response is closed."""
        # Yield each chunk in the response body
        for chunk in response.iter_content(chunk_size=chunk_size):
            yield chunk
        # Once we're done streaming the body, ensure everything is closed.
        response.close()

    def _save_file(self, data, path):
        """Save a file to the specified path.

        :param data: iterable of binary chunks of the file
        :param path: path to save the file to
        """
        with open(path, 'wb') as tfile:
            for chunk in data:
                tfile.write(chunk)
SDKCLIENT_MODID = 110

# Error tables: [0] = base result fields, [1] = per-rs message formats,
# [2] = summary text. These are shared module-level constants and must
# never be mutated by the error constructors below.
SOCKET_ERROR = [{'overallRC': 101, 'modID': SDKCLIENT_MODID, 'rc': 101},
                {1: "Failed to create client socket, error: %(error)s",
                 2: ("Failed to connect SDK server %(addr)s:%(port)s, "
                     "error: %(error)s"),
                 3: ("Failed to send all API call data to SDK server, "
                     "only %(sent)d bytes sent. API call: %(api)s"),
                 4: "Client receive empty data from SDK server",
                 5: ("Client got socket error when sending API call to "
                     "SDK server, error: %(error)s"),
                 6: ("Client got socket error when receiving response "
                     "from SDK server, error: %(error)s")},
                "SDK client or server get socket error",
                ]
INVALID_API_ERROR = [{'overallRC': 400, 'modID': SDKCLIENT_MODID, 'rc': 400},
                     {1: "Invalid API name, '%(msg)s'"},
                     "Invalid API name"
                     ]


class SDKSocketClient(object):
    """Send SDK API calls to the SDK server over a plain TCP socket."""

    def __init__(self, addr='127.0.0.1', port=2000, request_timeout=3600):
        self.addr = addr
        self.port = port
        # request_timeout is used to set the client socket timeout when
        # waiting for results returned from the server.
        self.timeout = request_timeout

    def _construct_api_name_error(self, msg):
        """Build an 'invalid API name' result dict.

        Works on a copy: updating INVALID_API_ERROR[0] in place would
        pollute the shared table and alias every later error result.
        """
        results = dict(INVALID_API_ERROR[0])
        results.update({'rs': 1,
                        'errmsg': INVALID_API_ERROR[1][1] % {'msg': msg},
                        'output': ''})
        return results

    def _construct_socket_error(self, rs, **kwargs):
        """Build a socket-failure result dict for reason code *rs*.

        Works on a copy of SOCKET_ERROR[0] for the same reason as above.
        """
        results = dict(SOCKET_ERROR[0])
        results.update({'rs': rs,
                        'errmsg': SOCKET_ERROR[1][rs] % kwargs,
                        'output': ''})
        return results

    def call(self, func, *api_args, **api_kwargs):
        """Send API call to SDK server and return results"""
        if not isinstance(func, str) or (func == ''):
            msg = ('Invalid input for API name, should be a'
                   'string, type: %s specified.') % type(func)
            return self._construct_api_name_error(msg)
        # Create client socket
        try:
            cs = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        except socket.error as err:
            return self._construct_socket_error(1,
                                                error=six.text_type(err))
        try:
            # Set socket timeout
            cs.settimeout(self.timeout)
            # Connect SDK server
            try:
                cs.connect((self.addr, self.port))
            except socket.error as err:
                return self._construct_socket_error(
                    2, addr=self.addr, port=self.port,
                    error=six.text_type(err))
            # Prepare the data to be sent and switch to bytes if needed
            api_data = json.dumps((func, api_args, api_kwargs))
            api_data = api_data.encode()
            # Send the API call data to SDK server
            sent = 0
            total_len = len(api_data)
            got_error = False
            try:
                while (sent < total_len):
                    this_sent = cs.send(api_data[sent:])
                    if this_sent == 0:
                        got_error = True
                        break
                    sent += this_sent
            except socket.error as err:
                return self._construct_socket_error(
                    5, error=six.text_type(err))
            if got_error or sent != total_len:
                return self._construct_socket_error(3, sent=sent,
                                                    api=api_data)
            # Receive data from server
            return_blocks = []
            try:
                while True:
                    block = cs.recv(4096)
                    if not block:
                        break
                    block = bytes.decode(block)
                    return_blocks.append(block)
            except socket.error as err:
                # When the sdkserver can't handle all the client requests,
                # some requests are rejected: the client socket connects
                # and sends fine but recv fails with
                # "error: [Errno 104] Connection reset by peer".
                return self._construct_socket_error(
                    6, error=six.text_type(err))
        finally:
            # Always close the client socket to avoid leaving too many
            # hanging sockets behind.
            cs.close()
        # Transform the received stream to standard result form.
        # This client assumes the server returns data in the standard
        # result form, so it just decodes and returns what was received.
        if return_blocks:
            results = json.loads(''.join(return_blocks))
        else:
            results = self._construct_socket_error(4)
        return results
## License This package is licensed under the [Apache 2.0 License](LICENSE) ## Bug reporting If you encounter any problem with this package, please open a bug against [cloud connector issue tracker](https://bugs.launchpad.net/python-zvm-sdk/+bug) ## Governance Feilong is a hosted project at the [Open Mainframe Project](https://openmainframeproject.com), and is openly governed as defined in [GOVERNANCE.md](GOVERNANCE.md). ---- Creative Commons License
Documentation license: Creative Commons Attribution 4.0 International License. zVMCloudConnector-1.6.3/MANIFEST.in0000664000175000017510000000020013672563714016266 0ustar ruirui00000000000000include LICENSE include README.md include CONTRIBUTING.md include requirements.txt include test-requirements.txt include tox.inizVMCloudConnector-1.6.3/scripts/0000775000175000017510000000000014315232035016207 5ustar ruirui00000000000000zVMCloudConnector-1.6.3/scripts/sdkserver0000664000175000017510000000144513575566551020172 0ustar ruirui00000000000000#!/usr/bin/python # Copyright 2017 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from zvmsdk import config from zvmsdk import log if __name__ == '__main__': config.load_config() log.setup_log() from zvmsdk import sdkserver sdkserver.start_daemon() zVMCloudConnector-1.6.3/scripts/zvmsdk-gentoken0000775000175000017510000000550514263437505021303 0ustar ruirui00000000000000#!/bin/python import getopt import os import random import stat import string import sys DEFAULT_TOKEN_PATH = '/etc/zvmsdk/token.dat' def usage(): print('Usage: zvmsdk-gentoken [-h] [-u] [token file path]') print('') print('Tool to Generate and Update Token') print('') print('Optional aruments:') print(' -h, --help\t\tshow this help message and exit') print(' -u, --update\tgenerate a random token and update into file') print(' \t\t\tif path not assigned, use default /etc/zvmsdk/token.dat.') def random_token(): # random token only include upper cases and digits token_random = ''.join( random.choice(string.ascii_uppercase +\ string.ascii_lowercase +\ string.digits) for _ in range(42)) return token_random def create_token_file(file_path): #if file not exits, create it if os.path.exists(file_path): print('Token File Already Existed!') print('') sys.exit(2) # create token file os.mknod(file_path) # generate random token token = random_token() # write token data with open(file_path, 'w') as fp: fp.write(token) # change file mode to 400 os.chmod(file_path, stat.S_IRUSR|stat.S_IWUSR) def update_token_file(file_path): #if file not exits, raise exception if not os.path.exists(file_path): print('Token File Not Exist!\n') print('') sys.exit(3) token = random_token() # write token data with open(file_path, 'w') as fp: fp.write(token) def main(argv): try: opts, args = getopt.gnu_getopt(argv, 'hu', ["help", "update"]) # if none arguments and options means initialization if opts == [] and args == []: create_token_file(DEFAULT_TOKEN_PATH) sys.exit(0) if args != [] and opts == []: create_token_file(args[0]) sys.exit(0) # print help first and exit for o, a in opts: # print help message if o in 
('-h', '--help'): usage() sys.exit(0) # process options for o, a in opts: # update token file if o in ('-u', '--update'): if args == []: file_path = DEFAULT_TOKEN_PATH else: file_path = args[0] update_token_file(file_path) msg = "token updated, please remember " +\ "to update the token data on client side!" print(msg) except getopt.GetoptError: print('syntax ERROR!') usage() sys.exit(1) # exist code: # 0: success # 1: sytax error # 2: file not exist # 3: file already existed if "__main__" == __name__: main(sys.argv[1:]) sys.exit(0) zVMCloudConnector-1.6.3/PKG-INFO0000664000175000017510000000455114315232035015622 0ustar ruirui00000000000000Metadata-Version: 2.1 Name: zVMCloudConnector Version: 1.6.3 Summary: z/VM cloud management library in Python Home-page: https://github.com/openmainframeproject/python-zvm-sdk Author: IBM License: ASL 2.0 Keywords: zvm cloud library Platform: UNKNOWN Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 Description-Content-Type: text/markdown License-File: LICENSE ![](https://github.com/openmainframeproject/artwork/raw/master/projects/feilong/feilong-color.svg) ![License](https://img.shields.io/github/license/OpenMainframeProject/feilong) # Feilong ## Description Feilong is a development sdk for managing z/VM. It provides a set of APIs to operate z/VM including guest, image, network, volume etc. Just like os-win for nova hyperv driver and oslo.vmware for nova vmware driver, Feilong is for nova z/VM driver and other z/VM related openstack driver such as neutron, ceilometer. 
## Quickstart Please refer to [Quick Start Guide](https://cloudlib4zvm.readthedocs.io/en/latest/quickstart.html). ## Documentation Please refer to [Documentation of Feilong](https://cloudlib4zvm.readthedocs.io/en/latest/index.html). ## License This package is licensed under the [Apache 2.0 License](LICENSE) ## Bug reporting If you encounter any problem with this package, please open a bug against [cloud connector issue tracker](https://bugs.launchpad.net/python-zvm-sdk/+bug) ## Governance Feilong is a hosted project at the [Open Mainframe Project](https://openmainframeproject.com), and is openly governed as defined in [GOVERNANCE.md](GOVERNANCE.md). ---- Creative Commons License
Documentation license: Creative Commons Attribution 4.0 International License. zVMCloudConnector-1.6.3/CONTRIBUTING.md0000664000175000017510000000422613720612363016762 0ustar ruirui00000000000000### Welcome We welcome contributions to python-zvm-sdk! ### Repository The repository for python-zvm-sdk on GitHub: https://github.com/openmainframeproject/python-zvm-sdk ### Reporting bugs If you are a user and you find a bug, please submit a [bug](https://bugs.launchpad.net/python-zvm-sdk). Please try to provide sufficient information for someone else to reproduce the issue. One of the project's maintainers should respond to your issue within 24 hours. If not, please bump the issue and request that it be reviewed. ### Fixing bugs Review the [bug list](https://bugs.launchpad.net/python-zvm-sdk) and find something that interests you. We are using the [GerritHub](https://review.gerrithub.io/) process to manage code contributions. To work on something, whether a new feature or a bugfix: ## 1. Clone python-zvm-sdk locally ``` git clone https://github.com/openmainframeproject/python-zvm-sdk.git ``` ## 2. Add the GerritHub repository as a remote as gerrit ``` git remote add gerrit ssh://@review.gerrithub.io:29418/openmainframeproject/python-zvm-sdk ``` Where `````` is your GerritHub username. And, you should add the public key of your workstation into your GerritHub SSH public keys. ## 3. Create a branch Create a descriptively-named branch off of your cloned repository ``` cd python-zvm-sdk git checkout -b fix-bug-xxxx ``` ## 4. Commit your code Commit to that branch locally ## 5. Commit messages Commit messages must have a short description no longer than 50 characters followed by a blank line and a longer, more descriptive message that includes reference to issue(s) being addressed so that they will be automatically closed on a merge e.g. ```Closes #1234``` or ```Fixes #1234```. ## 6. Run checks Run checks via issue: ``` tox -v ``` ## 7. 
Once all checks passed, you can submit your change for review: ``` git review ``` ## 8. Any code changes that affect documentation should be accompanied by corresponding changes (or additions) to the documentation and tests. This will ensure that if the merged PR is reversed, all traces of the change will be reversed as well. zVMCloudConnector-1.6.3/data/0000775000175000017510000000000014315232035015431 5ustar ruirui00000000000000zVMCloudConnector-1.6.3/data/sudoers-zvmsdk0000664000175000017510000000065113672563714020376 0ustar ruirui00000000000000zvmsdk ALL = (ALL) NOPASSWD:/sbin/vmcp, /opt/zthin/bin/smcli, /sbin/chccwdev, /sbin/cio_ignore, /sbin/fdasd, /sbin/fdisk, /usr/sbin/vmur, /bin/mount, /bin/umount, /sbin/mkfs, /sbin/mkfs.xfs, /usr/sbin/mkswap, /sbin/dasdfmt, /opt/zthin/bin/unpackdiskimage, /opt/zthin/bin/creatediskimage, /opt/zthin/bin/linkdiskandbringonline, /opt/zthin/bin/offlinediskanddetach, /opt/zthin/bin/IUCV/iucvclnt, /opt/zthin/bin/refresh_bootmap zVMCloudConnector-1.6.3/data/setupDisk0000775000175000017510000005360614315210052017337 0ustar ruirui00000000000000#!/bin/bash ############################################################################### # Copyright 2017 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # ############################################################################### # This script is used to handle xCAT disk initialization and configuration(eg. 
# attach/detach a SCSI volume, add an additional ephemeral disk when vm is in # inactive status). It will be invoked and executed when vm start up. ############################################################################### version=3.0 function getOsVersion { # @Description: # Returns the Linux distro version in an easy to parse format. # @Input: # None # @Output: # os - Variable set with OS and version information. For example: # "rhel62" or "sles11sp2" # @Code: if [[ -e "/etc/os-release" ]]; then os=`cat /etc/os-release | grep "^ID=" | sed \ -e 's/ID=//' \ -e 's/"//g'` version=`cat /etc/os-release | grep "^VERSION_ID=" | sed \ -e 's/VERSION_ID=//' \ -e 's/"//g' \ -e 's/\.//'` os=$os$version #The /etc/SuSE-release file will be deprecated in sles11.4 and later release elif [[ -e "/etc/SuSE-release" ]]; then os='sles' version=`cat /etc/SuSE-release | grep "VERSION =" | sed \ -e 's/^.*VERSION =//' \ -e 's/\s*$//' \ -e 's/.//' \ -e 's/[^0-9]*([0-9]+).*/$1/'` os=$os$version # Append service level level=`echo "/etc/SuSE-release" | grep "LEVEL =" | sed \ -e 's/^.*LEVEL =//' \ -e 's/\s*$//' \ -e 's/.//' \ -e 's/[^0-9]*([0-9]+).*/$1/'` os=$os'sp'$level #The /etc/redhat-release file will be deprecated in rhel7 and later release elif [[ -e "/etc/redhat-release" ]]; then os='rhel' version=`cat /etc/redhat-release | grep -i "Red Hat Enterprise Linux Server" | sed \ -e 's/[A-Za-z\/\.\(\)]//g' \ -e 's/^ *//g' \ -e 's/ *$//g' \ -e 's/\s.*$//'` os=$os$version fi return } function onlineDevice { # @Description: # Brings a Linux device online. # @Input: # Device number, e.g. "0.0.000c" # @Output: # Return code indicates success or failure # @Code: device=$1 local funcName="onlineDevice" rc=$(/sbin/chccwdev -e $device > /dev/null; echo $?) if (( rc != 0 )); then if [[ -e /sbin/cio_ignore ]]; then rc=$(/sbin/cio_ignore -r 0.0.$device > /dev/null; echo $?) which udevadm &> /dev/null && udevadm settle || udevsettle fi rc=$(/sbin/chccwdev -e $device > /dev/null; echo $?) 
if (( rc != 0 )); then echo "$funcName (Error) Could not activate the virtual device $device" return 1 fi fi which udevadm &> /dev/null && udevadm settle || udevsettle return 0 } function setupDisk { # @Description: # Processes a disk file for the following functions: # Create a file system node # Remove a file system node # Setup a SCSI volume # Removes a SCSI volume # Add a mdisk based ephemeral disk # @Input: # Disk handling parameters # @Output: # None # @Code: local funcName="setupDisk" # Parse the parameter from parameter list for parameter in $@; do keyName=${parameter%\=*} value=${parameter#*\=} value=$(echo ${value} | sed -e 's/^ *//g') newKey='xcat_'$keyName eval $newKey=$value done # Remove the invokeScript.sh file after we have read it rm -f invokeScript.sh ########################################################################## # Handle creating a file system node # Disk handler input parameters: # action - "createfilesysnode" # srcFile - location/name of the source file for the mknod command # xcat_tgtFile - location/name of the target file for the mknod command ########################################################################## if [[ $xcat_action == "createfilesysnode" ]]; then echo "Creating a file system node, source: $xcat_srcFile, target: $xcat_tgtFile" if [[ ! -n $xcat_srcFile ]]; then echo "$funcName (Error) Source file for creating a file system node was not specified" return fi if [[ ! -n $xcat_tgtFile ]]; then echo "$funcName (Error) Target file for creating a file system node was not specified" return fi if [[ -e $xcat_tgtFile ]]; then echo "$funcName (Error) Target file for creating a file system node already exists" return fi configFile='/etc/udev/rules.d/56-zfcp.rules' # Create udev config file if not exist if [[ ! 
-e $configFile ]]; then touch $configFile if [[ $os == rhel* ]]; then echo "KERNEL==\"zfcp\", RUN+=\"/sbin/zfcpconf.sh\"" >> ${configFile} echo "KERNEL==\"zfcp\", RUN+=\"/sbin/multipath -r\"" >> ${configFile} fi fi tgtNode=$(echo ${xcat_tgtFile} | sed -e 's/^\/dev\///') if [[ $os == sles* || $os == rhel* ]]; then wwpn_lun=$(echo ${xcat_srcFile} | sed -e 's/^\/dev.*-zfcp-//') wwpn=$(echo ${wwpn_lun} | sed -e 's/:0x.*//') lun=$(echo ${wwpn_lun} | sed -e 's/^0x.*://') else wwpn_lun=$(echo ${xcat_srcFile} | sed -e 's/^\/dev.*-fc-//') wwpn=$(echo ${wwpn_lun} | sed -e 's/-lun-.*//') lun=$(echo ${wwpn_lun} | sed -e 's/^0x.*-lun-//') fi multipath=0 out=`echo $wwpn | grep ","` if [[ -n "$out" ]]; then multipath=1 fi if [[ $os == sles* || $os == rhel* ]]; then fcp=$(echo ${xcat_srcFile} | sed -e 's/^\/dev.*ccw-0.0.//' | sed -e 's/-zfcp-.*$//') else fcp=$(echo ${xcat_srcFile} | sed -e 's/^\/dev.*ccw-0.0.//' | sed -e 's/-fc-.*$//') fi oldIFS=$IFS IFS="," fcpList=($fcp) for fcp in ${fcpList[@]} do if [[ $multipath == 1 ]]; then # Find the name of the multipath device by arbitrary one path in the set wwpnList=($wwpn) for wwpn in ${wwpnList[@]} do if [[ ${wwpn:0:2} -ne "0x" ]]; then wwpn="0x$wwpn" fi if [[ $os == sles* || $os == rhel* ]]; then cur_wwpn_lun=${wwpn}:${lun} srcFile=$(echo ${xcat_srcFile} | sed -e 's/-zfcp-.*//')"-zfcp-"$cur_wwpn_lun srcFile=$(echo ${srcFile} | sed -e 's/ccw-.*-zfcp/ccw-0.0.'$fcp'-zfcp/') else cur_wwpn_lun=${wwpn}-lun-${lun} srcFile=$(echo ${xcat_srcFile} | sed -e 's/-fc-.*//')"-fc-"$cur_wwpn_lun srcFile=$(echo ${srcFile} | sed -e 's/ccw-.*-fc/ccw-0.0.'$fcp'-fc/') fi out=`/usr/bin/stat --printf=%n ${srcFile}` if (( $? 
!= 0 )); then echo "$funcName (Error) Unable to stat the source file: $srcFile" continue fi out=`/sbin/udevadm info --query=all --name=$srcFile | grep ID_SERIAL=` devName=$(echo ${out} | sed -e 's/^E:\s//') multipathUuid=$(echo $devName | sed -e 's/ID_SERIAL=//') if [[ -n "$multipathUuid" ]]; then break fi done if [[ -z "$multipathUuid" ]]; then echo "$funcName (Error) Building up multipath failed!" return fi else if [[ $os == sles* || $os == rhel* ]]; then srcFile=$(echo ${xcat_srcFile} | sed -e 's/ccw-.*-zfcp/ccw-0.0.'$fcp'-zfcp/') else srcFile=$(echo ${xcat_srcFile} | sed -e 's/ccw-.*-zfcp/ccw-0.0.'$fcp'-fc/') fi out=`/usr/bin/stat --printf=%n ${srcFile}` if (( $? != 0 )); then echo "$funcName (Error) Unable to stat the source file: $xcat_srcFile" return fi fi done IFS=$oldIFS # Add the entry into udev config file if [[ $multipath == 1 ]]; then echo "KERNEL==\"dm*\", ENV{DM_UUID}==\"mpath-${multipathUuid}\", SYMLINK+=\"${tgtNode}\"" >> ${configFile} udevadm control --reload udevadm trigger --sysname-match=dm-* else echo "KERNEL==\"sd*\", ATTRS{wwpn}==\"${wwpn}\", ATTRS{fcp_lun}==\"${lun}\", SYMLINK+=\"${tgtNode}%n\"" >> ${configFile} udevadm control --reload udevadm trigger --sysname-match=sd* fi echo "$funcName successfully create the file system node ${xcat_tgtFile}" ########################################################################## # Handle removing a file system node # Disk file input parameters: # action - "removefilesysnode" # tgtFile - location/name of the target file for the mknod command ########################################################################## elif [[ $xcat_action == "removefilesysnode" ]]; then echo "Removing a file system node, target: $xcat_tgtFile" if [[ ! 
-n $xcat_tgtFile ]]; then echo "$funcName (Error) Target file for creating a file system node was not specified" return fi configFile='/etc/udev/rules.d/56-zfcp.rules' tgtNode=$(echo ${xcat_tgtFile} | sed -e 's/^\/dev\///') sed -i -e /SYMLINK+=\"${tgtNode}%n\"/d ${configFile} sed -i -e /SYMLINK+=\"${tgtNode}\"/d ${configFile} udevadm control --reload udevadm trigger --sysname-match=sd* udevadm trigger --sysname-match=dm-* echo "$funcName successfully remove the file system node ${xcat_tgtFile}" ########################################################################## # Handle adding a SCSI volume # Disk file input parameters: # action - "addScsiVolume" # fcpAddr - FCP device address # wwpn - WWPN number # lun - LUN number ########################################################################## elif [[ $xcat_action == "addScsiVolume" ]]; then echo "Adding a SCSI Volume, FCP addr: $xcat_fcpAddr, WWPN: $xcat_wwpn, LUN: $xcat_lun" # Validate the input if [[ ! -n $xcat_fcpAddr ]]; then echo "$funcName (Error) FCP address was not specified" return fi xcat_fcpAddr=`echo $xcat_fcpAddr | tr '[A-Z]' '[a-z]'` if [[ ! -n $xcat_wwpn ]]; then echo "$funcName (Error) WWPN was not specified" return fi xcat_wwpn=`echo $xcat_wwpn | tr '[A-Z]' '[a-z]'` if [[ ! 
-n $xcat_lun ]]; then echo "$funcName (Error) LUN was not specified" return fi xcat_lun=`echo $xcat_lun | tr '[A-Z]' '[a-z]'` decimal_lun=$((16#${xcat_lun:0:4})) # Online the device oldIFS=$IFS IFS="," fcp_list=($xcat_fcpAddr) for fcp in ${fcp_list[@]} do rc= onlineDevice $fcp if (( rc != 0 )); then return fi if [[ $os == sles12* ]]; then out=`cat /boot/zipl/active_devices.txt | grep -i "0.0.$fcp"` if [[ -z $out ]]; then /sbin/zfcp_host_configure 0.0.$fcp 1 fi elif [[ $os == sles11* ]]; then /sbin/zfcp_host_configure 0.0.$fcp 1 elif [[ $os == ubuntu* ]]; then /sbin/chzdev zfcp-host $fcp -e fi done multipath=0 out=`echo $xcat_wwpn | grep ","` if [[ -n "$out" ]]; then multipath=1 fi # Start multipathd service if [[ $multipath == 1 ]]; then if [[ $os == sles* ]]; then insserv multipathd elif [[ $os == rhel6* ]]; then chkconfig multipathd on else systemctl enable multipathd fi modprobe dm-multipath fi for fcp in ${fcp_list[@]} do wwpn_list=($xcat_wwpn) for wwpn in ${wwpn_list[@]} do # Set WWPN and LUN in sysfs echo 0x$xcat_lun > /sys/bus/ccw/drivers/zfcp/0.0.$fcp/0x$wwpn/unit_add # Set WWPN and LUN in configuration files if [[ $os == sles* ]]; then # SLES: /etc/udev/rules.d/51-zfcp* /sbin/zfcp_disk_configure 0.0.$fcp $wwpn $xcat_lun 1 # Configure zFCP device to be persistent touch /etc/udev/rules.d/51-zfcp-0.0.$fcp.rules # Check if the file already contains the zFCP channel out=`cat "/etc/udev/rules.d/51-zfcp-0.0.$fcp.rules" | egrep -i "ccw/0.0.$fcp]online"` if [[ ! 
$out ]]; then echo "ACTION==\"add\", SUBSYSTEM==\"ccw\", KERNEL==\"0.0.$fcp\", IMPORT{program}=\"collect 0.0.$fcp %k 0.0.$fcp zfcp\"" \ | tee -a /etc/udev/rules.d/51-zfcp-0.0.$fcp.rules echo "ACTION==\"add\", SUBSYSTEM==\"drivers\", KERNEL==\"zfcp\", IMPORT{program}=\"collect 0.0.$fcp %k 0.0.$fcp zfcp\"" \ | tee -a /etc/udev/rules.d/51-zfcp-0.0.$fcp.rules echo "ACTION==\"add\", ENV{COLLECT_0.0.$fcp}==\"0\", ATTR{[ccw/0.0.$fcp]online}=\"1\"" \ | tee -a /etc/udev/rules.d/51-zfcp-0.0.$fcp.rules fi echo "ACTION==\"add\", KERNEL==\"rport-*\", ATTR{port_name}==\"0x$wwpn\", SUBSYSTEMS==\"ccw\", KERNELS==\"0.0.$fcp\", ATTR{[ccw/0.0.$fcp]0x$wwpn/unit_add}=\"0x$xcat_lun\"" \ | tee -a /etc/udev/rules.d/51-zfcp-0.0.$fcp.rules elif [[ $os == rhel* ]]; then # RHEL: /etc/zfcp.conf echo "0.0.$fcp 0x$wwpn 0x$xcat_lun" >> /etc/zfcp.conf echo "add" > /sys/bus/ccw/devices/0.0.$fcp/uevent elif [[ $os == ubuntu* ]]; then # Ubuntu: chzdev zfcp-lun 0.0.$device:0x$wwpn:0x$lun -e /sbin/chzdev zfcp-lun 0.0.$fcp:0x$wwpn:0x$xcat_lun -e fi # Settle the file system so when we are done the device is fully available if [[ $(which udevadm 2> /dev/null) != '' ]]; then udevadm settle else udevsettle fi if [[ $os == rhel* || $os == sles* ]]; then if [[ ! -e "/dev/disk/by-path/ccw-0.0.${fcp}-zfcp-0x${wwpn}:0x${xcat_lun}" ]]; then # Sometimes the file takes longer to appear. We will wait up to 3 minutes. maxTime=0 for time in 1 2 2 5 10 10 30 60 60 do if [[ -e "/dev/disk/by-path/ccw-0.0.${fcp}-zfcp-0x${wwpn}:0x${xcat_lun}" ]]; then # Leave the loop now that the file exists break fi maxTime=$maxTime+$time echo "Sleeping for $time seconds to allow /dev/disk/by-path/ccw-0.0.${fcp}-zfcp-0x${wwpn}:0x${xcat_lun} to be created" sleep $time done fi if [[ ! -e "/dev/disk/by-path/ccw-0.0.${fcp}-zfcp-0x${wwpn}:0x${xcat_lun}" ]]; then echo "/dev/disk/by-path/ccw-0.0.${fcp}-zfcp-0x${wwpn}:0x${xcat_lun} did not appear in $maxTime seconds, continuing." fi elif [[ $os == ubuntu* ]]; then if [[ ! 
-e "/dev/disk/by-path/ccw-0.0.${fcp}-fc-0x${wwpn}-lun-${decimal_lun}" ]]; then # Sometimes the file takes longer to appear. We will wait up to 3 minutes. maxTime=0 for time in 1 2 2 5 10 10 30 60 60 do if [[ -e "/dev/disk/by-path/ccw-0.0.${fcp}-fc-0x${wwpn}-lun-${decimal_lun}" ]]; then # Leave the loop now that the file exists break fi maxTime=$maxTime+$time echo "Sleeping for $time seconds to allow /dev/disk/by-path/ccw-0.0.${fcp}-fc-0x${wwpn}-lun-${decimal_lun} to be created" sleep $time done fi if [[ ! -e "/dev/disk/by-path/ccw-0.0.${fcp}-fc-0x${wwpn}-lun-${decimal_lun}" ]]; then echo "/dev/disk/by-path/ccw-0.0.${fcp}-fc-0x${wwpn}-lun-${decimal_lun} did not appear in $maxTime seconds, continuing." fi fi done done IFS=$oldIFS /sbin/multipath -r echo "$funcName successfully create the SCSI volume" ########################################################################## # Handle removing a SCSI volume # Disk file input parameters: # action - "removeScsiVolume" # fcpAddr - FCP device address # wwpn - WWPN number # lun - LUN number ########################################################################## elif [[ $xcat_action == "removeScsiVolume" ]]; then echo "Removing a SCSI Volume, FCP addr: $xcat_fcpAddr, WWPN: $xcat_wwpn, LUN: $xcat_lun" # Validate the input if [[ ! -n $xcat_fcpAddr ]]; then echo "$funcName (Error) FCP address was not specified" return fi xcat_fcpAddr=`echo $xcat_fcpAddr | tr '[A-Z]' '[a-z]'` if [[ ! -n $xcat_wwpn ]]; then echo "$funcName (Error) WWPN was not specified" return fi xcat_wwpn=`echo $xcat_wwpn | tr '[A-Z]' '[a-z]'` if [[ ! 
-n $xcat_lun ]]; then echo "$funcName (Error) LUN was not specified" return fi xcat_lun=`echo $xcat_lun | tr '[A-Z]' '[a-z]'` oldIFS=$IFS IFS="," fcp_list=($xcat_fcpAddr) for fcp in ${fcp_list[@]} do wwpn_list=($xcat_wwpn) for wwpn in ${wwpn_list[@]} do # Delete the SCSI device scsiDevice=`lszfcp -l 0x$xcat_lun | grep 0x$wwpn | cut -d " " -f2` if [[ -n $scsiDevice ]]; then echo 1 > "/sys/bus/scsi/devices/$scsiDevice/delete" fi # Delete WWPN and LUN from sysfs if [[ -e /sys/bus/ccw/drivers/zfcp/0.0.$fcp/0x$wwpn/unit_remove ]]; then if [[ $(which udevadm 2> /dev/null) != '' ]]; then udevadm settle else udevsettle fi echo 0x$xcat_lun > /sys/bus/ccw/drivers/zfcp/0.0.$fcp/0x$wwpn/unit_remove fi # Delete WWPN and LUN from configuration files if [[ $os == sles11* || $os == sles12* ]]; then # SLES: /etc/udev/rules.d/51-zfcp* expression="/$xcat_lun/d" sed --in-place -e $expression /etc/udev/rules.d/51-zfcp-0.0.$fcp.rules elif [[ $os == rhel* ]]; then # RHEL: /etc/zfcp.conf expression="/$xcat_lun/d" sed --in-place -e $expression /etc/zfcp.conf elif [[ $os == ubuntu* ]]; then # Ubuntu: chzdev zfcp-lun 0.0.$device:0x$wwpn:0x$lun -d /sbin/chzdev zfcp-lun 0.0.$fcp:0x$wwpn:0x$xcat_lun -d fi done done IFS=$oldIFS /sbin/multipath -W /sbin/multipath -r echo "$funcName successfully remove the SCSI volume" ########################################################################### # Handle adding a mdisk based ephemeral disk. # Disk file input parameters: # action - "addMdisk" # vaddr - virtual address of the minidisk # filesys - Filesystem type # mntdir - The directory that mount the mdisk to ########################################################################## elif [[ $xcat_action == "addMdisk" ]]; then echo "Adding a minidisk based ephemeral disk, Vaddr: $xcat_vaddr, Filesystem: $xcat_filesys mountpoint:$xcat_mntdir" # Validate the input if [[ ! 
-n $xcat_vaddr ]]; then echo "$funcName (Error) Virtual address was not specified" return fi xcat_vaddr=`echo $xcat_vaddr | tr '[A-Z]' '[a-z]'` # Online the device # When the distro is sles15, the following /sbin/dasd_configure will online and configure device if [[ $os != sles15* ]]; then onlineDevice $xcat_vaddr if (( $? != 0 )); then echo "$funcName (Error) fail to online the disk $xcat_vaddr" fi fi # Configure the added dasd to be persistent echo "Permanently online the ephemeral disk" if [[ $os == rhel* ]]; then out=`cat "/etc/dasd.conf" | egrep -i $xcat_vaddr` if [[ ! $out ]]; then echo "0.0.$xcat_vaddr" >> /etc/dasd.conf fi elif [[ $os == sles* ]]; then /sbin/dasd_configure 0.0.$xcat_vaddr 1 elif [[ $os == ubuntu22* ]]; then chzdev -e dasd 0.0.$xcat_vaddr elif [[ $os == ubuntu20* ]]; then chzdev -e dasd 0.0.$xcat_vaddr elif [[ $os == ubuntu16* ]]; then touch /etc/sysconfig/hardware/config-ccw-0.0.$xcat_vaddr else echo "$funcName (Error) failed to permanently online the disk:$xcat_vaddr on os: $os, please check if $os is in the supported distribution list" return fi # Mount the mdisk to the specified mount point echo "Mounting the ephemeral disk $xcat_vaddr to directory $xcat_mntdir" if [[ -d $xcat_mntdir ]]; then rm -rf $xcat_mntdir fi mkdir -p $xcat_mntdir cp /etc/fstab /etc/fstab.bak out=`cat "/etc/fstab" | egrep -i "ccw-0.0.$xcat_vaddr"` if [[ $out ]]; then sed -i '/ccw-0.0.'"$xcat_vaddr"'/d' /etc/fstab fi if [[ $os == sles12* ]]; then echo "/dev/disk/by-path/ccw-0.0.${xcat_vaddr}-part1 $xcat_mntdir $xcat_filesys defaults,nofail 0 0" >> /etc/fstab else echo "/dev/disk/by-path/ccw-0.0.${xcat_vaddr}-part1 $xcat_mntdir $xcat_filesys defaults 0 0" >> /etc/fstab fi out=`mount -a 2>&1` if [[ "$out" ]]; then echo "Fail to mount the disk $xcat_vaddr with reason $out" mv /etc/fstab.bak /etc/fstab mount -a else echo "The disk $xcat_vaddr has been mounted to $xcat_mntdir in format $xcat_filesys successfully" fi # in case it's VDISK, this is the only chance that we 
can do the swap setup # and if it's not VDISK, mkswap again do no harm if [[ $xcat_filesys == swap ]]; then mkswap /dev/disk/by-path/ccw-0.0.${xcat_vaddr}-part1 # this is because we need ask /etc/rc.d/rc.local to include swap disk setup # because VDISK will be setup every time and need format every time Linux startup # actually with those scripts, we don't need run them now, can defer to VM startup phase echo "mkswap /dev/disk/by-path/ccw-0.0.${xcat_vaddr}-part1" >> /etc/rc.d/rc.local echo "swapon -a" >> /etc/rc.d/rc.local chmod +x /etc/rc.d/rc.local fi out=`swapon -a` if [[ "$out" ]]; then echo "Failed to swapon swap partition with reason $out" else echo "Swapon the swap partition successfully" fi fi return } ############################################################################ # Main Code Section ############################################################################ # Get Linux version getOsVersion setupDisk $@ rm -f setupDisk zVMCloudConnector-1.6.3/data/uwsgi-zvmsdk.conf0000664000175000017510000000065013575566551020777 0ustar ruirui00000000000000[uwsgi] chmod-socket = 666 uwsgi-socket = 127.0.0.1:35000 lazy-apps = true add-header = Connection: close buffer-size = 65535 thunder-lock = true plugins = python enable-threads = true exit-on-reload = true die-on-term = true master = true processes = 2 threads = 32 wsgi-file = /usr/bin/zvmsdk-wsgi pidfile = /tmp/zvmsdk-wsgi.pid socket = /tmp/zvmsdk-wsgi.socket uid = zvmsdk gid = zvmsdk logto = /var/log/zvmsdk/uwsgi.log zVMCloudConnector-1.6.3/data/sdkserver.service0000664000175000017510000000027313575566551021051 0ustar ruirui00000000000000[Unit] Description=zVM SDK API server After=network.target syslog.target [Service] Type=simple User=zvmsdk Group=zvmsdk ExecStart=/usr/bin/sdkserver [Install] WantedBy=multi-user.targetzVMCloudConnector-1.6.3/setup.py0000664000175000017510000000540214315231673016242 0ustar ruirui00000000000000# Copyright 2017 IBM Corp. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import setuptools from zvmsdk import version as sdkversion setuptools.setup( name='zVMCloudConnector', version=sdkversion.__version__, license='ASL 2.0', author='IBM', description='z/VM cloud management library in Python', long_description=open('README.md').read(), long_description_content_type='text/markdown', url='https://github.com/openmainframeproject/python-zvm-sdk', keywords='zvm cloud library', install_requires=open('requirements.txt').read(), packages=setuptools.find_packages(exclude=["zvmsdk.tests.fvt*"]), package_data={ 'zvmsdk': [ 'vmactions/templates/grow_root_volume.j2', 'volumeops/templates/rhel7_attach_volume.j2', 'volumeops/templates/rhel7_detach_volume.j2', 'volumeops/templates/rhel8_attach_volume.j2', 'volumeops/templates/rhel8_detach_volume.j2', 'volumeops/templates/sles_attach_volume.j2', 'volumeops/templates/sles_detach_volume.j2', 'volumeops/templates/ubuntu_attach_volume.j2', 'volumeops/templates/ubuntu_detach_volume.j2' ] }, classifiers=[ "Intended Audience :: Information Technology", "Intended Audience :: System Administrators", "License :: OSI Approved :: Apache Software License", "Operating System :: POSIX :: Linux", "Programming Language :: Python", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", ], entry_points={ 'wsgi_scripts': [ 'sdk_api = 
zvmsdk.sdkwsgi.wsgi:init_application', ] }, scripts=['scripts/sdkserver', 'zvmsdk/sdkwsgi/zvmsdk-wsgi', 'scripts/zvmsdk-gentoken'], data_files=[('/lib/systemd/system', ['data/sdkserver.service']), ('/var/lib/zvmsdk', ['data/setupDisk']), ('/etc/sudoers.d', ['data/sudoers-zvmsdk']), ('/etc/zvmsdk', ['data/uwsgi-zvmsdk.conf']), ('/etc/zvmsdk', ['doc/source/zvmsdk.conf.sample'])], ) zVMCloudConnector-1.6.3/zVMCloudConnector.egg-info/0000775000175000017510000000000014315232035021570 5ustar ruirui00000000000000zVMCloudConnector-1.6.3/zVMCloudConnector.egg-info/dependency_links.txt0000664000175000017510000000000114315232035025636 0ustar ruirui00000000000000 zVMCloudConnector-1.6.3/zVMCloudConnector.egg-info/entry_points.txt0000664000175000017510000000007714315232035025072 0ustar ruirui00000000000000[wsgi_scripts] sdk_api = zvmsdk.sdkwsgi.wsgi:init_application zVMCloudConnector-1.6.3/zVMCloudConnector.egg-info/top_level.txt0000664000175000017510000000003514315232035024320 0ustar ruirui00000000000000smtLayer zvmconnector zvmsdk zVMCloudConnector-1.6.3/zVMCloudConnector.egg-info/PKG-INFO0000664000175000017510000000455114315232035022672 0ustar ruirui00000000000000Metadata-Version: 2.1 Name: zVMCloudConnector Version: 1.6.3 Summary: z/VM cloud management library in Python Home-page: https://github.com/openmainframeproject/python-zvm-sdk Author: IBM License: ASL 2.0 Keywords: zvm cloud library Platform: UNKNOWN Classifier: Intended Audience :: Information Technology Classifier: Intended Audience :: System Administrators Classifier: License :: OSI Approved :: Apache Software License Classifier: Operating System :: POSIX :: Linux Classifier: Programming Language :: Python Classifier: Programming Language :: Python :: 2 Classifier: Programming Language :: Python :: 2.7 Classifier: Programming Language :: Python :: 3 Classifier: Programming Language :: Python :: 3.5 Classifier: Programming Language :: Python :: 3.6 Description-Content-Type: text/markdown License-File: LICENSE 
![](https://github.com/openmainframeproject/artwork/raw/master/projects/feilong/feilong-color.svg) ![License](https://img.shields.io/github/license/OpenMainframeProject/feilong) # Feilong ## Description Feilong is a development sdk for managing z/VM. It provides a set of APIs to operate z/VM including guest, image, network, volume etc. Just like os-win for nova hyperv driver and oslo.vmware for nova vmware driver, Feilong is for nova z/VM driver and other z/VM related openstack driver such as neutron, ceilometer. ## Quickstart Please refer to [Quick Start Guide](https://cloudlib4zvm.readthedocs.io/en/latest/quickstart.html). ## Documentation Please refer to [Documentation of Feilong](https://cloudlib4zvm.readthedocs.io/en/latest/index.html). ## License This package is licensed under the [Apache 2.0 License](LICENSE) ## Bug reporting If you encounter any problem with this package, please open a bug against [cloud connector issue tracker](https://bugs.launchpad.net/python-zvm-sdk/+bug) ## Governance Feilong is a hosted project at the [Open Mainframe Project](https://openmainframeproject.com), and is openly governed as defined in [GOVERNANCE.md](GOVERNANCE.md). ---- Creative Commons License
Documentation license: Creative Commons Attribution 4.0 International License. zVMCloudConnector-1.6.3/zVMCloudConnector.egg-info/requires.txt0000664000175000017510000000015714315232035024173 0ustar ruirui00000000000000jsonschema>=2.3.0 netaddr>=0.7.5 PyJWT>=1.0.1 requests>=2.6.0 Routes>=2.2 six>=1.9.0 WebOb>=1.2.3 PyYAML>=3.10 zVMCloudConnector-1.6.3/zVMCloudConnector.egg-info/SOURCES.txt0000664000175000017510000000752614315232035023466 0ustar ruirui00000000000000CONTRIBUTING.md LICENSE MANIFEST.in README.md requirements.txt setup.py test-requirements.txt tox.ini data/sdkserver.service data/setupDisk data/sudoers-zvmsdk data/uwsgi-zvmsdk.conf doc/source/zvmsdk.conf.sample scripts/sdkserver scripts/zvmsdk-gentoken smtLayer/ReqHandle.py smtLayer/__init__.py smtLayer/changeVM.py smtLayer/cmdVM.py smtLayer/deleteVM.py smtLayer/generalUtils.py smtLayer/getHost.py smtLayer/getVM.py smtLayer/makeVM.py smtLayer/migrateVM.py smtLayer/msgs.py smtLayer/powerVM.py smtLayer/smapi.py smtLayer/smt.py smtLayer/smtCmd.py smtLayer/smtTest.py smtLayer/vmStatus.py smtLayer/vmUtils.py smtLayer/tests/__init__.py smtLayer/tests/unit/__init__.py smtLayer/tests/unit/base.py smtLayer/tests/unit/test_getHost.py smtLayer/tests/unit/test_getVM.py smtLayer/tests/unit/test_makeVM.py smtLayer/tests/unit/test_vmStatus.py smtLayer/tests/unit/test_vmUtils.py zVMCloudConnector.egg-info/PKG-INFO zVMCloudConnector.egg-info/SOURCES.txt zVMCloudConnector.egg-info/dependency_links.txt zVMCloudConnector.egg-info/entry_points.txt zVMCloudConnector.egg-info/requires.txt zVMCloudConnector.egg-info/top_level.txt zvmconnector/__init__.py zvmconnector/connector.py zvmconnector/restclient.py zvmconnector/socketclient.py zvmsdk/__init__.py zvmsdk/api.py zvmsdk/config.py zvmsdk/configdrive.py zvmsdk/constants.py zvmsdk/database.py zvmsdk/dist.py zvmsdk/exception.py zvmsdk/hostops.py zvmsdk/imageops.py zvmsdk/log.py zvmsdk/monitor.py zvmsdk/networkops.py zvmsdk/returncode.py zvmsdk/sdkserver.py zvmsdk/smtclient.py 
zvmsdk/utils.py zvmsdk/version.py zvmsdk/vmops.py zvmsdk/volumeop.py zvmsdk/sdkwsgi/__init__.py zvmsdk/sdkwsgi/deploy.py zvmsdk/sdkwsgi/handler.py zvmsdk/sdkwsgi/requestlog.py zvmsdk/sdkwsgi/util.py zvmsdk/sdkwsgi/zvmsdk-wsgi zvmsdk/sdkwsgi/handlers/__init__.py zvmsdk/sdkwsgi/handlers/file.py zvmsdk/sdkwsgi/handlers/guest.py zvmsdk/sdkwsgi/handlers/healthy.py zvmsdk/sdkwsgi/handlers/host.py zvmsdk/sdkwsgi/handlers/image.py zvmsdk/sdkwsgi/handlers/tokens.py zvmsdk/sdkwsgi/handlers/version.py zvmsdk/sdkwsgi/handlers/volume.py zvmsdk/sdkwsgi/handlers/vswitch.py zvmsdk/sdkwsgi/schemas/__init__.py zvmsdk/sdkwsgi/schemas/guest.py zvmsdk/sdkwsgi/schemas/host.py zvmsdk/sdkwsgi/schemas/image.py zvmsdk/sdkwsgi/schemas/volume.py zvmsdk/sdkwsgi/schemas/vswitch.py zvmsdk/sdkwsgi/validation/__init__.py zvmsdk/sdkwsgi/validation/parameter_types.py zvmsdk/tests/__init__.py zvmsdk/tests/unit/__init__.py zvmsdk/tests/unit/base.py zvmsdk/tests/unit/test_api.py zvmsdk/tests/unit/test_config.py zvmsdk/tests/unit/test_database.py zvmsdk/tests/unit/test_dist.py zvmsdk/tests/unit/test_hostops.py zvmsdk/tests/unit/test_imageops.py zvmsdk/tests/unit/test_monitor.py zvmsdk/tests/unit/test_networkops.py zvmsdk/tests/unit/test_smtclient.py zvmsdk/tests/unit/test_utils.py zvmsdk/tests/unit/test_vmops.py zvmsdk/tests/unit/test_volumeop.py zvmsdk/tests/unit/sdkclientcases/__init__.py zvmsdk/tests/unit/sdkclientcases/test_restclient.py zvmsdk/tests/unit/sdkwsgi/__init__.py zvmsdk/tests/unit/sdkwsgi/test_handler.py zvmsdk/tests/unit/sdkwsgi/test_utils.py zvmsdk/tests/unit/sdkwsgi/handlers/__init__.py zvmsdk/tests/unit/sdkwsgi/handlers/test_guest.py zvmsdk/tests/unit/sdkwsgi/handlers/test_host.py zvmsdk/tests/unit/sdkwsgi/handlers/test_image.py zvmsdk/tests/unit/sdkwsgi/handlers/test_version.py zvmsdk/tests/unit/sdkwsgi/handlers/test_volume.py zvmsdk/tests/unit/sdkwsgi/handlers/test_vswitch.py zvmsdk/vmactions/__init__.py zvmsdk/vmactions/templates/grow_root_volume.j2 zvmsdk/volumeops/__init__.py 
zvmsdk/volumeops/templates/rhel7_attach_volume.j2 zvmsdk/volumeops/templates/rhel7_detach_volume.j2 zvmsdk/volumeops/templates/rhel8_attach_volume.j2 zvmsdk/volumeops/templates/rhel8_detach_volume.j2 zvmsdk/volumeops/templates/sles_attach_volume.j2 zvmsdk/volumeops/templates/sles_detach_volume.j2 zvmsdk/volumeops/templates/ubuntu_attach_volume.j2 zvmsdk/volumeops/templates/ubuntu_detach_volume.j2zVMCloudConnector-1.6.3/test-requirements.txt0000664000175000017510000000016014266177632020776 0ustar ruirui00000000000000coverage nose mock>=2.0.0 # BSD PyYAML>=3.10 python-subunit>=1.0.0 # Apache-2.0/BSD sphinx>=1.6.2,!=1.8.0 # BSD zVMCloudConnector-1.6.3/setup.cfg0000664000175000017510000000004614315232035016341 0ustar ruirui00000000000000[egg_info] tag_build = tag_date = 0 zVMCloudConnector-1.6.3/doc/0000775000175000017510000000000014315232035015265 5ustar ruirui00000000000000zVMCloudConnector-1.6.3/doc/source/0000775000175000017510000000000014315232035016565 5ustar ruirui00000000000000zVMCloudConnector-1.6.3/doc/source/zvmsdk.conf.sample0000664000175000017510000004024314315210052022230 0ustar ruirui00000000000000[database] # # Directory to store database. # # SDK databases are used to store a set of tables which contain the # information of network, volume, image, etc. This option is used to # tell SDK where to store the database files, make sure the process # running SDK is able to read write and execute the directory. # # This param is optional #dir=/var/lib/zvmsdk/databases/ [file] # # Directory to store sdk imported or exported files. # # SDK file repository to store the imported files and the files that will be # exported, the imported files will be put into /imported # the files to be exported will be put into /exported # # This param is optional #file_repository=/var/lib/zvmsdk/files [guest] # # The maximum allowed console log size, in kilobytes. # # Console logs might be transferred to sdk user, this option controls how # large each file can be. 
A smaller size may mean more calls will be needed # to transfer large consoles, which may not be desirable for performance reasons. # # This param is optional #console_log_size=100 # # Whether to automatically extend the partition and filesystem of guest. # # If set to True, when deploying an image to a larger disk, zvmsdk # automatically extends the last partition and the file system to # use up the whole disk. # # If do not want to do the extend action automaictly, you must set this option # to be False. # # This param is optional #extend_partition_fs=True # # The maximum time waiting until the guest reachable after started. # # When starting a guest, specify the timeout value will check the guest status # untils it becames reachable or timeout. # # This param is optional #reachable_timeout=180 # # The interval time between 2 retries, in seconds. # # This will take effect only when you set softstop_retries item. # What's more, the value of softstop_timeout/softstop_interval is # the times retried. # # This param is optional #softstop_interval=10 # # The maximum time waiting until the guest shut down. # # Sometimes, the shutdown action will take a bit lone time to complete. # If you want to make sure the guest in shut-down status after executing action # of softstop, this will help. # # This param is optional #softstop_timeout=120 [image] # # Default compress level for captured image. # # This param is optional #default_compress_level=6 # # Directory to store sdk images. # # SDK image repository to store the imported images and the staging images that # is in snapshotting. Once snapshot finished, the image will be removed to the # netboot directory accordingly. Two kinds of image repository looks like: # /var/lib/zvmsdk/images/netboot// # /var/lib/zvmsdk/images/staging// # # This param is optional #sdk_image_repository=/var/lib/zvmsdk/images [logging] # # Directory where log file to be put into. 
# # SDK has a set of logs to help administrator to debug # and aduit actions performed through SDK. Edit this option # if you want to put logs into specified place. # # Please ensure the service running on the consume which # consumes SDK has the authorization to write to the path. # # This param is optional #log_dir=/var/log/zvmsdk/ # # Level of the log. # # SDK utilize python logging package to help admin debug # or analyze issues. it's recommend to set this value # to logging.DEBUG to get more detailed logs and set it to # logging.INFO(default) in normal situation. # # recommend values: # logging.ERROR: level above ERROR will be written to log file. # logging.WARNINS: level above WARNING(ERROR, WARNING) # will be written to log file. # logging.INFO: level above INFO(ERROR, WARNING, INFO) # will be written to log file. # logging.DEBUG: All log level (ERROR, WARNING, INFO, DEBUG) # will be written to log file. # # This param is optional #log_level=logging.INFO [monitor] # # Cached monitor data update interval # # This is used to prevent excessive effort spent retrieving the # monitor data by calling the SDK backend utilities. When this cache # is enabled, a inspect call will only call the SDK backend utilities # when the inspected guest's info does not exist in the cache or # when the cache data is expired. And when an cache update is needed, # all the existing guests' data will be retrieved in a single call to # the backend. # # When this value is below or equal to zero, the cache # will be disabled and each inspect call will need to call the backend # utilities to get the inspected guest's monitor data. # # This param is optional #cache_interval=300 [network] # # IP address of the Linux machine which is running SDK on. # # Some remote copy operations need to be performed during guest creation, # this option tell the SDK the host ip which can be used to perform copy # from and copy to operations. 
# # This param is required #my_ip=None [sdkserver] # # The IP address that the SDK server is listen on. # # When the SDK server deamon starts, it will try to bind to # this address and port bind_port, and wait for the SDK client # connection to handle API request. # # This param is optional #bind_addr=127.0.0.1 # # The port that the SDK server is listen on. # # This will work as a pair with bind_addr when the SDK server daemon # starts, more info can be found in that configuration description. # # This param is optional #bind_port=2000 # # The maximum number of worker thread in SDK server to handle client requests. # # These worker threads would work concurrently to handle requests from client. # This value should be adjusted according to the system resource and workload. # # This param is optional #max_worker_count=64 # # The size of request queue in SDK server. # # SDK server maintains a queue to keep all the accepted but not handled requests, # and the SDK server workers fetch requests from this queue. # To some extend, this queue size decides the max socket opened in SDK server. # This value should be adjusted according to the system resource. # # This param is optional #request_queue_size=128 [volume] # # volume fcp list. # # SDK will only use the fcp devices in the scope of this value. # # This param is optional #fcp_list= # # fcp pair selection algorithm # # fcp_list example: # fa00-fa02; fb00-fb02 # # If use get_fcp_pair_with_same_index, # then fcp pair is randomly selected from below combinations. # [fa00,fb00],[fa01,fb01],[fa02,fb02] # # If use get_fcp_pair, # then fcp pair is randomly selected from below combinations. # [fa00,fb00],[fa01,fb00],[fa02,fb00] # [fa00,fb01],[fa01,fb01],[fa02,fb01] # [fa00,fb02],[fa01,fb02],[fa02,fb02] # # Possible value: # 0 : use get_fcp_pair. 
this is the default # 1 : use get_fcp_pair_with_same_index # # This param is optional #get_fcp_pair_with_same_index=0 # # The timeout value for waiting attach/detach punch scripts # execution, in seconds. # # The default value is 1800 seconds, if the execution of punch scripts # reached the timeout, the attach/detach will fail. # # This param is optional #punch_script_execution_timeout=1800 # # The timeout value for waiting refresh_bootmap execution, in seconds. # # The default value is 1200 seconds, if the execution of refresh_bootmap # reached the timeout, the process of refresh_bootmap will be stopped. # # This param is optional #refresh_bootmap_timeout=1200 [wsgi] # # Whether auth will be used. # # When sending http request from outside to running zvmsdk, # Client will be requested to input username/password in order # to authorize the call. # Set this to 'none' indicated no auth will be used and 'auth' # means username and password need to be specified. # # Possible value: # 'none': no auth will be required # 'auth': need auth, currently pyjwt is used to return a token # to caller if the username and password is correct. # # This param is optional #auth=none # # The max total number of concurrent deploy and capture requests allowed in a # single z/VM Cloud Connector process. # # If more requests than this value are revieved concurrently, the z/VM Cloud # Connector would reject the requests and return error to avoid resource # exhaustion. # . # # This param is optional #max_concurrent_deploy_capture=20 # # file path that contains admin-token to access sdk http server. # # Admin-token in order to get a user-token from zvm sdk, and the user-token # will be used to validate request before user-token expire. # # This param is optional #token_path=/etc/zvmsdk/token.dat # # How long the token is valid. # # If a token auth is used, the token return to user will be # expired after the period passed. 
This ensure an user who # get this token will not be authorized all the time, a new # token need to be recreated after certain time period. # # This param is optional #token_validation_period=3600 [zvm] # # Only used for SMAPIOUT is not ready. # # This param is optional #bypass_smapiout=False # # Default LOGONBY userid(s) for the cloud. # # This is a set of z/VM userid(s) which are allowed to logon using the LOGONBY # keyword to the guests created by the z/VM SDK solution, compatible with # the LBYONLY keyword of the user directory statement. This value is only used # when a guest is created. If you change this value, existing guests' directory # entries are not automatically updated with the new value. # When an ESM is installed, this parameter only governs when the ESM # defers to CP's processing. # # Usage note: # The default is empty string with nothing set. # '' is an invalid value and it will cause VM deploying failed. # Thus, DO NOT set default_admin_userid=''. # When a non-empty string is provided, blank chars will be used as delimiter, # you can use LOGONBY xxx command to log on the guest using the corresponding # admin userid's password. # # For example, when you set this value to 'oper1 oper2 oper3 jones', it means # you can use any one of 'oper1', 'oper2', 'oper3', 'jones' as an admin user. # # see the z/VM CP Planning and Administration for additional information. # # Possible values: # A maximum of 8 blank-delimited strings. Each non-blank string must be a # valid z/VM userid. # e.g 'oper1 oper2' is a valid value. # 'o1 o2 o3 o4 o5 o6 o7 o8 o9' is NOT a valid value. # # This param is optional #default_admin_userid=None # # Virtual device number for default NIC address. # # This value is the first NIC virtual device number, # each NIC needs 3 numbers for control/read/write, so by default # the first NIC's address is 1000, the second one is 1003 etc. # # Possible values: # An integer value in hex format, between 0 and 65536 (x'FFFF'). 
# It should not conflict with other device numbers in the z/VM guest's # configuration, for example device numbers of the root or ephemeral or # persistent disks. # # Sample NIC definitions in the z/VM user directory: # NICDEF 1000 TYPE QDIO LAN SYSTEM MACID # NICDEF 1003 TYPE QDIO LAN SYSTEM MACID # # This param is optional #default_nic_vdev=1000 # # zVM disk pool and type for root/ephemeral disks. # # The option is combined by 2 parts and use : as separator. # # The first part is the type of disks in the disk pool. # The disks in one disk pool must in same type (ECKD or FBA). # Possible values of the disk pool type: # A string, either ECKD or FBA. # # The second part is the volume group name defined in your directory manager # on your z/VM system, which will be used for allocating disks for # new guest. A dollar sign ($) is not allowed in the name. # # Sample disk_pool values: # ECKD:diskpo1 # FBA:testpool # # This param is optional #disk_pool=None # # Virtual device number for capture function. # # This value identity the virtual device number for capture # image when z/VM guest is power off. # # Possible values: # An string value identify disk number like '0100'. # If this value has been configured, capture image function will use # this value as disk info to capture with first priority when z/VM # guest is power off. # This value don't work if z/VM guest status is power on. # Sample root disk in user directory: # MDISK 0100 # # This param is optional #force_capture_disk=None # # The name of a list containing names of virtual servers to be queried. The list # which contains the userid list by default is named: VSMWORK1 NAMELIST, see # DMSSICNF COPY key: NameListFileIdAny. The list has to be accessible to the # SMAPI servers. # # The length of namelist must no longer than 64. # # This param is optional #namelist=None # # The port number of remotehost sshd. 
# # This param is optional #remotehost_sshd_port=22 # # For swap disk to create from mdisk instead of vdisk. # In boot from volume case, there might be no disk pool at all, then # the only choice is to use vdisk (or using FCP LUN which is complicated), # if customer doesn't want vdisk, then set this value to `True` so # VDISK will not be used and in turn it will fail check. # # This param is optional #swap_force_mdisk=False # # The default maximum number of virtual processers the user can define. # This value is used as the default value for maximum vcpu number when # create a guest with no max_cpu specified. # # The number must be a decimal value between 1 and 64. # # This param is optional #user_default_max_cpu=32 # # The default maximum size of memory the user can define. # This value is used as the default value for maximum memory size when # create a guest with no max_mem specified. # The value can be specified by 1-4 bits of number suffixed by either # M (Megabytes) or G (Gigabytes) and the number must be a whole number, # values such as 4096.8M or 32.5G are not supported. # # The value should be adjusted based on your system capacity. # # This param is optional #user_default_max_memory=64G # # The default maximum size of reserved memory in a vm's direct entry. # This value is used as the default value for maximum reserved memory # size for a guest. # The value can be specified by 1-4 bits of number suffixed by either # M (Megabytes) or G (Gigabytes) and the number must be a whole number, # values such as 4096.8M or 32.5G are not supported. # # The value should be adjusted based on your system capacity. # # This param is optional #user_default_max_reserved_memory=64G # This param is optional #user_default_password=None # # The default SHARE settings configuration. # # The recommend value of SHARE. From z/VM doc, SHARE is relative value of # virtual machine and if you set SHARE to 100 while virtual CPUs are 4, # then each vCPU get 25 entitlement. 
# # So the mechanism currently is: # # 1) If a share is given, set SHARE value to the VM # 2) If no SHARE is given during creation, check user_default_share_unit # 3) If user_default_share_unit is 0, do nothing # 4) If user_default_share_unit it not 0(current default is 100), # then insert statement `SHARE RELATIVE user_default_share_unit*vCPU` # into user direct, for example, with user_default_share_unit=100, # 4 vCPU will create `SHARE RELATIVE 400`. # # This align the best practice of z/VM recommendation. # # This param is optional #user_default_share_unit=100 # # PROFILE name to use when creating a z/VM guest. # # When SDK deploys an guest on z/VM, it can include some # common statements from a PROFILE definition. # This PROFILE must already be included in your z/VM user directory. # # Possible values: # An 8 character name of a PROFILE that is already defined in the z/VM # user directory. # # This param is required #user_profile=None # # Virtual device number for root disk. # # When SDK deploys an guest, it creates a root disk and potentially # several data disks. This value is the virtual device number of the root # disk. # # Possible values: # An integer value in hex format, between 0 and 65536 (x'FFFF'). # It should not conflict with other device numbers in the z/VM guest's # configuration, for example device numbers of the NICs or ephemeral or # persistent disks. # # Sample root disk in user directory: # MDISK 0100 # # This param is optional #user_root_vdev=0100 zVMCloudConnector-1.6.3/zvmsdk/0000775000175000017510000000000014315232035016036 5ustar ruirui00000000000000zVMCloudConnector-1.6.3/zvmsdk/imageops.py0000664000175000017510000000347013672563714020240 0ustar ruirui00000000000000# Copyright 2017 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from zvmsdk import config from zvmsdk import log from zvmsdk import smtclient from zvmsdk import utils as zvmutils LOG = log.LOG CONF = config.CONF _IMAGEOPS = None def get_imageops(): global _IMAGEOPS if _IMAGEOPS is None: _IMAGEOPS = ImageOps() return _IMAGEOPS class ImageOps(object): def __init__(self): self._smtclient = smtclient.get_smtclient() self._pathutils = zvmutils.PathUtils() def image_get_root_disk_size(self, image_name): return self._smtclient.image_get_root_disk_size(image_name) def image_import(self, image_name, url, image_meta, remote_host=None): return self._smtclient.image_import(image_name, url, image_meta, remote_host) def image_query(self, imagename=None): return self._smtclient.image_query(imagename) def image_delete(self, image_name): return self._smtclient.image_delete(image_name) def image_export(self, image_name, dest_url, remote_host=None): return self._smtclient.image_export(image_name, dest_url, remote_host) zVMCloudConnector-1.6.3/zvmsdk/smtclient.py0000664000175000017510000057722714315210052020431 0ustar ruirui00000000000000# Copyright 2017,2022 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import functools import hashlib import math # On SLES12, we found that if you import urllib.parse later # than requests, you will find a error like 'not able to load # urllib.parse, this is because urllib will be in sys.modules # when first import requests # as workaround here, we first import urllib then import requests # later, we need consider to use urllib.request to replace # requests if that's possible to avoid this kind of issue from io import IOBase import shutil import six.moves.urllib.parse as urlparse import requests import threading import os import re import six import string import subprocess import tempfile import time from smtLayer import smt from zvmsdk import config from zvmsdk import constants as const from zvmsdk import database from zvmsdk import exception from zvmsdk import log from zvmsdk import returncode from zvmsdk import utils as zvmutils CONF = config.CONF LOG = log.LOG _LOCK = threading.Lock() CHUNKSIZE = 4096 DIRMAINT_ERROR_MESSAGE = ("https://www-40.ibm.com/servers/resourcelink/" "svc0302a.nsf/pages/zVMV7R2gc246282?OpenDocument") CP_ERROR_MESSAGE = ("https://www-40.ibm.com/servers/resourcelink/" "svc0302a.nsf/pages/zVMV7R2gc246270?OpenDocument") _SMT_CLIENT = None def get_smtclient(): global _SMT_CLIENT if _SMT_CLIENT is None: try: _SMT_CLIENT = zvmutils.import_object( 'zvmsdk.smtclient.SMTClient') except ImportError: LOG.error("Unable to get smtclient") raise ImportError return _SMT_CLIENT class SMTClient(object): def __init__(self): self._smt = smt.SMT() self._pathutils = zvmutils.PathUtils() self._NetDbOperator = database.NetworkDbOperator() self._GuestDbOperator = database.GuestDbOperator() self._ImageDbOperator = database.ImageDbOperator() def _request(self, requestData): try: results = self._smt.request(requestData) except Exception as err: LOG.error('SMT internal parse encounter error') raise 
exception.SDKInternalError(msg=err, modID='smt') def _is_smt_internal_error(results): internal_error_list = returncode.SMT_INTERNAL_ERROR for error in internal_error_list: if results['overallRC'] != error[0]: # overallRC does not match, continue next continue if error[1] is not None and results['rc'] != error[1]: # rc match failed continue if error[2] is not None and results['rs'] not in error[2]: # rs match failed continue # All match finish successfully, return true return True return False if results['overallRC'] != 0: results.pop('logEntries') # Check whether this smt error belongs to internal error, if so, # raise internal error, otherwise raise clientrequestfailed error if _is_smt_internal_error(results): msg = "SMT internal error. Results: %s." % str(results) rc = results.get('rc', 0) if rc in [-110, -102, -103, -108]: msg += ("This is likely to be caused by temporary z/VM " "SMAPI down issue, Contact with your z/VM " "administrators for further help") raise exception.SDKInternalError(msg=msg, modID='smt', results=results) else: # no solution if we don't know, so empty string solution = '' rc = results.get('rc', 0) if rc == 396: solution = (("CP command failed, with error code %s." "Check <%s> on z/VM CP error messages") % (results['rs'], CP_ERROR_MESSAGE)) if rc == 596: solution = (("DIRMAINT command failed, with error code %s." "Check <%s> on z/VM DIRMAINT error messages") % (results['rs'], DIRMAINT_ERROR_MESSAGE)) msg = (("SMT request failed. RequestData: '%s', Results: '%s'." 
"%s") % (requestData, str(results), solution)) raise exception.SDKSMTRequestFailed(results, msg) return results def get_guest_temp_path(self, userid): return self._pathutils.get_guest_temp_path(userid) def get_guest_path(self, userid): return self._pathutils.get_guest_path(userid) def clean_temp_folder(self, tmp_folder): return self._pathutils.clean_temp_folder(tmp_folder) def _generate_vdev(self, base, offset): """Generate virtual device number based on base vdev :param base: base virtual device number, string of 4 bit hex. :param offset: offset to base, integer. """ vdev = hex(int(base, 16) + offset)[2:] return vdev.rjust(4, '0') def _generate_increasing_nic_id(self, nic_id): """Generate increasing nic id string :param nic_id: hexadecimal nic id like '1000' :return: increasing nic id, string like '0.0.1000,0.0.1001,0.0.1002' """ nic_id = str(hex(int(nic_id, 16)))[2:] nic_id_1 = str(hex(int(nic_id, 16) + 1))[2:] nic_id_2 = str(hex(int(nic_id, 16) + 2))[2:] if len(nic_id_2) > 4: errmsg = ("Virtual device number %s is not valid" % nic_id_2) raise exception.SDKInvalidInputFormat(msg=errmsg) return "0.0.%s,0.0.%s,0.0.%s" % (nic_id, nic_id_1, nic_id_2) def generate_disk_vdev(self, start_vdev=None, offset=0): """Generate virtual device number for disks :param offset: offset of user_root_vdev. :return: virtual device number, string of 4 bit hex. 
""" if not start_vdev: start_vdev = CONF.zvm.user_root_vdev vdev = self._generate_vdev(start_vdev, offset) if offset >= 0 and offset < 254: return vdev else: msg = ("Failed to generate disk vdev, invalid virtual device" "number for disk:%s" % vdev) LOG.error(msg) raise exception.SDKGuestOperationError(rs=2, msg=msg) def add_mdisks(self, userid, disk_list, start_vdev=None): """Add disks for the userid :disks: A list dictionary to describe disk info, for example: disk: [{'size': '1g', 'format': 'ext3', 'disk_pool': 'ECKD:eckdpool1'}, {'size': '1g', 'format': 'ext3'}] """ # Firstly, check disk_pool in disk_list, if disk_pool not specified # and not configured(the default vaule is None), report error # report error for idx, disk in enumerate(disk_list): disk_pool = disk.get('disk_pool') or CONF.zvm.disk_pool disk['disk_pool'] = disk_pool if disk_pool is None: msg = ('disk_pool not configured for sdkserver.') LOG.error(msg) raise exception.SDKGuestOperationError(rs=2, msg=msg) for idx, disk in enumerate(disk_list): if 'vdev' in disk: # this means user want to create their own device number vdev = disk['vdev'] else: vdev = self.generate_disk_vdev(start_vdev=start_vdev, offset=idx) self._add_mdisk(userid, disk, vdev) disk['vdev'] = vdev sizeUpper = disk.get('size').strip().upper() sizeUnit = sizeUpper[-1] if sizeUnit != 'G' and sizeUnit != 'M': sizeValue = sizeUpper disk_pool = disk.get('disk_pool') [diskpool_type, diskpool_name] = disk_pool.split(':') if (diskpool_type.upper() == 'ECKD'): # Convert the cylinders to bytes convert = 737280 else: # Convert the blocks to bytes convert = 512 byteSize = float(float(int(sizeValue) * convert / 1024) / 1024) unit = "M" if (byteSize > 1024): byteSize = float(byteSize / 1024) unit = "G" byteSize = "%.1f" % byteSize disk['size'] = byteSize + unit return disk_list def remove_mdisks(self, userid, vdev_list): for vdev in vdev_list: self._remove_mdisk(userid, vdev) def dedicate_device(self, userid, vaddr, raddr, mode): """dedicate 
device :userid: The name of the image obtaining a dedicated device :vaddr: The virtual device number of the device :raddr: A real device number to be dedicated or attached to the specified image :mode: Specify a 1 if the virtual device is to be in read-only mode. Otherwise, specify a 0. """ # dedicate device to directory entry self._dedicate_device(userid, vaddr, raddr, mode) def _dedicate_device(self, userid, vaddr, raddr, mode): """dedicate device.""" action = 'dedicate' rd = ('changevm %(uid)s %(act)s %(va)s %(ra)s %(mod)i' % {'uid': userid, 'act': action, 'va': vaddr, 'ra': raddr, 'mod': mode}) action = "dedicate device to userid '%s'" % userid with zvmutils.log_and_reraise_smt_request_failed(action): self._request(rd) def get_fcp_info_by_status(self, userid, status=None): """get fcp information by the status. :userid: (str) The name of the image to query fcp info :status: (str) If status is None, will return the FCP devices of all statuses. If status specified, will only return the FCP devices of this status. The status must be 'active', 'free' or 'offline'. :returns: (list) a list of string lines that the command output. 
""" action = 'fcpinfo' if status is None: # if status is None, will transfer status to all # to let smtLayer return the FCPs of all the statuses status = "all" # always set -k OWNER=YES rd = ' '.join(['getvm', userid, action, status, "YES"]) action = "query fcp info of '%s'" % userid with zvmutils.log_and_reraise_smt_request_failed(action): results = self._request(rd) return results['response'] def undedicate_device(self, userid, vaddr): """undedicate device :userid: The name of the image obtaining a dedicated device :vaddr: The virtual device number of the device """ # undedicate device to directory entry self._undedicate_device(userid, vaddr) def _undedicate_device(self, userid, vaddr): """undedicate device.""" action = 'undedicate' rd = ('changevm %(uid)s %(act)s %(va)s' % {'uid': userid, 'act': action, 'va': vaddr}) action = "undedicate device from userid '%s'" % userid with zvmutils.log_and_reraise_smt_request_failed(action): self._request(rd) def get_image_performance_info(self, userid): """Get CPU and memory usage information. :userid: the zvm userid to be queried """ pi_dict = self.image_performance_query([userid]) return pi_dict.get(userid, None) def get_adapters_info(self, userid): rd = ' '.join(( "SMAPI %s API Virtual_Network_Adapter_Query_Extended" % userid, "--operands", "-k 'image_device_number=*'")) results = None action = "get network info of userid '%s'" % str(userid) with zvmutils.log_and_reraise_smt_request_failed(action): results = self._request(rd) ret = results['response'] # TODO: muti NIC support? 
nic_count = 0 for line in ret: if 'adapter_count=' in line: nic_count = int(line.strip().split('=')[-1]) break if nic_count < 1: msg = 'get_network_info:No NIC found on userid %s' % userid LOG.error(msg) raise exception.SDKGuestOperationError(rs=5, userid=userid, msg=msg) # save network info into dict by index from 1 to nic_count # Firstly, get adapter information adapters_info = [] adapter = dict() # if found IP, no need to continue found_mac = False for line in ret: if 'adapter_address=' in line: adapter_addr = line.strip().split('=')[-1] adapter['adapter_address'] = adapter_addr if 'adapter_status=' in line: adapter_type = line.strip().split('=')[-1] adapter['adapter_status'] = adapter_type if 'lan_owner=' in line: lan_owner = line.strip().split('=')[-1] adapter['lan_owner'] = lan_owner if 'lan_name=' in line: lan_name = line.strip().split('=')[-1] adapter['lan_name'] = lan_name if 'mac_address=' in line and not found_mac: mac_addr = line.strip().split('=')[-1] pattern = re.compile('.{2}') mac_address = ':'.join(pattern.findall(mac_addr)) adapter['mac_address'] = mac_address if 'mac_ip_version=' in line: ip_version = line.strip().split('=')[-1] adapter['mac_ip_version'] = ip_version if 'mac_ip_address=' in line: # once we found mac_ip_address, assume this is the MAC # we are using, then jump to next adapter mac_ip = line.strip().split('=')[-1] adapter['mac_ip_address'] = mac_ip found_mac = True if 'adapter_info_end' in line: adapters_info.append(adapter) # clear adapter and process next adapter = dict() found_mac = False return adapters_info def _parse_vswitch_inspect_data(self, rd_list): """ Parse the Virtual_Network_Vswitch_Query_Byte_Stats data to get inspect data. 
""" def _parse_value(data_list, idx, keyword, offset): return idx + offset, data_list[idx].rpartition(keyword)[2].strip() vsw_dict = {} with zvmutils.expect_invalid_resp_data(): # vswitch count idx = 0 idx, vsw_count = _parse_value(rd_list, idx, 'vswitch count:', 2) vsw_dict['vswitch_count'] = int(vsw_count) # deal with each vswitch data vsw_dict['vswitches'] = [] for i in range(vsw_dict['vswitch_count']): vsw_data = {} # skip vswitch number idx += 1 # vswitch name idx, vsw_name = _parse_value(rd_list, idx, 'vswitch name:', 1) vsw_data['vswitch_name'] = vsw_name # uplink count idx, up_count = _parse_value(rd_list, idx, 'uplink count:', 1) # skip uplink data idx += int(up_count) * 9 # skip bridge data idx += 8 # nic count vsw_data['nics'] = [] idx, nic_count = _parse_value(rd_list, idx, 'nic count:', 1) nic_count = int(nic_count) for j in range(nic_count): nic_data = {} idx, nic_id = _parse_value(rd_list, idx, 'nic_id:', 1) userid, toss, vdev = nic_id.partition(' ') nic_data['userid'] = userid nic_data['vdev'] = vdev idx, nic_data['nic_fr_rx'] = _parse_value(rd_list, idx, 'nic_fr_rx:', 1 ) idx, nic_data['nic_fr_rx_dsc'] = _parse_value(rd_list, idx, 'nic_fr_rx_dsc:', 1 ) idx, nic_data['nic_fr_rx_err'] = _parse_value(rd_list, idx, 'nic_fr_rx_err:', 1 ) idx, nic_data['nic_fr_tx'] = _parse_value(rd_list, idx, 'nic_fr_tx:', 1 ) idx, nic_data['nic_fr_tx_dsc'] = _parse_value(rd_list, idx, 'nic_fr_tx_dsc:', 1 ) idx, nic_data['nic_fr_tx_err'] = _parse_value(rd_list, idx, 'nic_fr_tx_err:', 1 ) idx, nic_data['nic_rx'] = _parse_value(rd_list, idx, 'nic_rx:', 1 ) idx, nic_data['nic_tx'] = _parse_value(rd_list, idx, 'nic_tx:', 1 ) vsw_data['nics'].append(nic_data) # vlan count idx, vlan_count = _parse_value(rd_list, idx, 'vlan count:', 1) # skip vlan data idx += int(vlan_count) * 3 # skip the blank line idx += 1 vsw_dict['vswitches'].append(vsw_data) return vsw_dict def _is_vdev_valid(self, vdev, vdev_info): for used_vdev in vdev_info: if (((int(vdev, 16) >= int(used_vdev, 16)) 
and (int(vdev, 16) <= int(used_vdev, 16) + 2)) or ((int(vdev, 16) < int(used_vdev, 16)) and (int(vdev, 16) >= int(used_vdev, 16) - 2))): return False return True def get_power_state(self, userid): """Get power status of a z/VM instance.""" LOG.debug('Querying power stat of %s' % userid) requestData = "PowerVM " + userid + " status" action = "query power state of '%s'" % userid with zvmutils.log_and_reraise_smt_request_failed(action): results = self._request(requestData) with zvmutils.expect_invalid_resp_data(results): status = results['response'][0].partition(': ')[2] return status def _check_power_state(self, userid, action): # Get the vm status power_state = self.get_power_state(userid) # Power on the vm if it is inactive if power_state == 'off': msg = ('The vm %s is powered off, please start up it ' 'before %s' % (userid, action)) raise exception.SDKGuestOperationError(rs=5, userid=userid, msg=msg) def guest_start(self, userid): """Power on VM.""" requestData = "PowerVM " + userid + " on" with zvmutils.log_and_reraise_smt_request_failed(): self._request(requestData) def guest_stop(self, userid, **kwargs): """Power off VM.""" requestData = "PowerVM " + userid + " off" if 'timeout' in kwargs.keys() and kwargs['timeout']: requestData += ' --maxwait ' + str(kwargs['timeout']) if 'poll_interval' in kwargs.keys() and kwargs['poll_interval']: requestData += ' --poll ' + str(kwargs['poll_interval']) with zvmutils.log_and_reraise_smt_request_failed(): self._request(requestData) def guest_softstop(self, userid, **kwargs): """Power off VM gracefully, it will call shutdown os then deactivate vm""" requestData = "PowerVM " + userid + " softoff --wait" if 'timeout' in kwargs.keys() and kwargs['timeout']: requestData += ' --maxwait ' + str(kwargs['timeout']) else: requestData += ' --maxwait ' + str(CONF.guest.softstop_timeout) if 'poll_interval' in kwargs.keys() and kwargs['poll_interval']: requestData += ' --poll ' + str(kwargs['poll_interval']) else: requestData += ' --poll 
    def live_migrate_move(self, userid, destination, parms):
        """Move a running virtual machine to another system in the SSI
        cluster while it continues to run (live relocation).

        :param userid: the guest to relocate
        :param destination: target system name within the SSI cluster
        :param parms: dict of optional relocation controls; recognized
               keys are 'maxtotal' and 'maxquiesce' (values appended to
               the request) and the presence-only flags 'immediate',
               'forcearch', 'forcedomain' and 'forcestorage'
        :raises SDKSMTRequestFailed: when the relocation request fails;
                the re-raised exception carries the original results plus
                a message naming this action
        """
        rd = ('migratevm %(uid)s move --destination %(dest)s ' %
              {'uid': userid, 'dest': destination})
        # Optional value-carrying controls; the base string ends with a
        # space, so '--maxtotal' needs no leading separator of its own.
        if 'maxtotal' in parms:
            rd += ('--maxtotal ' + str(parms['maxtotal']))
        if 'maxquiesce' in parms:
            rd += (' --maxquiesce ' + str(parms['maxquiesce']))
        # Presence-only flags: the key existing at all turns the flag on.
        if 'immediate' in parms:
            rd += " --immediate"
        if 'forcearch' in parms:
            rd += " --forcearch"
        if 'forcedomain' in parms:
            rd += " --forcedomain"
        if 'forcestorage' in parms:
            rd += " --forcestorage"
        action = "move userid '%s' to SSI '%s'" % (userid, destination)
        try:
            self._request(rd)
        except exception.SDKSMTRequestFailed as err:
            msg = ''
            if action is not None:
                msg = "Failed to %s. " % action
            msg += "SMT error: %s" % err.format_message()
            LOG.error(msg)
            raise exception.SDKSMTRequestFailed(err.results, msg)
""" rd = ('migratevm %(uid)s test --destination %(dest)s ' % {'uid': userid, 'dest': destination}) action = "test to move userid '%s' to SSI '%s'" % (userid, destination) try: self._request(rd) except exception.SDKSMTRequestFailed as err: msg = '' if action is not None: msg = "Failed to %s. " % action msg += "SMT error: %s" % err.format_message() LOG.error(msg) raise exception.SDKSMTRequestFailed(err.results, msg) def _get_ipl_param(self, ipl_from): if len(ipl_from) > 0: ipl_param = ipl_from else: ipl_param = CONF.zvm.user_root_vdev return ipl_param def create_vm(self, userid, cpu, memory, disk_list, profile, max_cpu, max_mem, ipl_from, ipl_param, ipl_loadparam, dedicate_vdevs, loaddev, account, comment_list, cschedule='', cshare='', rdomain='', pcif=''): """ Create VM and add disks if specified. """ rd = ('makevm %(uid)s directory LBYONLY %(mem)im %(pri)s ' '--cpus %(cpu)i --profile %(prof)s --maxCPU %(max_cpu)i ' '--maxMemSize %(max_mem)s --setReservedMem' % {'uid': userid, 'mem': memory, 'pri': const.ZVM_USER_DEFAULT_PRIVILEGE, 'cpu': cpu, 'prof': profile, 'max_cpu': max_cpu, 'max_mem': max_mem}) if CONF.zvm.default_admin_userid: ids = CONF.zvm.default_admin_userid.split(' ') id_str = ':'.join(ids) rd += (' --logonby %s' % id_str) # when use dasd as root disk, the disk_list[0] would be the boot # disk. # when boot from volume, ipl_from should be specified explicitly. if (disk_list and 'is_boot_disk' in disk_list[0] and disk_list[0]['is_boot_disk']) or ipl_from: # we assume at least one disk exist, which means, is_boot_disk # is true for exactly one disk. 
rd += (' --ipl %s' % self._get_ipl_param(ipl_from)) # load param for ipl if ipl_param: rd += ' --iplParam %s' % ipl_param if ipl_loadparam: rd += ' --iplLoadparam %s' % ipl_loadparam if dedicate_vdevs: rd += ' --dedicate "%s"' % " ".join(dedicate_vdevs) if account: rd += ' --account "%s"' % account if cschedule: rd += ' --commandSchedule %s' % cschedule # if share given, then user it # or if CONF.zvm.user_default_share_unit is not 0 # set relative share to CONF.zvm.user_default_share_unit*cpu if cshare: rd += ' --commandSetShare "%s"' % cshare else: # only add SHARE statement if unit > 0 if CONF.zvm.user_default_share_unit > 0: total = CONF.zvm.user_default_share_unit * cpu data = 'RELATIVE %d' % total rd += ' --commandSetShare "%s"' % data if rdomain: rd += ' --commandRDomain %s' % rdomain if pcif: v = pcif.split(':') if len(v) != 2: errmsg = ("pcif input %s is invalid, must be format like" " :" % pcif) raise exception.SDKInvalidInputFormat(msg=errmsg) rd += ' --commandPcif %s' % pcif comments = '' if comment_list is not None: for comment in comment_list: comments += comment # This s a dummy spliter and will be used for split # the comment, for example, input comment is # comment1,comment2, it will be constructed into # comment1$@$@$comment2 and send to smtLayer to handle comments += '$@$@$' if comments: rd += ' --comment "%s"' % comments if loaddev: if 'portname' in loaddev: rd += ' --loadportname %s' % loaddev['portname'] if 'lun' in loaddev: rd += ' --loadlun %s' % loaddev['lun'] # now, we need consider swap only case, customer using boot # from volume but no disk pool provided, we allow to create # swap disk from vdisk by default, when we come to this logic # we are very sure that if no disk pool, there is only one # disk in disk_list and that's swap vdisk = None # this is swap only case, which means, you only create a swap # disk (len disk_list is 1) and no other disks if len(disk_list) == 1: disk = disk_list[0] if 'format' in disk and disk['format'].lower() 
== 'swap': disk_pool = disk.get('disk_pool') or CONF.zvm.disk_pool if disk_pool is None: # if it's vdisk, then create user direct directly vd = disk.get('vdev') or self.generate_disk_vdev(offset=0) disk['vdev'] = vd sizeUpper = disk['size'].strip().upper() sizeUnit = sizeUpper[-1] if sizeUnit != 'M' and sizeUnit != 'G': errmsg = ("%s must has 'M' or 'G' suffix" % sizeUpper) raise exception.SDKInvalidInputFormat(msg=errmsg) if sizeUnit == 'M': size = int(sizeUpper[:-1]) if size > 2048: errmsg = ("%s is great than 2048M" % sizeUpper) raise exception.SDKInvalidInputFormat(msg=errmsg) if sizeUnit == 'G': size = int(sizeUpper[:-1]) if size > 2: errmsg = ("%s is great than 2G" % sizeUpper) raise exception.SDKInvalidInputFormat(msg=errmsg) rd += ' --vdisk %s:%s' % (vd, sizeUpper) vdisk = disk action = "create userid '%s'" % userid try: self._request(rd) except exception.SDKSMTRequestFailed as err: if ((err.results['rc'] == 436) and (err.results['rs'] == 4)): result = "Profile '%s'" % profile raise exception.SDKObjectNotExistError(obj_desc=result, modID='guest') elif ((err.results['rc'] == 596) and (err.results['rs'] == 3658)): # internal issue 9939 # That is because a previous definition of CIC may have # caused it to be defined. I would log it somewhere. LOG.warning("ignoring 596/3658 as it might be defined already") else: msg = '' if action is not None: msg = "Failed to %s. 
" % action msg += "SMT error: %s" % err.format_message() LOG.error(msg) raise exception.SDKSMTRequestFailed(err.results, msg) # Add the guest to db immediately after user created action = "add guest '%s' to database" % userid with zvmutils.log_and_reraise_sdkbase_error(action): self._GuestDbOperator.add_guest(userid) # Continue to add disk, if vdisk is None, it means # it's not vdisk routine and we need add disks if vdisk is None and disk_list: # not perform mkfs against root disk if disk_list[0].get('is_boot_disk'): disk_list[0].update({'format': 'none'}) return self.add_mdisks(userid, disk_list) # we must return swap disk in order to make guest config # handle other remaining jobs return disk_list def _add_mdisk(self, userid, disk, vdev): """Create one disk for userid NOTE: No read, write and multi password specified, and access mode default as 'MR'. """ size = disk['size'] fmt = disk.get('format', 'ext4') disk_pool = disk.get('disk_pool') or CONF.zvm.disk_pool # Check disk_pool, if it's None, report error if disk_pool is None: msg = ('disk_pool not configured for sdkserver.') LOG.error(msg) raise exception.SDKGuestOperationError(rs=2, msg=msg) [diskpool_type, diskpool_name] = disk_pool.split(':') if (diskpool_type.upper() == 'ECKD'): action = 'add3390' else: action = 'add9336' rd = ' '.join(['changevm', userid, action, diskpool_name, vdev, size, '--mode MR']) if fmt and fmt != 'none': rd += (' --filesystem %s' % fmt.lower()) action = "add mdisk to userid '%s'" % userid with zvmutils.log_and_reraise_smt_request_failed(action): self._request(rd) def get_vm_list(self): """Get the list of guests that are created by SDK return userid list""" action = "list all guests in database" with zvmutils.log_and_reraise_sdkbase_error(action): guests_in_db = self._GuestDbOperator.get_guest_list() guests_migrated = \ self._GuestDbOperator.get_migrated_guest_info_list() # db query return value in tuple (uuid, userid, metadata, comments) userids_in_db = [g[1].upper() for g in 
guests_in_db] userids_migrated = [g[1].upper() for g in guests_migrated] userid_list = list(set(userids_in_db) - set(userids_migrated)) return userid_list def _remove_mdisk(self, userid, vdev): rd = ' '.join(('changevm', userid, 'removedisk', vdev)) action = "remove disk with vdev '%s' from userid '%s'" % (vdev, userid) with zvmutils.log_and_reraise_smt_request_failed(action): self._request(rd) def guest_authorize_iucv_client(self, userid, client=None): """Punch a script that used to set the authorized client userid in vm If the guest is in log off status, the change will take effect when the guest start up at first time. If the guest is in active status, power off and power on are needed for the change to take effect. :param str guest: the user id of the vm :param str client: the user id of the client that can communicate to guest using IUCV""" client = client or zvmutils.get_smt_userid() iucv_path = "/tmp/" + userid if not os.path.exists(iucv_path): os.makedirs(iucv_path) iucv_auth_file = iucv_path + "/iucvauth.sh" zvmutils.generate_iucv_authfile(iucv_auth_file, client) try: requestData = "ChangeVM " + userid + " punchfile " + \ iucv_auth_file + " --class x" self._request(requestData) except exception.SDKSMTRequestFailed as err: msg = ("Failed to punch IUCV auth file to userid '%s'. 
SMT error:" " %s" % (userid, err.format_message())) LOG.error(msg) raise exception.SDKSMTRequestFailed(err.results, msg) finally: self._pathutils.clean_temp_folder(iucv_path) def volume_refresh_bootmap(self, fcpchannels, wwpns, lun, wwid='', transportfiles=None, guest_networks=None, min_fcp_paths_count=0): guest_networks = guest_networks or [] fcps = ','.join(fcpchannels) ws = ','.join(wwpns) fcs = "--fcpchannel=%s" % fcps wwpns = "--wwpn=%s" % ws lun = "--lun=%s" % lun wwid = "--wwid=%s" % wwid paths = "--minfcp=%s" % min_fcp_paths_count cmd = ['sudo', '/opt/zthin/bin/refresh_bootmap', fcs, wwpns, lun, wwid, paths] if guest_networks: # prepare additional parameters for RHCOS BFV if not transportfiles: err_msg = 'Ignition file is required when deploying RHCOS' LOG.error(err_msg) raise exception.SDKVolumeOperationError(rs=10) # get NIC ID from zvmsdk import dist _dist_manager = dist.LinuxDistManager() linuxdist = _dist_manager.get_linux_dist("rhcos4")() ip_config = linuxdist.create_coreos_parameter(guest_networks) nic_id = self._generate_increasing_nic_id( ip_config.split(":")[5].replace("enc", "")) cmd += ["--ignitionurl=%s" % transportfiles, "--nicid=%s" % nic_id, "--ipconfig=%s" % ip_config] LOG.info("Running command: %s", cmd) try: (rc, output) = zvmutils.execute(cmd, timeout=CONF.volume.refresh_bootmap_timeout) except subprocess.TimeoutExpired as err: err_msg = err.format_message() raise exception.SDKVolumeOperationError(rs=7, msg=err_msg) except PermissionError: # because zvmsdk user dont have permission to kill background # process so if the excute timeout, will raise PermissionError # we also treat it as timeout exception err_msg = ("Running command: %s timed out." % cmd) raise exception.SDKVolumeOperationError(rs=7, msg=err_msg) if rc != 0: err_msg = ("refresh_bootmap failed with return code: %d." 
% rc) err_output = "" output_lines = output.split('\n') for line in output_lines: if line.__contains__("Exit MSG:"): err_output += ("\\n" + line.strip()) LOG.error(err_msg + err_output) raise exception.SDKVolumeOperationError(rs=5, errcode=rc, errmsg=err_output) output_lines = output.split('\n') paths_dict = {} for line in output_lines: if line.__contains__("RESULT PATHS: "): paths_str = line[14:] # paths_str format: "FCP1:W1 W2,FCP2:W3 W4" # convert paths string into a dict paths_list = paths_str.split(',') for path in paths_list: fcp, wwpn = path.split(':') wwpn_list = wwpn.split(' ') paths_dict[fcp] = wwpn_list return paths_dict def guest_deploy(self, userid, image_name, transportfiles=None, remotehost=None, vdev=None, skipdiskcopy=False): """ Deploy image and punch config driver to target """ # (TODO: add the support of multiple disks deploy) if skipdiskcopy: msg = ('Start guest_deploy without unpackdiskimage, guest: %(vm)s' 'os_version: %(img)s' % {'img': image_name, 'vm': userid}) LOG.info(msg) else: msg = ('Start to deploy image %(img)s to guest %(vm)s' % {'img': image_name, 'vm': userid}) LOG.info(msg) image_file = '/'.join([self._get_image_path_by_name(image_name), CONF.zvm.user_root_vdev]) cmd = ['/usr/bin/hexdump', '-C', '-n', '64', image_file] with zvmutils.expect_and_reraise_internal_error(modID='guest'): (rc, output) = zvmutils.execute(cmd) msg = ('Image header info in guest_deploy: rc: %d, header:\n%s' % (rc, output)) LOG.info(msg) # Unpack image file to root disk vdev = vdev or CONF.zvm.user_root_vdev cmd = ['sudo', '/opt/zthin/bin/unpackdiskimage', userid, vdev, image_file] with zvmutils.expect_and_reraise_internal_error(modID='guest'): (rc, output) = zvmutils.execute(cmd) if rc != 0: err_msg = ("unpackdiskimage failed with return code: %d." 
% rc) err_output = "" output_lines = output.split('\n') for line in output_lines: if line.__contains__("ERROR:"): err_output += ("\\n" + line.strip()) LOG.error(err_msg + err_output) raise exception.SDKGuestOperationError(rs=3, userid=userid, unpack_rc=rc, err=err_output) # Purge guest reader to clean dirty data rd = ("changevm %s purgerdr" % userid) action = "purge reader of '%s'" % userid with zvmutils.log_and_reraise_smt_request_failed(action): self._request(rd) # Punch transport files if specified if transportfiles: # Copy transport file to local msg = ('Start to send customized file to vm %s' % userid) LOG.info(msg) try: tmp_trans_dir = tempfile.mkdtemp() local_trans = '/'.join([tmp_trans_dir, os.path.basename(transportfiles)]) if remotehost: cmd = ["/usr/bin/scp", "-B", "-P", CONF.zvm.remotehost_sshd_port, "-o StrictHostKeyChecking=no", ("%s:%s" % (remotehost, transportfiles)), local_trans] else: cmd = ["/usr/bin/cp", transportfiles, local_trans] with zvmutils.expect_and_reraise_internal_error(modID='guest'): (rc, output) = zvmutils.execute(cmd) if rc != 0: err_msg = ('copy config drive with command %(cmd)s ' 'failed with output: %(res)s' % {'cmd': str(cmd), 'res': output}) LOG.error(err_msg) raise exception.SDKGuestOperationError(rs=4, userid=userid, err_info=err_msg) # Punch config drive to guest userid rd = ("changevm %(uid)s punchfile %(file)s --class X" % {'uid': userid, 'file': local_trans}) action = "punch config drive to userid '%s'" % userid with zvmutils.log_and_reraise_smt_request_failed(action): self._request(rd) finally: # remove the local temp config drive folder self._pathutils.clean_temp_folder(tmp_trans_dir) # Authorize iucv client client_id = None # try to re-use previous iucv authorized userid at first if os.path.exists(const.IUCV_AUTH_USERID_PATH): LOG.debug("Re-use previous iucv authorized userid") with open(const.IUCV_AUTH_USERID_PATH) as f: client_id = f.read().strip() self.guest_authorize_iucv_client(userid, client_id) # Update os 
version in guest metadata # TODO: may should append to old metadata, not replace if skipdiskcopy: os_version = image_name else: image_info = self._ImageDbOperator.image_query_record(image_name) os_version = image_info[0]['imageosdistro'] metadata = 'os_version=%s' % os_version self._GuestDbOperator.update_guest_by_userid(userid, meta=metadata) if skipdiskcopy: msg = ('guest_deploy without unpackdiskimage finish successfully, ' 'guest: %(vm)s, os_version: %(img)s' % {'img': image_name, 'vm': userid}) else: msg = ('Deploy image %(img)s to guest %(vm)s disk %(vdev)s' ' successfully' % {'img': image_name, 'vm': userid, 'vdev': vdev}) LOG.info(msg) def guest_deploy_rhcos(self, userid, image_name, transportfiles, remotehost=None, vdev=None, hostname=None, skipdiskcopy=False): """ Deploy image""" # (TODO: add the support of multiple disks deploy) if transportfiles is None: err_msg = 'Ignition file is required when deploying RHCOS image' LOG.error(err_msg) raise exception.SDKGuestOperationError(rs=13, userid=userid) if skipdiskcopy: msg = ('Start guest_deploy without copy disk, guest: %(vm)s' 'os_version: %(img)s' % {'img': image_name, 'vm': userid}) LOG.info(msg) image_file = None else: msg = ('Start to deploy image %(img)s to guest %(vm)s' % {'img': image_name, 'vm': userid}) LOG.info(msg) image_file = '/'.join([self._get_image_path_by_name(image_name), CONF.zvm.user_root_vdev]) # Unpack image file to root disk vdev = vdev or CONF.zvm.user_root_vdev tmp_trans_dir = None try: if remotehost: # download igintion file from remote host tmp_trans_dir = tempfile.mkdtemp() local_trans = '/'.join([tmp_trans_dir, os.path.basename(transportfiles)]) cmd = ["/usr/bin/scp", "-B", "-P", CONF.zvm.remotehost_sshd_port, "-o StrictHostKeyChecking=no", ("%s:%s" % (remotehost, transportfiles)), local_trans] with zvmutils.expect_and_reraise_internal_error(modID='guest'): (rc, output) = zvmutils.execute(cmd) if rc != 0: err_msg = ('copy ignition file with command %(cmd)s ' 'failed with output: 
%(res)s' % {'cmd': str(cmd), 'res': output}) LOG.error(err_msg) raise exception.SDKGuestOperationError(rs=4, userid=userid, err_info=err_msg) transportfiles = local_trans cmd = self._get_unpackdiskimage_cmd_rhcos(userid, image_name, transportfiles, vdev, image_file, hostname, skipdiskcopy) with zvmutils.expect_and_reraise_internal_error(modID='guest'): (rc, output) = zvmutils.execute(cmd) if rc != 0: err_msg = ("unpackdiskimage failed with return code: %d." % rc) err_output = "" output_lines = output.split('\n') for line in output_lines: if line.__contains__("ERROR:"): err_output += ("\\n" + line.strip()) LOG.error(err_msg + err_output) raise exception.SDKGuestOperationError(rs=3, userid=userid, unpack_rc=rc, err=err_output) finally: # remove the temp ignition file if tmp_trans_dir: self._pathutils.clean_temp_folder(tmp_trans_dir) # Update os version in guest metadata # TODO: may should append to old metadata, not replace if skipdiskcopy: os_version = image_name else: os_version = self.image_get_os_distro(image_name) metadata = 'os_version=%s' % os_version self._GuestDbOperator.update_guest_by_userid(userid, meta=metadata) if skipdiskcopy: msg = ('guest_deploy without copy disk finish successfully, ' 'guest: %(vm)s, os_version: %(img)s' % {'img': image_name, 'vm': userid}) else: msg = ('Deploy image %(img)s to guest %(vm)s disk %(vdev)s' ' successfully' % {'img': image_name, 'vm': userid, 'vdev': vdev}) LOG.info(msg) def get_os_version_from_userid(self, userid): """Get the os_verison of guests from userid. return os_version or UNKNOWN""" action = "get guests os_version from userid." 
        # NOTE(review): tail of a method whose "def" precedes this chunk;
        # code left unchanged, comments only.
        with zvmutils.log_and_reraise_sdkbase_error(action):
            guests_in_db = self._GuestDbOperator.\
                get_guest_metadata_with_userid(userid)
        # db query return metadata in tuple (metadata)
        os_version = 'UNKNOWN'
        for g in guests_in_db:
            if 'os_version='.upper() in g[0].upper():
                os_version = g[0].upper().strip().split('=')[1]
                break
        return os_version

    def guest_capture(self, userid, image_name, capture_type='rootonly',
                      compress_level=6, capture_device_assign=None):
        """Capture a guest's disk into an image in the SDK image repository.

        :param userid: z/VM userid of the capture source guest
        :param image_name: name of the image to generate
        :param capture_type: 'rootonly' is the only supported type;
               'alldisks' raises SDKFunctionNotImplementError
        :param compress_level: compression level passed to creatediskimage
        :param capture_device_assign: vdev of the disk to capture when the
               guest is unreachable (CONF.zvm.force_capture_disk wins over
               this if set)
        :raises SDKFunctionNotImplementError: for capture_type 'alldisks'
        :raises SDKGuestOperationError: on any failure during capture
        """
        if capture_type == "alldisks":
            func = ('Capture guest with type: %s' % capture_type)
            msg = ('%s is not supported in current release' % func)
            LOG.error(msg)
            raise exception.SDKFunctionNotImplementError(func=func,
                                                         modID='guest')

        msg = ('Start to capture %(vm)s to generate image %(img)s with '
               'capture type %(type)s' % {'vm': userid,
                                          'img': image_name,
                                          'type': capture_type})
        LOG.info(msg)

        # self._check_power_state(userid, 'capture')
        restart_flag = False
        reachable = self.get_guest_connection_status(userid)
        if reachable:
            # Make sure iucv channel is ready for communication on source vm
            try:
                self.execute_cmd(userid, 'pwd')
            except exception.SDKSMTRequestFailed as err:
                msg = ('Failed to check iucv status on capture source vm '
                       '%(vm)s with error %(err)s'
                       % {'vm': userid, 'err': err.results['response'][0]})
                LOG.error(msg)
                raise exception.SDKGuestOperationError(rs=5, userid=userid,
                                                       msg=msg)
            # Get the os version of the vm
            try:
                os_version = self.guest_get_os_version(userid)
            except exception.SDKSMTRequestFailed as err:
                msg = ('Failed to execute command on capture source vm %(vm)s'
                       'to get os version with error %(err)s'
                       % {'vm': userid, 'err': err.results['response'][0]})
                LOG.error(msg)
                raise exception.SDKGuestOperationError(rs=5, userid=userid,
                                                       msg=msg)
            except Exception as err:
                msg = ('Error happened when parsing os version on source vm '
                       '%(vm)s with error: %(err)s'
                       % {'vm': userid, 'err': six.text_type(err)})
                LOG.error(msg)
                raise exception.SDKGuestOperationError(rs=5, userid=userid,
                                                       msg=msg)
            msg = ('The capture source vm os version %(vm)s is %(version)s'
                   % {'vm': userid, 'version': os_version})
            LOG.info(msg)
            # Find the root device according to the capture type
            try:
                capture_devices = self._get_capture_devices(userid,
                                                            capture_type)
            except exception.SDKSMTRequestFailed as err:
                msg = ('Failed to execute command on source vm %(vm)s to get '
                       'devices for capture with error %(err)s'
                       % {'vm': userid, 'err': err.results['response'][0]})
                LOG.error(msg)
                raise exception.SDKGuestOperationError(rs=5, userid=userid,
                                                       msg=msg)
            except Exception as err:
                msg = ('Internal error happened when getting the devices for '
                       'capture on source vm %(vm)s with error %(err)s'
                       % {'vm': userid, 'err': six.text_type(err)})
                LOG.error(msg)
                raise exception.SDKGuestOperationError(rs=5, userid=userid,
                                                       msg=msg)
            # NOTE(review): this clause is unreachable -- the generic
            # "except Exception" above already catches
            # SDKGuestOperationError first.
            except exception.SDKGuestOperationError:
                raise

            # Shutdown the vm before capture
            self.guest_softstop(userid)
            # keep restart flag used after capture.
            restart_flag = True
        else:
            os_version = self.get_os_version_from_userid(userid)
            # Capture_device_assign as assign capture disk.
            # Input should be string to identity disk.
            # use force_capture_disk value first if
            # force_capture_disk=xxxx in zvmsdk.conf.
            if CONF.zvm.force_capture_disk:
                capture_devices = [str(CONF.zvm.force_capture_disk)]
            else:
                if capture_device_assign:
                    capture_devices = [str(capture_device_assign)]
                else:
                    # Fall back to the first MDISK statements found in the
                    # guest's user directory entry.
                    direct_info = self.get_user_direct(userid)
                    disk_info =\
                        [x for x in direct_info if x.startswith('MDISK')]
                    capture_devices = \
                        [x.split(' ')[1].strip(' ') for x in disk_info]
            if not capture_devices:
                msg = ('Error happened when getting the devices for '
                       'get vm disk information on source vm %(vm)s '
                       % {'vm': userid})
                LOG.error(msg)
                raise exception.SDKGuestOperationError(rs=5, userid=userid,
                                                       msg=msg)
            # if VM power on, the VM need be perform stop and start
            power_state = self.get_power_state(userid)
            if power_state == 'on':
                # Shutdown the vm before capture
                self.guest_stop(userid)
                restart_flag = True

        # Prepare directory for writing image file
        image_temp_dir = '/'.join((CONF.image.sdk_image_repository,
                                   const.IMAGE_TYPE['CAPTURE'],
                                   os_version,
                                   image_name))
        self._pathutils.mkdir_if_not_exist(image_temp_dir)

        # Call creatediskimage to capture a vm to generate an image
        # TODO:(nafei) to support multiple disk capture
        vdev = capture_devices[0]
        msg = ('Found the device %(vdev)s of %(vm)s for capture'
               % {'vdev': vdev, 'vm': userid})
        LOG.info(msg)

        image_file_name = vdev
        image_file_path = '/'.join((image_temp_dir, image_file_name))
        cmd = ['sudo', '/opt/zthin/bin/creatediskimage', userid, vdev,
               image_file_path, '--compression', str(compress_level)]
        with zvmutils.expect_and_reraise_internal_error(modID='guest'):
            (rc, output) = zvmutils.execute(cmd)
        if rc != 0:
            err_msg = ("creatediskimage failed with return code: %d." % rc)
            err_output = ""
            output_lines = output.split('\n')
            # Collect only the ERROR: lines from creatediskimage output
            for line in output_lines:
                if line.__contains__("ERROR:"):
                    err_output += ("\\n" + line.strip())
            LOG.error(err_msg + err_output)
            self._pathutils.clean_temp_folder(image_temp_dir)
            raise exception.SDKGuestOperationError(rs=5, userid=userid,
                                                   msg=err_output)

        # Move the generated image to netboot folder
        image_final_dir = '/'.join([CONF.image.sdk_image_repository,
                                    const.IMAGE_TYPE['DEPLOY'],
                                    os_version,
                                    image_name])
        image_final_path = '/'.join((image_final_dir,
                                     image_file_name))
        self._pathutils.mkdir_if_not_exist(image_final_dir)
        cmd = ['mv', image_file_path, image_final_path]
        with zvmutils.expect_and_reraise_internal_error(modID='guest'):
            (rc, output) = zvmutils.execute(cmd)
        if rc != 0:
            err_msg = ("move image file from staging to netboot "
                       "folder failed with return code: %d." % rc)
            LOG.error(err_msg)
            self._pathutils.clean_temp_folder(image_temp_dir)
            self._pathutils.clean_temp_folder(image_final_dir)
            raise exception.SDKGuestOperationError(rs=5, userid=userid,
                                                   err=err_msg)
        self._pathutils.clean_temp_folder(image_temp_dir)
        msg = ('Updating the metadata for captured image %s ' % image_name)
        LOG.info(msg)
        # Get md5sum of image
        real_md5sum = self._get_md5sum(image_final_path)
        # Get disk_size_units of image
        disk_size_units = self._get_disk_size_units(image_final_path)
        # Get the image physical size
        image_size = self._get_image_size(image_final_path)
        # Create the image record in image database
        self._ImageDbOperator.image_add_record(image_name, os_version,
                                               real_md5sum, disk_size_units,
                                               image_size, capture_type)
        if restart_flag:
            LOG.info('Try start %s for capture completed successfully.'
% userid) self.guest_start(userid) LOG.info('Image %s is captured and imported to image repository ' 'successfully' % image_name) def guest_get_os_version(self, userid): os_version = '' release_file = self.execute_cmd(userid, 'ls /etc/*-release') if '/etc/os-release' in release_file: # Parse os-release file, part of the output looks like: # NAME="Red Hat Enterprise Linux Server" # ID="rhel" # VERSION_ID="7.0" release_info = self.execute_cmd(userid, 'cat /etc/os-release') release_dict = {} for item in release_info: if item: release_dict[item.split('=')[0]] = item.split('=')[1] distro = release_dict['ID'] version = release_dict['VERSION_ID'] if '"' in distro: distro = eval(distro) if '"' in version: version = eval(version) os_version = '%s%s' % (distro, version) return os_version elif '/etc/redhat-release' in release_file: # The output looks like: # "Red Hat Enterprise Linux Server release 6.7 (Santiago)" distro = 'rhel' release_info = self.execute_cmd(userid, 'cat /etc/redhat-release') distro_version = release_info[0].split()[6] os_version = ''.join((distro, distro_version)) return os_version elif '/etc/SuSE-release' in release_file: # The output for this file looks like: # SUSE Linux Enterprise Server 11 (s390x) # VERSION = 11 # PATCHLEVEL = 3 distro = 'sles' release_info = self.execute_cmd(userid, 'cat /etc/SuSE-release') LOG.debug('OS release info is %s' % release_info) release_version = '.'.join((release_info[1].split('=')[1].strip(), release_info[2].split('=')[1].strip())) os_version = ''.join((distro, release_version)) return os_version elif '/etc/system-release' in release_file: # For some rhel6.7 system, it only have system-release file and # the output looks like: # "Red Hat Enterprise Linux Server release 6.7 (Santiago)" distro = 'rhel' release_info = self.execute_cmd(userid, 'cat /etc/system-release') distro_version = release_info[0].split()[6] os_version = ''.join((distro, distro_version)) return os_version def _get_capture_devices(self, userid, 
                             capture_type='rootonly'):
        """Return the vdev list of the disk(s) to capture for a guest.

        For 'rootonly' the root device is located via /proc/cmdline on the
        guest and mapped back to its DASD vdev through /proc/dasd/devices.
        Other capture types are not implemented yet (returns None).
        """
        capture_devices = []
        if capture_type == 'rootonly':
            # Parse the /proc/cmdline to get root devices
            proc_cmdline = self.execute_cmd(userid, 'cat /proc/cmdline '
                    '| tr " " "\\n" | grep -a "^root=" | cut -c6-')
            root_device_info = proc_cmdline[0]
            if not root_device_info:
                msg = ('Unable to get useful info from /proc/cmdline to '
                       'locate the device associated with the root directory '
                       'on capture source vm %s' % userid)
                raise exception.SDKGuestOperationError(rs=5, userid=userid,
                                                       msg=msg)
            else:
                # root= may reference the device by UUID, LABEL, LVM
                # mapper path (unsupported) or plain device node.
                if 'UUID=' in root_device_info:
                    uuid = root_device_info.split()[0].split('=')[1]
                    root_device = '/'.join(('/dev/disk/by-uuid', uuid))
                elif 'LABEL=' in root_device_info:
                    label = root_device_info.split()[0].split('=')[1]
                    root_device = '/'.join(('/dev/disk/by-label', label))
                elif 'mapper' in root_device_info:
                    msg = ('Capturing a disk with root filesystem on logical'
                           ' volume is not supported')
                    raise exception.SDKGuestOperationError(rs=5,
                                                           userid=userid,
                                                           msg=msg)
                else:
                    root_device = root_device_info

            root_device_node = self.execute_cmd(userid, 'readlink -f %s' %
                                                root_device)[0]
            # Get device node vdev by node name
            cmd = ('cat /proc/dasd/devices | grep -i "is %s" ' %
                   root_device_node.split('/')[-1].rstrip(string.digits))
            result = self.execute_cmd(userid, cmd)[0]
            # The dasd device line starts with "0.0.<vdev>"; chars 4:8 are
            # the 4-digit vdev.
            root_device_vdev = result.split()[0][4:8]
            capture_devices.append(root_device_vdev)
            return capture_devices
        else:
            # For sysclone, parse the user directory entry to get the devices
            # for capture, leave for future
            pass

    def _get_unpackdiskimage_cmd_rhcos(self, userid, image_name,
                                       transportfiles=None, vdev=None,
                                       image_file=None, hostname=None,
                                       skipdiskcopy=False):
        """Build the unpackdiskimage command line for an RHCOS deploy.

        When skipdiskcopy is True the image_name is treated as the os
        version and no image file is written.
        """
        if skipdiskcopy:
            os_version = image_name
            image_disk_type = 'SCSI'
        else:
            os_version = self.image_get_os_distro(image_name)
            # Query image disk type
            image_disk_type = self._get_image_disk_type(image_name)
            if image_disk_type is None:
                err_msg = ("failed to get image disk type for "
                           "image '%(image_name)s'."
                           % {'image_name': image_name})
                raise exception.SDKGuestOperationError(rs=12, userid=userid,
                                                       err=err_msg)
        try:
            # Query vm's disk pool type and image disk type
            from zvmsdk import dist
            _dist_manager = dist.LinuxDistManager()
            linuxdist = _dist_manager.get_linux_dist(os_version)()
            # Read coros fixed ip parameter from tempfile
            fixed_ip_parameter = linuxdist.read_coreos_parameter(userid)
        except Exception as err:
            err_msg = ("failed to read coreos fixed ip "
                       "parameters for userid '%(userid)s',"
                       "error: %(err)s."
                       % {'userid': userid, 'err': err})
            raise exception.SDKGuestOperationError(rs=12, userid=userid,
                                                   err=err_msg)
        if fixed_ip_parameter is None:
            err_msg = ("coreos fixed ip parameters don't exist.")
            raise exception.SDKGuestOperationError(rs=12, userid=userid,
                                                   err=err_msg)
        if hostname:
            # replace hostname to display name instead of userid
            fixed_ip_parameter = fixed_ip_parameter.replace(userid.upper(),
                                                            hostname)
        # read nic device id and change it into the form like
        # "0.0.1000,0.0.1001,0.0.1002"
        nic_id = self._generate_increasing_nic_id(
            fixed_ip_parameter.split(":")[5].replace("enc", ""))

        if image_disk_type == 'SCSI':
            # FCP deploy needs wwpn/lun from the guest's LOADDEV
            (wwpn, lun) = self._get_wwpn_lun(userid)
            if wwpn is None or lun is None:
                err_msg = ("wwpn and lun is required for FCP devices,"
                           " please set LOADDEV for userid %s" % userid)
                raise exception.SDKGuestOperationError(rs=14, userid=userid,
                                                       msg=err_msg)
            wwpn = '0x' + wwpn
            lun = '0x' + lun
            if skipdiskcopy:
                return ['sudo', '/opt/zthin/bin/unpackdiskimage', vdev,
                        wwpn, lun, transportfiles, nic_id,
                        fixed_ip_parameter]
            else:
                return ['sudo', '/opt/zthin/bin/unpackdiskimage', vdev,
                        wwpn, lun, image_file, transportfiles,
                        image_disk_type, nic_id, fixed_ip_parameter]
        else:
            return ['sudo', '/opt/zthin/bin/unpackdiskimage', userid, vdev,
                    image_file, transportfiles, image_disk_type, nic_id,
                    fixed_ip_parameter]

    def grant_user_to_vswitch(self, vswitch_name, userid):
        """Set vswitch to grant user."""
        smt_userid = zvmutils.get_smt_userid()
        requestData = ' '.join((
            'SMAPI %s API Virtual_Network_Vswitch_Set_Extended' % smt_userid,
            "--operands",
            "-k switch_name=%s" % vswitch_name,
            "-k grant_userid=%s" % userid,
            "-k persist=YES"))
        try:
            self._request(requestData)
        except exception.SDKSMTRequestFailed as err:
            LOG.error("Failed to grant user %s to vswitch %s, error: %s"
                      % (userid, vswitch_name, err.format_message()))
            # Translate SMAPI rc/rs into an SDK exception (or re-raise)
            self._set_vswitch_exception(err, vswitch_name)

    def _set_vswitch_exception(self, error, switch_name):
        """Map SMAPI rc/rs codes of a vswitch request to SDK exceptions.

        rc=212/rs=40 means the vswitch does not exist; rc=396 with the
        listed rs codes are conflict conditions; anything else is
        re-raised unchanged.
        """
        if ((error.results['rc'] == 212) and (error.results['rs'] == 40)):
            obj_desc = "Vswitch %s" % switch_name
            raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
                                                   modID='network')
        elif ((error.results['rc'] == 396) and
              (error.results['rs'] == 2846)):
            errmsg = ("Operation is not allowed for a "
                      "VLAN UNAWARE vswitch")
            raise exception.SDKConflictError(modID='network', rs=5,
                                             vsw=switch_name, msg=errmsg)
        elif ((error.results['rc'] == 396) and
              ((error.results['rs'] == 2838) or
               (error.results['rs'] == 2853) or
               (error.results['rs'] == 2856) or
               (error.results['rs'] == 2858) or
               (error.results['rs'] == 3022) or
               (error.results['rs'] == 3033))):
            errmsg = error.format_message()
            raise exception.SDKConflictError(modID='network', rs=5,
                                             vsw=switch_name, msg=errmsg)
        else:
            raise error

    def revoke_user_from_vswitch(self, vswitch_name, userid):
        """Revoke user for vswitch."""
        smt_userid = zvmutils.get_smt_userid()
        requestData = ' '.join((
            'SMAPI %s API Virtual_Network_Vswitch_Set_Extended' % smt_userid,
            "--operands",
            "-k switch_name=%s" % vswitch_name,
            "-k revoke_userid=%s" % userid,
            "-k persist=YES"))
        try:
            self._request(requestData)
        except exception.SDKSMTRequestFailed as err:
            LOG.error("Failed to revoke user %s from vswitch %s, error: %s"
                      % (userid, vswitch_name, err.format_message()))
            self._set_vswitch_exception(err, vswitch_name)

    def image_performance_query(self, uid_list):
        """Call Image_Performance_Query to get guest current status.
        :uid_list: A list of zvm userids to be queried
        """
        if uid_list == []:
            return {}

        if not isinstance(uid_list, list):
            uid_list = [uid_list]

        smt_userid = zvmutils.get_smt_userid()
        rd = ' '.join((
            "SMAPI %s API Image_Performance_Query" % smt_userid,
            "--operands",
            '-T "%s"' % (' '.join(uid_list)),
            "-c %d" % len(uid_list)))
        action = "get performance info of userid '%s'" % str(uid_list)
        with zvmutils.log_and_reraise_smt_request_failed(action):
            results = self._request(rd)

        # Keyword map used to translate the raw response text into a dict
        ipq_kws = {
            'userid': "Guest name:",
            'guest_cpus': "Guest CPUs:",
            'used_cpu_time': "Used CPU time:",
            'elapsed_cpu_time': "Elapsed time:",
            'min_cpu_count': "Minimum CPU count:",
            'max_cpu_limit': "Max CPU limit:",
            'samples_cpu_in_use': "Samples CPU in use:",
            'samples_cpu_delay': "Samples CPU delay:",
            'used_memory': "Used memory:",
            'max_memory': "Max memory:",
            'min_memory': "Minimum memory:",
            'shared_memory': "Shared memory:",
        }

        pi_dict = {}
        pi = {}
        # Each guest's info block is separated by a blank line
        rpi_list = ('\n'.join(results['response'])).split("\n\n")
        for rpi in rpi_list:
            try:
                pi = zvmutils.translate_response_to_dict(rpi, ipq_kws)
            except exception.SDKInternalError as err:
                emsg = err.format_message()
                # when there is only one userid queried and this userid is
                # in 'off'state, the smcli will only returns the queried
                # userid number, no valid performance info returned.
                if (emsg.__contains__("No value matched with keywords.")):
                    continue
                else:
                    raise err
            for k, v in pi.items():
                pi[k] = v.strip('" ')
            if pi.get('userid') is not None:
                pi_dict[pi['userid']] = pi

        return pi_dict

    def system_image_performance_query(self, namelist):
        """Call System_Image_Performance_Query to get guest current status.

        :namelist: A namelist that defined in smapi namelist file.
        """
        smt_userid = zvmutils.get_smt_userid()
        rd = ' '.join((
            "SMAPI %s API System_Image_Performance_Query" % smt_userid,
            "--operands -T %s" % namelist))
        action = "get performance info of namelist '%s'" % namelist
        with zvmutils.log_and_reraise_smt_request_failed(action):
            results = self._request(rd)

        # Same keyword translation as image_performance_query
        ipq_kws = {
            'userid': "Guest name:",
            'guest_cpus': "Guest CPUs:",
            'used_cpu_time': "Used CPU time:",
            'elapsed_cpu_time': "Elapsed time:",
            'min_cpu_count': "Minimum CPU count:",
            'max_cpu_limit': "Max CPU limit:",
            'samples_cpu_in_use': "Samples CPU in use:",
            'samples_cpu_delay': "Samples CPU delay:",
            'used_memory': "Used memory:",
            'max_memory': "Max memory:",
            'min_memory': "Minimum memory:",
            'shared_memory': "Shared memory:",
        }

        pi_dict = {}
        pi = {}
        rpi_list = ('\n'.join(results['response'])).split("\n\n")
        for rpi in rpi_list:
            try:
                pi = zvmutils.translate_response_to_dict(rpi, ipq_kws)
            except exception.SDKInternalError as err:
                emsg = err.format_message()
                # when there is only one userid queried and this userid is
                # in 'off'state, the smcli will only returns the queried
                # userid number, no valid performance info returned.
                if (emsg.__contains__("No value matched with keywords.")):
                    continue
                else:
                    raise err
            for k, v in pi.items():
                pi[k] = v.strip('" ')
            if pi.get('userid') is not None:
                pi_dict[pi['userid']] = pi

        return pi_dict

    def virtual_network_vswitch_query_byte_stats(self):
        """Query byte-level usage statistics of all vswitches."""
        smt_userid = zvmutils.get_smt_userid()
        rd = ' '.join((
            "SMAPI %s API Virtual_Network_Vswitch_Query_Byte_Stats" %
            smt_userid,
            "--operands",
            '-T "%s"' % smt_userid,
            '-k "switch_name=*"'
            ))
        action = "query vswitch usage info"
        with zvmutils.log_and_reraise_smt_request_failed(action):
            results = self._request(rd)
        return self._parse_vswitch_inspect_data(results['response'])

    def get_host_info(self):
        """Return general host inventory info as a dict."""
        with zvmutils.log_and_reraise_smt_request_failed():
            results = self._request("getHost general")
        host_info = zvmutils.translate_response_to_dict(
            '\n'.join(results['response']), const.RINV_HOST_KEYWORDS)

        return host_info

    def get_diskpool_info(self, pool):
        """Return space info of the given disk pool as a dict."""
        with zvmutils.log_and_reraise_smt_request_failed():
            results = self._request("getHost diskpoolspace %s" % pool)
        dp_info = zvmutils.translate_response_to_dict(
            '\n'.join(results['response']), const.DISKPOOL_KEYWORDS)

        return dp_info

    def get_vswitch_list(self):
        """Return the list of vswitch names on the host.

        Returns [] when no vswitch exists (SMAPI rc=212, rs=40) or the
        response is empty.
        """
        smt_userid = zvmutils.get_smt_userid()
        rd = ' '.join((
            "SMAPI %s API Virtual_Network_Vswitch_Query" % smt_userid,
            "--operands",
            "-s \'*\'"))
        try:
            result = self._request(rd)
        except exception.SDKSMTRequestFailed as err:
            if ((err.results['rc'] == 212) and (err.results['rs'] == 40)):
                LOG.warning("No Virtual switch in the host")
                return []
            else:
                LOG.error("Failed to get vswitch list, error: %s" %
                          err.format_message())
                raise

        with zvmutils.expect_invalid_resp_data():
            if (not result['response'] or not result['response'][0]):
                return []
            else:
                data = '\n'.join([s for s in result['response']
                                  if isinstance(s, six.string_types)])
                output = re.findall('VSWITCH:  Name: (.*)', data)
                return output

    def set_vswitch_port_vlan_id(self, vswitch_name, userid, vlan_id):
        """Set the VLAN ID of a guest's port on a vswitch (persistent)."""
        smt_userid = zvmutils.get_smt_userid()
        msg = ('Start to set VLAN ID %(vid)s on 
vswitch %(vsw)s '
               'for guest %(vm)s' % {'vid': vlan_id,
                                     'vsw': vswitch_name,
                                     'vm': userid})
        LOG.info(msg)
        rd = ' '.join((
            "SMAPI %s API Virtual_Network_Vswitch_Set_Extended" %
            smt_userid,
            "--operands",
            "-k grant_userid=%s" % userid,
            "-k switch_name=%s" % vswitch_name,
            "-k user_vlan_id=%s" % vlan_id,
            "-k persist=YES"))
        try:
            self._request(rd)
        except exception.SDKSMTRequestFailed as err:
            LOG.error("Failed to set VLAN ID %s on vswitch %s for user %s, "
                      "error: %s" %
                      (vlan_id, vswitch_name, userid, err.format_message()))
            self._set_vswitch_exception(err, vswitch_name)
        msg = ('Set VLAN ID %(vid)s on vswitch %(vsw)s '
               'for guest %(vm)s successfully'
               % {'vid': vlan_id, 'vsw': vswitch_name, 'vm': userid})
        LOG.info(msg)

    def add_vswitch(self, name, rdev=None, controller='*',
                    connection='CONNECT', network_type='ETHERNET',
                    router="NONROUTER", vid='UNAWARE', port_type='ACCESS',
                    gvrp='GVRP', queue_mem=8, native_vid=1, persist=True):
        """Create a vswitch via Virtual_Network_Vswitch_Create_Extended.

        Keyword defaults mirror the SMAPI defaults; port_type/gvrp/
        native_vid are only sent for VLAN-aware switches.
        """
        smt_userid = zvmutils.get_smt_userid()
        msg = ('Start to create vswitch %s' % name)
        LOG.info(msg)
        rd = ' '.join((
            "SMAPI %s API Virtual_Network_Vswitch_Create_Extended" %
            smt_userid,
            "--operands",
            '-k switch_name=%s' % name))
        if rdev is not None:
            # SMAPI expects blank-separated real device addresses
            rd += " -k real_device_address" +\
                  "=\'%s\'" % rdev.replace(',', ' ')

        if controller != '*':
            rd += " -k controller_name=%s" % controller
        rd = ' '.join((rd,
                       "-k connection_value=%s" % connection,
                       "-k queue_memory_limit=%s" % queue_mem,
                       "-k transport_type=%s" % network_type,
                       "-k vlan_id=%s" % vid,
                       "-k persist=%s" % (persist and 'YES' or 'NO')))
        # Only if vswitch is vlan awared, port_type, gvrp and native_vid are
        # allowed to specified
        if isinstance(vid, int) or vid.upper() != 'UNAWARE':
            rd = ' '.join((rd,
                           "-k port_type=%s" % port_type,
                           "-k gvrp_value=%s" % gvrp,
                           "-k native_vlanid=%s" % native_vid))

        if router is not None:
            rd += " -k routing_value=%s" % router

        msg = ('Start to create vswitch %s' % name)
        LOG.info(msg)
        try:
            self._request(rd)
        except exception.SDKSMTRequestFailed as err:
            LOG.error("Failed to create vswitch %s, error: %s" %
                      (name, err.format_message()))
            raise
        msg = ('Create vswitch %s successfully' % name)
        LOG.info(msg)

    def set_vswitch(self, switch_name, **kwargs):
        """Set vswitch"""
        smt_userid = zvmutils.get_smt_userid()
        rd = ' '.join((
            "SMAPI %s API Virtual_Network_Vswitch_Set_Extended" %
            smt_userid,
            "--operands",
            "-k switch_name=%s" % switch_name))
        # Each keyword argument is passed straight through as a -k operand
        for k, v in kwargs.items():
            rd = ' '.join((rd,
                           "-k %(key)s=\'%(value)s\'" %
                           {'key': k, 'value': v}))
        try:
            self._request(rd)
        except exception.SDKSMTRequestFailed as err:
            LOG.error("Failed to set vswitch %s, error: %s" %
                      (switch_name, err.format_message()))
            self._set_vswitch_exception(err, switch_name)

    def delete_vswitch(self, switch_name, persist=True):
        """Delete a vswitch; a missing vswitch (rc=212, rs=40) is ignored."""
        smt_userid = zvmutils.get_smt_userid()
        msg = ('Start to delete vswitch %s' % switch_name)
        LOG.info(msg)
        rd = ' '.join((
            "SMAPI %s API Virtual_Network_Vswitch_Delete_Extended" %
            smt_userid,
            "--operands",
            "-k switch_name=%s" % switch_name,
            "-k persist=%s" % (persist and 'YES' or 'NO')))
        try:
            self._request(rd)
        except exception.SDKSMTRequestFailed as err:
            results = err.results

            if ((results['rc'] == 212) and
                (results['rs'] == 40)):
                LOG.warning("Vswitch %s does not exist", switch_name)
                return
            else:
                LOG.error("Failed to delete vswitch %s, error: %s" %
                          (switch_name, err.format_message()))
                raise
        msg = ('Delete vswitch %s successfully' % switch_name)
        LOG.info(msg)

    def create_nic(self, userid, vdev=None, nic_id=None,
                   mac_addr=None, active=False):
        """Create a NIC for the guest and return the vdev actually used."""
        nic_vdev = self._get_available_vdev(userid, vdev=vdev)

        LOG.debug('Nic attributes: vdev is %(vdev)s, '
                  'ID is %(id)s, address is %(address)s',
                  {'vdev': nic_vdev,
                   'id': nic_id or 'not specified',
                   'address': mac_addr or 'not specified'})
        self._create_nic(userid, nic_vdev, nic_id=nic_id,
                         mac_addr=mac_addr, active=active)
        return nic_vdev

    def _create_nic_inactive_exception(self, error, userid, vdev):
        """Map SMAPI errors from inactive (DM) NIC creation to SDK errors."""
        if ((error.results['rc'] == 400) and (error.results['rs'] == 12)):
            obj_desc = "Guest %s" % userid
            raise
 exception.SDKConflictError(modID='network', rs=7,
                                             vdev=vdev, userid=userid,
                                             obj=obj_desc)
        elif ((error.results['rc'] == 404) and (error.results['rs'] == 12)):
            obj_desc = "Guest device %s" % vdev
            raise exception.SDKConflictError(modID='network', rs=7,
                                             vdev=vdev, userid=userid,
                                             obj=obj_desc)
        elif ((error.results['rc'] == 404) and (error.results['rs'] == 4)):
            errmsg = error.format_message()
            raise exception.SDKConflictError(modID='network', rs=6,
                                             vdev=vdev, userid=userid,
                                             msg=errmsg)
        else:
            raise error

    def _create_nic_active_exception(self, error, userid, vdev):
        """Map SMAPI errors from active NIC creation to SDK exceptions."""
        if (((error.results['rc'] == 204) and (error.results['rs'] == 4)) or
            ((error.results['rc'] == 204) and (error.results['rs'] == 28))):
            errmsg = error.format_message()
            raise exception.SDKConflictError(modID='network', rs=6,
                                             vdev=vdev, userid=userid,
                                             msg=errmsg)
        elif ((error.results['rc'] == 396) and
              (error.results['rs'] == 2797)):
            errmsg = error.format_message()
            raise exception.SDKConflictError(modID='network', rs=6,
                                             vdev=vdev, userid=userid,
                                             msg=errmsg)
        else:
            raise error

    def _is_active(self, userid):
        """Raise SDKConflictError when the guest is powered off."""
        # Get the vm status
        power_state = self.get_power_state(userid)
        if power_state == 'off':
            LOG.error('The vm %s is powered off, '
                      'active operation is not allowed' % userid)
            raise exception.SDKConflictError(modID='network', rs=1,
                                             userid=userid)

    def _create_nic(self, userid, vdev, nic_id=None, mac_addr=None,
                    active=False):
        """Create a QDIO NIC in the user direct and optionally live.

        Retries the DM request up to 5 times (1/3/5/8s backoff) when the
        guest definition is locked (rc=400, rs=12). When active=True the
        live create is rolled back from the user direct on failure.
        """
        if active:
            self._is_active(userid)

        msg = ('Start to create nic device %(vdev)s for guest %(vm)s'
               % {'vdev': vdev, 'vm': userid})
        LOG.info(msg)

        requestData = ' '.join((
            'SMAPI %s API Virtual_Network_Adapter_Create_Extended_DM' %
            userid,
            "--operands",
            "-k image_device_number=%s" % vdev,
            "-k adapter_type=QDIO"))

        if mac_addr is not None:
            # Only the last 6 hex digits are user-settable (mac_id)
            mac = ''.join(mac_addr.split(':'))[6:]
            requestData += ' -k mac_id=%s' % mac
        retry = 1
        for secs in [1, 3, 5, 8, -1]:
            try:
                self._request(requestData)
                break
            except exception.SDKSMTRequestFailed as err:
                if (err.results['rc'] == 400 and
                        err.results['rs'] == 12 and
                        retry < 5):
                    LOG.info("The VM is locked, will retry")
                    time.sleep(secs)
                    retry += 1
                else:
                    LOG.error("Failed to create nic %s for user %s in "
                              "the guest's user direct, error: %s" %
                              (vdev, userid, err.format_message()))
                    self._create_nic_inactive_exception(err, userid, vdev)
        if active:
            if mac_addr is not None:
                LOG.warning("Ignore the mac address %s when "
                            "adding nic on an active system" % mac_addr)
            requestData = ' '.join((
                'SMAPI %s API Virtual_Network_Adapter_Create_Extended' %
                userid,
                "--operands",
                "-k image_device_number=%s" % vdev,
                "-k adapter_type=QDIO"))

            try:
                self._request(requestData)
            except (exception.SDKSMTRequestFailed,
                    exception.SDKInternalError) as err1:
                # Live create failed: roll the DM definition back so the
                # directory and live system stay consistent.
                msg1 = err1.format_message()
                persist_OK = True
                requestData = ' '.join((
                    'SMAPI %s API Virtual_Network_Adapter_Delete_DM' %
                    userid,
                    "--operands",
                    '-v %s' % vdev))
                try:
                    self._request(requestData)
                except (exception.SDKSMTRequestFailed,
                        exception.SDKInternalError) as err2:
                    results = err2.results
                    msg2 = err2.format_message()
                    # rc=404/rs=8: device already absent -> rollback OK
                    if ((results['rc'] == 404) and
                        (results['rs'] == 8)):
                        persist_OK = True
                    else:
                        persist_OK = False
                if persist_OK:
                    self._create_nic_active_exception(err1, userid, vdev)
                else:
                    raise exception.SDKNetworkOperationError(rs=4,
                                nic=vdev, userid=userid,
                                create_err=msg1, revoke_err=msg2)

        self._NetDbOperator.switch_add_record(userid, vdev, port=nic_id)
        msg = ('Create nic device %(vdev)s for guest %(vm)s successfully'
               % {'vdev': vdev, 'vm': userid})
        LOG.info(msg)

    def get_user_direct(self, userid):
        """Return the guest's user directory entry as a list of lines."""
        with zvmutils.log_and_reraise_smt_request_failed():
            results = self._request("getvm %s directory" % userid)
        return results.get('response', [])

    def get_all_user_direct(self):
        """Return the user directory entries of all guests."""
        with zvmutils.log_and_reraise_smt_request_failed():
            results = self._request("getvm alldirectory")
        return results.get('response', [])

    def get_diskpool_volumes(self, pool):
        """Return the volumes of the given disk pool as a dict."""
        with zvmutils.log_and_reraise_smt_request_failed():
            results = self._request("gethost diskpoolvolumes %s" % pool)
        diskpool_volumes = zvmutils.translate_response_to_dict(
            '\n'.join(results['response']), const.DISKPOOL_VOLUME_KEYWORDS)
        return diskpool_volumes

    def get_volume_info(self):
        """Return host volume info as a dict."""
        with zvmutils.log_and_reraise_smt_request_failed():
            results = self._request("gethost volumeinfo")
        with zvmutils.expect_invalid_resp_data(results):
            volume_info = zvmutils.translate_response_data_to_expect_dict(
                results['response'], 3)
        return volume_info

    def _delete_nic_active_exception(self, error, userid, vdev):
        """Map SMAPI errors from active NIC deletion to SDK exceptions."""
        if ((error.results['rc'] == 204) and (error.results['rs'] == 28)):
            errmsg = error.format_message()
            raise exception.SDKConflictError(modID='network', rs=8,
                                             vdev=vdev, userid=userid,
                                             msg=errmsg)
        else:
            raise error

    def _delete_nic_inactive_exception(self, error, userid, vdev):
        """Map SMAPI errors from inactive NIC deletion to SDK exceptions."""
        if ((error.results['rc'] == 400) and (error.results['rs'] == 12)):
            obj_desc = "Guest %s" % userid
            raise exception.SDKConflictError(modID='network', rs=9,
                                             vdev=vdev, userid=userid,
                                             obj=obj_desc)
        else:
            raise error

    def delete_nic(self, userid, vdev, active=False):
        """Delete a NIC from the user direct and optionally the live guest.

        Handles the case where the NIC is missing from the switch table
        (already removed, or a dedicated OSA device) before issuing the
        SMAPI delete requests.
        """
        if active:
            self._is_active(userid)

        vdev_exist = False
        nic_list = self._NetDbOperator.switch_select_record_for_userid(userid)
        for p in nic_list:
            if (int(p['interface'], 16) == int(vdev, 16)):
                vdev_exist = True
                vdev_info = p
                break

        if not vdev_exist:
            # Device has already be removed from user direct
            LOG.warning("Virtual device %s does not exist in the switch "
                        "table", vdev)
            if active:
                try:
                    # Query the live device to distinguish NIC vs OSA
                    resp = self.execute_cmd(userid, 'vmcp q %s' % vdev)
                    nic_info = "%s ON NIC" % vdev.zfill(4).upper()
                    osa_info = "%s ON OSA" % vdev.zfill(4).upper()
                    if nic_info in resp[0]:
                        pass
                    elif osa_info in resp[0]:
                        self._undedicate_nic(userid, vdev, active=active,
                                             del_active_only=True)
                        return
                    else:
                        LOG.warning("Device %s of guest %s is not "
                                    "network adapter" % (vdev, userid))
                        return
                except exception.SDKSMTRequestFailed as err:
                    emsg = err.format_message()
                    ignored_msg = ('Device %s does not exist'
                                   % vdev.zfill(4).upper())
                    if (emsg.__contains__(ignored_msg)):
                        LOG.warning("Virtual device %s does not exist for "
                                    "active guest %s" % (vdev, userid))
                        return
                    else:
                        raise
            else:
                return
        else:
            # Device hasnot be removed from user direct,
            # check whether it is related to a dedicated OSA device
            if ((vdev_info["comments"] is not None) and
                    (vdev_info["comments"].__contains__('OSA='))):
                self._undedicate_nic(userid, vdev, active=active)
                return

        msg = ('Start to delete nic device %(vdev)s for guest %(vm)s'
               % {'vdev': vdev, 'vm': userid})
        LOG.info(msg)
        if vdev_exist:
            rd = ' '.join((
                "SMAPI %s API Virtual_Network_Adapter_Delete_DM" % userid,
                "--operands",
                '-v %s' % vdev))
            try:
                self._request(rd)
            except exception.SDKSMTRequestFailed as err:
                results = err.results
                emsg = err.format_message()
                # rc=404/rs=8: device already gone from the directory
                if ((results['rc'] == 404) and
                    (results['rs'] == 8)):
                    LOG.warning("Virtual device %s does not exist in "
                                "the guest's user direct", vdev)
                else:
                    LOG.error("Failed to delete nic %s for %s in "
                              "the guest's user direct, error: %s" %
                              (vdev, userid, emsg))
                    self._delete_nic_inactive_exception(err, userid, vdev)
            self._NetDbOperator.switch_delete_record_for_nic(userid, vdev)

        if active:
            rd = ' '.join((
                "SMAPI %s API Virtual_Network_Adapter_Delete" % userid,
                "--operands",
                '-v %s' % vdev))
            try:
                self._request(rd)
            except exception.SDKSMTRequestFailed as err:
                results = err.results
                emsg = err.format_message()
                # rc=204/rs=8: device already gone from the live system
                if ((results['rc'] == 204) and
                    (results['rs'] == 8)):
                    LOG.warning("Virtual device %s does not exist on "
                                "the active guest system", vdev)
                else:
                    LOG.error("Failed to delete nic %s for %s on "
                              "the active guest system, error: %s" %
                              (vdev, userid, emsg))
                    self._delete_nic_active_exception(err, userid, vdev)
        msg = ('Delete nic device %(vdev)s for guest %(vm)s successfully'
               % {'vdev': vdev, 'vm': userid})
        LOG.info(msg)

    def _couple_active_exception(self, error, userid, vdev, vswitch):
        """Map SMAPI errors from active couple operations to SDK errors."""
        if ((error.results['rc'] == 212) and
            ((error.results['rs'] == 28) or
             (error.results['rs'] == 8))):
            errmsg = error.format_message()
            raise exception.SDKConflictError(modID='network', rs=10,
                                             vdev=vdev, userid=userid,
                                             vsw=vswitch, msg=errmsg)
        elif ((error.results['rc'] == 212) and
              (error.results['rs'] == 40)):
            obj_desc = "Vswitch %s" % vswitch
            raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
                                                   modID='network')
        elif ((error.results['rc'] == 204) and (error.results['rs'] == 8)):
            obj_desc = "Guest device %s" % vdev
            raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
                                                   modID='network')
        elif ((error.results['rc'] == 396) and
              ((error.results['rs'] == 2788) or
               (error.results['rs'] == 2848) or
               (error.results['rs'] == 3034) or
               (error.results['rs'] == 6011))):
            errmsg = error.format_message()
            raise exception.SDKConflictError(modID='network', rs=10,
                                             vdev=vdev, userid=userid,
                                             vsw=vswitch, msg=errmsg)
        else:
            raise error

    def _couple_inactive_exception(self, error, userid, vdev, vswitch):
        """Map SMAPI errors from inactive couple operations to SDK errors."""
        if ((error.results['rc'] == 412) and (error.results['rs'] == 28)):
            errmsg = error.format_message()
            raise exception.SDKConflictError(modID='network', rs=10,
                                             vdev=vdev, userid=userid,
                                             vsw=vswitch, msg=errmsg)
        elif ((error.results['rc'] == 400) and (error.results['rs'] == 12)):
            obj_desc = "Guest %s" % userid
            raise exception.SDKConflictError(modID='network', rs=11,
                                             vdev=vdev, userid=userid,
                                             vsw=vswitch, obj=obj_desc)
        elif ((error.results['rc'] == 400) and (error.results['rs'] == 4)):
            obj_desc = "Guest %s" % vdev
            raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
                                                   modID='network')
        elif ((error.results['rc'] == 404) and (error.results['rs'] == 12)):
            obj_desc = "Guest device %s" % vdev
            raise exception.SDKConflictError(modID='network', rs=11,
                                             vdev=vdev, userid=userid,
                                             vsw=vswitch, obj=obj_desc)
        elif ((error.results['rc'] == 404) and (error.results['rs'] == 8)):
            obj_desc = "Guest device %s" % vdev
            raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
                                                   modID='network')
        else:
            raise error

    def _couple_nic(self, userid, vdev, vswitch_name,
                    active=False):
        """Couple NIC to vswitch by adding vswitch into user direct."""
        if active:
            self._is_active(userid)

        requestData = ' '.join((
            'SMAPI %s' % userid,
            'API Virtual_Network_Adapter_Connect_Vswitch',
            "--operands",
            "-v %s" % vdev,
            "-n %s" % vswitch_name))

        try:
            self._request(requestData)
        except (exception.SDKSMTRequestFailed,
                exception.SDKInternalError) as err1:
            results1 = err1.results
            msg1 = err1.format_message()
            # rc=204/rs=20: already connected on the live system -> OK
            if ((results1 is not None) and
                (results1['rc'] == 204) and
                (results1['rs'] == 20)):
                LOG.warning("Virtual device %s already connected "
                            "on the active guest system", vdev)
            else:
                # Roll back the DM coupling so directory and live system
                # stay consistent.
                persist_OK = True
                requestData = ' '.join((
                    'SMAPI %s' % userid,
                    'API Virtual_Network_Adapter_Disconnect_DM',
                    "--operands",
                    '-v %s' % vdev))
                try:
                    self._request(requestData)
                except (exception.SDKSMTRequestFailed,
                        exception.SDKInternalError) as err2:
                    results2 = err2.results
                    msg2 = err2.format_message()
                    # rc=212/rs=32: adapter not connected -> rollback OK
                    if ((results2 is not None) and
                        (results2['rc'] == 212) and
                        (results2['rs'] == 32)):
                        persist_OK = True
                    else:
                        persist_OK = False
                if persist_OK:
                    self._couple_active_exception(err1, userid, vdev,
                                                  vswitch_name)
                else:
                    raise exception.SDKNetworkOperationError(rs=3,
                                nic=vdev, vswitch=vswitch_name,
                                couple_err=msg1, revoke_err=msg2)

        """Update information in switch table."""
        self._NetDbOperator.switch_update_record_with_switch(userid, vdev,
                                                             vswitch_name)
        msg = ('Couple nic device %(vdev)s of guest %(vm)s '
               'with vswitch %(vsw)s successfully'
               % {'vdev': vdev, 'vm': userid, 'vsw': vswitch_name})
        LOG.info(msg)

    def couple_nic_to_vswitch(self, userid, nic_vdev,
                              vswitch_name, active=False,
                              vlan_id=-1, port_type='ACCESS'):
        """Couple nic to vswitch."""
        if active:
            msg = ("both in the user direct of guest %s and on "
                   "the active guest system" % userid)
        else:
            msg = "in the user direct of guest %s" % userid
        LOG.debug("Connect nic %s to switch %s %s",
                  nic_vdev, vswitch_name, msg)

        # previously we use Virtual_Network_Adapter_Connect_Vswitch_DM
        # but due to limitation in SMAPI, we have to create such user
        # direct by our own due to no way to add VLAN ID
        msg = ('Start to couple nic device %(vdev)s of guest %(vm)s '
               'with vswitch %(vsw)s with vlan %(vlan_id)s:'
               % {'vdev': nic_vdev, 'vm': userid, 'vsw': vswitch_name,
                  'vlan_id': vlan_id})
        LOG.info(msg)
        user_direct = self.get_user_direct(userid)
        new_user_direct = []
        nicdef = "NICDEF %s" % nic_vdev.upper()
        for ent in user_direct:
            if len(ent) > 0:
                new_user_direct.append(ent)
                if ent.upper().startswith(nicdef):
                    # If NIC already coupled with this vswitch,
                    # return and skip following actions,
                    # such as migrating VM
                    if ("LAN SYSTEM %s" % vswitch_name) in ent:
                        LOG.info("NIC %s already coupled to vswitch %s, "
                                 "skip couple action."
                                 % (nic_vdev, vswitch_name))
                        return
                    # NOTE(review): the original NICDEF line was already
                    # appended above, so the new LAN SYSTEM line is added
                    # in addition to it -- confirm this is intended.
                    # vlan_id < 0 means no VLAN ID given
                    v = nicdef
                    if vlan_id < 0:
                        v += " LAN SYSTEM %s" % vswitch_name
                    else:
                        v += " LAN SYSTEM %s VLAN %s PORTTYPE %s" \
                             % (vswitch_name, vlan_id, port_type)
                    new_user_direct.append(v)
        try:
            self._lock_user_direct(userid)
        except exception.SDKSMTRequestFailed as e:
            raise exception.SDKGuestOperationError(rs=9, userid=userid,
                                                   err=e.format_message())
        # Replace user directory
        try:
            self._replace_user_direct(userid, new_user_direct)
        except exception.SDKSMTRequestFailed as e:
            # Replacing failed: try to unlock the definition before
            # surfacing the original error.
            rd = ("SMAPI %s API Image_Unlock_DM " % userid)
            try:
                self._request(rd)
            except exception.SDKSMTRequestFailed as err2:
                # ignore 'not locked' error
                if ((err2.results['rc'] == 400) and (
                    err2.results['rs'] == 24)):
                    LOG.debug("Guest '%s' unlocked successfully." % userid)
                    pass
                else:
                    # just print error and ignore this unlock error
                    msg = ("Unlock definition of guest '%s' failed "
                           "with SMT error: %s" %
                           (userid, err2.format_message()))
                    LOG.error(msg)
            raise exception.SDKGuestOperationError(rs=10, userid=userid,
                                                   err=e.format_message())
        self._couple_nic(userid, nic_vdev, vswitch_name, active=active)

    def _uncouple_active_exception(self, error, userid, vdev):
        """Map SMAPI errors from active uncouple operations to SDK errors."""
        if ((error.results['rc'] == 204) and (error.results['rs'] == 8)):
            obj_desc = "Guest device %s" % vdev
            raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
                                                   modID='network')
        elif ((error.results['rc'] == 204) and (error.results['rs'] == 28)):
            errmsg = error.format_message()
            raise exception.SDKConflictError(modID='network', rs=12,
                                             vdev=vdev, userid=userid,
                                             msg=errmsg)
        else:
            raise error

    def _uncouple_inactive_exception(self, error, userid, vdev):
        """Map SMAPI errors from inactive uncouple operations to SDK
        exceptions.
        """
        if ((error.results['rc'] == 404) and (error.results['rs'] == 8)):
            obj_desc = "Guest device %s" % vdev
            raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
                                                   modID='network')
        elif ((error.results['rc'] == 400) and (error.results['rs'] == 4)):
            obj_desc = "Guest %s" % vdev
            raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
                                                   modID='network')
        elif ((error.results['rc'] == 400) and (error.results['rs'] == 12)):
            obj_desc = "Guest %s" % userid
            raise exception.SDKConflictError(modID='network', rs=13,
                                             vdev=vdev, userid=userid,
                                             obj=obj_desc)
        else:
            raise error

    def _uncouple_nic(self, userid, vdev, active=False):
        """Uncouple NIC from vswitch"""
        if active:
            self._is_active(userid)

        msg = ('Start to uncouple nic device %(vdev)s of guest %(vm)s'
               % {'vdev': vdev, 'vm': userid})
        LOG.info(msg)

        requestData = ' '.join((
            'SMAPI %s' % userid,
            "API Virtual_Network_Adapter_Disconnect_DM",
            "--operands",
            "-v %s" % vdev))

        try:
            self._request(requestData)
        except (exception.SDKSMTRequestFailed,
                exception.SDKInternalError) as err:
            results = err.results
            emsg = err.format_message()
            # rc=212/rs=32: adapter not connected in the directory -> OK
            if ((results is not None) and
                (results['rc'] == 212) and
                (results['rs'] == 32)):
                LOG.warning("Virtual device %s is already disconnected "
                            "in the guest's user direct", vdev)
            else:
                LOG.error("Failed to uncouple nic %s in the guest's user "
                          "direct, error: %s" % (vdev, emsg))
                self._uncouple_inactive_exception(err, userid, vdev)

        """Update information in switch table."""
        self._NetDbOperator.switch_update_record_with_switch(userid, vdev,
                                                             None)

        # the inst must be active, or this call will failed
        if active:
            requestData = ' '.join((
                'SMAPI %s' % userid,
                'API Virtual_Network_Adapter_Disconnect',
                "--operands",
                "-v %s" % vdev))

            try:
                self._request(requestData)
            except (exception.SDKSMTRequestFailed,
                    exception.SDKInternalError) as err:
                results = err.results
                emsg = err.format_message()
                # rc=204/rs=48: adapter not connected on the live system
                if ((results is not None) and
                    (results['rc'] == 204) and
                    (results['rs'] == 48)):
                    LOG.warning("Virtual device %s is already "
                                "disconnected on the active "
                                "guest system", vdev)
                else:
                    LOG.error("Failed to uncouple nic %s on the active "
                              "guest system, error: %s" % (vdev, emsg))
                    self._uncouple_active_exception(err, userid, vdev)

        msg = ('Uncouple nic device %(vdev)s of guest %(vm)s successfully'
               % {'vdev': vdev, 'vm': userid})
        LOG.info(msg)

    def uncouple_nic_from_vswitch(self, userid, nic_vdev,
                                  active=False):
        """Uncouple a nic from its vswitch (user direct, optionally live)."""
        if active:
            msg = ("both in the user direct of guest %s and on "
                   "the active guest system" % userid)
        else:
            msg = "in the user direct of guest %s" % userid
        LOG.debug("Disconnect nic %s with network %s",
                  nic_vdev, msg)
        self._uncouple_nic(userid, nic_vdev, active=active)

    def _delete_uesrid_again(self, rd, userid):
        # NOTE(review): method name typo 'uesrid' kept -- renaming would
        # break existing callers.
        # ok, this is ugly, as we never know when this will happen
        # so we try the stop again and ignore any exception here
        try:
            self.guest_stop(userid, timeout=30, poll_interval=10)
        except Exception as err:
            # NOTE(review): a generic Exception may not define
            # format_message(); this line can itself raise
            # AttributeError -- confirm intended exception types.
            msg = "SMT error: %s" % err.format_message()
            LOG.info("guest force stop when 596/6831: %s" % msg)

        # wait some time to let guest shutoff and cleanup
        time.sleep(2)
        try:
            self._request(rd)
        except Exception as err:
            msg = "SMT error: %s" % err.format_message()
            LOG.info("guest force delete when 596/6831: %s" % msg)

    def delete_userid(self, userid):
        """Delete the guest's definition (and directory entry) via SMAPI.

        Several SMAPI failures are deliberately treated as success so the
        call is idempotent; see the per-code comments below.
        """
        rd = ' '.join(('deletevm', userid, 'directory'))
        try:
            self._request(rd)
        except exception.SDKSMTRequestFailed as err:
            if err.results['rc'] == 400 and err.results['rs'] == 4:
                # guest vm definition not found
                LOG.debug("The guest %s does not exist." % userid)
                return
            # ignore delete VM not finished error
            if err.results['rc'] == 596 and err.results['rs'] == 6831:
                # 596/6831 means delete VM not finished yet
                LOG.warning("The guest %s deleted with 596/6831" % userid)
                self._delete_uesrid_again(rd, userid)
                return
            # ignore delete VM with VDISK format error
            # DirMaint does not support formatting TDISK or VDISK extents.
            if err.results['rc'] == 596 and err.results['rs'] == 3543:
                LOG.debug("The guest %s deleted with 596/3543" % userid)
                return
            # The CP or CMS command shown resulted in a non-zero
            # return code. This message is frequently preceded by
            # a DMK, HCP, or DMS error message that describes the cause
            # https://www-01.ibm.com/servers/resourcelink/svc0302a.nsf/
            # pages/zVMV7R2gc246282/$file/hcpk2_v7r2.pdf
            if err.results['rc'] == 596 and err.results['rs'] == 2119:
                LOG.debug("The guest %s deleted with 596/2119" % userid)
                return
            msg = "SMT error: %s" % err.format_message()
            raise exception.SDKSMTRequestFailed(err.results, msg)

    def delete_vm(self, userid):
        """Delete a guest and clean up everything recorded about it:
        SMAPI namelist membership, vswitch grants, network/guest DB
        records and the guest's persistent folder."""
        self.delete_userid(userid)

        # remove userid from smapi namelist
        self.namelist_remove(zvmutils.get_namelist(), userid)

        # revoke userid from vswitch
        action = "revoke id %s authority from vswitch" % userid
        with zvmutils.log_and_reraise_sdkbase_error(action):
            switch_info = self._NetDbOperator.switch_select_record_for_userid(
                userid)
        # De-duplicate: a guest may have several NICs on the same vswitch.
        switch_list = set()
        for item in switch_info:
            switch_list.add(item['switch'])

        for item in switch_list:
            if item is not None:
                self.revoke_user_from_vswitch(item, userid)

        # cleanup db record from network table
        action = "delete network record for user %s" % userid
        with zvmutils.log_and_reraise_sdkbase_error(action):
            self._NetDbOperator.switch_delete_record_for_userid(userid)

        # TODO: cleanup db record from volume table
        pass

        # cleanup persistent folder for guest
        self._pathutils.remove_guest_path(userid)

        # cleanup db record from guest table
        action = "delete guest %s from database" % userid
        with zvmutils.log_and_reraise_sdkbase_error(action):
            self._GuestDbOperator.delete_guest_by_userid(userid)

    def execute_cmd(self, userid, cmdStr):
        """Execute a command inside the guest over the IUCV channel.

        :param userid: guest userid
        :param cmdStr: command string to run inside the guest
        :returns: the 'response' field of the SMT request result
        :raises: re-raises SMT request failures (logged by the context
                 manager)
        """
        requestData = 'cmdVM ' + userid + ' CMD \'' + cmdStr + '\''
        with zvmutils.log_and_reraise_smt_request_failed(action='execute '
        'command on vm via iucv channel'):
            results = self._request(requestData)
        ret = results['response']
        return ret

    def execute_cmd_direct(self, userid, cmdStr, timeout=None):
        """Execute a command inside the guest, returning the raw request
        result.

        Unlike execute_cmd, this calls self._smt.request directly (no
        log-and-reraise wrapping) and supports an optional timeout that is
        appended to the request string.
        """
        if not timeout:
            requestData = 'cmdVM ' + userid + ' CMD \'' + cmdStr + '\''
        else:
            requestData = ("cmdVM %s CMD \'%s\' %s" % (userid, cmdStr,
                                                       timeout))
        results = self._smt.request(requestData)
        return results

    def image_import(self, image_name, url, image_meta, remote_host=None):
        """Import the image specified in url to SDK image repository, and
        create a record in image db, the imported images are located in
        image_repository/prov_method/os_version/image_name/, for example,
        /opt/sdk/images/netboot/rhel7.2/90685d2b-167bimage/0100

        :param image_name: unique name for the image in the SDK image DB
        :param url: source location ('file://...' or 'http://...')
        :param image_meta: dict with at least 'os_version'; may carry
                           'md5sum' and (for RHCOS) 'disk_type'
        :param remote_host: optional 'user@ip' to fetch the image from
        """
        image_info = []
        try:
            image_info = self._ImageDbOperator.image_query_record(image_name)
        except exception.SDKObjectNotExistError:
            # Not found is the normal path for a fresh import.
            msg = ("The image record %s doens't exist in SDK image datebase,"
                   " will import the image and create record now" % image_name)
            LOG.info(msg)

        # Ensure the specified image is not exist in image DB
        if image_info:
            msg = ("The image name %s has already exist in SDK image "
                   "database, please check if they are same image or consider"
                   " to use a different image name for import" % image_name)
            LOG.error(msg)
            raise exception.SDKImageOperationError(rs=13, img=image_name)

        try:
            image_os_version = image_meta['os_version'].lower()
            target_folder = self._pathutils.create_import_image_repository(
                image_os_version, const.IMAGE_TYPE['DEPLOY'],
                image_name)
        except Exception as err:
            msg = ('Failed to create repository to store image %(img)s with '
                   'error: %(err)s, please make sure there are enough space '
                   'on zvmsdk server and proper permission to create the '
                   'repository' % {'img': image_name,
                                   'err': six.text_type(err)})
            LOG.error(msg)
            raise exception.SDKImageOperationError(rs=14, msg=msg)

        if self.is_rhcos(image_os_version):
            # RHCOS images must declare their disk type; it is stored in
            # the record's comments field as a stringified dict.
            image_disk_type = image_meta.get('disk_type')
            if ((image_disk_type is None) or
                ((image_disk_type.upper() != "DASD" and
                  image_disk_type.upper() != "SCSI"))):
                msg = ('Disk type is required for RHCOS image import, '
                       'the value should be DASD or SCSI')
                LOG.error(msg)
                raise exception.SDKImageOperationError(rs=14, msg=msg)
            else:
                comments = {'disk_type': image_disk_type.upper()}
                comments = str(comments)
        else:
            comments = None

        try:
            import_image_fn = urlparse.urlparse(url).path.split('/')[-1]
            import_image_fpath = '/'.join([target_folder, import_image_fn])
            self._scheme2backend(urlparse.urlparse(url).scheme).image_import(
                image_name, url,
                import_image_fpath,
                remote_host=remote_host)

            # Check md5 after import to ensure import a correct image
            # TODO change to use query image name in DB
            expect_md5sum = image_meta.get('md5sum')
            real_md5sum = self._get_md5sum(import_image_fpath)
            if expect_md5sum and expect_md5sum != real_md5sum:
                msg = ("The md5sum after import is not same as source image,"
                       " the image has been broken")
                LOG.error(msg)
                raise exception.SDKImageOperationError(rs=4)

            # After import to image repository, figure out the image type is
            # single disk image or multiple-disk image,if multiple disks image,
            # extract it, if it's single image, rename its name to be same as
            # specific vdev
            # TODO: (nafei) use sub-function to check the image type
            image_type = 'rootonly'
            if image_type == 'rootonly':
                final_image_fpath = '/'.join([target_folder,
                                              CONF.zvm.user_root_vdev])
                os.rename(import_image_fpath, final_image_fpath)
            elif image_type == 'alldisks':
                # For
                # multiple disks image, extract it, after extract, the
                # content under image folder is like: 0100, 0101, 0102
                # and remove the image file 0100-0101-0102.tgz
                pass

            # TODO: put multiple disk image into consideration, update the
            # disk_size_units and image_size db field
            if not self.is_rhcos(image_os_version):
                disk_size_units = self._get_disk_size_units(final_image_fpath)
            else:
                disk_size_units = self._get_disk_size_units_rhcos(
                    final_image_fpath)
            image_size = self._get_image_size(final_image_fpath)

            # TODO: update the real_md5sum field to include each disk image
            self._ImageDbOperator.image_add_record(image_name,
                                                   image_os_version,
                                                   real_md5sum,
                                                   disk_size_units,
                                                   image_size,
                                                   image_type,
                                                   comments=comments)
            LOG.info("Image %s is import successfully" % image_name)
        except Exception:
            # Cleanup the image from image repository
            self._pathutils.clean_temp_folder(target_folder)
            raise

    def image_export(self, image_name, dest_url, remote_host=None):
        """Export the specific image to remote host or local file system

        :param image_name: image name that can be uniquely identify an image
        :param dest_url: the location to store exported image, eg.
            /opt/images, the image will be stored in folder /opt/images/
        :param remote_host: the server that export image to, the format is
            username@IP eg. nova@192.168.99.1, if remote_host is None, it
            means the image will be stored in local server
        :returns: a dictionary that contains the exported image info
            {
             'image_name': the image_name that exported
             'image_path': the image_path after exported
             'os_version': the os version of the exported image
             'md5sum': the md5sum of the original image
             'comments': the comments of the original image
            }
        """
        image_info = self._ImageDbOperator.image_query_record(image_name)
        if not image_info:
            msg = ("The image %s does not exist in image repository"
                   % image_name)
            LOG.error(msg)
            raise exception.SDKImageOperationError(rs=20, img=image_name)

        image_type = image_info[0]['type']
        # TODO: (nafei) according to image_type, detect image exported path
        # For multiple disk image, make the tgz firstly, the specify the
        # source_path to be something like: 0100-0101-0102.tgz
        if image_type == 'rootonly':
            source_path = '/'.join([CONF.image.sdk_image_repository,
                                    const.IMAGE_TYPE['DEPLOY'],
                                    image_info[0]['imageosdistro'],
                                    image_name,
                                    CONF.zvm.user_root_vdev])
        else:
            # NOTE(review): multiple-disk export is not implemented yet;
            # source_path would be unbound here -- confirm before relying
            # on non-'rootonly' image types.
            pass

        self._scheme2backend(urlparse.urlparse(dest_url).scheme).image_export(
            source_path, dest_url,
            remote_host=remote_host)

        # TODO: (nafei) for multiple disks image, update the expect_dict
        # to be the tgz's md5sum
        export_dict = {'image_name': image_name,
                       'image_path': dest_url,
                       'os_version': image_info[0]['imageosdistro'],
                       'md5sum': image_info[0]['md5sum'],
                       'comments': image_info[0]['comments']}
        LOG.info("Image %s export successfully" % image_name)
        return export_dict

    def _get_image_disk_size_units(self, image_path):
        """ Return a comma separated string to indicate the image disk size
            and units for each image disk file under image_path
            For single disk image , it looks like: 0100=3338:CYL
            For multiple disk image, it looks like:
            0100=3338:CYL,0101=4194200:BLK, 0102=4370:CYL

            Placeholder: not implemented yet.
        """
        pass

    def _get_disk_size_units(self, image_path):
        """Extract the disk size and unit (e.g. '3338:CYL') from the
        metadata embedded in the first 48 bytes of a captured image."""
        command = 'hexdump -n 48 -C %s' % image_path
        (rc, output) = zvmutils.execute(command)
        LOG.debug("hexdump result is %s" % output)
        if rc:
            msg = ("Error happened when executing command hexdump with"
                   "reason: %s" % output)
            LOG.error(msg)
            raise exception.SDKImageOperationError(rs=5)

        try:
            # Fixed offsets into the hexdump text where the capture tool
            # recorded the size and unit fields.
            root_disk_size = int(output[144:156])
            disk_units = output[220:223]
            root_disk_units = ':'.join([str(root_disk_size), disk_units])
        except ValueError:
            msg = ("Image file at %s is missing built-in disk size "
                   "metadata, it was probably not captured by SDK"
                   % image_path)
            LOG.error(msg)
            raise exception.SDKImageOperationError(rs=6)

        # Only FBA and CKD disk formats are recognized.
        if 'FBA' not in output and 'CKD' not in output:
            raise exception.SDKImageOperationError(rs=7)

        LOG.debug("The image's root_disk_units is %s" % root_disk_units)
        return root_disk_units

    def _get_disk_size_units_rhcos(self, image_path):
        """Compute the disk size in cylinders ('<n>:CYL') for an RHCOS
        image by reading the device size out of fdisk."""
        command = "fdisk -b 4096 -l %s | head -2 | awk '{print $5}'" % (
            image_path)
        rc = 0
        output = ""
        try:
            # shell should be set True because it is a shell command with
            # pipeline, so can not use utils.execute function here
            output = subprocess.check_output(command, shell=True,
                                             stderr=subprocess.STDOUT)
            output = bytes.decode(output)
        except subprocess.CalledProcessError as err:
            rc = err.returncode
            output = err.output
        except Exception as err:
            err_msg = ('Command "%s" Error: %s' % (' '.join(command),
                                                   str(err)))
            raise exception.SDKInternalError(msg=err_msg)

        # Output must be purely numeric (allowing '*' and newlines);
        # anything else indicates fdisk failed.
        if rc or output.strip('1234567890*\n'):
            msg = ("Error happened when executing command fdisk with "
                   "reason: %s" % output)
            LOG.error(msg)
            raise exception.SDKImageOperationError(rs=8)

        image_size = output.split()[0]
        try:
            # 737280 bytes per cylinder; round up to whole cylinders.
            cyl = (float(image_size)) / 737280
            cyl = str(int(math.ceil(cyl)))
        except Exception:
            msg = ("Failed to convert %s to a number of cylinders."
                   % image_size)
            LOG.error(msg)
            raise exception.SDKImageOperationError(rs=14, msg=msg)
        disk_units = "CYL"
        root_disk_units = ':'.join([str(cyl), disk_units])
        LOG.debug("The image's root_disk_units is %s" % root_disk_units)
        return root_disk_units

    def _get_image_size(self, image_path):
        """Return disk size in bytes"""
        command = 'du -b %s' % image_path
        (rc, output) = zvmutils.execute(command)
        if rc:
            msg = ("Error happened when executing command du -b with"
                   "reason: %s" % output)
            LOG.error(msg)
            raise exception.SDKImageOperationError(rs=8)
        # du prints '<size>\t<path>'; keep only the size field.
        size = output.split()[0]
        return size

    def _get_image_path_by_name(self, image_name):
        """Build the on-disk folder path of an image from its DB record."""
        try:
            target_info = self._ImageDbOperator.image_query_record(image_name)
        except exception.SDKObjectNotExistError:
            msg = ("The image %s does not exist in image repository"
                   % image_name)
            LOG.error(msg)
            raise exception.SDKImageOperationError(rs=20, img=image_name)

        # TODO: (nafei) Handle multiple disks image deploy
        image_path = '/'.join([CONF.image.sdk_image_repository,
                               const.IMAGE_TYPE['DEPLOY'],
                               target_info[0]['imageosdistro'],
                               image_name])
        return image_path

    def _scheme2backend(self, scheme):
        """Map a URL scheme ('file'/'http') to its import/export backend
        class; raise SDKImageOperationError for unknown schemes."""
        try:
            return {
                    "file": FilesystemBackend,
                    "http": HTTPBackend,
                    # "https": HTTPSBackend
            }[scheme]
        except KeyError:
            msg = ("No backend found for '%s'" % scheme)
            LOG.error(msg)
            raise exception.SDKImageOperationError(rs=2, schema=scheme)

    def _get_md5sum(self, fpath):
        """Calculate the md5sum of the specific image file

        Accepts either a path to an existing file or a file-like object
        (StringIO/StringO/IOBase); returns '' for anything else.
        """
        try:
            current_md5 = hashlib.md5()
            if isinstance(fpath, six.string_types) and os.path.exists(fpath):
                with open(fpath, "rb") as fh:
                    for chunk in self._read_chunks(fh):
                        current_md5.update(chunk)
            elif (fpath.__class__.__name__ in ["StringIO", "StringO"] or
                  isinstance(fpath, IOBase)):
                for chunk in self._read_chunks(fpath):
                    current_md5.update(chunk)
            else:
                return ""
            return current_md5.hexdigest()
        except Exception:
            msg = ("Failed to calculate the image's md5sum")
            LOG.error(msg)
            raise exception.SDKImageOperationError(rs=3)

    def _read_chunks(self, fh):
        """Yield CHUNKSIZE-byte chunks of ``fh``, starting from offset 0.

        The while/else rewinds the handle after the loop completes, so
        the caller can reuse it.
        """
        fh.seek(0)
        chunk = fh.read(CHUNKSIZE)
        while chunk:
            yield chunk
            chunk = fh.read(CHUNKSIZE)
        else:
            fh.seek(0)

    def image_delete(self, image_name):
        """Delete an image's files and its DB record; a missing image
        (rc=300/rs=20) is logged and ignored."""
        # Delete image file
        try:
            self._delete_image_file(image_name)
            # Delete image record from db
            self._ImageDbOperator.image_delete_record(image_name)
        except exception.SDKImageOperationError as err:
            results = err.results
            if ((results['rc'] == 300) and (results['rs'] == 20)):
                LOG.warning("Image %s does not exist", image_name)
                return
            else:
                LOG.error("Failed to delete image %s, error: %s" %
                          (image_name, err.format_message()))
                raise
        msg = ('Delete image %s successfully' % image_name)
        LOG.info(msg)

    def _delete_image_file(self, image_name):
        """Remove the image's folder from the image repository."""
        image_path = self._get_image_path_by_name(image_name)
        self._pathutils.clean_temp_folder(image_path)

    def _get_image_last_access_time(self, image_name, raise_exception=True):
        """Get the last access time of the image.

        :param raise_exception: when False, return -1 instead of raising
                                if the image file is missing
        """
        image_file = os.path.join(self._get_image_path_by_name(image_name),
                                  CONF.zvm.user_root_vdev)

        if not os.path.exists(image_file):
            if raise_exception:
                msg = 'Failed to get time stamp of image:%s' % image_name
                LOG.error(msg)
                raise exception.SDKImageOperationError(rs=23, img=image_name)
            else:
                # An invalid timestamp
                return -1

        atime = os.path.getatime(image_file)
        return atime

    def image_query(self, image_name=None):
        """Query image records, decorating each with its last access time.

        :param image_name: query a single image; None queries all
        :returns: list of image record dicts ([] when none found)
        """
        image_info = self._ImageDbOperator.image_query_record(image_name)
        if not image_info:
            # because database maybe None, so return nothing here
            return []

        # if image_name is not None, means there is only one record
        if image_name:
            last_access_time = self._get_image_last_access_time(
                image_name, raise_exception=False)
            image_info[0]['last_access_time'] = last_access_time
        else:
            for item in image_info:
                image_name = item['imagename']
                # set raise_exception to false because one failed
                # may stop processing all the items in the list
                last_access_time = self._get_image_last_access_time(
                    image_name, raise_exception=False)
                item['last_access_time'] = last_access_time
        return image_info
        used to process additional disk into
        target vm's reader.
        '''
        for idx, disk in enumerate(disk_info):
            vdev = disk.get('vdev') or self.generate_disk_vdev(
                offset = (idx + 1))
            fmt = disk.get('format')
            mount_dir = disk.get('mntdir') or ''.join(['/mnt/ephemeral',
                                                       str(vdev)])

            # the mount point of swap partition is swap
            if fmt == "swap":
                mount_dir = "swap"

            disk_parms = self._generate_disk_parmline(vdev, fmt, mount_dir)
            # Hook script that runs inside the guest at activation time.
            func_name = '/var/lib/zvmsdk/setupDisk'
            self.aemod_handler(userid, func_name, disk_parms)

        # trigger do-script
        if self.get_power_state(userid) == 'on':
            self.execute_cmd(userid, "/usr/bin/zvmguestconfigure start")

    def aemod_handler(self, instance_name, func_name, parms):
        """Punch an activation-engine mod (aemod) invocation for the guest.

        :param instance_name: guest userid
        :param func_name: path of the script to invoke in the guest
        :param parms: quoted parameter string passed via --invparms
        """
        rd = ' '.join(['changevm', instance_name, 'aemod', func_name,
                       '--invparms', parms])
        # NOTE(review): this concatenates only the FIRST CHARACTER of
        # parms with the instance name; looks unintended -- confirm
        # whether 'parms + instance_name' was meant.
        action = parms[0] + instance_name
        with zvmutils.log_and_reraise_smt_request_failed(action):
            self._request(rd)

    def get_user_console_output(self, userid):
        """Collect the guest's console log.

        Transfers the console output to the reader, then reads each spool
        file with vmur and concatenates the successfully-read pieces.
        """
        # get console into reader
        rd = 'getvm %s consoleoutput' % userid
        action = 'get console log reader file list for guest vm: %s' % userid
        with zvmutils.log_and_reraise_smt_request_failed(action):
            resp = self._request(rd)

        with zvmutils.expect_invalid_resp_data(resp):
            rf_list = resp['response'][0].rpartition(':')[2].strip().split()

        # TODO: make sure reader device is online
        # via 'cat /sys/bus/ccw/drivers/vmur/0.0.000c/online'
        # 'sudo /sbin/cio_ignore -r 000c; sudo /sbin/chccwdev -e 000c'
        # 'which udevadm &> /dev/null && udevadm settle || udevsettle'
        logs = []
        for rf in rf_list:
            cmd = 'sudo /usr/sbin/vmur re -t -O %s' % rf
            rc, output = zvmutils.execute(cmd)
            if rc == 0:
                # Files that fail to read are silently skipped.
                logs.append(output)

        return ''.join(logs)

    def query_vswitch(self, switch_name):
        """Query extended details of a vswitch and parse the SMAPI text
        response into a dict (basic info, real devices, authorized users
        and connected adapters)."""
        smt_userid = zvmutils.get_smt_userid()
        rd = ' '.join((
            "SMAPI %s API Virtual_Network_Vswitch_Query_Extended" %
            smt_userid,
            "--operands",
            '-k switch_name=%s' % switch_name
            ))

        try:
            results = self._request(rd)
            rd_list = results['response']
        except exception.SDKSMTRequestFailed as err:
            # rc=212/rs=40: the vswitch does not exist.
            if ((err.results['rc'] == 212) and
                (err.results['rs'] == 40)):
                msg = 'Vswitch %s does not exist' % switch_name
                LOG.error(msg)
                obj_desc = "Vswitch %s" % switch_name
                raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
                                                       modID='network')
            else:
                action = "query vswitch details info"
                msg = "Failed to %s. " % action
                msg += "SMT error: %s" % err.format_message()
                LOG.error(msg)
                raise exception.SDKSMTRequestFailed(err.results, msg)

        vsw_info = {}
        with zvmutils.expect_invalid_resp_data():
            # ignore user_vlan_id part and jump to the vswitch basic info
            idx_end = len(rd_list)
            idx = 0
            while ((idx < idx_end) and
                   not rd_list[idx].__contains__('switch_name')):
                idx = idx + 1

            # The next 21 lines contains the vswitch basic info
            # eg, name, type, port_type, vlan_awareness, etc
            for i in range(21):
                rd = rd_list[idx + i].split(':')
                vsw_info[rd[0].strip()] = rd[1].strip()
            idx = idx + 21
            # Skip the vepa_status
            while ((idx < idx_end) and
                   not rd_list[idx].__contains__('real_device_address') and
                   not rd_list[idx].__contains__('port_num') and
                   not rd_list[idx].__contains__('adapter_owner')):
                idx = idx + 1

            def _parse_value(data_list, idx, keyword, offset=1):
                # Pull the value after 'keyword' on line idx and advance
                # the cursor by 'offset'; '(NONE)' is normalized to 'NONE'.
                value = data_list[idx].rpartition(keyword)[2].strip()
                if value == '(NONE)':
                    value = 'NONE'
                return idx + offset, value

            def _parse_dev_status(value):
                # Map a raw device status code to its symbolic name.
                if value in const.DEV_STATUS.keys():
                    return const.DEV_STATUS[value]
                else:
                    return 'Unknown'

            def _parse_dev_err(value):
                # Map a raw device error code to its symbolic name.
                if value in const.DEV_ERROR.keys():
                    return const.DEV_ERROR[value]
                else:
                    return 'Unknown'

            # Start to analyse the real devices info
            vsw_info['real_devices'] = {}
            while ((idx < idx_end) and
                   rd_list[idx].__contains__('real_device_address')):
                # each rdev has 6 lines' info
                idx, rdev_addr = _parse_value(rd_list, idx,
                                              'real_device_address: ')
                idx, vdev_addr = _parse_value(rd_list, idx,
                                              'virtual_device_address: ')
                idx, controller = _parse_value(rd_list, idx,
                                               'controller_name: ')
                idx, port_name = _parse_value(rd_list, idx, 'port_name: ')
                idx, dev_status = _parse_value(rd_list, idx,
                                               'device_status: ')
                idx, dev_err = _parse_value(rd_list, idx,
                                            'device_error_status ')
                vsw_info['real_devices'][rdev_addr] = {'vdev': vdev_addr,
                                                'controller': controller,
                                                'port_name': port_name,
                                                'dev_status': _parse_dev_status(
                                                                dev_status),
                                                'dev_err': _parse_dev_err(
                                                            dev_err)
                                                }
                # Under some case there would be an error line in the output
                # "Error controller_name is NULL!!", skip this line
                if ((idx < idx_end) and
                    rd_list[idx].__contains__(
                        'Error controller_name is NULL!!')):
                    idx += 1

            # Start to get the authorized userids
            vsw_info['authorized_users'] = {}
            while ((idx < idx_end) and rd_list[idx].__contains__('port_num')):
                # each authorized userid has 6 lines' info at least
                idx, port_num = _parse_value(rd_list, idx, 'port_num: ')
                idx, userid = _parse_value(rd_list, idx, 'grant_userid: ')
                idx, prom_mode = _parse_value(rd_list, idx,
                                              'promiscuous_mode: ')
                idx, osd_sim = _parse_value(rd_list, idx, 'osd_sim: ')
                idx, vlan_count = _parse_value(rd_list, idx, 'vlan_count: ')
                vlan_ids = []
                for i in range(int(vlan_count)):
                    idx, id = _parse_value(rd_list, idx, 'user_vlan_id: ')
                    vlan_ids.append(id)
                # For vlan unaware vswitch, the query smcli would
                # return vlan_count as 1, here we just set the count to 0
                if (vsw_info['vlan_awareness'] == 'UNAWARE'):
                    vlan_count = 0
                    vlan_ids = []
                vsw_info['authorized_users'][userid] = {
                    'port_num': port_num,
                    'prom_mode': prom_mode,
                    'osd_sim': osd_sim,
                    'vlan_count': vlan_count,
                    'vlan_ids': vlan_ids
                    }

            # Start to get the connected adapters info
            # OWNER_VDEV would be used as the dict key for each adapter
            vsw_info['adapters'] = {}
            while ((idx < idx_end) and
                   rd_list[idx].__contains__('adapter_owner')):
                # each adapter has four line info: owner, vdev, macaddr, type
                idx, owner = _parse_value(rd_list, idx, 'adapter_owner: ')
                idx, vdev = _parse_value(rd_list, idx, 'adapter_vdev: ')
                idx, mac = _parse_value(rd_list, idx, 'adapter_macaddr: ')
                idx, type = _parse_value(rd_list, idx, 'adapter_type: ')
                key = owner + '_' + vdev
                vsw_info['adapters'][key] = {
                    'mac': mac,
                    'type': type
                    }
            # Todo: analyze and
            # add the uplink NIC info and global member info

            def _parse_switch_status(value):
                # Map a raw switch status code to its symbolic name.
                if value in const.SWITCH_STATUS.keys():
                    return const.SWITCH_STATUS[value]
                else:
                    return 'Unknown'
            if 'switch_status' in vsw_info.keys():
                vsw_info['switch_status'] = _parse_switch_status(
                    vsw_info['switch_status'])

        return vsw_info

    def get_nic_info(self, userid=None, nic_id=None, vswitch=None):
        """Query NIC records from the network DB, filtered by any
        combination of userid, nic_id and vswitch."""
        nic_info = self._NetDbOperator.switch_select_record(userid=userid,
                                            nic_id=nic_id, vswitch=vswitch)
        return nic_info

    def is_first_network_config(self, userid):
        """Return True when the guest's net_set flag in the DB is still 0,
        i.e. its network has never been configured."""
        action = "get guest '%s' to database" % userid
        with zvmutils.log_and_reraise_sdkbase_error(action):
            info = self._GuestDbOperator.get_guest_by_userid(userid)
            # check net_set
            if int(info[3]) == 0:
                return True
            else:
                return False

    def update_guestdb_with_net_set(self, userid):
        """Mark the guest's net_set flag as '1' (network configured)."""
        action = "update guest '%s' in database" % userid
        with zvmutils.log_and_reraise_sdkbase_error(action):
            self._GuestDbOperator.update_guest_by_userid(userid, net_set='1')

    def _is_OSA_free(self, OSA_device):
        """Return True when the OSA triplet starting at OSA_device is free.

        An OSA dedication needs three consecutive device addresses
        (read/write/data), so the given address and the next two must all
        appear in the FREE list.
        """
        osa_info = self._query_OSA()
        if 'OSA' not in osa_info.keys():
            return False
        elif len(osa_info['OSA']['FREE']) == 0:
            return False
        else:
            dev1 = str(OSA_device).zfill(4).upper()
            dev2 = str(str(hex(int(OSA_device, 16) + 1))[2:]).zfill(4).upper()
            dev3 = str(str(hex(int(OSA_device, 16) + 2))[2:]).zfill(4).upper()
            if ((dev1 in osa_info['OSA']['FREE']) and
                (dev2 in osa_info['OSA']['FREE']) and
                (dev3 in osa_info['OSA']['FREE'])):
                return True
            else:
                return False

    def _query_OSA(self):
        """Query all OSA devices on the system via SMAPI and group them
        per type into FREE / BOXED / OFFLINE / ATTACHED lists.

        :returns: dict keyed by OSA type; empty dict when the system has
                  no OSAs (rc=4/rs=4)
        """
        smt_userid = zvmutils.get_smt_userid()
        rd = "SMAPI %s API Virtual_Network_OSA_Query" % smt_userid
        OSA_info = {}

        try:
            results = self._request(rd)
            rd_list = results['response']
        except exception.SDKSMTRequestFailed as err:
            if ((err.results['rc'] == 4) and (err.results['rs'] == 4)):
                msg = 'No OSAs on system'
                LOG.info(msg)
                return OSA_info
            else:
                action = "query OSA details info"
                msg = "Failed to %s. " % action
                msg += "SMT error: %s" % err.format_message()
                LOG.error(msg)
                raise exception.SDKSMTRequestFailed(err.results, msg)

        with zvmutils.expect_invalid_resp_data():
            idx_end = len(rd_list)
            idx = 0

            def _parse_value(data_list, idx, keyword, offset=1):
                # Pull the value after 'keyword' on line idx and advance
                # the cursor by 'offset'.
                value = data_list[idx].rpartition(keyword)[2].strip()
                return idx + offset, value

            # Start to analyse the osa devices info
            while ((idx < idx_end) and
                   rd_list[idx].__contains__('OSA Address')):
                idx, osa_addr = _parse_value(rd_list, idx, 'OSA Address: ')
                idx, osa_status = _parse_value(rd_list, idx, 'OSA Status: ')
                idx, osa_type = _parse_value(rd_list, idx, 'OSA Type: ')
                if osa_type != 'UNKNOWN':
                    # Known types carry two extra lines we skip over.
                    idx, CHPID_addr = _parse_value(rd_list, idx,
                                                   'CHPID Address: ')
                    idx, Agent_status = _parse_value(rd_list, idx,
                                                     'Agent Status: ')
                if osa_type not in OSA_info.keys():
                    OSA_info[osa_type] = {}
                    OSA_info[osa_type]['FREE'] = []
                    OSA_info[osa_type]['BOXED'] = []
                    OSA_info[osa_type]['OFFLINE'] = []
                    OSA_info[osa_type]['ATTACHED'] = []

                if osa_status.__contains__('ATT'):
                    # Attached entries are stored as (owner_id, address).
                    id = osa_status.split()[1]
                    item = (id, osa_addr)
                    OSA_info[osa_type]['ATTACHED'].append(item)
                else:
                    OSA_info[osa_type][osa_status].append(osa_addr)
        return OSA_info

    def _get_available_vdev(self, userid, vdev=None):
        """Pick (or validate) a virtual device number for a new NIC.

        When vdev is None, the default is used for the first NIC and
        max(existing) + 3 afterwards (a NIC occupies three consecutive
        addresses). An explicitly given vdev is checked against the
        guest's existing NICs.
        """
        ports_info = self._NetDbOperator.switch_select_table()
        vdev_info = []
        for p in ports_info:
            if p['userid'] == userid.upper():
                vdev_info.append(p['interface'])

        if len(vdev_info) == 0:
            # no nic defined for the guest
            if vdev is None:
                nic_vdev = CONF.zvm.default_nic_vdev
            else:
                nic_vdev = vdev
        else:
            if vdev is None:
                used_vdev = max(vdev_info)
                nic_vdev = str(hex(int(used_vdev, 16) + 3))[2:]
            else:
                if self._is_vdev_valid(vdev, vdev_info):
                    nic_vdev = vdev
                else:
                    errmsg = ("The specified virtual device number %s "
                              "has already been used." % vdev)
                    raise exception.SDKConflictError(modID='network', rs=6,
                                                     vdev=vdev, userid=userid,
                                                     msg=errmsg)
        # The whole triplet (vdev .. vdev+2) must fit in 4 hex digits.
        if ((len(nic_vdev) > 4) or
            (len(str(hex(int(nic_vdev, 16) + 2))[2:]) > 4)):
            errmsg = ("Virtual device number %s is not valid" % nic_vdev)
            raise exception.SDKInvalidInputFormat(msg=errmsg)
        return nic_vdev

    def dedicate_OSA(self, userid, OSA_device, vdev=None, active=False):
        """Dedicate a free OSA device triplet to the guest as a NIC.

        :param userid: guest userid
        :param OSA_device: first address of the OSA triplet to dedicate
        :param vdev: desired NIC vdev; None lets _get_available_vdev choose
        :param active: also dedicate on the running guest system
        :returns: the NIC vdev actually used
        """
        nic_vdev = self._get_available_vdev(userid, vdev=vdev)

        if not self._is_OSA_free(OSA_device):
            errmsg = ("The specified OSA device number %s "
                      "is not free" % OSA_device)
            raise exception.SDKConflictError(modID='network', rs=14,
                                             osa=OSA_device, userid=userid,
                                             msg=errmsg)

        LOG.debug('Nic attributes: vdev is %(vdev)s, '
                  'dedicated OSA device is %(osa)s',
                  {'vdev': nic_vdev, 'osa': OSA_device})
        self._dedicate_OSA(userid, OSA_device, nic_vdev, active=active)
        return nic_vdev

    def _dedicate_OSA_inactive_exception(self, error, userid, vdev,
                                         OSA_device):
        """Translate an SMAPI error from a directory (inactive) dedicate
        into the proper SDK exception; re-raise anything unrecognized."""
        if ((error.results['rc'] == 400) and (error.results['rs'] == 12)):
            # Directory locked.
            obj_desc = "Guest %s" % userid
            raise exception.SDKConflictError(modID='network', rs=15,
                                             osa=OSA_device, userid=userid,
                                             obj=obj_desc)
        elif ((error.results['rc'] == 404) and (error.results['rs'] == 12)):
            # Device already defined.
            obj_desc = "Guest device %s" % vdev
            raise exception.SDKConflictError(modID='network', rs=15,
                                             osa=OSA_device, userid=userid,
                                             obj=obj_desc)
        elif ((error.results['rc'] == 404) and (error.results['rs'] == 4)):
            errmsg = error.format_message()
            raise exception.SDKConflictError(modID='network', rs=14,
                                             osa=OSA_device, userid=userid,
                                             msg=errmsg)
        else:
            raise error

    def _dedicate_OSA_active_exception(self, error, userid, OSA_device):
        """Translate an SMAPI error from an active (live) dedicate into
        the proper SDK exception; re-raise anything unrecognized."""
        if (((error.results['rc'] == 204) and (error.results['rs'] == 4)) or
            ((error.results['rc'] == 204) and (error.results['rs'] == 8)) or
            ((error.results['rc'] == 204) and (error.results['rs'] == 16))):
            errmsg = error.format_message()
            raise exception.SDKConflictError(modID='network', rs=14,
                                             osa=OSA_device, userid=userid,
                                             msg=errmsg)
        else:
            raise error

    def _dedicate_OSA(self, userid, OSA_device, vdev, active=False):
        """Dedicate the three consecutive OSA addresses to the guest's
        three NIC vdevs, first in the user directory and then (when
        ``active``) on the running system, undoing partial work on
        failure."""
        if active:
            self._is_active(userid)

        msg = ('Start to dedicate nic device %(vdev)s of guest %(vm)s '
               'to OSA device %(osa)s'
               % {'vdev': vdev, 'vm': userid, 'osa': OSA_device})
        LOG.info(msg)

        def_vdev = vdev
        att_OSA_device = OSA_device
        # Dedicate the three consecutive read/write/data addresses.
        for i in range(3):
            requestData = ' '.join((
                'SMAPI %s API Image_Device_Dedicate_DM' % userid,
                "--operands",
                "-v %s" % def_vdev,
                "-r %s" % att_OSA_device))
            try:
                self._request(requestData)
            except (exception.SDKSMTRequestFailed,
                    exception.SDKInternalError) as err:
                LOG.error("Failed to dedicate OSA %s to nic %s for user %s "
                          "in the guest's user direct, error: %s" %
                          (att_OSA_device, def_vdev, userid,
                           err.format_message()))
                # TODO revoke the dedicated OSA in user direct
                # Walk back over the vdevs already dedicated and undo them.
                while (int(def_vdev, 16) != int(vdev, 16)):
                    def_vdev = str(hex(int(def_vdev, 16) - 1))[2:]
                    requestData = ' '.join((
                        'SMAPI %s API Image_Device_Undedicate_DM' % userid,
                        "--operands",
                        "-v %s" % def_vdev))
                    try:
                        self._request(requestData)
                    except (exception.SDKSMTRequestFailed,
                            exception.SDKInternalError) as err2:
                        # rc=404/rs=8: nothing to undedicate -- ignore.
                        if ((err2.results['rc'] == 404) and
                            (err2.results['rs'] == 8)):
                            pass
                        else:
                            LOG.error("Failed to Undedicate nic %s for user"
                                      " %s in the guest's user direct, "
                                      "error: %s" %
                                      (def_vdev, userid,
                                       err2.format_message()))
                        pass
                self._dedicate_OSA_inactive_exception(err, userid, vdev,
                                                      OSA_device)
            def_vdev = str(hex(int(def_vdev, 16) + 1))[2:]
            att_OSA_device = str(hex(int(att_OSA_device, 16) + 1))[2:]

        if active:
            def_vdev = vdev
            att_OSA_device = OSA_device
            for i in range(3):
                requestData = ' '.join((
                    'SMAPI %s API Image_Device_Dedicate' % userid,
                    "--operands",
                    "-v %s" % def_vdev,
                    "-r %s" % att_OSA_device))
                try:
                    self._request(requestData)
                except (exception.SDKSMTRequestFailed,
                        exception.SDKInternalError) as err:
                    LOG.error("Failed to dedicate OSA %s to nic %s for user "
                              "%s on the active guest system, error: %s" %
                              (att_OSA_device, def_vdev, userid,
                               err.format_message()))
                    # TODO revoke the dedicated OSA in user direct and active
                    detach_vdev = vdev
                    for j
in range(3): requestData = ' '.join(( 'SMAPI %s API Image_Device_Undedicate_DM' % userid, "--operands", "-v %s" % detach_vdev)) try: self._request(requestData) except (exception.SDKSMTRequestFailed, exception.SDKInternalError) as err2: if ((err2.results['rc'] == 404) and (err2.results['rs'] == 8)): pass else: LOG.error("Failed to Undedicate nic %s for " "user %s in the guest's user " "direct, error: %s" % (def_vdev, userid, err2.format_message())) pass detach_vdev = str(hex(int(detach_vdev, 16) + 1))[2:] while (int(def_vdev, 16) != int(vdev, 16)): def_vdev = str(hex(int(def_vdev, 16) - 1))[2:] requestData = ' '.join(( 'SMAPI %s API Image_Device_Undedicate' % userid, "--operands", "-v %s" % def_vdev)) try: self._request(requestData) except (exception.SDKSMTRequestFailed, exception.SDKInternalError) as err3: if ((err3.results['rc'] == 204) and (err3.results['rs'] == 8)): pass else: LOG.error("Failed to Undedicate nic %s for " "user %s on the active guest " "system, error: %s" % (def_vdev, userid, err3.format_message())) pass self._dedicate_OSA_active_exception(err, userid, OSA_device) def_vdev = str(hex(int(def_vdev, 16) + 1))[2:] att_OSA_device = str(hex(int(att_OSA_device, 16) + 1))[2:] OSA_desc = 'OSA=%s' % OSA_device self._NetDbOperator.switch_add_record(userid, vdev, comments=OSA_desc) msg = ('Dedicate nic device %(vdev)s of guest %(vm)s ' 'to OSA device %(osa)s successfully' % {'vdev': vdev, 'vm': userid, 'osa': OSA_device}) LOG.info(msg) def _undedicate_nic_active_exception(self, error, userid, vdev): if ((error.results['rc'] == 204) and (error.results['rs'] == 44)): errmsg = error.format_message() raise exception.SDKConflictError(modID='network', rs=16, userid=userid, vdev=vdev, msg=errmsg) else: raise error def _undedicate_nic_inactive_exception(self, error, userid, vdev): if ((error.results['rc'] == 400) and (error.results['rs'] == 12)): obj_desc = "Guest %s" % userid raise exception.SDKConflictError(modID='network', rs=17, userid=userid, vdev=vdev, 
obj=obj_desc)
        else:
            raise error

    def _undedicate_nic(self, userid, vdev, active=False,
                        del_active_only=False):
        """Undedicate a dedicated OSA NIC from a guest.

        Undedicates the three consecutive virtual devices starting at
        ``vdev`` (the loop below increments the device number twice, so a
        triplet is assumed -- TODO confirm against the dedicate path) from
        the guest's user directory and, optionally, from the running
        system, then removes the switch DB record for the NIC.

        :param userid: guest userid
        :param vdev: base virtual device number of the dedicated NIC
        :param active: also undedicate on the active (running) system;
            requires the guest to be logged on
        :param del_active_only: skip the user-directory change and the DB
            cleanup; only undedicate on the active system
        """
        if active:
            # Raises if the guest is not currently logged on.
            self._is_active(userid)

        msg = ('Start to undedicate nic device %(vdev)s of guest %(vm)s'
               % {'vdev': vdev, 'vm': userid})
        LOG.info(msg)

        if not del_active_only:
            # Remove the three DEDICATE statements from the user directory.
            def_vdev = vdev
            for i in range(3):
                requestData = ' '.join((
                    'SMAPI %s API Image_Device_Undedicate_DM' % userid,
                    "--operands",
                    "-v %s" % def_vdev))
                try:
                    self._request(requestData)
                except (exception.SDKSMTRequestFailed,
                        exception.SDKInternalError) as err:
                    results = err.results
                    emsg = err.format_message()
                    if ((results['rc'] == 404) and
                        (results['rs'] == 8)):
                        # rc=404/rs=8: device not in the directory; treat
                        # as already undedicated and keep going.
                        LOG.warning("Virtual device %s does not exist in "
                                    "the guest's user direct", vdev)
                    else:
                        LOG.error("Failed to undedicate nic %s for %s in "
                                  "the guest's user direct, error: %s" %
                                  (vdev, userid, emsg))
                        # Re-raised as SDKConflictError for known rc/rs.
                        self._undedicate_nic_inactive_exception(err,
                                                                userid,
                                                                vdev)
                # Advance to the next device of the triplet.
                def_vdev = str(hex(int(def_vdev, 16) + 1))[2:]
            # Drop the NIC's record from the network switch database.
            self._NetDbOperator.switch_delete_record_for_nic(userid, vdev)

        if active:
            # Detach the same triplet from the running system.
            def_vdev = vdev
            for i in range(3):
                rd = ' '.join((
                    "SMAPI %s API Image_Device_Undedicate" % userid,
                    "--operands",
                    '-v %s' % def_vdev))
                try:
                    self._request(rd)
                except exception.SDKSMTRequestFailed as err:
                    results = err.results
                    emsg = err.format_message()
                    if ((results['rc'] == 204) and
                        (results['rs'] == 8)):
                        # rc=204/rs=8: device not present on the active
                        # system; nothing to undo.
                        LOG.warning("Virtual device %s does not exist on "
                                    "the active guest system", vdev)
                    else:
                        LOG.error("Failed to undedicate nic %s for %s on "
                                  "the active guest system, error: %s" %
                                  (vdev, userid, emsg))
                        self._undedicate_nic_active_exception(err,
                                                              userid,
                                                              vdev)
                def_vdev = str(hex(int(def_vdev, 16) + 1))[2:]

        msg = ('Undedicate nic device %(vdev)s of guest %(vm)s successfully'
               % {'vdev': vdev, 'vm': userid})
        LOG.info(msg)

    def _request_with_error_ignored(self, rd):
        """Send smt request, log and ignore any errors."""
        try:
            return self._request(rd)
        except Exception as err:
            # log as warning and ignore namelist operation failures
            LOG.warning(six.text_type(err))

    def 
namelist_add(self, namelist, userid):
        """Add a userid to a SMAPI name list (best-effort).

        Failures are logged as warnings and otherwise ignored.
        """
        rd = ''.join(("SMAPI %s API Name_List_Add " % namelist,
                      "--operands -n %s" % userid))
        self._request_with_error_ignored(rd)

    def namelist_remove(self, namelist, userid):
        """Remove a userid from a SMAPI name list (best-effort).

        Failures are logged as warnings and otherwise ignored.
        """
        rd = ''.join(("SMAPI %s API Name_List_Remove " % namelist,
                      "--operands -n %s" % userid))
        self._request_with_error_ignored(rd)

    def namelist_query(self, namelist):
        """Return the member list of a SMAPI name list.

        Returns an empty list when the query fails (errors are logged
        and swallowed by the best-effort request helper).
        """
        rd = "SMAPI %s API Name_List_Query" % namelist
        resp = self._request_with_error_ignored(rd)
        if resp is not None:
            return resp['response']
        else:
            return []

    def namelist_destroy(self, namelist):
        """Destroy a SMAPI name list (best-effort; errors ignored)."""
        rd = "SMAPI %s API Name_List_Destroy" % namelist
        self._request_with_error_ignored(rd)

    def _get_defined_cpu_addrs(self, userid):
        """Scan the guest's user directory for CPU definitions.

        :returns: tuple ``(max_cpus, defined_addrs)`` where ``max_cpus``
            is taken from the "MACHINE ESA" statement (0 when absent) and
            ``defined_addrs`` lists the upper-case hex addresses from the
            "CPU" statements.
        """
        user_direct = self.get_user_direct(userid)
        defined_addrs = []
        max_cpus = 0
        for ent in user_direct:
            if ent.startswith("CPU"):
                # e.g. "CPU 02" -> address "02"
                cpu_addr = ent.split()[1].strip().upper()
                defined_addrs.append(cpu_addr)
            if ent.startswith("MACHINE ESA"):
                # e.g. "MACHINE ESA 8" -> at most 8 CPUs allowed
                max_cpus = int(ent.split()[2].strip())
        return (max_cpus, defined_addrs)

    def _get_available_cpu_addrs(self, used_addrs, max_cpus):
        # Get available CPU addresses that are not defined in user entry.
        # Candidates are two-digit upper-case hex strings 00..max_cpus-1.
        # NOTE: result order is unspecified (set difference); callers that
        # need determinism sort the returned list themselves.
        used_set = set(used_addrs)
        available_addrs = set([hex(i)[2:].rjust(2, '0').upper()
                               for i in range(0, max_cpus)])
        available_addrs.difference_update(used_set)
        return list(available_addrs)

    def get_active_cpu_addrs(self, userid):
        # Get the active cpu addrs in two-digit hex string in upper case
        # Sample output for 'lscpu --parse=ADDRESS':
        # # The following is the parsable format, which can be fed to other
        # # programs. Each different item in every column has an unique ID
        # # starting from zero.
        # # Address
        # 0
        # 1
        active_addrs = []
        active_cpus = self.execute_cmd(userid, "lscpu --parse=ADDRESS")
        for c in active_cpus:
            # Skip the comment lines at beginning
            if c.startswith("# "):
                continue
            # Normalize the decimal address to two-digit upper-case hex,
            # matching the format used by _get_defined_cpu_addrs.
            addr = hex(int(c.strip()))[2:].rjust(2, '0').upper()
            active_addrs.append(addr)
        return active_addrs

    def resize_cpus(self, userid, count):
        """Statically resize the CPU definitions in the user directory."""
        # Check defined cpus in user entry. 
If greater than requested, then # delete cpus. Otherwise, add new cpus. # Return value: for revert usage, a tuple of # action: The action taken for this resize, possible values: # 0: no action, 1: add cpu, 2: delete cpu # cpu_addrs: list of influenced cpu addrs action = 0 updated_addrs = [] (max_cpus, defined_addrs) = self._get_defined_cpu_addrs(userid) defined_count = len(defined_addrs) # Check maximum cpu count defined if max_cpus == 0: LOG.error("Resize for guest '%s' cann't be done. The maximum " "number of cpus is not defined in user directory." % userid) raise exception.SDKConflictError(modID='guest', rs=3, userid=userid) # Check requested count is less than the maximum cpus if count > max_cpus: LOG.error("Resize for guest '%s' cann't be done. The " "requested number of cpus: '%i' exceeds the maximum " "number of cpus allowed: '%i'." % (userid, count, max_cpus)) raise exception.SDKConflictError(modID='guest', rs=4, userid=userid, req=count, max=max_cpus) # Check count and take action if defined_count == count: LOG.info("The number of current defined CPUs in user '%s' equals " "to requested count: %i, no action for static resize" "needed." 
% (userid, count)) return (action, updated_addrs, max_cpus) elif defined_count < count: action = 1 # add more CPUs available_addrs = self._get_available_cpu_addrs(defined_addrs, max_cpus) # sort the list and get the first few addrs to use available_addrs.sort() # Define new cpus in user directory rd = ''.join(("SMAPI %s API Image_Definition_Update_DM " % userid, "--operands")) updated_addrs = available_addrs[0:count - defined_count] for addr in updated_addrs: rd += (" -k CPU=CPUADDR=%s" % addr) # Add resize support for share of CPU if CONF.zvm.user_default_share_unit > 0: total = CONF.zvm.user_default_share_unit * count rd += (" -k SHARE=RELATIVE=%s" % total) try: self._request(rd) except exception.SDKSMTRequestFailed as e: msg = ("Define new cpus in user directory for '%s' failed with" " SMT error: %s" % (userid, e.format_message())) LOG.error(msg) raise exception.SDKGuestOperationError(rs=6, userid=userid, err=e.format_message()) LOG.info("New CPUs defined in user directory for '%s' " "successfully" % userid) return (action, updated_addrs, max_cpus) else: action = 2 # Delete CPUs defined_addrs.sort() updated_addrs = defined_addrs[-(defined_count - count):] # Delete the last few cpus in user directory rd = ''.join(("SMAPI %s API Image_Definition_Delete_DM " % userid, "--operands")) for addr in updated_addrs: rd += (" -k CPU=CPUADDR=%s" % addr) try: self._request(rd) except exception.SDKSMTRequestFailed as e: msg = ("Delete CPUs in user directory for '%s' failed with" " SMT error: %s" % (userid, e.format_message())) LOG.error(msg) raise exception.SDKGuestOperationError(rs=6, userid=userid, err=e.format_message()) LOG.info("CPUs '%s' deleted from user directory for '%s' " "successfully" % (str(updated_addrs), userid)) # Add resize support for share of CPU if CONF.zvm.user_default_share_unit > 0: total = CONF.zvm.user_default_share_unit * count rd = ''.join(("SMAPI %s API Image_Definition_Update_DM " % userid, "--operands -k SHARE=RELATIVE=%s" % total)) try: 
self._request(rd) except exception.SDKSMTRequestFailed as e: msg = ("Update share statement in user directory for '%s' failed with" " SMT error: %s" % (userid, e.format_message())) LOG.error(msg) raise exception.SDKGuestOperationError(rs=6, userid=userid, err=e.format_message()) LOG.info("Update share statment in user directory for '%s' " "successfully" % userid) return (action, updated_addrs, max_cpus) def live_resize_cpus(self, userid, count): # Get active cpu count and compare with requested count # If request count is smaller than the current count, then report # error and exit immediately. active_addrs = self.get_active_cpu_addrs(userid) active_count = len(active_addrs) if active_count > count: LOG.error("Failed to live resize cpus of guest: %(uid)s, " "current active cpu count: %(cur)i is greater than " "the requested count: %(req)i." % {'uid': userid, 'cur': active_count, 'req': count}) raise exception.SDKConflictError(modID='guest', rs=2, userid=userid, active=active_count, req=count) # Static resize CPUs. (add or delete CPUs from user directory) (action, updated_addrs, max_cpus) = self.resize_cpus(userid, count) if active_count == count: # active count equals to requested LOG.info("Current active cpu count of guest: '%s' equals to the " "requested count: '%i', no more actions needed for " "live resize." % (userid, count)) LOG.info("Live resize cpus for guest: '%s' finished successfully." % userid) return else: # Get the number of cpus to add to active and check address active_free = self._get_available_cpu_addrs(active_addrs, max_cpus) active_free.sort() active_new = active_free[0:count - active_count] # Do live resize # Define new cpus cmd_str = "vmcp def cpu " + ' '.join(active_new) try: self.execute_cmd(userid, cmd_str) except exception.SDKSMTRequestFailed as err1: # rollback and return msg1 = ("Define cpu of guest: '%s' to active failed with . " "error: %s." 
% (userid, err1.format_message())) # Start to do rollback if action == 0: LOG.error(msg1) else: LOG.error(msg1 + (" Will revert the user directory " "change.")) # Combine influenced cpu addrs cpu_entries = "" for addr in updated_addrs: cpu_entries += (" -k CPU=CPUADDR=%s" % addr) rd = '' if action == 1: # Delete added CPUs rd = ''.join(("SMAPI %s API Image_Definition_Delete_DM" % userid, " --operands")) else: # Add deleted CPUs rd = ''.join(("SMAPI %s API Image_Definition_Create_DM" % userid, " --operands")) rd += cpu_entries try: self._request(rd) except exception.SDKSMTRequestFailed as err2: msg = ("Failed to revert user directory change for '" "%s', SMT error: %s" % (userid, err2.format_message())) LOG.error(msg) else: # revert change for share statement if CONF.zvm.user_default_share_unit > 0: old = CONF.zvm.user_default_share_unit * active_count rd = ''.join(("SMAPI %s API Image_Definition_Update_DM " % userid, "--operands -k SHARE=RELATIVE=%s" % old)) try: self._request(rd) except exception.SDKSMTRequestFailed as e: msg = ("Failed to revert user directory change of share for '" "%s', SMT error: %s" % (userid, e.format_message())) LOG.error(msg) else: LOG.info("Revert user directory change for '%s' " "successfully." % userid) else: LOG.info("Revert user directory change for '%s' " "successfully." % userid) # Finally raise the exception raise exception.SDKGuestOperationError( rs=7, userid=userid, err=err1.format_message()) # Activate successfully, rescan in Linux layer to hot-plug new cpus LOG.info("Added new CPUs to active configuration of guest '%s'" % userid) try: self.execute_cmd(userid, "chcpu -r") except exception.SDKSMTRequestFailed as err: msg = err.format_message() LOG.error("Rescan cpus to hot-plug new defined cpus for guest: " "'%s' failed with error: %s. No rollback is done and you" "may need to check the status and restart the guest to " "make the defined cpus online." 
% (userid, msg)) raise exception.SDKGuestOperationError(rs=8, userid=userid, err=msg) uname_out = self.execute_cmd(userid, "uname -a") if uname_out and len(uname_out) >= 1: distro = uname_out[0] else: distro = '' if 'ubuntu' in distro or 'Ubuntu' in distro \ or 'UBUNTU' in distro: try: # need use chcpu -e to make cpu online for Ubuntu online_cmd = "chcpu -e " + ','.join(active_new) self.execute_cmd(userid, online_cmd) except exception.SDKSMTRequestFailed as err: msg = err.format_message() LOG.error("Enable cpus for guest: '%s' failed with error: %s. " "No rollback is done and you may need to check the " "status and restart the guest to make the defined " "cpus online." % (userid, msg)) raise exception.SDKGuestOperationError(rs=15, userid=userid, err=msg) LOG.info("Live resize cpus for guest: '%s' finished successfully." % userid) def _get_defined_memory(self, userid): user_direct = self.get_user_direct(userid) defined_mem = max_mem = reserved_mem = -1 for ent in user_direct: # u'USER userid password storage max privclass' if ent.startswith("USER "): fields = ent.split(' ') if len(fields) < 6: # This case should not exist if the target user # is created by zcc and not updated manually by user break defined_mem = int(zvmutils.convert_to_mb(fields[3])) max_mem = int(zvmutils.convert_to_mb(fields[4])) # For legacy guests, the reserved memory may not be defined if ent.startswith("COMMAND DEF STOR RESERVED"): reserved_mem = int(zvmutils.convert_to_mb(ent.split(' ')[4])) return (defined_mem, max_mem, reserved_mem, user_direct) def _replace_user_direct(self, userid, user_entry): # user_entry can be a list or a string entry_str = "" if isinstance(user_entry, list): for ent in user_entry: if ent == "": # skip empty line continue else: entry_str += (ent + '\n') else: entry_str = user_entry tmp_folder = tempfile.mkdtemp() tmp_user_direct = os.path.join(tmp_folder, userid) with open(tmp_user_direct, 'w') as f: f.write(entry_str) rd = ''.join(("SMAPI %s API Image_Replace_DM " % 
userid, "--operands ", "-f %s" % tmp_user_direct)) try: self._request(rd) except exception.SDKSMTRequestFailed as err1: msg = ("Replace definition of guest '%s' failed with " "SMT error: %s." % (userid, err1.format_message())) LOG.error(msg) LOG.debug("Unlocking the user directory.") rd = ("SMAPI %s API Image_Unlock_DM " % userid) try: self._request(rd) except exception.SDKSMTRequestFailed as err2: # ignore 'not locked' error if ((err2.results['rc'] == 400) and ( err2.results['rs'] == 24)): LOG.debug("Guest '%s' unlocked successfully." % userid) pass else: # just print error and ignore this unlock error msg = ("Unlock definition of guest '%s' failed " "with SMT error: %s" % (userid, err2.format_message())) LOG.error(msg) else: LOG.debug("Guest '%s' unlocked successfully." % userid) # at the end, raise the replace error for upper layer to handle raise err1 finally: self._pathutils.clean_temp_folder(tmp_folder) def _lock_user_direct(self, userid): rd = ("SMAPI %s API Image_Lock_DM " % userid) try: self._request(rd) except exception.SDKSMTRequestFailed as e: # ignore the "already locked" error if ((e.results['rc'] == 400) and (e.results['rs'] == 12)): LOG.debug("Image is already unlocked.") else: msg = ("Lock definition of guest '%s' failed with" " SMT error: %s" % (userid, e.format_message())) LOG.error(msg) raise e def resize_memory(self, userid, memory): # Check defined storage in user entry. # Update STORAGE and RESERVED accordingly. size = int(zvmutils.convert_to_mb(memory)) (defined_mem, max_mem, reserved_mem, user_direct) = self._get_defined_memory(userid) # Check max memory is properly defined if max_mem == -1 or reserved_mem == -1: LOG.error("Memory resize for guest '%s' cann't be done." "Failed to get the defined/max/reserved memory size " "from user directory." 
% userid) raise exception.SDKConflictError(modID='guest', rs=19, userid=userid) action = 0 # Make sure requested size is less than the maximum memory size if size > max_mem: LOG.error("Memory resize for guest '%s' cann't be done. The " "requested memory size: '%im' exceeds the maximum " "size allowed: '%im'." % (userid, size, max_mem)) raise exception.SDKConflictError(modID='guest', rs=20, userid=userid, req=size, max=max_mem) # check if already satisfy request if defined_mem == size: LOG.info("The current defined memory size in user '%s' equals " "to requested size: %im, no action for memory resize " "needed." % (userid, size)) return (action, defined_mem, max_mem, user_direct) else: # set action to 1 to represent that revert need to be done when # live resize failed. action = 1 # get the new reserved memory size new_reserved = max_mem - size # get maximum reserved memory value MAX_STOR_RESERVED = int(zvmutils.convert_to_mb( CONF.zvm.user_default_max_reserved_memory)) # when new reserved memory value > the MAX_STOR_RESERVED, # make is as the MAX_STOR_RESERVED value if new_reserved > MAX_STOR_RESERVED: new_reserved = MAX_STOR_RESERVED # prepare the new user entry content entry_str = "" for ent in user_direct: if ent == '': # Avoid adding an empty line in the entry file # otherwise Image_Replace_DM would return syntax error. 
continue new_ent = "" if ent.startswith("USER "): fields = ent.split(' ') for i in range(len(fields)): # update fields[3] to new defined size if i != 3: new_ent += (fields[i] + ' ') else: new_ent += (str(size) + 'M ') # remove the last space new_ent = new_ent.strip() elif ent.startswith("COMMAND DEF STOR RESERVED"): new_ent = ("COMMAND DEF STOR RESERVED %iM" % new_reserved) else: new_ent = ent # append this new entry entry_str += (new_ent + '\n') # Lock and replace user definition with the new_entry content try: self._lock_user_direct(userid) except exception.SDKSMTRequestFailed as e: raise exception.SDKGuestOperationError(rs=9, userid=userid, err=e.format_message()) LOG.debug("User directory Locked successfully for guest '%s' " % userid) # Replace user directory try: self._replace_user_direct(userid, entry_str) except exception.SDKSMTRequestFailed as e: raise exception.SDKGuestOperationError(rs=10, userid=userid, err=e.format_message()) # Finally return useful info return (action, defined_mem, max_mem, user_direct) def _revert_user_direct(self, userid, user_entry): # user_entry can be a list or a string try: self._lock_user_direct(userid) except exception.SDKSMTRequestFailed: # print revert error and return msg = ("Failed to revert user direct of guest '%s'." % userid) LOG.error(msg) return LOG.debug("User directory Locked successfully for guest '%s'." % userid) # Replace user directory try: self._replace_user_direct(userid, user_entry) except exception.SDKSMTRequestFailed: msg = ("Failed to revert user direct of guest '%s'." % userid) LOG.error(msg) return LOG.debug("User directory reverted successfully for guest '%s'." 
% userid) def _get_active_memory(self, userid): # Return an integer value representing the active memory size in mb output = self.execute_cmd(userid, "lsmem") active_mem = 0 for e in output: # cmd output contains line starts with "Total online memory", # its format can be like: # "Total online memory : 8192 MB" # or # "Total online memory: 8G" # need handle both formats if e.startswith("Total online memory"): try: # sample mem_info_str: "8192MB" or "8G" mem_info_str = e.split(':')[1].replace(' ', '').upper() # make mem_info as "8192M" or "8G" if mem_info_str.endswith('B'): mem_info = mem_info_str[:-1] else: mem_info = mem_info_str active_mem = int(zvmutils.convert_to_mb(mem_info)) except (IndexError, ValueError, KeyError, TypeError) as e: errmsg = ("Failed to get active storage size for guest: %s" % userid) LOG.error(errmsg + " with error: " + six.text_type(e)) raise exception.SDKInternalError(msg=errmsg) break return active_mem def live_resize_memory(self, userid, memory): # Get active memory size and compare with requested size # If request size is smaller than the current size, then report # error and exit immediately. size = int(zvmutils.convert_to_mb(memory)) active_size = self._get_active_memory(userid) if active_size > size: LOG.error("Failed to live resize memory of guest: %(uid)s, " "current active memory size: %(cur)im is greater than " "the requested size: %(req)im." % {'uid': userid, 'cur': active_size, 'req': size}) raise exception.SDKConflictError(modID='guest', rs=18, userid=userid, active=active_size, req=size) # get maximum reserved memory value MAX_STOR_RESERVED = int(zvmutils.convert_to_mb( CONF.zvm.user_default_max_reserved_memory)) # The maximum increased memory size in one live resizing can't # exceed MAX_STOR_RESERVED increase_size = size - active_size if increase_size > MAX_STOR_RESERVED: LOG.error("Live memory resize for guest '%s' cann't be done. 
" "The memory size to be increased: '%im' is greater " " than the maximum reserved memory size: '%im'." % (userid, increase_size, MAX_STOR_RESERVED)) raise exception.SDKConflictError(modID='guest', rs=21, userid=userid, inc=increase_size, max=MAX_STOR_RESERVED) # Static resize memory. (increase/decrease memory from user directory) (action, defined_mem, max_mem, user_direct) = self.resize_memory(userid, memory) # Compare active size and requested size, then update accordingly if active_size == size: # online memory already satisfied LOG.info("Current active memory size of guest: '%s' equals to the " "requested size: '%iM', no more actions needed for " "live resize." % (userid, size)) LOG.info("Live resize memory for guest: '%s' finished " "successfully." % userid) return else: # Do live resize. update memory size # Step1: Define new standby storage cmd_str = ("vmcp def storage standby %sM" % increase_size) try: self.execute_cmd(userid, cmd_str) except exception.SDKSMTRequestFailed as e: # rollback and return msg = ("Define standby memory of guest: '%s' failed with " "error: %s." % (userid, e.format_message())) LOG.error(msg) # Start to do rollback if action == 1: LOG.debug("Start to revert user definition of guest '%s'." % userid) self._revert_user_direct(userid, user_direct) # Finally, raise the error and exit raise exception.SDKGuestOperationError(rs=11, userid=userid, err=e.format_message()) # Step 2: Online new memory cmd_str = ("chmem -e %sM" % increase_size) try: self.execute_cmd(userid, cmd_str) except exception.SDKSMTRequestFailed as err1: # rollback and return msg1 = ("Online memory of guest: '%s' failed with " "error: %s." 
% (userid, err1.format_message())) LOG.error(msg1) # Start to do rollback LOG.info("Start to do revert.") LOG.debug("Reverting the standby memory.") try: self.execute_cmd(userid, "vmcp def storage standby 0M") except exception.SDKSMTRequestFailed as err2: # print revert error info and continue msg2 = ("Revert standby memory of guest: '%s' failed with " "error: %s." % (userid, err2.format_message())) LOG.error(msg2) # Continue to do the user directory change. if action == 1: LOG.debug("Reverting the user directory change of guest " "'%s'." % userid) self._revert_user_direct(userid, user_direct) # Finally raise the exception raise exception.SDKGuestOperationError( rs=7, userid=userid, err=err1.format_message()) LOG.info("Live resize memory for guest: '%s' finished successfully." % userid) def is_rhcos(self, os_version): return os_version.lower().startswith('rhcos') def _get_wwpn_lun(self, userid): user_direct = self.get_user_direct(userid) wwpn = None lun = None for ent in user_direct: if ent.upper().startswith("LOADDEV PORT"): wwpn = ent.split()[2].strip() elif ent.upper().startswith("LOADDEV LUN"): lun = ent.split()[2].strip() return (wwpn, lun) def host_get_ssi_info(self): msg = ('Start SSI_Query') LOG.info(msg) rd = 'SMAPI HYPERVISOR API SSI_Query' try: results = self._request(rd) except exception.SDKSMTRequestFailed as err: if err.results['rc'] == 4 and err.results['rs'] == 3008: # System is not a member of an SSI cluster LOG.debug("Host is not a member of an SSI cluster.") return [] msg = "SMT error: %s" % err.format_message() raise exception.SDKSMTRequestFailed(err.results, msg) LOG.error("Failed to query SSI information.") if results['rc'] == 0 and results['rs'] == 0 \ and results.get('response'): return results.get('response') return [] def guest_get_kernel_info(self, userid): # Get the kernel info of 'uname -srm' try: kernel_info = self.execute_cmd(userid, "uname -srm") return kernel_info[0] except exception.SDKSMTRequestFailed as err: msg = 
err.format_message()
            # Best-effort: log the failure and fall back to an empty
            # string rather than propagating the error to the caller.
            LOG.error("Get kernel info from the guest %s failed: %s"
                      % (userid, msg))
            return ''


class FilesystemBackend(object):
    """Image import/export backend that copies files via scp or locally."""

    @classmethod
    def image_import(cls, image_name, url, target, **kwargs):
        """Import image from remote host to local image repository using scp.

        If remote_host is not specified, the source file exists in the
        local file system; just copy the image to the image repository.

        :param image_name: name of the image (unused here; kept for
            backend-interface compatibility)
        :param url: source URL; only its path component is used
        :param target: destination path in the local image repository
        :raises SDKImageOperationError: when the scp or local copy fails,
            or when remote_host lacks the expected format
        """
        source = urlparse.urlparse(url).path
        if kwargs['remote_host']:
            # remote_host is expected in user@host form -- TODO confirm
            if '@' in kwargs['remote_host']:
                source_path = ':'.join([kwargs['remote_host'], source])
                command = ' '.join(['/usr/bin/scp', "-P",
                                    CONF.zvm.remotehost_sshd_port,
                                    "-o StrictHostKeyChecking=no",
                                    '-r ', source_path, target])
                (rc, output) = zvmutils.execute(command)
                if rc:
                    msg = ("Copying image file from remote filesystem failed"
                           " with reason: %s" % output)
                    LOG.error(msg)
                    raise exception.SDKImageOperationError(rs=10, err=output)
            else:
                msg = ("The specified remote_host %s format invalid" %
                       kwargs['remote_host'])
                LOG.error(msg)
                raise exception.SDKImageOperationError(
                    rs=11, rh=kwargs['remote_host'])
        else:
            LOG.debug("Remote_host not specified, will copy from local")
            try:
                shutil.copyfile(source, target)
            except Exception as err:
                msg = ("Import image from local file system failed"
                       " with reason %s" % six.text_type(err))
                LOG.error(msg)
                raise exception.SDKImageOperationError(
                    rs=12, err=six.text_type(err))

    @classmethod
    def image_export(cls, source_path, dest_url, **kwargs):
        """Export the specific image to remote host or local file system.

        :param source_path: path of the image file to export
        :param dest_url: destination URL; only its path component is used
        :raises SDKImageOperationError: when the scp copy fails
        """
        dest_path = urlparse.urlparse(dest_url).path
        if kwargs['remote_host']:
            # Push the image to the remote host with scp.
            target_path = ':'.join([kwargs['remote_host'], dest_path])
            command = ' '.join(['/usr/bin/scp', "-P",
                                CONF.zvm.remotehost_sshd_port,
                                "-o StrictHostKeyChecking=no",
                                '-r ', source_path, target_path])
            (rc, output) = zvmutils.execute(command)
            if rc:
                msg = ("Error happened when copying image file to remote "
                       "host with reason: %s" % output)
                LOG.error(msg)
                raise exception.SDKImageOperationError(rs=21, msg=output) 
else: # Copy to local file system LOG.debug("Remote_host not specified, will copy to local server") try: shutil.copyfile(source_path, dest_path) except Exception as err: msg = ("Export image from %(src)s to local file system" " %(dest)s failed: %(err)s" % {'src': source_path, 'dest': dest_path, 'err': six.text_type(err)}) LOG.error(msg) raise exception.SDKImageOperationError(rs=22, err=six.text_type(err)) class HTTPBackend(object): @classmethod def image_import(cls, image_name, url, target, **kwargs): import_image = MultiThreadDownloader(image_name, url, target) import_image.run() class MultiThreadDownloader(threading.Thread): def __init__(self, image_name, url, target): super(MultiThreadDownloader, self).__init__() self.url = url # Set thread number self.threadnum = 8 r = requests.head(self.url) # Get the size of the download resource self.totalsize = int(r.headers['Content-Length']) self.target = target def handle_download_errors(func): @functools.wraps(func) def wrapper(self, *args, **kwargs): try: return func(self, *args, **kwargs) except Exception as err: self.fd.close() msg = ("Download image from http server failed: %s" % six.text_type(err)) LOG.error(msg) raise exception.SDKImageOperationError(rs=9, err=six.text_type(err)) return wrapper def get_range(self): ranges = [] offset = int(self.totalsize / self.threadnum) for i in range(self.threadnum): if i == self.threadnum - 1: ranges.append((i * offset, '')) else: # Get the process range for each thread ranges.append((i * offset, (i + 1) * offset)) return ranges def download(self, start, end): headers = {'Range': 'Bytes=%s-%s' % (start, end), 'Accept-Encoding': '*'} # Get the data res = requests.get(self.url, headers=headers) # seek to the right position for writing data LOG.debug("Downloading file range %s:%s success" % (start, end)) with _LOCK: self.fd.seek(start) self.fd.write(res.content) @handle_download_errors def run(self): self.fd = open(self.target, 'w') thread_list = [] n = 0 for ran in 
self.get_range(): start, end = ran LOG.debug('thread %d start:%s,end:%s' % (n, start, end)) n += 1 # Open thread thread = threading.Thread(target=self.download, args=(start, end)) thread.start() thread_list.append(thread) for i in thread_list: i.join() LOG.info('Download %s success' % (self.name)) self.fd.close() zVMCloudConnector-1.6.3/zvmsdk/volumeops/0000775000175000017510000000000014315232035020067 5ustar ruirui00000000000000zVMCloudConnector-1.6.3/zvmsdk/volumeops/__init__.py0000664000175000017510000000000013720612363022173 0ustar ruirui00000000000000zVMCloudConnector-1.6.3/zvmsdk/volumeops/templates/0000775000175000017510000000000014315232035022065 5ustar ruirui00000000000000zVMCloudConnector-1.6.3/zvmsdk/volumeops/templates/sles_attach_volume.j20000664000175000017510000002314014315210052026176 0ustar ruirui00000000000000#!/bin/bash # Generated by jinja2 template fcp_list="{{ fcp_list }}" input_wwpns="{{ wwpns }}" lun="{{ lun }}" target_filename="{{ target_filename }}" # OK(0): success OK=0 # MULTIPATH_SERVICE_NOT_ACTIVE(1): failed because multipathd service is not active MULTIPATH_SERVICE_NOT_ACTIVE=1 # INVALID_PARAMETERS(2): failed because input parameters may have problems INVALID_PARAMETERS=2 # INVALID_WWPNS(3): failed because can not found intersection between input WWPNs and lszfcp output INVALID_WWPNS=3 # DEVICE_PATH_NOT_FOUND(4): failed because no disk file found in the target VM, means no volume shown in the target VM DEVICE_PATH_NOT_FOUND=4 echo "Enter SLES attach script with parameters: FCP list:${fcp_list[@]}, INPUT WWPNS:${input_wwpns[@]}, LUN:$lun, target_filename:$target_filename" # ensure multipathd service is up echo "Begin to enable dm-multipath mode ..." 
enable_multipath_mod=`lsmod | grep dm_multipath` if [ -z "$enable_multipath_mod" ];then modprobe dm-multipath echo -e "#blacklist { # devnode \"*\" #} " > /etc/multipath.conf mpathconf systemctl restart multipathd.service echo "dm-multipath mode enabled successfully" else echo "dm-multipath mode is already enabled" fi echo "Checking status of multipathd service ..." check_multipathd_service=`systemctl is-active --quiet multipathd` exit_code=$? if [[ $exit_code != 0 ]]; then echo "Attach script terminated because multipathd service is not active, exit with code $MULTIPATH_SERVICE_NOT_ACTIVE." exit $MULTIPATH_SERVICE_NOT_ACTIVE fi # because jinja2's problem, we can not use # to count the array_size fcp_count=0 for fcp in ${fcp_list[@]} do echo "Got FCP $fcp" fcp_count=$((fcp_count+=1)) done if [[ $fcp_count -eq 0 ]]; then echo "fcp_list is empty, exit with code $INVALID_PARAMETERS." exit $INVALID_PARAMETERS fi # see if zfcp is enable echo "Begin to enable zfcp mode ..." enable_zfcp_mod=`lsmod | grep zfcp` if [ -z "$enable_zfcp_mod" ];then modprobe zfcp echo "zfcp mode enabled successfully" else echo "zfcp mode is already enabled" fi # online fcp devices for fcp in ${fcp_list[@]} do echo "Begin to online FCP $fcp ..." 
/sbin/cio_ignore -r $fcp > /dev/null /sbin/chccwdev -e $fcp > /dev/null echo "FCP $fcp is online now" done # Print info of each FCP for fcp in ${fcp_list[@]} do # print the status of FCP port for debug host=$(lszfcp | grep -i $fcp | awk -F' ' '{print $2}') if [[ -n "$host" ]]; then if [[ -e /sys/class/fc_host/$host/port_state ]]; then port_state=$(cat /sys/class/fc_host/$host/port_state) echo "Port state of fcp: $fcp is: $port_state" else echo "Can not get port statue of fcp $fcp because file /sys/class/fc_host/$host/port_state not exist" fi else echo "Failed to find host of fcp: $fcp from the lszfcp output, cann't get port state" fi done # send an iSCSI scan request with given host and optionally the ctl # the ctl means, c: channel,default to - # t: target, default to - # l: lun, default to - all_hosts=(`ls /sys/class/scsi_host/`) for host in ${all_hosts[@]} do echo "Scan on host $host triggered" echo "- - -" > /sys/class/scsi_host/$host/scan echo "Scan on host $host completed" done # the number WWPNs is generated dynamically # so we need to get them from the filesystem # and get the intersection between input wwpns and lszfcp output lszfcp_output=`lszfcp -P | awk '{print $1}' | awk -F'/' '{print $1,$2}'` echo -e "lszfcp -P output port info:\n$lszfcp_output" declare -A valid_dict for fcp in ${fcp_list[@]} do # Retry each FCP device to make sure there are matched WWPNs for it Timeout=10 while [[ $Timeout -gt 0 ]] do # Get WWPNs under /sys/bus/ccw/drivers/zfcp/. 
wwpns_shown_in_sys=`ls /sys/bus/ccw/drivers/zfcp/0.0.$fcp/ | grep "0x"` echo -e "Target WWPNs shown under /sys/bus/ccw/drivers/zfcp/0.0.$fcp/:\n$wwpns_shown_in_sys" # Try to find match between system WWPNs(from lszfcp output or under /sys) and input WWPNs found_match=0 for wwpn in ${input_wwpns[@]} do fcp_wwpn_str="0.0.${fcp} ${wwpn}" if [[ $lszfcp_output =~ $fcp_wwpn_str || $wwpns_shown_in_sys =~ $wwpn ]]; then found_match=1 echo "$fcp_wwpn_str matched with the ouput of lszfcp or WWPNs shown under /sys/bus/ccw/drivers/zfcp/." # Add this combination into valid_dict if [[ -z ${valid_dict[$fcp]} ]]; then valid_dict+=([$fcp]="$wwpn") else old_value=${valid_dict[$fcp]} new_value=${old_value}" "$wwpn valid_dict[$fcp]=$new_value fi fi done # If no matched wwpn found, need retry if [[ $found_match -eq 0 ]]; then sleep 1 Timeout=$((Timeout-1)) echo "Retrying to get the target WWPNs for FCP device $fcp, $Timeout seconds left..." lszfcp_output=`lszfcp -P | awk '{print $1}' | awk -F'/' '{print $1,$2}'` else echo "Found target WWPNs ${valid_dict[$fcp]} for FCP device $fcp." break fi done done echo "Got valid wwpns, list of key is: ${!valid_dict[@]}, list of value is: ${valid_dict[@]} ." # check the content of valid_dict, if no content, return error valid_fcp_count=0 for fcp in ${!valid_dict[@]} do valid_fcp_count=$((valid_fcp_count+=1)) done if [[ $valid_fcp_count -eq 0 ]]; then echo "Can not find the intersection between input wwpns: ${input_wwpns[@]} and lszfcp output: $lszfcp_output, exit with code $INVALID_WWPNS." 
exit $INVALID_WWPNS fi # flag which indicate whether we found a valid and accessable path for the volume FoundDiskPath=0 # wait for the device ready for fcp in ${!valid_dict[@]} do ActiveWWPNs=(${valid_dict[$fcp]}) echo "Discover WWPNs: ${ActiveWWPNs[@]} for FCP device: $fcp" # If auto-discovery of LUNs is disabled on s390 platforms # luns need to be added to the configuration through # the unit_add interface AutoScan=`cat /sys/module/zfcp/parameters/allow_lun_scan` if [[ "$AutoScan" != "Y" ]]; then for wwpn in ${ActiveWWPNs[@]} do echo "Begin to register SCSI device 0.0.$fcp:$wwpn:$lun ..." chzdev -e -a zfcp-lun 0.0.$fcp:$wwpn:$lun echo "Registration for SCSI device 0.0.$fcp:$wwpn:$lun was done" done fi # create udev rules sleep 1 touch /etc/udev/rules.d/51-zfcp-0.0.$fcp.rules out=`cat "/etc/udev/rules.d/51-zfcp-0.0.$fcp.rules" | egrep -i "ccw/0.0.$fcp]online"` if [[ ! $out ]]; then echo "ACTION==\"add\", SUBSYSTEM==\"ccw\", KERNEL==\"0.0.$fcp\", IMPORT{program}=\"collect 0.0.$fcp %k 0.0.$fcp zfcp\""| tee -a /etc/udev/rules.d/51-zfcp-0.0.$fcp.rules echo "ACTION==\"add\", SUBSYSTEM==\"drivers\", KERNEL==\"zfcp\", IMPORT{program}=\"collect 0.0.$fcp %k 0.0.$fcp zfcp\""| tee -a /etc/udev/rules.d/51-zfcp-0.0.$fcp.rules echo "ACTION==\"add\", ENV{COLLECT_0.0.$fcp}==\"0\", ATTR{[ccw/0.0.$fcp]online}=\"1\""| tee -a /etc/udev/rules.d/51-zfcp-0.0.$fcp.rules fi for wwpn in ${ActiveWWPNs[@]} do echo "ACTION==\"add\", KERNEL==\"rport-*\", ATTR{port_name}==\"$wwpn\", SUBSYSTEMS==\"ccw\", KERNELS==\"0.0.$fcp\",ATTR{[ccw/0.0.$fcp]$wwpn/unit_add}=\"$lun\""| tee -a /etc/udev/rules.d/51-zfcp-0.0.$fcp.rules done echo "Start to execute udev settle" if [[ $(which udevadm 2> /dev/null) != '' ]]; then udevadm settle echo "Execution of udevadm settle was done" else udevsettle echo "Execution of udevsettle was done" fi # wait for the devices ready # timeout set to 10 seconds Timeout=10 while [ $FoundDiskPath -eq 0 ] do # if timeout less or equal 0 seconds, means no time left if [ $Timeout 
-le 0 ]; then echo "Waiting for devices ready timed out after 10 seconds." break fi # loop all the WWPNs to found the alive device for j in ${ActiveWWPNs[@]} do x="/dev/disk/by-path/ccw-0.0.$fcp-zfcp-$j:$lun" # the x would be like: # ccw-0.0.1d13-zfcp-0x5005076306035388:0x4014400400000000 echo "Try to detect disk $x" if [ -e $x ]; then echo "Disk $x detected" diskPath=$x FoundDiskPath=1 echo "Found disk path is: $diskPath" break fi done # if devices still not ready, wait another 5 seconds and retry if [ $FoundDiskPath -eq 0 ]; then sleep 1 Timeout=$((Timeout-=1)) echo "Sleep 1 second to wait the devices ready, timeout left: $Timeout" fi done done if [ $FoundDiskPath -eq 1 ]; then echo "The storage device is ready and found disk path: $diskPath." else echo "Error happens during attachment because the device file of $fcp not found, will exit with code $DEVICE_PATH_NOT_FOUND." exit $DEVICE_PATH_NOT_FOUND fi WWID=`/lib/udev/scsi_id --page 0x83 --whitelisted $diskPath` echo "scsi_id command get WWID:$WWID for device $diskPath." ConfigLib="/lib/udev/rules.d/56-zfcp.rules" if [ -e "$ConfigLib" ] then ConfigFile="/lib/udev/rules.d/56-zfcp.rules" else ConfigFile="/etc/udev/rules.d/56-zfcp.rules" fi LinkItem="KERNEL==\"dm-*\", ENV{DM_UUID}==\"mpath-$WWID\", SYMLINK+=\"$target_filename\"" echo -e $LinkItem >> $ConfigFile echo "Begin to reload udev rules ..." udevadm control --reload udevadm trigger --sysname-match=dm-* echo "Undev rules reload done" echo "Exit SLES attach script" exit $OK zVMCloudConnector-1.6.3/zvmsdk/volumeops/templates/ubuntu_attach_volume.j20000664000175000017510000002151514315210052026556 0ustar ruirui00000000000000#!/bin/bash # Generated by jinja2 template fcp_list="{{ fcp_list }}" input_wwpns="{{ wwpns }}" # lun example: 0x0100000000000000 lun="{{ lun }}" # 1. if the lun id less than 256, # the file under /dev/disk/by-path/ will as below, # take 'lun id = 0' as example: # ccw-0.0.5c03-fc-0x5005076802400c1a-lun-0, the the lun id is decimal. # 2. 
if the lun id is equal or more than 256, # the file under /dev/disk/by-path/ will as below, # take 'lun id = 256' as example: # ccw-0.0.1a0d-fc-0x500507680b26bac7-lun-0x0100000000000000, # the lun id is hex. lun_id="{{ lun_id }}" target_filename="{{ target_filename }}" # OK(0): success OK=0 # MULTIPATH_SERVICE_NOT_ACTIVE(1): failed because multipathd service is not active MULTIPATH_SERVICE_NOT_ACTIVE=1 # INVALID_PARAMETERS(2): failed because input parameters may have problems INVALID_PARAMETERS=2 # INVALID_WWPNS(3): failed because can not found intersection between input WWPNs and lszfcp output INVALID_WWPNS=3 # DEVICE_PATH_NOT_FOUND(4): failed because no disk file found in the target VM, means no volume shown in the target VM DEVICE_PATH_NOT_FOUND=4 echo "Enter UBUNTU attach script with parameters: FCP list:${fcp_list[@]}, INPUT WWPNS:${input_wwpns[@]}, LUN:$lun lun_id:$lun_id, target_filename:$target_filename." # ensure multipathd service is up echo "Checking status of multipathd service ..." check_multipathd_service=`systemctl is-active --quiet multipathd` exit_code=$? if [[ $exit_code != 0 ]]; then echo "Attach script terminated because multipathd service is not active, exit with code $MULTIPATH_SERVICE_NOT_ACTIVE." exit $MULTIPATH_SERVICE_NOT_ACTIVE fi # because jinja2's problem, we can not use # to count the array_size fcp_count=0 for fcp in ${fcp_list[@]} do echo "Got FCP $fcp" fcp_count=$((fcp_count+=1)) done if [[ $fcp_count -eq 0 ]]; then echo "fcp_list is empty, exit with code $INVALID_PARAMETERS." exit $INVALID_PARAMETERS fi # see if zfcp is enable echo "Begin to enable zfcp mode ..." enable_zfcp_mod=`lsmod | grep zfcp` if [ -z "$enable_zfcp_mod" ];then modprobe zfcp echo "zfcp mode enabled successfully" else echo "zfcp mode is already enabled" fi # online fcp devices for fcp in ${fcp_list[@]} do echo "Begin to online FCP $fcp ..." 
/sbin/chzdev zfcp-host $fcp -e echo "FCP $fcp is online now" done # Print info of each FCP for fcp in ${fcp_list[@]} do # print the status of FCP port for debug host=$(lszfcp | grep -i $fcp | awk -F' ' '{print $2}') if [[ -n "$host" ]]; then if [[ -e /sys/class/fc_host/$host/port_state ]]; then port_state=$(cat /sys/class/fc_host/$host/port_state) echo "Port state of fcp: $fcp is: $port_state" else echo "Can not get port statue of fcp $fcp because file /sys/class/fc_host/$host/port_state not exist" fi else echo "Failed to find host of fcp: $fcp from the lszfcp output, cann't get port state" fi done # send an iSCSI scan request with given host and optionally the ctl # the ctl means, c: channel,default to - # t: target, default to - # l: lun, default to - all_hosts=(`ls /sys/class/scsi_host/`) for host in ${all_hosts[@]} do echo "Scan on host $host triggered" echo "- - -" > /sys/class/scsi_host/$host/scan echo "Scan on host $host completed" done # the number WWPNs is generated dynamically # so we need to get them from the filesystem # the number WWPNs is generated dynamically # so we need to get them from the filesystem # and get the intersection between input wwpns and lszfcp output lszfcp_output=`lszfcp -P | awk '{print $1}' | awk -F'/' '{print $1,$2}'` echo -e "lszfcp -P output port info:\n$lszfcp_output" declare -A valid_dict for fcp in ${fcp_list[@]} do # Retry each FCP device to make sure there are matched WWPNs for it Timeout=10 while [[ $Timeout -gt 0 ]] do # Get WWPNs under /sys/bus/ccw/drivers/zfcp/. 
wwpns_shown_in_sys=`ls /sys/bus/ccw/drivers/zfcp/0.0.$fcp/ | grep "0x"` echo -e "Target WWPNs shown under /sys/bus/ccw/drivers/zfcp/0.0.$fcp/:\n$wwpns_shown_in_sys" # Try to find match between system WWPNs(from lszfcp output or under /sys) and input WWPNs found_match=0 for wwpn in ${input_wwpns[@]} do fcp_wwpn_str="0.0.${fcp} ${wwpn}" if [[ $lszfcp_output =~ $fcp_wwpn_str || $wwpns_shown_in_sys =~ $wwpn ]]; then found_match=1 echo "$fcp_wwpn_str matched with the ouput of lszfcp or WWPNs shown under /sys/bus/ccw/drivers/zfcp/." # Add this combination into valid_dict if [[ -z ${valid_dict[$fcp]} ]]; then valid_dict+=([$fcp]="$wwpn") else old_value=${valid_dict[$fcp]} new_value=${old_value}" "$wwpn valid_dict[$fcp]=$new_value fi fi done # If no matched wwpn found, need retry if [[ $found_match -eq 0 ]]; then sleep 1 Timeout=$((Timeout-1)) echo "Retrying to get the target WWPNs for FCP device $fcp, $Timeout seconds left..." lszfcp_output=`lszfcp -P | awk '{print $1}' | awk -F'/' '{print $1,$2}'` else echo "Found target WWPNs ${valid_dict[$fcp]} for FCP device $fcp." break fi done done echo "Got valid wwpns, list of key is: ${!valid_dict[@]}, list of value is: ${valid_dict[@]} ." # check the content of valid_dict, if no content, return error valid_fcp_count=0 for fcp in ${!valid_dict[@]} do valid_fcp_count=$((valid_fcp_count+=1)) done if [[ $valid_fcp_count -eq 0 ]]; then echo "Can not find the intersection between input wwpns: ${input_wwpns[@]} and lszfcp output: $lszfcp_output, exit with code $INVALID_WWPNS." 
exit $INVALID_WWPNS fi # flag which indicate whether we found a valid and accessable path for the volume FoundDiskPath=0 # wait for the device ready for fcp in ${!valid_dict[@]} do ActiveWWPNs=(${valid_dict[$fcp]}) echo "Discover WWPNs: ${ActiveWWPNs[@]} for FCP device: $fcp" # If auto-discovery of LUNs is disabled on s390 platforms # luns need to be added to the configuration through # the unit_add interface AutoLunScan=`cat /sys/module/zfcp/parameters/allow_lun_scan` if [[ "$AutoLunScan" != "Y" ]]; then echo "Auto LUN scan is disabled, so manually configure the SCSI devices." for wwpn in ${ActiveWWPNs[@]} do echo "Begin to register SCSI device 0.0.$fcp:$wwpn:$lun ..." /sbin/chzdev zfcp-lun 0.0.$fcp:$wwpn:$lun -e echo "Registration for SCSI device 0.0.$fcp:$wwpn:$lun was done" done fi echo "Start to execute udev settle" if [[ $(which udevadm 2> /dev/null) != '' ]]; then udevadm settle echo "Execution of udevadm settle was done" else udevsettle echo "Execution of udevsettle was done" fi # wait for the devices ready # timeout set to 10 seconds Timeout=10 while [ $FoundDiskPath -eq 0 ] do # if timeout less or equal 0 seconds, means no time left if [ $Timeout -le 0 ]; then echo "Waiting for devices ready timed out after 10 seconds." break fi # loop all the WWPNs to found the alive device for j in ${ActiveWWPNs[@]} do x="/dev/disk/by-path/ccw-0.0.$fcp-fc-$j-lun-$lun_id" # the x would be like: # ccw-0.0.1d13-zfcp-0x5005076306035388:0x4014400400000000 echo "Try to detect disk $x" if [ -e $x ]; then echo "Disk $x detected" diskPath=$x FoundDiskPath=1 break fi done # if devices still not ready, wait another 5 seconds and retry if [ $FoundDiskPath -eq 0 ]; then sleep 1 Timeout=$((Timeout-=1)) echo "Sleep 1 second to wait the devices ready, timeout left: $Timeout" fi done done if [ $FoundDiskPath -eq 1 ]; then echo "The storage device is ready and found disk path: $diskPath." 
else echo "Error happens during attachment because the device file of $fcp not found, will exit with code $DEVICE_PATH_NOT_FOUND." exit $DEVICE_PATH_NOT_FOUND fi WWID=`/lib/udev/scsi_id --page 0x83 --whitelisted $diskPath` echo "scsi_id command get WWID:$WWID for device $diskPath." ConfigLib="/lib/udev/rules.d/56-zfcp.rules" if [ -e "$ConfigLib" ] then ConfigFile="/lib/udev/rules.d/56-zfcp.rules" else ConfigFile="/etc/udev/rules.d/56-zfcp.rules" fi LinkItem="KERNEL==\"dm-*\", ENV{DM_UUID}==\"mpath-$WWID\", SYMLINK+=\"$target_filename\"" echo -e $LinkItem >> $ConfigFile echo "Begin to reload udev rules ..." udevadm control --reload udevadm trigger --sysname-match=dm-* echo "Undev rules reload done" echo "Exit UBUNTU attach script" exit $OK zVMCloudConnector-1.6.3/zvmsdk/volumeops/templates/sles_detach_volume.j20000664000175000017510000002060014315210052026160 0ustar ruirui00000000000000#!/bin/bash # Generated by jinja2 template fcp_list="{{ fcp_list }}" input_wwpns="{{ wwpns }}" lun="{{ lun }}" target_filename="{{ target_filename }}" is_last_volume="{{ is_last_volume }}" # Detach script exit code explanation: # OK(0): success OK=0 # MULTIPATH_SERVICE_NOT_ACTIVE(1): failed because multipathd service is not active MULTIPATH_SERVICE_NOT_ACTIVE=1 # INVALID_WWPNS(3): failed because can not found intersection between input WWPNs and lszfcp output INVALID_WWPNS=3 # DEVICE_PATH_NOT_FOUND(4): failed because no disk file found in the target VM, means no volume shown in the target VM DEVICE_PATH_NOT_FOUND=4 # CAN_NOT_FLUSH_MULTIPATH_DEVICE_MAP(5): failed to flush a multipath device map CAN_NOT_FLUSH_MULTIPATH_DEVICE_MAP=5 echo "Enter SLES detach script with parameters: FCP list:${fcp_list[@]}, INPUT WWPNS:${input_wwpns[@]}, LUN:$lun, target_filename:$target_filename, is_last_volume: $is_last_volume." # ensure multipathd service is up echo "Checking status of multipathd service ..." check_multipathd_service=`systemctl is-active --quiet multipathd` exit_code=$? 
if [[ $exit_code != 0 ]]; then echo "Detach script terminated because multipathd service is not active, exit with code $MULTIPATH_SERVICE_NOT_ACTIVE." exit $MULTIPATH_SERVICE_NOT_ACTIVE fi # Print info of each FCP for fcp in ${fcp_list[@]} do # print the status of FCP port for debug host=$(lszfcp | grep -i $fcp | awk -F' ' '{print $2}') if [[ -n "$host" ]]; then if [[ -e /sys/class/fc_host/$host/port_state ]]; then port_state=$(cat /sys/class/fc_host/$host/port_state) echo "Port state of fcp: $fcp is: $port_state" else echo "Can not get port statue of fcp $fcp because file /sys/class/fc_host/$host/port_state not exist" fi else echo "Failed to find host of fcp: $fcp from the lszfcp output, cann't get port state" fi done # we need to get the intersection between input wwpns and lszfcp output # we need execute this from beginning because if paths were flushed, we get nothing lszfcp_output=`lszfcp -P | awk '{print $1}' | awk -F'/' '{print $1,$2}'` echo "lszfcp output port info: $lszfcp_output ." declare -A valid_dict for fcp in ${fcp_list[@]} do # Retry each FCP device to make sure there are matched WWPNs for it Timeout=10 while [[ $Timeout -gt 0 ]] do # Get WWPNs under /sys/bus/ccw/drivers/zfcp/. wwpns_shown_in_sys=`ls /sys/bus/ccw/drivers/zfcp/0.0.$fcp/ | grep "0x"` echo -e "Target WWPNs shown under /sys/bus/ccw/drivers/zfcp/0.0.$fcp/:\n$wwpns_shown_in_sys" # Try to find match between system WWPNs(from lszfcp output or under /sys) and input WWPNs found_match=0 for wwpn in ${input_wwpns[@]} do fcp_wwpn_str="0.0.${fcp} ${wwpn}" if [[ $lszfcp_output =~ $fcp_wwpn_str || $wwpns_shown_in_sys =~ $wwpn ]]; then found_match=1 echo "$fcp_wwpn_str matched with the ouput of lszfcp or WWPNs shown under /sys/bus/ccw/drivers/zfcp/." 
# Add this combination into valid_dict if [[ -z ${valid_dict[$fcp]} ]]; then valid_dict+=([$fcp]="$wwpn") else old_value=${valid_dict[$fcp]} new_value=${old_value}" "$wwpn valid_dict[$fcp]=$new_value fi fi done # If no matched wwpn found, need retry if [[ $found_match -eq 0 ]]; then sleep 1 Timeout=$((Timeout-1)) echo "Retrying to get the target WWPNs for FCP device $fcp, $Timeout seconds left..." lszfcp_output=`lszfcp -P | awk '{print $1}' | awk -F'/' '{print $1,$2}'` else echo "Found target WWPNs ${valid_dict[$fcp]} for FCP device $fcp." break fi done done echo "Got valid wwpns, list of key is: ${!valid_dict[@]}, list of value is: ${valid_dict[@]} ." # check the content of valid_dict, if no content, return error and exit code 3 valid_fcp_count=0 for fcp in ${!valid_dict[@]} do valid_fcp_count=$((valid_fcp_count+=1)) done if [[ $valid_fcp_count -eq 0 ]]; then echo "Can not find the intersection between input wwpns: ${input_wwpns[@]} and lszfcp output: $lszfcp_output, exit with code $INVALID_WWPNS." exit $INVALID_WWPNS fi # flag which indicate whether we found a valid and accessable path for the volume FoundDiskPath=0 # wait for the device ready for fcp in ${!valid_dict[@]} do ActiveWWPNs=(${valid_dict[$fcp]}) echo "To find active disk path of LUN $lun, got WWPNs: ${ActiveWWPNs[@]} belonging to FCP device: $fcp." # loop all the WWPNs to found the alive device for j in ${ActiveWWPNs[@]} do x="/dev/disk/by-path/ccw-0.0.$fcp-zfcp-$j:$lun" # the x would be like: # ccw-0.0.1d13-zfcp-0x5005076306035388:0x4014400400000000 echo "Try to detect disk $x" if [ -e $x ]; then echo "Disk $x detected" diskPath=$x FoundDiskPath=1 break fi done if [ $FoundDiskPath -eq 1 ]; then echo "Found active disk path: $diskPath ." break fi done # if no disk path found, exit with code 4 if [[ -z $diskPath ]]; then echo "No valid paths found between FCP devices: ${fcp_list[@]} and WWPNS: ${input_wwpns}, exit with code $DEVICE_PATH_NOT_FOUND." 
exit $DEVICE_PATH_NOT_FOUND fi # get the wwid of device, the WWID are same for same volume WWID=$(/lib/udev/scsi_id --page 0x83 --whitelisted $diskPath) echo "scsi_id command get WWID:$WWID for device: $diskPath" # flush IO for devices echo "Begin to flush cache data on $diskPath ..." blockdev --flushbufs $diskPath > /dev/null echo "Flush cache data on $diskPath was done" # exit code default to 0, because WWIDs may be empty exit_code=$OK map_name=$(multipath -l $WWID -v 1) echo "Got map name: $map_name for WWID: $WWID" output=$(multipath -f $map_name 2>&1) exit_code=$? # error output not empty, means error happened # and the error 'in use' and 'must provode a map name' # of multipath -f will return same exit code 1 # so diff them, we will ingore the error of 'must provide a map name' if [ "$output" ]; then if [ "$(echo $output | grep -i 'must provide a map name')" ]; then echo "ignore error on WWID $WWID and Lun $lun:$output" exit_code=$OK elif [ "$(echo $output | grep -i 'in use')" ]; then echo "Warning:device $map_name with WWID $WWID and Lun $lun is use, the detachment will continue." exit_code=$OK else echo "Error:when flushing a multipath device map on device with WWID $WWID and Lun $lun failed because:$output" exit_code=$CAN_NOT_FLUSH_MULTIPATH_DEVICE_MAP fi fi echo "Flushing a multipath device map $map_name exit with code: $exit_code" #if above code didn't succeed, exit now. if [[ $exit_code != 0 ]]; then exit $exit_code fi # get the real WWPNs in the file system for fcp in ${!valid_dict[@]} do ActiveWWPNs=(${valid_dict[$fcp]}) echo "To remove LUN $lun from file system, got WWPNs: ${ActiveWWPNs[@]} belonging to FCP device $fcp." # remove FCP LUNs and their SCSI devices for wwpn in ${ActiveWWPNs[@]} do echo "Begin to deregister SCSI device 0.0.$fcp:$wwpn:$lun ..." 
chzdev -d zfcp-lun 0.0.$fcp:$wwpn:$lun --force echo "Deregistraion of 0.0.$fcp:$wwpn:$lun was done" done # remove udev rules for wwpn in ${ActiveWWPNs[@]} do sed -i -e "/ATTR{\[ccw\/0.0.$fcp\]$wwpn\/unit_add}=\"$lun\"/d" /etc/udev/rules.d/51-zfcp-0.0.$fcp.rules done # if is last volume, then should offline the FCP if [ $is_last_volume -eq 1 ]; then echo "this is last volume, will offline fcp device $fcp" /sbin/chzdev zfcp-host $fcp -d echo "FCP device $fcp was offline now" echo "To delete udev file /etc/udev/rules.d/51-zfcp-0.0.$fcp.rules" rm -f /etc/udev/rules.d/51-zfcp-0.0.$fcp.rules echo "Udev file: /etc/udev/rules.d/51-zfcp-0.0.$fcp.rules deleted" fi done echo "target file name is: $target_filename." ConfigLib="/lib/udev/rules.d/56-zfcp.rules" if [ -e "$ConfigLib" ] then ConfigFile="/lib/udev/rules.d/56-zfcp.rules" else ConfigFile="/etc/udev/rules.d/56-zfcp.rules" fi sed -i -e /SYMLINK+=\"$target_filename\"/d $ConfigFile echo "Begin to reload udev rules ..." udevadm control --reload udevadm trigger --sysname-match=dm-* echo "Undev rules reload done" echo "Exit SLES detach script" exit $OK zVMCloudConnector-1.6.3/zvmsdk/volumeops/templates/rhel8_attach_volume.j20000664000175000017510000002330314315210052026253 0ustar ruirui00000000000000#!/bin/bash # Generated by jinja2 template # both RHEL8 and RHEL9 will use this template fcp_list="{{ fcp_list }}" input_wwpns="{{ wwpns }}" lun="{{ lun }}" target_filename="{{ target_filename }}" # OK(0): success OK=0 # MULTIPATH_SERVICE_NOT_ACTIVE(1): failed because multipathd service is not active MULTIPATH_SERVICE_NOT_ACTIVE=1 # INVALID_PARAMETERS(2): failed because input parameters may have problems INVALID_PARAMETERS=2 # INVALID_WWPNS(3): failed because can not found intersection between input WWPNs and lszfcp output INVALID_WWPNS=3 # DEVICE_PATH_NOT_FOUND(4): failed because no disk file found in the target VM, means no volume shown in the target VM DEVICE_PATH_NOT_FOUND=4 echo "Enter attach script for RHEL with parameters: 
FCP list:${fcp_list[@]}, INPUT WWPNS:${input_wwpns[@]}, LUN:$lun, target_filename:$target_filename" # ensure multipathd service is up echo "Begin to enable dm-multipath mode ..." enable_multipath_mod=`lsmod | grep dm_multipath` if [ -z "$enable_multipath_mod" ];then modprobe dm-multipath echo -e "#blacklist { # devnode \"*\" #} " > /etc/multipath.conf mpathconf systemctl restart multipathd.service echo "dm-multipath mode enabled successfully" else echo "dm-multipath mode is already enabled" fi echo "Checking status of multipathd service ..." check_multipathd_service=`systemctl is-active --quiet multipathd` exit_code=$? if [[ $exit_code != 0 ]]; then echo "Attach script terminated because multipathd service is not active, exit with code $MULTIPATH_SERVICE_NOT_ACTIVE." exit $MULTIPATH_SERVICE_NOT_ACTIVE fi # because jinja2's problem, we can not use # to count the array_size fcp_count=0 for fcp in ${fcp_list[@]} do echo "Got FCP $fcp" fcp_count=$((fcp_count+=1)) done if [[ $fcp_count -eq 0 ]]; then echo "fcp_list is empty, exit with code $INVALID_PARAMETERS." exit $INVALID_PARAMETERS fi # see if zfcp is enable echo "Begin to enable zfcp mode ..." enable_zfcp_mod=`lsmod | grep zfcp` if [ -z "$enable_zfcp_mod" ];then modprobe zfcp echo "zfcp mode enabled successfully" else echo "zfcp mode is already enabled" fi # online fcp devices for fcp in ${fcp_list[@]} do echo "Begin to online FCP $fcp ..." 
/usr/sbin/cio_ignore -r $fcp > /dev/null /usr/sbin/chccwdev -e $fcp > /dev/null echo "FCP $fcp is online now" done # Print info of each FCP for fcp in ${fcp_list[@]} do # print the status of FCP port for debug host=$(lszfcp | grep -i $fcp | awk -F' ' '{print $2}') if [[ -n "$host" ]]; then if [[ -e /sys/class/fc_host/$host/port_state ]]; then port_state=$(cat /sys/class/fc_host/$host/port_state) echo "Port state of fcp: $fcp is: $port_state" else echo "Can not get port statue of fcp $fcp because file /sys/class/fc_host/$host/port_state not exist" fi else echo "Failed to find host of fcp: $fcp from the lszfcp output, cann't get port state" fi done # send an iSCSI scan request with given host and optionally the ctl # the ctl means, c: channel,default to - # t: target, default to - # l: lun, default to - all_hosts=(`ls /sys/class/scsi_host/`) for host in ${all_hosts[@]} do echo "Scan on host $host triggered" echo "- - -" > /sys/class/scsi_host/$host/scan echo "Scan on host $host completed" done # the number WWPNs is generated dynamically # so we need to get them from the filesystem # and get the intersection between input wwpns and lszfcp output lszfcp_output=`lszfcp -P | awk '{print $1}' | awk -F'/' '{print $1,$2}'` echo -e "lszfcp -P output port info:\n$lszfcp_output " declare -A valid_dict for fcp in ${fcp_list[@]} do # Retry each FCP device to make sure there are matched WWPNs for it Timeout=10 while [[ $Timeout -gt 0 ]] do # Get WWPNs under /sys/bus/ccw/drivers/zfcp/. 
wwpns_shown_in_sys=`ls /sys/bus/ccw/drivers/zfcp/0.0.$fcp/ | grep "0x"` echo -e "Target WWPNs shown under /sys/bus/ccw/drivers/zfcp/0.0.$fcp/:\n$wwpns_shown_in_sys" # Try to find match between system WWPNs(from lszfcp output or under /sys) and input WWPNs found_match=0 for wwpn in ${input_wwpns[@]} do fcp_wwpn_str="0.0.${fcp} ${wwpn}" if [[ $lszfcp_output =~ $fcp_wwpn_str || $wwpns_shown_in_sys =~ $wwpn ]]; then found_match=1 echo "$fcp_wwpn_str matched with the ouput of lszfcp or WWPNs shown under /sys/bus/ccw/drivers/zfcp/." # Add this combination into valid_dict if [[ -z ${valid_dict[$fcp]} ]]; then valid_dict+=([$fcp]="$wwpn") else old_value=${valid_dict[$fcp]} new_value=${old_value}" "$wwpn valid_dict[$fcp]=$new_value fi fi done # If no matched wwpn found, need retry if [[ $found_match -eq 0 ]]; then sleep 1 Timeout=$((Timeout-1)) echo "Retrying to get the target WWPNs for FCP device $fcp, $Timeout seconds left..." lszfcp_output=`lszfcp -P | awk '{print $1}' | awk -F'/' '{print $1,$2}'` else echo "Found target WWPNs ${valid_dict[$fcp]} for FCP device $fcp." break fi done done echo "Got valid wwpns, list of key is: ${!valid_dict[@]}, list of value is: ${valid_dict[@]} ." # check the content of valid_dict, if no content, return error valid_fcp_count=0 for fcp in ${!valid_dict[@]} do valid_fcp_count=$((valid_fcp_count+=1)) done if [[ $valid_fcp_count -eq 0 ]]; then echo "Can not find the intersection between input wwpns: ${input_wwpns[@]} and lszfcp output: $lszfcp_output, exit with code $INVALID_WWPNS." exit $INVALID_WWPNS fi # flag which indicate whether we found a valid and accessable path for the volume FoundDiskPath=0 # wait for the device ready for fcp in ${!valid_dict[@]} do ActiveWWPNs=(${valid_dict[$fcp]}) echo "Discover WWPNs: ${ActiveWWPNs[@]} for FCP device: $fcp" # If auto-discovery of Fibre-Channel target ports is # disabled on s390 platforms, ports need to be added to # the configuration. 
AutoPortScan=`cat /sys/module/zfcp/parameters/no_auto_port_rescan` if [[ "$AutoPortScan" != "N" ]]; then echo "Port rescan on FCP $fcp triggered" echo 1 > /sys/bus/ccw/drivers/zfcp/0.0.$fcp/port_rescan echo "Port rescan on FCP $fcp done" fi # If auto-discovery of LUNs is disabled on s390 platforms # luns need to be added to the configuration through # the unit_add interface AutoLunScan=`cat /sys/module/zfcp/parameters/allow_lun_scan` if [[ "$AutoLunScan" != "Y" ]]; then for wwpn in ${ActiveWWPNs[@]} do # chzdev only exist in rhel version8.0+ echo "Begin to register SCSI device 0.0.$fcp:$wwpn:$lun ..." chzdev -e -a zfcp-lun 0.0.$fcp:$wwpn:$lun echo "$lun" > /sys/bus/ccw/drivers/zfcp/0.0.$fcp/$wwpn/unit_add echo "Registration for SCSI device 0.0.$fcp:$wwpn:$lun was done" done fi for wwpn in ${ActiveWWPNs[@]} do echo "0.0.$fcp $wwpn $lun" >> /etc/zfcp.conf echo "0.0.$fcp $wwpn $lun written into zfcp.conf" done echo "Start to execute udev settle" echo "add" >> /sys/bus/ccw/devices/0.0.$fcp/uevent if [[ $(which udevadm 2> /dev/null) != '' ]]; then udevadm settle echo "Execution of udevadm settle was done" else udevsettle echo "Execution of udevsettle was done" fi # wait for the devices ready # timeout set to 10 seconds Timeout=10 while [ $FoundDiskPath -eq 0 ] do # if timeout less or equal 0 seconds, means no time left if [ $Timeout -le 0 ]; then echo "Waiting for devices ready timed out after 10 seconds." 
break fi # loop all the WWPNs to found the alive device for j in ${ActiveWWPNs[@]} do x="/dev/disk/by-path/ccw-0.0.$fcp-zfcp-$j:$lun" # the x would be like: # ccw-0.0.1d13-zfcp-0x5005076306035388:0x4014400400000000 echo "Try to detect disk $x" if [ -e $x ]; then echo "Disk $x detected" diskPath=$x FoundDiskPath=1 break fi done # if devices still not ready, wait another 5 seconds and retry if [ $FoundDiskPath -eq 0 ]; then sleep 1 Timeout=$((Timeout-1)) echo "Sleep 1 second to wait the devices ready, timeout left: $Timeout" fi done done if [ $FoundDiskPath -eq 1 ]; then echo "The storage device is ready and found disk path: $diskPath." else echo "Error happens during attachment because the device file of $fcp not found, will exit with code $DEVICE_PATH_NOT_FOUND." exit $DEVICE_PATH_NOT_FOUND fi # read the WWN from page 0x83 value for a SCSI device WWID=`/lib/udev/scsi_id --page 0x83 --whitelisted $diskPath` echo "scsi_id command get WWID:$WWID for device $diskPath." # the symlink name that user specified TargetFile="$target_filename" ConfigLib="/lib/udev/rules.d/56-zfcp.rules" if [ -e "$ConfigLib" ] then ConfigFile="/lib/udev/rules.d/56-zfcp.rules" else ConfigFile="/etc/udev/rules.d/56-zfcp.rules" fi # add udev rules wwid_existed=`cat "$ConfigFile" | grep "$WWID"` if [ -z "$wwid_existed" ];then LinkItem="KERNEL==\"dm-*\", ENV{DM_UUID}==\"mpath-$WWID\", SYMLINK+=\"$TargetFile\"" echo -e $LinkItem >> $ConfigFile else echo "$WWID" already in "$ConfigFile" fi # reload udev rules echo "Begin to reload udev rules ..." udevadm control --reload udevadm trigger --sysname-match=dm-* echo "Undev rules reload done" echo "Exit attach script for RHEL" exit $OK zVMCloudConnector-1.6.3/zvmsdk/volumeops/templates/ubuntu_detach_volume.j20000664000175000017510000002076614315210052026551 0ustar ruirui00000000000000#!/bin/bash # Generated by jinja2 template fcp_list="{{ fcp_list }}" input_wwpns="{{ wwpns }}" # lun example: 0x0100000000000000 lun="{{ lun }}" # 1. 
if the lun id less than 256, # the file under /dev/disk/by-path/ will as below, # take 'lun id = 0' as example: # ccw-0.0.5c03-fc-0x5005076802400c1a-lun-0, the the lun id is decimal. # 2. if the lun id is equal or more than 256, # the file under /dev/disk/by-path/ will as below, # take 'lun id = 256' as example: # ccw-0.0.1a0d-fc-0x500507680b26bac7-lun-0x0100000000000000, # the lun id is hex. lun_id="{{ lun_id }}" target_filename="{{ target_filename }}" is_last_volume="{{ is_last_volume }}" # Detach script exit code explanation: # OK(0): success OK=0 # MULTIPATH_SERVICE_NOT_ACTIVE(1): failed because multipathd service is not active MULTIPATH_SERVICE_NOT_ACTIVE=1 # INVALID_WWPNS(3): failed because can not found intersection between input WWPNs and lszfcp output INVALID_WWPNS=3 # DEVICE_PATH_NOT_FOUND(4): failed because no disk file found in the target VM, means no volume shown in the target VM DEVICE_PATH_NOT_FOUND=4 # CAN_NOT_FLUSH_MULTIPATH_DEVICE_MAP(5): failed to flush a multipath device map CAN_NOT_FLUSH_MULTIPATH_DEVICE_MAP=5 echo "Enter UBUNTU detach script with parameters: FCP:${fcp_list[@]}, INPUT WWPNS:${input_wwpns[@]}, LUN:$lun, LUN ID: $lun_id, target_filename:$target_filename, is_last_volume: $is_last_volume." # ensure multipathd service is up echo "Checking status of multipathd service ..." check_multipathd_service=`systemctl is-active --quiet multipathd` exit_code=$? if [[ $exit_code != 0 ]]; then echo "Detach script terminated because multipathd service is not active, exit with code $MULTIPATH_SERVICE_NOT_ACTIVE." 
exit $MULTIPATH_SERVICE_NOT_ACTIVE fi # Print info of each FCP for fcp in ${fcp_list[@]} do # print the status of FCP port for debug host=$(lszfcp | grep -i $fcp | awk -F' ' '{print $2}') if [[ -n "$host" ]]; then if [[ -e /sys/class/fc_host/$host/port_state ]]; then port_state=$(cat /sys/class/fc_host/$host/port_state) echo "Port state of fcp: $fcp is: $port_state" else echo "Can not get port statue of fcp $fcp because file /sys/class/fc_host/$host/port_state not exist" fi else echo "Failed to find host of fcp: $fcp from the lszfcp output, cann't get port state" fi done # we need to get the intersection between input wwpns and lszfcp output # we need execute this from beginning because if paths were flushed, we get nothing lszfcp_output=`lszfcp -P | awk '{print $1}' | awk -F'/' '{print $1,$2}'` echo "lszfcp -P output port info: $lszfcp_output ." declare -A valid_dict for fcp in ${fcp_list[@]} do # Retry each FCP device to make sure there are matched WWPNs for it Timeout=10 while [[ $Timeout -gt 0 ]] do # Get WWPNs under /sys/bus/ccw/drivers/zfcp/. wwpns_shown_in_sys=`ls /sys/bus/ccw/drivers/zfcp/0.0.$fcp/ | grep "0x"` echo -e "Target WWPNs shown under /sys/bus/ccw/drivers/zfcp/0.0.$fcp/:\n$wwpns_shown_in_sys" # Try to find match between system WWPNs(from lszfcp output or under /sys) and input WWPNs found_match=0 for wwpn in ${input_wwpns[@]} do fcp_wwpn_str="0.0.${fcp} ${wwpn}" if [[ $lszfcp_output =~ $fcp_wwpn_str || $wwpns_shown_in_sys =~ $wwpn ]]; then found_match=1 echo "$fcp_wwpn_str matched with the ouput of lszfcp or WWPNs shown under /sys/bus/ccw/drivers/zfcp/." # Add this combination into valid_dict if [[ -z ${valid_dict[$fcp]} ]]; then valid_dict+=([$fcp]="$wwpn") else old_value=${valid_dict[$fcp]} new_value=${old_value}" "$wwpn valid_dict[$fcp]=$new_value fi fi done # If no matched wwpn found, need retry if [[ $found_match -eq 0 ]]; then sleep 1 Timeout=$((Timeout-1)) echo "Retrying to get the target WWPNs for FCP device $fcp, $Timeout seconds left..." 
lszfcp_output=`lszfcp -P | awk '{print $1}' | awk -F'/' '{print $1,$2}'` else echo "Found target WWPNs ${valid_dict[$fcp]} for FCP device $fcp." break fi done done echo "Got valid wwpns, list of key is: ${!valid_dict[@]}, list of value is: ${valid_dict[@]} ." # check the content of valid_dict, if no content, return error and exit code 3 valid_fcp_count=0 for fcp in ${!valid_dict[@]} do valid_fcp_count=$((valid_fcp_count+=1)) done if [[ $valid_fcp_count -eq 0 ]]; then echo "Can not find the intersection between input wwpns: ${input_wwpns[@]} and lszfcp output: $lszfcp_output, exit with code $INVALID_WWPNS." exit $INVALID_WWPNS fi # flag which indicate whether we found a valid and accessable path for the volume FoundDiskPath=0 # wait for the device ready for fcp in ${!valid_dict[@]} do ActiveWWPNs=(${valid_dict[$fcp]}) echo "To find active disk path of LUN $lun, got WWPNs: ${ActiveWWPNs[@]} belonging to FCP device: $fcp." # loop all the WWPNs to found the alive device for j in ${ActiveWWPNs[@]} do x="/dev/disk/by-path/ccw-0.0.$fcp-fc-$j-lun-$lun_id" # the x would be like: # ccw-0.0.1d13-zfcp-0x5005076306035388:0x4014400400000000 echo "Try to detect disk $x" if [ -e $x ]; then echo "Disk $x detected" diskPath=$x FoundDiskPath=1 break fi done if [ $FoundDiskPath -eq 1 ]; then echo "Found active disk path: $diskPath ." break fi done # if no disk path found, exit with code 4 if [[ -z $diskPath ]]; then echo "No valid paths found between FCP devices: ${fcp_list[@]} and WWPNS: ${input_wwpns}, exit with code $DEVICE_PATH_NOT_FOUND." exit $DEVICE_PATH_NOT_FOUND fi # get the wwid of device, the WWID are same for same volume WWID=$(/lib/udev/scsi_id --page 0x83 --whitelisted $diskPath) echo "scsi_id command get WWID:$WWID for device: $diskPath" # flush IO for devices echo "Begin to flush cache data on $diskPath ..." 
blockdev --flushbufs $diskPath > /dev/null echo "Flush cache data on $diskPath was done" # exit code default to 0, because WWID may be empty exit_code=$OK # get the map name of the WWID # then use multipath -f to flush the device map_name=$(multipath -l $WWID -v 1) echo "Got map name: $map_name for WWID: $WWID" output=$(multipath -f $map_name 2>&1) exit_code=$? # error output not empty, means error happened # and the error 'in use' and 'must provode a map name' # of multipath -f will return same exit code 1 # so diff them, we will ingore the error of 'must provide a map name' if [ "$output" ]; then if [ "$(echo $output | grep -i 'must provide a map name')" ]; then echo "ignore error on WWID $WWID and Lun $lun:$output" exit_code=$OK elif [ "$(echo $output | grep -i 'in use')" ]; then echo "Warning:device $map_name with WWID $WWID and Lun $lun is use, the detachment will continue." exit_code=$OK else echo "Error:when flushing a multipath device map on device with WWID $WWID and Lun $lun failed because:$output" exit_code=$CAN_NOT_FLUSH_MULTIPATH_DEVICE_MAP fi fi echo "Flushing a multipath device map $map_name exit with code: $exit_code" #if above code didn't succeed, exit now. if [[ $exit_code != 0 ]]; then exit $exit_code fi for fcp in ${!valid_dict[@]} do ActiveWWPNs=(${valid_dict[$fcp]}) echo "To remove LUN $lun from file system, got WWPNs: ${ActiveWWPNs[@]} belonging to FCP device $fcp." # remove FCP LUNs and their SCSI devices for WWPN in ${ActiveWWPNs[@]} do echo "Begin to deregister SCSI device 0.0.$fcp:$wwpn:$lun ..." 
/sbin/chzdev -d zfcp-lun 0.0.$fcp:$WWPN:$lun --force echo "Deregistraion of 0.0.$fcp:$wwpn:$lun was done" done # if is last volume, then should offline the FCP if [ $is_last_volume -eq 1 ]; then echo "This is last volume, will offline fcp device $fcp" /sbin/chzdev zfcp-host $fcp -d echo "FCP device $fcp was offline now" fi done ConfigLib="/lib/udev/rules.d/56-zfcp.rules" if [ -e "$ConfigLib" ] then ConfigFile="/lib/udev/rules.d/56-zfcp.rules" else ConfigFile="/etc/udev/rules.d/56-zfcp.rules" fi sed -i -e /SYMLINK+=\"$target_filename\"/d $ConfigFile echo "Begin to reload udev rules ..." udevadm control --reload udevadm trigger --sysname-match=dm-* echo "Undev rules reload done" echo "Exit UBUNTU detach script" exit $OK zVMCloudConnector-1.6.3/zvmsdk/volumeops/templates/rhel7_detach_volume.j20000664000175000017510000002056014315210052026240 0ustar ruirui00000000000000#!/bin/bash #Generated by jinja2 template fcp_list="{{ fcp_list }}" input_wwpns="{{ wwpns }}" lun="{{ lun }}" target_filename="{{ target_filename }}" is_last_volume="{{ is_last_volume }}" # Detach script exit code explanation: # OK(0): success OK=0 # MULTIPATH_SERVICE_NOT_ACTIVE(1): failed because multipathd service is not active MULTIPATH_SERVICE_NOT_ACTIVE=1 # INVALID_WWPNS(3): failed because can not found intersection between input WWPNs and lszfcp output INVALID_WWPNS=3 # DEVICE_PATH_NOT_FOUND(4): failed because no disk file found in the target VM, means no volume shown in the target VM DEVICE_PATH_NOT_FOUND=4 # CAN_NOT_FLUSH_MULTIPATH_DEVICE_MAP(5): failed to flush a multipath device map CAN_NOT_FLUSH_MULTIPATH_DEVICE_MAP=5 echo "Enter RHEL7 detach script with parameters: FCP list:${fcp_list[@]}, INPUT WWPNS:${input_wwpns[@]}, LUN:$lun, target_filename:$target_filename, is_last_volume: $is_last_volume." # ensure multipathd service is up echo "Checking status of multipathd service ..." check_multipathd_service=`systemctl is-active --quiet multipathd` exit_code=$? 
if [[ $exit_code != 0 ]]; then echo "Detach script terminated because multipathd service is not active, exit with code $MULTIPATH_SERVICE_NOT_ACTIVE." exit $MULTIPATH_SERVICE_NOT_ACTIVE fi # Print info of each FCP for fcp in ${fcp_list[@]} do # print the status of FCP port for debug host=$(lszfcp | grep -i $fcp | awk -F' ' '{print $2}') if [[ -n "$host" ]]; then if [[ -e /sys/class/fc_host/$host/port_state ]]; then port_state=$(cat /sys/class/fc_host/$host/port_state) echo "Port state of fcp: $fcp is: $port_state" else echo "Can not get port statue of fcp $fcp because file /sys/class/fc_host/$host/port_state not exist" fi else echo "Failed to find host of fcp: $fcp from the lszfcp output, cann't get port state" fi done # we need to get the intersection between input wwpns and lszfcp output # we need execute this from beginning because if paths were flushed, we get nothing lszfcp_output=`lszfcp -P | awk '{print $1}' | awk -F'/' '{print $1,$2}'` echo "lszfcp -P output port info: $lszfcp_output ." declare -A valid_dict for fcp in ${fcp_list[@]} do # Retry each FCP device to make sure there are matched WWPNs for it Timeout=10 while [[ $Timeout -gt 0 ]] do # Get WWPNs under /sys/bus/ccw/drivers/zfcp/. wwpns_shown_in_sys=`ls /sys/bus/ccw/drivers/zfcp/0.0.$fcp/ | grep "0x"` echo -e "Target WWPNs shown under /sys/bus/ccw/drivers/zfcp/0.0.$fcp/:\n$wwpns_shown_in_sys" # Try to find match between system WWPNs(from lszfcp output or under /sys) and input WWPNs found_match=0 for wwpn in ${input_wwpns[@]} do fcp_wwpn_str="0.0.${fcp} ${wwpn}" if [[ $lszfcp_output =~ $fcp_wwpn_str || $wwpns_shown_in_sys =~ $wwpn ]]; then found_match=1 echo "$fcp_wwpn_str matched with the ouput of lszfcp or WWPNs shown under /sys/bus/ccw/drivers/zfcp/." 
# Add this combination into valid_dict if [[ -z ${valid_dict[$fcp]} ]]; then valid_dict+=([$fcp]="$wwpn") else old_value=${valid_dict[$fcp]} new_value=${old_value}" "$wwpn valid_dict[$fcp]=$new_value fi fi done # If no matched wwpn found, need retry if [[ $found_match -eq 0 ]]; then sleep 1 Timeout=$((Timeout-1)) echo "Retrying to get the target WWPNs for FCP device $fcp, $Timeout seconds left..." lszfcp_output=`lszfcp -P | awk '{print $1}' | awk -F'/' '{print $1,$2}'` else echo "Found target WWPNs ${valid_dict[$fcp]} for FCP device $fcp." break fi done done echo "Got valid wwpns, list of key is: ${!valid_dict[@]}, list of value is: ${valid_dict[@]} ." # check the content of valid_dict, if no content, return error and exit code 3 valid_fcp_count=0 for fcp in ${!valid_dict[@]} do valid_fcp_count=$((valid_fcp_count+=1)) done if [[ $valid_fcp_count -eq 0 ]]; then echo "Can not find the intersection between input wwpns: ${input_wwpns[@]} and lszfcp output: $lszfcp_output, exit with code $INVALID_WWPNS." exit $INVALID_WWPNS fi # flag which indicate whether we found a valid and accessable path for the volume FoundDiskPath=0 # wait for the device ready for fcp in ${!valid_dict[@]} do ActiveWWPNs=(${valid_dict[$fcp]}) echo "To find active disk path of LUN $lun, got WWPNs: ${ActiveWWPNs[@]} belonging to FCP device: $fcp." # loop all the WWPNs to found the alive device for j in ${ActiveWWPNs[@]} do x="/dev/disk/by-path/ccw-0.0.$fcp-zfcp-$j:$lun" # the x would be like: # ccw-0.0.1d13-zfcp-0x5005076306035388:0x4014400400000000 echo "Try to detect disk $x" if [ -e $x ]; then echo "Disk $x detected" diskPath=$x FoundDiskPath=1 break fi done if [ $FoundDiskPath -eq 1 ]; then echo "Found active disk path: $diskPath ." break fi done # if no disk path found, exit with code 4 if [[ -z $diskPath ]]; then echo "No valid paths found between FCP devices: ${fcp_list[@]} and WWPNS: ${input_wwpns}, exit with code $DEVICE_PATH_NOT_FOUND." 
exit $DEVICE_PATH_NOT_FOUND fi # get the wwid of device WWID=$(/lib/udev/scsi_id --page 0x83 --whitelisted $diskPath) echo "scsi_id command get WWID:$WWID for device: $diskPath" # flush IO for devices echo "Begin to flush cache data on $diskPath ..." blockdev --flushbufs $diskPath > /dev/null echo "Flush cache data on $diskPath was done" # exit code default to 0, because WWIDs may be empty exit_code=$OK # get the map name of the WWID # then use multipath -f to flush the device map_name=$(multipath -l $WWID -v 1) echo "Got map name: $map_name for WWID: $WWID" output=$(multipath -f $map_name 2>&1) exit_code=$? # error output not empty, means error happened # and the error 'in use' and 'must provode a map name' # of multipath -f will return same exit code 1 # so diff them, we will ingore the error of 'must provide a map name' if [ "$output" ]; then if [ "$(echo $output | grep -i 'must provide a map name')" ]; then echo "Ignore error on WWID $WWID and Lun $lun:$output" exit_code=$OK elif [ "$(echo $output | grep -i 'in use')" ]; then echo "Warning:device $map_name with WWID $WWID and Lun $lun is use, the detachment will continue." exit_code=$OK else echo "Error:when flushing a multipath device map on device with WWID $WWID and Lun $lun failed because:$output" exit_code=$CAN_NOT_FLUSH_MULTIPATH_DEVICE_MAP fi fi echo "Flushing a multipath device map $map_name exit with code: $exit_code" #if above code didn't succeed, exit now. if [[ $exit_code != 0 ]]; then exit $exit_code fi # get the real WWPNs in the file system for fcp in ${!valid_dict[@]} do ActiveWWPNs=(${valid_dict[$fcp]}) echo "To remove LUN $lun from file system, got WWPNs: ${ActiveWWPNs[@]} belonging to FCP device $fcp." # remove FCP LUNs and their SCSI devices for wwpn in ${ActiveWWPNs[@]} do echo "Begin to deregister SCSI device 0.0.$fcp:$wwpn:$lun ..." 
echo "$lun" > /sys/bus/ccw/drivers/zfcp/0.0.$fcp/$wwpn/unit_remove echo "Deregistraion of 0.0.$fcp:$wwpn:$lun was done" done # if is last volume, then should offline the FCP if [ $is_last_volume -eq 1 ]; then echo "This is last volume, will offline fcp device $fcp" /sbin/chccwdev -d $fcp > /dev/null echo "FCP device $fcp was offline now" fi # remove configuration items in zfcp.conf for WWPN in ${ActiveWWPNs[@]} do echo "To remove WWPN $WWPN for $fcp in zfcp.conf" sed -i -e "/0.0.$fcp $WWPN $lun/d" /etc/zfcp.conf echo "WWPN $WWPN for $fcp in zfcp.conf removed" done done # remove udev rules and reload TargetFile="$target_filename" ConfigLib="/lib/udev/rules.d/56-zfcp.rules" if [ -e "$ConfigLib" ] then ConfigFile="/lib/udev/rules.d/56-zfcp.rules" else ConfigFile="/etc/udev/rules.d/56-zfcp.rules" fi sed -i -e /SYMLINK+=\"$TargetFile\"/d $ConfigFile # reload udev rules echo "Begin to reload udev rules ..." udevadm control --reload udevadm trigger --sysname-match=dm-* echo "Undev rules reload done" echo "Exit RHEL8 detach script" exit $OK zVMCloudConnector-1.6.3/zvmsdk/volumeops/templates/rhel8_detach_volume.j20000664000175000017510000002056714315210052026250 0ustar ruirui00000000000000#!/bin/bash # Generated by jinja2 template # both RHEL8 and RHEL9 will use this template fcp_list="{{ fcp_list }}" input_wwpns="{{ wwpns }}" lun="{{ lun }}" target_filename="{{ target_filename }}" is_last_volume="{{ is_last_volume }}" # Detach script exit code explanation: # OK(0): success OK=0 # MULTIPATH_SERVICE_NOT_ACTIVE(1): failed because multipathd service is not active MULTIPATH_SERVICE_NOT_ACTIVE=1 # INVALID_WWPNS(3): failed because can not found intersection between input WWPNs and lszfcp output INVALID_WWPNS=3 # DEVICE_PATH_NOT_FOUND(4): failed because no disk file found in the target VM, means no volume shown in the target VM DEVICE_PATH_NOT_FOUND=4 # CAN_NOT_FLUSH_MULTIPATH_DEVICE_MAP(5): failed to flush a multipath device map CAN_NOT_FLUSH_MULTIPATH_DEVICE_MAP=5 echo "Enter 
detach script for RHEL with parameters: FCP list:${fcp_list[@]}, INPUT WWPNS:${input_wwpns[@]}, LUN:$lun, target_filename:$target_filename, is_last_volume: $is_last_volume." # ensure multipathd service is up echo "Checking status of multipathd service ..." check_multipathd_service=`systemctl is-active --quiet multipathd` exit_code=$? if [[ $exit_code != 0 ]]; then echo "Detach script terminated because multipathd service is not active, exit with code $MULTIPATH_SERVICE_NOT_ACTIVE." exit $MULTIPATH_SERVICE_NOT_ACTIVE fi # Print info of each FCP for fcp in ${fcp_list[@]} do # print the status of FCP port for debug host=$(lszfcp | grep -i $fcp | awk -F' ' '{print $2}') if [[ -n "$host" ]]; then if [[ -e /sys/class/fc_host/$host/port_state ]]; then port_state=$(cat /sys/class/fc_host/$host/port_state) echo "Port state of fcp: $fcp is: $port_state" else echo "Can not get port statue of fcp $fcp because file /sys/class/fc_host/$host/port_state not exist" fi else echo "Failed to find host of fcp: $fcp from the lszfcp output, cann't get port state" fi done # we need to get the intersection between input wwpns and lszfcp output # we need execute this from beginning because if paths were flushed, we get nothing lszfcp_output=`lszfcp -P | awk '{print $1}' | awk -F'/' '{print $1,$2}'` echo -e "lszfcp -P output port info:\n$lszfcp_output " declare -A valid_dict for fcp in ${fcp_list[@]} do # Retry each FCP device to make sure there are matched WWPNs for it Timeout=10 while [[ $Timeout -gt 0 ]] do # Get WWPNs under /sys/bus/ccw/drivers/zfcp/. 
wwpns_shown_in_sys=`ls /sys/bus/ccw/drivers/zfcp/0.0.$fcp/ | grep "0x"` echo -e "Target WWPNs shown under /sys/bus/ccw/drivers/zfcp/0.0.$fcp/:\n$wwpns_shown_in_sys" # Try to find match between system WWPNs(from lszfcp output or under /sys) and input WWPNs found_match=0 for wwpn in ${input_wwpns[@]} do fcp_wwpn_str="0.0.${fcp} ${wwpn}" if [[ $lszfcp_output =~ $fcp_wwpn_str || $wwpns_shown_in_sys =~ $wwpn ]]; then found_match=1 echo "$fcp_wwpn_str matched with the ouput of lszfcp or WWPNs shown under /sys/bus/ccw/drivers/zfcp/." # Add this combination into valid_dict if [[ -z ${valid_dict[$fcp]} ]]; then valid_dict+=([$fcp]="$wwpn") else old_value=${valid_dict[$fcp]} new_value=${old_value}" "$wwpn valid_dict[$fcp]=$new_value fi fi done # If no matched wwpn found, need retry if [[ $found_match -eq 0 ]]; then sleep 1 Timeout=$((Timeout-1)) echo "Retrying to get the target WWPNs for FCP device $fcp, $Timeout seconds left..." lszfcp_output=`lszfcp -P | awk '{print $1}' | awk -F'/' '{print $1,$2}'` else echo "Found target WWPNs ${valid_dict[$fcp]} for FCP device $fcp." break fi done done echo "Got valid wwpns, list of key is: ${!valid_dict[@]}, list of value is: ${valid_dict[@]} ." # check the content of valid_dict, if no content, return error and exit code 3 valid_fcp_count=0 for fcp in ${!valid_dict[@]} do valid_fcp_count=$((valid_fcp_count+=1)) done if [[ $valid_fcp_count -eq 0 ]]; then echo "Can not find the intersection between input wwpns: ${input_wwpns[@]} and lszfcp output: $lszfcp_output, exit with code $INVALID_WWPNS." exit $INVALID_WWPNS fi # flag which indicate whether we found a valid and accessable path for the volume FoundDiskPath=0 # wait for the device ready for fcp in ${!valid_dict[@]} do ActiveWWPNs=(${valid_dict[$fcp]}) echo "To find active disk path of LUN $lun, got WWPNs: ${ActiveWWPNs[@]} belonging to FCP device: $fcp." 
# loop all the WWPNs to found the alive device for j in ${ActiveWWPNs[@]} do x="/dev/disk/by-path/ccw-0.0.$fcp-zfcp-$j:$lun" # the x would be like: # ccw-0.0.1d13-zfcp-0x5005076306035388:0x4014400400000000 echo "Try to detect disk $x" if [ -e $x ]; then echo "Disk $x detected" diskPath=$x FoundDiskPath=1 break fi done if [ $FoundDiskPath -eq 1 ]; then echo "Found active disk path: $diskPath ." break fi done # if no disk path found, exit with code 4 if [[ -z $diskPath ]]; then echo "No valid paths found between FCP devices: ${fcp_list[@]} and WWPNS: ${input_wwpns}, exit with code $DEVICE_PATH_NOT_FOUND." exit $DEVICE_PATH_NOT_FOUND fi # get the wwid of device, the WWID are same for same volume WWID=$(/lib/udev/scsi_id --page 0x83 --whitelisted $diskPath) echo "scsi_id command get WWID: $WWID for device: $diskPath" # flush IO for devices echo "Begin to flush cache data on $diskPath ..." blockdev --flushbufs $diskPath > /dev/null echo "Flush cache data on $diskPath was done" # exit code default to 0 exit_code=$OK # get the map name of the WWID # then use multipath -f to flush the device map_name=$(multipath -l $WWID -v 1) echo "Got map name: $map_name for WWID: $WWID" output=$(multipath -f $map_name 2>&1) exit_code=$? # error output not empty, means error happened # and the error 'in use' and 'must provode a map name' # of multipath -f will return same exit code 1 # so diff them, we will ingore the error of 'must provide a map name' if [ "$output" ]; then if [ "$(echo $output | grep -i 'must provide a map name')" ]; then echo "Ignore error on WWID $WWID and Lun $lun:$output" exit_code=$OK elif [ "$(echo $output | grep -i 'in use')" ]; then echo "Warning:device $map_name with WWID $WWID and Lun $lun is use, the detachment will continue." 
exit_code=$OK else echo "Error:when flushing a multipath device map on device with WWID $WWID and Lun $lun failed because:$output" exit_code=$CAN_NOT_FLUSH_MULTIPATH_DEVICE_MAP fi fi echo "Flushing a multipath device map $map_name exit with code: $exit_code" #if above code didn't succeed, exit now. if [[ $exit_code != 0 ]]; then exit $exit_code fi for fcp in ${!valid_dict[@]} do ActiveWWPNs=(${valid_dict[$fcp]}) echo "To remove LUN $lun from file system, got WWPNs: ${ActiveWWPNs[@]} belonging to FCP device $fcp." # remove FCP LUNs and their SCSI devices for wwpn in ${ActiveWWPNs[@]} do echo "Begin to deregister SCSI device 0.0.$fcp:$wwpn:$lun ..." chzdev -d zfcp-lun 0.0.$fcp:$wwpn:$lun --force echo "Deregistraion of 0.0.$fcp:$wwpn:$lun was done" done # if is last volume, then should offline the FCP if [ $is_last_volume -eq 1 ]; then echo "This is last volume, will offline fcp device $fcp" chzdev -d zfcp-host 0.0.$fcp echo "FCP device $fcp was offline now" fi # remove configuration items in zfcp.conf for WWPN in ${ActiveWWPNs[@]} do echo "To remove WWPN $WWPN for $fcp in zfcp.conf" sed -i -e "/0.0.$fcp $WWPN $lun/d" /etc/zfcp.conf echo "WWPN $WWPN for $fcp in zfcp.conf removed" done done # remove udev rules and reload TargetFile="$target_filename" ConfigLib="/lib/udev/rules.d/56-zfcp.rules" if [ -e "$ConfigLib" ] then ConfigFile="/lib/udev/rules.d/56-zfcp.rules" else ConfigFile="/etc/udev/rules.d/56-zfcp.rules" fi sed -i -e /SYMLINK+=\"$TargetFile\"/d $ConfigFile # reload udev rules echo "Begin to reload udev rules ..." 
udevadm control --reload udevadm trigger --sysname-match=dm-* echo "Undev rules reload done" echo "Exit detach script for RHEL" exit $OK zVMCloudConnector-1.6.3/zvmsdk/volumeops/templates/rhel7_attach_volume.j20000664000175000017510000002311614315210052026254 0ustar ruirui00000000000000#!/bin/bash # Generated by jinja2 template fcp_list="{{ fcp_list }}" input_wwpns="{{ wwpns }}" lun="{{ lun }}" target_filename="{{ target_filename }}" # Attach script exit code explanation: # OK(0): success OK=0 # MULTIPATH_SERVICE_NOT_ACTIVE(1): failed because multipathd service is not active MULTIPATH_SERVICE_NOT_ACTIVE=1 # INVALID_PARAMETERS(2): failed because input parameters may have problems INVALID_PARAMETERS=2 # INVALID_WWPNS(3): failed because can not found intersection between input WWPNs and lszfcp output INVALID_WWPNS=3 # DEVICE_PATH_NOT_FOUND(4): failed because no disk file found in the target VM, means no volume shown in the target VM DEVICE_PATH_NOT_FOUND=4 error_code_ echo "Enter RHEL7 attach script with parameters: FCP list:${fcp_list[@]}, INPUT WWPNS:${input_wwpns[@]}, LUN:$lun, target_filename:$target_filename" # ensure multipathd service is up echo "Begin to enable dm-multipath mode ..." enable_multipath_mod=`lsmod | grep dm_multipath` if [ -z "$enable_multipath_mod" ];then modprobe dm-multipath echo -e "#blacklist { # devnode \"*\" #} " > /etc/multipath.conf mpathconf systemctl restart multipathd.service echo "dm-multipath mode enabled successfully" else echo "dm-multipath mode is already enabled" fi echo "Checking status of multipathd service ..." check_multipathd_service=`systemctl is-active --quiet multipathd` exit_code=$? if [[ $exit_code != 0 ]]; then echo "Attach script terminated because multipathd service is not active, exit with code $MULTIPATH_SERVICE_NOT_ACTIVE." 
exit $MULTIPATH_SERVICE_NOT_ACTIVE fi # because jinja2's problem, we can not use # to count the array_size fcp_count=0 for fcp in ${fcp_list[@]} do echo "Got FCP $fcp" fcp_count=$((fcp_count+=1)) done if [[ $fcp_count -eq 0 ]]; then echo "fcp_list is empty, exit with code $INVALID_PARAMETERS." exit $INVALID_PARAMETERS fi # see if zfcp is enable echo "Begin to enable zfcp mode ..." enable_zfcp_mod=`lsmod | grep zfcp` if [ -z "$enable_zfcp_mod" ];then modprobe zfcp echo "zfcp mode enabled successfully" else echo "zfcp mode is already enabled" fi # online fcp devices for fcp in ${fcp_list[@]} do echo "Begin to online FCP $fcp ..." /usr/sbin/cio_ignore -r $fcp > /dev/null /usr/sbin/chccwdev -e $fcp > /dev/null echo "FCP $fcp is online now" done # Print info of each FCP for fcp in ${fcp_list[@]} do # print the status of FCP port for debug host=$(lszfcp | grep -i $fcp | awk -F' ' '{print $2}') if [[ -n "$host" ]]; then if [[ -e /sys/class/fc_host/$host/port_state ]]; then port_state=$(cat /sys/class/fc_host/$host/port_state) echo "Port state of fcp: $fcp is: $port_state" else echo "Can not get port statue of fcp $fcp because file /sys/class/fc_host/$host/port_state not exist" fi else echo "Failed to find host of fcp: $fcp from the lszfcp output, cann't get port state" fi done # send an iSCSI scan request with given host and optionally the ctl # the ctl means, c: channel,default to - # t: target, default to - # l: lun, default to - all_hosts=(`ls /sys/class/scsi_host/`) for host in ${all_hosts[@]} do echo "Scan on host $host triggered" echo "- - -" > /sys/class/scsi_host/$host/scan echo "Scan on host $host completed" done # the number WWPNs is generated dynamically # so we need to get them from the filesystem # the number WWPNs is generated dynamically # so we need to get them from the filesystem # and get the intersection between input wwpns and lszfcp output lszfcp_output=`lszfcp -P | awk '{print $1}' | awk -F'/' '{print $1,$2}'` echo -e "lszfcp -P output port 
info:\n$lszfcp_output" declare -A valid_dict for fcp in ${fcp_list[@]} do # Retry each FCP device to make sure there are matched WWPNs for it Timeout=10 while [[ $Timeout -gt 0 ]] do # Get WWPNs under /sys/bus/ccw/drivers/zfcp/. wwpns_shown_in_sys=`ls /sys/bus/ccw/drivers/zfcp/0.0.$fcp/ | grep "0x"` echo -e "Target WWPNs shown under /sys/bus/ccw/drivers/zfcp/0.0.$fcp/:\n$wwpns_shown_in_sys" # Try to find match between system WWPNs(from lszfcp output or under /sys) and input WWPNs found_match=0 for wwpn in ${input_wwpns[@]} do fcp_wwpn_str="0.0.${fcp} ${wwpn}" if [[ $lszfcp_output =~ $fcp_wwpn_str || $wwpns_shown_in_sys =~ $wwpn ]]; then found_match=1 echo "$fcp_wwpn_str matched with the ouput of lszfcp or WWPNs shown under /sys/bus/ccw/drivers/zfcp/." # Add this combination into valid_dict if [[ -z ${valid_dict[$fcp]} ]]; then valid_dict+=([$fcp]="$wwpn") else old_value=${valid_dict[$fcp]} new_value=${old_value}" "$wwpn valid_dict[$fcp]=$new_value fi fi done # If no matched wwpn found, need retry if [[ $found_match -eq 0 ]]; then sleep 1 Timeout=$((Timeout-1)) echo "Retrying to get the target WWPNs for FCP device $fcp, $Timeout seconds left..." lszfcp_output=`lszfcp -P | awk '{print $1}' | awk -F'/' '{print $1,$2}'` else echo "Found target WWPNs ${valid_dict[$fcp]} for FCP device $fcp." break fi done done echo "Got valid wwpns, list of key is: ${!valid_dict[@]}, list of value is: ${valid_dict[@]} ." # check the content of valid_dict, if no content, return error valid_fcp_count=0 for fcp in ${!valid_dict[@]} do valid_fcp_count=$((valid_fcp_count+=1)) done if [[ $valid_fcp_count -eq 0 ]]; then echo "Can not find the intersection between input wwpns: ${input_wwpns[@]} and lszfcp output: $lszfcp_output, exit with code $INVALID_WWPNS." 
exit $INVALID_WWPNS fi # flag which indicate whether we found a valid and accessable path for the volume FoundDiskPath=0 # wait for the device ready for fcp in ${!valid_dict[@]} do ActiveWWPNs=(${valid_dict[$fcp]}) echo "Discover WWPNs: ${ActiveWWPNs[@]} for FCP device: $fcp" # If auto-discovery of Fibre-Channel target ports is # disabled on s390 platforms, ports need to be added to # the configuration. AutoPortScan=`cat /sys/module/zfcp/parameters/no_auto_port_rescan` if [[ "$AutoPortScan" != "N" ]]; then echo "Port rescan on FCP $fcp triggered" echo 1 > /sys/bus/ccw/drivers/zfcp/0.0.$fcp/port_rescan echo "Port rescan on FCP $fcp done" fi # luns need to be added to the configuration through the unit_add interface # execute this anytime because unit_remove in detach script will fails if no corresponding unit_add for wwpn in ${ActiveWWPNs[@]} do echo "Begin to register SCSI device 0.0.$fcp:$wwpn:$lun ..." echo "$lun" > /sys/bus/ccw/drivers/zfcp/0.0.$fcp/$wwpn/unit_add echo "Registration for SCSI device 0.0.$fcp:$wwpn:$lun was done" done for wwpn in ${ActiveWWPNs[@]} do echo "0.0.$fcp $wwpn $lun" >> /etc/zfcp.conf echo "0.0.$fcp $wwpn $lun written into zfcp.conf" done echo "Start to execute udev settle" echo "add" >> /sys/bus/ccw/devices/0.0.$fcp/uevent if [[ $(which udevadm 2> /dev/null) != '' ]]; then udevadm settle echo "Execution of udevadm settle was done" else udevsettle echo "Execution of udevsettle was done" fi # wait for the devices ready # timeout set to 10 seconds Timeout=10 while [ $FoundDiskPath -eq 0 ] do # if timeout less or equal 0 seconds, means no time left if [ $Timeout -le 0 ];then echo "Waiting for devices ready timed out after 10 seconds." 
break fi # loop all the WWPNs to found the alive device for j in ${ActiveWWPNs[@]} do x="/dev/disk/by-path/ccw-0.0.$fcp-zfcp-$j:$lun" # the x would be like: # ccw-0.0.1d13-zfcp-0x5005076306035388:0x4014400400000000 echo "Try to detect disk $x" if [ -e $x ];then echo "Disk $x detected" diskPath=$x FoundDiskPath=1 break fi done # if devices still not ready, wait another 5 seconds and retry if [ $FoundDiskPath -eq 0 ]; then sleep 1 Timeout=$((Timeout-=1)) echo "Sleep 1 second to wait the devices ready, timeout left: $Timeout" fi done done if [ $FoundDiskPath -eq 1 ]; then echo "The storage device is ready and found disk path: $diskPath." else echo "Error happens during attachment because the device file of $fcp not found, will exit with code $DEVICE_PATH_NOT_FOUND." exit $DEVICE_PATH_NOT_FOUND fi # read the WWN from page 0x83 value for a SCSI device WWID=`/lib/udev/scsi_id --page 0x83 --whitelisted $diskPath` echo "scsi_id command get WWID:$WWID for device $diskPath." # the symlink name that user specified TargetFile="$target_filename" ConfigLib="/lib/udev/rules.d/56-zfcp.rules" if [ -e "$ConfigLib" ] then ConfigFile="/lib/udev/rules.d/56-zfcp.rules" else ConfigFile="/etc/udev/rules.d/56-zfcp.rules" fi # add udev rules wwid_existed=`cat "$ConfigFile" | grep "$WWID"` if [ -z "$wwid_existed" ];then LinkItem="KERNEL==\"dm-*\", ENV{DM_UUID}==\"mpath-$WWID\", SYMLINK+=\"$TargetFile\"" echo -e $LinkItem >> $ConfigFile else echo "$WWID" already in "$ConfigFile" fi # reload udev rules echo "Begin to reload udev rules ..." udevadm control --reload udevadm trigger --sysname-match=dm-* echo "Undev rules reload done" echo "Exit RHEL8 attach script" exit $OK zVMCloudConnector-1.6.3/zvmsdk/volumeop.py0000775000175000017510000033415614315210052020270 0ustar ruirui00000000000000# Copyright 2017, 2022 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import abc import re import shutil import uuid import six import threading import os from zvmsdk import config from zvmsdk import constants from zvmsdk import database from zvmsdk import dist from zvmsdk import exception from zvmsdk import log from zvmsdk import smtclient from zvmsdk import utils as zvmutils from zvmsdk import vmops from zvmsdk import utils _VolumeOP = None CONF = config.CONF LOG = log.LOG # instance parameters: NAME = 'name' OS_TYPE = 'os_type' # volume parameters: SIZE = 'size' TYPE = 'type' LUN = 'lun' # connection_info parameters: ALIAS = 'alias' PROTOCOL = 'protocol' FCPS = 'fcps' WWPNS = 'wwpns' DEDICATE = 'dedicate' _LOCK_RESERVE_FCP = threading.RLock() def get_volumeop(): global _VolumeOP if not _VolumeOP: _VolumeOP = VolumeOperatorAPI() return _VolumeOP @six.add_metaclass(abc.ABCMeta) class VolumeOperatorAPI(object): """Volume operation APIs oriented towards SDK driver. The reason to design these APIs is to facilitate the SDK driver issuing a volume related request without knowing details. The details among different distributions, different instance status, different volume types and so on are all hidden behind these APIs. The only thing the issuer need to know is what it want to do on which targets. In fact, that's an ideal case. In the real world, something like connection_info still depends on different complex and the issuer needs to know how to deal with its case. Even so, these APIs can still make things much easier. 
""" _fcp_manager_obj = None def __init__(self): if not VolumeOperatorAPI._fcp_manager_obj: VolumeOperatorAPI._fcp_manager_obj = FCPVolumeManager() self._volume_manager = VolumeOperatorAPI._fcp_manager_obj def attach_volume_to_instance(self, connection_info): self._volume_manager.attach(connection_info) def detach_volume_from_instance(self, connection_info): self._volume_manager.detach(connection_info) def volume_refresh_bootmap(self, fcpchannel, wwpn, lun, wwid='', transportfiles='', guest_networks=None, fcp_template_id=None): return self._volume_manager.volume_refresh_bootmap(fcpchannel, wwpn, lun, wwid=wwid, transportfiles=transportfiles, guest_networks=guest_networks, fcp_template_id=fcp_template_id) def get_volume_connector(self, assigner_id, reserve, fcp_template_id=None, sp_name=None): return self._volume_manager.get_volume_connector( assigner_id, reserve, fcp_template_id=fcp_template_id, sp_name=sp_name) def check_fcp_exist_in_db(self, fcp, raise_exec=True): return self._volume_manager.check_fcp_exist_in_db(fcp, raise_exec) def get_fcp_usage(self, fcp): return self._volume_manager.get_fcp_usage(fcp) def set_fcp_usage(self, assigner_id, fcp, reserved, connections, fcp_template_id): return self._volume_manager.set_fcp_usage(fcp, assigner_id, reserved, connections, fcp_template_id) def create_fcp_template(self, name, description: str = '', fcp_devices: str = '', host_default: bool = False, default_sp_list: list = [], min_fcp_paths_count: int = None): return self._volume_manager.fcp_mgr.create_fcp_template( name, description, fcp_devices, host_default, default_sp_list, min_fcp_paths_count) def edit_fcp_template(self, fcp_template_id, name=None, description=None, fcp_devices=None, host_default=None, default_sp_list=None, min_fcp_paths_count=None): return self._volume_manager.fcp_mgr.edit_fcp_template( fcp_template_id, name=name, description=description, fcp_devices=fcp_devices, host_default=host_default, default_sp_list=default_sp_list, 
min_fcp_paths_count=min_fcp_paths_count) def get_fcp_templates(self, template_id_list=None, assigner_id=None, default_sp_list=None, host_default=None): return self._volume_manager.fcp_mgr.get_fcp_templates( template_id_list, assigner_id, default_sp_list, host_default) def get_fcp_templates_details(self, template_id_list=None, raw=False, statistics=True, sync_with_zvm=False): return self._volume_manager.fcp_mgr.get_fcp_templates_details( template_id_list, raw=raw, statistics=statistics, sync_with_zvm=sync_with_zvm) def delete_fcp_template(self, template_id): return self._volume_manager.fcp_mgr.delete_fcp_template(template_id) @six.add_metaclass(abc.ABCMeta) class VolumeConfiguratorAPI(object): """Volume configure APIs to implement volume config jobs on the target instance, like: attach, detach, and so on. The reason to design these APIs is to hide the details among different Linux distributions and releases. """ def __init__(self): self._vmop = vmops.get_vmops() self._dist_manager = dist.LinuxDistManager() self._smtclient = smtclient.get_smtclient() def check_IUCV_is_ready(self, assigner_id): # Make sure the iucv channel is ready for communication with VM ready = True try: self._smtclient.execute_cmd(assigner_id, 'pwd') except exception.SDKSMTRequestFailed as err: if 'UNAUTHORIZED_ERROR' in err.format_message(): # If unauthorized, we must raise exception errmsg = err.results['response'][0] msg = ('IUCV failed to get authorization from VM %(vm)s with ' 'error %(err)s' % {'vm': assigner_id, 'err': errmsg}) LOG.error(msg) raise exception.SDKVolumeOperationError(rs=6, userid=assigner_id, msg=errmsg) else: # In such case, we can continue without raising exception ready = False msg = ('Failed to connect VM %(vm)s with error ' '%(err)s, assume it is OFF status ' 'and continue' % {'vm': assigner_id, 'err': err.results['response'][0]}) LOG.debug(msg) return ready def _get_status_code_from_systemctl(self, assigner_id, command): """get the status code from systemctl status for 
example, if systemctl status output: Main PID: 28406 (code=exited, status=0/SUCCESS) this function will return the 3 behind status= """ output = self._smtclient.execute_cmd_direct(assigner_id, command) exit_code = 0 for line in output['response']: if 'Main PID' in line: # the status code start with = and before /FAILURE pattern = '(?<=status=)([0-9]+)' ret = re.search(pattern, line) exit_code = int(ret.group(1)) break return exit_code def config_attach(self, fcp_list, assigner_id, target_wwpns, target_lun, multipath, os_version, mount_point): LOG.info("Begin to configure volume (WWPN:%s, LUN:%s) on the " "virtual machine %s with FCP devices " "%s." % (target_wwpns, target_lun, assigner_id, fcp_list)) linuxdist = self._dist_manager.get_linux_dist(os_version)() self.configure_volume_attach(fcp_list, assigner_id, target_wwpns, target_lun, multipath, os_version, mount_point, linuxdist) iucv_is_ready = self.check_IUCV_is_ready(assigner_id) if iucv_is_ready: # If VM is active rather than shutdown, then restart zvmguestconfigure # to run punch script(i.e. reader file) in the VM operating system active_cmds = linuxdist.create_active_net_interf_cmd() ret = self._smtclient.execute_cmd_direct( assigner_id, active_cmds, timeout=CONF.volume.punch_script_execution_timeout) LOG.info('attach scripts return values: %s' % ret) if ret['rc'] != 0: # if return code is 64 means timeout # no need to check the exist code of systemctl and return if ret['rc'] == 64: errmsg = ('Failed to configure volume in the virtual machine ' '%s for volume (WWPN:%s, LUN:%s) ' 'because exceed the timeout %s.' 
% (assigner_id, target_wwpns, target_lun, CONF.volume.punch_script_execution_timeout)) LOG.error(errmsg) raise exception.SDKVolumeOperationError( rs=8, userid=assigner_id, msg=errmsg) # get exit code of zvmguestconfigure.service from VM OS, # the exit code reflects the result of running punch script get_status_cmd = 'systemctl status zvmguestconfigure.service' exit_code = self._get_status_code_from_systemctl( assigner_id, get_status_cmd) # Attach script exit code explanation: # 1: failed because multipathd service is not active # 2: failed because input parameters may have problems # 3: failed because can not found intersection between input WWPNs and lszfcp output # 4: failed because no disk file found in the target VM, means no volume shown in the target VM if exit_code == 1: errmsg = ('Failed to configure volume because the ' 'multipathd service is not active ' 'in the target virtual machine' '(userid:%s).' % assigner_id) elif exit_code == 2: errmsg = ('Failed to configure volume because the ' 'configuration process terminate early with ' 'exit code %s, refer to the /var/log/messages in ' 'target virtual machine(userid:%s) for more ' 'details.' % (exit_code, assigner_id)) elif exit_code == 3: errmsg = ('Failed to configure volume because can not ' 'find valid target WWPNs for FCP devices %s, ' 'refer to the /var/log/messages in the target ' 'virtual machine(userid:%s) for more ' 'details.' % (fcp_list, assigner_id)) elif exit_code == 4: errmsg = ('Failed to configure volume because the ' 'volume(target WWPN:%s, LUN:%s) did not show up in ' 'the target virtual machine(userid:%s), please ' 'check Fibre Channel connectivity between ' 'the FCP devices(%s) and target WWPN.' 
% (target_wwpns, target_lun, assigner_id, fcp_list)) else: errmsg = ('Failed to configure volume in the target ' 'virtual machine(userid:%s) for volume' '(target WWPN:%s, LUN:%s) on FCP devices %s with ' 'exit code: %s, refer to the /var/log/messages ' 'in the target virtual machine for more details.' % (assigner_id, target_wwpns, target_lun, fcp_list, exit_code)) LOG.error(errmsg) raise exception.SDKVolumeOperationError(rs=8, userid=assigner_id, msg=errmsg) LOG.info("Configuration of volume (WWPN:%s, LUN:%s) on the " "target virtual machine %s with FCP devices " "%s is done." % (target_wwpns, target_lun, assigner_id, fcp_list)) def config_detach(self, fcp_list, assigner_id, target_wwpns, target_lun, multipath, os_version, mount_point, connections): LOG.info("Begin to deconfigure volume (WWPN:%s, LUN:%s) on the " "virtual machine %s with FCP devices " "%s." % (target_wwpns, target_lun, assigner_id, fcp_list)) linuxdist = self._dist_manager.get_linux_dist(os_version)() self.configure_volume_detach(fcp_list, assigner_id, target_wwpns, target_lun, multipath, os_version, mount_point, linuxdist, connections) iucv_is_ready = self.check_IUCV_is_ready(assigner_id) if iucv_is_ready: # If VM is active rather than shutdown, then restart zvmguestconfigure # to run punch script(i.e. reader file) in the VM operating system active_cmds = linuxdist.create_active_net_interf_cmd() ret = self._smtclient.execute_cmd_direct( assigner_id, active_cmds, timeout=CONF.volume.punch_script_execution_timeout) LOG.info('detach scripts return values: %s' % ret) if ret['rc'] != 0: # if return code is 64 means timeout # no need to check the exist code of systemctl and return if ret['rc'] == 64: errmsg = ('detach script execution in the virtual machine ' '%s for volume (WWPN:%s, LUN:%s) ' 'exceed the timeout %s.' 
% (assigner_id, target_wwpns, target_lun, CONF.volume.punch_script_execution_timeout)) LOG.error(errmsg) raise exception.SDKVolumeOperationError( rs=9, userid=assigner_id, msg=errmsg) get_status_cmd = 'systemctl status zvmguestconfigure.service' exit_code = self._get_status_code_from_systemctl( assigner_id, get_status_cmd) # Detach script exit code explanation: # 1: failed because multipathd service is not active # 3: failed because can not found intersection between input WWPNs and lszfcp output # 4: failed because no disk file found in the target VM, means no volume shown in the target VM # 5: failed to flush a multipath device map if exit_code == 1: errmsg = ('Failed to deconfigure volume because the ' 'multipathd service is not active ' 'in the target virtual machine' '(userid:%s).' % assigner_id) elif exit_code == 3: errmsg = ('Failed to deconfigure volume because can not ' 'find valid target WWPNs for FCP devices %s, ' 'refer to the /var/log/messages in the target ' 'virtual machine(userid:%s) for more ' 'details.' % (fcp_list, assigner_id)) elif exit_code == 4: errmsg = ('Failed to deconfigure volume because the ' 'volume(target WWPN:%s, LUN:%s) did not show up in ' 'the target virtual machine(userid:%s), please ' 'check Fibre Channel connectivity between ' 'the FCP devices(%s) and target WWPN.' % (target_wwpns, target_lun, assigner_id, fcp_list)) elif exit_code == 5: errmsg = ('Failed to deconfigure volume because ' 'getting error when flushing the multipath ' 'device maps, refer to the /var/log/messages in ' 'the target virtual machine(userid:%s) for ' 'more details.' % assigner_id) else: errmsg = ('Failed to deconfigure volume in the target ' 'virtual machine(userid:%s) for volume' '(target WWPN:%s, LUN:%s) on FCP devices %s with ' 'exit code: %s, refer to the /var/log/messages ' 'in the target virtual machine for more details.' 
% (assigner_id, target_wwpns, target_lun, fcp_list, exit_code)) LOG.error(errmsg) raise exception.SDKVolumeOperationError(rs=9, userid=assigner_id, msg=errmsg) LOG.info("Deconfiguration of volume (WWPN:%s, LUN:%s) on the " "target virtual machine %s with FCP devices " "%s is done." % (target_wwpns, target_lun, assigner_id, fcp_list)) def _create_file(self, assigner_id, file_name, data): temp_folder = self._smtclient.get_guest_temp_path(assigner_id) file_path = os.path.join(temp_folder, file_name) with open(file_path, "w") as f: f.write(data) return file_path, temp_folder def configure_volume_attach(self, fcp_list, assigner_id, target_wwpns, target_lun, multipath, os_version, mount_point, linuxdist): """new==True means this is first attachment""" # get configuration commands fcp_list_str = ' '.join(fcp_list) target_wwpns_str = ' '.join(target_wwpns) config_cmds = linuxdist.get_volume_attach_configuration_cmds( fcp_list_str, target_wwpns_str, target_lun, multipath, mount_point) LOG.debug('Got volume attachment configuation cmds for %s,' 'the content is:%s' % (assigner_id, config_cmds)) # write commands into script file config_file, config_file_path = self._create_file(assigner_id, 'atvol.sh', config_cmds) LOG.debug('Creating file %s to contain volume attach ' 'configuration file' % config_file) # punch file into guest fileClass = "X" try: self._smtclient.punch_file(assigner_id, config_file, fileClass) finally: LOG.debug('Removing the folder %s ', config_file_path) shutil.rmtree(config_file_path) def configure_volume_detach(self, fcp_list, assigner_id, target_wwpns, target_lun, multipath, os_version, mount_point, linuxdist, connections): # get configuration commands fcp_list_str = ' '.join(fcp_list) target_wwpns_str = ' '.join(target_wwpns) config_cmds = linuxdist.get_volume_detach_configuration_cmds( fcp_list_str, target_wwpns_str, target_lun, multipath, mount_point, connections) LOG.debug('Got volume detachment configuation cmds for %s,' 'the content is:%s' % 
(assigner_id, config_cmds)) # write commands into script file config_file, config_file_path = self._create_file(assigner_id, 'devol.sh', config_cmds) LOG.debug('Creating file %s to contain volume detach ' 'configuration file' % config_file) # punch file into guest fileClass = "X" try: self._smtclient.punch_file(assigner_id, config_file, fileClass) finally: LOG.debug('Removing the folder %s ', config_file_path) shutil.rmtree(config_file_path) class FCP(object): def __init__(self, init_info): self._dev_no = None self._dev_status = None self._npiv_port = None self._chpid = None self._physical_port = None self._assigned_id = None self._owner = None self._parse(init_info) @staticmethod def _get_value_from_line(info_line: str): """Get the value behind the last colon and transfer to lower cases. For example, input str is 'xxxxxx: VAlval' return value will be: valval""" val = info_line.split(':')[-1].strip().lower() return val if val else None def _parse(self, init_info): """Initialize a FCP device object from several lines of string describing properties of the FCP device. Here is a sample: FCP device number: 1D1E Status: Free NPIV world wide port number: C05076DE330003C2 Channel path ID: 27 Physical world wide port number: C05076DE33002E41 Owner: NONE The format comes from the response of xCAT, do not support arbitrary format. """ lines_per_item = constants.FCP_INFO_LINES_PER_ITEM if isinstance(init_info, list) and (len(init_info) == lines_per_item): for line in init_info: if 'FCP device number' in line: self._dev_no = self._get_value_from_line(line) elif 'Status' in line: self._dev_status = self._get_value_from_line(line) elif 'NPIV world wide port number' in line: self._npiv_port = self._get_value_from_line(line) elif 'Channel path ID' in line: self._chpid = self._get_value_from_line(line) if len(self._chpid) != 2: LOG.warn("CHPID value %s of FCP device %s is " "invalid!" 
% (self._chpid, self._dev_no)) elif 'Physical world wide port numbe' in line: self._physical_port = self._get_value_from_line(line) elif 'Owner' in line: self._owner = self._get_value_from_line(line) else: LOG.info('Unknown line found in FCP information:%s', line) else: LOG.warning('When parsing FCP information, got an invalid ' 'instance %s', init_info) def get_dev_no(self): return self._dev_no def get_dev_status(self): return self._dev_status def get_npiv_port(self): return self._npiv_port def set_npiv_port(self, new_npiv_port: str): self._npiv_port = new_npiv_port def set_physical_port(self, new_phy_port: str): self._physical_port = new_phy_port def get_physical_port(self): return self._physical_port def get_chpid(self): return self._chpid def get_owner(self): return self._owner def is_valid(self): # FIXME: add validation later return True def to_tuple(self): """Tranfer this object to a tuple type, format is like (fcp_id, wwpn_npiv, wwpn_phy, chpid, state, owner) for example: ('1a06', 'c05076de33000355', 'c05076de33002641', '27', 'active', 'user1') """ return (self.get_dev_no(), self.get_npiv_port(), self.get_physical_port(), self.get_chpid(), self.get_dev_status(), self.get_owner()) class FCPManager(object): def __init__(self): # _fcp_path_info store the FCP path mapping index by path no self._fcp_path_mapping = {} self.db = database.FCPDbOperator() self._smtclient = smtclient.get_smtclient() # Sync FCP DB self.sync_db() def sync_db(self): """Sync FCP DB with the FCP info queried from zVM""" with zvmutils.ignore_errors(): self._sync_db_with_zvm() def _get_all_fcp_info(self, assigner_id, status=None): fcp_info = self._smtclient.get_fcp_info_by_status(assigner_id, status) return fcp_info def increase_fcp_connections(self, fcp_list, assigner_id=None): """Increase connections of the given FCP devices :param fcp_list: (list) a list of FCP devices :param assigner_id: (str) the userid of the virtual machine :return fcp_connections: (dict) fcp_connections example 
{'1a10': 1, '1b10', 0} the values are the connections of the FCP device """ with database.get_fcp_conn(): fcp_connections = {} for fcp in fcp_list: # increase connections by 1 fcp_connections[fcp] = self.db.increase_connections_by_assigner(fcp, assigner_id) return fcp_connections def decrease_fcp_connections(self, fcp_list): """Decrease connections of FCP devices by 1 :param fcp_list: (list) a list of FCP devices :return fcp_connections: (dict) fcp_connections example { '1a10': 1, '1b10', 0 } the values are the connections of the FCP device """ with database.get_fcp_conn(): fcp_connections = {} for fcp in fcp_list: try: LOG.info('Decreasing the connections of FCP device {}'.format(fcp)) # Decrease connections of FCP device by 1 fcp_connections[fcp] = self.db.decrease_connections(fcp) except exception.SDKObjectNotExistError: fcp_connections[fcp] = 0 pass return fcp_connections def _valid_fcp_devcie_wwpn(self, fcp_list, assigner_id): """This method is to check if the FCP wwpn_npiv or wwpn_phy is empty string, if yes, raise error""" for fcp in fcp_list: fcp_id, wwpn_npiv, wwpn_phy, *_ = fcp if not wwpn_npiv: # wwpn_npiv not found in FCP DB errmsg = ("NPIV WWPN of FCP device %s not found in " "database." % fcp_id) LOG.error(errmsg) raise exception.SDKVolumeOperationError(rs=11, userid=assigner_id, msg=errmsg) # We use initiator to build up zones on fabric, for NPIV, the # virtual ports are not yet logged in when we creating zones. # so we will generate the physical virtual initiator mapping # to determine the proper zoning on the fabric. # Refer to #7039 for details about avoid creating zones on # the fabric to which there is no fcp connected. if not wwpn_phy: errmsg = ("Physical WWPN of FCP device %s not found in " "database." % fcp[0]) LOG.error(errmsg) raise exception.SDKVolumeOperationError(rs=11, userid=assigner_id, msg=errmsg) def reserve_fcp_devices(self, assigner_id, fcp_template_id, sp_name): """ Reserve FCP devices by assigner_id and fcp_template_id. 
In this method: 1. If fcp_template_id is specified, then use it. If not, get the sp default FCP Multipath Template, if no sp default template, use host default FCP Multipath Template. If host default template is not found, then raise error. 2. Get FCP list from db by assigner and fcp_template whose reserve=1 3. If fcp_list is not empty, just to use them. 4. If fcp_list is empty, get one from each path, then update 'reserved' and 'tmpl_id' in fcp table. Returns: fcp_list and fcp_template_id. The fcp list data structure: [(fcp_id, wwpn_npiv, wwpn_phy)]. An example of fcp_list: [('1c10', 'c12345abcdefg1', 'c1234abcd33002641'), ('1d10', 'c12345abcdefg2', 'c1234abcd33002641')] """ with database.get_fcp_conn(): fcp_tmpl_id = fcp_template_id if not fcp_tmpl_id: LOG.info("FCP Multipath Template id is not specified when reserving FCP " "devices for assigner %s." % assigner_id) if sp_name: LOG.info("Get the default FCP Multipath Template id for Storage " "Provider %s " % sp_name) default_tmpl = self.db.get_sp_default_fcp_template([sp_name]) if not sp_name or not default_tmpl: LOG.info("Can not find the default FCP Multipath Template id for " "storage provider %s. Get the host default FCP " "template id for assigner %s" % (sp_name, assigner_id)) default_tmpl = self.db.get_host_default_fcp_template() if default_tmpl: fcp_tmpl_id = default_tmpl[0][0] LOG.info("The default FCP Multipath Template id is %s." 
% fcp_tmpl_id) else: errmsg = ("No FCP Multipath Template is specified and " "no default FCP Multipath Template is found.") LOG.error(errmsg) raise exception.SDKVolumeOperationError(rs=11, userid=assigner_id, msg=errmsg) global _LOCK_RESERVE_FCP _LOCK_RESERVE_FCP.acquire() try: # go here, means try to attach volumes # first check whether this userid already has a FCP device # get the FCP devices belongs to assigner_id fcp_list = self.db.get_allocated_fcps_from_assigner( assigner_id, fcp_tmpl_id) LOG.info("Previously allocated records %s for " "instance %s in FCP Multipath Template %s." % ([f['fcp_id'] for f in fcp_list], assigner_id, fcp_tmpl_id)) if not fcp_list: # Sync DB to update FCP state, # so that allocating new FCPs is based on the latest FCP state self._sync_db_with_zvm() # allocate new ones if fcp_list is empty LOG.info("There is no allocated FCP devices for virtual machine %s, " "allocating new ones." % assigner_id) if CONF.volume.get_fcp_pair_with_same_index: ''' If use get_fcp_pair_with_same_index, then fcp pair is randomly selected from below combinations. [fa00,fb00],[fa01,fb01],[fa02,fb02] ''' free_unreserved = self.db.get_fcp_devices_with_same_index( fcp_tmpl_id) else: ''' If use get_fcp_pair, then fcp pair is randomly selected from below combinations. [fa00,fb00],[fa01,fb00],[fa02,fb00] [fa00,fb01],[fa01,fb01],[fa02,fb01] [fa00,fb02],[fa01,fb02],[fa02,fb02] ''' free_unreserved = self.db.get_fcp_devices(fcp_tmpl_id) if not free_unreserved: return [], fcp_tmpl_id available_list = free_unreserved fcp_ids = [fcp[0] for fcp in free_unreserved] # record the assigner id in the fcp DB so that # when the vm provision with both root and data volumes # the root and data volume would get the same FCP devices # with the get_volume_connector call. 
assigner_id = assigner_id.upper() self.db.reserve_fcps(fcp_ids, assigner_id, fcp_tmpl_id) LOG.info("Newly allocated %s fcp for %s assigner " "and FCP Multipath Template %s" % (fcp_ids, assigner_id, fcp_tmpl_id)) else: # reuse the old ones if fcp_list is not empty LOG.info("Found allocated fcps %s for %s in FCP Multipath Template %s, " "will reuse them." % ([f['fcp_id'] for f in fcp_list], assigner_id, fcp_tmpl_id)) path_count = self.db.get_path_count(fcp_tmpl_id) if len(fcp_list) != path_count: LOG.warning("FCPs previously assigned to %s includes %s, " "it is not equal to the path count: %s." % (assigner_id, fcp_list, path_count)) self._valid_fcp_devcie_wwpn(fcp_list, assigner_id) # we got it from db, let's reuse it available_list = fcp_list return available_list, fcp_tmpl_id except Exception as err: errmsg = ("Failed to reserve FCP devices " "for assigner %s by FCP Multipath Template %s error: %s" % (assigner_id, fcp_template_id, err.message)) LOG.error(errmsg) raise exception.SDKVolumeOperationError(rs=11, userid=assigner_id, msg=errmsg) finally: _LOCK_RESERVE_FCP.release() def unreserve_fcp_devices(self, assigner_id, fcp_template_id): """ Unreserve FCP devices by assigner_id and fcp_template_id. In this method: 1. Get FCP list from db by assigner and fcp_template whose reserved=1 2. If fcp_list is not empty, choose the ones with connections=0, and then set reserved=0 in fcp table in db 3. If fcp_list is empty, return empty list Returns: The fcp list data structure: [(fcp_id, wwpn_npiv, wwpn_phy, connections)]. An example of fcp_list: [('1c10', 'c12345abcdefg1', 'c1234abcd33002641', 1), ('1d10', 'c12345abcdefg2', 'c1234abcd33002641', 0)] If no fcp can be gotten from db, return empty list. 
""" with database.get_fcp_conn(): try: if fcp_template_id is None: errmsg = ("fcp_template_id is not specified " "while releasing FCP devices.") LOG.error(errmsg) raise exception.SDKVolumeOperationError(rs=11, userid=assigner_id, msg=errmsg) fcp_list = self.db.get_reserved_fcps_from_assigner( assigner_id, fcp_template_id) if fcp_list: self._valid_fcp_devcie_wwpn(fcp_list, assigner_id) # the data structure of fcp_list is # [(fcp_id, wwpn_npiv, wwpn_phy, connections)] # only unreserve the fcp with connections=0 fcp_ids = [fcp['fcp_id'] for fcp in fcp_list if fcp['connections'] == 0] if fcp_ids: self.db.unreserve_fcps(fcp_ids) LOG.info("Unreserve fcp device %s from " "instance %s and FCP Multipath Template %s." % (fcp_ids, assigner_id, fcp_template_id)) return fcp_list return [] except Exception as err: errmsg = ("Failed to unreserve FCP devices for " "assigner %s by FCP Multipath Template %s. Error: %s" % (assigner_id, fcp_template_id, err.message)) LOG.error(errmsg) raise exception.SDKVolumeOperationError(rs=11, userid=assigner_id, msg=errmsg) def get_all_fcp_pool(self, assigner_id): """Return a dict of all FCPs in ZVM fcp_dict_in_zvm example (key=FCP): { '1a06': , '1a07': , '1b06': , '1b07': } """ all_fcp_info = self._get_all_fcp_info(assigner_id) lines_per_item = constants.FCP_INFO_LINES_PER_ITEM all_fcp_pool = {} num_fcps = len(all_fcp_info) // lines_per_item for n in range(0, num_fcps): start_line = lines_per_item * n end_line = lines_per_item * (n + 1) fcp_init_info = all_fcp_info[start_line:end_line] fcp = FCP(fcp_init_info) dev_no = fcp.get_dev_no() all_fcp_pool[dev_no] = fcp return all_fcp_pool def get_fcp_dict_in_db(self): """Return a dict of all FCPs in FCP_DB Note: the key of the returned dict is in lowercase. 
example (key=FCP) { 'fcp_id': (fcp_id, userid, connections, reserved, wwpn_npiv, wwpn_phy, chpid, state, owner, tmpl_id), '1a06': ('1a06', 'C2WDL003', 2, 1, 'c05076ddf7000002', 'c05076ddf7001d81', 27, 'active', 'C2WDL003', ''), '1b08': ('1b08', 'C2WDL003', 2, 1, 'c05076ddf7000002', 'c05076ddf7001d81', 27, 'active', 'C2WDL003', ''), '1c08': ('1c08', 'C2WDL003', 2, 1, 'c05076ddf7000002', 'c05076ddf7001d81', 27, 'active', 'C2WDL003', ''), } """ try: # Get all FCPs found in DB. fcp_in_db = self.db.get_all_fcps_of_assigner() except exception.SDKObjectNotExistError: fcp_in_db = list() # this method is called by _sync_db_with_zvm, # change this msg to warning # level since no record in db is normal during sync # such as when there is no fcp_list configured msg = ("No fcp records found in database and ignore " "the exception.") LOG.warning(msg) fcp_dict_in_db = {fcp[0].lower(): fcp for fcp in fcp_in_db} return fcp_dict_in_db def get_fcp_dict_in_zvm(self): """Return a dict of all FCPs in ZVM Note: the key of the returned dict is in lowercase. 
fcp_dict_in_zvm example (key=FCP): { '1a06': , '1a07': , '1b06': , '1b07': } """ # Get the userid of smt server smt_userid = zvmutils.get_smt_userid() # Return a dict of all FCPs in ZVM fcp_dict_in_zvm = self.get_all_fcp_pool(smt_userid) fcp_id_to_object = {fcp.lower(): fcp_dict_in_zvm[fcp] for fcp in fcp_dict_in_zvm} return fcp_id_to_object def sync_fcp_table_with_zvm(self, fcp_dict_in_zvm): """Update FCP records queried from zVM into FCP table.""" with database.get_fcp_conn(): # Get a dict of all FCPs already existed in FCP table fcp_dict_in_db = self.get_fcp_dict_in_db() # Divide FCPs into three sets inter_set = set(fcp_dict_in_zvm) & set(fcp_dict_in_db) del_fcp_set = set(fcp_dict_in_db) - inter_set add_fcp_set = set(fcp_dict_in_zvm) - inter_set # Add new records into FCP table fcp_info_need_insert = [fcp_dict_in_zvm[fcp].to_tuple() for fcp in add_fcp_set] LOG.info("New FCP devices found on z/VM: {}".format(add_fcp_set)) self.db.bulk_insert_zvm_fcp_info_into_fcp_table( fcp_info_need_insert) # Delete FCP records from FCP table # if it is connections=0 and reserve=0 LOG.info("FCP devices exist in FCP table but not in " "z/VM any more: {}".format(del_fcp_set)) fcp_ids_secure_to_delete = set() fcp_ids_not_found = set() for fcp in del_fcp_set: # example of a FCP record in fcp_dict_in_db # (fcp_id, userid, connections, reserved, wwpn_npiv, # wwpn_phy, chpid, state, owner, tmpl_id) (fcp_id, userid, connections, reserved, wwpn_npiv_db, wwpn_phy_db, chpid_db, fcp_state_db, fcp_owner_db, tmpl_id) = fcp_dict_in_db[fcp] if connections == 0 and reserved == 0: fcp_ids_secure_to_delete.add(fcp) else: # these records not found in z/VM # but still in-use in FCP table fcp_ids_not_found.add(fcp) self.db.bulk_delete_from_fcp_table( fcp_ids_secure_to_delete) LOG.info("FCP devices removed from FCP table: {}".format( fcp_ids_secure_to_delete)) # For records not found in ZVM, but still in-use in DB # mark them as not found if fcp_ids_not_found: 
self.db.bulk_update_state_in_fcp_table(fcp_ids_not_found, 'notfound') LOG.info("Ignore the request of deleting in-use " "FCPs: {}.".format(fcp_ids_not_found)) # Update status for FCP records already existed in DB LOG.info("FCP devices exist in both FCP table and " "z/VM: {}".format(inter_set)) fcp_ids_need_update = set() for fcp in inter_set: # example of a FCP record in fcp_dict_in_db # (fcp_id, userid, connections, reserved, wwpn_npiv, # wwpn_phy, chpid, state, owner, tmpl_id) (fcp_id, userid, connections, reserved, wwpn_npiv_db, wwpn_phy_db, chpid_db, fcp_state_db, fcp_owner_db, tmpl_id) = fcp_dict_in_db[fcp] # Get physical WWPN and NPIV WWPN queried from z/VM wwpn_phy_zvm = fcp_dict_in_zvm[fcp].get_physical_port() wwpn_npiv_zvm = fcp_dict_in_zvm[fcp].get_npiv_port() # Get CHPID queried from z/VM chpid_zvm = fcp_dict_in_zvm[fcp].get_chpid() # Get FCP device state queried from z/VM # Possible state returned by ZVM: # 'active', 'free' or 'offline' fcp_state_zvm = fcp_dict_in_zvm[fcp].get_dev_status() # Get owner of FCP device queried from z/VM # Possible FCP owner returned by ZVM: # VM userid: if the FCP is attached to a VM # A String "NONE": if the FCP is not attached fcp_owner_zvm = fcp_dict_in_zvm[fcp].get_owner() # Check WWPNs need update or not if wwpn_npiv_db == '' or (connections == 0 and reserved == 0): # The WWPNs are secure to be updated when: # case1(wwpn_npiv_db == ''): the wwpn_npiv_db is empty, for example, upgraded from 114. # case2(connections == 0 and reserved == 0): the FCP device is not in use. if wwpn_npiv_db != wwpn_npiv_zvm or wwpn_phy_db != wwpn_phy_zvm: # only need to update wwpns when they are different fcp_ids_need_update.add(fcp) else: # For an in-used FCP device, even its WWPNs(wwpn_npiv_zvm, wwpn_phy_zvm) are changed in z/VM, # we can NOT update the wwpn_npiv, wwpn_phy columns in FCP DB because the host mapping from # storage provider backend is still using the old WWPNs recorded in FCP DB. 
# To detach the volume and delete the host mapping successfully, we need make sure the WWPNs records # in FCP DB unchanged in this case. # Because we will copy all properties in fcp_dict_in_zvm[fcp] to DB when update a FCP property # (for example, state, owner, etc), # we overwrite the (wwpn_npiv_zvm, wwpn_phy_zvm) in fcp_dict_in_zvm[fcp] # to old (wwpn_npiv_db, wwpn_phy_db), so that their values will not be changed when update other # properties. fcp_dict_in_zvm[fcp].set_npiv_port(wwpn_npiv_db) fcp_dict_in_zvm[fcp].set_physical_port(wwpn_phy_db) # Other cases need to update FCP record in DB if chpid_db != chpid_zvm: # Check chpid changed or not fcp_ids_need_update.add(fcp) elif fcp_state_db != fcp_state_zvm: # Check state changed or not fcp_ids_need_update.add(fcp) elif fcp_owner_db != fcp_owner_zvm: # Check owner changed or not fcp_ids_need_update.add(fcp) else: LOG.debug("No need to update record of FCP " "device {}".format(fcp)) fcp_info_need_update = [fcp_dict_in_zvm[fcp].to_tuple() for fcp in fcp_ids_need_update] self.db.bulk_update_zvm_fcp_info_in_fcp_table(fcp_info_need_update) LOG.info("FCP devices need to update records in " "fcp table: {}".format(fcp_info_need_update)) def _sync_db_with_zvm(self): """Sync FCP DB with the FCP info queried from zVM""" LOG.info("Enter: Sync FCP DB with FCP info queried from z/VM.") LOG.info("Querying FCP status on z/VM.") # Get a dict of all FCPs in ZVM fcp_dict_in_zvm = self.get_fcp_dict_in_zvm() # Update the dict of all FCPs into FCP table in database self.sync_fcp_table_with_zvm(fcp_dict_in_zvm) LOG.info("Exit: Sync FCP DB with FCP info queried from z/VM.") def create_fcp_template(self, name, description: str = '', fcp_devices: str = '', host_default: bool = False, default_sp_list: list = None, min_fcp_paths_count: int = None): """Create a FCP Multipath Template and return the basic information of the created template, for example: { 'fcp_template': { 'name': 'bjcb-test-template', 'id': 
'36439338-db14-11ec-bb41-0201018b1dd2', 'description': 'This is Default template', 'host_default': True, 'storage_providers': ['sp4', 'v7k60'], 'min_fcp_paths_count': 2 } } """ LOG.info("Try to create a" " FCP Multipath Template with name:%s," "description:%s, fcp devices: %s, host_default: %s," "storage_providers: %s, min_fcp_paths_count: %s." % (name, description, fcp_devices, host_default, default_sp_list, min_fcp_paths_count)) # Generate a template id for this new template tmpl_id = str(uuid.uuid1()) # Get fcp devices info index by path fcp_devices_by_path = utils.expand_fcp_list(fcp_devices) # If min_fcp_paths_count is not None,need validate the value if min_fcp_paths_count and min_fcp_paths_count > len(fcp_devices_by_path): msg = ("min_fcp_paths_count %s is larger than fcp device path count %s, " "adjust fcp_devices or min_fcp_paths_count." % (min_fcp_paths_count, len(fcp_devices_by_path))) LOG.error(msg) raise exception.SDKConflictError(modID='volume', rs=23, msg=msg) # Insert related records in FCP database self.db.create_fcp_template(tmpl_id, name, description, fcp_devices_by_path, host_default, default_sp_list, min_fcp_paths_count) min_fcp_paths_count_db = self.db.get_min_fcp_paths_count(tmpl_id) # Return template basic info LOG.info("A FCP Multipath Template was created with ID %s." % tmpl_id) return {'fcp_template': {'name': name, 'id': tmpl_id, 'description': description, 'host_default': host_default, 'storage_providers': default_sp_list if default_sp_list else [], 'min_fcp_paths_count': min_fcp_paths_count_db}} def edit_fcp_template(self, fcp_template_id, name=None, description=None, fcp_devices=None, host_default=None, default_sp_list=None, min_fcp_paths_count=None): """ Edit a FCP Multipath Template The kwargs values are pre-validated in two places: validate kwargs types in zvmsdk/sdkwsgi/schemas/volume.py set a kwarg as None if not passed by user in zvmsdk/sdkwsgi/handlers/volume.py If any kwarg is None, the kwarg will not be updated. 
:param fcp_template_id: template id :param name: template name :param description: template desc :param fcp_devices: FCP devices divided into different paths by semicolon Format: "fcp-devices-from-path0;fcp-devices-from-path1;..." Example: "0011-0013;0015;0017-0018", :param host_default: (bool) :param default_sp_list: (list) Example: ["SP1", "SP2"] :param min_fcp_paths_count the min fcp paths count, if it is None, will not update this field in db. :return: Example { 'fcp_template': { 'name': 'bjcb-test-template', 'id': '36439338-db14-11ec-bb41-0201018b1dd2', 'description': 'This is Default template', 'host_default': True, 'storage_providers': ['sp4', 'v7k60'], 'min_fcp_paths_count': 2 } } """ LOG.info("Enter: edit_fcp_template with args {}".format( (fcp_template_id, name, description, fcp_devices, host_default, default_sp_list, min_fcp_paths_count))) # DML in FCP database result = self.db.edit_fcp_template(fcp_template_id, name=name, description=description, fcp_devices=fcp_devices, host_default=host_default, default_sp_list=default_sp_list, min_fcp_paths_count=min_fcp_paths_count) LOG.info("Exit: edit_fcp_template") return result def _update_template_fcp_raw_usage(self, raw_usage, raw_item): """group raw_item with template_id and path raw_item format: [(fcp_id|tmpl_id|path|assigner_id|connections|reserved| wwpn_npiv|wwpn_phy|chpid|state|owner|tmpl_id)] return format: { template_id: { path: [(fcp_id, template_id, assigner_id, connections, reserved, wwpn_npiv, wwpn_phy, chpid, state, owner, tmpl_id),()] } } """ (fcp_id, template_id, path_id, assigner_id, connections, reserved, wwpn_npiv, wwpn_phy, chpid, state, owner, tmpl_id) = raw_item if not raw_usage.get(template_id, None): raw_usage[template_id] = {} if not raw_usage[template_id].get(path_id, None): raw_usage[template_id][path_id] = [] # remove path_id from raw data, keep the last templ_id to # represent from which template this FCP has been allocated out. 
return_raw = (fcp_id, template_id, assigner_id, connections, reserved, wwpn_npiv, wwpn_phy, chpid, state, owner, tmpl_id) raw_usage[template_id][path_id].append(return_raw) return raw_usage def extract_template_info_from_raw_data(self, raw_data): """ raw_data format: [(id|name|description|is_default|sp_name)] return format: { temlate_id: { "id": id, "name": name, "description": description, "host_default": is_default, "storage_providers": [sp_name] } } """ template_dict = {} for item in raw_data: id, name, description, is_default, min_fcp_paths_count, sp_name = item if min_fcp_paths_count < 0: min_fcp_paths_count = self.db.get_path_count(id) if not template_dict.get(id, None): template_dict[id] = {"id": id, "name": name, "description": description, "host_default": bool(is_default), "storage_providers": [], "min_fcp_paths_count": min_fcp_paths_count} # one FCP Multipath Template can be multiple sp's default template if sp_name and sp_name not in template_dict[id]["storage_providers"]: template_dict[id]["storage_providers"].append(sp_name) return template_dict def _update_template_fcp_statistics_usage(self, statistics_usage, raw_item): """Transform raw usage in FCP database into statistic data. :param statistics_usage: (dict) to store statistics info :param raw_item: [list] to represent db query result raw_item format: (fcp_id|tmpl_id|path|assigner_id|connections|reserved| wwpn_npiv|wwpn_phy|chpid|state|owner|tmpl_id) the first three properties are from template_fcp_mapping table, and the others are from fcp table. These three properties will always have values. when the device is not in fcp table, all the properties in fcp table will be None. For example: template '12345678' has a fcp "1aaa" on path 0, but this device is not in fcp table, the query result will be as below. 1aaa|12345678|0|||||||||| Note: the FCP id in the returned dict is in uppercase. 
statistics_usage return result format: { template_id: { path1: {}, path2: {}} } """ # get statistic data about: # available, allocated, notfound, # unallocated_but_active, allocated_but_free # CHPIDs (fcp_id, template_id, path_id, assigner_id, connections, reserved, _, _, chpid, state, owner, _) = raw_item # The raw_item is for each fcp device, so there are multiple # items for each single FCP Multipath Template. # But the return result needs to group all the items by FCP Multipath Template, # so construct a dict statistics_usage[template_id] # with template_id as key to group the info. # template_id key also will be used to join with template base info if not statistics_usage.get(template_id, None): statistics_usage[template_id] = {} if not statistics_usage[template_id].get(path_id, None): statistics_usage[template_id][path_id] = { "total": [], "total_count": 0, "single_fcp": [], "range_fcp": [], "available": [], "available_count": 0, "allocated": [], "reserve_only": [], "connection_only": [], "unallocated_but_active": {}, "allocated_but_free": [], "notfound": [], "offline": [], "CHPIDs": {}} # when this fcp_id is not None, means the fcp exists in zvm, i.e in # fcp table, then it will have detail info from fcp table # when this fcp_id is None, means the fcp does not exist in zvm, no # detail info, just add into 'not_found' with the tmpl_fcp_id returns # from template_fcp_mapping table # Show upper case for FCP id fcp_id = fcp_id.upper() # If a fcp not found in z/VM, will not insert into fcp table, then the # db query result will be None. 
So connections not None represents # the fcp is found in z/VM if connections is not None: # Store each FCP in section "total" statistics_usage[template_id][path_id]["total"].append(fcp_id) # case G: (state = notfound) # this FCP in database but not found in z/VM if state == "notfound": statistics_usage[ template_id][path_id]["notfound"].append(fcp_id) LOG.warning("Found a FCP device " "%s in FCP Multipath Template %s, but not found in " "z/VM." % (str(fcp_id), str(template_id))) # case H: (state = offline) # this FCP in database but offline in z/VM if state == "offline": statistics_usage[template_id][path_id]["offline"].append( fcp_id) LOG.warning("Found state of a FCP " "device %s is offline in database." % str(fcp_id)) # found this FCP in z/VM if connections == 0: if reserved == 0: # case A: (reserve=0 and conn=0 and state=free) # this FCP is available for use if state == "free": statistics_usage[ template_id][path_id]["available"].append(fcp_id) LOG.debug("Found " "an available FCP device %s in " "database." % str(fcp_id)) # case E: (conn=0 and reserve=0 and state=active) # this FCP is available in database but its state # is active in smcli output if state == "active": statistics_usage[ template_id][path_id]["unallocated_but_active"].\ update({fcp_id: owner}) LOG.warning("Found a FCP " "device %s available in database but its " "state is active, it may be occupied by " "a userid outside of this ZCC." % str( fcp_id)) else: # case C: (reserve=1 and conn=0) # the fcp should be in task or a bug happen statistics_usage[ template_id][path_id]["reserve_only"].append(fcp_id) LOG.warning("Found a FCP " "device %s reserve_only." % str(fcp_id)) else: # connections != 0 if reserved == 0: # case D: (reserve = 0 and conn != 0) # must have a bug result in this statistics_usage[template_id][ path_id]["connection_only"].append(fcp_id) LOG.warning("Found a FCP " "device %s unreserved in database but " "its connections is not 0." 
% str(fcp_id)) else: # case B: (reserve=1 and conn!=0) # ZCC allocated this to a userid statistics_usage[ template_id][path_id]["allocated"].append(fcp_id) LOG.debug("Found an allocated " "FCP device: %s." % str(fcp_id)) # case F: (conn!=0 and state=free) if state == "free": statistics_usage[template_id][ path_id]["allocated_but_free"].append(fcp_id) LOG.warning("Found a FCP " "device %s allocated by ZCC but its state is " "free." % str(fcp_id)) # case I: ((conn != 0) & assigner_id != owner) elif assigner_id.lower() != owner.lower() and state != "notfound": LOG.warning("Found a FCP " "device %s allocated by ZCC but its assigner " "differs from owner." % str(fcp_id)) if chpid: if not statistics_usage[template_id][path_id]["CHPIDs"].get(chpid, None): statistics_usage[ template_id][path_id]["CHPIDs"].update({chpid: []}) statistics_usage[ template_id][path_id]["CHPIDs"][chpid].append(fcp_id) # this FCP in template_fcp_mapping table but not found in z/VM else: # add into 'total' and 'not_found' statistics_usage[template_id][path_id]["total"].append(fcp_id) statistics_usage[template_id][path_id]["notfound"].append(fcp_id) LOG.warning("Found a FCP device " "%s in FCP Multipath Template %s, but not found in " "z/VM." 
% (str(fcp_id), str(template_id))) return statistics_usage def _shrink_fcp_list_in_statistics_usage(self, statistics_usage): """shrink fcp list in statistics sections to range fcp for example, before shrink: template_statistics[path]["total"] = "1A0A, 1A0B, 1A0C, 1A0E" after shink: template_statistics[path]["total"] = "1A0A - 1A0C, 1A0E" """ for template_statistics in statistics_usage.values(): for path in template_statistics: # count total and available fcp before shrink if template_statistics[path]["total"]: template_statistics[path][ "total_count"] = len(template_statistics[path][ "total"]) if template_statistics[path]["available"]: template_statistics[path][ "available_count"] = len(template_statistics[path][ "available"]) # only below sections in statistics need to shrink need_shrink_sections = ["total", "available", "allocated", "reserve_only", "connection_only", "allocated_but_free", "notfound", "offline"] # Do NOT transform unallocated_but_active, # because its value also contains VM userid. # e.g. 
[('1b04','owner1'), ('1b05','owner2')] # Do NOT transform CHPIDs, total_count, single_fcp, # range_fcp and available_count for section in need_shrink_sections: fcp_list = template_statistics[path][section] template_statistics[path][section] = ( utils.shrink_fcp_list(fcp_list)) # shrink for each CHIPID for chpid, fcps in template_statistics[ path]['CHPIDs'].items(): fcp_list = fcps template_statistics[path]['CHPIDs'][chpid] = ( utils.shrink_fcp_list(fcp_list)) def _split_singe_range_fcp_list(self, statistics_usage): # after shrink, total fcps can have both range and singe fcps, # for example: template_statistics[path]['total'] = "1A0A - 1A0C, 1A0E" # UI needs 'range_fcp' and 'singe_fcp' to input in different areas # so split the total fcps to 'range_fcp' and 'singe_fcp' as below: # template_statistics[path]['range_fcp'] = "1A0A - 1A0C" # template_statistics[path]['single_fcp'] = "1A0E" for template_statistics in statistics_usage.values(): for path in template_statistics: range_fcp = [] single_fcp = [] total_fcp = template_statistics[path]['total'].split(',') for fcp in total_fcp: if '-' in fcp: range_fcp.append(fcp.strip()) else: single_fcp.append(fcp.strip()) template_statistics[path]['range_fcp'] = ', '.join(range_fcp) template_statistics[path]['single_fcp'] = ', '.join(single_fcp) def get_fcp_templates(self, template_id_list=None, assigner_id=None, default_sp_list=None, host_default=None): """Get template base info by template_id_list or filters :param template_id_list: (list) a list of template id, if it is None, get FCP Multipath Templates with other parameter :param assigner_id: (str) a string of VM userid :param default_sp_list: (list) a list of storage provider or 'all', to get storage provider's default FCP Multipath Templates when sp_host_list = ['all'], will get all storage providers' default FCP Multipath Templates. 
For example, there are 3 FCP Multipath Templates are set as storage providers' default template, then all these 3 FCP Multipath Templates will return as below: { "fcp_templates": [ { "id": "36439338-db14-11ec-bb41-0201018b1dd2", "name": "default_template", "description": "This is Default template", "host_default": True, "storage_providers": [ "v7k60", "sp4" ] }, { "id": "36439338-db14-11ec-bb41-0201018b1dd3", "name": "test_template", "description": "just for test", "host_default": False, "storage_providers": [ "ds8k60c1" ] }, { "id": "12345678", "name": "templatet1", "description": "test1", "host_default": False, "storage_providers": [ "sp3" ] } ] } when sp_host_list is a storage provider name list, will return these providers' default FCP Multipath Templates. Example: sp_host_list = ['v7k60', 'ds8k60c1'] return: { "fcp_templates": [ { "id": "36439338-db14-11ec-bb41-0201018b1dd2", "name": "default_template", "description": "This is Default template", "host_default": True, "storage_providers": [ "v7k60", "sp4" ] }, { "id": "36439338-db14-11ec-bb41-0201018b1dd3", "name": "test_template", "description": "just for test", "host_default": False, "storage_providers": [ "ds8k60c1" ] } ] } :param host_default: (boolean) whether or not get host default fcp template :return: (dict) the base info of template """ ret = [] if template_id_list: not_exist = [] for template_id in template_id_list: if not self.db.fcp_template_exist_in_db(template_id): not_exist.append(template_id) if not_exist: obj_desc = ("FCP Multipath Templates {} ".format(not_exist)) raise exception.SDKObjectNotExistError(obj_desc=obj_desc) raw = self.db.get_fcp_templates(template_id_list) elif assigner_id: raw = self.db.get_fcp_template_by_assigner_id(assigner_id) elif default_sp_list: raw = self.db.get_sp_default_fcp_template(default_sp_list) elif host_default is not None: raw = self.db.get_host_default_fcp_template(host_default) else: # if no parameter, will get all FCP Multipath Templates raw = 
self.db.get_fcp_templates(template_id_list) template_list = self.extract_template_info_from_raw_data(raw) for value in template_list.values(): ret.append(value) return {"fcp_templates": ret} def get_fcp_templates_details(self, template_id_list=None, raw=False, statistics=True, sync_with_zvm=False): """Get FCP Multipath Templates detail info. :param template_list: (list) if is None, will get all the templates on the host :return: (dict) the raw and/or statistic data of temlate_list FCP devices if sync_with_zvm: self.fcp_mgr._sync_db_with_zvm() if FCP DB is NOT empty and raw=True statistics=True { "fcp_templates":[ { "id":"36439338-db14-11ec-bb41-0201018b1dd2", "name":"default_template", "description":"This is Default template", "host_default":True, "storage_providers":[ "sp4", "v7k60" ], "raw":{ # (fcp_id, template_id, assigner_id, connections, # reserved, wwpn_npiv, wwpn_phy, chpid, state, owner, # tmpl_id) "0":[ [ "1a0f", "36439338-db14-11ec-bb41-0201018b1dd2", "HLP0000B", 0, 0, "c05076de3300038b", "c05076de33002e41", "27", "free", "none", "36439338-db14-11ec-bb41-0201018b1dd2" ], [ "1a0e", "36439338-db14-11ec-bb41-0201018b1dd2", "", 0, 0, "c05076de330003a2", "c05076de33002e41", "27", "free", "none", "36439338-db14-11ec-bb41-0201018b1dd2" ] ], "1":[ [ "1c0d", "36439338-db14-11ec-bb41-0201018b1dd2", "", 0, 0, "c05076de33000353", "c05076de33002641", "32", "free", "none", "36439338-db14-11ec-bb41-0201018b1dd2" ] ] }, "statistics":{ # case A: (reserve = 0 and conn = 0 and state = free) # FCP is available and in free status "available": ('1A00','1A05',...) # case B: (reserve = 1 and conn != 0) # nomral in-use FCP "allocated": ('1B00','1B05',...) # case C: (reserve = 1, conn = 0) # the fcp should be in task or a bug cause this # situation "reserve_only": ('1C00', '1C05', ...) # case D: (reserve = 0 and conn != 0) # should be a bug result in this situation "connection_only": ('1C00', '1C05', ...) 
# case E: (reserve = 0, conn = 0, state = active) # FCP occupied out-of-band 'unallocated_but_active': {'1B04': 'owner1', '1B05': 'owner2'} # case F: (conn != 0, state = free) # we allocated it in db but the FCP status is free # this is an situation not expected "allocated_but_free": ('1D00','1D05',...) # case G: (state = notfound) # not found in smcli "notfound": ('1E00','1E05',...) # case H: (state = offline) # offline in smcli "offline": ('1F00','1F05',...) # case I: ((conn != 0) & assigner_id != owner) # assigner_id-in-DB differ from smcli-returned-owner # only log about this # case J: fcp by chpid "0":{ "total":"1A0E - 1A0F", "available":"1A0E - 1A0F", "allocated":"", "reserve_only":"", "connection_only":"", "unallocated_but_active":{}, "allocated_but_free":"", "notfound":"", "offline":"", "CHPIDs":{ "27":"1A0E - 1A0F" } }, "1":{ "total":"1C0D", "available":"1C0D", "allocated":"", "reserve_only":"", "connection_only":"", "unallocated_but_active":{}, "allocated_but_free":"", "notfound":"", "offline":"", "CHPIDs":{ "32":"1C0D" } } } } ] } """ not_exist = [] if template_id_list: for template_id in template_id_list: if not self.db.fcp_template_exist_in_db(template_id): not_exist.append(template_id) if not_exist: obj_desc = ("FCP Multipath Templates {} ".format(not_exist)) raise exception.SDKObjectNotExistError(obj_desc=obj_desc) if sync_with_zvm: self._sync_db_with_zvm() statistics_usage = {} raw_usage = {} template_info = {} ret = [] # tmpl_cmd result format: # [(id|name|description|is_default|sp_name)] # devices_cmd result format: # [(fcp_id|tmpl_id|path|assigner_id|connections|reserved| # wwpn_npiv|wwpn_phy|chpid|state|owner|tmpl_id)] tmpl_result, devices_result = self.db.get_fcp_templates_details( template_id_list) # extract template base info into template_info template_info = self.extract_template_info_from_raw_data(tmpl_result) # template_info foramt: # { # temlate_id: { # "id": id, # "name": name, # "description": description, # "is_default": is_default, # 
"storage_providers": [sp_name] # } # } if raw: for item in devices_result: self._update_template_fcp_raw_usage(raw_usage, item) for template_id, base_info in template_info.items(): if template_id in raw_usage: base_info.update({"raw": raw_usage[template_id]}) else: # some template does not have fcp devices, so there is no # raw_usage for such template base_info.update({"raw": {}}) # after join raw info, template_info format is like this: # { # temlate_id: { # "id": id, # "name": name, # "description": description, # "is_default": is_default, # "storage_providers": [sp_name], # "raw": { # path1: {}, # path2: {}} # } # } # } # get fcp statistics usage if statistics: for item in devices_result: self._update_template_fcp_statistics_usage( statistics_usage, item) LOG.info("statistic FCP usage before shrink: %s" % statistics_usage) self._shrink_fcp_list_in_statistics_usage(statistics_usage) self._split_singe_range_fcp_list(statistics_usage) LOG.info("statistic FCP usage after shrink: %s" % statistics_usage) # update base info with statistics_usage # statistics_usage format: # { # template_id1: { # path1: {}, # path2: {}}, # template_id2: { # path1: {}, # path2: {}} # } for template_id, base_info in template_info.items(): # only the FCP Multipath Template which has fcp in zvm has # statistics_usage data if template_id in statistics_usage: base_info.update( {"statistics": statistics_usage[template_id]}) else: # some templates do not have fcp devices or do not have # valid fcp in zvm, so do not have statistics_usage data base_info.update({"statistics": {}}) # after join statistics info, template_info format is like this: # { # temlate_id: { # "id": id, # "name": name, # "description": description, # "host_default": is_default, # "storage_providers": [sp_name], # "statistics": { # path1: {}, # path2: {}} # } # } # } for value in template_info.values(): ret.append(value) return {"fcp_templates": ret} def delete_fcp_template(self, template_id): """Delete FCP Multipath Template 
# volume manager for FCP protocol
class FCPVolumeManager(object):
    def __init__(self):
        self.fcp_mgr = FCPManager()
        self.config_api = VolumeConfiguratorAPI()
        self._smtclient = smtclient.get_smtclient()
        self._lock = threading.RLock()
        # previously FCPDbOperator is initialized twice, here we
        # just do following variable redirection to avoid too much
        # reference code changes
        self.db = self.fcp_mgr.db

    def _dedicate_fcp(self, fcp, assigner_id):
        # dedicate one FCP device to a guest on z/VM
        self._smtclient.dedicate_device(assigner_id, fcp, fcp, 0)

    def _add_disks(self, fcp_list, assigner_id, target_wwpns, target_lun,
                   multipath, os_version, mount_point,):
        # online the volume inside the guest operating system
        self.config_api.config_attach(fcp_list, assigner_id, target_wwpns,
                                      target_lun, multipath, os_version,
                                      mount_point)

    def _rollback_do_attach(self, fcp_list, assigner_id,
                            target_wwpns, target_lun,
                            multipath, os_version, mount_point):
        """ Rollback for the following completed operations:
        1. operation on VM operating system done by _add_disks()
           i.e. online FCP devices and the volume from VM OS
        2. operations on z/VM done by _dedicate_fcp()
           i.e. dedicate FCP device from assigner_id
        3. operations on FCP DB done by get_volume_connector()
           i.e. reserve FCP device and set FCP Multipath Template id
           from FCP DB

        :param fcp_list: (list) a list of FCP devices
        :param assigner_id: (str) the userid of the virtual machine
        :return: None
        """
        # Operation on VM OS:
        # offline volume and FCP devices from VM OS
        with zvmutils.ignore_errors():
            fcp_connections = {fcp: self.db.get_connections_from_fcp(fcp)
                               for fcp in fcp_list}
            # _remove_disks() offline FCP devices only when
            # total_connections is 0, i.e. detaching the last volume
            # from the FCP devices
            # NOTE(review): if the dict-comprehension above raises,
            # fcp_connections stays undefined and the later references
            # hit NameError (swallowed per-iteration by ignore_errors,
            # except the list comprehension below) — confirm intended.
            total_connections = sum(fcp_connections.values())
            self._remove_disks(fcp_list, assigner_id, target_wwpns,
                               target_lun, multipath, os_version,
                               mount_point, total_connections)
            LOG.info("Rollback on VM OS: offline the volume from VM OS")
        # Operation on z/VM:
        # undedicate FCP device from assigner_id
        for fcp in fcp_list:
            with zvmutils.ignore_errors():
                if fcp_connections[fcp] == 0:
                    self._undedicate_fcp(fcp, assigner_id)
                    LOG.info("Rollback on z/VM: "
                             "undedicate FCP device: %s" % fcp)
        # Operation on FCP DB:
        # if connections is 0,
        # then unreserve the FCP device and cleanup tmpl_id
        no_connection_fcps = [fcp for fcp in fcp_connections
                              if fcp_connections[fcp] == 0]
        if no_connection_fcps:
            with zvmutils.ignore_errors():
                self.db.unreserve_fcps(no_connection_fcps)
                LOG.info("Rollback on FCP DB: Unreserve FCP devices %s",
                         no_connection_fcps)

    def _do_attach(self, fcp_list, assigner_id, target_wwpns, target_lun,
                   multipath, os_version, mount_point, is_root_volume):
        """Attach a volume.

        First, we need translate fcp into local wwpn, then dedicate fcp
        to the user if it's needed, after that call smt layer to call
        linux command.
        """
        LOG.info("Start to attach volume to FCP devices "
                 "%s on machine %s." % (fcp_list, assigner_id))

        # _DBLOCK_FCP is the lock used in get_fcp_conn(),
        # here it is used to ensure the operation of FCP DB is
        # thread safe.
        # Example:
        # Before thread-1 enters _attach(),
        # 2 FCP devices are reserved (fcp1, fcp2)
        # by get_volume_connectoer() for this attach.
        # If thread-1 fails increasing connections for 2nd FCP (fcp2),
        # then, thread-2 must wait before thread-1 completes rollback
        # for the state of reserved and connections of both FCPs.
        # More details refer to pull request #668
        with database._DBLOCK_FCP:
            try:
                # The three operations must be put in the same
                # with-block:
                # - Operation on FCP DB: increase_fcp_connections()
                # - Operation on z/VM: _dedicate_fcp()
                # - Operation on VM OS: _add_disks()
                # So as to auto-rollback connections on FCP DB,
                # if any of the three operations raises exception.
                with database.get_fcp_conn():
                    # Operation on FCP DB:
                    # increase connections by 1 and set assigner_id.
                    #
                    # Call increase_fcp_connections() within the
                    # try-block, so that _rollback_do_attach() can be
                    # called to unreserve FCP devices if
                    # increase_fcp_connections() raises exception.
                    #
                    # fcp_connections examples:
                    # {'1a10': 1, '1b10': 1} => attaching 1st volume
                    # {'1a10': 2, '1b10': 2} => attaching 2nd volume
                    # {'1a10': 2, '1b10': 1} => connections differ in
                    #   abnormal case (due to bug)
                    # the values are the connections of the FCP device
                    fcp_connections = \
                        self.fcp_mgr.increase_fcp_connections(
                            fcp_list, assigner_id)

                    LOG.info("The connections of FCP devices before "
                             "being dedicated to virtual machine %s "
                             "is: %s."
                             % (assigner_id, fcp_connections))
                    if is_root_volume:
                        LOG.info("We are attaching root volume, "
                                 "dedicating FCP devices %s "
                                 "to virtual machine %s has been done "
                                 "by refresh_bootmap; "
                                 "skip the remain steps of volume "
                                 "attachment." % (fcp_list, assigner_id))
                        return []

                    # Operation on z/VM:
                    # dedicate FCP devices to the assigner_id in z/VM
                    for fcp in fcp_list:
                        # only dedicate the FCP device on z/VM if
                        # connections is 1 (i.e. 1st volume attached for
                        # the FCP dev) because otherwise the FCP device
                        # has been dedicated already.
                        # If _dedicate_fcp() raises for a FCP device, we
                        # must stop the attachment, i.e. go to
                        # except-block to do _rollback_do_attach()
                        # rather than continue to do _dedicate_fcp() for
                        # the next FCP device
                        if fcp_connections[fcp] == 1:
                            LOG.info("Start to dedicate FCP %s to "
                                     "%s in z/VM." % (fcp, assigner_id))
                            # dedicate the FCP to the assigner in z/VM
                            self._dedicate_fcp(fcp, assigner_id)
                            LOG.info("Dedicating FCP %s to %s in z/VM "
                                     "is done." % (fcp, assigner_id))
                        else:
                            LOG.info("This is not the first time to "
                                     "attach volume to FCP %s, "
                                     "skip dedicating the FCP device "
                                     "in z/VM." % fcp)

                    # Operation on VM operating system:
                    # online the volume in the virtual machine
                    LOG.info("Start to configure volume in the operating "
                             "system of %s." % assigner_id)
                    self._add_disks(fcp_list, assigner_id, target_wwpns,
                                    target_lun, multipath, os_version,
                                    mount_point)
                    LOG.info("Configuring volume in the operating "
                             "system of %s is done." % assigner_id)
                    LOG.info("Attaching volume to FCP devices %s on "
                             "virtual machine %s is "
                             "done." % (fcp_list, assigner_id))
            except Exception as err:
                LOG.error(str(err))
                # Rollback for the following completed operations:
                # 1. Operation on VM OS done by _add_disks()
                # 2. operations on z/VM done by _dedicate_fcp()
                # 3. operations on FCP DB done by get_volume_connector()
                LOG.info("Enter rollback: _rollback_do_attach")
                self._rollback_do_attach(fcp_list, assigner_id,
                                         target_wwpns, target_lun,
                                         multipath, os_version,
                                         mount_point)
                LOG.info("Exit rollback: _rollback_do_attach")
                raise

    def _rollback_do_detach(self, fcp_connections, assigner_id,
                            target_wwpns, target_lun,
                            multipath, os_version, mount_point):
        """ Rollback for the following completed operations:
        1. operation on VM operating system done by _remove_disks()
           i.e. remove FCP devices and the volume from VM OS
        2. operations on z/VM done by _undedicate_fcp()
           i.e. undedicate FCP device from assigner_id

        :param fcp_connections: (dict) FCP device -> connections count
        :param assigner_id: (str) the userid of the virtual machine
        :return: None
        """
        # Operation on z/VM:
        # dedicate FCP devices to the virtual machine
        for fcp in fcp_connections:
            with zvmutils.ignore_errors():
                # _undedicate_fcp() has been done in _do_detach() if
                # fcp_connections[fcp] == 0, so we do _dedicate_fcp() as
                # rollback with the same if-condition
                if fcp_connections[fcp] == 0:
                    # dedicate the FCP to the assigner in z/VM
                    self._dedicate_fcp(fcp, assigner_id)
                    LOG.info("Rollback on z/VM: "
                             "dedicate FCP device: %s" % fcp)
        # Operation on VM operating system:
        # online the volume in the virtual machine
        with zvmutils.ignore_errors():
            fcp_list = [f for f in fcp_connections]
            self._add_disks(fcp_list, assigner_id,
                            target_wwpns, target_lun,
                            multipath, os_version, mount_point)
            LOG.info("Rollback on VM operating system: "
                     "online volume for virtual machine %s"
                     % assigner_id)
undedicate FCP device from assigner_id :param fcp_list: (list) a list of FCP devices :param assigner_id: (str) the userid of the virtual machine :return: None """ # Operation on z/VM: # dedicate FCP devices to the virtual machine for fcp in fcp_connections: with zvmutils.ignore_errors(): # _undedicate_fcp() has been done in _do_detach() if fcp_connections[fcp] == 0, # so we do _dedicate_fcp() as rollback with the same if-condition if fcp_connections[fcp] == 0: # dedicate the FCP to the assigner in z/VM self._dedicate_fcp(fcp, assigner_id) LOG.info("Rollback on z/VM: dedicate FCP device: %s" % fcp) # Operation on VM operating system: # online the volume in the virtual machine with zvmutils.ignore_errors(): fcp_list = [f for f in fcp_connections] self._add_disks(fcp_list, assigner_id, target_wwpns, target_lun, multipath, os_version, mount_point) LOG.info("Rollback on VM operating system: " "online volume for virtual machine %s" % assigner_id) def volume_refresh_bootmap(self, fcpchannels, wwpns, lun, wwid='', transportfiles=None, guest_networks=None, fcp_template_id=None): if not fcp_template_id: min_fcp_paths_count = len(fcpchannels) else: min_fcp_paths_count = self.db.get_min_fcp_paths_count(fcp_template_id) if min_fcp_paths_count == 0: errmsg = ("No FCP devices were found in the FCP Multipath Template %s," "stop refreshing bootmap." % fcp_template_id) LOG.error(errmsg) raise exception.SDKBaseException(msg=errmsg) with zvmutils.acquire_lock(self._lock): LOG.debug('Enter lock scope of volume_refresh_bootmap.') ret = self._smtclient.volume_refresh_bootmap(fcpchannels, wwpns, lun, wwid=wwid, transportfiles=transportfiles, guest_networks=guest_networks, min_fcp_paths_count=min_fcp_paths_count) LOG.debug('Exit lock of volume_refresh_bootmap with ret %s.' 
% ret) return ret def attach(self, connection_info): """Attach a volume to a guest connection_info contains info from host and storage side this mostly includes host side FCP: this can get host side wwpn storage side wwpn storage side lun all the above assume the storage side info is given by caller """ fcps = connection_info['zvm_fcp'] wwpns = connection_info['target_wwpn'] target_lun = connection_info['target_lun'] assigner_id = connection_info['assigner_id'].upper() multipath = connection_info['multipath'].lower() if multipath == 'true': multipath = True else: multipath = False os_version = connection_info['os_version'] mount_point = connection_info['mount_point'] is_root_volume = connection_info.get('is_root_volume', False) if is_root_volume is False and \ not zvmutils.check_userid_exist(assigner_id): LOG.error("The virtual machine '%s' does not exist on z/VM." % assigner_id) raise exception.SDKObjectNotExistError( obj_desc=("Guest '%s'" % assigner_id), modID='volume') else: # transfer to lower cases fcp_list = [x.lower() for x in fcps] target_wwpns = [wwpn.lower() for wwpn in wwpns] try: self._do_attach(fcp_list, assigner_id, target_wwpns, target_lun, multipath, os_version, mount_point, is_root_volume) except Exception: for fcp in fcp_list: with zvmutils.ignore_errors(): _userid, _reserved, _conns, _tmpl_id = self.get_fcp_usage(fcp) LOG.info("After rollback, property of FCP device %s " "is (assigner_id: %s, reserved:%s, " "connections: %s, FCP Multipath Template id: %s)." 
% (fcp, _userid, _reserved, _conns, _tmpl_id)) raise def _undedicate_fcp(self, fcp, assigner_id): self._smtclient.undedicate_device(assigner_id, fcp) def _remove_disks(self, fcp_list, assigner_id, target_wwpns, target_lun, multipath, os_version, mount_point, connections): self.config_api.config_detach(fcp_list, assigner_id, target_wwpns, target_lun, multipath, os_version, mount_point, connections) def _do_detach(self, fcp_list, assigner_id, target_wwpns, target_lun, multipath, os_version, mount_point, is_root_volume, update_connections_only): """Detach a volume from a guest""" LOG.info("Start to detach volume on virtual machine %s from " "FCP devices %s" % (assigner_id, fcp_list)) with database.get_fcp_conn(): # Operation on FCP DB: # decrease connections by 1 # fcp_connections is like {'1a10': 0, '1b10': 2} # the values are the connections of the FCP device fcp_connections = self.fcp_mgr.decrease_fcp_connections(fcp_list) # If is root volume we only need update database record # because the dedicate is done by volume_refresh_bootmap # If update_connections set to True, means upper layer want # to update database record only. For example, try to delete # the instance, then no need to waste time on undedicate if is_root_volume or update_connections_only: if update_connections_only: LOG.info("Update connections only, undedicating FCP devices %s " "from virtual machine %s has been done; skip the remain " "steps of volume detachment" % (fcp_list, assigner_id)) else: LOG.info("We are detaching root volume, undedicating FCP devices %s " "from virtual machine %s has been done; skip the remain " "steps of volume detachment" % (fcp_list, assigner_id)) return # when detaching volumes, if userid not exist, no need to # raise exception. we stop here after the database operations done. if not zvmutils.check_userid_exist(assigner_id): LOG.warning("Virtual machine %s does not exist when trying to detach " "volume from it. 
skip the remain steps of volume " "detachment", assigner_id) return try: LOG.info("Start to remove volume in the operating " "system of %s." % assigner_id) # Operation on VM operating system: # offline the volume in the virtual machine # # When detaching the non-last volume from the FCPs in fcp_connections, # normally, the connections of partial FCPs are non-zero, so that # sum(fcp_connections.values()) > 0 # fcp_connections is like {'1a10': 2, '1b10': 2} # in this case, _remove_disks() must be called with total_connections > 0, # so as NOT to offline the FCPs from VM Linux operating system # When detaching the last volume from the FCPs in fcp_connections, # normally, the connections of all FCPs are 0, so that # sum(fcp_connections.values()) == 0, # fcp_connections is like {'1a10': 0, '1b10': 0} # in this case, _remove_disks() must be called with total_connections as 0, # so as to offline the FCPs from VM Linux operating system # abnormally, the connections of partial FCPs are 0, so that # sum(fcp_connections.values()) > 0 # fcp_connections is like {'1a10': 0, '1b10': 3} # in this case, _remove_disks() must be called with total_connections > 0, # so as NOT to offline the FCPs from VM Linux operating system total_connections = sum(fcp_connections.values()) self._remove_disks(fcp_list, assigner_id, target_wwpns, target_lun, multipath, os_version, mount_point, total_connections) LOG.info("Removing volume in the operating " "system of %s is done." % assigner_id) # Operation on z/VM: # undedicate FCP device from the virtual machine for fcp in fcp_list: if fcp_connections[fcp] == 0: # As _remove_disks() has been run successfully, # we need to try our best to undedicate every FCP device try: LOG.info("Start to undedicate FCP %s from " "%s on z/VM." % (fcp, assigner_id)) self._undedicate_fcp(fcp, assigner_id) LOG.info("FCP %s undedicated from %s on z/VM is " "done." 
% (fcp, assigner_id)) except exception.SDKSMTRequestFailed as err: rc = err.results['rc'] rs = err.results['rs'] if (rc == 404 or rc == 204) and rs == 8: # We ignore the two exceptions raised when FCP device is already undedicated. # Example of exception when rc == 404: # zvmsdk.exception.SDKSMTRequestFailed: # Failed to undedicate device from userid 'JACK0003'. # RequestData: 'changevm JACK0003 undedicate 1d1a', # Results: '{'overallRC': 8, # 'rc': 404, 'rs': 8, 'errno': 0, # 'strError': 'ULGSMC5404E Image device not defined', # 'response': ['(Error) ULTVMU0300E # SMAPI API failed: Image_Device_Undedicate_DM, # Example of exception when rc == 204: # zvmsdk.exception.SDKSMTRequestFailed: # Failed to undedicate device from userid 'JACK0003'. # RequestData: 'changevm JACK0003 undedicate 1b17', # Results: '{'overallRC': 8, # 'rc': 204, 'rs': 8, 'errno': 0, # 'response': ['(Error) ULTVMU0300E # SMAPI API failed: Image_Device_Undedicate, msg = ('ignore an exception because the FCP device {} ' 'has already been undedicdated on z/VM: {}' ).format(fcp, err.format_message()) LOG.warn(msg) else: # raise to do _rollback_do_detach() raise else: LOG.info("The connections of FCP device %s is not 0, " "skip undedicating the FCP device on z/VM." % fcp) LOG.info("Detaching volume on virtual machine %s from FCP devices %s is " "done." % (assigner_id, fcp_list)) except Exception as err: LOG.error(str(err)) # Rollback for the following completed operations: # 1. Operation on VM OS done by _remove_disks() # 2. 
                #    operations on z/VM done by _undedicate_fcp()
                LOG.info("Enter rollback: _rollback_do_detach")
                self._rollback_do_detach(fcp_connections, assigner_id,
                                         target_wwpns, target_lun,
                                         multipath, os_version, mount_point)
                LOG.info("Exit rollback: _rollback_do_detach")
                raise

    def detach(self, connection_info):
        """Detach a volume from a guest
        """
        fcps = connection_info['zvm_fcp']
        wwpns = connection_info['target_wwpn']
        target_lun = connection_info['target_lun']
        # z/VM userids are upper case by convention
        assigner_id = connection_info['assigner_id'].upper()
        # 'multipath' arrives as the string 'true'/'false'; turn into bool
        multipath = connection_info['multipath'].lower()
        os_version = connection_info['os_version']
        mount_point = connection_info['mount_point']
        if multipath == 'true':
            multipath = True
        else:
            multipath = False
        is_root_volume = connection_info.get('is_root_volume', False)
        update_connections_only = connection_info.get(
            'update_connections_only', False)
        # transfer to lower cases
        fcp_list = [x.lower() for x in fcps]
        target_wwpns = [wwpn.lower() for wwpn in wwpns]
        try:
            self._do_detach(fcp_list, assigner_id,
                            target_wwpns, target_lun, multipath,
                            os_version, mount_point, is_root_volume,
                            update_connections_only)
        except Exception:
            # _do_detach() performs its own rollback; here we only log the
            # post-rollback FCP usage for diagnosis and re-raise
            for fcp in fcp_list:
                with zvmutils.ignore_errors():
                    _userid, _reserved, _conns, _tmpl_id = \
                        self.get_fcp_usage(fcp)
                    LOG.info("After rollback, property of FCP device %s "
                             "is (assigner_id: %s, reserved:%s, "
                             "connections: %s, FCP Multipath Template id: %s)."
                             % (fcp, _userid, _reserved, _conns, _tmpl_id))
            raise

    def get_volume_connector(self, assigner_id, reserve,
                             fcp_template_id=None, sp_name=None):
        """Get connector information of the instance for attaching to volumes.

        Connector information is a dictionary representing the Fibre
        Channel(FC) port(s) that will be making the connection.
        The properties of FC port(s) are as follows::
        {
            'zvm_fcp': [fcp1, fcp2]
            'wwpns': [npiv_wwpn1, npiv_wwpn2]
            'phy_to_virt_initiators':{
                npiv_wwpn1: phy_wwpn1,
                npiv_wwpn2: phy_wwpn2,
            }
            'host': LPARname_VMuserid,
            'fcp_paths': 2,            # the count of fcp paths
            'fcp_template_id': fcp_template_id
            # if user doesn't specify it, it is either the SP default or
            # the host default template id
        }
        """
        with database.get_fcp_conn():
            if fcp_template_id and \
                    not self.db.fcp_template_exist_in_db(fcp_template_id):
                errmsg = ("fcp_template_id %s doesn't exist." %
                          fcp_template_id)
                LOG.error(errmsg)
                raise exception.SDKVolumeOperationError(
                    rs=11, userid=assigner_id, msg=errmsg)
            # get lpar name of the userid,
            # if no host name got, raise exception
            zvm_host = zvmutils.get_lpar_name()
            if zvm_host == '':
                errmsg = "failed to get z/VM LPAR name."
                LOG.error(errmsg)
                raise exception.SDKVolumeOperationError(
                    rs=11, userid=assigner_id, msg=errmsg)
            """
            Reserve or unreserve FCP device
            according to assigner id and FCP Multipath Template id.
            """
            if reserve:
                LOG.info("get_volume_connector: Enter reserve_fcp_devices.")
                # The data structure of fcp_list is:
                # [(fcp_id, wwpn_npiv, wwpn_phy)]
                # NOTE(review): the log below accesses f['fcp_id'] (dict
                # style) while the loop further down uses fcp[0]/fcp[1]
                # (tuple style) — confirm the real element type
                fcp_list, fcp_template_id = self.fcp_mgr.reserve_fcp_devices(
                    assigner_id, fcp_template_id, sp_name)
                LOG.info("get_volume_connector: Exit reserve_fcp_devices {}".format(
                    [f['fcp_id'] for f in fcp_list]))
            else:
                LOG.info("get_volume_connector: Enter unreserve_fcp_devices.")
                # The data structure of fcp_list is:
                # [(fcp_id, wwpn_npiv, wwpn_phy, connections)]
                # An example of fcp_list:
                # [('1c10', 'c12345abcdefg1', 'c1234abcd33002641', 1),
                #  ('1d10', 'c12345abcdefg2', 'c1234abcd33002641', 0)]
                fcp_list = self.fcp_mgr.unreserve_fcp_devices(
                    assigner_id, fcp_template_id)
                LOG.info("get_volume_connector: Exit unreserve_fcp_devices {}".format(
                    [f['fcp_id'] for f in fcp_list]))
            empty_connector = {'zvm_fcp': [], 'wwpns': [], 'host': '',
                               'phy_to_virt_initiators': {},
                               'fcp_paths': 0,
                               'fcp_template_id': fcp_template_id}
            if not fcp_list:
                errmsg = ("Not enough available FCP devices found from "
                          "FCP Multipath Template(id={})".format(fcp_template_id))
                LOG.error(errmsg)
                return empty_connector
            # get wwpns of fcp devices
            wwpns = []
            phy_virt_wwpn_map = {}
            fcp_ids = []
            for fcp in fcp_list:
                wwpn_npiv = fcp[1]
                fcp_ids.append(fcp[0])
                wwpns.append(wwpn_npiv)
                phy_virt_wwpn_map[wwpn_npiv] = fcp[2]
            # return the LPARname+VMuserid as host
            ret_host = zvm_host + '_' + assigner_id
            connector = {'zvm_fcp': fcp_ids,
                         'wwpns': wwpns,
                         'phy_to_virt_initiators': phy_virt_wwpn_map,
                         'host': ret_host,
                         'fcp_paths': len(fcp_list),
                         'fcp_template_id': fcp_template_id}
            LOG.info('get_volume_connector returns %s for '
                     'assigner %s and FCP Multipath Template %s'
                     % (connector, assigner_id, fcp_template_id))
        return connector

    def check_fcp_exist_in_db(self, fcp, raise_exec=True):
        """Check whether the FCP device is recorded in the FCP DB.

        :param fcp: (str) FCP device id; compared lower-cased
        :param raise_exec: (bool) when True, raise SDKObjectNotExistError
            if the FCP is not found; when False, only log a warning
        :return: True if found, False if not found and raise_exec is False
        """
        all_fcps_raw = self.db.get_all()
        all_fcps = []
        for item in all_fcps_raw:
            all_fcps.append(item[0].lower())
        if fcp not in all_fcps:
            if raise_exec:
                LOG.error("fcp %s not exist in db!", fcp)
                raise exception.SDKObjectNotExistError(
                    obj_desc=("FCP '%s'" % fcp), modID='volume')
            else:
                LOG.warning("fcp %s not exist in db!", fcp)
                return False
        else:
            return True

    def get_fcp_usage(self, fcp):
        """Return (userid, reserved, connections, tmpl_id) of one FCP."""
        userid, reserved, connections, tmpl_id = self.db.get_usage_of_fcp(fcp)
        # NOTE(review): the argument order (..., fcp, tmpl_id) does not match
        # the placeholder order (..., tmpl_id, ..., FCP) of this message, so
        # fcp and tmpl_id appear swapped in the debug output
        LOG.debug("Got userid:%s, reserved:%s, connections:%s, tmpl_id: %s "
                  "of FCP:%s" % (userid, reserved, connections, fcp, tmpl_id))
        return userid, reserved, connections, tmpl_id

    def set_fcp_usage(self, fcp, assigner_id, reserved, connections,
                      fcp_template_id):
        """Overwrite the usage record of one FCP device in the FCP DB."""
        self.db.update_usage_of_fcp(fcp, assigner_id, reserved, connections,
                                    fcp_template_id)
        LOG.info("Set usage of fcp %s to userid:%s, reserved:%s, "
                 "connections:%s, tmpl_id: %s." % (fcp, assigner_id,
                                                   reserved, connections,
                                                   fcp_template_id))
zVMCloudConnector-1.6.3/zvmsdk/dist.py0000775000175000017510000016754214315210052017364 0ustar  ruirui00000000000000# Copyright 2017,2022 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import abc
import netaddr
import os
import six

from jinja2 import Environment, FileSystemLoader

from zvmsdk import config
from zvmsdk import exception
from zvmsdk import log
from zvmsdk import smtclient


CONF = config.CONF
LOG = log.LOG


@six.add_metaclass(abc.ABCMeta)
class LinuxDist(object):
    """Linux distribution base class

    Due to we need to interact with linux dist and inject different files
    according to the dist version. Currently RHEL6, RHEL7, SLES11, SLES12,
    UBUNTU16 and RHCOS4 are supported.
    """

    def __init__(self):
        # shared smtclient handle used by subclasses (e.g. rhcos temp files)
        self._smtclient = smtclient.get_smtclient()

    def create_network_configuration_files(self, file_path, guest_networks,
                                           first, active=False):
        """Generate network configuration files for guest vm

        :param list guest_networks: a list of network info for the guest.
               It has one dictionary that contain some of the below keys for
               each network, the format is:
               {'ip_addr': (str) IP address,
               'dns_addr': (list) dns addresses,
               'gateway_addr': (str) gateway address,
               'cidr': (str) cidr format,
               'nic_vdev': (str) VDEV of the nic}

               Example for guest_networks:
               [{'ip_addr': '192.168.95.10',
               'dns_addr': ['9.0.2.1', '9.0.3.1'],
               'gateway_addr': '192.168.95.1',
               'cidr': "192.168.95.0/24",
               'nic_vdev': '1000'},
               {'ip_addr': '192.168.96.10',
               'dns_addr': ['9.0.2.1', '9.0.3.1'],
               'gateway_addr': '192.168.96.1',
               'cidr': "192.168.96.0/24",
               'nic_vdev': '1003'}]
        :returns cfg_files: the network interface configuration file name
                 and file content
                 cmd_strings: shell command, helps to enable the network
                 interface, will be put into znetconfig file
                 clean_cmd: if first is true, it is used to erase the
                 previous network interface configuration, will be put into
                 invokeScript file
                 net_enable_cmd: 'ip addr' and 'ip link' command to enable
                 the new network interface
        """
        cfg_files = []
        cmd_strings = ''
        udev_cfg_str = ''
        dns_cfg_str = ''
        route_cfg_str = ''
        net_enable_cmd = ''
        cmd_str = None
        # NOTE(review): the file_path parameter is overwritten here by the
        # dist-specific path, so the passed-in value is never used
        file_path = self._get_network_file_path()
        file_name_route = file_path + 'routes'
        if first:
            clean_cmd = self._get_clean_command()
        else:
            clean_cmd = ''
        file_name_dns = self._get_dns_filename()
        for network in guest_networks:
            base_vdev = network['nic_vdev'].lower()
            file_name = self._get_device_filename(base_vdev)
            (cfg_str, cmd_str, dns_str,
             route_str, net_cmd) = self._generate_network_configuration(
                network, base_vdev, active=active)
            LOG.debug('Network configure file content is: %s', cfg_str)
            target_net_conf_file_name = file_path + file_name
            cfg_files.append((target_net_conf_file_name, cfg_str))
            udev_cfg_str += self._get_udev_configuration(base_vdev,
                                '0.0.' + str(base_vdev).zfill(4))
            self._append_udev_rules_file(cfg_files, base_vdev)
            if cmd_str is not None:
                cmd_strings += cmd_str
            if net_cmd is not None:
                net_enable_cmd += net_cmd
            if len(dns_str) > 0:
                dns_cfg_str += dns_str
            if len(route_str) > 0:
                route_cfg_str += route_str
        if len(dns_cfg_str) > 0:
            cfg_files.append((file_name_dns, dns_cfg_str))
        cmd_strings = self._append_udev_info(cmd_strings, cfg_files,
                                             file_name_route, route_cfg_str,
                                             udev_cfg_str, first)
        return cfg_files, cmd_strings, clean_cmd, net_enable_cmd

    def _generate_network_configuration(self, network, vdev, active=False):
        """Build the per-NIC configuration pieces for one network dict.

        :return: 5-tuple (cfg_str, cmd_str, dns_str, route_str, net_cmd)
        """
        ip_v4 = dns_str = gateway_v4 = ''
        ip_cidr = netmask_v4 = broadcast_v4 = ''
        net_cmd = mtu = ''
        dns_v4 = []
        if (('ip_addr' in network.keys()) and
                (network['ip_addr'] is not None)):
            ip_v4 = network['ip_addr']
        if (('gateway_addr' in network.keys()) and
                (network['gateway_addr'] is not None)):
            gateway_v4 = network['gateway_addr']
        if (('dns_addr' in network.keys()) and
                (network['dns_addr'] is not None) and
                (len(network['dns_addr']) > 0)):
            for dns in network['dns_addr']:
                dns_str += 'nameserver ' + dns + '\n'
                dns_v4.append(dns)
        if (('cidr' in network.keys()) and
                (network['cidr'] is not None)):
            ip_cidr = network['cidr']
            netmask_v4 = str(netaddr.IPNetwork(ip_cidr).netmask)
            broadcast_v4 = str(netaddr.IPNetwork(ip_cidr).broadcast)
            # netaddr returns None as broadcast for /31 and /32 networks
            if broadcast_v4 == 'None':
                broadcast_v4 = ''
        if (('mtu' in network.keys()) and
                (network['mtu'] is not None)):
            mtu = str(network['mtu'])
        device = self._get_device_name(vdev)
        # the NIC occupies three consecutive device numbers:
        # read, write and data subchannels
        address_read = str(vdev).zfill(4)
        address_write = str(hex(int(vdev, 16) + 1))[2:].zfill(4)
        address_data = str(hex(int(vdev, 16) + 2))[2:].zfill(4)
        subchannels = '0.0.%s' % address_read.lower()
        subchannels += ',0.0.%s' % address_write.lower()
        subchannels += ',0.0.%s' % address_data.lower()
        cfg_str = self._get_cfg_str(device, broadcast_v4, gateway_v4,
                                    ip_v4, netmask_v4, address_read,
                                    subchannels, dns_v4, mtu)
        cmd_str = self._get_cmd_str(address_read, address_write,
                                    address_data)
        route_str = self._get_route_str(gateway_v4)
        if active and ip_v4 != '':
            # derive the prefix length for the 'ip addr add' command;
            # default to /32 when no cidr was given
            if ip_cidr != '':
                mask = ip_cidr.rpartition('/')[2]
            else:
                mask = '32'
            full_ip = '%s/%s' % (ip_v4, mask)
            net_cmd = self._enable_network_interface(device, full_ip,
                                                     broadcast_v4)
        return cfg_str, cmd_str, dns_str, route_str, net_cmd

    def get_simple_znetconfig_contents(self):
        return '\n'.join(('cio_ignore -R',
                          'znetconf -A',
                          'cio_ignore -u'))

    def get_device_name(self, vdev):
        return self._get_device_name(vdev)

    def get_network_configuration_files(self, vdev):
        """Return the full path of the config file for the NIC at vdev."""
        vdev = vdev.lower()
        file_path = self._get_network_file_path()
        device = self._get_device_filename(vdev)
        target_net_conf_file_name = os.path.join(file_path, device)
        return target_net_conf_file_name

    def delete_vdev_info(self, vdev):
        cmd = self._delete_vdev_info(vdev)
        return cmd

    @abc.abstractmethod
    def _get_network_file_path(self):
        """Get network file configuration path."""
        pass

    def get_change_passwd_command(self, admin_password):
        """construct change password command

        :admin_password: the password to be changed to
        """
        return "echo 'root:%s' | chpasswd" % admin_password

    @abc.abstractmethod
    def get_volume_attach_configuration_cmds(self, fcp_list, target_wwpns,
                                             target_lun, multipath,
                                             mount_point):
        "generate punch script for attachment configuration"
        pass

    @abc.abstractmethod
    def get_volume_detach_configuration_cmds(self, fcp_list, target_wwpns,
                                             target_lun, multipath,
                                             mount_point, connections):
        "generate punch script for detachment configuration"
        pass

    @abc.abstractmethod
    def _get_cfg_str(self, device, broadcast_v4, gateway_v4, ip_v4,
                     netmask_v4, address_read, subchannels):
        """construct configuration file of network device."""
        pass

    @abc.abstractmethod
    def _get_device_filename(self, vdev):
        """construct the name of a network device file."""
        pass

    @abc.abstractmethod
    def _get_route_str(self, gateway_v4):
        """construct a router string."""
        pass

    @abc.abstractmethod
    def _enable_network_interface(self, device, ip, broadcast):
        """construct the shell commands that bring the interface online."""
        pass

    @abc.abstractmethod
    def _get_clean_command(self):
        """construct a clean command to remove the old network config."""
        pass

    @abc.abstractmethod
    def _get_cmd_str(self, address_read, address_write, address_data):
        """construct network startup command string."""
        pass

    @abc.abstractmethod
    def _get_dns_filename(self):
        """construct the name of dns file."""
        pass

    @abc.abstractmethod
    def get_znetconfig_contents(self):
        """construct znetconfig file will be called during first boot."""
        pass

    @abc.abstractmethod
    def _get_device_name(self, vdev):
        """construct the name of a network device."""
        pass

    @abc.abstractmethod
    def _get_udev_configuration(self, device, dev_channel):
        """construct udev configuration info."""
        pass

    @abc.abstractmethod
    def _get_udev_rules(self, channel_read, channel_write, channel_data):
        """construct udev rules info."""
        pass

    @abc.abstractmethod
    def _append_udev_info(self, cmd_str, cfg_files, file_name_route,
                          route_cfg_str, udev_cfg_str, first=False):
        return cmd_str

    @abc.abstractmethod
    def _append_udev_rules_file(self, cfg_files, base_vdev):
        pass

    @abc.abstractmethod
    def get_scp_string(self, root, fcp, wwpn, lun):
        """construct scp_data string for ipl parameter"""
        pass

    @abc.abstractmethod
    def get_zipl_script_lines(self, image, ramdisk, root, fcp, wwpn, lun):
        """construct the lines composing the script to generate
        the /etc/zipl.conf file
        """
        pass

    @abc.abstractmethod
    def create_active_net_interf_cmd(self):
        """construct active command which will initialize and configure vm."""
        pass

    @abc.abstractmethod
    def _delete_vdev_info(self, vdev):
        """delete udev rules file."""
        pass

    def generate_set_hostname_script(self, hostname):
        lines = ['#!/bin/bash\n',
                 'echo -n %s > /etc/hostname\n' % hostname,
                 '/bin/hostname %s\n' % hostname]
        return lines

    def get_template(self, module, template_name):
        """Load a Jinja2 template from <module>/templates/<template_name>
        relative to this source file."""
        relative_path = module + "/templates"
        base_path = os.path.dirname(os.path.abspath(__file__))
        template_file_path = os.path.join(base_path, relative_path,
                                          template_name)
        template_file_directory = \
            os.path.dirname(template_file_path)
        template_loader = FileSystemLoader(searchpath=template_file_directory)
        env = Environment(loader=template_loader)
        template = env.get_template(template_name)
        return template

    def get_extend_partition_cmds(self):
        """Render the script that grows the root volume partition."""
        template = self.get_template("vmactions", "grow_root_volume.j2")
        content = template.render()
        return content


class rhel(LinuxDist):
    def _get_network_file_path(self):
        return '/etc/sysconfig/network-scripts/'

    def _get_cfg_str(self, device, broadcast_v4, gateway_v4, ip_v4,
                     netmask_v4, address_read, subchannels, dns_v4, mtu):
        # build an ifcfg-style configuration file for a qeth layer2 NIC
        cfg_str = 'DEVICE=\"' + device + '\"\n'
        cfg_str += 'BOOTPROTO=\"static\"\n'
        cfg_str += 'BROADCAST=\"' + broadcast_v4 + '\"\n'
        cfg_str += 'GATEWAY=\"' + gateway_v4 + '\"\n'
        cfg_str += 'IPADDR=\"' + ip_v4 + '\"\n'
        cfg_str += 'NETMASK=\"' + netmask_v4 + '\"\n'
        cfg_str += 'NETTYPE=\"qeth\"\n'
        cfg_str += 'ONBOOT=\"yes\"\n'
        cfg_str += 'PORTNAME=\"PORT' + address_read + '\"\n'
        cfg_str += 'OPTIONS=\"layer2=1\"\n'
        cfg_str += 'SUBCHANNELS=\"' + subchannels + '\"\n'
        cfg_str += 'MTU=\"' + mtu + '\"\n'
        if (dns_v4 is not None) and (len(dns_v4) > 0):
            i = 1
            for dns in dns_v4:
                cfg_str += 'DNS' + str(i) + '=\"' + dns + '\"\n'
                i += 1
        return cfg_str

    def _get_route_str(self, gateway_v4):
        return ''

    def _get_cmd_str(self, address_read, address_write, address_data):
        return ''

    def _get_dns_filename(self):
        return '/etc/resolv.conf'

    def _get_device_name(self, vdev):
        return 'eth' + str(vdev).zfill(4)

    def _get_udev_configuration(self, device, dev_channel):
        return ''

    def _append_udev_info(self, cmd_str, cfg_files, file_name_route,
                          route_cfg_str, udev_cfg_str, first=False):
        return cmd_str

    def _get_udev_rules(self, channel_read, channel_write, channel_data):
        """construct udev rules info."""
        return ''

    def _append_udev_rules_file(self, cfg_files, base_vdev):
        pass

    def _enable_network_interface(self, device, ip, broadcast):
        return ''

    def _delete_vdev_info(self, vdev):
        return ''


class rhel6(rhel):
    def get_znetconfig_contents(self):
        return '\n'.join(('cio_ignore -R',
                          'znetconf -R -n',
                          'udevadm trigger',
                          'udevadm settle',
                          'sleep 2',
                          'znetconf -A',
                          'service network restart',
                          'cio_ignore -u'))

    def _get_device_filename(self, vdev):
        return 'ifcfg-eth' + str(vdev).zfill(4)

    def _get_all_device_filename(self):
        return 'ifcfg-eth*'

    def _get_device_name(self, vdev):
        return 'eth' + str(vdev).zfill(4)

    def get_scp_string(self, root, fcp, wwpn, lun):
        return ("=root=%(root)s selinux=0 "
                "rd_ZFCP=0.0.%(fcp)s,0x%(wwpn)s,0x%(lun)s") % {
                'root': root, 'fcp': fcp, 'wwpn': wwpn, 'lun': lun}

    def get_zipl_script_lines(self, image, ramdisk, root, fcp, wwpn, lun):
        return ['#!/bin/bash\n',
                ('echo -e "[defaultboot]\\n'
                 'timeout=5\\n'
                 'default=boot-from-volume\\n'
                 'target=/boot/\\n'
                 '[boot-from-volume]\\n'
                 'image=%(image)s\\n'
                 'ramdisk=%(ramdisk)s\\n'
                 'parameters=\\"root=%(root)s '
                 'rd_ZFCP=0.0.%(fcp)s,0x%(wwpn)s,0x%(lun)s selinux=0\\""'
                 '>/etc/zipl_volume.conf\n'
                 'zipl -c /etc/zipl_volume.conf')
                % {'image': image, 'ramdisk': ramdisk, 'root': root,
                   'fcp': fcp, 'wwpn': wwpn, 'lun': lun}]

    def create_active_net_interf_cmd(self):
        return 'service zvmguestconfigure start'

    def _get_clean_command(self):
        files = os.path.join(self._get_network_file_path(),
                             self._get_all_device_filename())
        return '\nrm -f %s\n' % files

    def generate_set_hostname_script(self, hostname):
        lines = ['#!/bin/bash\n',
                 'sed -i "s/^HOSTNAME=.*/HOSTNAME=%s/" '
                 '/etc/sysconfig/network\n' % hostname,
                 '/bin/hostname %s\n' % hostname]
        return lines

    def get_volume_attach_configuration_cmds(self, fcp_list, target_wwpns,
                                             target_lun, multipath,
                                             mount_point):
        "generate punch script for attachment configuration"
        # volume attachment is not supported on RHEL6
        func_name = 'get_volume_attach_configuration_cmds'
        raise exception.SDKFunctionNotImplementError(func=func_name,
                                                     modID='volume')

    def get_volume_detach_configuration_cmds(self, fcp_list, target_wwpns,
                                             target_lun, multipath,
                                             mount_point, connections):
        "generate punch script for detachment configuration"
        # volume detachment is not supported on RHEL6
        func_name = 'get_volume_detach_configuration_cmds'
        raise \
            exception.SDKFunctionNotImplementError(func=func_name,
                                                   modID='volume')


class rhel7(rhel):
    def get_znetconfig_contents(self):
        return '\n'.join(('cio_ignore -R',
                          'znetconf -R -n',
                          'udevadm trigger',
                          'udevadm settle',
                          'sleep 2',
                          'znetconf -A',
                          'cio_ignore -u'))

    def _get_device_filename(self, vdev):
        # Construct a device like ifcfg-enccw0.0.1000, ifcfg-enccw0.0.1003
        return 'ifcfg-enccw0.0.' + str(vdev).zfill(4)

    def _get_all_device_filename(self):
        return 'ifcfg-enccw0.0.*'

    def _get_device_name(self, vdev):
        # Construct a device like enccw0.0.1000, enccw0.0.1003
        return 'enccw0.0.' + str(vdev).zfill(4)

    def get_scp_string(self, root, fcp, wwpn, lun):
        return ("=root=%(root)s selinux=0 zfcp.allow_lun_scan=0 "
                "rd.zfcp=0.0.%(fcp)s,0x%(wwpn)s,0x%(lun)s") % {
                'root': root, 'fcp': fcp, 'wwpn': wwpn, 'lun': lun}

    def get_zipl_script_lines(self, image, ramdisk, root, fcp, wwpn, lun):
        return ['#!/bin/bash\n',
                ('echo -e "[defaultboot]\\n'
                 'timeout=5\\n'
                 'default=boot-from-volume\\n'
                 'target=/boot/\\n'
                 '[boot-from-volume]\\n'
                 'image=%(image)s\\n'
                 'ramdisk=%(ramdisk)s\\n'
                 'parameters=\\"root=%(root)s '
                 'rd.zfcp=0.0.%(fcp)s,0x%(wwpn)s,0x%(lun)s '
                 'zfcp.allow_lun_scan=0 selinux=0\\""'
                 '>/etc/zipl_volume.conf\n'
                 'zipl -c /etc/zipl_volume.conf')
                % {'image': image, 'ramdisk': ramdisk, 'root': root,
                   'fcp': fcp, 'wwpn': wwpn, 'lun': lun}]

    def _enable_network_interface(self, device, ip, broadcast):
        if len(broadcast) > 0:
            activeIP_str = 'ip addr add %s broadcast %s dev %s\n' % (ip,
                                broadcast, device)
        else:
            activeIP_str = 'ip addr add %s dev %s\n' % (ip, device)
        activeIP_str += 'ip link set dev %s up\n' % device
        return activeIP_str

    def create_active_net_interf_cmd(self):
        return 'systemctl start zvmguestconfigure.service'

    def _get_clean_command(self):
        files = os.path.join(self._get_network_file_path(),
                             self._get_all_device_filename())
        return '\nrm -f %s\n' % files

    def get_volume_attach_configuration_cmds(self, fcp_list, target_wwpns,
                                             target_lun, multipath,
                                             mount_point):
        """rhel7"""
        template = self.get_template("volumeops", "rhel7_attach_volume.j2")
        target_filename = mount_point.replace('/dev/', '')
        content = template.render(fcp_list=fcp_list,
                                  wwpns=target_wwpns,
                                  lun=target_lun,
                                  target_filename=target_filename)
        return content

    def get_volume_detach_configuration_cmds(self, fcp_list, target_wwpns,
                                             target_lun, multipath,
                                             mount_point, connections):
        """rhel7"""
        if connections > 0:
            # if this volume is the last volume
            # we need to know it and offline the FCP devices
            is_last_volume = 0
        else:
            is_last_volume = 1
        template = self.get_template("volumeops", "rhel7_detach_volume.j2")
        target_filename = mount_point.replace('/dev/', '')
        content = template.render(fcp_list=fcp_list,
                                  wwpns=target_wwpns,
                                  lun=target_lun,
                                  target_filename=target_filename,
                                  is_last_volume=is_last_volume)
        return content


class rhel8(rhel7):
    """docstring for rhel8"""
    def _get_device_filename(self, vdev):
        return 'ifcfg-enc' + str(vdev).zfill(4)

    def _get_all_device_filename(self):
        return 'ifcfg-enc*'

    def _get_device_name(self, vdev):
        # Construct a device like enc1000
        return 'enc' + str(vdev).zfill(4)

    def _get_clean_command(self):
        files = os.path.join(self._get_network_file_path(),
                             self._get_all_device_filename())
        return '\nrm -f %s\n' % files

    def get_volume_attach_configuration_cmds(self, fcp_list, target_wwpns,
                                             target_lun, multipath,
                                             mount_point):
        """rhel8 attach script generation"""
        template = self.get_template("volumeops", "rhel8_attach_volume.j2")
        target_filename = mount_point.replace('/dev/', '')
        content = template.render(fcp_list=fcp_list,
                                  wwpns=target_wwpns,
                                  lun=target_lun,
                                  target_filename=target_filename)
        return content

    def get_volume_detach_configuration_cmds(self, fcp_list, target_wwpns,
                                             target_lun, multipath,
                                             mount_point, connections):
        """rhel8 detach script generation"""
        if connections > 0:
            # if this volume is the last volume
            # we need to know it and offline the FCP devices
            is_last_volume = 0
        else:
            is_last_volume = 1
        template = self.get_template("volumeops", "rhel8_detach_volume.j2")
target_filename = mount_point.replace('/dev/', '') content = template.render(fcp_list=fcp_list, wwpns=target_wwpns, lun=target_lun, target_filename=target_filename, is_last_volume=is_last_volume) return content class rhel9(rhel8): pass class rhcos(LinuxDist): def create_coreos_parameter(self, network_info, userid=''): try: # TODO: fix the limitation that assuming the first nic configured vif = network_info[0] ip_addr = vif['ip_addr'] gateway_addr = vif['gateway_addr'] netmask = vif['cidr'].split("/")[-1] nic_name = "enc" + vif.get('nic_vdev', CONF.zvm.default_nic_vdev) hostname = vif.get('hostname', userid) or "localhost" # update dns name server info if they're defined in subnet _dns = ["", ""] if 'dns_addr' in vif.keys(): if ((vif['dns_addr'] is not None) and (len(vif['dns_addr']) > 0)): _index = 0 for dns in vif['dns_addr']: _dns[_index] = dns _index += 1 mtu = vif['mtu'] # transfor network info and hostname into form of # ip=:[]::: # ::none[:[][:]]; result = "%s::%s:%s:%s:%s:none:%s:%s;%s" % (ip_addr, gateway_addr, netmask, hostname, nic_name, _dns[0], _dns[1], mtu) return result except Exception as err: LOG.error("Failed to create coreos parameter for userid '%s'," "error: %s" % (userid, err)) raise def create_coreos_parameter_temp_file(self, network_info, userid): # Create the coreos parameters for ZCC, includes ignitionUrl, diskType, # nicID and ipConfig, then save them in a temp file try: result = self.create_coreos_parameter(network_info, userid) tmp_path = self._smtclient.get_guest_path(userid.upper()) LOG.debug("Created coreos fixed ip parameter: %(result)s, " "writing them to tempfile: %(tmp_path)s/fixed_ip_param" % {'result': result, 'tmp_path': tmp_path}) with open('%s/fixed_ip_param' % tmp_path, 'w') as f: f.write(result) f.write('\n') return True except Exception as err: LOG.error("Failed to create coreos parameter for userid '%s'," "error: %s" % (userid, err)) return False def read_coreos_parameter(self, userid): # read coreos fixed ip parameters 
from tempfile by matching userid tmp_path = self._smtclient.get_guest_path(userid.upper()) tmp_file_path = ('%s/fixed_ip_param' % tmp_path) with open(tmp_file_path, 'r') as f: fixed_ip_parameter = f.read().replace('\n', '') LOG.debug('Read coreos fixed ip parameter: %(parameter)s ' 'from tempfile: %(filename)s' % {'parameter': fixed_ip_parameter, 'filename': tmp_file_path}) # Clean up tempfile self._smtclient.clean_temp_folder(tmp_path) return fixed_ip_parameter def get_volume_attach_configuration_cmds(self, fcp_list, target_wwpns, target_lun, multipath, mount_point): "generate punch script for attachment configuration" func_name = 'get_volume_attach_configuration_cmds' raise exception.SDKFunctionNotImplementError(func=func_name, modID='volume') def get_volume_detach_configuration_cmds(self, fcp_list, target_wwpns, target_lun, multipath, mount_point, connections): "generate punch script for detachment configuration" func_name = 'get_volume_detach_configuration_cmds' raise exception.SDKFunctionNotImplementError(func=func_name, modID='volume') def _append_udev_info(self, cmd_str, cfg_files, file_name_route, route_cfg_str, udev_cfg_str, first=False): pass def _append_udev_rules_file(self, cfg_files, base_vdev): pass def _delete_vdev_info(self, vdev): pass def _enable_network_interface(self, device, ip, broadcast): pass def _get_cfg_str(self, device, broadcast_v4, gateway_v4, ip_v4, netmask_v4, address_read, subchannels): pass def _get_clean_command(self): pass def _get_cmd_str(self, address_read, address_write, address_data): pass def _get_device_filename(self, vdev): pass def _get_device_name(self, vdev): pass def _get_dns_filename(self): pass def _get_network_file_path(self): pass def _get_route_str(self, gateway_v4): pass def _get_udev_configuration(self, device, dev_channel): pass def _get_udev_rules(self, channel_read, channel_write, channel_data): pass def create_active_net_interf_cmd(self): pass def get_scp_string(self, root, fcp, wwpn, lun): pass def 
class rhcos4(rhcos):
    pass


class sles(LinuxDist):
    """Common SUSE Linux Enterprise Server behaviour.

    NIC config files live under /etc/sysconfig/network/, qeth devices are
    grouped through generated udev rules, and volume attach/detach punch
    scripts come from the sles_* Jinja2 templates.
    """

    def _get_network_file_path(self):
        return '/etc/sysconfig/network/'

    def _get_cfg_str(self, device, broadcast_v4, gateway_v4, ip_v4,
                     netmask_v4, address_read, subchannels, dns_v4, mtu):
        """Return the ifcfg-* file content for one NIC.

        Also remembers dns_v4 on self so sles15's znetconfig step can
        later push the static DNS servers into netconfig.
        """
        cfg_str = "BOOTPROTO=\'static\'\n"
        cfg_str += "IPADDR=\'%s\'\n" % ip_v4
        cfg_str += "NETMASK=\'%s\'\n" % netmask_v4
        cfg_str += "BROADCAST=\'%s\'\n" % broadcast_v4
        cfg_str += "STARTMODE=\'onboot\'\n"
        cfg_str += ("NAME=\'OSA Express Network card (%s)\'\n" %
                    address_read)
        cfg_str += "MTU=\'%s\'\n" % mtu
        if (dns_v4 is not None) and (len(dns_v4) > 0):
            self.dns_v4 = dns_v4
        else:
            self.dns_v4 = None
        return cfg_str

    def _get_route_str(self, gateway_v4):
        route_str = 'default %s - -\n' % gateway_v4
        return route_str

    def _get_cmd_str(self, address_read, address_write, address_data):
        cmd_str = 'qeth_configure -l 0.0.%s ' % address_read.lower()
        cmd_str += '0.0.%(write)s 0.0.%(data)s 1\n' % {
            'write': address_write.lower(),
            'data': address_data.lower()}
        # Record the device triplet so it is re-activated after reboot.
        cmd_str += ('echo "0.0.%(read)s,0.0.%(write)s,0.0.%(data)s #`date`"'
                    ' >>/boot/zipl/active_devices.txt\n' % {
                        'read': address_read.lower(),
                        'write': address_write.lower(),
                        'data': address_data.lower()})
        return cmd_str

    def _get_dns_filename(self):
        return '/etc/resolv.conf'

    def _get_device_filename(self, vdev):
        return 'ifcfg-eth' + str(vdev).zfill(4)

    def _get_all_device_filename(self):
        return 'ifcfg-eth*'

    def _get_device_name(self, vdev):
        return 'eth' + str(vdev).zfill(4)

    def _append_udev_info(self, cmd_str, cfg_files, file_name_route,
                          route_cfg_str, udev_cfg_str, first=False):
        # On first boot the udev/route files are punched as files;
        # afterwards they are appended via echo commands.
        udev_file_name = '/etc/udev/rules.d/70-persistent-net.rules'
        if first:
            cfg_files.append((udev_file_name, udev_cfg_str))
            if len(route_cfg_str) > 0:
                cfg_files.append((file_name_route, route_cfg_str))
        else:
            cmd_str += ("echo '%s'"
                        ' >>%s\n' % (udev_cfg_str, udev_file_name))
            if len(route_cfg_str) > 0:
                cmd_str += ('echo "%s"'
                            ' >>%s\n' % (route_cfg_str, file_name_route))
        return cmd_str

    def _get_udev_configuration(self, device, dev_channel):
        cfg_str = 'SUBSYSTEM==\"net\", ACTION==\"add\", DRIVERS==\"qeth\",'
        cfg_str += ' KERNELS==\"%s\", ATTR{type}==\"1\",' % dev_channel
        cfg_str += ' KERNEL==\"eth*\", NAME=\"eth%s\"\n' % device
        return cfg_str

    def _append_udev_rules_file(self, cfg_files, base_vdev):
        # qeth needs read/write/data subchannels at vdev, vdev+1, vdev+2.
        rules_file_name = '/etc/udev/rules.d/51-qeth-0.0.%s.rules' % base_vdev
        read_ch = '0.0.' + base_vdev
        write_ch = '0.0.' + str(hex(int(base_vdev, 16) + 1))[2:]
        data_ch = '0.0.' + str(hex(int(base_vdev, 16) + 2))[2:]
        udev_rules_str = self._get_udev_rules(read_ch, write_ch, data_ch)
        cfg_files.append((rules_file_name, udev_rules_str))

    def _get_udev_rules(self, channel_read, channel_write, channel_data):
        """construct udev rules info.

        BUG FIX (behavior-neutral): the original repeated the 'read' key
        several times inside single dict literals used for %-formatting;
        duplicate keys are dead code (the last one wins), so they are
        removed here.  The generated rules text is unchanged.
        """
        sub_str = '%(read)s %%k %(read)s %(write)s %(data)s qeth' % {
            'read': channel_read,
            'write': channel_write,
            'data': channel_data}
        rules_str = '# Configure qeth device at'
        rules_str += ' %(read)s/%(write)s/%(data)s\n' % {
            'read': channel_read,
            'write': channel_write,
            'data': channel_data}
        rules_str += ('ACTION==\"add\", SUBSYSTEM==\"drivers\", KERNEL=='
                      '\"qeth\", IMPORT{program}=\"collect %s\"\n') % sub_str
        rules_str += ('ACTION==\"add\", SUBSYSTEM==\"ccw\", KERNEL==\"'
                      '%(read)s\", IMPORT{program}="collect %(channel)s\"\n'
                      ) % {'read': channel_read, 'channel': sub_str}
        rules_str += ('ACTION==\"add\", SUBSYSTEM==\"ccw\", KERNEL==\"'
                      '%(write)s\", IMPORT{program}=\"collect %(channel)s\"\n'
                      ) % {'write': channel_write, 'channel': sub_str}
        rules_str += ('ACTION==\"add\", SUBSYSTEM==\"ccw\", KERNEL==\"'
                      '%(data)s\", IMPORT{program}=\"collect %(channel)s\"\n'
                      ) % {'data': channel_data, 'channel': sub_str}
        rules_str += ('ACTION==\"remove\", SUBSYSTEM==\"drivers\", KERNEL==\"'
                      'qeth\", IMPORT{program}=\"collect --remove %s\"\n'
                      ) % sub_str
        rules_str += ('ACTION==\"remove\", SUBSYSTEM==\"ccw\", KERNEL==\"'
                      '%(read)s\", IMPORT{program}=\"collect --remove '
                      '%(channel)s\"\n'
                      ) % {'read': channel_read, 'channel': sub_str}
        rules_str += ('ACTION==\"remove\", SUBSYSTEM==\"ccw\", KERNEL==\"'
                      '%(write)s\", IMPORT{program}=\"collect --remove '
                      '%(channel)s\"\n'
                      ) % {'write': channel_write, 'channel': sub_str}
        rules_str += ('ACTION==\"remove\", SUBSYSTEM==\"ccw\", KERNEL==\"'
                      '%(data)s\", IMPORT{program}=\"collect --remove '
                      '%(channel)s\"\n'
                      ) % {'data': channel_data, 'channel': sub_str}
        rules_str += ('TEST==\"[ccwgroup/%(read)s]\", GOTO=\"qeth-%(read)s'
                      '-end\"\n') % {'read': channel_read}
        rules_str += ('ACTION==\"add\", SUBSYSTEM==\"ccw\", ENV{COLLECT_'
                      '%(read)s}==\"0\", ATTR{[drivers/ccwgroup:qeth]group}=\"'
                      '%(read)s,%(write)s,%(data)s\"\n') % {
            'read': channel_read,
            'write': channel_write,
            'data': channel_data}
        rules_str += ('ACTION==\"add\", SUBSYSTEM==\"drivers\", KERNEL==\"qeth'
                      '\", ENV{COLLECT_%(read)s}==\"0\", ATTR{[drivers/'
                      'ccwgroup:qeth]group}=\"%(read)s,%(write)s,%(data)s\"\n'
                      'LABEL=\"qeth-%(read)s-end\"\n') % {
            'read': channel_read,
            'write': channel_write,
            'data': channel_data}
        rules_str += ('ACTION==\"add\", SUBSYSTEM==\"ccwgroup\", KERNEL=='
                      '\"%s\", ATTR{layer2}=\"1\"\n') % channel_read
        rules_str += ('ACTION==\"add\", SUBSYSTEM==\"ccwgroup\", KERNEL=='
                      '\"%s\", ATTR{online}=\"1\"\n') % channel_read
        return rules_str

    def get_scp_string(self, root, fcp, wwpn, lun):
        return ("=root=%(root)s "
                "zfcp.device=0.0.%(fcp)s,0x%(wwpn)s,0x%(lun)s") % {
            'root': root, 'fcp': fcp, 'wwpn': wwpn, 'lun': lun}

    def get_zipl_script_lines(self, image, ramdisk, root, fcp, wwpn, lun):
        return ['#!/bin/bash\n',
                ('echo -e "[defaultboot]\\n'
                 'default=boot-from-volume\\n'
                 '[boot-from-volume]\\n'
                 'image=%(image)s\\n'
                 'target = /boot/zipl\\n'
                 'ramdisk=%(ramdisk)s\\n'
                 'parameters=\\"root=%(root)s '
                 'zfcp.device=0.0.%(fcp)s,0x%(wwpn)s,0x%(lun)s\\""'
                 '>/etc/zipl_volume.conf\n'
                 'mkinitrd\n'
                 'zipl -c /etc/zipl_volume.conf') % {
                    'image': image, 'ramdisk': ramdisk, 'root': root,
                    'fcp': fcp, 'wwpn': wwpn, 'lun': lun}]

    def _enable_network_interface(self, device, ip, broadcast):
        return ''

    def _get_clean_command(self):
        files = os.path.join(self._get_network_file_path(),
                             self._get_all_device_filename())
        cmd = '\nrm -f %s\n' % files
        all_udev_rules_files = '/etc/udev/rules.d/51-qeth-0.0.*'
        cmd += 'rm -f %s\n' % all_udev_rules_files
        cmd += '> /boot/zipl/active_devices.txt\n'
        return cmd

    def _delete_vdev_info(self, vdev):
        """handle udev rules file."""
        vdev = vdev.lower()
        rules_file_name = '/etc/udev/rules.d/51-qeth-0.0.%s.rules' % vdev
        cmd = 'rm -f %s\n' % rules_file_name
        address = '0.0.%s' % str(vdev).zfill(4)
        udev_file_name = '/etc/udev/rules.d/70-persistent-net.rules'
        cmd += "sed -i '/%s/d' %s\n" % (address, udev_file_name)
        cmd += "sed -i '/%s/d' %s\n" % (address,
                                        '/boot/zipl/active_devices.txt')
        return cmd

    def get_volume_attach_configuration_cmds(self, fcp_list, target_wwpns,
                                             target_lun, multipath,
                                             mount_point):
        """sles attach script generation"""
        template = self.get_template("volumeops", "sles_attach_volume.j2")
        target_filename = mount_point.replace('/dev/', '')
        # TODO(bill): also consider is first attach or not
        content = template.render(fcp_list=fcp_list,
                                  wwpns=target_wwpns,
                                  lun=target_lun,
                                  target_filename=target_filename)
        return content

    def get_volume_detach_configuration_cmds(self, fcp_list, target_wwpns,
                                             target_lun, multipath,
                                             mount_point, connections):
        """sles detach script generation"""
        # connections == 0 means this is the last volume, so the script
        # must also take the FCP devices offline.
        if connections > 0:
            is_last_volume = 0
        else:
            is_last_volume = 1
        template = self.get_template("volumeops", "sles_detach_volume.j2")
        target_filename = mount_point.replace('/dev/', '')
        content = template.render(fcp_list=fcp_list,
                                  wwpns=target_wwpns,
                                  lun=target_lun,
                                  target_filename=target_filename,
                                  is_last_volume=is_last_volume)
        return content
class sles11(sles):
    """SLES 11: sysvinit-style service commands."""

    def get_znetconfig_contents(self):
        steps = ('cio_ignore -R',
                 'znetconf -R -n',
                 'sleep 2',
                 'udevadm trigger',
                 'udevadm settle',
                 'sleep 2',
                 'znetconf -A',
                 'service network restart',
                 'cio_ignore -u')
        return '\n'.join(steps)

    def create_active_net_interf_cmd(self):
        return 'service zvmguestconfigure start'


class sles12(sles):
    """SLES 12: wicked-managed networking and systemd services."""

    def get_znetconfig_contents(self):
        remove_route = 'rm -f %s/ifroute-eth*' % self._get_network_file_path()
        steps = ('cio_ignore -R',
                 'znetconf -R -n',
                 'sleep 2',
                 remove_route,
                 'udevadm trigger',
                 'udevadm settle',
                 'sleep 2',
                 'znetconf -A',
                 'cio_ignore -u',
                 'wicked ifreload all')
        return '\n'.join(steps)

    def get_scp_string(self, root, fcp, wwpn, lun):
        # lun scanning is disabled so only the configured LUN shows up
        return ("=root=%(root)s zfcp.allow_lun_scan=0 "
                "zfcp.device=0.0.%(fcp)s,0x%(wwpn)s,0x%(lun)s") % {
            'root': root, 'fcp': fcp, 'wwpn': wwpn, 'lun': lun}

    def get_zipl_script_lines(self, image, ramdisk, root, fcp, wwpn, lun):
        return ['#!/bin/bash\n',
                ('echo -e "[defaultboot]\\n'
                 'default=boot-from-volume\\n'
                 '[boot-from-volume]\\n'
                 'image=%(image)s\\n'
                 'target = /boot/zipl\\n'
                 'ramdisk=%(ramdisk)s\\n'
                 'parameters=\\"root=%(root)s '
                 'zfcp.device=0.0.%(fcp)s,0x%(wwpn)s,0x%(lun)s '
                 'zfcp.allow_lun_scan=0\\""'
                 '>/etc/zipl_volume.conf\n'
                 'mkinitrd\n'
                 'zipl -c /etc/zipl_volume.conf') % {
                    'image': image, 'ramdisk': ramdisk, 'root': root,
                    'fcp': fcp, 'wwpn': wwpn, 'lun': lun}]

    def create_active_net_interf_cmd(self):
        return 'systemctl start zvmguestconfigure.service'

    def _enable_network_interface(self, device, ip, broadcast):
        if len(broadcast) > 0:
            activeIP_str = 'ip addr add %s broadcast %s dev %s\n' % (
                ip, broadcast, device)
        else:
            activeIP_str = 'ip addr add %s dev %s\n' % (ip, device)
        activeIP_str += 'ip link set dev %s up\n' % device
        return activeIP_str


class sles15(sles12):
    """docstring for sles15"""

    def get_znetconfig_contents(self):
        # Same flow as sles12, plus rewriting the static DNS servers in
        # /etc/sysconfig/network/config when any were configured.
        remove_route = 'rm -f %s/ifroute-eth*' % self._get_network_file_path()
        replace_var = 'NETCONFIG_DNS_STATIC_SERVERS'
        replace_file = '/etc/sysconfig/network/config'
        remove_dns_cfg = "sed -i '/^\s*%s=\"/d' %s" % (replace_var,
                                                       replace_file)
        if self.dns_v4:
            dns_addrs = ' '.join(self.dns_v4)
            netconfig_dns = '%s="%s"' % (replace_var, dns_addrs)
            set_dns = "echo '%s' >> %s" % (netconfig_dns, replace_file)
            return '\n'.join(('cio_ignore -R',
                              'znetconf -R -n',
                              'sleep 2',
                              remove_route,
                              remove_dns_cfg,
                              set_dns,
                              'udevadm trigger',
                              'udevadm settle',
                              'sleep 2',
                              'znetconf -A',
                              'cio_ignore -u',
                              'wicked ifreload all'))
        else:
            return '\n'.join(('cio_ignore -R',
                              'znetconf -R -n',
                              'sleep 2',
                              remove_route,
                              remove_dns_cfg,
                              'udevadm trigger',
                              'udevadm settle',
                              'sleep 2',
                              'znetconf -A',
                              'cio_ignore -u',
                              'wicked ifreload all'))


class ubuntu(LinuxDist):
    """Ubuntu with classic /etc/network/interfaces configuration."""

    def create_network_configuration_files(self, file_path, guest_networks,
                                           first, active=False):
        """Generate network configuration files for guest vm

        :param list guest_networks: a list of network info for the guest,
               one dict per network; keys may include 'ip_addr',
               'dns_addr' (list), 'gateway_addr', 'cidr' and 'nic_vdev'.
        """
        cfg_files = []
        cmd_strings = ''
        network_config_file_name = self._get_network_file()
        network_cfg_str = 'auto lo\n'
        network_cfg_str += 'iface lo inet loopback\n'
        net_enable_cmd = ''
        if first:
            clean_cmd = self._get_clean_command()
        else:
            clean_cmd = ''
            network_cfg_str = ''
        for network in guest_networks:
            base_vdev = network['nic_vdev'].lower()
            network_hw_config_fname = self._get_device_filename(base_vdev)
            network_hw_config_str = self._get_network_hw_config_str(base_vdev)
            cfg_files.append((network_hw_config_fname, network_hw_config_str))
            (cfg_str, dns_str) = self._generate_network_configuration(
                network, base_vdev)
            LOG.debug('Network configure file content is: %s', cfg_str)
            network_cfg_str += cfg_str
            if len(dns_str) > 0:
                network_cfg_str += dns_str
        if first:
            cfg_files.append((network_config_file_name, network_cfg_str))
        else:
            cmd_strings = ('echo "%s" >>%s\n' % (network_cfg_str,
                                                 network_config_file_name))
        return cfg_files, cmd_strings, clean_cmd, net_enable_cmd

    def get_network_configuration_files(self, vdev):
        vdev = vdev.lower()
        network_hw_config_fname = self._get_device_filename(vdev)
        return network_hw_config_fname

    def delete_vdev_info(self, vdev):
        cmd = self._delete_vdev_info(vdev)
        return cmd

    def _delete_vdev_info(self, vdev):
        """handle vdev related info."""
        # The stanza is 6 lines with dns-nameservers, 5 without; probe
        # with awk before deciding how many lines sed should delete.
        vdev = vdev.lower()
        network_config_file_name = self._get_network_file()
        device = self._get_device_name(vdev)
        cmd = '\n'.join(("num=$(sed -n '/auto %s/=' %s)" % (
                            device, network_config_file_name),
                         "dns=$(awk 'NR==(\"\'$num\'\"+6)&&"
                         "/dns-nameservers/' %s)" % network_config_file_name,
                         "if [[ -n $dns ]]; then",
                         " sed -i '/auto %s/,+6d' %s" % (
                            device, network_config_file_name),
                         "else",
                         " sed -i '/auto %s/,+5d' %s" % (
                            device, network_config_file_name),
                         "fi"))
        return cmd

    def _get_network_file(self):
        return '/etc/network/interfaces'

    def _get_cfg_str(self, device, broadcast_v4, gateway_v4, ip_v4,
                     netmask_v4, mtu):
        cfg_str = 'auto ' + device + '\n'
        cfg_str += 'iface ' + device + ' inet static\n'
        cfg_str += 'address ' + ip_v4 + '\n'
        cfg_str += 'netmask ' + netmask_v4 + '\n'
        cfg_str += 'broadcast ' + broadcast_v4 + '\n'
        cfg_str += 'gateway ' + gateway_v4 + '\n'
        cfg_str += 'mtu ' + mtu + '\n'
        return cfg_str

    def _generate_network_configuration(self, network, vdev):
        ip_v4 = dns_str = gateway_v4 = ''
        netmask_v4 = broadcast_v4 = ''
        if (('ip_addr' in network.keys()) and
                (network['ip_addr'] is not None)):
            ip_v4 = network['ip_addr']
        if (('gateway_addr' in network.keys()) and
                (network['gateway_addr'] is not None)):
            gateway_v4 = network['gateway_addr']
        if (('dns_addr' in network.keys()) and
                (network['dns_addr'] is not None) and
                (len(network['dns_addr']) > 0)):
            for dns in network['dns_addr']:
                dns_str += 'dns-nameservers ' + dns + '\n'
        if (('cidr' in network.keys()) and
                (network['cidr'] is not None)):
            ip_cidr = network['cidr']
            netmask_v4 = str(netaddr.IPNetwork(ip_cidr).netmask)
            broadcast_v4 = str(netaddr.IPNetwork(ip_cidr).broadcast)
            if broadcast_v4 == 'None':
                broadcast_v4 = ''
        if (('mtu' in network.keys()) and
                (network['mtu'] is not None)):
            mtu = str(network['mtu'])
        device = self._get_device_name(vdev)
        cfg_str = self._get_cfg_str(device, broadcast_v4, gateway_v4,
                                    ip_v4, netmask_v4, mtu)
        return cfg_str, dns_str

    def _get_route_str(self, gateway_v4):
        return ''

    def _get_cmd_str(self, address_read, address_write, address_data):
        return ''

    def _enable_network_interface(self, device, ip):
        return ''

    def _get_device_name(self, device_num):
        return 'enc' + str(device_num)

    def _get_dns_filename(self):
        return ''

    def _get_device_filename(self, device_num):
        return '/etc/sysconfig/hardware/config-ccw-0.0.' + str(device_num)

    def _get_network_hw_config_str(self, base_vdev):
        # read/write/data subchannels at vdev, vdev+1, vdev+2
        ccwgroup_chans_str = ' '.join((
            '0.0.' + str(hex(int(base_vdev, 16)))[2:],
            '0.0.' + str(hex(int(base_vdev, 16) + 1))[2:],
            '0.0.' + str(hex(int(base_vdev, 16) + 2))[2:]))
        return '\n'.join(('CCWGROUP_CHANS=(' + ccwgroup_chans_str + ')',
                          'QETH_OPTIONS=layer2'))

    def _get_network_file_path(self):
        pass

    def get_znetconfig_contents(self):
        return '\n'.join(('cio_ignore -R',
                          'znetconf -R -n',
                          'sleep 2',
                          'udevadm trigger',
                          'udevadm settle',
                          'sleep 2',
                          'znetconf -A',
                          '/etc/init.d/networking restart',
                          'cio_ignore -u'))

    def _get_udev_configuration(self, device, dev_channel):
        return ''

    def _append_udev_info(self, cmd_str, cfg_files, file_name_route,
                          route_cfg_str, udev_cfg_str, first=False):
        return cmd_str

    def get_scp_string(self, root, fcp, wwpn, lun):
        pass

    def get_zipl_script_lines(self, image, ramdisk, root, fcp, wwpn, lun):
        pass

    def _get_udev_rules(self, channel_read, channel_write, channel_data):
        """construct udev rules info."""
        return ''

    def _append_udev_rules_file(self, cfg_files, base_vdev):
        pass

    def create_active_net_interf_cmd(self):
        return "systemctl start zvmguestconfigure.service"

    def _get_clean_command(self):
        files = self._get_device_filename('*')
        cmd = '\nrm -f %s\n' % files
        return cmd

    def _check_multipath_tools(self):
        multipath = 'multipath'
        return multipath

    def _format_lun(self, lun):
        """ubuntu"""
        target_lun = int(lun[2:6], 16)
        return target_lun

    def get_volume_attach_configuration_cmds(self, fcp_list, target_wwpns,
                                             target_lun, multipath,
                                             mount_point):
        """ubuntu attach script generation"""
        template = self.get_template("volumeops", "ubuntu_attach_volume.j2")
        target_filename = mount_point.replace('/dev/', '')
        # the parameter 'target_lun' is hex for either v7k or ds8k:
        # for v7k, target_lun[2] == '0' and target_lun[6:] == '0'
        # for ds8k, target_lun[2] == '4'
        # in the future, we add support to other storage provider whose lun
        # id may use bits in target_lun[6:], such as, 0x0003040200000000
        # when attach v7k volume:
        # 1. if the lun id less than 256,
        # the file under /dev/disk/by-path/ will as below,
        # take 'lun id = 0' as example:
        # ccw-0.0.5c03-fc-0x5005076802400c1a-lun-0, the lun id is decimal.
        # 2. if the lun id is equal or more than 256,
        # the file under /dev/disk/by-path/ will as below,
        # take 'lun id = 256' as example:
        # ccw-0.0.1a0d-fc-0x500507680b26bac7-lun-0x0100000000000000,
        # the lun id is hex.
        # when attach ds8k volume:
        # the file under /dev/disk/by-path/ will as below,
        # take "volume id 140c" as example:
        # ccw-0.0.1a0d-fc-0x5005076306035388-lun-0x4014400c00000000,
        # the lun id is always hex.
        lun = self._format_lun(target_lun)
        if all([x == '0' for x in target_lun[6:]]) and lun < 256:
            lun_id = lun
        else:
            lun_id = target_lun
        # TODO(bill): also consider is first attach or not
        content = template.render(fcp_list=fcp_list,
                                  wwpns=target_wwpns,
                                  lun=target_lun,
                                  lun_id=lun_id,
                                  target_filename=target_filename)
        return content

    def get_volume_detach_configuration_cmds(self, fcp_list, target_wwpns,
                                             target_lun, multipath,
                                             mount_point, connections):
        """ubuntu detach script generation"""
        # connections == 0 means this is the last volume, so the script
        # must also take the FCP devices offline.
        if connections > 0:
            is_last_volume = 0
        else:
            is_last_volume = 1
        template = self.get_template("volumeops", "ubuntu_detach_volume.j2")
        target_filename = mount_point.replace('/dev/', '')
        lun = self._format_lun(target_lun)
        if all([x == '0' for x in target_lun[6:]]) and lun < 256:
            lun_id = lun
        else:
            lun_id = target_lun
        content = template.render(fcp_list=fcp_list,
                                  wwpns=target_wwpns,
                                  lun=target_lun,
                                  lun_id=lun_id,
                                  target_filename=target_filename,
                                  is_last_volume=is_last_volume)
        return content


class ubuntu16(ubuntu):
    pass
class ubuntu20(ubuntu):
    """Ubuntu 20.04: netplan-based network configuration."""

    def _get_device_filename(self, device_num):
        return '/etc/netplan/' + str(device_num) + '.yaml'

    def _get_network_file(self):
        return '/etc/netplan/00-zvmguestconfigure-config.yaml'

    def _get_network_file_path(self):
        return '/etc/netplan/'

    def get_znetconfig_contents(self):
        steps = ('cio_ignore -R',
                 'znetconf -R -n',
                 'sleep 2',
                 'udevadm trigger',
                 'udevadm settle',
                 'sleep 2',
                 'znetconf -A',
                 'netplan apply',
                 'cio_ignore -u')
        return '\n'.join(steps)

    def create_network_configuration_files(self, file_path, guest_networks,
                                           first, active=False):
        """Generate network configuration files for guest vm

        :param list guest_networks: a list of network info for the guest,
               one dict per network; keys may include 'ip_addr',
               'dns_addr' (list), 'gateway_addr', 'cidr' and 'nic_vdev'.
        """
        cfg_files = []
        cmd_strings = ''
        network_config_file_name = self._get_network_file()
        net_enable_cmd = ''
        if first:
            clean_cmd = self._get_clean_command()
        else:
            clean_cmd = ''
        for network in guest_networks:
            base_vdev = network['nic_vdev'].lower()
            (cfg_str) = self._generate_network_configuration(network,
                                                             base_vdev)
            LOG.debug('Network configure file content is: %s', cfg_str)
            if first:
                cfg_files.append((network_config_file_name, cfg_str))
            else:
                # TODO: create interface with cmd_strings after VM deployed
                raise Exception('Ubuntu20 is not supported to create interface'
                                'after VM deployed.')
        return cfg_files, cmd_strings, clean_cmd, net_enable_cmd

    def _generate_network_configuration(self, network, vdev):
        # Build the netplan dict (serialized to YAML by the caller).
        ip_v4 = dns_str = gateway_v4 = ''
        cidr = mtu = ''
        dns_v4 = []
        if (('ip_addr' in network.keys()) and
                (network['ip_addr'] is not None)):
            ip_v4 = network['ip_addr']
        if (('gateway_addr' in network.keys()) and
                (network['gateway_addr'] is not None)):
            gateway_v4 = network['gateway_addr']
        if (('dns_addr' in network.keys()) and
                (network['dns_addr'] is not None) and
                (len(network['dns_addr']) > 0)):
            for dns in network['dns_addr']:
                dns_str += 'nameserver ' + dns + '\n'
                dns_v4.append(dns)
        if (('cidr' in network.keys()) and
                (network['cidr'] is not None)):
            cidr = network['cidr'].split('/')[1]
        if (('mtu' in network.keys()) and
                (network['mtu'] is not None)):
            mtu = str(network['mtu'])
        device = self._get_device_name(vdev)
        if dns_v4:
            cfg_str = {'network':
                       {'ethernets':
                        {device: {'addresses': [ip_v4 + '/' + cidr],
                                  'gateway4': gateway_v4,
                                  'mtu': mtu,
                                  'nameservers': {'addresses': dns_v4}
                                  }
                         },
                        'version': 2
                        }
                       }
        else:
            cfg_str = {'network':
                       {'ethernets':
                        {device: {'addresses': [ip_v4 + '/' + cidr],
                                  'gateway4': gateway_v4,
                                  'mtu': mtu
                                  }
                         },
                        'version': 2
                        }
                       }
        return cfg_str


class ubuntu22(ubuntu20):
    pass


class LinuxDistManager(object):
    """Map an os_version string like 'rhel8.4' to its distro class."""

    def get_linux_dist(self, os_version):
        distro, release = self.parse_dist(os_version)
        # distro classes are defined at module scope, e.g. rhel8, sles15
        return globals()[distro + release]

    def _parse_release(self, os_version, distro, remain):
        supported = {'rhel': ['6', '7', '8', '9'],
                     'sles': ['11', '12', '15'],
                     'ubuntu': ['16', '20', '22'],
                     'rhcos': ['4']}
        releases = supported[distro]
        for r in releases:
            if remain.startswith(r):
                return r
        else:
            msg = 'Can not handle os: %s' % os_version
            raise exception.ZVMException(msg=msg)

    def parse_dist(self, os_version):
        """Separate os and version from os_version.

        Possible return value are only:
        ('rhel', x.y) and ('sles', x.y) where x.y may not be digits
        """
        supported = {'rhel': ['rhel', 'redhat', 'red hat'],
                     'sles': ['suse', 'sles'],
                     'ubuntu': ['ubuntu'],
                     'rhcos': ['rhcos', 'coreos', 'red hat coreos']}
        os_version = os_version.lower()
        for distro, patterns in supported.items():
            for i in patterns:
                if os_version.startswith(i):
                    # Not guarrentee the version is digital
                    remain = os_version.split(i, 2)[1]
                    release = self._parse_release(os_version, distro,
                                                  remain)
                    return distro, release
        msg = 'Can not handle os: %s' % os_version
        raise exception.ZVMException(msg=msg)
""" msg_fmt = "z/VM SDK error: %(msg)s" code = 500 headers = {} safe = False def __init__(self, message=None, results=None, **kwargs): self.results = results self.kw = kwargs if 'code' in self.kw: try: self.kw['code'] = self.code except AttributeError: pass if not message: try: message = self.msg_fmt % kwargs except Exception: LOG.exception('Exception in string format operation') for name, value in six.iteritems(kwargs): LOG.error("%s: %s" % (name, value)) message = self.msg_fmt self.message = message super(SDKBaseException, self).__init__(message) def format_message(self): return self.args[0] class ZVMException(SDKBaseException): msg_fmt = 'ZVMException happened: %(msg)s' class ZVMNetworkError(SDKBaseException): msg_fmt = "z/VM network error: %(msg)s" class ZVMVirtualMachineNotExist(SDKBaseException): msg_fmt = 'Virtual machine %(userid)s does not exist in %(zvm_host)s' class NotFound(SDKBaseException): msg_fmt = 'The resource can not be found' class InvalidName(SDKBaseException): msg_fmt = 'Invalid name provided, reason is %(reason)s' class ValidationError(SDKBaseException): safe = True code = 400 msg_fmt = 'Validation error: %(detail)s' class ZVMUnauthorized(SDKBaseException): msg_fmt = 'Not authorized to execute' code = 401 class ZVMNotFound(SDKBaseException): def __init__(self, msg, modID='zvmsdk'): rc = returncode.errors['notExist'] results = rc[0] results['modID'] = returncode.ModRCs[modID] results['rs'] = 1 errormsg = rc[1][2] % {'msg': msg} super(ZVMNotFound, self).__init__(results=results, message=errormsg) class SDKDatabaseException(SDKBaseException): msg_fmt = "SDK database error: %(msg)s" class SDKInvalidInputNumber(SDKBaseException): def __init__(self, api, expected, provided): rc = returncode.errors['input'] results = rc[0] results['modID'] = returncode.ModRCs['zvmsdk'] results['rs'] = 1 errormsg = rc[1][1] % {'api': api, 'expected': expected, 'provided': provided} super(SDKInvalidInputNumber, self).__init__(results=results, message=errormsg) class 
SDKInvalidInputTypes(SDKBaseException): def __init__(self, api, expected, inputtypes): rc = returncode.errors['input'] results = rc[0] results['modID'] = returncode.ModRCs['zvmsdk'] results['rs'] = 2 errormsg = rc[1][2] % {'api': api, 'expected': expected, 'inputtypes': inputtypes} super(SDKInvalidInputTypes, self).__init__(results=results, message=errormsg) class SDKInvalidInputFormat(SDKBaseException): def __init__(self, msg): rc = returncode.errors['input'] results = rc[0] results['modID'] = returncode.ModRCs['zvmsdk'] results['rs'] = 3 errormsg = rc[1][3] % {'msg': msg} super(SDKInvalidInputFormat, self).__init__(results=results, message=errormsg) class SDKMissingRequiredInput(SDKBaseException): def __init__(self, msg): rc = returncode.errors['input'] results = rc[0] results['modID'] = returncode.ModRCs['zvmsdk'] results['rs'] = 4 errormsg = rc[1][4] % {'msg': msg} super(SDKInvalidInputFormat, self).__init__(results=results, message=errormsg) class SDKInternalError(SDKBaseException): def __init__(self, msg, modID='zvmsdk', results=None): # if results is set, it means the internal error comes from # smt module, we need to keep the rc/rs value from SMT rc = returncode.errors['internal'] errormsg = rc[1][1] % {'msg': msg} if results is None: results = rc[0] results['rs'] = 1 results['modID'] = returncode.ModRCs[modID] else: # SMT internal error # Reset the overallRC in results to the overallRC value # corresponding to internal error results['overallRC'] = (rc[0]['overallRC']) results['modID'] = returncode.ModRCs['smt'] super(SDKInternalError, self).__init__(results=results, message=errormsg) class SDKConflictError(SDKBaseException): def __init__(self, modID, rs, **kwargs): # kwargs can be used to contain different keyword for constructing # the rs error msg rc = returncode.errors['conflict'] results = rc[0] results['modID'] = returncode.ModRCs[modID] results['rs'] = rs errormsg = rc[1][rs] % kwargs super(SDKConflictError, self).__init__(results=results, 
message=errormsg) class SDKObjectNotExistError(SDKBaseException): def __init__(self, obj_desc, modID='zvmsdk', rs=1): rc = returncode.errors['notExist'] results = rc[0] results['modID'] = returncode.ModRCs[modID] results['rs'] = rs errormsg = rc[1][rs] % {'obj_desc': obj_desc} super(SDKObjectNotExistError, self).__init__(results=results, message=errormsg) class SDKObjectAlreadyExistError(SDKBaseException): """The object to create or add is already exist in ZCC.""" def __init__(self, obj_desc, modID='zvmsdk', rs=1): rc = returncode.errors['alreadyExist'] results = rc[0] results['modID'] = returncode.ModRCs[modID] results['rs'] = rs errormsg = rc[1][rs] % {'obj_desc': obj_desc} super(SDKObjectAlreadyExistError, self).__init__(results=results, message=errormsg) class SDKSMTRequestFailed(SDKBaseException): def __init__(self, results, msg): results['modID'] = returncode.ModRCs['smt'] super(SDKSMTRequestFailed, self).__init__(results=results, message=msg) class SDKGuestOperationError(SDKBaseException): def __init__(self, rs, **kwargs): # kwargs can be used to contain different keyword for constructing # the rs error msg rc = returncode.errors['guest'] results = rc[0] results['rs'] = rs errormsg = rc[1][rs] % kwargs super(SDKGuestOperationError, self).__init__(results=results, message=errormsg) class SDKNetworkOperationError(SDKBaseException): def __init__(self, rs, **kwargs): # kwargs can be used to contain different keyword for constructing # the rs error msg rc = returncode.errors['network'] results = rc[0] results['rs'] = rs errormsg = rc[1][rs] % kwargs super(SDKNetworkOperationError, self).__init__(results=results, message=errormsg) class SDKImageOperationError(SDKBaseException): def __init__(self, rs, **kwargs): # kwargs can be used to contain different keyword for constructing # the rs error msg rc = returncode.errors['image'] results = rc[0] results['rs'] = rs errormsg = rc[1][rs] % kwargs results['strError'] = errormsg super(SDKImageOperationError, 
self).__init__(results=results, message=errormsg) class SDKVolumeOperationError(SDKBaseException): def __init__(self, rs, **kwargs): # kwargs can be used to contain different keyword for constructing # the rs error msg rc = returncode.errors['volume'] results = rc[0] results['rs'] = rs errormsg = rc[1][rs] % kwargs results['strError'] = errormsg super(SDKVolumeOperationError, self).__init__(results=results, message=errormsg) class SDKFunctionNotImplementError(SDKBaseException): def __init__(self, func, modID='guest'): # kwargs can be used to contain different keyword for constructing # the rs error msg rc = returncode.errors['serviceNotSupport'] results = rc[0] results['modID'] = modID results['rs'] = 1 errormsg = rc[1][1] % {'func': func} results['strError'] = errormsg super(SDKFunctionNotImplementError, self).__init__(results=results, message=errormsg) class SDKRetryException(SDKBaseException): msg_fmt = 'Retry exception' zVMCloudConnector-1.6.3/zvmsdk/networkops.py0000664000175000017510000003127214263437505020643 0ustar ruirui00000000000000# Copyright 2017,2021 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os
import shutil
import tarfile

import yaml

from zvmsdk import config
from zvmsdk import dist
from zvmsdk import log
from zvmsdk import smtclient


_NetworkOPS = None
CONF = config.CONF
LOG = log.LOG


def get_networkops():
    # Lazily construct a module-level NetworkOPS singleton.
    global _NetworkOPS
    if _NetworkOPS is None:
        _NetworkOPS = NetworkOPS()
    return _NetworkOPS


class NetworkOPS(object):
    """Configuration check and manage MAC address API
    oriented towards SDK driver
    """
    def __init__(self):
        self._smtclient = smtclient.get_smtclient()
        self._dist_manager = dist.LinuxDistManager()

    def create_nic(self, userid, vdev=None, nic_id=None,
                   mac_addr=None, active=False):
        """Create a NIC on the guest; delegates to the SMT client."""
        return self._smtclient.create_nic(userid, vdev=vdev, nic_id=nic_id,
                                          mac_addr=mac_addr, active=active)

    def get_vswitch_list(self):
        """Return the list of vswitches known to the host."""
        return self._smtclient.get_vswitch_list()

    def couple_nic_to_vswitch(self, userid, nic_vdev, vswitch_name,
                              active=False, vlan_id=-1):
        """Couple an existing NIC to a vswitch."""
        self._smtclient.couple_nic_to_vswitch(userid, nic_vdev, vswitch_name,
                                              active=active, vlan_id=vlan_id)

    def uncouple_nic_from_vswitch(self, userid, nic_vdev, active=False):
        """Uncouple a NIC from whatever vswitch it is coupled to."""
        self._smtclient.uncouple_nic_from_vswitch(userid, nic_vdev,
                                                  active=active)

    def add_vswitch(self, name, rdev=None, controller='*',
                    connection='CONNECT', network_type='ETHERNET',
                    router="NONROUTER", vid='UNAWARE', port_type='ACCESS',
                    gvrp='GVRP', queue_mem=8, native_vid=1, persist=True):
        """Define a new vswitch with the given attributes."""
        self._smtclient.add_vswitch(name, rdev=rdev, controller=controller,
                                    connection=connection,
                                    network_type=network_type,
                                    router=router, vid=vid,
                                    port_type=port_type, gvrp=gvrp,
                                    queue_mem=queue_mem,
                                    native_vid=native_vid,
                                    persist=persist)

    def grant_user_to_vswitch(self, vswitch_name, userid):
        """Grant a userid access to the vswitch."""
        self._smtclient.grant_user_to_vswitch(vswitch_name, userid)

    def revoke_user_from_vswitch(self, vswitch_name, userid):
        """Revoke a userid's access; failures are deliberately ignored."""
        try:
            self._smtclient.revoke_user_from_vswitch(vswitch_name, userid)
        except Exception as e:
            # TODO: for APARs VM65925, VM65926, and VM65931 applied or z/VM 7.1
            # this call won't be needed, so we can avoid raise exception
            # and let it be as some configuration may block this API call.
            LOG.debug('Error ignored: %s', str(e))

    def set_vswitch_port_vlan_id(self, vswitch_name, userid, vlan_id):
        """Set the VLAN id of a userid's port on the vswitch."""
        self._smtclient.set_vswitch_port_vlan_id(vswitch_name, userid,
                                                 vlan_id)

    def set_vswitch(self, vswitch_name, **kwargs):
        """Update vswitch attributes (see SET_VSWITCH_KEYWORDS)."""
        self._smtclient.set_vswitch(vswitch_name, **kwargs)

    def delete_vswitch(self, vswitch_name, persist=True):
        """Delete the named vswitch."""
        self._smtclient.delete_vswitch(vswitch_name, persist)

    def delete_nic(self, userid, vdev, active=False):
        """Delete a NIC from the guest."""
        self._smtclient.delete_nic(userid, vdev, active=active)

    def network_configuration(self, userid, os_version, network_info,
                              active=False):
        """Generate and punch the guest's network configuration.

        RHCOS guests get a parameter temp file; other distros get a
        'network.doscript' tar punched to the reader.
        """
        if self._smtclient.is_rhcos(os_version):
            linuxdist = self._dist_manager.get_linux_dist(os_version)()
            linuxdist.create_coreos_parameter_temp_file(network_info, userid)
        else:
            network_file_path = self._smtclient.get_guest_temp_path(userid)
            LOG.debug('Creating folder %s to contain network configuration '
                      'files' % network_file_path)
            # check whether network interface has already been set for the
            # guest. If not, means this is the first time to set the network
            # interface
            first = self._smtclient.is_first_network_config(userid)
            (network_doscript, active_cmds) = self._generate_network_doscript(
                userid, os_version, network_info, network_file_path, first,
                active=active)
            fileClass = "X"
            try:
                self._smtclient.punch_file(userid, network_doscript,
                                           fileClass)
            finally:
                # Always clean up the temp folder, even if punch fails.
                LOG.debug('Removing the folder %s ', network_file_path)
                shutil.rmtree(network_file_path)
            # update guest db to mark the network is already set
            if first:
                self._smtclient.update_guestdb_with_net_set(userid)
            # using zvmguestconfigure tool to parse network_doscript
            if active:
                self._smtclient.execute_cmd(userid, active_cmds)

    # Prepare and create network doscript for instance
    def _generate_network_doscript(self, userid, os_version, network_info,
                                   network_file_path, first, active=False):
        """Build the doscript tarball; returns (tar path, active cmds)."""
        path_contents = []
        content_dir = {}
        files_map = []

        # Create network configuration files
        LOG.debug('Creating network configuration files '
                  'for guest %s in the folder %s' %
                  (userid, network_file_path))
        linuxdist = self._dist_manager.get_linux_dist(os_version)()
        files_and_cmds = linuxdist.create_network_configuration_files(
            network_file_path, network_info, first, active=active)
        (net_conf_files, net_conf_cmds,
         clean_cmd, net_enable_cmd) = files_and_cmds

        # Add network configure files to path_contents
        if len(net_conf_files) > 0:
            path_contents.extend(net_conf_files)

        # restart_cmds = ''
        # if active:
        #     restart_cmds = linuxdist.restart_network()
        net_cmd_file = self._create_znetconfig(net_conf_cmds,
                                               linuxdist,
                                               net_enable_cmd,
                                               active=active)
        # Add znetconfig file to path_contents
        if len(net_cmd_file) > 0:
            path_contents.extend(net_cmd_file)

        # Number each payload 0000, 0001, ... and remember its target path.
        for (path, contents) in path_contents:
            key = "%04i" % len(content_dir)
            files_map.append({'target_path': path,
                              'source_file': "%s" % key})
            content_dir[key] = contents
            file_name = os.path.join(network_file_path, key)
            if 'yaml' in path:
                self._add_yaml_file(file_name, contents)
            else:
                self._add_file(file_name, contents)

        self._create_invokeScript(network_file_path, clean_cmd, files_map)
        network_doscript = self._create_network_doscript(network_file_path)

        # get command about zvmguestconfigure
        active_cmds = ''
        if active:
            active_cmds = linuxdist.create_active_net_interf_cmd()
        return network_doscript, active_cmds

    def _add_file(self, file_name, data):
        # Write plain text payload to a staging file.
        with open(file_name, "w") as f:
            f.write(data)

    def _add_yaml_file(self, file_name, data):
        # Serialize a python object as YAML into a staging file.
        with open(file_name, 'w') as stream:
            yaml.dump(data, stream)

    def _create_znetconfig(self, commands, linuxdist, append_cmd,
                           active=False):
        """Assemble /tmp/znetconfig.sh; returns [] or one (path, text)."""
        LOG.debug('Creating znetconfig file')
        if active:
            znet_content = linuxdist.get_simple_znetconfig_contents()
        else:
            znet_content = linuxdist.get_znetconfig_contents()
        net_cmd_file = []
        if znet_content:
            if len(commands) == 0:
                znetconfig = '\n'.join(('#!/bin/bash', znet_content))
            else:
                znetconfig = '\n'.join(('#!/bin/bash', commands,
                                        'sleep 2', znet_content))
            if len(append_cmd) > 0:
                znetconfig += '\nsleep 2'
                znetconfig += '\n%s\n' % append_cmd
            # Script removes itself after it has run.
            znetconfig += '\nrm -rf /tmp/znetconfig.sh\n'
            # Create a temp file in instance to execute above commands
            net_cmd_file.append(('/tmp/znetconfig.sh', znetconfig))  # nosec
        return net_cmd_file

    def _create_invokeScript(self, network_file_path, commands,
                             files_map):
        """invokeScript: Configure zLinux os network

        invokeScript is included in the network.doscript, it is used to
        put the network configuration file to the directory where it
        belongs and call znetconfig to configure the network
        """
        LOG.debug('Creating invokeScript shell in the folder %s'
                  % network_file_path)
        invokeScript = "invokeScript.sh"

        conf = "#!/bin/bash \n"
        command = commands
        for file in files_map:
            target_path = file['target_path']
            source_file = file['source_file']
            # potential risk: whether target_path exist
            # using cat does not change the target file selinux file type
            command += 'cat ' + source_file + ' > ' + target_path + '\n'
        command += 'sleep 2\n'
        command += '/bin/bash /tmp/znetconfig.sh\n'
        command += 'rm -rf invokeScript.sh\n'

        scriptfile = os.path.join(network_file_path, invokeScript)
        with open(scriptfile, "w") as f:
            f.write(conf)
            f.write(command)

    def _create_network_doscript(self, network_file_path):
        """doscript: contains a invokeScript.sh which will do the special work

        The network.doscript contains network configuration files and it
        will be used by zvmguestconfigure to configure zLinux os network
        when it starts up
        """
        # Generate the tar package for punch
        LOG.debug('Creating network doscript in the folder %s'
                  % network_file_path)
        network_doscript = os.path.join(network_file_path,
                                        'network.doscript')
        tar = tarfile.open(network_doscript, "w")
        for file in os.listdir(network_file_path):
            file_name = os.path.join(network_file_path, file)
            tar.add(file_name, arcname=file)
        tar.close()
        return network_doscript

    def get_nic_info(self, userid=None, nic_id=None, vswitch=None):
        """Look up NIC records by userid, nic id and/or vswitch."""
        return self._smtclient.get_nic_info(userid=userid, nic_id=nic_id,
                                            vswitch=vswitch)

    def vswitch_query(self, vswitch_name):
        """Return detailed information about one vswitch."""
        return self._smtclient.query_vswitch(vswitch_name)

    def delete_network_configuration(self, userid, os_version, vdev,
                                     active=False):
        """Punch a DELxxxx.sh script that removes one interface's config."""
        network_file_path = self._smtclient.get_guest_temp_path(userid)
        linuxdist = self._dist_manager.get_linux_dist(os_version)()
        file = linuxdist.get_network_configuration_files(vdev)
        cmd = 'rm -f %s\n' % file
        cmd += linuxdist.delete_vdev_info(vdev)
        net_cmd_file = self._create_znetconfig(cmd, linuxdist, '',
                                               active=active)
        del_file = 'DEL%s.sh' % str(vdev).zfill(4)
        file_name = os.path.join(network_file_path, del_file)
        file_content = net_cmd_file[0][1]
        self._add_file(file_name, file_content)
        fileClass = "X"
        try:
            self._smtclient.punch_file(userid, file_name, fileClass)
        finally:
            # Always clean up the temp folder, even if punch fails.
            LOG.debug('Removing the folder %s ', network_file_path)
            shutil.rmtree(network_file_path)
        if active:
            active_cmds = linuxdist.create_active_net_interf_cmd()
            self._smtclient.execute_cmd(userid, active_cmds)

    def dedicate_OSA(self, userid, OSA_device, vdev=None, active=False):
        """Dedicate a real OSA device to the guest."""
        return self._smtclient.dedicate_OSA(userid, OSA_device,
                                            vdev=vdev, active=active)
zVMCloudConnector-1.6.3/zvmsdk/constants.py0000775000175000017510000001261414263437505020436 0ustar ruirui00000000000000# Copyright 2017,2021 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


HYPERVISOR_TYPE = 'zvm'
ARCHITECTURE = 's390x'
ALLOWED_VM_TYPE = 'zLinux'
PROV_METHOD = 'netboot'
ZVM_USER_DEFAULT_PRIVILEGE = 'G'
CONFIG_DRIVE_FORMAT = 'tgz'
DEFAULT_EPH_DISK_FMT = 'ext3'
DISK_FUNC_NAME = 'setupDisk'
# the count of lines that one FCP info has
FCP_INFO_LINES_PER_ITEM = 6

# Markers used to parse "rinv host" style output.
RINV_HOST_KEYWORDS = {
    "zcc_userid": "ZCC USERID:",
    "zvm_host": "z/VM Host:",
    "zhcp": "zHCP:",
    "cec_vendor": "CEC Vendor:",
    "cec_model": "CEC Model:",
    "hypervisor_os": "Hypervisor OS:",
    "hypervisor_name": "Hypervisor Name:",
    "architecture": "Architecture:",
    "lpar_cpu_total": "LPAR CPU Total:",
    "lpar_cpu_used": "LPAR CPU Used:",
    "lpar_memory_total": "LPAR Memory Total:",
    "lpar_memory_used": "LPAR Memory Used:",
    "lpar_memory_offline": "LPAR Memory Offline:",
    "ipl_time": "IPL Time:",
}

DISKPOOL_KEYWORDS = {
    "disk_total": "Total:",
    "disk_used": "Used:",
    "disk_available": "Free:",
}

DISKPOOL_VOLUME_KEYWORDS = {
    "diskpool_volumes": "Diskpool Volumes:",
}

# Keywords accepted by NetworkOPS.set_vswitch(**kwargs).
SET_VSWITCH_KEYWORDS = ["grant_userid", "user_vlan_id",
                        "revoke_userid", "real_device_address",
                        "port_name", "controller_name",
                        "connection_value", "queue_memory_limit",
                        "routing_value", "port_type", "persist",
                        "gvrp_value", "mac_id", "uplink",
                        "nic_userid", "nic_vdev", "lacp",
"interval", "group_rdev", "iptimeout", "port_isolation", "promiscuous", "MAC_protect", "VLAN_counters"] DEV_STATUS = {'0': 'Device is not active.', '1': 'Device is active.', '2': 'Device is a backup device'} DEV_ERROR = {'0': 'No error.', '1': 'Port name conflict.', '2': 'No layer 2 support.', '3': 'Real device does not exist.', '4': 'Real device is attached elsewhere.', '5': 'Real device is not compatible type.', '6': 'Initialization error.', '7': 'Stalled OSA.', '8': 'Stalled controller.', '9': 'Controller connection severed.', '10': 'Primary or secondary routing conflict.', '11': 'Device is offline.', '12': 'Device was detached.', '13': 'IP/Ethernet type mismatch.', '14': 'Insufficient memory in controller ' 'virtual machine.', '15': 'TCP/IP configuration conflict.', '16': 'No link aggregation support.', '17': 'OSA-E attribute mismatch.', '18': 'Reserved for future use.', '19': 'OSA-E is not ready.', '20': 'Reserved for future use.', '21': 'Attempting restart for device.', '22': 'Exclusive user error.', '23': 'Device state is invalid.', '24': 'Port number is invalid for device.', '25': 'No OSA connection isolation.', '26': 'EQID mismatch.', '27': 'Incompatible controller.', '28': 'BACKUP detached.', '29': 'BACKUP not ready.', '30': 'BACKUP attempting restart.', '31': 'EQID mismatch.', '32': 'No HiperSockets bridge support.', '33': 'HiperSockets bridge error.'} SWITCH_STATUS = {'1': 'Virtual switch defined.', '2': 'Controller not available.', '3': 'Operator intervention required.', '4': 'Disconnected.', '5': 'Virtual devices attached to controller. ' 'Normally a transient state.', '6': 'OSA initialization in progress. ' 'Normally a transient state.', '7': 'OSA device not ready', '8': 'OSA device ready.', '9': 'OSA devices being detached. ' 'Normally a transient state.', '10': 'Virtual switch delete pending. ' 'Normally a transient state.', '11': 'Virtual switch failover recovering. ' 'Normally a transient state.', '12': 'Autorestart in progress. 
' 'Normally a transient state.'} ZVM_VOLUMES_FILE = 'zvm_volumes' ZVM_VOLUME_STATUS = ['free', 'in-use'] VOLUME_MULTI_PASS = 'MULTI' POWER_STATE_ON = u'on' POWER_STATE_OFF = u'off' DATABASE_VOLUME = 'sdk_volume.sqlite' DATABASE_NETWORK = 'sdk_network.sqlite' DATABASE_GUEST = 'sdk_guest.sqlite' DATABASE_IMAGE = 'sdk_image.sqlite' DATABASE_FCP = 'sdk_fcp.sqlite' IMAGE_TYPE = { 'DEPLOY': 'netboot', 'CAPTURE': 'staging'} FILE_TYPE = { 'IMPORT': 'imported', 'EXPORT': 'exported'} SDK_DATA_PATH = '/var/lib/zvmsdk/' IUCV_AUTH_USERID_PATH = '/etc/zvmsdk/iucv_authorized_userid' zVMCloudConnector-1.6.3/zvmsdk/sdkwsgi/0000775000175000017510000000000014315232035017511 5ustar ruirui00000000000000zVMCloudConnector-1.6.3/zvmsdk/sdkwsgi/requestlog.py0000664000175000017510000000630514315210052022254 0ustar ruirui00000000000000# Copyright 2017 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Simple middleware for request logging.""" import logging from zvmsdk import log from zvmsdk.sdkwsgi import util LOG = log.LOG class RequestLog(object): """WSGI Middleware to write a simple request log to. 
    Borrowed from Paste Translogger
    """

    # %-style template filled from the dict built in _write_log().
    format = ('%(REMOTE_ADDR)s "%(REQUEST_METHOD)s %(REQUEST_URI)s" '
              'status: %(status)s length: %(bytes)s headers: %(headers)s '
              'exc_info: %(exc_info)s')

    def __init__(self, application):
        # The wrapped WSGI application.
        self.application = application

    def __call__(self, environ, start_response):
        # WSGI entry point: log the incoming request, then delegate.
        LOG.debug('Starting request: %s "%s %s"',
                  environ['REMOTE_ADDR'],
                  environ['REQUEST_METHOD'],
                  util.get_request_uri(environ))
        return self._log_and_call(environ, start_response)

    def _log_and_call(self, environ, start_response):
        """Wrap start_response so the response status/size get logged."""
        req_uri = util.get_request_uri(environ)

        def _local_response(status, headers, exc_info=None):
            size = None
            for name, value in headers:
                if name.lower() == 'content-length':
                    size = value
            # Decode a bytes X-Auth-Token header so it logs cleanly.
            for index, value in enumerate(headers):
                if value[0] == 'X-Auth-Token':
                    headers[index] = ('X-Auth-Token',
                                      value[1].decode('utf-8'))
                    break
            self._write_log(environ, req_uri, status, size, headers,
                            exc_info)
            return start_response(status, headers, exc_info)

        return self.application(environ, _local_response)

    def _force_debug(self, method, uri):
        # Requests that occur so frequently they are demoted to DEBUG.
        if method == 'POST' and uri == '/token':
            return True
        if method == 'GET' and uri == '/guests/nics':
            return True
        return False

    def _write_log(self, environ, req_uri, status, size, headers,
                   exc_info):
        """Emit one log line for a finished request (secrets masked)."""
        if size is None:
            size = '-'
        log_format = {
            'REMOTE_ADDR': environ.get('REMOTE_ADDR', '-'),
            'REQUEST_METHOD': environ['REQUEST_METHOD'],
            'REQUEST_URI': req_uri,
            'status': status.split(None, 1)[0],
            'bytes': size,
            'headers': util.mask_tuple_password(headers),
            'exc_info': exc_info
        }
        if LOG.isEnabledFor(logging.INFO):
            # POST '/token' and GET '/guests/nics'
            # too often, so we want to avoid them
            if self._force_debug(environ['REQUEST_METHOD'], req_uri):
                LOG.debug(self.format, log_format)
            else:
                LOG.info(self.format, log_format)
        else:
            LOG.debug(self.format, log_format)
zVMCloudConnector-1.6.3/zvmsdk/sdkwsgi/handlers/0000775000175000017510000000000014315232035021311 5ustar 
ruirui00000000000000zVMCloudConnector-1.6.3/zvmsdk/sdkwsgi/handlers/tokens.py0000664000175000017510000000721613575566551023220 0ustar ruirui00000000000000# Copyright 2017 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Handler for the root of the sdk API.""" import datetime import functools import jwt import os import threading from zvmsdk import config from zvmsdk import exception from zvmsdk import log from zvmsdk.sdkwsgi import util CONF = config.CONF LOG = log.LOG DEFAULT_TOKEN_VALIDATION_PERIOD = 3600 TOKEN_LOCK = threading.Lock() def get_admin_token(path): if os.path.exists(path): TOKEN_LOCK.acquire() try: with open(path, 'r') as fd: token = fd.read().strip() except Exception: LOG.debug('token file open failed.') raise exception.ZVMUnauthorized() finally: TOKEN_LOCK.release() else: LOG.debug('token configuration file not found.') raise exception.ZVMUnauthorized() return token @util.SdkWsgify def create(req): # Check if token validation closed if CONF.wsgi.auth.lower() == 'none': user_token = 'server-auth-closed' req.response.headers.add('X-Auth-Token', user_token) return req.response # Validation is open, so start to validate the admin-token if 'X-Admin-Token' not in req.headers: LOG.debug('no X-Admin-Token given in reqeust header') raise exception.ZVMUnauthorized() token_file_path = CONF.wsgi.token_path admin_token = get_admin_token(token_file_path) if (req.headers['X-Admin-Token'] != admin_token): LOG.debug('X-Admin-Token incorrect') raise 
exception.ZVMUnauthorized() expires = CONF.wsgi.token_validation_period if expires < 0: expires = DEFAULT_TOKEN_VALIDATION_PERIOD expired_elapse = datetime.timedelta(seconds=expires) expired_time = datetime.datetime.utcnow() + expired_elapse payload = {'exp': expired_time} user_token = jwt.encode(payload, admin_token) req.response.headers.add('X-Auth-Token', user_token) return req.response # To validate the token, it is possible the token is expired or the # token is not validated at all def validate(function): @functools.wraps(function) def wrap_func(req, *args, **kwargs): # by default, no token validation used if CONF.wsgi.auth.lower() == 'none': return function(req, *args, **kwargs) # so, this is for token validation if 'X-Auth-Token' not in req.headers: LOG.debug('no X-Auth-Token given in reqeust header') raise exception.ZVMUnauthorized() token_file_path = CONF.wsgi.token_path admin_token = get_admin_token(token_file_path) try: jwt.decode(req.headers['X-Auth-Token'], admin_token) except jwt.ExpiredSignatureError: LOG.debug('token validation failed because it is expired') raise exception.ZVMUnauthorized() except jwt.DecodeError: LOG.debug('token not valid') raise exception.ZVMUnauthorized() except Exception: LOG.debug('unknown exception occur during token validation') raise exception.ZVMUnauthorized() return function(req, *args, **kwargs) return wrap_func zVMCloudConnector-1.6.3/zvmsdk/sdkwsgi/handlers/vswitch.py0000664000175000017510000001443213575566551023402 0ustar ruirui00000000000000# Copyright 2017,2018 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
# See the
# License for the specific language governing permissions and limitations
# under the License.

"""Handler for the root of the sdk API."""

import json

from zvmconnector import connector
from zvmsdk import config
from zvmsdk import log
from zvmsdk.sdkwsgi.handlers import tokens
from zvmsdk.sdkwsgi.schemas import vswitch
from zvmsdk.sdkwsgi import util
from zvmsdk.sdkwsgi import validation
from zvmsdk import utils


_VSWITCHACTION = None

CONF = config.CONF
LOG = log.LOG


class VswitchAction(object):
    """Forwards vswitch operations to the SDK server over a socket."""

    def __init__(self):
        self.client = connector.ZVMConnector(connection_type='socket',
                                             ip_addr=CONF.sdkserver.bind_addr,
                                             port=CONF.sdkserver.bind_port)

    def list(self):
        """List all defined vswitches."""
        return self.client.send_request('vswitch_get_list')

    @validation.schema(vswitch.create)
    def create(self, body):
        """Create a vswitch from the request body (schema-validated)."""
        vsw = body['vswitch']
        name = vsw['name']
        rdev = vsw.get('rdev', None)
        controller = vsw.get('controller', '*')
        connection = vsw.get('connection', "CONNECT")
        network_type = vsw.get('network_type', "ETHERNET")
        router = vsw.get('router', "NONROUTER")
        vid = vsw.get('vid', "UNAWARE")
        port_type = vsw.get('port_type', "ACCESS")
        gvrp = vsw.get('gvrp', "GVRP")
        queue_mem = vsw.get('queue_mem', 8)
        native_vid = vsw.get('native_vid', 1)
        persist = vsw.get('persist', True)
        # Accept string booleans from JSON clients.
        persist = util.bool_from_string(persist, strict=True)

        info = self.client.send_request('vswitch_create', name, rdev=rdev,
                                        controller=controller,
                                        connection=connection,
                                        network_type=network_type,
                                        router=router, vid=vid,
                                        port_type=port_type, gvrp=gvrp,
                                        queue_mem=queue_mem,
                                        native_vid=native_vid,
                                        persist=persist)
        return info

    def delete(self, name):
        """Delete the named vswitch."""
        info = self.client.send_request('vswitch_delete', name)
        return info

    def query(self, name):
        """Query details of the named vswitch."""
        info = self.client.send_request('vswitch_query', name)
        return info

    @validation.schema(vswitch.update)
    def update(self, name, body):
        """Apply one grant/revoke/vlan update; first matching key wins."""
        vsw = body['vswitch']

        # TODO: only allow one param at most once
        if 'grant_userid' in vsw:
            userid = vsw['grant_userid']
            info = self.client.send_request('vswitch_grant_user',
                                            name, userid)
            return info

        if 'revoke_userid' in vsw:
            userid = vsw['revoke_userid']
            info = self.client.send_request('vswitch_revoke_user',
                                            name, userid)
            return info

        if 'user_vlan_id' in vsw:
            userid = vsw['user_vlan_id']['userid']
            vlanid = vsw['user_vlan_id']['vlanid']
            info = self.client.send_request('vswitch_set_vlan_id_for_user',
                                            name, userid, vlanid)
            return info


def get_action():
    # Lazily construct a module-level VswitchAction singleton.
    global _VSWITCHACTION
    if _VSWITCHACTION is None:
        _VSWITCHACTION = VswitchAction()
    return _VSWITCHACTION


@util.SdkWsgify
@tokens.validate
def vswitch_list(req):
    """GET /vswitches -- list all vswitches."""
    def _vswitch_list(req):
        action = get_action()
        return action.list()

    info = _vswitch_list(req)
    info_json = json.dumps(info)
    req.response.body = utils.to_utf8(info_json)
    req.response.status = util.get_http_code_from_sdk_return(info)
    req.response.content_type = 'application/json'
    return req.response


@util.SdkWsgify
@tokens.validate
def vswitch_create(req):
    """POST /vswitches -- create a vswitch."""
    def _vswitch_create(req):
        action = get_action()
        body = util.extract_json(req.body)
        return action.create(body=body)

    info = _vswitch_create(req)
    info_json = json.dumps(info)
    req.response.body = utils.to_utf8(info_json)
    req.response.status = util.get_http_code_from_sdk_return(info,
        additional_handler=util.handle_already_exists)
    req.response.content_type = 'application/json'
    return req.response


@util.SdkWsgify
@tokens.validate
def vswitch_delete(req):
    """DELETE /vswitches/{name}."""
    def _vswitch_delete(name):
        action = get_action()
        return action.delete(name)

    name = util.wsgi_path_item(req.environ, 'name')
    info = _vswitch_delete(name)
    info_json = json.dumps(info)
    req.response.body = utils.to_utf8(info_json)
    req.response.status = util.get_http_code_from_sdk_return(info,
                                                             default=200)
    req.response.content_type = 'application/json'
    return req.response


@util.SdkWsgify
@tokens.validate
def vswitch_update(req):
    """PUT /vswitches/{name} -- grant/revoke/set vlan."""
    def _vswitch_update(name, req):
        body = util.extract_json(req.body)
        action = get_action()
        return action.update(name, body=body)

    name = util.wsgi_path_item(req.environ, 'name')
    info = _vswitch_update(name, req)
    info_json = json.dumps(info)
    req.response.body = utils.to_utf8(info_json)
    req.response.status = util.get_http_code_from_sdk_return(info,
        additional_handler=util.handle_not_found)
    req.response.content_type = 'application/json'
    return req.response


@util.SdkWsgify
@tokens.validate
def vswitch_query(req):
    """GET /vswitches/{name} -- query one vswitch."""
    def _vswitch_query(name):
        action = get_action()
        return action.query(name)

    name = util.wsgi_path_item(req.environ, 'name')
    info = _vswitch_query(name)
    info_json = json.dumps(info)
    req.response.body = utils.to_utf8(info_json)
    req.response.status = util.get_http_code_from_sdk_return(info,
        additional_handler=util.handle_not_found)
    req.response.content_type = 'application/json'
    return req.response
zVMCloudConnector-1.6.3/zvmsdk/sdkwsgi/handlers/image.py0000664000175000017510000001251213575566551022770 0ustar ruirui00000000000000# Copyright 2017,2018 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Handler for the image of the sdk API.""" import json from zvmconnector import connector from zvmsdk import config from zvmsdk import log from zvmsdk import utils from zvmsdk.sdkwsgi.handlers import tokens from zvmsdk.sdkwsgi.schemas import image from zvmsdk.sdkwsgi import util from zvmsdk.sdkwsgi import validation _IMAGEACTION = None CONF = config.CONF LOG = log.LOG class ImageAction(object): def __init__(self): self.client = connector.ZVMConnector(connection_type='socket', ip_addr=CONF.sdkserver.bind_addr, port=CONF.sdkserver.bind_port) @validation.schema(image.create) def create(self, body): image = body['image'] image_name = image['image_name'] url = image['url'] remote_host = image.get('remote_host', None) image_meta = image['image_meta'] info = self.client.send_request('image_import', image_name, url, image_meta, remote_host) return info @validation.query_schema(image.query) def get_root_disk_size(self, req, name): # FIXME: this param need combined image nameg, e.g the profile # name, not the given image name from customer side info = self.client.send_request('image_get_root_disk_size', name) return info def delete(self, name): info = self.client.send_request('image_delete', name) return info @validation.query_schema(image.query) def query(self, req, name): info = self.client.send_request('image_query', name) return info @validation.schema(image.export) def export(self, name, body): location = body['location'] dest_url = location['dest_url'] remotehost = location.get('remote_host', None) info = self.client.send_request('image_export', name, dest_url, remotehost) return info def get_action(): global _IMAGEACTION if _IMAGEACTION is None: _IMAGEACTION = ImageAction() return _IMAGEACTION @util.SdkWsgify @tokens.validate def image_create(req): def _image_create(req): action = get_action() body = util.extract_json(req.body) return action.create(body=body) info = _image_create(req) info_json = json.dumps(info) req.response.body = utils.to_utf8(info_json) 
req.response.status = util.get_http_code_from_sdk_return(info, additional_handler = util.handle_already_exists) req.response.content_type = 'application/json' return req.response @util.SdkWsgify @tokens.validate def image_get_root_disk_size(req): def _image_get_root_disk_size(req, name): action = get_action() return action.get_root_disk_size(req, name) name = util.wsgi_path_item(req.environ, 'name') info = _image_get_root_disk_size(req, name) info_json = json.dumps(info) req.response.body = utils.to_utf8(info_json) req.response.content_type = 'application/json' req.response.status = util.get_http_code_from_sdk_return(info) return req.response @util.SdkWsgify @tokens.validate def image_delete(req): def _image_delete(name): action = get_action() return action.delete(name) name = util.wsgi_path_item(req.environ, 'name') info = _image_delete(name) info_json = json.dumps(info) req.response.body = utils.to_utf8(info_json) req.response.status = util.get_http_code_from_sdk_return(info, default=200) req.response.content_type = 'application/json' return req.response @util.SdkWsgify @tokens.validate def image_export(req): def _image_export(name, req): action = get_action() body = util.extract_json(req.body) return action.export(name, body=body) name = util.wsgi_path_item(req.environ, 'name') info = _image_export(name, req) info_json = json.dumps(info) req.response.body = utils.to_utf8(info_json) req.response.status = util.get_http_code_from_sdk_return(info, additional_handler=util.handle_not_found) req.response.content_type = 'application/json' return req.response @util.SdkWsgify @tokens.validate def image_query(req): def _image_query(imagename, req): action = get_action() return action.query(req, imagename) imagename = None if 'imagename' in req.GET: imagename = req.GET['imagename'] info = _image_query(imagename, req) info_json = json.dumps(info) req.response.body = utils.to_utf8(info_json) req.response.status = util.get_http_code_from_sdk_return(info, additional_handler = 
util.handle_not_found) req.response.content_type = 'application/json' return req.response zVMCloudConnector-1.6.3/zvmsdk/sdkwsgi/handlers/guest.py0000664000175000017510000007456214263437505023043 0ustar ruirui00000000000000# Copyright 2017-2020 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Handler for the root of the sdk API.""" import json import six import threading import webob.exc from zvmconnector import connector from zvmsdk import config from zvmsdk import log from zvmsdk import returncode from zvmsdk.sdkwsgi.handlers import tokens from zvmsdk.sdkwsgi.schemas import guest from zvmsdk.sdkwsgi import util from zvmsdk.sdkwsgi import validation from zvmsdk import utils _VMACTION = None _VMHANDLER = None CONF = config.CONF LOG = log.LOG CONF = config.CONF class VMHandler(object): def __init__(self): self.client = connector.ZVMConnector(connection_type='socket', ip_addr=CONF.sdkserver.bind_addr, port=CONF.sdkserver.bind_port) @validation.schema(guest.create) def create(self, body): guest = body['guest'] userid = guest['userid'] vcpus = guest['vcpus'] memory = guest['memory'] kwargs_list = {} guest_keys = guest.keys() if 'disk_list' in guest_keys: kwargs_list['disk_list'] = guest['disk_list'] if 'user_profile' in guest_keys: kwargs_list['user_profile'] = guest['user_profile'] if 'max_cpu' in guest_keys: kwargs_list['max_cpu'] = guest['max_cpu'] if 'max_mem' in guest_keys: kwargs_list['max_mem'] = guest['max_mem'] if 'ipl_from' in guest_keys: kwargs_list['ipl_from'] 
= guest['ipl_from'] if 'ipl_param' in guest_keys: kwargs_list['ipl_param'] = guest['ipl_param'] if 'ipl_loadparam' in guest_keys: kwargs_list['ipl_loadparam'] = guest['ipl_loadparam'] if 'dedicate_vdevs' in guest_keys: kwargs_list['dedicate_vdevs'] = guest['dedicate_vdevs'] if 'loaddev' in guest_keys: kwargs_list['loaddev'] = guest['loaddev'] if 'account' in guest_keys: kwargs_list['account'] = guest['account'] if 'cschedule' in guest_keys: kwargs_list['cschedule'] = guest['cschedule'] if 'cshare' in guest_keys: kwargs_list['cshare'] = guest['cshare'] if 'rdomain' in guest_keys: kwargs_list['rdomain'] = guest['rdomain'] if 'pcif' in guest_keys: kwargs_list['pcif'] = guest['pcif'] info = self.client.send_request('guest_create', userid, vcpus, memory, **kwargs_list) return info def list(self): # list all guest on the given host info = self.client.send_request('guest_list') return info @validation.query_schema(guest.userid_list_query) def get_power_state_real(self, req, userid): info = self.client.send_request('guest_get_power_state_real', userid) return info @validation.query_schema(guest.userid_list_query) def get_info(self, req, userid): info = self.client.send_request('guest_get_info', userid) return info @validation.query_schema(guest.userid_list_query) def get_user_direct(self, req, userid): info = self.client.send_request('guest_get_user_direct', userid) return info @validation.query_schema(guest.userid_list_query) def get_adapters(self, req, userid): info = self.client.send_request('guest_get_adapters_info', userid) return info @validation.query_schema(guest.userid_list_query) def get_definition_info(self, req, userid): info = self.client.send_request('guest_get_definition_info', userid) return info @validation.query_schema(guest.userid_list_query) def get_power_state(self, req, userid): info = self.client.send_request('guest_get_power_state', userid) return info def delete(self, userid): info = self.client.send_request('guest_delete', userid) return info def 
delete_nic(self, userid, vdev, body): active = body.get('active', False) active = util.bool_from_string(active, strict=True) info = self.client.send_request('guest_delete_nic', userid, vdev, active=active) return info @validation.query_schema(guest.userid_list_array_query) def inspect_stats(self, req, userid_list): info = self.client.send_request('guest_inspect_stats', userid_list) return info @validation.query_schema(guest.userid_list_array_query) def inspect_vnics(self, req, userid_list): info = self.client.send_request('guest_inspect_vnics', userid_list) return info # @validation.query_schema(guest.nic_DB_info) # FIXME: the above validation will fail with "'dict' object has no # attribute 'dict_of_lists'" def get_nic_DB_info(self, req, userid=None, nic_id=None, vswitch=None): info = self.client.send_request('guests_get_nic_info', userid=userid, nic_id=nic_id, vswitch=vswitch) return info @validation.schema(guest.create_nic) def create_nic(self, userid, body=None): nic = body['nic'] vdev = nic.get('vdev', None) nic_id = nic.get('nic_id', None) mac_addr = nic.get('mac_addr', None) active = nic.get('active', False) active = util.bool_from_string(active, strict=True) info = self.client.send_request('guest_create_nic', userid, vdev=vdev, nic_id=nic_id, mac_addr=mac_addr, active=active) return info @validation.schema(guest.create_network_interface) def create_network_interface(self, userid, body=None): interface = body['interface'] version = interface['os_version'] networks = interface.get('guest_networks', None) active = interface.get('active', False) active = util.bool_from_string(active, strict=True) info = self.client.send_request('guest_create_network_interface', userid, os_version=version, guest_networks=networks, active=active) return info @validation.schema(guest.delete_network_interface) def delete_network_interface(self, userid, body=None): interface = body['interface'] version = interface['os_version'] vdev = interface['vdev'] active = 
interface.get('active', False) active = util.bool_from_string(active, strict=True) info = self.client.send_request('guest_delete_network_interface', userid, version, vdev, active=active) return info @validation.schema(guest.create_disks) def create_disks(self, userid, body=None): disk_info = body['disk_info'] disk_list = disk_info.get('disk_list', None) info = self.client.send_request('guest_create_disks', userid, disk_list) return info @validation.schema(guest.config_minidisks) def config_minidisks(self, userid, body=None): disk_info = body['disk_info'] disk_list = disk_info.get('disk_list', None) info = self.client.send_request('guest_config_minidisks', userid, disk_list) return info @validation.schema(guest.delete_disks) def delete_disks(self, userid, body=None): vdev_info = body['vdev_info'] vdev_list = vdev_info.get('vdev_list', None) info = self.client.send_request('guest_delete_disks', userid, vdev_list) return info @validation.schema(guest.nic_couple_uncouple) def nic_couple_uncouple(self, userid, vdev, body): info = body['info'] active = info.get('active', False) active = util.bool_from_string(active, strict=True) couple = util.bool_from_string(info['couple'], strict=True) # vlan_id is for couple operation only, uncouple ignore it vlan_id = info.get('vlan_id', -1) if couple: info = self.client.send_request('guest_nic_couple_to_vswitch', userid, vdev, info['vswitch'], active=active, vlan_id=vlan_id) else: info = self.client.send_request('guest_nic_uncouple_from_vswitch', userid, vdev, active=active) return info class VMAction(object): def __init__(self): self.client = connector.ZVMConnector(connection_type='socket', ip_addr=CONF.sdkserver.bind_addr, port=CONF.sdkserver.bind_port) self.dd_semaphore = threading.BoundedSemaphore( value=CONF.wsgi.max_concurrent_deploy_capture) @validation.schema(guest.start) def start(self, userid, body): timeout = body.get('timeout', 0) info = self.client.send_request('guest_start', userid, timeout) return info 
@validation.schema(guest.stop) def stop(self, userid, body): timeout = body.get('timeout', None) poll_interval = body.get('poll_interval', None) info = self.client.send_request('guest_stop', userid, timeout=timeout, poll_interval=poll_interval) return info @validation.schema(guest.softstop) def softstop(self, userid, body): timeout = body.get('timeout', None) poll_interval = body.get('poll_interval', None) info = self.client.send_request('guest_softstop', userid, timeout=timeout, poll_interval=poll_interval) return info def pause(self, userid, body): info = self.client.send_request('guest_pause', userid) return info def unpause(self, userid, body): info = self.client.send_request('guest_unpause', userid) return info def reboot(self, userid, body): info = self.client.send_request('guest_reboot', userid) return info def reset(self, userid, body): info = self.client.send_request('guest_reset', userid) return info def get_console_output(self, userid, body): info = self.client.send_request('guest_get_console_output', userid) return info @validation.schema(guest.register_vm) def register_vm(self, userid, body): meta = body['meta'] net_set = body['net_set'] port_macs = None if 'port_macs' in body.keys(): port_macs = body['port_macs'] info = self.client.send_request('guest_register', userid, meta, net_set, port_macs) return info @validation.schema(guest.deregister_vm) def deregister_vm(self, userid, body): info = self.client.send_request('guest_deregister', userid) return info @validation.schema(guest.live_migrate_vm) def live_migrate_vm(self, userid, body): # dest_zcc_userid default as '' dest_zcc_userid = body.get('dest_zcc_userid', '') destination = body['destination'] operation = body.get('operation', {}) parms = body['parms'] info = self.client.send_request('guest_live_migrate', userid, dest_zcc_userid, destination, parms, operation) return info @validation.schema(guest.resize_cpus) def resize_cpus(self, userid, body): cpu_cnt = body['cpu_cnt'] info = 
self.client.send_request('guest_resize_cpus', userid, cpu_cnt) return info @validation.schema(guest.resize_cpus) def live_resize_cpus(self, userid, body): cpu_cnt = body['cpu_cnt'] info = self.client.send_request('guest_live_resize_cpus', userid, cpu_cnt) return info @validation.schema(guest.resize_mem) def resize_mem(self, userid, body): size = body['size'] info = self.client.send_request('guest_resize_mem', userid, size) return info @validation.schema(guest.resize_mem) def live_resize_mem(self, userid, body): size = body['size'] info = self.client.send_request('guest_live_resize_mem', userid, size) return info @validation.schema(guest.grow_root_volume) def grow_root_volume(self, userid, body=None): info = self.client.send_request('guest_grow_root_volume', userid, body['os_version']) return info @validation.schema(guest.deploy) def deploy(self, userid, body): image_name = body['image'] transportfiles = body.get('transportfiles', None) remotehost = body.get('remotehost', None) vdev = body.get('vdev', None) hostname = body.get('hostname', None) skipdiskcopy = body.get('skipdiskcopy', False) request_info = ("action: 'deploy', userid: %(userid)s," "transportfiles: %(trans)s, remotehost: %(remote)s," "vdev: %(vdev)s, skipdiskcopy: %(skipdiskcopy)s" % {'userid': userid, 'trans': transportfiles, 'remote': remotehost, 'vdev': vdev, 'skipdiskcopy': skipdiskcopy, }) info = None dd_allowed = self.dd_semaphore.acquire(blocking=False) if not dd_allowed: error_def = returncode.errors['serviceUnavail'] info = error_def[0] err_msg = error_def[1][1] % {'req': request_info} info.update({'rs': 1, 'errmsg': err_msg, 'output': ''}) LOG.error(err_msg) return info try: LOG.debug("WSGI sending deploy requests. 
%s" % request_info) info = self.client.send_request('guest_deploy', userid, image_name, transportfiles=transportfiles, remotehost=remotehost, vdev=vdev, hostname=hostname, skipdiskcopy=skipdiskcopy) finally: try: self.dd_semaphore.release() LOG.debug("WSGI deploy request finished, %s." "Resource released." % request_info) except Exception as err: err_msg = ("Failed to release deploy resource in WSGI." "Error: %s, request info: %s" % (six.text_type(err), request_info)) LOG.error(err_msg) return info @validation.schema(guest.capture) def capture(self, userid, body): image_name = body['image'] capture_type = body.get('capture_type', 'rootonly') compress_level = body.get('compress_level', CONF.image.default_compress_level) request_info = ("action: 'capture', userid: %(userid)s," "image name: %(image)s, capture type: %(cap)s," "compress level: %(level)s" % {'userid': userid, 'image': image_name, 'cap': capture_type, 'level': compress_level }) info = None capture_allowed = self.dd_semaphore.acquire(blocking=False) if not capture_allowed: error_def = returncode.errors['serviceUnavail'] info = error_def[0] err_msg = error_def[1][1] % {'req': request_info} info.update({'rs': 1, 'errmsg': err_msg, 'output': ''}) LOG.error(err_msg) return info try: LOG.debug("WSGI sending capture requests. %s" % request_info) info = self.client.send_request('guest_capture', userid, image_name, capture_type=capture_type, compress_level=compress_level) finally: try: self.dd_semaphore.release() LOG.debug("WSGI capture request finished, %s." "Resource released." % request_info) except Exception as err: err_msg = ("Failed to release capture resource in WSGI." 
"Error: %s, request info: %s" % (six.text_type(err), request_info)) LOG.error(err_msg) return info def get_action(): global _VMACTION if _VMACTION is None: _VMACTION = VMAction() return _VMACTION def get_handler(): global _VMHANDLER if _VMHANDLER is None: _VMHANDLER = VMHandler() return _VMHANDLER @util.SdkWsgify @tokens.validate def guest_get_power_state_real(req): def _guest_get_power_state_real(req, userid): action = get_handler() return action.get_power_state_real(req, userid) userid = util.wsgi_path_item(req.environ, 'userid') info = _guest_get_power_state_real(req, userid) info_json = json.dumps(info) req.response.status = util.get_http_code_from_sdk_return(info, additional_handler=util.handle_not_found) req.response.body = utils.to_utf8(info_json) req.response.content_type = 'application/json' return req.response @util.SdkWsgify @tokens.validate def guest_get_info(req): def _guest_get_info(req, userid): action = get_handler() return action.get_info(req, userid) userid = util.wsgi_path_item(req.environ, 'userid') info = _guest_get_info(req, userid) info_json = json.dumps(info) req.response.status = util.get_http_code_from_sdk_return(info, additional_handler=util.handle_not_found) req.response.body = utils.to_utf8(info_json) req.response.content_type = 'application/json' return req.response @util.SdkWsgify @tokens.validate def guest_get_user_direct(req): def _guest_get_user_direct(req, userid): action = get_handler() return action.get_user_direct(req, userid) userid = util.wsgi_path_item(req.environ, 'userid') info = _guest_get_user_direct(req, userid) info_json = json.dumps(info) req.response.status = util.get_http_code_from_sdk_return(info, additional_handler=util.handle_not_found) req.response.body = utils.to_utf8(info_json) req.response.content_type = 'application/json' return req.response @util.SdkWsgify @tokens.validate def guest_get_adapters_info(req): def _guest_get_adapters_info(req, userid): action = get_handler() return action.get_adapters(req, 
userid) userid = util.wsgi_path_item(req.environ, 'userid') info = _guest_get_adapters_info(req, userid) info_json = json.dumps(info) req.response.status = util.get_http_code_from_sdk_return(info, additional_handler=util.handle_not_found) req.response.body = utils.to_utf8(info_json) req.response.content_type = 'application/json' return req.response @util.SdkWsgify @tokens.validate def guest_get(req): def _guest_get(req, userid): action = get_handler() return action.get_definition_info(req, userid) userid = util.wsgi_path_item(req.environ, 'userid') info = _guest_get(req, userid) # info we got looks like: # {'user_direct': [u'USER RESTT305 PASSW0RD 1024m 1024m G', # u'INCLUDE OSDFLT']} info_json = json.dumps(info) req.response.status = util.get_http_code_from_sdk_return(info, additional_handler=util.handle_not_found) req.response.body = utils.to_utf8(info_json) req.response.content_type = 'application/json' return req.response @util.SdkWsgify @tokens.validate def guest_get_power_state(req): def _guest_get_power_state(req, userid): action = get_handler() return action.get_power_state(req, userid) userid = util.wsgi_path_item(req.environ, 'userid') info = _guest_get_power_state(req, userid) info_json = json.dumps(info) req.response.status = util.get_http_code_from_sdk_return(info, additional_handler=util.handle_not_found) req.response.body = utils.to_utf8(info_json) req.response.content_type = 'application/json' return req.response @util.SdkWsgify @tokens.validate def guest_create(req): def _guest_create(req): action = get_handler() body = util.extract_json(req.body) return action.create(body=body) info = _guest_create(req) info_json = json.dumps(info) req.response.body = utils.to_utf8(info_json) req.response.status = util.get_http_code_from_sdk_return(info, additional_handler=util.handle_already_exists) req.response.content_type = 'application/json' return req.response @util.SdkWsgify @tokens.validate def guest_list(req): def _guest_list(): action = get_handler() 
return action.list() info = _guest_list() info_json = json.dumps(info) req.response.body = utils.to_utf8(info_json) req.response.content_type = 'application/json' req.response.status = 200 return req.response @util.SdkWsgify @tokens.validate def guest_action(req): def _guest_action(userid, req): action = get_action() body = util.extract_json(req.body) if len(body) == 0 or 'action' not in body: msg = 'action not exist or is empty' LOG.info(msg) raise webob.exc.HTTPBadRequest(explanation=msg) method = body['action'] func = getattr(action, method, None) if func: body.pop('action') return func(userid, body=body) else: msg = 'action %s is invalid' % method raise webob.exc.HTTPBadRequest(msg) userid = util.wsgi_path_item(req.environ, 'userid') info = _guest_action(userid, req) info_json = json.dumps(info) req.response.body = utils.to_utf8(info_json) req.response.content_type = 'application/json' req.response.status = util.get_http_code_from_sdk_return(info, additional_handler=util.handle_not_found_and_conflict) return req.response @util.SdkWsgify @tokens.validate def guest_delete(req): def _guest_delete(userid): action = get_handler() return action.delete(userid) userid = util.wsgi_path_item(req.environ, 'userid') info = _guest_delete(userid) info_json = json.dumps(info) req.response.body = utils.to_utf8(info_json) req.response.status = util.get_http_code_from_sdk_return(info, default=200) req.response.content_type = 'application/json' return req.response @util.SdkWsgify @tokens.validate def guest_delete_nic(req): def _guest_delete_nic(userid, vdev, req): action = get_handler() body = util.extract_json(req.body) return action.delete_nic(userid, vdev, body) userid = util.wsgi_path_item(req.environ, 'userid') vdev = util.wsgi_path_item(req.environ, 'vdev') info = _guest_delete_nic(userid, vdev, req) info_json = json.dumps(info) req.response.body = utils.to_utf8(info_json) req.response.status = util.get_http_code_from_sdk_return(info, default=200) req.response.content_type 
= 'application/json' return req.response @util.SdkWsgify @tokens.validate def guest_create_nic(req): def _guest_create_nic(userid, req): action = get_handler() body = util.extract_json(req.body) return action.create_nic(userid, body=body) userid = util.wsgi_path_item(req.environ, 'userid') info = _guest_create_nic(userid, req) info_json = json.dumps(info) req.response.body = utils.to_utf8(info_json) req.response.status = util.get_http_code_from_sdk_return(info) req.response.content_type = 'application/json' return req.response @util.SdkWsgify @tokens.validate def guest_couple_uncouple_nic(req): def _guest_couple_uncouple_nic(userid, vdev, req): action = get_handler() body = util.extract_json(req.body) return action.nic_couple_uncouple(userid, vdev, body=body) userid = util.wsgi_path_item(req.environ, 'userid') vdev = util.wsgi_path_item(req.environ, 'vdev') info = _guest_couple_uncouple_nic(userid, vdev, req) info_json = json.dumps(info) req.response.body = utils.to_utf8(info_json) req.response.status = util.get_http_code_from_sdk_return(info) req.response.content_type = 'application/json' return req.response @util.SdkWsgify @tokens.validate def guest_create_network_interface(req): def _guest_create_network_interface(userid, req): action = get_handler() body = util.extract_json(req.body) return action.create_network_interface(userid, body=body) userid = util.wsgi_path_item(req.environ, 'userid') info = _guest_create_network_interface(userid, req) info_json = json.dumps(info) req.response.body = utils.to_utf8(info_json) req.response.status = util.get_http_code_from_sdk_return(info) req.response.content_type = 'application/json' return req.response @util.SdkWsgify @tokens.validate def guest_delete_network_interface(req): def _guest_delete_network_interface(userid, req): action = get_handler() body = util.extract_json(req.body) return action.delete_network_interface(userid, body=body) userid = util.wsgi_path_item(req.environ, 'userid') info = 
_guest_delete_network_interface(userid, req) info_json = json.dumps(info) req.response.body = utils.to_utf8(info_json) req.response.status = util.get_http_code_from_sdk_return(info) req.response.content_type = 'application/json' return req.response def _get_userid_list(req): userids = [] if 'userid' in req.GET.keys(): userid = req.GET.get('userid') userid = userid.strip(' ,') userid = userid.replace(' ', '') userids.extend(userid.split(',')) return userids @util.SdkWsgify @tokens.validate def guest_get_stats(req): userid_list = _get_userid_list(req) def _guest_get_stats(req, userid_list): action = get_handler() return action.inspect_stats(req, userid_list) info = _guest_get_stats(req, userid_list) info_json = json.dumps(info) req.response.status = util.get_http_code_from_sdk_return(info, additional_handler=util.handle_not_found) req.response.body = utils.to_utf8(info_json) req.response.content_type = 'application/json' return req.response @util.SdkWsgify @tokens.validate def guest_get_interface_stats(req): userid_list = _get_userid_list(req) def _guest_get_interface_stats(req, userid_list): action = get_handler() return action.inspect_vnics(req, userid_list) info = _guest_get_interface_stats(req, userid_list) info_json = json.dumps(info) req.response.status = util.get_http_code_from_sdk_return(info, additional_handler=util.handle_not_found) req.response.body = utils.to_utf8(info_json) req.response.content_type = 'application/json' return req.response @util.SdkWsgify @tokens.validate def guests_get_nic_info(req): def _guests_get_nic_DB_info(req, userid=None, nic_id=None, vswitch=None): action = get_handler() return action.get_nic_DB_info(req, userid=userid, nic_id=nic_id, vswitch=vswitch) userid = req.GET.get('userid', None) nic_id = req.GET.get('nic_id', None) vswitch = req.GET.get('vswitch', None) info = _guests_get_nic_DB_info(req, userid=userid, nic_id=nic_id, vswitch=vswitch) info_json = json.dumps(info) req.response.status = 
util.get_http_code_from_sdk_return(info, additional_handler=util.handle_not_found) req.response.body = utils.to_utf8(info_json) req.response.content_type = 'application/json' return req.response @util.SdkWsgify @tokens.validate def guest_config_disks(req): def _guest_config_minidisks(userid, req): action = get_handler() body = util.extract_json(req.body) return action.config_minidisks(userid, body=body) userid = util.wsgi_path_item(req.environ, 'userid') info = _guest_config_minidisks(userid, req) info_json = json.dumps(info) req.response.status = util.get_http_code_from_sdk_return(info) req.response.body = utils.to_utf8(info_json) req.response.content_type = 'application/json' return req.response @util.SdkWsgify @tokens.validate def guest_create_disks(req): def _guest_create_disks(userid, req): action = get_handler() body = util.extract_json(req.body) return action.create_disks(userid, body=body) userid = util.wsgi_path_item(req.environ, 'userid') info = _guest_create_disks(userid, req) info_json = json.dumps(info) req.response.status = util.get_http_code_from_sdk_return(info) req.response.body = utils.to_utf8(info_json) req.response.content_type = 'application/json' return req.response @util.SdkWsgify @tokens.validate def guest_delete_disks(req): def _guest_delete_disks(userid, req): action = get_handler() body = util.extract_json(req.body) return action.delete_disks(userid, body=body) userid = util.wsgi_path_item(req.environ, 'userid') info = _guest_delete_disks(userid, req) info_json = json.dumps(info) req.response.status = util.get_http_code_from_sdk_return(info, default=200) req.response.body = utils.to_utf8(info_json) req.response.content_type = 'application/json' return req.response zVMCloudConnector-1.6.3/zvmsdk/sdkwsgi/handlers/host.py0000664000175000017510000001254314263437505022660 0ustar ruirui00000000000000# Copyright 2017,2021 IBM Corp. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Handler for the root of the sdk API.""" import json from zvmconnector import connector from zvmsdk import config from zvmsdk import log from zvmsdk.sdkwsgi.handlers import tokens from zvmsdk.sdkwsgi import util from zvmsdk import utils from zvmsdk.sdkwsgi import validation from zvmsdk.sdkwsgi.schemas import image from zvmsdk.sdkwsgi.schemas import host _HOSTACTION = None CONF = config.CONF LOG = log.LOG class HostAction(object): def __init__(self): self.client = connector.ZVMConnector(connection_type='socket', ip_addr=CONF.sdkserver.bind_addr, port=CONF.sdkserver.bind_port) def get_info(self): info = self.client.send_request('host_get_info') return info def get_guest_list(self): info = self.client.send_request('host_get_guest_list') return info @validation.query_schema(image.diskpool) def get_diskpool_volumes(self, req, poolname): info = self.client.send_request('host_get_diskpool_volumes', disk_pool=poolname) return info @validation.query_schema(host.volume) def get_volume_info(self, req, volumename): info = self.client.send_request('host_get_volume_info', volume=volumename) return info @validation.query_schema(image.diskpool) def diskpool_get_info(self, req, poolname): info = self.client.send_request('host_diskpool_get_info', disk_pool=poolname) return info def get_ssi_info(self): info = self.client.send_request('host_get_ssi_info') return info def get_action(): global _HOSTACTION if _HOSTACTION is None: _HOSTACTION = 
HostAction() return _HOSTACTION @util.SdkWsgify @tokens.validate def host_get_info(req): def _host_get_info(): action = get_action() return action.get_info() info = _host_get_info() info_json = json.dumps(info) req.response.status = util.get_http_code_from_sdk_return(info) req.response.body = utils.to_utf8(info_json) req.response.content_type = 'application/json' return req.response @util.SdkWsgify @tokens.validate def host_get_guest_list(req): def _host_get_guest_list(): action = get_action() return action.get_guest_list() info = _host_get_guest_list() info_json = json.dumps(info) req.response.body = utils.to_utf8(info_json) req.response.content_type = 'application/json' req.response.status = util.get_http_code_from_sdk_return(info) return req.response @util.SdkWsgify @tokens.validate def host_get_diskpool_volumes(req): def _host_get_diskpool_volumes(req, poolname): action = get_action() return action.get_diskpool_volumes(req, poolname) poolname = None if 'poolname' in req.GET: poolname = req.GET['poolname'] info = _host_get_diskpool_volumes(req, poolname) info_json = json.dumps(info) req.response.body = utils.to_utf8(info_json) req.response.content_type = 'application/json' req.response.status = util.get_http_code_from_sdk_return(info) return req.response @util.SdkWsgify @tokens.validate def host_get_volume_info(req): def _host_get_volume_info(req, volumename): action = get_action() return action.get_volume_info(req, volumename) volumename = None if 'volumename' in req.GET: volumename = req.GET['volumename'] info = _host_get_volume_info(req, volumename) info_json = json.dumps(info) req.response.body = utils.to_utf8(info_json) req.response.content_type = 'application/json' req.response.status = util.get_http_code_from_sdk_return(info) return req.response @util.SdkWsgify @tokens.validate def host_get_disk_info(req): def _host_get_disk_info(req, poolname): action = get_action() return action.diskpool_get_info(req, poolname) poolname = None if 'poolname' in req.GET: 
poolname = req.GET['poolname'] info = _host_get_disk_info(req, poolname) info_json = json.dumps(info) req.response.status = util.get_http_code_from_sdk_return(info, additional_handler=util.handle_not_found) req.response.body = utils.to_utf8(info_json) req.response.content_type = 'application/json' return req.response @util.SdkWsgify @tokens.validate def host_get_ssi_info(req): def _host_get_ssi_info(): action = get_action() return action.get_ssi_info() info = _host_get_ssi_info() info_json = json.dumps(info) req.response.body = utils.to_utf8(info_json) req.response.content_type = 'application/json' req.response.status = util.get_http_code_from_sdk_return(info) return req.response zVMCloudConnector-1.6.3/zvmsdk/sdkwsgi/handlers/file.py0000664000175000017510000002240413575566551022630 0ustar ruirui00000000000000# Copyright 2017,2018 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Handler for the image of the sdk API.""" import six import json import hashlib import os import uuid from zvmsdk import config from zvmsdk import constants as const from zvmsdk import exception from zvmsdk import log from zvmsdk import returncode from zvmsdk import utils from zvmsdk.sdkwsgi.handlers import tokens from zvmsdk.sdkwsgi import util _FILEACTION = None CONF = config.CONF LOG = log.LOG CHUNKSIZE = 4096 INVALID_CONTENT_TYPE = { 'overallRC': returncode.errors['RESTAPI'][0]['overallRC'], 'modID': returncode.errors['RESTAPI'][0]['modID'], 'rc': returncode.errors['RESTAPI'][0]['overallRC'], 'rs': 1, 'errmsg': '', 'output': ''} FILE_OPERATION_ERROR = { 'overallRC': returncode.errors['file'][0]['overallRC'], 'modID': returncode.errors['file'][0]['modID'], 'rc': returncode.errors['file'][0]['overallRC'], 'rs': 1, 'errmsg': '', 'output': ''} class FileAction(object): def __init__(self): self._pathutils = utils.PathUtils() def file_import(self, fileobj): try: importDir = self._pathutils.create_file_repository( const.FILE_TYPE['IMPORT']) fname = str(uuid.uuid1()) target_fpath = '/'.join([importDir, fname]) # The following steps save the imported file into sdkserver checksum = hashlib.md5() bytes_written = 0 with open(target_fpath, 'wb') as f: for buf in fileChunkReadable(fileobj, CHUNKSIZE): bytes_written += len(buf) checksum.update(buf) f.write(buf) checksum_hex = checksum.hexdigest() LOG.debug("Wrote %(bytes_written)d bytes to %(target_image)s" " with checksum %(checksum_hex)s" % {'bytes_written': bytes_written, 'target_image': target_fpath, 'checksum_hex': checksum_hex}) return_data = {'filesize_in_bytes': bytes_written, 'dest_url': 'file://' + target_fpath, 'md5sum': checksum_hex} results = {'overallRC': 0, 'modID': None, 'rc': 0, 'rs': 0, 'errmsg': '', 'output': return_data} except OSError as err: msg = ("File import error: %s, please check access right to " "specified file or folder" % six.text_type(err)) LOG.error(msg) results = FILE_OPERATION_ERROR 
results.update({'rs': 1, 'errmsg': msg, 'output': ''}) except Exception as err: # Cleanup the file from file repository self._pathutils.clean_temp_folder(target_fpath) msg = ("Exception happened during file import: %s" % six.text_type(err)) LOG.error(msg) results = FILE_OPERATION_ERROR results.update({'rs': 1, 'errmsg': msg, 'output': ''}) return results def file_export(self, fpath): try: if not os.path.exists(fpath): msg = ("The specific file %s for export does not exist" % fpath) LOG.error(msg) results = FILE_OPERATION_ERROR results.update({'rs': 2, 'errmsg': msg, 'output': ''}) return results offset = 0 file_size = os.path.getsize(fpath) # image_size here is the image_size in bytes file_iter = iter(get_data(fpath, offset=offset, file_size=file_size)) return file_iter except exception as err: msg = ("Exception happened during file export with error %s " % six.text_type(err)) LOG.error(msg) results = FILE_OPERATION_ERROR.update({'rs': 2, 'errmsg': msg, 'output': ''}) return results def get_action(): global _FILEACTION if _FILEACTION is None: _FILEACTION = FileAction() return _FILEACTION @util.SdkWsgify @tokens.validate def file_import(request): def _import(file_obj): action = get_action() return action.file_import(file_obj) # Check if the request content type is valid content_type = request.content_type info = _content_type_validation(content_type) if not info: file_obj = request.body_file info = _import(file_obj) info_json = json.dumps(info) request.response.body = utils.to_utf8(info_json) request.response.status = util.get_http_code_from_sdk_return(info) request.response.content_type = 'application/json' return request.response def _content_type_validation(content_type): results = {} if content_type not in ['application/octet-stream']: msg = ('Invalid content type %s found for file import/export, the ' 'supported content type is application/octet-stream' % content_type) LOG.error(msg) results = INVALID_CONTENT_TYPE.update({'errmsg': msg}) return results 
@util.SdkWsgify @tokens.validate def file_export(request): def _export(fpath): action = get_action() return action.file_export(fpath) body = util.extract_json(request.body) fpath = body['source_file'] results = _export(fpath) # if results is dict, means error happened. if isinstance(results, dict): info_json = json.dumps(results) request.response.body = utils.to_utf8(info_json) request.response.status = util.get_http_code_from_sdk_return( results) request.response.content_type = 'application/json' return request.response # Result contains (image_iter, md5sum, image_size) else: request.response.headers['Content-Type'] = 'application/octet-stream' request.response.app_iter = results request.response.status_int = 200 return request.response def fileChunkReadable(file_obj, chunk_size=65536): """ Return a readable iterator with a reader yielding chunks of a preferred size, otherwise leave file object unchanged. :param file_obj: an iter which may be readable :param chunk_size: maximum size of chunk """ if hasattr(file_obj, 'read'): return fileChunkIter(file_obj, chunk_size) else: return file_obj def fileChunkIter(file_object, file_chunk_size=65536): """ Return an iterator to a file-like object that yields fixed size chunks :param file_object: a file-like object :param file_chunk_size: maximum size of chunk """ while True: chunk = file_object.read(file_chunk_size) if chunk: yield chunk else: break def get_data(file_path, offset=0, file_size=None): data = chunkedFile(file_path, file_offset=offset, file_chunk_size=CHUNKSIZE, file_partial_length=file_size) return get_chunk_data_iterator(data) def get_chunk_data_iterator(data): for chunk in data: yield chunk class chunkedFile(object): """ Send iterator to wsgi server so that it can iterate over a large file """ def __init__(self, file_path, file_offset=0, file_chunk_size=4096, file_partial_length=None): self.file_path = file_path self.file_chunk_size = file_chunk_size self.file_partial_length = file_partial_length 
self.file_partial = self.file_partial_length is not None self.file_object = open(self.file_path, 'rb') if file_offset: self.file_pointer.seek(file_offset) def __iter__(self): """Return an iterator over the large file.""" try: if self.file_object: while True: if self.file_partial: size = min(self.file_chunk_size, self.file_partial_length) else: size = self.file_chunk_size chunk = self.file_object.read(size) if chunk: yield chunk if self.file_partial: self.file_partial_length -= len(chunk) if self.file_partial_length <= 0: break else: break finally: self.close() def close(self): """Close the internal file pointer""" if self.file_object: self.file_object.close() self.file_object = None zVMCloudConnector-1.6.3/zvmsdk/sdkwsgi/handlers/healthy.py0000664000175000017510000000210014266177632023332 0ustar ruirui00000000000000# Copyright 2022 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Handler for the root of the sdk API.""" import json from smtLayer import vmStatus from zvmsdk.sdkwsgi.handlers import tokens from zvmsdk.sdkwsgi import util from zvmsdk import utils @util.SdkWsgify @tokens.validate def healthy(req): s = vmStatus.GetSMAPIStatus() output = s.Get() info_json = json.dumps(output) req.response.body = utils.to_utf8(info_json) req.response.content_type = 'application/json' req.response.status = 200 return req.response zVMCloudConnector-1.6.3/zvmsdk/sdkwsgi/handlers/__init__.py0000664000175000017510000000000013575566551023434 0ustar ruirui00000000000000zVMCloudConnector-1.6.3/zvmsdk/sdkwsgi/handlers/version.py0000664000175000017510000000267513575566551023406 0ustar ruirui00000000000000# Copyright 2017,2018 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Handler for the root of the sdk API.""" import json from zvmsdk.sdkwsgi import util from zvmsdk.sdkwsgi.handlers import tokens from zvmsdk import utils from zvmsdk import version as sdk_version APIVERSION = '1.0' @util.SdkWsgify @tokens.validate def version(req): min_version = APIVERSION max_version = APIVERSION version_data = { 'version': sdk_version.__version__, 'api_version': '%s' % APIVERSION, 'max_version': max_version, 'min_version': min_version, } output = { "rs": 0, "overallRC": 0, "modID": None, "rc": 0, "errmsg": "", "output": version_data } info_json = json.dumps(output) req.response.body = utils.to_utf8(info_json) req.response.content_type = 'application/json' req.response.status = 200 return req.response zVMCloudConnector-1.6.3/zvmsdk/sdkwsgi/handlers/volume.py0000664000175000017510000003566714315210052023206 0ustar ruirui00000000000000# Copyright 2017,2022 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
"""Handler for the root of the sdk API.""" import json from zvmconnector import connector from zvmsdk import config from zvmsdk import log from zvmsdk.sdkwsgi.handlers import tokens from zvmsdk.sdkwsgi.schemas import volume from zvmsdk.sdkwsgi import util from zvmsdk.sdkwsgi import validation from zvmsdk import utils _VOLUMEACTION = None CONF = config.CONF LOG = log.LOG class VolumeAction(object): def __init__(self): self.client = connector.ZVMConnector(connection_type='socket', ip_addr=CONF.sdkserver.bind_addr, port=CONF.sdkserver.bind_port) @validation.schema(volume.attach) def attach(self, body): info = body['info'] connection = info['connection'] info = self.client.send_request('volume_attach', connection) return info @validation.schema(volume.detach) def detach(self, body): info = body['info'] connection = info['connection'] info = self.client.send_request('volume_detach', connection) return info @validation.query_schema(volume.get_volume_connector) def get_volume_connector(self, req, userid, reserve, fcp_template_id, sp_name): conn = self.client.send_request('get_volume_connector', userid, reserve, fcp_template_id, sp_name) return conn @validation.query_schema(volume.get_fcp_templates) def get_fcp_templates(self, req, template_id_list, assigner_id, default_sp_list, host_default): return self.client.send_request('get_fcp_templates', template_id_list, assigner_id, default_sp_list, host_default) @validation.query_schema(volume.get_fcp_templates_details) def get_fcp_templates_details(self, req, template_id_list, raw, statistics, sync_with_zvm): return self.client.send_request('get_fcp_templates_details', template_id_list, raw=raw, statistics=statistics, sync_with_zvm=sync_with_zvm) def delete_fcp_template(self, template_id): return self.client.send_request('delete_fcp_template', template_id) @validation.query_schema(volume.get_fcp_usage) def get_fcp_usage(self, req, fcp): return self.client.send_request('get_fcp_usage', fcp) 
@validation.schema(volume.set_fcp_usage) def set_fcp_usage(self, fcp, body=None): userid = body['info']['userid'] reserved = body['info']['reserved'] connections = body['info']['connections'] fcp_template_id = body['info']['fcp_template_id'] if not fcp_template_id: fcp_template_id = '' return self.client.send_request('set_fcp_usage', fcp, userid, reserved, connections, fcp_template_id) def volume_refresh_bootmap(self, fcpchannel, wwpn, lun, wwid, transportfiles, guest_networks, fcp_template_id): info = self.client.send_request('volume_refresh_bootmap', fcpchannel, wwpn, lun, wwid, transportfiles, guest_networks, fcp_template_id) return info @validation.schema(volume.create_fcp_template) def create_fcp_template(self, body=None): name = body.get('name') description = body.get('description', '') fcp_devices = body.get('fcp_devices', '') host_default = body.get('host_default', False) min_fcp_paths_count = body.get('min_fcp_paths_count', None) # ensure host_default parameter is boolean type # because of the sqlite FCP database's requirements valid_true_values = [True, 'True', 'TRUE', 'true', '1', 'ON', 'On', 'on', 'YES', 'Yes', 'yes'] if host_default in valid_true_values: host_default = True else: host_default = False default_sp_list = body.get('storage_providers', []) ret = self.client.send_request('create_fcp_template', name, description=description, fcp_devices=fcp_devices, host_default=host_default, default_sp_list=default_sp_list, min_fcp_paths_count=min_fcp_paths_count) return ret @validation.schema(volume.edit_fcp_template) def edit_fcp_template(self, body=None): fcp_template_id = body.get('fcp_template_id') name = body.get('name', None) description = body.get('description', None) fcp_devices = body.get('fcp_devices', None) default_sp_list = body.get('storage_providers', None) min_fcp_paths_count = body.get('min_fcp_paths_count', None) # Due to the pre-validation in schemas/volume.py, # host_default only has 2 possible value types: # i.e. 
None or a value defined in parameter_types.boolean host_default = body.get('host_default', None) # ensure host_default parameter is boolean type # because of the sqlite FCP database's requirements valid_true_values = [True, 'True', 'TRUE', 'true', '1', 'ON', 'On', 'on', 'YES', 'Yes', 'yes'] if host_default in valid_true_values: host_default = True elif host_default is not None: host_default = False ret = self.client.send_request('edit_fcp_template', fcp_template_id, name=name, description=description, fcp_devices=fcp_devices, host_default=host_default, default_sp_list=default_sp_list, min_fcp_paths_count=min_fcp_paths_count) return ret def get_action(): global _VOLUMEACTION if _VOLUMEACTION is None: _VOLUMEACTION = VolumeAction() return _VOLUMEACTION @util.SdkWsgify @tokens.validate def volume_attach(req): def _volume_attach(req): action = get_action() body = util.extract_json(req.body) return action.attach(body=body) info = _volume_attach(req) info_json = json.dumps(info) req.response.body = utils.to_utf8(info_json) req.response.content_type = 'application/json' req.response.status = util.get_http_code_from_sdk_return(info, default=200) return req.response @util.SdkWsgify @tokens.validate def volume_detach(req): def _volume_detach(req): action = get_action() body = util.extract_json(req.body) return action.detach(body=body) info = _volume_detach(req) info_json = json.dumps(info) req.response.body = utils.to_utf8(info_json) req.response.content_type = 'application/json' req.response.status = util.get_http_code_from_sdk_return(info, default=200) return req.response @util.SdkWsgify @tokens.validate def volume_refresh_bootmap(req): def _volume_refresh_bootmap(req, fcpchannel, wwpn, lun, wwid, transportfiles, guest_networks, fcp_template_id): action = get_action() return action.volume_refresh_bootmap(fcpchannel, wwpn, lun, wwid, transportfiles, guest_networks, fcp_template_id) body = util.extract_json(req.body) info = _volume_refresh_bootmap(req, 
body['info']['fcpchannel'], body['info']['wwpn'], body['info']['lun'], body['info'].get('wwid', ""), body['info'].get('transportfiles', ""), body['info'].get('guest_networks', []), body['info'].get('fcp_template_id', None)) info_json = json.dumps(info) req.response.body = utils.to_utf8(info_json) req.response.content_type = 'application/json' req.response.status = util.get_http_code_from_sdk_return(info, default=200) return req.response @util.SdkWsgify @tokens.validate def get_volume_connector(req): def _get_volume_conn(req, userid, reserve, fcp_template_id, sp_name): action = get_action() return action.get_volume_connector(req, userid, reserve, fcp_template_id, sp_name) userid = util.wsgi_path_item(req.environ, 'userid') body = util.extract_json(req.body) reserve = body['info']['reserve'] fcp_template_id = body['info'].get('fcp_template_id', None) sp_name = body['info'].get('storage_provider', None) conn = _get_volume_conn(req, userid, reserve, fcp_template_id, sp_name) conn_json = json.dumps(conn) req.response.content_type = 'application/json' req.response.body = utils.to_utf8(conn_json) req.response.status = util.get_http_code_from_sdk_return(conn, default=200) return req.response @util.SdkWsgify @tokens.validate def get_fcp_usage(req): def _get_fcp_usage(req, fcp): action = get_action() return action.get_fcp_usage(req, fcp) fcp = util.wsgi_path_item(req.environ, 'fcp_id') ret = _get_fcp_usage(req, fcp) ret_json = json.dumps(ret) req.response.status = util.get_http_code_from_sdk_return(ret, additional_handler=util.handle_not_found) req.response.content_type = 'application/json' req.response.body = utils.to_utf8(ret_json) return req.response @util.SdkWsgify @tokens.validate def set_fcp_usage(req): def _set_fcp_usage(req, fcp): action = get_action() body = util.extract_json(req.body) return action.set_fcp_usage(fcp, body=body) fcp = util.wsgi_path_item(req.environ, 'fcp_id') ret = _set_fcp_usage(req, fcp) ret_json = json.dumps(ret) req.response.body = 
utils.to_utf8(ret_json) req.response.content_type = 'application/json' req.response.status = 200 return req.response @util.SdkWsgify @tokens.validate def create_fcp_template(req): def _create_fcp_template(req): action = get_action() body = util.extract_json(req.body) return action.create_fcp_template(body=body) ret = _create_fcp_template(req) ret_json = json.dumps(ret) req.response.body = utils.to_utf8(ret_json) req.response.status = util.get_http_code_from_sdk_return(ret) req.response.content_type = 'application/json' @util.SdkWsgify @tokens.validate def edit_fcp_template(req): def _edit_fcp_template(req_body): action = get_action() return action.edit_fcp_template(body=req_body) body = util.extract_json(req.body) body['fcp_template_id'] = util.wsgi_path_item(req.environ, 'template_id') ret = _edit_fcp_template(body) ret_json = json.dumps(ret) req.response.body = utils.to_utf8(ret_json) req.response.status = util.get_http_code_from_sdk_return(ret) req.response.content_type = 'application/json' @util.SdkWsgify @tokens.validate def get_fcp_templates(req): def _get_fcp_templates(req, template_id_list, assigner_id, default_sp_list, host_default): action = get_action() return action.get_fcp_templates(req, template_id_list, assigner_id, default_sp_list, host_default) template_id_list = req.GET.get('template_id_list', None) assigner_id = req.GET.get('assigner_id', None) default_sp_list = req.GET.get('storage_providers', None) host_default = req.GET.get('host_default', None) valid_true_values = [True, 'True', 'TRUE', 'true', '1', 'ON', 'On', 'on', 'YES', 'Yes', 'yes'] if host_default: if host_default in valid_true_values: host_default = True else: host_default = False ret = _get_fcp_templates(req, template_id_list, assigner_id, default_sp_list, host_default) ret_json = json.dumps(ret) req.response.status = util.get_http_code_from_sdk_return( ret, additional_handler=util.handle_not_found) req.response.content_type = 'application/json' req.response.body = 
utils.to_utf8(ret_json) return req.response @util.SdkWsgify @tokens.validate def get_fcp_templates_details(req): def _get_fcp_templates_details(req, template_id_list, raw, statistics, sync_with_zvm): action = get_action() return action.get_fcp_templates_details(req, template_id_list, raw, statistics, sync_with_zvm) template_id_list = req.GET.get('template_id_list', None) raw = req.GET.get('raw', 'false') if raw.lower() == 'true': raw = True else: raw = False statistics = req.GET.get('statistics', 'true') if statistics.lower() == 'true': statistics = True else: statistics = False sync_with_zvm = req.GET.get('sync_with_zvm', 'false') if sync_with_zvm.lower() == 'true': sync_with_zvm = True else: sync_with_zvm = False ret = _get_fcp_templates_details(req, template_id_list, raw, statistics, sync_with_zvm) ret_json = json.dumps(ret) req.response.status = util.get_http_code_from_sdk_return(ret, additional_handler=util.handle_not_found) req.response.content_type = 'application/json' req.response.body = utils.to_utf8(ret_json) return req.response @util.SdkWsgify @tokens.validate def delete_fcp_template(req): def _delete_fcp_template(template_id): action = get_action() return action.delete_fcp_template(template_id) template_id = util.wsgi_path_item(req.environ, 'template_id') info = _delete_fcp_template(template_id) info_json = json.dumps(info) req.response.body = utils.to_utf8(info_json) req.response.status = util.get_http_code_from_sdk_return(info, additional_handler=util.handle_not_found) req.response.content_type = 'application/json' return req.response zVMCloudConnector-1.6.3/zvmsdk/sdkwsgi/util.py0000664000175000017510000002016614315210052021040 0ustar ruirui00000000000000# Copyright 2017,2018 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import six import webob from webob.dec import wsgify from zvmsdk import log LOG = log.LOG SDKWSGI_MODID = 120 # The following globals are used by `mask_tuple_password` _SANITIZE_KEYS = ['X-Auth-Token'] def extract_json(body): try: LOG.debug('Decoding body: %s', body) # This function actually is received from upper layer through # socket, so it's bytes in py3 if isinstance(body, bytes): body = bytes.decode(body) data = json.loads(body) except ValueError as exc: msg = ('Malformed JSON: %(error)s') % {'error': exc} LOG.debug(msg) raise webob.exc.HTTPBadRequest(msg, json_formatter=json_error_formatter) return data def json_error_formatter(body, status, title, environ): """A json_formatter for webob exceptions.""" body = webob.exc.strip_tags(body) status_code = int(status.split(None, 1)[0]) error_dict = { 'status': status_code, 'title': title, 'detail': body } return {'errors': [error_dict]} def wsgi_path_item(environ, name): """Extract the value of a named field in a URL. Return None if the name is not present or there are no path items. 
""" try: return environ['wsgiorg.routing_args'][1][name] except (KeyError, IndexError): return None TRUE_STRINGS = ('1', 't', 'true', 'on', 'y', 'yes') FALSE_STRINGS = ('0', 'f', 'false', 'off', 'n', 'no') def bool_from_string(subject, strict=False, default=False): if isinstance(subject, bool): return subject if not isinstance(subject, six.string_types): subject = six.text_type(subject) lowered = subject.strip().lower() if lowered in TRUE_STRINGS: return True elif lowered in FALSE_STRINGS: return False elif strict: acceptable = ', '.join( "'%s'" % s for s in sorted(TRUE_STRINGS + FALSE_STRINGS)) msg = ("Unrecognized value '%(val)s', acceptable values are:" " %(acceptable)s") % {'val': subject, 'acceptable': acceptable} raise ValueError(msg) else: return default def get_request_uri(environ): name = environ.get('SCRIPT_NAME', '') info = environ.get('PATH_INFO', '') req_uri = name + info if environ.get('QUERY_STRING'): req_uri += '?' + environ['QUERY_STRING'] return req_uri def get_http_code_from_sdk_return(msg, additional_handler=None, default=200): LOG.debug("Get msg to handle: %s", msg) if 'overallRC' in msg: ret = msg['overallRC'] if ret != 0: # same definition to sdk layer if ret in [400, 404, 409, 501, 503]: return ret # 100 mean validation error in sdk layer and # lead to a 400 badrequest if ret in [100]: return 400 # Add a special handle for smt return if additional_handler: ret = additional_handler(msg) if ret: return ret # ok, we reach here because can't handle it LOG.info("The msg <%s> lead to return internal error", msg) return 500 else: # return default code return default def handle_not_found(msg): if 'overallRC' in msg and 'rc' in msg and 'rs' in msg: # overall rc: 8, rc: 212, rs: 40 means vswitch not exist if (msg['overallRC'] == 8 and msg['rc'] == 212 and msg['rs'] == 40): LOG.debug('vswitch does not exist, change ret to 404') return 404 # overall rc: 4, rc: 5, rs: 402 means vswitch not exist if (msg['overallRC'] == 4 and msg['rc'] == 5 and msg['rs'] 
== 402): LOG.debug('disk pool not exist, change ret to 404') return 404 # overall rc: 300, rc: 300, rs: 20 means image not exist if (msg['overallRC'] == 300 and msg['rc'] == 300 and msg['rs'] == 20): LOG.debug('image not exist, change ret to 404') return 404 # overall rc: 8, rc: 400, rs: 4 means guest not exist if (msg['overallRC'] == 8 and msg['rc'] == 400 and msg['rs'] == 4): LOG.debug('guest not exist, change ret to 404') return 404 # overall rc: 8, rc: 200, rs: 4 means guest not exist if (msg['overallRC'] == 8 and msg['rc'] == 200 and msg['rs'] == 4): LOG.debug('guest not exist, change ret to 404') return 404 # overall rc: 300, rc:300, rs: 3, error message contains # "not linked; not in CP directory" means target vdev not exist if (msg['overallRC'] == 300 and msg['rc'] == 300 and msg['rs'] == 3 and 'not linked; not in CP directory' in msg['errmsg']): LOG.debug('deploy target vdev not exist,' ' change ret to 404') return 404 return 0 def handle_already_exists(msg): if 'overallRC' in msg and 'rc' in msg and 'rs' in msg: # overall rc: 8, rc: 212, rs: 36 means vswitch already exist if (msg['overallRC'] == 8 and msg['rc'] == 212 and msg['rs'] == 36): LOG.debug('vswitch already exist, change ret to 409') return 409 # overall rc: 300, rc: 300, rc: 13 means image already exist if (msg['overallRC'] == 300 and msg['rc'] == 300 and msg['rs'] == 13): LOG.debug('image already exist, change ret to 409') return 409 # overall rc: 8, rc: 400, rs: 8 means guest already exist if (msg['overallRC'] == 8 and msg['rc'] == 400 and msg['rs'] == 8): LOG.debug('guest already exist, change ret to 409') return 409 # not handle it well, go to default return 0 def handle_conflict_state(msg): if 'overallRC' in msg and 'rc' in msg and 'rs' in msg: # overall rc: 8, rc: 212, rs: 36 means vswitch already exist if (msg['overallRC'] == 300 and msg['rc'] == 300 and msg['rs'] == 5): LOG.debug('guest power off state, change ret to 409') return 409 return 0 def handle_not_found_and_conflict(msg): err = 
handle_not_found(msg) if err == 0: return handle_conflict_state(msg) return err def mask_tuple_password(message_list, secret="***"): """Replace password with *secret* in message.""" retval = [] for sani_key in _SANITIZE_KEYS: for item in message_list: item_lower = [x.lower() for x in item if isinstance(x, str)] if isinstance(item, tuple) and sani_key.lower() in item_lower: retval.append((sani_key, secret)) else: retval.append(item) return retval class SdkWsgify(wsgify): def call_func(self, req, *args, **kwargs): """Add json_error_formatter to any webob HTTPExceptions.""" try: return super(SdkWsgify, self).call_func(req, *args, **kwargs) except webob.exc.HTTPException as exc: msg = ('encounter %(error)s error') % {'error': exc} LOG.debug(msg) exc.json_formatter = json_error_formatter code = exc.status_int explanation = six.text_type(exc) fault_data = { 'overallRC': 400, 'rc': 400, 'rs': code, 'modID': SDKWSGI_MODID, 'output': '', 'errmsg': explanation} exc.text = six.text_type(json.dumps(fault_data)) raise exc zVMCloudConnector-1.6.3/zvmsdk/sdkwsgi/schemas/0000775000175000017510000000000014315232035021134 5ustar ruirui00000000000000zVMCloudConnector-1.6.3/zvmsdk/sdkwsgi/schemas/vswitch.py0000664000175000017510000000444013575566551023223 0ustar ruirui00000000000000# Copyright 2017,2018 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from zvmsdk.sdkwsgi.validation import parameter_types

# JSON schema for the vswitch-create request body: a single 'vswitch'
# object whose only mandatory field is 'name'.
create = {
    'type': 'object',
    'properties': {
        'vswitch': {
            'type': 'object',
            'properties': {
                'name': parameter_types.vswitch_name,
                'rdev': parameter_types.rdev_list,
                # FIXME: controller has its own conventions
                'controller': parameter_types.controller,
                'persist': parameter_types.boolean,
                'connection': parameter_types.connection_type,
                # queue_mem is the QDIO buffer size in megabytes (1-8).
                'queue_mem': {
                    'type': ['integer'],
                    'minimum': 1,
                    'maximum': 8,
                },
                'router': parameter_types.router_type,
                'network_type': parameter_types.network_type,
                'vid': parameter_types.vid_type,
                'port_type': parameter_types.port_type,
                'gvrp': parameter_types.gvrp_type,
                'native_vid': parameter_types.native_vid_type,
            },
            'required': ['name'],
            'additionalProperties': False,
        },
        'additionalProperties': False,
    },
    'required': ['vswitch'],
    'additionalProperties': False,
}

# JSON schema for the vswitch-update request body (grant/revoke a userid
# or set a user VLAN id); all fields optional.
update = {
    'type': 'object',
    'properties': {
        'vswitch': {
            'type': 'object',
            'properties': {
                'grant_userid': parameter_types.userid,
                'user_vlan_id': parameter_types.user_vlan_id,
                'revoke_userid': parameter_types.userid,
            },
            'additionalProperties': False,
        },
        'additionalProperties': False,
    },
    'required': ['vswitch'],
    'additionalProperties': False,
}
zVMCloudConnector-1.6.3/zvmsdk/sdkwsgi/schemas/image.py0000664000175000017510000000413013575566551022602 0ustar ruirui00000000000000# Copyright 2017,2018 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from zvmsdk.sdkwsgi.validation import parameter_types

# JSON schema for the image-create (import) request body.
create = {
    'type': 'object',
    'properties': {
        'image': {
            'type': 'object',
            'properties': {
                'image_name': parameter_types.name,
                'url': parameter_types.url,
                'image_meta': parameter_types.image_meta,
                'remote_host': parameter_types.remotehost
            },
            'required': ['image_name', 'url', 'image_meta'],
            'additionalProperties': False
        },
        'additionalProperties': False
    },
    'required': ['image'],
    'additionalProperties': False
}

# JSON schema for the image-export request body; 'dest_url' is where the
# image is copied to, optionally on a remote host.
export = {
    'type': 'object',
    'properties': {
        'location': {
            'type': 'object',
            'properties': {
                'dest_url': parameter_types.url,
                'remote_host': parameter_types.remotehost
            },
            'required': ['dest_url'],
            'additionalProperties': False
        },
        'additionalProperties': False
    },
    'required': ['location'],
    'additionalProperties': False
}

# Query-string schema for image queries (extra parameters tolerated).
query = {
    'type': 'object',
    'properties': {
        'imagename': parameter_types.image_list
    },
    'additionalProperties': True
}

# Query-string schema for a single disk-pool name.
diskname = {
    'type': 'object',
    'properties': {
        'disk': parameter_types.disk_pool
    },
    'additionalProperties': False
}

# Query-string schema for a list of disk-pool names.
diskpool = {
    'type': 'object',
    'properties': {
        'poolname': parameter_types.disk_pool_list
    },
    'additionalProperties': False
}
zVMCloudConnector-1.6.3/zvmsdk/sdkwsgi/schemas/guest.py0000664000175000017510000002104614263437505022653 0ustar ruirui00000000000000# Copyright 2017,2018 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from zvmsdk.sdkwsgi.validation import parameter_types

# Request-body schema (JSON Schema Draft 4) for guest creation.
create = {
    'type': 'object',
    'properties': {
        'guest': {
            'type': 'object',
            'properties': {
                'userid': parameter_types.userid,
                'vcpus': parameter_types.positive_integer,
                'memory': parameter_types.positive_integer,
                # profile is similar to userid
                'user_profile': parameter_types.userid_or_None,
                'disk_list': parameter_types.disk_list,
                'max_cpu': parameter_types.max_cpu,
                'max_mem': parameter_types.max_mem,
                'ipl_from': parameter_types.ipl_from,
                'ipl_param': parameter_types.ipl_param,
                'ipl_loadparam': parameter_types.ipl_loadparam,
                'dedicate_vdevs': parameter_types.dedicate_vdevs,
                'loaddev': parameter_types.loaddev,
                'account': parameter_types.account,
                'comments': parameter_types.comment_list,
                'cschedule': parameter_types.cpupool,
                'cshare': parameter_types.share,
                'rdomain': parameter_types.rdomain,
                'pcif': parameter_types.pcif
            },
            'required': ['userid', 'vcpus', 'memory'],
            'additionalProperties': False,
        },
        'additionalProperties': False,
    },
    'required': ['guest'],
    'additionalProperties': False,
}

# Body schema for guest live migration control requests.
live_migrate_vm = {
    'type': 'object',
    'properties': {
        'dest_zcc_userid': parameter_types.userid_or_None,
        'destination': parameter_types.userid,
        'parms': parameter_types.live_migrate_parms,
        'operation': parameter_types.name,
    },
    'required': ['destination', 'operation'],
    'additionalProperties': False,
}

# Body schema for creating a NIC on an existing guest.
create_nic = {
    'type': 'object',
    'properties': {
        'nic': {
            'type': 'object',
            'properties': {
                'vdev': parameter_types.vdev_or_None,
                'nic_id': parameter_types.nic_id,
                'mac_addr': parameter_types.mac_address,
                'active': parameter_types.boolean,
            },
            'additionalProperties': False,
        },
        'additionalProperties': False,
    },
    'required': ['nic'],
    'additionalProperties': False,
}

# Body schema for configuring a network interface inside the guest OS.
create_network_interface = {
    'type': 'object',
    'properties': {
        'interface': {
            'type': 'object',
            'properties': {
                'os_version': parameter_types.os_version,
                'guest_networks': parameter_types.network_list,
                'active': parameter_types.boolean,
            },
            'required': ['os_version',
                         'guest_networks'],
            'additionalProperties': False,
        },
        'additionalProperties': False,
    },
    'required': ['interface'],
    'additionalProperties': False,
}

# Body schema for removing a network interface from the guest OS.
delete_network_interface = {
    'type': 'object',
    'properties': {
        'interface': {
            'type': 'object',
            'properties': {
                'os_version': parameter_types.os_version,
                'vdev': parameter_types.vdev,
                'active': parameter_types.boolean,
            },
            'required': ['os_version', 'vdev'],
            'additionalProperties': False,
        },
        'additionalProperties': False,
    },
    'required': ['interface'],
    'additionalProperties': False,
}

# Body schema for (re)configuring a guest's minidisks.
config_minidisks = {
    'type': 'object',
    'properties': {
        'disk_info': {
            'type': 'object',
            'properties': {
                'disk_list': parameter_types.disk_conf,
            },
            'additionalProperties': False,
        },
        'additionalProperties': False,
    },
    'required': ['disk_info'],
    'additionalProperties': False,
}

# Body schema for growing the guest root volume.
grow_root_volume = {
    'type': 'object',
    'properties': {
        'os_version': parameter_types.os_version,
    },
    'required': ['os_version'],
    'additionalProperties': False,
}

# Body schema for adding disks to a guest.
create_disks = {
    'type': 'object',
    'properties': {
        'disk_info': {
            'type': 'object',
            'properties': {
                'disk_list': parameter_types.disk_list,
            },
            'additionalProperties': False,
        },
        'additionalProperties': False,
    },
    'required': ['disk_info'],
    'additionalProperties': False,
}

# Body schema for deleting disks from a guest by vdev list.
delete_disks = {
    'type': 'object',
    'properties': {
        'vdev_info': {
            'type': 'object',
            'properties': {
                'vdev_list': parameter_types.vdev_list,
            },
            'additionalProperties': False,
        },
        'additionalProperties': False,
    },
    'required': ['vdev_info'],
    'additionalProperties': False,
}

# Body schema for coupling/uncoupling a NIC to/from a vswitch.
nic_couple_uncouple = {
    'type': 'object',
    'properties': {
        'info': {
            'type': 'object',
            'properties': {
                'couple': parameter_types.boolean,
                'active': parameter_types.boolean,
                'vswitch': parameter_types.vswitch_name,
                'vlan_id': parameter_types.vlan_id_or_minus_1,
            },
            # FIXME: vswitch should be required when it's couple
            'required': ['couple'],
            'additionalProperties': False,
        },
        'additionalProperties': False,
    },
    'required': ['info'],
    'additionalProperties': False,
}

# Body schema for deploying an image onto a guest.
deploy = {
    'type': 'object',
    'properties': {
        'image': parameter_types.name,
        'transportfiles': {'type': ['string']},
        'remotehost': parameter_types.remotehost,
        'vdev': parameter_types.vdev_or_None,
        'hostname': parameter_types.hostname,
        'skipdiskcopy': parameter_types.boolean,
    },
    'required': ['image'],
    'additionalProperties': False,
}

# Body schema for capturing a guest into an image.
capture = {
    'type': 'object',
    'properties': {
        'image': parameter_types.name,
        'capture_type': parameter_types.capture_type,
        'compress_level': parameter_types.compress_level,
    },
    'required': ['image'],
    'additionalProperties': False,
}

# Body schema for live resizing of guest CPUs.
resize_cpus = {
    'type': 'object',
    'properties': {
        'cpu_cnt': parameter_types.max_cpu,
    },
    'required': ['cpu_cnt'],
    'additionalProperties': False,
}

# Body schema for live resizing of guest memory.
resize_mem = {
    'type': 'object',
    'properties': {
        'size': parameter_types.max_mem,
    },
    'required': ['size'],
    'additionalProperties': False,
}

# Query-string schema accepting a list of userids.
userid_list_query = {
    'type': 'object',
    'properties': {
        'userid': parameter_types.userid_list,
    },
    'additionalProperties': False
}

# Body schema for registering an existing VM with the cloud connector.
# Extra properties are deliberately tolerated here.
register_vm = {
    'type': 'object',
    'properties': {
        'meta': {'type': ['string']},
        'net_set': {'type': ['string']},
        'port': {'type': ['string']},
    },
    'required': ['meta', 'net_set'],
    'additionalProperties': True
}

# Body schema for deregistering a VM.
deregister_vm = {
    'type': 'object',
    'properties': {
        'userid': parameter_types.userid,
    },
    'additionalProperties': False
}

# Query-string schema accepting an array of userid lists.
userid_list_array_query = {
    'type': 'object',
    'properties': {
        'userid': parameter_types.userid_list_array,
    },
    'additionalProperties': False
}

# Query-string schema for NIC database lookups.
nic_DB_info = {
    'type': 'object',
    'properties': {
        'userid': parameter_types.userid,
        'nic_id': parameter_types.nic_id,
        'vswitch': parameter_types.vswitch_name,
    },
    'additionalProperties': False,
}

# Body schema for starting a guest (optional wait timeout).
start = {
    'type': 'object',
    'properties': {
        'userid': parameter_types.userid,
        'timeout': parameter_types.non_negative_integer,
    },
    'additionalProperties': False,
}

# Body schema for stopping a guest.
stop = {
    'type': 'object',
    'properties': {
        'userid': parameter_types.userid,
        'timeout': parameter_types.non_negative_integer,
        'poll_interval': parameter_types.non_negative_integer,
    },
    'additionalProperties': False,
}

# Body schema for soft-stopping (graceful shutdown of) a guest.
softstop = {
    'type': 'object',
    'properties': {
        'userid': parameter_types.userid,
        'timeout': parameter_types.non_negative_integer,
        'poll_interval': parameter_types.non_negative_integer,
    },
    'additionalProperties': False,
}
zVMCloudConnector-1.6.3/zvmsdk/sdkwsgi/schemas/host.py0000664000175000017510000000144414263437505022501 0ustar ruirui00000000000000# Copyright 2021 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from zvmsdk.sdkwsgi.validation import parameter_types

# Query-string schema for host volume info lookups.
volume = {
    'type': 'object',
    'properties': {
        'volumename': parameter_types.volume_list
    },
    'additionalProperties': False
}
zVMCloudConnector-1.6.3/zvmsdk/sdkwsgi/schemas/__init__.py0000664000175000017510000000000013575566551023257 0ustar ruirui00000000000000zVMCloudConnector-1.6.3/zvmsdk/sdkwsgi/schemas/volume.py0000664000175000017510000001124414315210052023012 0ustar ruirui00000000000000# Copyright 2017,2022 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from zvmsdk.sdkwsgi.validation import parameter_types

# Body schema (JSON Schema Draft 4) for attaching a volume to a guest.
attach = {
    'type': 'object',
    'properties': {
        'info': {
            'type': 'object',
            'properties': {
                'connection': parameter_types.connection_info,
            },
            'required': ['connection'],
            'additionalProperties': False,
        },
        'additionalProperties': False,
    },
    'required': ['info'],
    'additionalProperties': False,
}

# Body schema for detaching a volume from a guest.
detach = {
    'type': 'object',
    'properties': {
        'info': {
            'type': 'object',
            'properties': {
                'connection': parameter_types.connection_info,
            },
            'required': ['connection'],
            'additionalProperties': False,
        },
        'additionalProperties': False,
    },
    'required': ['info'],
    'additionalProperties': False,
}

# Query-string schema for FCP device usage lookup.
get_fcp_usage = {
    'type': 'object',
    'properties': {
        'fcp_id': parameter_types.fcp_id,
    },
    'required': ['fcp_id'],
    'additionalProperties': False,
}

# Query-string schema for listing FCP Multipath Templates.
get_fcp_templates = {
    'type': 'object',
    'properties': {
        'template_id_list': parameter_types.fcp_template_id_list,
        'assigner_id': parameter_types.single_param(parameter_types.userid),
        'host_default': parameter_types.single_param(parameter_types.boolean),
        'storage_providers': {
            'type': 'array'
        }
    },
    'additionalProperties': False,
}

# Query-string schema for detailed FCP Multipath Template queries.
get_fcp_templates_details = {
    'type': 'object',
    'properties': {
        'template_id_list': parameter_types.fcp_template_id_list,
        'raw': parameter_types.single_param(parameter_types.boolean),
        'statistics': parameter_types.single_param(parameter_types.boolean),
        'sync_with_zvm': parameter_types.single_param(parameter_types.boolean),
    },
    'additionalProperties': False,
}

# Body schema for setting FCP device usage (reserve flag + connections).
set_fcp_usage = {
    'type': 'object',
    'properties': {
        'info': {
            'type': 'object',
            'properties': {
                'userid': parameter_types.userid,
                # reserved is a 0/1 flag, not a count
                'reserved': {
                    'type': ['integer'],
                    'minimum': 0,
                    'maximum': 1,
                },
                'connections': {
                    'type': ['integer'],
                },
                'fcp_template_id': parameter_types.fcp_template_id,
            },
            'required': ['reserved', 'connections'],
            'additionalProperties': False,
        }
    },
    'required': ['info'],
    'additionalProperties': False,
}

# Body schema for querying a guest's volume connector.
get_volume_connector = {
    'type': 'object',
    'properties': {
        'userid': parameter_types.userid_list,
        'info': {
            'type': 'object',
            'properties': {
                'reserve': parameter_types.boolean,
                'fcp_template_id': parameter_types.fcp_template_id,
                'storage_provider': parameter_types.name
            },
            # NOTE(review): this 'required' is nested inside the 'info'
            # object schema, so it demands a property literally named
            # "info" INSIDE info -- almost certainly it was meant to live
            # at the top level of the schema. Confirm with callers before
            # moving it, as that would start rejecting today's requests.
            'required': ['info'],
            'additionalProperties': False,
        },
        # NOTE(review): key inside 'properties' declares a property named
        # "additionalProperties" -- presumably misplaced (see duplicate
        # key below, which carries the real restriction).
        'additionalProperties': False,
    },
    'additionalProperties': False,
}

# Body schema for creating an FCP Multipath Template.
create_fcp_template = {
    'type': 'object',
    'properties': {
        'name': parameter_types.name,
        'description': {
            'type': 'string'
        },
        'fcp_devices': {
            'type': 'string'
        },
        'host_default': parameter_types.boolean,
        'storage_providers': {
            'type': 'array'
        },
        'min_fcp_paths_count': parameter_types.positive_integer
    },
    'required': ['name'],
    'additionalProperties': False,
}

# Body schema for editing an existing FCP Multipath Template.
edit_fcp_template = {
    'type': 'object',
    'properties': {
        'fcp_template_id': parameter_types.fcp_template_id,
        'name': parameter_types.name,
        'description': {
            'type': 'string'
        },
        'fcp_devices': {
            'type': 'string'
        },
        'host_default': parameter_types.boolean,
        'storage_providers': {
            'type': 'array'
        },
        'min_fcp_paths_count': parameter_types.positive_integer
    },
    'required': ['fcp_template_id'],
    'additionalProperties': False
}
zVMCloudConnector-1.6.3/zvmsdk/sdkwsgi/deploy.py0000664000175000017510000001127413575566551021400 0ustar ruirui00000000000000# Copyright 2017 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Deployment handling for sdk API.""" import json import six import sys import traceback import webob from zvmsdk import log from zvmsdk.sdkwsgi import handler from zvmsdk.sdkwsgi import requestlog from zvmsdk.sdkwsgi import util LOG = log.LOG NAME = "zvm-cloud-connector" def _find_fault(clazz, encountered=None): if not encountered: encountered = [] for subclass in clazz.__subclasses__(): if subclass not in encountered: encountered.append(subclass) for subsubclass in _find_fault(subclass, encountered): yield subsubclass yield subclass class Fault(webob.exc.HTTPException): def __init__(self, exception): self.wrapped_exc = exception for key, value in list(self.wrapped_exc.headers.items()): self.wrapped_exc.headers[key] = str(value) self.status_int = exception.status_int @webob.dec.wsgify() def __call__(self, req): code = self.wrapped_exc.status_int explanation = self.wrapped_exc.explanation LOG.debug("Returning %(code)s to user: %(explanation)s", {'code': code, 'explanation': explanation}) fault_data = { 'overallRC': 400, 'rc': 400, 'rs': code, 'modID': util.SDKWSGI_MODID, 'output': '', 'errmsg': explanation} if code == 413 or code == 429: retry = self.wrapped_exc.headers.get('Retry-After', None) if retry: fault_data['retryAfter'] = retry self.wrapped_exc.content_type = 'application/json' self.wrapped_exc.charset = 'UTF-8' self.wrapped_exc.text = six.text_type(json.dumps(fault_data)) return self.wrapped_exc def __str__(self): return self.wrapped_exc.__str__() class FaultWrapper(object): """Calls down the middleware stack, making exceptions into faults.""" _status_to_type = {} @staticmethod def status_to_type(status): if not FaultWrapper._status_to_type: for clazz in _find_fault(webob.exc.HTTPError): FaultWrapper._status_to_type[clazz.code] = clazz return FaultWrapper._status_to_type.get( status, webob.exc.HTTPInternalServerError)() def __init__(self, application): self.application = application def _error(self, inner, req): exc_info = 
traceback.extract_tb(sys.exc_info()[2])[-1] LOG.info('Got unhandled exception: %s', exc_info) safe = getattr(inner, 'safe', False) headers = getattr(inner, 'headers', None) status = getattr(inner, 'code', 500) if status is None: status = 500 outer = self.status_to_type(status) if headers: outer.headers = headers if safe: outer.explanation = '%s: %s' % (inner.__class__.__name__, inner.message) return Fault(outer) @webob.dec.wsgify() def __call__(self, req): try: return req.get_response(self.application) except Exception as ex: return self._error(ex, req) class HeaderControl(object): def __init__(self, application): self.application = application @webob.dec.wsgify def __call__(self, req): response = req.get_response(self.application) response.headers.add('cache-control', 'no-cache') return response def deploy(project_name): """Assemble the middleware pipeline""" request_log = requestlog.RequestLog header_addon = HeaderControl fault_wrapper = FaultWrapper application = handler.SdkHandler() # currently we have 3 middleware for middleware in (header_addon, fault_wrapper, request_log, ): if middleware: application = middleware(application) return application def loadapp(project_name=NAME): application = deploy(project_name) return application def init_application(): # build and return WSGI app return loadapp() zVMCloudConnector-1.6.3/zvmsdk/sdkwsgi/validation/0000775000175000017510000000000014315232035021643 5ustar ruirui00000000000000zVMCloudConnector-1.6.3/zvmsdk/sdkwsgi/validation/__init__.py0000664000175000017510000001242513575566551024004 0ustar ruirui00000000000000# Copyright 2017 IBM Corp. # Copyright 2013 NEC Corporation. # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools import re import jsonschema from jsonschema import exceptions as jsonschema_exc import six from zvmsdk import exception def _schema_validation_helper(schema, target, args, kwargs, is_body=True): schema_validator = _SchemaValidator( schema, is_body=is_body) schema_validator.validate(target) def schema(request_body_schema): def add_validator(func): @functools.wraps(func) def wrapper(*args, **kwargs): _schema_validation_helper(request_body_schema, kwargs['body'], args, kwargs) return func(*args, **kwargs) return wrapper return add_validator class FormatChecker(jsonschema.FormatChecker): def check(self, instance, format): if format not in self.checkers: return func, raises = self.checkers[format] result, cause = None, None try: result = func(instance) except raises as e: cause = e if not result: msg = "%r is not a %r" % (instance, format) raise jsonschema_exc.FormatError(msg, cause=cause) class _SchemaValidator(object): validator = None validator_org = jsonschema.Draft4Validator def __init__(self, schema, relax_additional_properties=False, is_body=True): self.is_body = is_body validators = { 'dummy': self._dummy } validator_cls = jsonschema.validators.extend(self.validator_org, validators) format_checker = FormatChecker() self.validator = validator_cls(schema, format_checker=format_checker) def _dummy(self, validator, minimum, instance, schema): pass def validate(self, *args, **kwargs): try: self.validator.validate(*args, **kwargs) except jsonschema.ValidationError as ex: if isinstance(ex.cause, exception.InvalidName): detail = ex.cause.format_message() 
elif len(ex.path) > 0: if self.is_body: detail = ("Invalid input for field/attribute %(path)s. " "Value: %(value)s. %(message)s") else: detail = ("Invalid input for query parameters %(path)s. " "Value: %(value)s. %(message)s") detail = detail % { 'path': ex.path.pop(), 'value': ex.instance, 'message': ex.message } else: detail = ex.message raise exception.ValidationError(detail=detail) except TypeError as ex: detail = six.text_type(ex) raise exception.ValidationError(detail=detail) def _remove_unexpected_query_parameters(schema, req): """Remove unexpected properties from the req.GET.""" additional_properties = schema.get('addtionalProperties', True) if additional_properties: pattern_regexes = [] patterns = schema.get('patternProperties', None) if patterns: for regex in patterns: pattern_regexes.append(re.compile(regex)) for param in set(req.GET.keys()): if param not in schema['properties'].keys(): if not (list(regex for regex in pattern_regexes if regex.match(param))): del req.GET[param] def query_schema(query_params_schema, min_version=None, max_version=None): """Register a schema to validate request query parameters.""" def add_validator(func): @functools.wraps(func) def wrapper(*args, **kwargs): if 'req' in kwargs: req = kwargs['req'] else: req = args[1] if req.environ['wsgiorg.routing_args'][1]: if _schema_validation_helper(query_params_schema, req.environ['wsgiorg.routing_args'][1], args, kwargs, is_body=False): _remove_unexpected_query_parameters(query_params_schema, req) else: if _schema_validation_helper(query_params_schema, req.GET.dict_of_lists(), args, kwargs, is_body=False): _remove_unexpected_query_parameters(query_params_schema, req) return func(*args, **kwargs) return wrapper return add_validator zVMCloudConnector-1.6.3/zvmsdk/sdkwsgi/validation/parameter_types.py0000664000175000017510000003310014315210052025411 0ustar ruirui00000000000000# Copyright 2017,2022 IBM Corp. # Copyright 2013 NEC Corporation. # All rights reserved. 
# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import re import unicodedata import six def single_param(schema): ret = multi_params(schema) ret['maxItems'] = 1 return ret def multi_params(schema): return {'type': 'array', 'items': schema} class ValidationRegex(object): def __init__(self, regex, reason): self.regex = regex self.reason = reason def _is_printable(char): category = unicodedata.category(char) return (not category.startswith("C") and (not category.startswith("Z") or category == "Zs")) def _get_all_chars(): for i in range(0xFFFF): yield six.unichr(i) def _build_regex_range(ws=True, invert=False, exclude=None): if exclude is None: exclude = [] regex = "" in_range = False last = None last_added = None def valid_char(char): if char in exclude: result = False elif ws: result = _is_printable(char) else: # Zs is the unicode class for space characters, of which # there are about 10 in this range. result = (_is_printable(char) and unicodedata.category(char) != "Zs") if invert is True: return not result return result # iterate through the entire character range. in_ for c in _get_all_chars(): if valid_char(c): if not in_range: regex += re.escape(c) last_added = c in_range = True else: if in_range and last != last_added: regex += "-" + re.escape(last) in_range = False last = c else: if in_range: regex += "-" + re.escape(c) return regex valid_name_regex_base = '^(?![%s])[%s]*(? 
0) and not environ.get('CONTENT_TYPE'): msg = 'content-type header required when content-length > 0' LOG.debug(msg) raise webob.exc.HTTPBadRequest(msg, json_formatter=util.json_error_formatter) except ValueError: msg = 'content-length header must be an integer' LOG.debug(msg) raise webob.exc.HTTPBadRequest(msg, json_formatter=util.json_error_formatter) try: return dispatch(environ, start_response, self._map) except exception.NotFound as exc: raise webob.exc.HTTPNotFound( exc, json_formatter=util.json_error_formatter) except Exception: raise zVMCloudConnector-1.6.3/zvmsdk/database.py0000775000175000017510000025067514315210052020171 0ustar ruirui00000000000000# Copyright 2017, 2022 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import contextlib
import random
import os
import six
import sqlite3
import threading
import uuid
import json

from zvmsdk import config
from zvmsdk import constants as const
from zvmsdk import exception
from zvmsdk import log
from zvmsdk import utils


CONF = config.CONF
LOG = log.LOG

# mode used when creating the database directory
_DIR_MODE = 0o755
# per-database singleton sqlite3 connections, created lazily
_NETWORK_CONN = None
_IMAGE_CONN = None
_GUEST_CONN = None
_FCP_CONN = None
# one re-entrant lock per database: sqlite connections are shared
# across threads (check_same_thread=False), so access is serialized here
_DBLOCK_VOLUME = threading.RLock()
_DBLOCK_NETWORK = threading.RLock()
_DBLOCK_IMAGE = threading.RLock()
_DBLOCK_GUEST = threading.RLock()
_DBLOCK_FCP = threading.RLock()


@contextlib.contextmanager
def get_network_conn():
    """Yield the (lazily created) network DB connection under its lock.

    Any error is logged and re-raised as SDKNetworkOperationError.
    """
    global _NETWORK_CONN, _DBLOCK_NETWORK
    if not _NETWORK_CONN:
        _NETWORK_CONN = _init_db_conn(const.DATABASE_NETWORK)

    _DBLOCK_NETWORK.acquire()
    try:
        yield _NETWORK_CONN
    except Exception as err:
        msg = "Execute SQL statements error: %s" % six.text_type(err)
        LOG.error(msg)
        raise exception.SDKNetworkOperationError(rs=1, msg=msg)
    finally:
        _DBLOCK_NETWORK.release()


@contextlib.contextmanager
def get_image_conn():
    """Yield the (lazily created) image DB connection under its lock."""
    global _IMAGE_CONN, _DBLOCK_IMAGE
    if not _IMAGE_CONN:
        _IMAGE_CONN = _init_db_conn(const.DATABASE_IMAGE)

    _DBLOCK_IMAGE.acquire()
    try:
        yield _IMAGE_CONN
    except Exception as err:
        LOG.error("Execute SQL statements error: %s", six.text_type(err))
        raise exception.SDKDatabaseException(msg=err)
    finally:
        _DBLOCK_IMAGE.release()


@contextlib.contextmanager
def get_guest_conn():
    """Yield the (lazily created) guest DB connection under its lock."""
    global _GUEST_CONN, _DBLOCK_GUEST
    if not _GUEST_CONN:
        _GUEST_CONN = _init_db_conn(const.DATABASE_GUEST)

    _DBLOCK_GUEST.acquire()
    try:
        yield _GUEST_CONN
    except Exception as err:
        msg = "Execute SQL statements error: %s" % six.text_type(err)
        LOG.error(msg)
        raise exception.SDKGuestOperationError(rs=1, msg=msg)
    finally:
        _DBLOCK_GUEST.release()


@contextlib.contextmanager
def get_fcp_conn():
    """Yield the FCP DB connection under its lock, inside a transaction.

    Begins an explicit transaction only when no transaction is already
    active on the shared connection, committing/rolling back on exit in
    that case; nested uses piggyback on the outer transaction.
    """
    global _FCP_CONN, _DBLOCK_FCP
    if not _FCP_CONN:
        _FCP_CONN = _init_db_conn(const.DATABASE_FCP)
        # enable access columns by name
        _FCP_CONN.row_factory = sqlite3.Row

    _DBLOCK_FCP.acquire()
    try:
        # sqlite DB not allow to start a transaction
within a transaction, # so, only begin a transaction when no other alive transaction if not _FCP_CONN.in_transaction: _FCP_CONN.execute("BEGIN") skip_commit = False else: skip_commit = True yield _FCP_CONN except exception.SDKBaseException as err: # rollback only if _FCP_CONN.execute("BEGIN") # is invoked when entering the contextmanager if not skip_commit: _FCP_CONN.execute("ROLLBACK") msg = "Got SDK exception in FCP DB operation: %s" % six.text_type(err) LOG.error(msg) raise except Exception as err: # rollback only if _FCP_CONN.execute("BEGIN") # is invoked when entering the contextmanager if not skip_commit: _FCP_CONN.execute("ROLLBACK") msg = "Execute SQL statements error: %s" % six.text_type(err) LOG.error(msg) raise exception.SDKGuestOperationError(rs=1, msg=msg) else: # commit only if _FCP_CONN.execute("BEGIN") # is invoked when entering the contextmanager if not skip_commit: _FCP_CONN.execute("COMMIT") finally: _DBLOCK_FCP.release() def _init_db_conn(db_file): db_dir = CONF.database.dir if not os.path.exists(db_dir): os.makedirs(db_dir, _DIR_MODE) database = os.path.join(db_dir, db_file) return sqlite3.connect(database, check_same_thread=False, isolation_level=None) class NetworkDbOperator(object): def __init__(self): self._module_id = 'network' self._create_switch_table() def _create_switch_table(self): create_table_sql = ' '.join(( 'create table if not exists switch (', 'userid varchar(8) COLLATE NOCASE,', 'interface varchar(4) COLLATE NOCASE,', 'switch varchar(8) COLLATE NOCASE,', 'port varchar(128) COLLATE NOCASE,', 'comments varchar(128),', 'primary key (userid, interface));')) with get_network_conn() as conn: conn.execute(create_table_sql) def _get_switch_by_user_interface(self, userid, interface): with get_network_conn() as conn: res = conn.execute("SELECT * FROM switch " "WHERE userid=? 
and interface=?", (userid, interface)) switch_record = res.fetchall() if len(switch_record) == 1: return switch_record[0] elif len(switch_record) == 0: return None def switch_delete_record_for_userid(self, userid): """Remove userid switch record from switch table.""" with get_network_conn() as conn: conn.execute("DELETE FROM switch WHERE userid=?", (userid,)) LOG.debug("Switch record for user %s is removed from " "switch table" % userid) def switch_delete_record_for_nic(self, userid, interface): """Remove userid switch record from switch table.""" with get_network_conn() as conn: conn.execute("DELETE FROM switch WHERE userid=? and interface=?", (userid, interface)) LOG.debug("Switch record for user %s with nic %s is removed from " "switch table" % (userid, interface)) def switch_add_record(self, userid, interface, port=None, switch=None, comments=None): """Add userid and nic name address into switch table.""" with get_network_conn() as conn: conn.execute("INSERT INTO switch VALUES (?, ?, ?, ?, ?)", (userid, interface, switch, port, comments)) LOG.debug("New record in the switch table: user %s, " "nic %s, port %s" % (userid, interface, port)) def switch_add_record_migrated(self, userid, interface, switch, port=None, comments=None): """Add userid and interfaces and switch into switch table.""" with get_network_conn() as conn: conn.execute("INSERT INTO switch VALUES (?, ?, ?, ?, ?)", (userid, interface, switch, port, comments)) LOG.debug("New record in the switch table: user %s, " "nic %s, switch %s" % (userid, interface, switch)) def switch_update_record_with_switch(self, userid, interface, switch=None): """Update information in switch table.""" if not self._get_switch_by_user_interface(userid, interface): msg = "User %s with nic %s does not exist in DB" % (userid, interface) LOG.error(msg) obj_desc = ('User %s with nic %s' % (userid, interface)) raise exception.SDKObjectNotExistError(obj_desc, modID=self._module_id) if switch is not None: with get_network_conn() as 
conn: conn.execute("UPDATE switch SET switch=? " "WHERE userid=? and interface=?", (switch, userid, interface)) LOG.debug("Set switch to %s for user %s with nic %s " "in switch table" % (switch, userid, interface)) else: with get_network_conn() as conn: conn.execute("UPDATE switch SET switch=NULL " "WHERE userid=? and interface=?", (userid, interface)) LOG.debug("Set switch to None for user %s with nic %s " "in switch table" % (userid, interface)) def _parse_switch_record(self, switch_list): # Map each switch record to be a dict, with the key is the field name # in switch DB switch_keys_list = ['userid', 'interface', 'switch', 'port', 'comments'] switch_result = [] for item in switch_list: switch_item = dict(zip(switch_keys_list, item)) switch_result.append(switch_item) return switch_result def switch_select_table(self): with get_network_conn() as conn: result = conn.execute("SELECT * FROM switch") nic_settings = result.fetchall() return self._parse_switch_record(nic_settings) def switch_select_record_for_userid(self, userid): with get_network_conn() as conn: result = conn.execute("SELECT * FROM switch " "WHERE userid=?", (userid,)) switch_info = result.fetchall() return self._parse_switch_record(switch_info) def switch_select_record(self, userid=None, nic_id=None, vswitch=None): if ((userid is None) and (nic_id is None) and (vswitch is None)): return self.switch_select_table() sql_cmd = "SELECT * FROM switch WHERE" sql_var = [] if userid is not None: sql_cmd += " userid=? and" sql_var.append(userid) if nic_id is not None: sql_cmd += " port=? and" sql_var.append(nic_id) if vswitch is not None: sql_cmd += " switch=?" 
sql_var.append(vswitch) # remove the tailing ' and' sql_cmd = sql_cmd.strip(' and') with get_network_conn() as conn: result = conn.execute(sql_cmd, sql_var) switch_list = result.fetchall() return self._parse_switch_record(switch_list) class FCPDbOperator(object): def __init__(self): self._module_id = 'volume' self._initialize_table() def _initialize_table(self): # fcp_info_tables: # map the table name to the corresponding SQL to create it # key is the name of table to be created # value is the SQL to be executed to create the table fcp_info_tables = {} # table for basic info of FCP devices # fcp_id: FCP device ID, the primary key # assigner_id: VM userid representing an unique VM, # it is allocated by zvmsdk and may differ with owner # connections: how many volumes connected to this FCP device, # 0 means no assigner # reserved: 0 for not reserved by some operation # wwpn_npiv: NPIV WWPN # wwpn_phy: Physical WWPN # chpid: channel ID of FCP device # state: FCP device status # owner: VM userid representing an unique VM, # it is read from z/VM hypervisor and # may differ with assigner_id # tmpl_id: indicate from which FCP Multipath Template this FCP device was # allocated, not to which FCP Multipath Template this FCP # device belong. because a FCP device may belong # to multiple FCP Multipath Templates. 
fcp_info_tables['fcp'] = ( "CREATE TABLE IF NOT EXISTS fcp(" "fcp_id char(4) NOT NULL COLLATE NOCASE," "assigner_id varchar(8) NOT NULL DEFAULT '' COLLATE NOCASE," "connections integer NOT NULL DEFAULT 0," "reserved integer NOT NULL DEFAULT 0," "wwpn_npiv varchar(16) NOT NULL DEFAULT '' COLLATE NOCASE," "wwpn_phy varchar(16) NOT NULL DEFAULT '' COLLATE NOCASE," "chpid char(2) NOT NULL DEFAULT '' COLLATE NOCASE," "state varchar(8) NOT NULL DEFAULT '' COLLATE NOCASE," "owner varchar(8) NOT NULL DEFAULT '' COLLATE NOCASE," "tmpl_id varchar(32) NOT NULL DEFAULT '' COLLATE NOCASE," "PRIMARY KEY (fcp_id))") # table for FCP Multipath Templates: # id: template id, the primary key # name: the name of the template # description: the description for this template # is_default: is this template the default one on this host or not # 1/True for yes, 0/False for no # note: SQLite recognizes the keywords "TRUE" and "FALSE", # those keywords are saved in SQLite # as integer 1 and 0 respectively fcp_info_tables['template'] = ( "CREATE TABLE IF NOT EXISTS template(" "id varchar(32) NOT NULL COLLATE NOCASE," "name varchar(128) NOT NULL COLLATE NOCASE," "description varchar(255) NOT NULL DEFAULT '' COLLATE NOCASE," "is_default integer NOT NULL DEFAULT 0," "min_fcp_paths_count integer NOT NULL DEFAULT -1," "PRIMARY KEY (id))") # table for relationships between templates and storage providers: # sp_name: name of storage provider, the primary key # tmpl_id: template id fcp_info_tables['template_sp_mapping'] = ( 'CREATE TABLE IF NOT EXISTS template_sp_mapping(' 'sp_name varchar(128) NOT NULL COLLATE NOCASE,' 'tmpl_id varchar(32) NOT NULL COLLATE NOCASE,' 'PRIMARY KEY (sp_name))') # table for relationships between templates and FCP devices: # fcp_id: the fcp device ID # tmpl_id: the template id # path: the path number, 0 means the FCP device is in path0 # 1 means the FCP devices is in path1, and so on. 
# composite primary key (fcp_id, tmpl_id) fcp_info_tables['template_fcp_mapping'] = ( 'CREATE TABLE IF NOT EXISTS template_fcp_mapping(' 'fcp_id char(4) NOT NULL COLLATE NOCASE,' 'tmpl_id varchar(32) NOT NULL COLLATE NOCASE,' 'path integer NOT NULL,' 'PRIMARY KEY (fcp_id, tmpl_id))') # create all the tables LOG.info("Initializing FCP database.") with get_fcp_conn() as conn: for table_name in fcp_info_tables: create_table_sql = fcp_info_tables[table_name] conn.execute(create_table_sql) LOG.info("FCP database initialized.") ######################################################### # DML for Table fcp # ######################################################### def unreserve_fcps(self, fcp_ids): if not fcp_ids: return fcp_update_info = [] for fcp_id in fcp_ids: fcp_update_info.append((fcp_id,)) with get_fcp_conn() as conn: conn.executemany("UPDATE fcp SET reserved=0, tmpl_id='' " "WHERE fcp_id=?", fcp_update_info) def reserve_fcps(self, fcp_ids, assigner_id, fcp_template_id): fcp_update_info = [] for fcp_id in fcp_ids: fcp_update_info.append( (assigner_id, fcp_template_id, fcp_id)) with get_fcp_conn() as conn: conn.executemany("UPDATE fcp " "SET reserved=1, assigner_id=?, tmpl_id=? " "WHERE fcp_id=?", fcp_update_info) def bulk_insert_zvm_fcp_info_into_fcp_table(self, fcp_info_list: list): """Insert multiple records into fcp table witch fcp info queried from z/VM. 
        The input fcp_info_list should be list of FCP info, for example:
        [(fcp_id, wwpn_npiv, wwpn_phy, chpid, state, owner),
         ('1a06', 'c05076de33000355', 'c05076de33002641', '27',
          'active', 'user1'),
         ('1a07', 'c05076de33000355', 'c05076de33002641', '27',
          'free', 'user1'),
         ('1a08', 'c05076de33000355', 'c05076de33002641', '27',
          'active', 'user2')]
        """
        with get_fcp_conn() as conn:
            conn.executemany("INSERT INTO fcp (fcp_id, wwpn_npiv, wwpn_phy, "
                             "chpid, state, owner) "
                             "VALUES (?, ?, ?, ?, ?, ?)", fcp_info_list)

    def bulk_delete_from_fcp_table(self, fcp_id_list: list):
        """Delete multiple FCP records from fcp table.

        The fcp_id_list is list of FCP IDs, for example:
        ['1a00', '1b01', '1c02']
        """
        # convert to the sequence-of-tuples shape executemany() expects
        fcp_id_list = [(fcp_id,) for fcp_id in fcp_id_list]
        with get_fcp_conn() as conn:
            conn.executemany("DELETE FROM fcp "
                             "WHERE fcp_id=?", fcp_id_list)

    def bulk_update_zvm_fcp_info_in_fcp_table(self, fcp_info_list: list):
        """Update multiple records with FCP info queried from z/VM.

        The input fcp_info_list should be list of FCP info set, for example:
        [(fcp_id, wwpn_npiv, wwpn_phy, chpid, state, owner),
         ('1a06', 'c05076de33000355', 'c05076de33002641', '27',
          'active', 'user1'),
         ('1a07', 'c05076de33000355', 'c05076de33002641', '27',
          'free', 'user1'),
         ('1a08', 'c05076de33000355', 'c05076de33002641', '27',
          'active', 'user2')]
        """
        data_to_update = list()
        for fcp in fcp_info_list:
            # change order of update data
            # the new order is like:
            # (wwpn_npiv, wwpn_phy, chpid, state, owner, fcp_id)
            # because fcp_id is the last bind parameter (WHERE clause)
            new_record = list(fcp[1:]) + [fcp[0]]
            data_to_update.append(new_record)
        with get_fcp_conn() as conn:
            conn.executemany("UPDATE fcp SET wwpn_npiv=?, wwpn_phy=?, "
                             "chpid=?, state=?, owner=? WHERE "
                             "fcp_id=?", data_to_update)

    def bulk_update_state_in_fcp_table(self, fcp_id_list: list,
                                       new_state: str):
        """Update the state column of multiple FCP records to new_state
        (e.g. to mark devices no longer found in z/VM)."""
        data_to_update = list()
        for id in fcp_id_list:
            new_record = [new_state, id]
            data_to_update.append(new_record)
        with get_fcp_conn() as conn:
            conn.executemany("UPDATE fcp set state=? "
                             "WHERE fcp_id=?", data_to_update)

    def get_all_fcps_of_assigner(self, assigner_id=None):
        """Get dict of all fcp records of specified assigner.
        If assigner is None, will get all fcp records.
        Format of return is like :
        [
          (fcp_id, userid, connections, reserved, wwpn_npiv, wwpn_phy,
           chpid, state, owner, tmpl_id),
          ('283c', 'user1', 2, 1, 'c05076ddf7000002', 'c05076ddf7001d81',
           27,'active', 'user1', ''),
          ('483c', 'user2', 0, 0, 'c05076ddf7000001', 'c05076ddf7001d82',
           27, 'free', 'NONE', '')
        ]

        :raises SDKObjectNotExistError: when no matching record exists
        """
        with get_fcp_conn() as conn:
            if assigner_id:
                result = conn.execute("SELECT fcp_id, assigner_id, "
                                      "connections, reserved, wwpn_npiv, "
                                      "wwpn_phy, chpid, state, owner, "
                                      "tmpl_id FROM fcp WHERE "
                                      "assigner_id=?", (assigner_id,))
            else:
                result = conn.execute("SELECT fcp_id, assigner_id, "
                                      "connections, reserved, wwpn_npiv, "
                                      "wwpn_phy, chpid, state, owner, "
                                      "tmpl_id FROM fcp")
            fcp_info = result.fetchall()
            if not fcp_info:
                if assigner_id:
                    obj_desc = ("FCP record in fcp table belongs to "
                                "userid: %s" % assigner_id)
                else:
                    obj_desc = "FCP records in fcp table"
                raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
                                                       modID=self._module_id)
            return fcp_info

    def get_usage_of_fcp(self, fcp_id):
        """Return (assigner_id, reserved, connections, tmpl_id) of an FCP.

        :raises SDKObjectNotExistError: when fcp_id is not in the fcp table
        """
        connections = 0
        reserved = 0
        with get_fcp_conn() as conn:
            result = conn.execute("SELECT * FROM fcp "
                                  "WHERE fcp_id=?", (fcp_id,))
            fcp_info = result.fetchone()
            if not fcp_info:
                msg = 'FCP with id: %s does not exist in DB.'
% fcp_id LOG.error(msg) obj_desc = "FCP with id: %s" % fcp_id raise exception.SDKObjectNotExistError(obj_desc=obj_desc, modID=self._module_id) assigner_id = fcp_info['assigner_id'] reserved = fcp_info['reserved'] connections = fcp_info['connections'] tmpl_id = fcp_info['tmpl_id'] return assigner_id, reserved, connections, tmpl_id def update_usage_of_fcp(self, fcp, assigner_id, reserved, connections, fcp_template_id): with get_fcp_conn() as conn: conn.execute("UPDATE fcp SET assigner_id=?, reserved=?, " "connections=?, tmpl_id=? WHERE fcp_id=?", (assigner_id, reserved, connections, fcp_template_id, fcp)) def increase_connections_by_assigner(self, fcp, assigner_id): """Increase connections of the given FCP device :param fcp: (str) a FCP device :param assigner_id: (str) the userid of the virtual machine :return connections: (dict) the connections of the FCP device """ with get_fcp_conn() as conn: result = conn.execute("SELECT * FROM fcp WHERE fcp_id=? " "AND assigner_id=?", (fcp, assigner_id)) fcp_info = result.fetchone() if not fcp_info: msg = 'FCP with id: %s does not exist in DB.' % fcp LOG.error(msg) obj_desc = "FCP with id: %s" % fcp raise exception.SDKObjectNotExistError(obj_desc=obj_desc, modID=self._module_id) connections = fcp_info['connections'] + 1 conn.execute("UPDATE fcp SET connections=? WHERE fcp_id=? " "AND assigner_id=?", (connections, fcp, assigner_id)) # check the result result = conn.execute("SELECT connections FROM fcp " "WHERE fcp_id=?", (fcp,)) connections = result.fetchone()['connections'] return connections def decrease_connections(self, fcp): """Decrease connections of the given FCP device :param fcp: (str) a FCP device :return connections: (dict) the connections of the FCP device """ with get_fcp_conn() as conn: result = conn.execute("SELECT * FROM fcp WHERE " "fcp_id=?", (fcp,)) fcp_list = result.fetchall() if not fcp_list: msg = 'FCP with id: %s does not exist in DB.' 
% fcp LOG.error(msg) obj_desc = "FCP with id: %s" % fcp raise exception.SDKObjectNotExistError(obj_desc=obj_desc, modID=self._module_id) connections = fcp_list[0][2] if connections == 0: msg = 'FCP with id: %s no connections in DB.' % fcp LOG.error(msg) obj_desc = "FCP with id: %s" % fcp raise exception.SDKObjectNotExistError(obj_desc=obj_desc, modID=self._module_id) else: connections -= 1 if connections < 0: connections = 0 LOG.warning("Warning: connections of fcp is negative", fcp) # decrease connections by 1 conn.execute("UPDATE fcp SET connections=? " "WHERE fcp_id=?", (connections, fcp)) # check the result result = conn.execute("SELECT connections FROM fcp " "WHERE fcp_id=?", (fcp, )) connections = result.fetchone()['connections'] return connections def get_connections_from_fcp(self, fcp): connections = 0 with get_fcp_conn() as conn: result = conn.execute("SELECT connections FROM fcp WHERE " "fcp_id=?", (fcp,)) fcp_info = result.fetchone() if not fcp_info: msg = 'FCP with id: %s does not exist in DB.' 
                      % fcp
                LOG.error(msg)
                obj_desc = "FCP with id: %s" % fcp
                raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
                                                       modID=self._module_id)
            connections = fcp_info['connections']
            return connections

    def get_all(self):
        """Return all rows of the fcp table (list of sqlite3.Row)."""
        with get_fcp_conn() as conn:
            result = conn.execute("SELECT * FROM fcp")
            fcp_list = result.fetchall()
            return fcp_list

    @staticmethod
    def get_inuse_fcp_device_by_fcp_template(fcp_template_id):
        """ Get the FCP devices allocated from the template """
        with get_fcp_conn() as conn:
            query_sql = conn.execute("SELECT * FROM fcp "
                                     "WHERE tmpl_id=?", (fcp_template_id,))
            result = query_sql.fetchall()
            # result is a list of sqlite3.Row objects,
            # one per FCP device whose tmpl_id matches
            return result

    #########################################################
    #        DML for Table template_fcp_mapping             #
    #########################################################
    @staticmethod
    def update_path_of_fcp_device(record):
        """ update path of single fcp device
        from table template_fcp_mapping

        :param record (tuple)
            example: (path, fcp_id, fcp_template_id)
        :return NULL
        """
        with get_fcp_conn() as conn:
            conn.execute("UPDATE template_fcp_mapping "
                         "SET path=? "
                         "WHERE fcp_id=? and tmpl_id=?",
                         record)

    def get_path_count(self, fcp_template_id):
        """Return the count of distinct paths of the given template."""
        with get_fcp_conn() as conn:
            # Get distinct path list in DB
            result = conn.execute(
                "SELECT DISTINCT path FROM template_fcp_mapping "
                "WHERE tmpl_id=?", (fcp_template_id,))
            path_list = result.fetchall()
            return len(path_list)

    @staticmethod
    def bulk_delete_fcp_device_from_fcp_template(records):
        """ Delete multiple fcp device
        from table template_fcp_mapping

        :param records (iter)
            example: [(fcp_template_id, fcp_id), ...]
        :return NULL
        """
        with get_fcp_conn() as conn:
            conn.executemany(
                "DELETE FROM template_fcp_mapping "
                "WHERE tmpl_id=? AND fcp_id=?", records)

    @staticmethod
    def bulk_insert_fcp_device_into_fcp_template(records):
        """ Insert multiple fcp device
        into table template_fcp_mapping

        :param records (iter)
            example: [(fcp_template_id, fcp_id, path), ...]
        :return NULL
        """
        with get_fcp_conn() as conn:
            conn.executemany(
                "INSERT INTO template_fcp_mapping "
                "(tmpl_id, fcp_id, path) VALUES (?, ?, ?)", records)

    #########################################################
    #              DML for Table template                   #
    #########################################################
    def fcp_template_exist_in_db(self, fcp_template_id: str):
        """Return True if the given template id exists in table template."""
        with get_fcp_conn() as conn:
            query_sql = conn.execute("SELECT id FROM template "
                                     "WHERE id=?", (fcp_template_id,))
            query_ids = query_sql.fetchall()
        if query_ids:
            return True
        else:
            return False

    def get_min_fcp_paths_count_from_db(self, fcp_template_id):
        """Return the raw min_fcp_paths_count column of the template,
        or None when the template does not exist."""
        with get_fcp_conn() as conn:
            query_sql = conn.execute("SELECT min_fcp_paths_count FROM template "
                                     "WHERE id=?", (fcp_template_id,))
            min_fcp_paths_count = query_sql.fetchone()
        if min_fcp_paths_count:
            return min_fcp_paths_count['min_fcp_paths_count']
        else:
            return None

    @staticmethod
    def update_basic_info_of_fcp_template(record):
        """ update basic info of a FCP Multipath Template in table template

        :param record (tuple)
            example:
            (name, description, host_default, min_fcp_paths_count,
             fcp_template_id)
        :return NULL
        """
        name, description, host_default, min_fcp_paths_count, fcp_template_id = record
        with get_fcp_conn() as conn:
            # 1. change the is_default of existing templates to False,
            # if the is_default of the being-created template is True,
            # because only one default template per host is allowed
            if host_default is True:
                conn.execute("UPDATE template SET is_default=?", (False,))
            # 2. update current template
            conn.execute("UPDATE template "
                         "SET name=?, description=?, is_default=?, "
                         "min_fcp_paths_count=? WHERE id=?", record)

    #########################################################
    #          DML for Table template_sp_mapping            #
    #########################################################
    def sp_name_exist_in_db(self, sp_name: str):
        """Return True if sp_name exists in table template_sp_mapping."""
        with get_fcp_conn() as conn:
            query_sp = conn.execute("SELECT sp_name FROM template_sp_mapping "
                                    "WHERE sp_name=?", (sp_name,))
            query_sp_names = query_sp.fetchall()
        if query_sp_names:
            return True
        else:
            return False

    @staticmethod
    def bulk_set_sp_default_by_fcp_template(template_id, sp_name_list):
        """ Set a default FCP Multipath Template
        for multiple storage providers

        The function only manipulate table(template_fcp_mapping)

        :param template_id: the FCP Multipath Template ID
        :param sp_name_list: a list of storage provider hostname
        :return NULL
        """
        # Example:
        # if
        #   a. the existing-in-db storage providers for template_id:
        #      ['sp1', 'sp2']
        #   b. the sp_name_list is ['sp3', 'sp4']
        # then
        #   c. remove records of ['sp1', 'sp2'] from db
        #   d. remove records of ['sp3', 'sp4'] if any from db
        #   e. insert ['sp3', 'sp4'] with template_id as default
        with get_fcp_conn() as conn:
            # delete all records related to the template_id
            conn.execute("DELETE FROM template_sp_mapping "
                         "WHERE tmpl_id=?", (template_id,))
            # delete all records related to the
            # storage providers in sp_name_list
            records = ((sp, ) for sp in sp_name_list)
            conn.executemany("DELETE FROM template_sp_mapping "
                             "WHERE sp_name=?", records)
            # insert new record for each
            # storage provider in sp_name_list
            records = ((template_id, sp) for sp in sp_name_list)
            conn.executemany("INSERT INTO template_sp_mapping "
                             "(tmpl_id, sp_name) VALUES (?, ?)", records)

    #########################################################
    #           DML related to multiple tables              #
    #########################################################
    def get_allocated_fcps_from_assigner(self,
                                         assigner_id, fcp_template_id):
        """Return FCP devices of the template that are allocated
        (connections<>0 or reserved<>0) to the given assigner."""
        with get_fcp_conn() as conn:
            result = conn.execute(
                "SELECT "
                "fcp.fcp_id, fcp.wwpn_npiv, fcp.wwpn_phy "
                "FROM template_fcp_mapping "
                "INNER JOIN fcp "
                "ON template_fcp_mapping.fcp_id=fcp.fcp_id "
                "WHERE template_fcp_mapping.tmpl_id=? "
                "AND fcp.assigner_id=? "
                "AND (fcp.connections<>0 OR fcp.reserved<>0) "
                "AND fcp.tmpl_id=? "
                "ORDER BY template_fcp_mapping.fcp_id ASC",
                (fcp_template_id, assigner_id, fcp_template_id))
            fcp_list = result.fetchall()
            return fcp_list

    def get_reserved_fcps_from_assigner(self, assigner_id, fcp_template_id):
        """Return FCP devices of the template that are reserved
        (reserved<>0) by the given assigner."""
        with get_fcp_conn() as conn:
            result = conn.execute(
                "SELECT fcp.fcp_id, fcp.wwpn_npiv, "
                "fcp.wwpn_phy, fcp.connections "
                "FROM template_fcp_mapping "
                "INNER JOIN fcp "
                "ON template_fcp_mapping.fcp_id=fcp.fcp_id "
                "WHERE template_fcp_mapping.tmpl_id=? "
                "AND fcp.assigner_id=? "
                "AND fcp.reserved<>0 "
                "AND fcp.tmpl_id=? "
                "ORDER BY template_fcp_mapping.fcp_id ASC",
                (fcp_template_id, assigner_id, fcp_template_id))
            fcp_list = result.fetchall()
            return fcp_list

    def get_fcp_devices_with_same_index(self, fcp_template_id):
        """ Get a group of available FCPs with the same index,
        which also need satisfy the following conditions:
        a. connections = 0
        b. reserved = 0
        c. state = 'free'

        :return fcp_list: (list)
        case 1
            an empty list(i.e. [])
            if no fcp exist in DB
        case 2
            an empty list(i.e. [])
            if no expected pair found
        case 3
            randomly choose a pair of below combinations:
            [1a00,1b00] ,[1a01,1b01] ,[1a02,1b02]...
            rather than below combinations:
            [1a00,1b02] ,[1a03,1b00]
            [1a02], [1b03]
        """
        fcp_list = []
        fcp_pair_map = {}
        with get_fcp_conn() as conn:
            '''
            count_per_path examples:
            in normal cases, all path has same count, eg.
            4 paths: [7, 7, 7, 7]
            2 paths: [7, 7]
            we can also handle rare abnormal cases,
            where path count differs, eg.
            4 paths: [7, 4, 5, 6]
            2 paths: [7, 6]
            '''
            result = conn.execute("SELECT COUNT(path) "
                                  "FROM template_fcp_mapping "
                                  "WHERE tmpl_id=? "
                                  "GROUP BY path "
                                  "ORDER BY path ASC", (fcp_template_id,))
            count_per_path = [a[0] for a in result.fetchall()]
            # case1: return [] if no fcp found in FCP DB
            if not count_per_path:
                LOG.error("Not enough FCPs available, return empty list.")
                return fcp_list
            # count of free FCP devices per path
            result = conn.execute(
                "SELECT COUNT(template_fcp_mapping.path) "
                "FROM template_fcp_mapping "
                "INNER JOIN fcp "
                "ON template_fcp_mapping.fcp_id=fcp.fcp_id "
                "WHERE template_fcp_mapping.tmpl_id=? "
                "AND fcp.connections=0 "
                "AND fcp.reserved=0 "
                "AND fcp.state='free' "
                "AND fcp.wwpn_npiv IS NOT '' "
                "AND fcp.wwpn_phy IS NOT '' "
                "GROUP BY template_fcp_mapping.path "
                "ORDER BY template_fcp_mapping.path", (fcp_template_id,))
            free_count_per_path = [a[0] for a in result.fetchall()]
            # case2: return [] if no free fcp found from at least one path
            if len(free_count_per_path) < len(count_per_path):
                # For get_fcp_pair_with_same_index, we will not check the
                # CONF.volume.min_fcp_paths_count, the returned fcp count
                # should always equal to the total paths count
                LOG.error("Available paths count: %s, total paths count: "
                          "%s." %
                          (len(free_count_per_path), len(count_per_path)))
                return fcp_list
            '''
            fcps 2 paths example:
            fcp  conn reserved
            ------------------
            [('1a00', 1, 1, 'active'),
             ('1a01', 0, 0, 'free'),
             ('1a02', 0, 0, 'free'),
             ('1a03', 0, 0, 'free'),
             ('1a04', 0, 0, 'offline'"),
             ...
             ('1b00', 1, 0, 'active'),
             ('1b01', 2, 1, 'active'),
             ('1b02', 0, 0, 'free'),
             ('1b03', 0, 0, 'free'),
             ('1b04', 0, 0, 'free'),
             ...]
            '''
            result = conn.execute(
                "SELECT fcp.fcp_id, fcp.connections, "
                "fcp.reserved, fcp.state, fcp.wwpn_npiv, fcp.wwpn_phy "
                "FROM fcp "
                "INNER JOIN template_fcp_mapping "
                "ON template_fcp_mapping.fcp_id=fcp.fcp_id "
                "WHERE template_fcp_mapping.tmpl_id=? "
                "ORDER BY template_fcp_mapping.path, "
                "template_fcp_mapping.fcp_id", (fcp_template_id,))
            fcps = result.fetchall()
            '''
            get all free fcps from 1st path
            fcp_pair_map example:
             idx    fcp_pair
             ----------------
            { 1 : [('1a01', 'c05076de330003a3', '', 1)],
              2 : ['1a02'],
              3 : ['1a03']}
            '''
            # The FCP count of 1st path
            for i in range(count_per_path[0]):
                (fcp_no, connections, reserved,
                 state, wwpn_npiv, wwpn_phy) = fcps[i]
                if connections == reserved == 0 and state == 'free':
                    fcp_pair_map[i] = [(fcp_no, wwpn_npiv, wwpn_phy)]
            '''
            select out pairs if member count == path count
            fcp_pair_map example:
             idx    fcp_pair
             ----------------------
            { 2 : ['1a02', '1b02'],
              3 : ['1a03', '1b03']}
            '''
            # iterate a copy because entries may be popped while iterating
            for idx in fcp_pair_map.copy():
                s = 0
                for i, c in enumerate(count_per_path[:-1]):
                    s += c
                    # avoid index out of range for per path in fcps[]
                    (fcp_no, connections, reserved,
                     state, wwpn_npiv, wwpn_phy) = fcps[s + idx]
                    if (idx < count_per_path[i + 1] and
                            connections == reserved == 0 and
                            state == 'free'):
                        fcp_pair_map[idx].append(
                            (fcp_no, wwpn_npiv, wwpn_phy))
                    else:
                        fcp_pair_map.pop(idx)
                        break
            '''
            case3: return one group randomly chosen from fcp_pair_map
            fcp_list example:
            ['1a03', '1b03']
            '''
            LOG.info("Print at most 5 available FCP groups: {}".format(
                list(fcp_pair_map.values())[:5]))
            if fcp_pair_map:
                fcp_list = random.choice(sorted(fcp_pair_map.values()))
            else:
                LOG.error("Not eligible FCP group found in FCP DB.")
            return fcp_list

    def get_fcp_devices(self, fcp_template_id):
        """ Get a group of available FCPs,
        which satisfy the following conditions:
        a. connections = 0
        b. reserved = 0
        c.
        state = free
        """
        fcp_list = []
        with get_fcp_conn() as conn:
            min_fcp_paths_count = self.get_min_fcp_paths_count(fcp_template_id)
            # Get distinct path list in DB
            result = conn.execute("SELECT DISTINCT path "
                                  "FROM template_fcp_mapping "
                                  "WHERE tmpl_id=?", (fcp_template_id,))
            path_list = result.fetchall()
            # Get fcp_list of every path
            for no in path_list:
                result = conn.execute(
                    "SELECT fcp.fcp_id, fcp.wwpn_npiv, fcp.wwpn_phy "
                    "FROM template_fcp_mapping "
                    "INNER JOIN fcp "
                    "ON template_fcp_mapping.fcp_id=fcp.fcp_id "
                    "WHERE template_fcp_mapping.tmpl_id=? "
                    "AND fcp.connections=0 "
                    "AND fcp.reserved=0 "
                    "AND fcp.state='free' "
                    "AND template_fcp_mapping.path=? "
                    "AND fcp.wwpn_npiv IS NOT '' "
                    "AND fcp.wwpn_phy IS NOT '' "
                    "ORDER BY template_fcp_mapping.path",
                    (fcp_template_id, no[0]))
                fcps = result.fetchall()
                if not fcps:
                    # continue to find whether
                    # other paths has available FCP
                    continue
                # pick one free FCP device of this path at random
                index = random.randint(0, len(fcps) - 1)
                fcp_list.append(fcps[index])
            # Start to check whether the available count >= min_fcp_paths_count
            allocated_paths = len(fcp_list)
            total_paths = len(path_list)
            if allocated_paths < total_paths:
                LOG.info("Not all paths of FCP Multipath Template (id={}) "
                         "have available FCP devices. "
                         "The count of minimum FCP device path is {}. "
                         "The count of total paths is {}. "
                         "The count of paths with available FCP devices is {}, "
                         "which is less than the total path count."
                         .format(fcp_template_id, min_fcp_paths_count,
                                 total_paths, allocated_paths))
                if allocated_paths >= min_fcp_paths_count:
                    LOG.warning("The count of paths with available FCP devices "
                                "is less than that of total path, but not less "
                                "than that of minimum FCP device path. "
                                "Return the FCP devices {} from the available "
                                "paths to continue.".format(fcp_list))
                    return fcp_list
                else:
                    LOG.error("The count of paths with available FCP devices "
                              "must not be less than that of minimum FCP device "
                              "path, return empty list to abort the volume attachment.")
                    return []
            else:
                return fcp_list

    def create_fcp_template(self, fcp_template_id, name, description,
                            fcp_devices_by_path, host_default,
                            default_sp_list, min_fcp_paths_count=None):
        """ Insert records of new FCP Multipath Template in fcp DB

        :param fcp_template_id: FCP Multipath Template ID
        :param name: FCP Multipath Template name
        :param description: description
        :param fcp_devices_by_path:
            Example:
            if fcp_list is "0011-0013;0015;0017-0018",
            then fcp_devices_by_path should be passed like:
            {
              0: {'0011' ,'0012', '0013'}
              1: {'0015'}
              2: {'0017', '0018'}
            }
        :param host_default: (bool)
        :param default_sp_list: (list)
        :param min_fcp_paths_count: (int)
            if it is None, -1 will be saved to template table
            as default value.
        :return: NULL
        """
        # The following multiple DQLs(Database query)
        # are put into the with-block with DMLs
        # because the consequent DMLs(Database modification)
        # depend on the result of the DQLs.
        # So that, other threads can NOT begin a sqlite transacation
        # util current thread exits the with-block.
        # Refer to 'def get_fcp_conn' for thread lock
        with get_fcp_conn() as conn:
            # first check the template exist or not
            # if already exist, raise exception
            if self.fcp_template_exist_in_db(fcp_template_id):
                raise exception.SDKObjectAlreadyExistError(
                    obj_desc=("FCP Multipath Template "
                              "(id: %s) " % fcp_template_id),
                    modID=self._module_id)
            # then check the SP records exist in template_sp_mapping or not
            # if already exist, will update the tmpl_id
            # if not exist, will insert new records
            sp_mapping_to_add = list()
            sp_mapping_to_update = list()
            if not default_sp_list:
                default_sp_list = []
            for sp_name in default_sp_list:
                record = (fcp_template_id, sp_name)
                if self.sp_name_exist_in_db(sp_name):
                    sp_mapping_to_update.append(record)
                else:
                    sp_mapping_to_add.append(record)
            # Prepare records include (fcp_id, tmpl_id, path)
            # to be inserted into table template_fcp_mapping
            fcp_mapping = list()
            for path in fcp_devices_by_path:
                for fcp_id in fcp_devices_by_path[path]:
                    new_record = [fcp_id, fcp_template_id, path]
                    fcp_mapping.append(new_record)
            # 1. change the is_default of existing templates to False,
            # if the is_default of the being-created template is True,
            # because only one default template per host is allowed
            if host_default is True:
                conn.execute("UPDATE template SET is_default=?", (False,))
            # 2. insert a new record in template table
            # if min_fcp_paths_count is None, will not insert it to db
            # (the column then takes its declared default of -1)
            if not min_fcp_paths_count:
                tmpl_basics = (fcp_template_id, name, description, host_default)
                sql = ("INSERT INTO template (id, name, description, "
                       "is_default) VALUES (?, ?, ?, ?)")
            else:
                tmpl_basics = (fcp_template_id, name, description,
                               host_default, min_fcp_paths_count)
                sql = ("INSERT INTO template (id, name, description, "
                       "is_default, min_fcp_paths_count) VALUES (?, ?, ?, ?, ?)")
            conn.execute(sql, tmpl_basics)
            # 3. insert new records in template_fcp_mapping
            conn.executemany("INSERT INTO template_fcp_mapping (fcp_id, "
                             "tmpl_id, path) VALUES (?, ?, ?)", fcp_mapping)
            # 4. insert a new record in template_sp_mapping
            if default_sp_list:
                if sp_mapping_to_add:
                    conn.executemany("INSERT INTO template_sp_mapping "
                                     "(tmpl_id, sp_name) VALUES "
                                     "(?, ?)", sp_mapping_to_add)
                if sp_mapping_to_update:
                    conn.executemany("UPDATE template_sp_mapping SET "
                                     "tmpl_id=? WHERE sp_name=?",
                                     sp_mapping_to_update)

    def _validate_min_fcp_paths_count(self, fcp_devices, min_fcp_paths_count,
                                      fcp_template_id):
        """ When to edit FCP Multipath Template, if min_fcp_paths_count
        is not None or fcp_devices is not None (None means no need to
        update this field, but keep the original value), need to validate
        the values.
        min_fcp_paths_count should not be larger than fcp_device_path_count.
        If min_fcp_paths_count is None, get the value from template table.
        If fcp_devices is None, get the fcp_device_path_count from
        template_fcp_mapping table.

        :raises SDKConflictError: when min_fcp_paths_count is larger
            than the FCP device path count
        """
        if min_fcp_paths_count or fcp_devices:
            with get_fcp_conn():
                if not fcp_devices:
                    fcp_devices_path_count = self.get_path_count(fcp_template_id)
                else:
                    fcp_devices_by_path = utils.expand_fcp_list(fcp_devices)
                    fcp_devices_path_count = len(fcp_devices_by_path)
                if not min_fcp_paths_count:
                    min_fcp_paths_count = self.get_min_fcp_paths_count_from_db(fcp_template_id)
                # raise exception
                if min_fcp_paths_count > fcp_devices_path_count:
                    msg = ("min_fcp_paths_count %s is larger than fcp device path count %s. "
                           "Adjust the fcp_devices setting or "
                           "min_fcp_paths_count." % (min_fcp_paths_count, fcp_devices_path_count))
                    LOG.error(msg)
                    raise exception.SDKConflictError(modID=self._module_id, rs=23,
                                                     msg=msg)

    def get_min_fcp_paths_count(self, fcp_template_id):
        """ Get min_fcp_paths_count, query template table first, if it is -1,
        then return the value of fcp devices path count from
        template_fcp_mapping table. If it is None, raise error.
""" if not fcp_template_id: min_fcp_paths_count = None else: with get_fcp_conn(): min_fcp_paths_count = self.get_min_fcp_paths_count_from_db(fcp_template_id) if min_fcp_paths_count == -1: min_fcp_paths_count = self.get_path_count(fcp_template_id) if min_fcp_paths_count is None: obj_desc = "min_fcp_paths_count from fcp_template_id %s" % fcp_template_id raise exception.SDKObjectNotExistError(obj_desc=obj_desc) return min_fcp_paths_count def edit_fcp_template(self, fcp_template_id, name=None, description=None, fcp_devices=None, host_default=None, default_sp_list=None, min_fcp_paths_count=None): """ Edit a FCP Multipath Template. The kwargs values are pre-validated in two places: validate kwargs types in zvmsdk/sdkwsgi/schemas/volume.py set a kwarg as None if not passed by user in zvmsdk/sdkwsgi/handlers/volume.py If any kwarg is None, the kwarg will not be updated. :param fcp_template_id: template id :param name: template name :param description: template desc :param fcp_devices: FCP devices divided into different paths by semicolon Format: "fcp-devices-from-path0;fcp-devices-from-path1;..." Example: "0011-0013;0015;0017-0018", :param host_default: (bool) :param default_sp_list: (list) Example: ["SP1", "SP2"] :param min_fcp_paths_count: if it is None, then will not update this field in db. :return: Example { 'fcp_template': { 'name': 'bjcb-test-template', 'id': '36439338-db14-11ec-bb41-0201018b1dd2', 'description': 'This is Default template', 'host_default': True, 'storage_providers': ['sp4', 'v7k60'], 'min_fcp_paths_count': 2 } } """ # The following multiple DQLs(Database query) # are put into the with-block with DMLs # because the consequent DMLs(Database modification) # depend on the result of the DQLs. # So that, other threads can NOT begin a sqlite transacation # util current thread exits the with-block. 
# Refer to 'def get_fcp_conn' for thread lock with get_fcp_conn(): # DQL: validate: FCP Multipath Template if not self.fcp_template_exist_in_db(fcp_template_id): obj_desc = ("FCP Multipath Template {}".format(fcp_template_id)) raise exception.SDKObjectNotExistError(obj_desc=obj_desc) # DQL: validate: add or delete path from FCP Multipath Template. # If fcp_devices is None, it means user do not want to # modify fcp_devices, so skip the validation; # otherwise, perform the validation. if fcp_devices is not None: fcp_path_count_from_input = len( [i for i in fcp_devices.split(';') if i]) fcp_path_count_in_db = self.get_path_count(fcp_template_id) if fcp_path_count_from_input != fcp_path_count_in_db: inuse_fcp = self.get_inuse_fcp_device_by_fcp_template( fcp_template_id) if inuse_fcp: inuse_fcp = utils.shrink_fcp_list( [fcp['fcp_id'] for fcp in inuse_fcp]) detail = ("The FCP devices ({}) are allocated to virtual machines " "by the FCP Multipath Template (id={}). " "Adding or deleting a FCP device path from a FCP Multipath Template " "is not allowed if there is any FCP device allocated from the template. " "You must deallocate those FCP devices " "before adding or deleting a path from the template." .format(inuse_fcp, fcp_template_id)) raise exception.SDKConflictError(modID=self._module_id, rs=24, msg=detail) # If min_fcp_paths_count is not None or fcp_devices is not None, need to validate the value. # min_fcp_paths_count should not be larger than fcp device path count, or else, raise error. 
            self._validate_min_fcp_paths_count(fcp_devices, min_fcp_paths_count,
                                               fcp_template_id)
            tmpl_basic, fcp_detail = self.get_fcp_templates_details(
                [fcp_template_id])
            # DML: table template_fcp_mapping
            if fcp_devices is not None:
                # fcp_from_input:
                # fcp devices from user input
                # example:
                # {'0011': 0, '0013': 0,  <<< path 0
                #  '0015': 1,             <<< path 1
                #  '0018': 2, '0017': 2}  <<< path 2
                fcp_from_input = dict()
                # fcp_devices_by_path:
                # example:
                # if fcp_devices is "0011-0013;0015;0017-0018",
                # then fcp_devices_by_path is :
                # {
                #   0: {'0011', '0013'}
                #   1: {'0015'}
                #   2: {'0017', '0018'}
                # }
                fcp_devices_by_path = utils.expand_fcp_list(fcp_devices)
                for path in fcp_devices_by_path:
                    for fcp_id in fcp_devices_by_path[path]:
                        fcp_from_input[fcp_id] = path
                # fcp_in_db:
                # FCP devices belonging to fcp_template_id
                # queried from database including the FCP devices
                # that are not found in z/VM
                # (maps fcp_id to its detail row)
                fcp_in_db = dict()
                for row in fcp_detail:
                    fcp_in_db[row['fcp_id']] = row
                # Divide the FCP devices into three sets
                add_set = set(fcp_from_input) - set(fcp_in_db)
                inter_set = set(fcp_from_input) & set(fcp_in_db)
                del_set = set(fcp_in_db) - set(fcp_from_input)
                # only unused FCP devices can be
                # deleted from a FCP Multipath Template.
                # Two types of unused FCP devices:
                # 1. connections/reserved == None:
                #    the fcp only exists in table(template_fcp_mapping),
                #    rather than table(fcp)
                # 2. connections/reserved == 0:
                #    the fcp exists in both tables
                #    and it is not allocated from FCP DB
                not_allow_for_del = set()
                for fcp in del_set:
                    if (fcp_in_db[fcp]['connections'] not in (None, 0) or
                            fcp_in_db[fcp]['reserved'] not in (None, 0)):
                        not_allow_for_del.add(fcp)
                # For a FCP device included in multiple
                # FCP Multipath Templates,
                # the FCP device is allowed to be deleted from the
                # current template only if it is allocated from
                # another template rather than the current one
                inuse_fcp_devices = self.get_inuse_fcp_device_by_fcp_template(fcp_template_id)
                inuse_fcp_by_current_template = set(fcp['fcp_id'] for fcp in inuse_fcp_devices)
                not_allow_for_del &= inuse_fcp_by_current_template
                # validate: not allowed to remove inuse FCP devices
                if not_allow_for_del:
                    not_allow_for_del = utils.shrink_fcp_list(
                        list(not_allow_for_del))
                    detail = ("The FCP devices ({}) are missing from the FCP device list. "
                              "These FCP devices are allocated to virtual machines "
                              "from the FCP Multipath Template (id={}). "
                              "Deleting the allocated FCP devices from this template is not allowed. "
                              "You must ensure those FCP devices are included in the FCP device list."
                              .format(not_allow_for_del, fcp_template_id))
                    raise exception.SDKConflictError(modID=self._module_id, rs=24,
                                                     msg=detail)
                # DML: table template_fcp_mapping
                LOG.info("DML: table template_fcp_mapping")
                # 1. delete from table template_fcp_mapping
                records_to_delete = [
                    (fcp_template_id, fcp_id)
                    for fcp_id in del_set]
                self.bulk_delete_fcp_device_from_fcp_template(
                    records_to_delete)
                LOG.info("FCP devices ({}) removed from FCP Multipath Template {}."
                         .format(utils.shrink_fcp_list(list(del_set)),
                                 fcp_template_id))
                # 2. insert into table template_fcp_mapping
                records_to_insert = [
                    (fcp_template_id, fcp_id, fcp_from_input[fcp_id])
                    for fcp_id in add_set]
                self.bulk_insert_fcp_device_into_fcp_template(
                    records_to_insert)
                LOG.info("FCP devices ({}) added into FCP Multipath Template {}."
                         .format(utils.shrink_fcp_list(list(add_set)),
                                 fcp_template_id))
                # 3.
update table template_fcp_mapping # update path of fcp devices if changed for fcp in inter_set: path_from_input = fcp_from_input[fcp] path_in_db = fcp_in_db[fcp]['path'] if path_from_input != path_in_db: record_to_update = ( fcp_from_input[fcp], fcp, fcp_template_id) self.update_path_of_fcp_device(record_to_update) LOG.info("FCP device ({}) updated into " "FCP Multipath Template {} from path {} to path {}." .format(fcp, fcp_template_id, fcp_in_db[fcp]['path'], fcp_from_input[fcp])) # DML: table template if (name, description, host_default, min_fcp_paths_count) != (None, None, None, None): LOG.info("DML: table template") record_to_update = ( name if name is not None else tmpl_basic[0]['name'], description if description is not None else tmpl_basic[0]['description'], host_default if host_default is not None else tmpl_basic[0]['is_default'], min_fcp_paths_count if min_fcp_paths_count is not None else tmpl_basic[0]['min_fcp_paths_count'], fcp_template_id) self.update_basic_info_of_fcp_template(record_to_update) LOG.info("FCP Multipath Template basic info updated.") # DML: table template_sp_mapping if default_sp_list is not None: LOG.info("DML: table template_sp_mapping") self.bulk_set_sp_default_by_fcp_template(fcp_template_id, default_sp_list) LOG.info("Default template of storage providers ({}) " "updated.".format(default_sp_list)) # Return template basic info queried from DB # tmpl_basic is a list containing one or more sqlite.Row objects # Example: # if a template is the SP-level default for 2 SPs (SP1 and SP2) # (i.e. the template has 2 entries in table template_sp_mapping # then tmpl_basic is a list containing 2 Row objects, # the only different value between the 2 Row objects is 'sp_name' # (i.e. tmpl_basic[0]['sp_name'] is 'SP1', # while tmpl_basic[1]['sp_name'] is 'SP2'. 
tmpl_basic = self.get_fcp_templates_details([fcp_template_id])[0] return {'fcp_template': { 'name': tmpl_basic[0]['name'], 'id': tmpl_basic[0]['id'], 'description': tmpl_basic[0]['description'], 'host_default': bool(tmpl_basic[0]['is_default']), 'storage_providers': [] if tmpl_basic[0]['sp_name'] is None else [r['sp_name'] for r in tmpl_basic], 'min_fcp_paths_count': self.get_min_fcp_paths_count(fcp_template_id) }} def get_fcp_templates(self, template_id_list=None): """Get FCP Multipath Templates base info by template_id_list. If template_id_list is None, will get all the FCP Multipath Templates in db. return format: [(id|name|description|is_default|min_fcp_paths_count|sp_name)] """ cmd = ("SELECT template.id, template.name, template.description, " "template.is_default, template.min_fcp_paths_count, template_sp_mapping.sp_name " "FROM template " "LEFT OUTER JOIN template_sp_mapping " "ON template.id=template_sp_mapping.tmpl_id") with get_fcp_conn() as conn: if template_id_list: result = conn.execute( cmd + " WHERE template.id " "IN (%s)" % ','.join('?' * len(template_id_list)), template_id_list) else: result = conn.execute(cmd) raw = result.fetchall() return raw def get_host_default_fcp_template(self, host_default=True): """Get the host default FCP Multipath Template base info. return format: (id|name|description|is_default|sp_name) when the template is more than one SP's default, then it will show up several times in the result. 
""" with get_fcp_conn() as conn: if host_default: result = conn.execute( "SELECT t.id, t.name, t.description, t.is_default, " "t.min_fcp_paths_count, ts.sp_name " "FROM template AS t " "LEFT OUTER JOIN template_sp_mapping AS ts " "ON t.id=ts.tmpl_id " "WHERE t.is_default=1") else: result = conn.execute( "SELECT t.id, t.name, t.description, t.is_default, " "t.min_fcp_paths_count, ts.sp_name " "FROM template AS t " "LEFT OUTER JOIN template_sp_mapping AS ts " "ON t.id=ts.tmpl_id " "WHERE t.is_default=0") raw = result.fetchall() return raw def get_sp_default_fcp_template(self, sp_host_list): """Get the sp_host_list default FCP Multipath Template. """ cmd = ("SELECT t.id, t.name, t.description, t.is_default, " "t.min_fcp_paths_count, ts.sp_name " "FROM template_sp_mapping AS ts " "INNER JOIN template AS t " "ON ts.tmpl_id=t.id") raw = [] with get_fcp_conn() as conn: if (len(sp_host_list) == 1 and sp_host_list[0].lower() == 'all'): result = conn.execute(cmd) raw = result.fetchall() else: for sp_host in sp_host_list: result = conn.execute( cmd + " WHERE ts.sp_name=?", (sp_host,)) raw.extend(result.fetchall()) return raw def get_fcp_template_by_assigner_id(self, assigner_id): """Get a templates list of specified assigner. """ with get_fcp_conn() as conn: result = conn.execute( "SELECT t.id, t.name, t.description, t.is_default, " "t.min_fcp_paths_count, ts.sp_name " "FROM fcp " "INNER JOIN template AS t " "ON fcp.tmpl_id=t.id " "LEFT OUTER JOIN template_sp_mapping AS ts " "ON fcp.tmpl_id=ts.tmpl_id " "WHERE fcp.assigner_id=?", (assigner_id,)) raw = result.fetchall() # id|name|description|is_default|min_fcp_paths_count|sp_name return raw def get_fcp_templates_details(self, template_id_list=None): """Get templates detail info by template_id_list :param template_id_list: must be a list or None If template_id_list=None, will get all the templates detail info. 
Detail info including two parts: base info and fcp device info, these two parts info will use two cmds to get from db and return out, outer method will join these two return output. 'tmpl_cmd' is used to get base info from template table and template_sp_mapping table. tmpl_cmd result format: id|name|description|is_default|min_fcp_paths_count|sp_name 'devices_cmd' is used to get fcp device info. Device's template id is gotten from template_fcp_mapping table, device's usage info is gotten from fcp table. Because not all the templates' fcp device is in fcp table, so the fcp device's template id should being gotten from template_fcp_mapping table insteading of fcp table. 'devices_cmd' result format: fcp_id|tmpl_id|path|assigner_id|connections|reserved| wwpn_npiv|wwpn_phy|chpid|state|owner|tmpl_id In 'devices_cmd' result: the first three properties are from template_fcp_mapping table, and the others are from fcp table. when the device is not in fcp table, all the properties in fcp table will be None. For example: template '12345678' has a fcp "1aaa" on path 0, but this device is not in fcp table, the query result will be as below. 1aaa|12345678|0||||||||| """ tmpl_cmd = ( "SELECT t.id, t.name, t.description, " "t.is_default, t.min_fcp_paths_count, ts.sp_name " "FROM template AS t " "LEFT OUTER JOIN template_sp_mapping AS ts " "ON t.id=ts.tmpl_id") devices_cmd = ( "SELECT tf.fcp_id, tf.tmpl_id, tf.path, fcp.assigner_id, " "fcp.connections, fcp.reserved, fcp.wwpn_npiv, fcp.wwpn_phy, " "fcp.chpid, fcp.state, fcp.owner, fcp.tmpl_id " "FROM template_fcp_mapping AS tf " "LEFT OUTER JOIN fcp " "ON tf.fcp_id=fcp.fcp_id") with get_fcp_conn() as conn: if template_id_list: tmpl_result = conn.execute( tmpl_cmd + " WHERE t.id IN (%s)" % ','.join('?' * len(template_id_list)), template_id_list) devices_result = conn.execute( devices_cmd + " WHERE tf.tmpl_id " "IN (%s)" % ','.join('?' 
    def bulk_delete_fcp_from_template(self, fcp_id_list, fcp_template_id):
        """Delete multiple FCP records from the table template_fcp_mapping in the
        specified FCP Multipath Template only if the FCP devices are available.

        :param fcp_id_list: FCP device IDs to remove from the template
        :param fcp_template_id: ID of the FCP Multipath Template
        :return: None
        """
        records_to_delete = [(fcp_template_id, fcp_id)
                             for fcp_id in fcp_id_list]
        with get_fcp_conn() as conn:
            # The NOT IN subquery protects in-use devices: any fcp row with
            # connections<>0 or reserved<>0 is excluded from deletion, so
            # only mappings of available FCP devices are actually removed.
            conn.executemany(
                "DELETE FROM template_fcp_mapping "
                "WHERE fcp_id NOT IN ("
                "SELECT fcp_id FROM fcp "
                "WHERE fcp.connections<>0 OR fcp.reserved<>0) "
                "AND tmpl_id=? AND fcp_id=?", records_to_delete)
class ImageDbOperator(object):
    """DB operator for the 'image' table (imported image metadata)."""

    def __init__(self):
        # Create the table up front so every other method can assume it
        # exists.
        self._create_image_table()
        self._module_id = 'image'

    def _create_image_table(self):
        """Create the image table if it does not exist yet."""
        create_image_table_sql = ' '.join((
            'CREATE TABLE IF NOT EXISTS image (',
            'imagename varchar(128) PRIMARY KEY COLLATE NOCASE,',
            'imageosdistro varchar(16),',
            'md5sum varchar(512),',
            'disk_size_units varchar(512),',
            'image_size_in_bytes varchar(512),',
            'type varchar(16),',
            'comments varchar(128))'))
        with get_image_conn() as conn:
            conn.execute(create_image_table_sql)

    def image_add_record(self, imagename, imageosdistro, md5sum,
                         disk_size_units, image_size_in_bytes, type,
                         comments=None):
        """Insert one image record into the image table.

        The comments column is only written when a comments value is
        provided; otherwise it is left at its column default.
        """
        if comments is not None:
            with get_image_conn() as conn:
                conn.execute("INSERT INTO image (imagename, imageosdistro,"
                             "md5sum, disk_size_units, image_size_in_bytes,"
                             " type, comments) VALUES (?, ?, ?, ?, ?, ?, ?)",
                             (imagename, imageosdistro, md5sum,
                              disk_size_units, image_size_in_bytes, type,
                              comments))
        else:
            with get_image_conn() as conn:
                conn.execute("INSERT INTO image (imagename, imageosdistro,"
                             "md5sum, disk_size_units, image_size_in_bytes,"
                             " type) VALUES (?, ?, ?, ?, ?, ?)",
                             (imagename, imageosdistro, md5sum,
                              disk_size_units, image_size_in_bytes, type))

    def image_query_record(self, imagename=None):
        """Query the image record from database, if imagename is None, all
        of the image records will be returned, otherwise only the specified
        image record will be returned.

        :param imagename: image name to look up, or None for all images
        :return: list of dicts keyed by the image table column names
        :raises SDKObjectNotExistError: when imagename is given but no
            matching record exists
        """
        if imagename:
            with get_image_conn() as conn:
                result = conn.execute("SELECT * FROM image WHERE "
                                      "imagename=?", (imagename,))
                image_list = result.fetchall()
            if not image_list:
                obj_desc = "Image with name: %s" % imagename
                raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
                                                       modID=self._module_id)
        else:
            with get_image_conn() as conn:
                result = conn.execute("SELECT * FROM image")
                image_list = result.fetchall()
        # Map each image record to be a dict, with the key is the field name
        # in image DB
        image_keys_list = ['imagename', 'imageosdistro', 'md5sum',
                           'disk_size_units', 'image_size_in_bytes', 'type',
                           'comments']
        image_result = []
        for item in image_list:
            image_item = dict(zip(image_keys_list, item))
            image_result.append(image_item)
        return image_result

    def image_delete_record(self, imagename):
        """Delete the record of specified imagename from image table"""
        with get_image_conn() as conn:
            conn.execute("DELETE FROM image WHERE imagename=?", (imagename,))
% guest_id if ignore: # Just print a warning message LOG.info(msg) else: LOG.error(msg) obj_desc = "Guest with id: %s" % guest_id raise exception.SDKObjectNotExistError(obj_desc=obj_desc, modID=self._module_id) return guest def _check_existence_by_userid(self, userid, ignore=False): guest = self.get_guest_by_userid(userid) if guest is None: msg = 'Guest with userid: %s does not exist in DB.' % userid if ignore: # Just print a warning message LOG.info(msg) else: LOG.error(msg) obj_desc = "Guest with userid: %s" % userid raise exception.SDKObjectNotExistError(obj_desc=obj_desc, modID=self._module_id) return guest def add_guest_registered(self, userid, meta, net_set, comments=None): # Add guest which is migrated from other host or onboarded. guest_id = str(uuid.uuid4()) with get_guest_conn() as conn: conn.execute( "INSERT INTO guests VALUES (?, ?, ?, ?, ?)", (guest_id, userid, meta, net_set, comments)) def add_guest(self, userid, meta='', comments=''): # Generate uuid automatically guest_id = str(uuid.uuid4()) net_set = '0' with get_guest_conn() as conn: conn.execute( "INSERT INTO guests VALUES (?, ?, ?, ?, ?)", (guest_id, userid, meta, net_set, comments)) def delete_guest_by_id(self, guest_id): # First check whether the guest exist in db table guest = self._check_existence_by_id(guest_id, ignore=True) if guest is None: return # Update guest if exist with get_guest_conn() as conn: conn.execute( "DELETE FROM guests WHERE id=?", (guest_id,)) def delete_guest_by_userid(self, userid): # First check whether the guest exist in db table guest = self._check_existence_by_userid(userid, ignore=True) if guest is None: return with get_guest_conn() as conn: conn.execute( "DELETE FROM guests WHERE userid=?", (userid,)) def get_guest_metadata_with_userid(self, userid): with get_guest_conn() as conn: res = conn.execute("SELECT metadata FROM guests " "WHERE userid=?", (userid,)) guests = res.fetchall() return guests def update_guest_by_id(self, uuid, userid=None, meta=None, 
net_set=None, comments=None): if ((userid is None) and (meta is None) and (net_set is None) and (comments is None)): msg = ("Update guest with id: %s failed, no field " "specified to be updated." % uuid) LOG.error(msg) raise exception.SDKInternalError(msg=msg, modID=self._module_id) # First check whether the guest exist in db table self._check_existence_by_id(uuid) # Start update sql_cmd = "UPDATE guests SET" sql_var = [] if userid is not None: sql_cmd += " userid=?," sql_var.append(userid) if meta is not None: sql_cmd += " metadata=?," sql_var.append(meta) if net_set is not None: sql_cmd += " net_set=?," sql_var.append(net_set) if comments is not None: sql_cmd += " comments=?," sql_var.append(comments) # remove the tailing comma sql_cmd = sql_cmd.strip(',') # Add the id filter sql_cmd += " WHERE id=?" sql_var.append(uuid) with get_guest_conn() as conn: conn.execute(sql_cmd, sql_var) def update_guest_by_userid(self, userid, meta=None, net_set=None, comments=None): userid = userid if (meta is None) and (net_set is None) and (comments is None): msg = ("Update guest with userid: %s failed, no field " "specified to be updated." % userid) LOG.error(msg) raise exception.SDKInternalError(msg=msg, modID=self._module_id) # First check whether the guest exist in db table self._check_existence_by_userid(userid) # Start update sql_cmd = "UPDATE guests SET" sql_var = [] if meta is not None: sql_cmd += " metadata=?," sql_var.append(meta) if net_set is not None: sql_cmd += " net_set=?," sql_var.append(net_set) if comments is not None: new_comments = json.dumps(comments) sql_cmd += " comments=?," sql_var.append(new_comments) # remove the tailing comma sql_cmd = sql_cmd.strip(',') # Add the id filter sql_cmd += " WHERE userid=?" 
sql_var.append(userid) with get_guest_conn() as conn: conn.execute(sql_cmd, sql_var) def get_guest_list(self): with get_guest_conn() as conn: res = conn.execute("SELECT * FROM guests") guests = res.fetchall() return guests def get_migrated_guest_list(self): with get_guest_conn() as conn: res = conn.execute("SELECT userid FROM guests " "WHERE comments LIKE '%\"migrated\": 1%'") guests = res.fetchall() return guests def get_migrated_guest_info_list(self): with get_guest_conn() as conn: res = conn.execute("SELECT * FROM guests " "WHERE comments LIKE '%\"migrated\": 1%'") guests = res.fetchall() return guests def get_comments_by_userid(self, userid): """ Get comments record. output should be like: {'k1': 'v1', 'k2': 'v2'}' """ userid = userid with get_guest_conn() as conn: res = conn.execute("SELECT comments FROM guests " "WHERE userid=?", (userid,)) result = res.fetchall() comments = {} if result[0][0]: comments = json.loads(result[0][0]) return comments def get_metadata_by_userid(self, userid): """get metadata record. output should be like: "a=1,b=2,c=3" """ userid = userid with get_guest_conn() as conn: res = conn.execute("SELECT * FROM guests " "WHERE userid=?", (userid,)) guest = res.fetchall() if len(guest) == 1: return guest[0][2] elif len(guest) == 0: LOG.debug("Guest with userid: %s not found from DB!" % userid) return '' else: msg = "Guest with userid: %s have multiple records!" % userid LOG.error(msg) raise exception.SDKInternalError(msg=msg, modID=self._module_id) def transfer_metadata_to_dict(self, meta): """transfer str to dict. 
output should be like: {'a':1, 'b':2, 'c':3} """ dic = {} arr = meta.strip(' ,').split(',') for i in arr: temp = i.split('=') key = temp[0].strip() value = temp[1].strip() dic[key] = value return dic def get_guest_by_id(self, guest_id): with get_guest_conn() as conn: res = conn.execute("SELECT * FROM guests " "WHERE id=?", (guest_id,)) guest = res.fetchall() # As id is the primary key, the filtered entry number should be 0 or 1 if len(guest) == 1: return guest[0] elif len(guest) == 0: LOG.debug("Guest with id: %s not found from DB!" % guest_id) return None # Code shouldn't come here, just in case return None def get_guest_by_userid(self, userid): userid = userid with get_guest_conn() as conn: res = conn.execute("SELECT * FROM guests " "WHERE userid=?", (userid,)) guest = res.fetchall() # As id is the primary key, the filtered entry number should be 0 or 1 if len(guest) == 1: return guest[0] elif len(guest) == 0: LOG.debug("Guest with userid: %s not found from DB!" % userid) return None # Code shouldn't come here, just in case return None zVMCloudConnector-1.6.3/zvmsdk/sdkserver.py0000664000175000017510000002464213575566551020454 0ustar ruirui00000000000000# Copyright 2017,2018 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import json import six import socket import sys import threading import traceback from zvmsdk import api from zvmsdk import config from zvmsdk import exception from zvmsdk import log from zvmsdk import returncode if six.PY3: import queue as Queue else: import Queue CONF = config.CONF LOG = log.LOG class SDKServer(object): def __init__(self): # Initailize SDK API self.sdkapi = api.SDKAPI() self.server_socket = None self.request_queue = Queue.Queue(maxsize= CONF.sdkserver.request_queue_size) def log_error(self, msg): thread = threading.current_thread().name msg = ("[%s] %s" % (thread, msg)) LOG.error(msg) def log_info(self, msg): thread = threading.current_thread().name msg = ("[%s] %s" % (thread, msg)) LOG.info(msg) def log_warn(self, msg): thread = threading.current_thread().name msg = ("[%s] %s" % (thread, msg)) LOG.warning(msg) def log_debug(self, msg): thread = threading.current_thread().name msg = ("[%s] %s" % (thread, msg)) LOG.debug(msg) def construct_internal_error(self, msg): self.log_error(msg) error = returncode.errors['internal'] results = error[0] results['modID'] = returncode.ModRCs['sdkserver'] results.update({'rs': 1, 'errmsg': error[1][1] % {'msg': msg}, 'output': ''}) return results def construct_api_name_error(self, msg): self.log_error(msg) error = returncode.errors['API'] results = error[0] results['modID'] = returncode.ModRCs['sdkserver'] results.update({'rs': 1, 'errmsg': error[1][1] % {'msg': msg}, 'output': ''}) return results def send_results(self, client, addr, results): """ send back results to client in the json format of: {'overallRC': x, 'modID': x, 'rc': x, 'rs': x, 'errmsg': 'msg', 'output': 'out'} """ json_results = json.dumps(results) json_results = json_results.encode() sent = 0 total_len = len(json_results) got_error = False while (sent < total_len): this_sent = client.send(json_results[sent:]) if this_sent == 0: got_error = True break sent += this_sent if got_error or sent != total_len: self.log_error("(%s:%s) Failed to send 
back results to client, " "results: %s" % (addr[0], addr[1], json_results)) else: self.log_debug("(%s:%s) Results sent back to client successfully." % (addr[0], addr[1])) def serve_API(self, client, addr): """ Read client request and call target SDK API""" self.log_debug("(%s:%s) Handling new request from client." % (addr[0], addr[1])) results = None try: data = client.recv(4096) data = bytes.decode(data) # When client failed to send the data or quit before sending the # data, server side would receive null data. # In such case, server would not send back any info and just # terminate this thread. if not data: self.log_warn("(%s:%s) Failed to receive data from client." % (addr[0], addr[1])) return api_data = json.loads(data) # API_data should be in the form [funcname, args_list, kwargs_dict] if not isinstance(api_data, list) or len(api_data) != 3: msg = ("(%s:%s) SDK server got wrong input: '%s' from client." % (addr[0], addr[1], data)) results = self.construct_internal_error(msg) return # Check called API is supported by SDK (func_name, api_args, api_kwargs) = api_data self.log_debug("(%s:%s) Request func: %s, args: %s, kwargs: %s" % (addr[0], addr[1], func_name, str(api_args), str(api_kwargs))) try: api_func = getattr(self.sdkapi, func_name) except AttributeError: msg = ("(%s:%s) SDK server got wrong API name: %s from" "client." 
% (addr[0], addr[1], func_name)) results = self.construct_api_name_error(msg) return # invoke target API function return_data = api_func(*api_args, **api_kwargs) except exception.SDKBaseException as e: self.log_error("(%s:%s) %s" % (addr[0], addr[1], traceback.format_exc())) # get the error info from exception attribute # All SDKbaseexception should eventually has a # results attribute defined which can be used by # sdkserver here if e.results is None: msg = ("(%s:%s) SDK server got exception without results " "defined, error: %s" % (addr[0], addr[1], e.format_message())) results = self.construct_internal_error(msg) else: results = {'overallRC': e.results['overallRC'], 'modID': e.results['modID'], 'rc': e.results['rc'], 'rs': e.results['rs'], 'errmsg': e.format_message(), 'output': ''} except Exception as e: self.log_error("(%s:%s) %s" % (addr[0], addr[1], traceback.format_exc())) msg = ("(%s:%s) SDK server got unexpected exception: " "%s" % (addr[0], addr[1], repr(e))) results = self.construct_internal_error(msg) else: if return_data is None: return_data = '' results = {'overallRC': 0, 'modID': None, 'rc': 0, 'rs': 0, 'errmsg': '', 'output': return_data} # Send back the final results try: if results is not None: self.send_results(client, addr, results) except Exception as e: # This should not happen in normal case. # A special case is the server side socket is closed/removed # before the send() action. self.log_error("(%s:%s) %s" % (addr[0], addr[1], repr(e))) finally: # Close the connection to make sure the thread socket got # closed even when it got unexpected exceptions. self.log_debug("(%s:%s) Finish handling request, closing " "socket." % (addr[0], addr[1])) client.close() def worker_loop(self): # The worker thread would continuously fetch request from queue # in a while loop. 
while True: try: # This get() function raise Empty exception when there's no # available item in queue clt_socket, clt_addr = self.request_queue.get(block=False) except Queue.Empty: self.log_debug("No more item in request queue, worker will " "exit now.") break except Exception as err: self.log_error("Failed to get request item from queue, error: " "%s. Worker will exit now." % repr(err)) break else: self.serve_API(clt_socket, clt_addr) self.request_queue.task_done() def setup(self): # create server socket try: self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) except socket.error as msg: self.log_error("Failed to create SDK server socket: %s" % msg) sys.exit(1) server_sock = self.server_socket # bind server address and port host = CONF.sdkserver.bind_addr port = CONF.sdkserver.bind_port try: server_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) server_sock.bind((host, port)) except socket.error as msg: self.log_error("Failed to bind to (%s, %d), reason: %s" % (host, port, msg)) server_sock.close() sys.exit(1) # Start listening server_sock.listen(5) self.log_info("SDK server now listening") def run(self): # Keep running in a loop to handle client connections while True: # Wait client connection conn, addr = self.server_socket.accept() self.log_debug("(%s:%s) Client connected." % (addr[0], addr[1])) # This put() function would be blocked here until there's # a slot in the queue self.request_queue.put((conn, addr)) thread_count = threading.active_count() if thread_count <= CONF.sdkserver.max_worker_count: thread = threading.Thread(target=self.worker_loop) self.log_debug("Worker count: %d, starting new worker: %s" % (thread_count - 1, thread.name)) thread.start() def start_daemon(): server = SDKServer() try: server.setup() server.run() finally: # This finally won't catch exceptions from child thread, so # the close here is safe. 
def execute(cmd, timeout=None):
    """ execute command, return rc and output string.
    The cmd argument can be a string or a list composed of
    the command name and each of its argument.
    eg, ['/usr/bin/cp', '-r', 'src', 'dst']

    :param cmd: command string or argv list
    :param timeout: seconds to wait before raising TimeoutExpired, or None
    :return: (rc, output) where rc is the exit code (0 on success) and
             output is the decoded combined stdout/stderr
    """
    # Normalize a command string into an argv list.
    argv = cmd if isinstance(cmd, list) else shlex.split(cmd)

    rc = 0
    try:
        raw = subprocess.check_output(argv,
                                      close_fds=True,
                                      stderr=subprocess.STDOUT,
                                      timeout=timeout)
    except subprocess.CalledProcessError as err:
        # Non-zero exit: report the code and whatever the command printed.
        rc = err.returncode
        raw = err.output
    except (subprocess.TimeoutExpired, PermissionError) as err:
        # Let timeouts and permission failures propagate unchanged.
        raise err
    except Exception as err:
        err_msg = ('Command "%s" Error: %s' % (' '.join(argv), str(err)))
        raise exception.SDKInternalError(msg=err_msg)

    return (rc, bytes.decode(raw))
""" time_start = time.time() expiration = time_start + timeout retry = True while retry: expired = timeout and (time.time() > expiration) LOG.debug( "timeout is %(timeout)s, expiration is %(expiration)s, \ time_start is %(time_start)s" % {"timeout": timeout, "expiration": expiration, "time_start": time_start}) try: f(*args, **kwargs) except exceptions: retry = not expired if retry: LOG.debug("Will re-try %(fname)s in %(itv)d seconds" % {'fname': f.__name__, 'itv': sleep}) time.sleep(sleep) sleep = min(sleep + inc_sleep, max_sleep) else: LOG.debug("Looping call %s timeout" % f.__name__) continue retry = False def convert_to_mb(s): """Convert memory size from GB to MB.""" s = s.upper() try: if s.endswith('G'): return float(s[:-1].strip()) * 1024 elif s.endswith('T'): return float(s[:-1].strip()) * 1024 * 1024 else: return float(s[:-1].strip()) except (IndexError, ValueError, KeyError, TypeError): errmsg = ("Invalid memory format: %s") % s raise exception.SDKInternalError(msg=errmsg) class PathUtils(object): def clean_temp_folder(self, tmp_folder): if os.path.isdir(tmp_folder): LOG.debug('Removing existing folder %s ', tmp_folder) shutil.rmtree(tmp_folder) def _get_guest_path(self): return os.path.join(constants.SDK_DATA_PATH, 'guests') def mkdir_if_not_exist(self, folder): if not os.path.exists(folder): LOG.debug("Creating the guest path %s", folder) os.makedirs(folder) # This is for persistent info for guests # by default it's /var/lib/zvmsdk/guests/xxxx def remove_guest_path(self, userid): guest_folder = os.path.join(self._get_guest_path(), userid) try: shutil.rmtree(guest_folder) except Exception: # Ignore any exception for delete temp folder pass def get_guest_temp_path(self, userid): tmp_inst_dir = tempfile.mkdtemp(prefix=userid, dir='/tmp') return tmp_inst_dir def get_guest_path(self, userid): guest_folder = os.path.join(self._get_guest_path(), userid) self.mkdir_if_not_exist(guest_folder) return guest_folder def get_console_log_path(self, userid): return 
def valid_cidr(cidr):
    """Validate a CIDR string such as '192.168.0.0/24'.

    :param cidr: candidate string
    :return: True only when the value is a string, parses as a network,
             contains a '/' prefix length, and has no whitespace
    """
    if not isinstance(cidr, six.string_types):
        return False
    try:
        netaddr.IPNetwork(cidr)
    except netaddr.AddrFormatError:
        return False
    if '/' not in cidr:
        return False
    # BUGFIX: use a raw string for the regex -- '\s' in a plain literal is
    # an invalid escape sequence (flake8 W605, SyntaxWarning on newer
    # Python versions).
    if re.search(r'\s', cidr):
        return False
    return True
if e.errno == errno.EINVAL: file_like_object.seek(0, os.SEEK_SET) else: raise remaining = file_like_object.tell() return (file_like_object.read(), remaining) def check_input_types(*types, **validkeys): """This is a function decorator to check all input parameters given to decorated function are in expected types. The checks can be skipped by specify skip_input_checks=True in decorated function. :param tuple types: expected types of input parameters to the decorated function :param validkeys: valid keywords(str) in a list. e.g. validkeys=['key1', 'key2'] """ def decorator(function): @functools.wraps(function) def wrap_func(*args, **kwargs): if args[0]._skip_input_check: # skip input check return function(*args, **kwargs) # drop class object self inputs = args[1:] if (len(inputs) > len(types)): msg = ("Too many parameters provided: %(specified)d specified," "%(expected)d expected." % {'specified': len(inputs), 'expected': len(types)}) LOG.info(msg) raise exception.SDKInvalidInputNumber(function.__name__, len(types), len(inputs)) argtypes = tuple(map(type, inputs)) match_types = types[0:len(argtypes)] invalid_type = False invalid_userid_idx = -1 for idx in range(len(argtypes)): _mtypes = match_types[idx] if not isinstance(_mtypes, tuple): _mtypes = (_mtypes,) argtype = argtypes[idx] if constants._TUSERID in _mtypes: userid_type = True for _tmtype in _mtypes: if ((argtype == _tmtype) and (_tmtype != constants._TUSERID)): userid_type = False if (userid_type and (not valid_userid(inputs[idx]))): invalid_userid_idx = idx break elif argtype not in _mtypes: invalid_type = True break if invalid_userid_idx != -1: msg = ("Invalid string value found at the #%d parameter, " "length should be less or equal to 8 and should not be " "null or contain spaces." 
def import_class(import_str):
    """Returns a class from a string including module and class."""
    module_name, _, class_name = import_str.rpartition('.')
    __import__(module_name)
    module = sys.modules[module_name]
    try:
        return getattr(module, class_name)
    except AttributeError:
        raise ImportError('Class %s cannot be found (%s)' %
                          (class_name,
                           traceback.format_exception(*sys.exc_info())))
@contextlib.contextmanager
def log_and_reraise_sdkbase_error(action):
    """Log an error for a failed *action*, then re-raise the exception.

    Only exception.SDKBaseException (and subclasses) are logged; any
    other exception propagates untouched.

    action: short description of the operation being attempted.
    """
    try:
        yield
    except exception.SDKBaseException:
        LOG.error("Failed to " + action + ".")
        raise
" % action msg += "SMT error: %s" % err.format_message() LOG.error(msg) raise exception.SDKSMTRequestFailed(err.results, msg) @contextlib.contextmanager def ignore_errors(): """Only execute the clauses and ignore the results""" try: yield except Exception as err: LOG.error('ignore an error: ' + str(err)) pass def get_smt_userid(): """Get the userid of smt server""" cmd = ["sudo", "/sbin/vmcp", "query userid"] try: userid = subprocess.check_output(cmd, close_fds=True, stderr=subprocess.STDOUT) userid = bytes.decode(userid) userid = userid.split()[0] return userid except Exception as err: msg = ("Could not find the userid of the smt server: %s") % err raise exception.SDKInternalError(msg=msg) def get_lpar_name(): """Get the name of the LPAR that this vm is on.""" cmd = ["sudo", "/sbin/vmcp", "query userid"] try: userid = subprocess.check_output(cmd, close_fds=True, stderr=subprocess.STDOUT) userid = bytes.decode(userid) userid = userid.split()[-1] return userid except Exception as err: msg = ("Failed to get the LPAR name for the smt server: %s") % err raise exception.SDKInternalError(msg=msg) def get_namelist(): """Generate namelist. Either through set CONF.zvm.namelist, or by generate based on smt userid. 
""" if CONF.zvm.namelist is not None: # namelist length limit should be 64, but there's bug limit to 8 # will change the limit to 8 once the bug fixed if len(CONF.zvm.namelist) <= 8: return CONF.zvm.namelist # return ''.join(('NL', get_smt_userid().rjust(6, '0')[-6:])) # py3 compatible changes userid = get_smt_userid() return 'NL' + userid.rjust(6, '0')[-6:] def generate_iucv_authfile(fn, client): """Generate the iucv_authorized_userid file""" lines = ['#!/bin/bash\n', 'echo -n %s > /etc/iucv_authorized_userid\n' % client] with open(fn, 'w') as f: f.writelines(lines) def translate_response_data_to_expect_dict(results, step): """ Translate SMT response to a python dictionary ['volume name: IASFBA', 'volume_type:9336-ET', 'volume_size:564718', 'volume_name: IAS1CM', 'volume_type:3390-09', 'volume_size:60102'] translate to: {'IASFBA': {'volume_type': '9336-ET', 'volume_size': '564718'}, 'IAS1CM': {'volume_type': '3390-09', 'volume_size': '60102'}} :results: the SMT response in list format :step: count list members converted to one member of the directory """ data = {} for i in range(0, len(results), step): volume_name = results[i].split(':')[1].strip() data[volume_name] = {} for k in range(1, step): key, value = results[i + k].split(':') data[volume_name][key] = value return data @wrap_invalid_resp_data_error def translate_response_to_dict(rawdata, dirt): """Translate SMT response to a python dictionary. SMT response example: keyword1: value1\n keyword2: value2\n ... keywordn: valuen\n Will return a python dictionary: {keyword1: value1, keyword2: value2, ... keywordn: valuen,} """ data_list = rawdata.split("\n") data = {} for ls in data_list: for k in list(dirt.keys()): if ls.__contains__(dirt[k]): data[k] = ls[(ls.find(dirt[k]) + len(dirt[k])):].strip() break if data == {}: msg = ("Invalid smt response data. Error: No value matched with " "keywords. 
def make_dummy_image(image_path, d_type='CKD'):
    """Write a minimal fake z/VM disk image header to *image_path*.

    :param image_path: destination path of the dummy image file
    :param d_type: disk type, 'CKD' or 'FBA'; anything else falls
        back to 'CKD'
    """
    if d_type not in ('CKD', 'FBA'):
        d_type = 'CKD'
    # FBA disks are sized in blocks, CKD disks in cylinders
    d_unit = 'BLK' if d_type == 'FBA' else 'CYL'
    fields = ("z/VM %(type)s Disk Image: 0 %(unit)s"
              % {'type': d_type, 'unit': d_unit},
              'HLen: 0055',
              'GZIP: 0')
    with open(image_path, 'wb') as image_file:
        image_file.write(' '.join(fields).encode())
def expand_fcp_list(fcp_list):
    """Expand fcp list string into a python list object which contains
    each fcp devices in the list string. A fcp list is composed of fcp
    device addresses, range indicator '-', and split indicator ';'.

    Example 1:
    if fcp_list is "0011-0013;0015;0017-0018",
    then the function will return
    {
      0: {'0011' ,'0012', '0013'}
      1: {'0015'}
      2: {'0017', '0018'}
    }

    Example 2:
    if fcp_list is empty string: '',
    then the function will return an empty set: {}

    ATTENTION: To support multipath, we expect fcp_list should be like
    "0011-0014;0021-0024",
    "0011-0014" should have been on same physical WWPN which we called
    path0, "0021-0024" should be on another physical WWPN we called
    path1 which is different from "0011-0014".
    path0 and path1 should have same count of FCP devices in their group.
    When attach, we will choose one WWPN from path0 group, and choose
    another one from path1 group. Then we will attach this pair of WWPNs
    together to the guest as a way to implement multipath.
    """
    LOG.debug("Expand FCP list %s" % fcp_list)

    # an empty/None list expands to an empty dict
    if not fcp_list:
        return dict()
    fcp_list = fcp_list.strip()
    fcp_list = fcp_list.replace(' ', '')
    # a single item is 1-4 hex digits, optionally a '-' separated range
    range_pattern = '[0-9a-fA-F]{1,4}(-[0-9a-fA-F]{1,4})?'
    # match_pattern: ';' separated ranges, e.g. "0011-0013;0015"
    match_pattern = "^(%(range)s)(;%(range)s;?)*$" % \
        {'range': range_pattern}
    # multi_match_pattern additionally allows ',' separated items
    # inside each ';' group, e.g. "0011,0013-0014;0021-0024"
    item_pattern = "(%(range)s)(,%(range)s?)*" % \
        {'range': range_pattern}
    multi_match_pattern = "^(%(range)s)(;%(range)s;?)*$" % \
        {'range': item_pattern}
    if not re.match(match_pattern, fcp_list) and \
       not re.match(multi_match_pattern, fcp_list):
        errmsg = ("Invalid FCP address %s") % fcp_list
        raise exception.SDKInternalError(msg=errmsg)
    fcp_devices = {}
    path_no = 0
    # each ';' separated group is one path; path_no advances once per
    # group, so the dict keys are 0..n-1 in input order
    for _range in fcp_list.split(';'):
        for item in _range.split(','):
            # remove duplicate entries
            devices = set()
            if item != '':
                if '-' not in item:
                    # single device
                    fcp_addr = int(item, 16)
                    devices.add("%04x" % fcp_addr)
                else:
                    # a range of address; both endpoints are inclusive
                    (_min, _max) = item.split('-')
                    _min = int(_min, 16)
                    _max = int(_max, 16)
                    for fcp_addr in range(_min, _max + 1):
                        devices.add("%04x" % fcp_addr)
                # merge this item's devices into the current path's set
                if fcp_devices.get(path_no):
                    fcp_devices[path_no].update(devices)
                else:
                    fcp_devices[path_no] = devices
        path_no = path_no + 1

    return fcp_devices
""" # Case 1: only one FCP in the list. if len(local_fcp_list) == 1: fcp_section.append(local_fcp_list[0]) else: start_fcp = int(local_fcp_list[0], 16) end_fcp = int(local_fcp_list[-1], 16) count = len(local_fcp_list) - 1 # Case 2: all the FCPs are continuous. if start_fcp + count == end_fcp: # e.g. hex(int('1A01',16)) is '0x1a01' section_str = '{} - {}'.format( hex(start_fcp)[2:], hex(end_fcp)[2:]) fcp_section.append(section_str) # Case 3: not all the FCPs are continuous. else: start_fcp = int(local_fcp_list.pop(0), 16) for idx, fcp in enumerate(local_fcp_list.copy()): next_fcp = int(fcp, 16) # pop the fcp if it is continuous with the last # e.g. # when start_fcp is '1A01', # pop '1A02' and '1A03' if start_fcp + idx + 1 == next_fcp: local_fcp_list.pop(0) continue # e.g. # when start_fcp is '1A01', # next_fcp '1A05' is NOT continuous with the last else: end_fcp = start_fcp + idx # e.g. # when start_fcp is '1A01', # end_fcp is '1A03' if start_fcp != end_fcp: # e.g. hex(int('1A01',16)) is '0x1a01' section_str = '{} - {}'.format( hex(start_fcp)[2:], hex(end_fcp)[2:]) # e.g. # when start_fcp is '1A05', # end_fcp is '1A05' else: section_str = hex(start_fcp)[2:] fcp_section.append(section_str) break # recursively transform if FCP list still not empty if local_fcp_list: __transform_fcp_list_into_str(local_fcp_list) fcp_section = list() fcp_str = '' if fcp_list: # sort fcp_list in hex order, e.g. # before sort: ['1E01', '1A02', '1D03'] # after sort: ['1A02', '1D03', '1E01'] fcp_list.sort() __transform_fcp_list_into_str(fcp_list) # return a string contains all FCP fcp_str = ', '.join(fcp_section).upper() return fcp_str def verify_fcp_list_in_hex_format(fcp_list): """Verify each FCP in the list is in Hex format :param fcp_list: (list) a list object contains FCPs. """ if not isinstance(fcp_list, list): errmsg = ('fcp_list ({}) is not a list object.' 
'').format(fcp_list) raise exception.SDKInvalidInputFormat(msg=errmsg) # Verify each FCP should be a 4-digit hex for fcp in fcp_list: if not (len(fcp) == 4 and all(char in string.hexdigits for char in fcp)): errmsg = ('FCP list {} contains non-hex value.' '').format(fcp_list) raise exception.SDKInvalidInputFormat(msg=errmsg) zVMCloudConnector-1.6.3/zvmsdk/tests/0000775000175000017510000000000014315232035017200 5ustar ruirui00000000000000zVMCloudConnector-1.6.3/zvmsdk/tests/unit/0000775000175000017510000000000014315232035020157 5ustar ruirui00000000000000zVMCloudConnector-1.6.3/zvmsdk/tests/unit/test_imageops.py0000664000175000017510000000470613672563714023423 0ustar ruirui00000000000000# Copyright 2017 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
class SDKImageOpsTestCase(base.SDKTestCase):
    """Unit tests for the imageops API layer.

    Each test patches the corresponding SMTClient method and verifies
    that the imageops call delegates to it with unchanged arguments.
    """

    def setUp(self):
        # fresh imageops handle and path helper for every test
        self._image_ops = imageops.get_imageops()
        self._pathutil = zvmutils.PathUtils()

    @mock.patch("zvmsdk.smtclient.SMTClient.image_import")
    def test_image_import(self, image_import):
        """image_import passes name/url/meta/remote_host straight through."""
        image_name = '95a4da37-9f9b-4fb2-841f-f0bb441b7544'
        url = 'file:///path/to/image/file'
        image_meta = {'os_version': 'rhel7.2',
                      'md5sum': 'e34166f61130fc9221415d76298d7987'}
        remote_host = 'image@192.168.99.1'
        self._image_ops.image_import(image_name, url, image_meta,
                                     remote_host)
        image_import.assert_called_once_with(image_name, url,
                                             image_meta,
                                             remote_host)

    @mock.patch("zvmsdk.smtclient.SMTClient.image_query")
    def test_image_query(self, image_query):
        """image_query delegates the keyword unchanged."""
        imagekeyword = 'eae09a9f_7958_4024_a58c_83d3b2fc0aab'
        self._image_ops.image_query(imagekeyword)
        image_query.assert_called_once_with(imagekeyword)

    @mock.patch("zvmsdk.smtclient.SMTClient.image_delete")
    def test_image_delete(self, image_delete):
        """image_delete delegates the image name unchanged."""
        image_name = 'eae09a9f_7958_4024_a58c_83d3b2fc0aab'
        self._image_ops.image_delete(image_name)
        image_delete.assert_called_once_with(image_name)

    @mock.patch("zvmsdk.smtclient.SMTClient.image_export")
    def test_image_export(self, image_export):
        """image_export delegates with None as the default remote host."""
        image_name = 'testimage'
        dest_url = 'file:///path/to/export/image'
        self._image_ops.image_export(image_name, dest_url)
        image_export.assert_called_once_with(image_name, dest_url,
                                             None)
class ZVMConfigTestCases(base.SDKTestCase):
    """Unit tests for the config option validators on CONFOPTS.

    Valid inputs must pass silently; invalid inputs must raise
    config.OptFormatError.
    """

    def test_check_zvm_disk_pool_fba(self):
        # valid 'type:pool' value, lowercase fba type
        CONFOPTS._check_zvm_disk_pool('fba:pool1')

    def test_check_zvm_disk_pool_eckd(self):
        # disk type is accepted case-insensitively
        CONFOPTS._check_zvm_disk_pool('eCKD:pool1')

    def test_check_zvm_disk_pool_err1(self):
        # missing ':' separator between type and pool name
        self.assertRaises(config.OptFormatError,
                          CONFOPTS._check_zvm_disk_pool,
                          'fbapool1')

    def test_check_zvm_disk_pool_err2(self):
        # empty pool name after the separator
        self.assertRaises(config.OptFormatError,
                          CONFOPTS._check_zvm_disk_pool,
                          'ECKD:')

    def test_check_user_default_max_memory(self):
        # both G and M suffixed sizes are valid
        CONFOPTS._check_user_default_max_memory('30G')
        CONFOPTS._check_user_default_max_memory('1234M')

    def test_check_user_default_max_memory_err1(self):
        # fractional values are rejected
        self.assertRaises(config.OptFormatError,
                          CONFOPTS._check_user_default_max_memory,
                          '12.0G')

    def test_check_user_default_max_memory_err2(self):
        # a bare number without unit suffix is rejected
        self.assertRaises(config.OptFormatError,
                          CONFOPTS._check_user_default_max_memory,
                          '12')

    def test_check_user_default_max_memory_err3(self):
        # more than 4 digits before 'M' is rejected
        self.assertRaises(config.OptFormatError,
                          CONFOPTS._check_user_default_max_memory,
                          '12345M')

    def test_check_user_default_max_reserved_memory(self):
        CONFOPTS._check_user_default_max_reserved_memory('30G')
        CONFOPTS._check_user_default_max_reserved_memory('1234M')

    # NOTE(review): method name drops the word "memory", unlike its
    # err2/err3 siblings -- kept as-is since renaming would alter the
    # test interface.
    def test_check_user_default_max_reserved_err1(self):
        self.assertRaises(config.OptFormatError,
                          CONFOPTS._check_user_default_max_reserved_memory,
                          '12.0G')

    def test_check_user_default_max_reserved_memory_err2(self):
        self.assertRaises(config.OptFormatError,
                          CONFOPTS._check_user_default_max_reserved_memory,
                          '12')

    def test_check_user_default_max_reserved_memory_err3(self):
        self.assertRaises(config.OptFormatError,
                          CONFOPTS._check_user_default_max_reserved_memory,
                          '12345M')

    def test_check_user_default_max_cpu(self):
        CONFOPTS._check_user_default_max_cpu(1)

    def test_check_user_default_max_cpu_err(self):
        # values above 64 are rejected -- TODO confirm upper bound
        self.assertRaises(config.OptFormatError,
                          CONFOPTS._check_user_default_max_cpu,
                          65)
class TestVolumeOperatorAPI(base.SDKTestCase):
    """Unit tests for volumeop.VolumeOperatorAPI.

    Each test patches the underlying FCPManager method and verifies
    the operator forwards its arguments unchanged.
    """

    def setUp(self):
        super(TestVolumeOperatorAPI, self).setUp()
        # a fresh operator per test; FCPManager calls are mocked below
        self.operator = volumeop.VolumeOperatorAPI()

    @mock.patch("zvmsdk.volumeop.FCPManager.edit_fcp_template")
    def test_edit_fcp_template(self, mock_edit_tmpl):
        """ Test edit_fcp_template: kwargs are forwarded verbatim."""
        tmpl_id = 'fake_id'
        kwargs = {
            'name': 'new_name',
            'description': 'new_desc',
            'fcp_devices': '1A00-1A03;1B00-1B03',
            'host_default': False,
            'default_sp_list': ['sp1'],
            'min_fcp_paths_count': 2}
        self.operator.edit_fcp_template(tmpl_id, **kwargs)
        mock_edit_tmpl.assert_called_once_with(tmpl_id, **kwargs)

    @mock.patch("zvmsdk.volumeop.FCPManager.get_fcp_templates")
    def test_get_fcp_templates(self, mock_get_tmpl):
        """ Test get_fcp_templates in VolumeOperator:
        keyword arguments are passed down positionally."""
        tmpl_list = ['fake_id']
        assigner_id = 'fake_user'
        host_default = True
        default_sp_list = ['fake_sp']
        self.operator.get_fcp_templates(template_id_list=tmpl_list,
                                        assigner_id=assigner_id,
                                        default_sp_list=default_sp_list,
                                        host_default=host_default)
        mock_get_tmpl.assert_called_once_with(tmpl_list,
                                              assigner_id,
                                              default_sp_list,
                                              host_default)

    @mock.patch("zvmsdk.volumeop.FCPManager.get_fcp_templates_details")
    def test_get_fcp_templates_details(self, mock_get_tmpl_details):
        """ Test get_fcp_templates_details in VolumeOperator:
        flags are forwarded as keywords."""
        tmpl_list = ['fake_id']
        self.operator.get_fcp_templates_details(template_id_list=tmpl_list,
                                                raw=True,
                                                statistics=True,
                                                sync_with_zvm=False)
        mock_get_tmpl_details.assert_called_once_with(tmpl_list,
                                                      raw=True,
                                                      statistics=True,
                                                      sync_with_zvm=False)

    @mock.patch("zvmsdk.volumeop.FCPManager.delete_fcp_template")
    def test_delete_fcp_template(self, mock_delete_tmpl):
        """ Test delete_fcp_template in VolumeOperator"""
        tmpl_id = 'fake_id'
        self.operator.delete_fcp_template(tmpl_id)
        mock_delete_tmpl.assert_called_once_with(tmpl_id)
files with exit_code: 1.') line13 = ("Dec 17 05:19:09 rhel82-bfv.boeblingen.de.ibm.com systemd[1]" ": zvmguestconfigure.service: Main process exited, " "code=exited, status=1/FAILURE") line14 = ("Dec 17 05:19:09 rhel82-bfv.boeblingen.de.ibm.com systemd[1]" ": zvmguestconfigure.service: Failed with result " "'exit-code'.") line15 = ('Dec 17 05:19:09 rhel82-bfv.boeblingen.de.ibm.com systemd[1]' ': Failed to start Activation engine for configuring zLinux ' 'os when it start up.') line16 = 'Return code 8, Reason code 3.' output = {'overallRC': 2, 'rc': 8, 'rs': 3, 'errno': 0, 'strError': '', 'response': [line1, line2, line3, line4, line5, '', line6, line7, line8, line9, line10, line11, line12, line13, line14, line15, '', line16], 'logEntries': []} execute_cmd.return_value = output assigner_id = 'userid1' command = 'fake command' code = self.configurator._get_status_code_from_systemctl(assigner_id, command) self.assertEqual(code, 1) @mock.patch("zvmsdk.dist.LinuxDistManager.get_linux_dist") @mock.patch("zvmsdk.smtclient.SMTClient.execute_cmd_direct") @mock.patch.object(dist.rhel7, "create_active_net_interf_cmd") @mock.patch("zvmsdk.volumeop.VolumeConfiguratorAPI." "_get_status_code_from_systemctl") @mock.patch("zvmsdk.volumeop.VolumeConfiguratorAPI." 
"configure_volume_attach") @mock.patch("zvmsdk.volumeop.VolumeConfiguratorAPI.check_IUCV_is_ready") def test_config_attach_reachable_but_exception(self, is_reachable, config_attach, get_status_code, restart_zvmguestconfigure, execute_cmd, get_dist): """config_attach has almost same logic with config_detach so only write UT cases of config_attach""" fcp_list = ['1a11', '1b11'] assigner_id = 'userid1' target_wwpns = ['1111', '1112'] target_lun = '2222' multipath = True os_version = 'rhel7' mount_point = '/dev/sdz' get_dist.return_value = dist.rhel7 config_attach.return_value = None is_reachable.return_value = True get_status_code.return_value = 1 execute_cmd.return_value = {'rc': 3} active_cmds = 'systemctl start zvmguestconfigure.service' restart_zvmguestconfigure.return_value = active_cmds self.assertRaises(exception.SDKVolumeOperationError, self.configurator.config_attach, fcp_list, assigner_id, target_wwpns, target_lun, multipath, os_version, mount_point) get_dist.assert_called_once_with(os_version) restart_zvmguestconfigure.assert_called_once_with() execute_cmd.assert_called_once_with(assigner_id, active_cmds, timeout=1800) @mock.patch("zvmsdk.dist.LinuxDistManager.get_linux_dist") @mock.patch("zvmsdk.smtclient.SMTClient.execute_cmd_direct") @mock.patch.object(dist.rhel7, "create_active_net_interf_cmd") @mock.patch("zvmsdk.volumeop.VolumeConfiguratorAPI." "_get_status_code_from_systemctl") @mock.patch("zvmsdk.volumeop.VolumeConfiguratorAPI." 
"configure_volume_attach") @mock.patch("zvmsdk.volumeop.VolumeConfiguratorAPI.check_IUCV_is_ready") def test_config_attach_reachable(self, is_reachable, config_attach, get_status_code, restart_zvmguestconfigure, execute_cmd, get_dist): fcp_list = ['1a11', '1b11'] assigner_id = 'userid1' target_wwpns = ['1111', '1112'] target_lun = '2222' multipath = True os_version = 'rhel7' mount_point = '/dev/sdz' get_dist.return_value = dist.rhel7 config_attach.return_value = None is_reachable.return_value = True get_status_code.return_value = 1 execute_cmd.return_value = {'rc': 0} active_cmds = 'systemctl start zvmguestconfigure.service' restart_zvmguestconfigure.return_value = active_cmds self.configurator.config_attach(fcp_list, assigner_id, target_wwpns, target_lun, multipath, os_version, mount_point) get_dist.assert_called_once_with(os_version) restart_zvmguestconfigure.assert_called_once_with() execute_cmd.assert_called_once_with(assigner_id, active_cmds, timeout=1800) @mock.patch("zvmsdk.dist.LinuxDistManager.get_linux_dist") @mock.patch("zvmsdk.volumeop.VolumeConfiguratorAPI." 
"configure_volume_attach") @mock.patch("zvmsdk.volumeop.VolumeConfiguratorAPI.check_IUCV_is_ready") def test_config_attach_not_reachable(self, is_reachable, config_attach, get_dist): fcp = 'bfc3' assigner_id = 'userid1' target_wwpns = ['1111', '1112'] target_lun = '2222' multipath = True os_version = 'rhel7' mount_point = '/dev/sdz' is_reachable.return_value = False get_dist.return_value = dist.rhel7 config_attach.return_value = None self.configurator.config_attach(fcp, assigner_id, target_wwpns, target_lun, multipath, os_version, mount_point) get_dist.assert_called_once_with(os_version) @mock.patch("zvmsdk.smtclient.SMTClient.execute_cmd") def test_check_IUCV_is_ready(self, execute_cmd): assigner_id = 'fakeid' execute_cmd.return_value = '' ret = self.configurator.check_IUCV_is_ready(assigner_id) execute_cmd.assert_called_once_with(assigner_id, 'pwd') self.assertEqual(ret, True) @mock.patch("zvmsdk.smtclient.SMTClient.execute_cmd") def test_check_IUCV_is_ready_not_ready(self, execute_cmd): # case: not ready, but can continue assigner_id = 'fakeid' results = {'rs': 0, 'errno': 0, 'strError': '', 'overallRC': 1, 'logEntries': [], 'rc': 0, 'response': ['fake response']} execute_cmd.side_effect = exception.SDKSMTRequestFailed( results, 'fake error contains other things') ret = self.configurator.check_IUCV_is_ready(assigner_id) execute_cmd.assert_called_once_with(assigner_id, 'pwd') self.assertEqual(ret, False) @mock.patch("zvmsdk.smtclient.SMTClient.execute_cmd") def test_check_IUCV_is_ready_raise_excetion(self, execute_cmd): # case: not ready, must raise exception assigner_id = 'fakeid' results = {'rs': 0, 'errno': 0, 'strError': '', 'overallRC': 1, 'logEntries': [], 'rc': 0, 'response': ['fake response']} execute_cmd.side_effect = exception.SDKSMTRequestFailed( results, 'fake error contains UNAUTHORIZED_ERROR') self.assertRaises(exception.SDKVolumeOperationError, self.configurator.check_IUCV_is_ready, assigner_id) def test_config_force_attach(self): pass def 
test_config_force_detach(self): pass @mock.patch("zvmsdk.smtclient.SMTClient.punch_file") @mock.patch.object(shutil, "rmtree") @mock.patch("zvmsdk.volumeop.VolumeConfiguratorAPI._create_file") @mock.patch("zvmsdk.dist.LinuxDistManager.get_linux_dist") @mock.patch("zvmsdk.dist.rhel7.get_volume_attach_configuration_cmds") def test_config_attach_active(self, get_attach_cmds, get_dist, create_file, rmtree, punch_file): fcp_list = ['1a11', '1b11'] assigner_id = 'userid1' target_wwpns = ['1111', '1112'] target_lun = '2222' multipath = True os_version = 'rhel7' mount_point = '/dev/sdz' config_file = '/tm/userid1xxx/attach_volume.sh' config_file_path = '/tm/userid1xxx/' linuxdist = dist.rhel7() get_dist.return_value = linuxdist create_file.return_value = (config_file, config_file_path) rmtree.return_value = None self.configurator.configure_volume_attach(fcp_list, assigner_id, target_wwpns, target_lun, multipath, os_version, mount_point, linuxdist) get_attach_cmds.assert_called_once_with(' '.join(fcp_list), ' '.join(target_wwpns), target_lun, multipath, mount_point) punch_file.assert_called_once_with(assigner_id, config_file, 'X') @mock.patch("zvmsdk.smtclient.SMTClient.punch_file") @mock.patch.object(shutil, "rmtree") @mock.patch("zvmsdk.volumeop.VolumeConfiguratorAPI._create_file") @mock.patch("zvmsdk.dist.LinuxDistManager.get_linux_dist") @mock.patch("zvmsdk.dist.rhel7.get_volume_detach_configuration_cmds") def test_config_detach_active(self, get_detach_cmds, get_dist, create_file, rmtree, punch_file): fcp_list = ['1a11', '1b11'] assigner_id = 'userid1' target_wwpns = ['1111', '1112'] target_lun = '2222' multipath = True connections = 2 os_version = 'rhel7' mount_point = '/dev/sdz' config_file = '/tm/userid1xxx/attach_volume.sh' config_file_path = '/tm/userid1xxx/' linuxdist = dist.rhel7() get_dist.return_value = linuxdist create_file.return_value = (config_file, config_file_path) rmtree.return_value = None self.configurator.configure_volume_detach(fcp_list, assigner_id, 
target_wwpns, target_lun, multipath, os_version, mount_point, linuxdist, connections) get_detach_cmds.assert_called_once_with(' '.join(fcp_list), ' '.join(target_wwpns), target_lun, multipath, mount_point, connections) punch_file.assert_called_once_with(assigner_id, config_file, 'X') class TestFCP(base.SDKTestCase): def test_parse_normal(self): info = ['opnstk1: FCP device number: B83D', 'opnstk1: Status: Free', 'opnstk1: NPIV world wide port number: NONE', 'opnstk1: Channel path ID: 59', 'opnstk1: Physical world wide port number: 20076D8500005181', 'Owner: NONE'] fcp = volumeop.FCP(info) self.assertEqual('b83d', fcp.get_dev_no()) self.assertEqual('free', fcp.get_dev_status()) self.assertEqual('none', fcp.get_npiv_port()) self.assertEqual('59', fcp.get_chpid()) self.assertEqual('20076d8500005181', fcp.get_physical_port()) self.assertEqual('none', fcp.get_owner()) self.assertEqual(('b83d', 'none', '20076d8500005181', '59', 'free', 'none'), fcp.to_tuple()) def test_parse_npiv(self): info = ['opnstk1: FCP device number: B83D', 'opnstk1: Status: Active', 'opnstk1: NPIV world wide port number: 20076D8500005182', 'opnstk1: Channel path ID: 59', 'opnstk1: Physical world wide port number: 20076D8500005181', 'Owner: UNIT0001'] fcp = volumeop.FCP(info) self.assertEqual('b83d', fcp.get_dev_no()) self.assertEqual('active', fcp.get_dev_status()) self.assertEqual('20076d8500005182', fcp.get_npiv_port()) self.assertEqual('59', fcp.get_chpid()) self.assertEqual('20076d8500005181', fcp.get_physical_port()) self.assertEqual('unit0001', fcp.get_owner()) self.assertEqual(('b83d', '20076d8500005182', '20076d8500005181', '59', 'active', 'unit0001'), fcp.to_tuple()) class TestFCPManager(base.SDKTestCase): @classmethod @mock.patch("zvmsdk.volumeop.FCPManager.sync_db", mock.Mock()) def setUpClass(cls): super(TestFCPManager, cls).setUpClass() cls.fcpops = volumeop.FCPManager() cls.db_op = database.FCPDbOperator() cls.fcp_vol_mgr = TestFCPVolumeManager() def _insert_data_into_fcp_table(self, 
fcp_info_list): # insert data into all columns of fcp table with database.get_fcp_conn() as conn: conn.executemany("INSERT INTO fcp " "(fcp_id, assigner_id, connections, " "reserved, wwpn_npiv, wwpn_phy, chpid, " "state, owner, tmpl_id) VALUES " "(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", fcp_info_list) def _insert_data_into_template_table(self, templates_info): # insert data into all columns of template table with database.get_fcp_conn() as conn: conn.executemany("INSERT INTO template " "(id, name, description, is_default) " "VALUES (?, ?, ?, ?)", templates_info) def _delete_from_template_table(self, template_id_list): # delete templates record from template and template_sp_mapping templates_id = [(tmpl_id,) for tmpl_id in template_id_list] with database.get_fcp_conn() as conn: conn.executemany("DELETE FROM template " "WHERE id=?", templates_id) conn.executemany("DELETE FROM template_sp_mapping " "WHERE tmpl_id=?", templates_id) @mock.patch("zvmsdk.smtclient.SMTClient.get_fcp_info_by_status") def test_get_all_fcp_info(self, get_fcp_info): """Test get_all_fcp_info""" get_fcp_info.return_value = [] self.fcpops._get_all_fcp_info('dummy1') get_fcp_info.assert_called_once_with('dummy1', None) def test_get_fcp_dict_in_db(self): """Test get_fcp_dict_in_db""" # create 2 FCP records # a83c: connections == 0, reserved == 0 # a83d: connections == 2, reserved == 1 # pre create data in FCP DB for test template_id = '' fcp_info_list = [('1a01', 'user1', 0, 0, 'c05076de33000a01', 'c05076de3300264a', '27', 'active', 'owner1', template_id), ('1a02', 'user1', 0, 1, 'c05076de33000a02', 'c05076de3300264a', '27', 'active', 'owner1', template_id), ('1b01', 'user2', 1, 1, 'c05076de33000b01', 'c05076de3300264b', '27', 'active', 'owner1', template_id), ('1b03', 'user2', 2, 1, 'c05076de33000b03', 'c05076de3300264b', '27', 'active', 'owner2', template_id)] fcp_id_list = [fcp_info[0] for fcp_info in fcp_info_list] # remove dirty data from other test cases 
self.db_op.bulk_delete_from_fcp_table(fcp_id_list) self._insert_data_into_fcp_table(fcp_info_list) try: fcp_dict = self.fcpops.get_fcp_dict_in_db() info_1a02 = fcp_dict['1a02'] info_1b03 = fcp_dict['1b03'] self.assertEqual(info_1a02['connections'], 0) self.assertEqual(info_1a02['reserved'], 1) self.assertEqual(info_1b03['connections'], 2) self.assertEqual(info_1b03['reserved'], 1) finally: self.db_op.bulk_delete_from_fcp_table(fcp_id_list) def test_reserve_fcp_devices_with_existed_reserved_fcp(self): template_id = "fake_fcp_template_00" assinger_id = "wxy0001" sp_name = "fake_sp_name" fcp_info_list = [('1a10', assinger_id, 1, 1, 'c05076de3300a83c', 'c05076de33002641', '27', 'active', 'owner1', template_id), ('1b10', assinger_id, 1, 1, 'c05076de3300b83c', 'c05076de33002641', '27', 'active', 'owner2', template_id), ('1a11', '', 0, 0, 'c05076de3300b83c', 'c05076de33002641', '27', 'active', 'owner2', template_id), ('1b11', '', 0, 0, 'c05076de3300b83c', 'c05076de33002641', '27', 'active', 'owner2', template_id) ] fcp_id_list = [fcp_info[0] for fcp_info in fcp_info_list] self._insert_data_into_fcp_table(fcp_info_list) # insert data into template_fcp_mapping table template_fcp = [('1a10', template_id, 0), ('1b10', template_id, 1)] self.fcp_vol_mgr._insert_data_into_template_fcp_mapping_table(template_fcp) # insert data into template table to add a default template templates = [(template_id, 'name1', 'desc1', 1)] template_id_list = [tmpl[0] for tmpl in templates] self._insert_data_into_template_table(templates) template_sp_mapping = [(sp_name, template_id)] self.fcp_vol_mgr._insert_data_into_template_sp_mapping_table(template_sp_mapping) try: available_list, fcp_tmpl_id = self.fcpops.reserve_fcp_devices( assinger_id, template_id, sp_name) expected_fcp_list = [('1a10', 'c05076de3300a83c', 'c05076de33002641'), ('1b10', 'c05076de3300b83c', 'c05076de33002641')] actual_fcp_list = [] for fcp in available_list: fcp_id, wwpn_npiv, wwpn_phy = fcp actual_fcp_list.append((fcp_id, 
wwpn_npiv, wwpn_phy)) self.assertEqual(template_id, fcp_tmpl_id) self.assertEqual(expected_fcp_list, actual_fcp_list) finally: self.db_op.bulk_delete_from_fcp_table(fcp_id_list) self._delete_from_template_table(template_id_list) self.db_op.bulk_delete_fcp_from_template(fcp_id_list, template_id) @mock.patch("zvmsdk.volumeop.FCPManager._sync_db_with_zvm", mock.Mock()) def test_reserve_fcp_devices_without_existed_reserved_fcp(self): """ reserve fcp devices for the assigner which hasn't reserved any fcp devices before """ template_id = "fake_fcp_template_00" assinger_id = "wxy0001" sp_name = "fake_sp_name" fcp_info_list = [('1a10', '', 0, 0, 'c05076de3300a83c', 'c05076de33002641', '27', 'free', '', ''), ('1b10', '', 0, 0, 'c05076de3300b83c', 'c05076de33002641', '27', 'free', '', ''), ('1a11', '', 0, 0, 'c05076de3300c83c', 'c05076de33002641', '27', 'free', '', ''), ('1b11', '', 0, 0, 'c05076de3300d83c', 'c05076de33002641', '27', 'free', '', '') ] fcp_id_list = [fcp_info[0] for fcp_info in fcp_info_list] self._insert_data_into_fcp_table(fcp_info_list) # insert data into template_fcp_mapping table template_fcp = [('1a10', template_id, 0), ('1b10', template_id, 1)] self.fcp_vol_mgr._insert_data_into_template_fcp_mapping_table(template_fcp) # insert data into template table to add a default template templates = [(template_id, 'name1', 'desc1', 1)] template_id_list = [tmpl[0] for tmpl in templates] self._insert_data_into_template_table(templates) template_sp_mapping = [(sp_name, template_id)] self.fcp_vol_mgr._insert_data_into_template_sp_mapping_table(template_sp_mapping) config.CONF.volume.get_fcp_pair_with_same_index = 1 try: available_list, fcp_tmpl_id = self.fcpops.reserve_fcp_devices( assinger_id, template_id, sp_name) actual_fcp_list = [] for fcp in available_list: fcp_id, wwpn_npiv, wwpn_phy = fcp actual_fcp_list.append((fcp_id, wwpn_npiv, wwpn_phy)) self.assertEqual(template_id, fcp_tmpl_id) self.assertEqual(2, len(actual_fcp_list)) finally: 
self.db_op.bulk_delete_from_fcp_table(fcp_id_list) self._delete_from_template_table(template_id_list) self.db_op.bulk_delete_fcp_from_template(fcp_id_list, template_id) def test_reserve_fcp_devices_without_default_template(self): """ Not specify template id, and no sp default template and no host default template, should raise error """ template_id = None assinger_id = "wxy0001" sp_name = "fake_sp_name" # insert data into template table to add a default template templates = [('0001', 'name1', 'desc1', 0)] template_id_list = [tmpl[0] for tmpl in templates] self._insert_data_into_template_table(templates) try: self.assertRaisesRegex(exception.SDKVolumeOperationError, "No FCP Multipath Template is specified and no " "default FCP Multipath Template is found.", self.fcpops.reserve_fcp_devices, assinger_id, template_id, sp_name) finally: self._delete_from_template_table(template_id_list) @mock.patch("zvmsdk.volumeop.FCPManager._sync_db_with_zvm", mock.Mock()) @mock.patch("zvmsdk.database.FCPDbOperator.get_allocated_fcps_from_assigner") @mock.patch("zvmsdk.database.FCPDbOperator.get_fcp_devices") def test_reserve_fcp_devices_without_free_fcp_device(self, mocked_get_fcp_devices, mocked_get_allocated_fcps): config.CONF.volume.get_fcp_pair_with_same_index = None mocked_get_fcp_devices.return_value = [] mocked_get_allocated_fcps.return_value = [] template_id = "fake_fcp_template_00" assinger_id = "wxy0001" sp_name = "fake_sp_name" available_list, fcp_tmpl_id = self.fcpops.reserve_fcp_devices( assinger_id, template_id, sp_name) self.assertEqual(template_id, fcp_tmpl_id) self.assertEqual(0, len(available_list)) def test_unreserve_fcp_devices_without_fcp_template(self): """ if not specify fcp_template_id when calling unreserve_fcp_devices, error will be raised """ assigner_id = "test_assigner" self.assertRaisesRegex(exception.SDKVolumeOperationError, "fcp_template_id is not specified while " "releasing FCP devices", self.fcpops.unreserve_fcp_devices, assigner_id, None) def 
test_unreserve_fcp_devices_return_empty_array(self): """If not found any fcp devices to release, return empty array""" template_id = "fake_fcp_template_00" assinger_id = "wxy0001" fcp_info_list = [('1a10', assinger_id, 0, 0, 'c05076de3300a83c', 'c05076de33002641', '27', 'active', 'owner1', template_id), ('1b10', assinger_id, 0, 0, 'c05076de3300b83c', 'c05076de33002641', '27', 'active', 'owner2', template_id), ('1a11', '', 0, 0, 'c05076de3300b83c', 'c05076de33002641', '27', 'active', 'owner2', template_id), ('1b11', '', 0, 0, 'c05076de3300b83c', 'c05076de33002641', '27', 'active', 'owner2', template_id) ] fcp_id_list = [fcp_info[0] for fcp_info in fcp_info_list] self._insert_data_into_fcp_table(fcp_info_list) # insert data into template_fcp_mapping table template_fcp = [('1a10', template_id, 0), ('1b10', template_id, 1)] self.fcp_vol_mgr._insert_data_into_template_fcp_mapping_table( template_fcp) try: res = self.fcpops.unreserve_fcp_devices(assinger_id, template_id) self.assertEqual(len(res), 0) finally: self.db_op.bulk_delete_from_fcp_table(fcp_id_list) self.db_op.bulk_delete_fcp_from_template(fcp_id_list, template_id) def test_valid_fcp_devcie_wwpn(self): assigner_id = 'test_assigner_1' fcp_list_1 = [('1a10', '', ''), ('1b10', 'wwpn_npiv_0', 'wwpn_phy_0')] self.assertRaisesRegex(exception.SDKVolumeOperationError, "NPIV WWPN of FCP device 1a10 not found", self.fcpops._valid_fcp_devcie_wwpn, fcp_list_1, assigner_id) fcp_list_2 = [('1a10', 'wwpn_npiv_0', ''), ('1b10', 'wwpn_npiv_0', 'wwpn_phy_0')] self.assertRaisesRegex(exception.SDKVolumeOperationError, "Physical WWPN of FCP device 1a10 not found", self.fcpops._valid_fcp_devcie_wwpn, fcp_list_2, assigner_id) @mock.patch("zvmsdk.utils.get_smt_userid", mock.Mock()) @mock.patch("zvmsdk.volumeop.FCPManager._get_all_fcp_info") def test_get_fcp_dict_in_zvm(self, mock_zvm_fcp_info): """Test get_fcp_dict_in_zvm""" raw_fcp_info_from_zvm = [ 'opnstk1: FCP device number: 1A01', 'opnstk1: Status: Free', 'opnstk1: NPIV world 
wide port number: 20076D8500005182', 'opnstk1: Channel path ID: 59', 'opnstk1: Physical world wide port number: ' '20076D8500005181', 'Owner: NONE', 'opnstk1: FCP device number: 1B03', 'opnstk1: Status: Active', 'opnstk1: NPIV world wide port number: ', 'opnstk1: Channel path ID: 50', 'opnstk1: Physical world wide port number: ' '20076D8500005185', 'Owner: UNIT0001'] mock_zvm_fcp_info.return_value = raw_fcp_info_from_zvm expected_fcp_dict_keys = { '1a01', '1b03'} fcp_dict = self.fcpops.get_fcp_dict_in_zvm() self.assertEqual(expected_fcp_dict_keys, set(fcp_dict.keys())) self.assertTrue( all([isinstance(v, volumeop.FCP) for v in list(fcp_dict.values())])) @mock.patch("zvmsdk.volumeop.FCPManager." "sync_fcp_table_with_zvm") @mock.patch("zvmsdk.volumeop.FCPManager." "get_fcp_dict_in_zvm") def test_sync_db_with_zvm(self, fcp_dict_in_zvm, sync_fcp_table_with_zvm): """Test sync_db_with_zvm""" zvm_fcp_dict = { # inter_set: '1a01': ('1a01', '', 1, 1, 0, '', '20076D8500005182', '20076D8500005181'), '1a02': ('1a02', '', 2, 1, 2, '', None, None), '1a03': ('1a03', '', 2, 1, 3, '', None, None), # del_fcp_set '1b05': ('1a05', '', 0, 1, 3, '', None, None), '1b06': ('1a06', '', 1, 1, 3, '', '20076D8500005187', '20076D8500005185'), '1b01': ('1b01', '', 0, 0, 2, '', None, None), '1b03': ('1b03', '', 0, 0, 1, '', None, None) } fcp_dict_in_zvm.return_value = zvm_fcp_dict self.fcpops._sync_db_with_zvm() fcp_dict_in_zvm.assert_called_once() sync_fcp_table_with_zvm.assert_called_once_with(zvm_fcp_dict) def test_sync_fcp_table_with_zvm(self): """Test sync_fcp_table_with_zvm""" # fcp info in original database template_id = '' fcp_info_list = [('1a01', 'user1', 0, 0, 'c05076de33000001', 'c05076de3300264a', '27', 'active', 'owner1', template_id), ('1a02', 'user1', 0, 0, 'c05076de33000002', 'c05076de3300264a', '27', 'active', 'owner1', template_id), ('1b01', 'user2', 1, 1, 'c05076de33000003', 'c05076de3300264b', '27', 'active', 'owner1', template_id), ('1b03', 'unit0001', 2, 1, 
'c05076de33000004', 'c05076de3300264b', '27', 'active', 'owner2', template_id)] fcp_id_list = [fcp_info[0] for fcp_info in fcp_info_list] # remove dirty data from other test cases self.db_op.bulk_delete_from_fcp_table(fcp_id_list) self._insert_data_into_fcp_table(fcp_info_list) # FCP devices info in z/VM # 1a01: is in z/VM, but WWPNs, state, owner are changed, # should update these columns. # 1a02: is not found in z/VM, should be removed from db. # 1b01: is in z/VM, WWPNs are changed, but it is in use # (reserved != 0 or connections != 0), so should not update # its NPIV and physical WWPNs. # 1b02: is new in z/VM, should add to db. # 1b03: is in z/VM, WWPNs, owner and CHPID changed, # should update values for owner and CHPID, but should NOT # update WWPNs because the FCP device is in use. fcp_info_in_zvm = [ 'opnstk1: FCP device number: 1A01', 'opnstk1: Status: Free', 'opnstk1: NPIV world wide port number: c05076de33000A01', 'opnstk1: Channel path ID: 27', 'opnstk1: Physical world wide port number: ' '20076D8500005181', 'Owner: NONE', 'opnstk1: FCP device number: 1B01', 'opnstk1: Status: Active', 'opnstk1: NPIV world wide port number: c05076de33000B01', 'opnstk1: Channel path ID: 27', 'opnstk1: Physical world wide port number: ' '20076D8500005181', 'Owner: OWNER1', 'opnstk1: FCP device number: 1B02', 'opnstk1: Status: Free', 'opnstk1: NPIV world wide port number: c05076de33000B02', 'opnstk1: Channel path ID: 27', 'opnstk1: Physical world wide port number: ' '20076D8500005181', 'Owner: NONE', 'opnstk1: FCP device number: 1B03', 'opnstk1: Status: Active', 'opnstk1: NPIV world wide port number: c05076de33000B03', 'opnstk1: Channel path ID: 30', 'opnstk1: Physical world wide port number: ' '20076D8500005185', 'Owner: UNIT0001'] fcp_dict_in_zvm = { '1a01': volumeop.FCP(fcp_info_in_zvm[0:6]), '1b01': volumeop.FCP(fcp_info_in_zvm[6:12]), '1b02': volumeop.FCP(fcp_info_in_zvm[12:18]), '1b03': volumeop.FCP(fcp_info_in_zvm[18:24]), } fcp_info_in_db_expected = { '1a01': 
('1a01', 'user1', 0, 0, 'c05076de33000a01', '20076d8500005181', '27', 'free', 'none', template_id), '1b01': ('1b01', 'user2', 1, 1, 'c05076de33000003', 'c05076de3300264b', '27', 'active', 'owner1', template_id), '1b02': ('1b02', '', 0, 0, 'c05076de33000b02', '20076d8500005181', '27', 'free', 'none', template_id), '1b03': ('1b03', 'unit0001', 2, 1, 'c05076de33000004', 'c05076de3300264b', '30', 'active', 'unit0001', template_id) } try: self.fcpops.sync_fcp_table_with_zvm(fcp_dict_in_zvm) fcp_info_in_db_new = self.fcpops.get_fcp_dict_in_db() # because not return value is sqlite3.Row object # so need to comare them one by one self.assertEqual(fcp_info_in_db_new.keys(), fcp_info_in_db_expected.keys()) for fcp_id in fcp_info_in_db_new: self.assertEqual(tuple(fcp_info_in_db_new[fcp_id]), fcp_info_in_db_expected[fcp_id]) finally: self.db_op.bulk_delete_from_fcp_table(fcp_id_list) @mock.patch.object(uuid, 'uuid1') def test_create_fcp_template(self, get_uuid): """Test create_fcp_template""" # there is already a default template: # fakehos1-1111-1111-1111-111111111111 templates = [('fakehos1-1111-1111-1111-111111111111', 'name1', 'desc1', 1), ('fakehos2-1111-1111-1111-111111111111', 'name2', 'desc2', 0)] template_id_list = [tmpl[0] for tmpl in templates] self._insert_data_into_template_table(templates) # parameters of new template new_template_id = u'ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c' template_id_list.append(new_template_id) get_uuid.return_value = new_template_id name = "test template" description = "test create_fcp_template" fcp_devices = "1a00-1a01, 1a03;1b01, 1b03-1b04" fcp_id_list = ['1a00', '1a01', '1a03', '1b01', '1b03', '1b04'] host_default = True default_sp_list = ['sp1', 'sp2'] expected_templates_info = { 'fakehos1-1111-1111-1111-111111111111': { "id": "fakehos1-1111-1111-1111-111111111111", "name": "name1", "description": "desc1", "host_default": False, "storage_providers": [], 'min_fcp_paths_count': 0 }, 'fakehos2-1111-1111-1111-111111111111': { "id": 
"fakehos2-1111-1111-1111-111111111111", "name": "name2", "description": "desc2", "host_default": False, "storage_providers": [], 'min_fcp_paths_count': 0 }, 'ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c': { "id": "ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c", "name": "test template", "description": "test create_fcp_template", "host_default": True, "storage_providers": ['sp1', 'sp2'], 'min_fcp_paths_count': 2, }, } try: ret = self.fcpops.create_fcp_template(name, description, fcp_devices, host_default, default_sp_list) self.assertEqual(ret['fcp_template']['name'], name) self.assertEqual(ret['fcp_template']['description'], description) self.assertEqual(ret['fcp_template']['host_default'], host_default) self.assertEqual(ret['fcp_template']['storage_providers'], default_sp_list) # check content in database all_templates_info = self.fcpops.get_fcp_templates( template_id_list) for tmpl in all_templates_info['fcp_templates']: self.assertDictEqual(tmpl, expected_templates_info[tmpl['id']]) finally: self._delete_from_template_table(template_id_list) self.db_op.bulk_delete_fcp_from_template(fcp_id_list, new_template_id) self.db_op.bulk_delete_from_fcp_table(fcp_id_list) def test_create_fcp_template_with_error(self): name = 'test_fcp_tmpl' description = 'test-desc' fcp_devices = '1a10;1b10' min_fcp_paths_count = 4 self.assertRaisesRegex(exception.SDKConflictError, 'min_fcp_paths_count 4 is larger than fcp device path count 2', self.fcpops.create_fcp_template, name, description, fcp_devices, min_fcp_paths_count=min_fcp_paths_count) @mock.patch("zvmsdk.database.FCPDbOperator.edit_fcp_template") def test_edit_fcp_template(self, mock_db_edit_tmpl): """ Test edit_fcp_template """ tmpl_id = 'fake_id' kwargs = { 'name': 'new_name', 'description': 'new_desc', 'fcp_devices': '1A00-1A03;1B00-1B03', 'host_default': False, 'default_sp_list': ['sp1'], 'min_fcp_paths_count': 2} self.fcpops.edit_fcp_template(tmpl_id, **kwargs) mock_db_edit_tmpl.assert_called_once_with(tmpl_id, **kwargs) def 
test_update_template_fcp_raw_usage(self): raw = ('fcp_id_1', 'tmpl_id_1', 0, 'assigner_id', 1, 0, 'wwpn_npiv', 'wwpn_phy', 'chpid', 'state', 'owner', '') expected = { 'tmpl_id_1': { 0: [('fcp_id_1', 'tmpl_id_1', 'assigner_id', 1, 0, 'wwpn_npiv', 'wwpn_phy', 'chpid', 'state', 'owner', '')]}} result = self.fcpops._update_template_fcp_raw_usage({}, raw) self.assertDictEqual(result, expected) def test_get_fcp_templates(self): """ Test get_fcp_templates in FCPManager""" try: # prepare test data template_id_1 = 'template_id_1' template_id_2 = 'template_id_2' templates = [(template_id_1, 'name1', 'desc1', 1), (template_id_2, 'name2', 'desc2', 0)] self._delete_from_template_table([template_id_1, template_id_2]) self._insert_data_into_template_table(templates) template_sp_mapping = [('sp1', template_id_1), ('sp2', template_id_2)] self.fcp_vol_mgr._insert_data_into_template_sp_mapping_table(template_sp_mapping) fcp_info_list_2 = [ # allocated ('1b00', 'user2', 1, 1, 'c05076de3300c83c', 'c05076de33002641', '27', 'active', '', template_id_2), # unallocated_but_active ('1b01', '', 0, 0, 'c05076de3300d83c', 'c05076de33002641', '35', 'active', 'owner2', '')] fcp_id_list_2 = [fcp_info[0] for fcp_info in fcp_info_list_2] self.db_op.bulk_delete_from_fcp_table(fcp_id_list_2) self._insert_data_into_fcp_table(fcp_info_list_2) # case1: get by template_id_list result_1 = self.fcpops.get_fcp_templates([template_id_1]) expected_1 = { "fcp_templates": [ { "id": template_id_1, "name": "name1", "description": "desc1", "host_default": True, "storage_providers": ["sp1"], 'min_fcp_paths_count': 0 }]} self.assertDictEqual(result_1, expected_1) # case2: get by assigner_id expected_2 = { "fcp_templates": [ { "id": template_id_2, "name": "name2", "description": "desc2", "host_default": False, "storage_providers": ["sp2"], 'min_fcp_paths_count': 0 }]} result_2 = self.fcpops.get_fcp_templates(assigner_id='user2') self.assertDictEqual(result_2, expected_2) # case3: get by host_default=True result_3 = 
self.fcpops.get_fcp_templates(host_default=True) self.assertDictEqual(result_3, expected_1) # # case4: get by host_default=False result_4 = self.fcpops.get_fcp_templates(host_default=False) self.assertDictEqual(result_4, expected_2) # case5: get by default_sp_list=['sp1'] result_5 = self.fcpops.get_fcp_templates(default_sp_list=['sp1']) self.assertDictEqual(result_5, expected_1) # case6: get by default_sp_list=['all'] expected_all = { "fcp_templates": [ { "id": template_id_1, "name": "name1", "description": "desc1", "host_default": True, "storage_providers": ["sp1"], 'min_fcp_paths_count': 0 }, { "id": template_id_2, "name": "name2", "description": "desc2", "host_default": False, "storage_providers": ["sp2"], 'min_fcp_paths_count': 0 }]} result_6 = self.fcpops.get_fcp_templates(default_sp_list=['all']) self.assertDictEqual(result_6, expected_all) # case7: without any parameter, will get all templates result_7 = self.fcpops.get_fcp_templates() self.assertDictEqual(result_7, expected_all) finally: self.db_op.bulk_delete_from_fcp_table(fcp_id_list_2) self.db_op.bulk_delete_fcp_from_template(fcp_id_list_2, template_id_2) self._delete_from_template_table([template_id_1, template_id_2]) @mock.patch("zvmsdk.database.FCPDbOperator.get_fcp_templates") def test_get_fcp_templates_exception(self, mock_get): self.assertRaises(exception.SDKObjectNotExistError, self.fcpops.get_fcp_templates, ['fake_id_1', 'fake_id_2']) mock_get.assert_not_called() @mock.patch( "zvmsdk.volumeop.FCPManager._update_template_fcp_raw_usage") @mock.patch("zvmsdk.volumeop.FCPManager._sync_db_with_zvm") def test_get_fcp_templates_details(self, mock_sync, mock_raw): """ Test get_fcp_templates_details in FCPManager""" try: self.maxDiff = None # prepare test data template_id_1 = 'template_id_1' template_id_2 = 'template_id_2' templates = [(template_id_1, 'name1', 'desc1', 1), (template_id_2, 'name2', 'desc2', 0)] self._delete_from_template_table([template_id_1, template_id_2]) 
self._insert_data_into_template_table(templates) template_sp_mapping = [('sp1', template_id_1), ('sp2', template_id_2)] self.fcp_vol_mgr._insert_data_into_template_sp_mapping_table(template_sp_mapping) fcp_info_list_1 = [ # available ('1a00', '', 0, 0, 'c05076de3300a83c', 'c05076de33002641', '27', 'free', '', '') ] fcp_info_list_2 = [ # allocated ('1b00', 'user2', 1, 1, 'c05076de3300c83c', 'c05076de33002641', '27', 'active', '', template_id_2), # unallocated_but_active ('1b01', '', 0, 0, 'c05076de3300d83c', 'c05076de33002641', '35', 'active', 'owner2', '')] fcp_id_list_1 = [fcp_info[0] for fcp_info in fcp_info_list_1] fcp_id_list_2 = [fcp_info[0] for fcp_info in fcp_info_list_2] self.db_op.bulk_delete_from_fcp_table(fcp_id_list_1) self._insert_data_into_fcp_table(fcp_info_list_1) self.db_op.bulk_delete_from_fcp_table(fcp_id_list_2) self._insert_data_into_fcp_table(fcp_info_list_2) template_fcp = [('1a00', template_id_1, 0), ('1x00', template_id_1, 1), ('1b00', template_id_2, 0), ('1b01', template_id_2, 1)] fcp_id_list_1.append('1x00') self.db_op.bulk_delete_fcp_from_template(fcp_id_list_1, template_id_1) self.db_op.bulk_delete_fcp_from_template(fcp_id_list_2, template_id_2) self.fcp_vol_mgr._insert_data_into_template_fcp_mapping_table(template_fcp) # case1: test get_fcp_templates_details without input parameter expected_1 = { "id": template_id_1, "name": "name1", "description": "desc1", "host_default": True, "storage_providers": ["sp1"], 'min_fcp_paths_count': 2, "statistics": { 0: { "total": "1A00", "total_count": 1, "single_fcp": "1A00", "range_fcp": "", "available": "1A00", "available_count": 1, "allocated": "", "reserve_only": "", "connection_only": "", "unallocated_but_active": {}, "allocated_but_free": "", "notfound": "", "offline": "", "CHPIDs": {"27": "1A00"}}, 1: { "total": "1X00", "total_count": 1, "single_fcp": "1X00", "range_fcp": "", "available": "", "available_count": 0, "allocated": "", "reserve_only": "", "connection_only": "", 
"unallocated_but_active": {}, "allocated_but_free": "", "notfound": "1X00", "offline": "", "CHPIDs": {}} } } expected_2 = { "id": template_id_2, "name": "name2", "description": "desc2", "host_default": False, "storage_providers": ["sp2"], 'min_fcp_paths_count': 2, "statistics": { 0: { "total": "1B00", "total_count": 1, "single_fcp": "1B00", "range_fcp": "", "available": "", "available_count": 0, "allocated": "1B00", "reserve_only": "", "connection_only": "", "unallocated_but_active": {}, "allocated_but_free": "", "notfound": "", "offline": "", "CHPIDs": {"27": "1B00"}}, 1: { "total": "1B01", "total_count": 1, "single_fcp": "1B01", "range_fcp": "", "available": "", "available_count": 0, "allocated": "", "reserve_only": "", "connection_only": "", "unallocated_but_active": {"1B01": "owner2"}, "allocated_but_free": "", "notfound": "", "offline": "", "CHPIDs": {"35": "1B01"} } } } expected_all = { "fcp_templates": [expected_1, expected_2]} result_all = self.fcpops.get_fcp_templates_details(raw=False, statistics=True, sync_with_zvm=False) mock_sync.assert_not_called() self.assertDictEqual(result_all, expected_all) # case2: get_fcp_templates_details by template_id_list result = self.fcpops.get_fcp_templates_details(template_id_list=[template_id_1], raw=False, statistics=True, sync_with_zvm=False) expected = {'fcp_templates': [expected_1]} self.assertDictEqual(result, expected) # case3: get_fcp_templates_details with raw=True and sync_with_zvm=True self.fcpops.get_fcp_templates_details(template_id_list=[template_id_1], raw=True, statistics=True, sync_with_zvm=True) mock_raw.assert_called() mock_sync.assert_called() finally: self.db_op.bulk_delete_from_fcp_table(fcp_id_list_1) self.db_op.bulk_delete_from_fcp_table(fcp_id_list_2) self.db_op.bulk_delete_fcp_from_template(fcp_id_list_1, template_id_1) self.db_op.bulk_delete_fcp_from_template(fcp_id_list_2, template_id_2) self._delete_from_template_table([template_id_1, template_id_2]) 
    @mock.patch("zvmsdk.database.FCPDbOperator.get_fcp_templates_details")
    @mock.patch("zvmsdk.volumeop.FCPManager._sync_db_with_zvm")
    def test_get_fcp_templates_details_exception(self, mock_sync, mock_get):
        """Non-existent template IDs must raise SDKObjectNotExistError.

        The validation happens before any DB query or z/VM sync is
        attempted, so neither mocked collaborator may be called.
        """
        self.assertRaises(exception.SDKObjectNotExistError,
                          self.fcpops.get_fcp_templates_details,
                          template_id_list=['fake_id_1', 'fake_id_2'],
                          raw=False,
                          statistics=True,
                          sync_with_zvm=True)
        # Fails fast on unknown IDs: no sync with z/VM, no DB read.
        mock_sync.assert_not_called()
        mock_get.assert_not_called()

    def test_update_template_fcp_statistics_usage(self):
        """Accumulate raw DB rows into per-template/per-path statistics.

        Feeds hand-built raw rows through
        _update_template_fcp_statistics_usage one at a time and checks the
        aggregated dict keyed by template ID, then by path.
        """
        self.maxDiff = None
        statistics_usage = {}
        # raw_item format
        # (fcp_id|tmpl_id|path|assigner_id|connections|
        # reserved|wwpn_npiv|wwpn_phy|chpid|state|owner|tmpl_id)
        raw_items = [('1a01', 'tmpl_id_1', '0', '', 2, 1, 'wwpn_npiv',
                      'wwpn_phy', '27', 'active', 'owner1', 'tmpl_id_1'),
                     ('1a02', 'tmpl_id_1', '0', '', 0, 0, 'wwpn_npiv',
                      'wwpn_phy', '32', 'free', '', ''),
                     ('1b01', 'tmpl_id_1', '1', '', 0, 0, 'wwpn_npiv',
                      'wwpn_phy', '27', 'active', 'assigner_id_1', ''),
                     ('1b02', 'tmpl_id_1', '1', '', 0, 0, 'wwpn_npiv',
                      'wwpn_phy', '32', 'active', 'assigner_id_2', ''),
                     ('1c03', 'tmpl_id_2', '0', '', 0, 1, 'wwpn_npiv',
                      'wwpn_phy', '25', 'free', '', ''),
                     ('1c05', 'tmpl_id_2', '0', '', 1, 0, 'wwpn_npiv',
                      'wwpn_phy', '25', 'free', '', ''),
                     ('1c06', 'tmpl_id_2', '0', '', 1, 1, 'wwpn_npiv',
                      'wwpn_phy', '26', 'free', '', ''),
                     # row with None connections / empty fields: expected to
                     # be classified as "notfound" below
                     ('1d05', 'tmpl_id_2', '1', '', None, '', '', '', '',
                      '', '', ''),
                     ('1d06', 'tmpl_id_2', '1', '', 0, 0, 'wwpn_npiv',
                      'wwpn_phy', '', 'notfound', '', ''),
                     ('1e09', 'tmpl_id_3', '0', '', 0, 0, 'wwpn_npiv',
                      'wwpn_phy', '30', 'offline', '', '')]
        for raw in raw_items:
            self.fcpops._update_template_fcp_statistics_usage(
                statistics_usage, raw)
        expected = {
            'tmpl_id_1': {
                '0': {
                    # statistics report FCP IDs upper-cased
                    "total": ['1A01', '1A02'],
                    "total_count": 0,
                    "single_fcp": [],
                    "range_fcp": [],
                    "available": ['1A02'],
                    "available_count": 0,
                    # 1A01 has connections=2/reserved=1 -> allocated
                    "allocated": ['1A01'],
                    "reserve_only": [],
                    "connection_only": [],
                    "unallocated_but_active": {},
                    "allocated_but_free": [],
                    "notfound": [],
                    "offline": [],
                    "CHPIDs": {'27': ['1A01'],
'32': ['1A02']}}, '1': { "total": ['1B01', '1B02'], "total_count": 0, "single_fcp": [], "range_fcp": [], "available": [], "available_count": 0, "allocated": [], "reserve_only": [], "connection_only": [], "unallocated_but_active": { '1B01': 'assigner_id_1', '1B02': 'assigner_id_2'}, "allocated_but_free": [], "notfound": [], "offline": [], "CHPIDs": {'27': ['1B01'], '32': ['1B02']}} }, 'tmpl_id_2': { '0': { "total": ['1C03', '1C05', '1C06'], "total_count": 0, "single_fcp": [], "range_fcp": [], "available": [], "available_count": 0, "allocated": ['1C06'], "reserve_only": ['1C03'], "connection_only": ['1C05'], "unallocated_but_active": {}, "allocated_but_free": ['1C05', '1C06'], "notfound": [], "offline": [], "CHPIDs": {'25': ['1C03', '1C05'], '26': ['1C06']}}, '1': { "total": ['1D05', '1D06'], "total_count": 0, "single_fcp": [], "range_fcp": [], "available": [], "available_count": 0, "allocated": [], "reserve_only": [], "connection_only": [], "unallocated_but_active": {}, "allocated_but_free": [], "notfound": ['1D05', '1D06'], "offline": [], "CHPIDs": {}} }, 'tmpl_id_3': { '0': { "total": ['1E09'], "total_count": 0, "single_fcp": [], "range_fcp": [], "available": [], "available_count": 0, "allocated": [], "reserve_only": [], "connection_only": [], "unallocated_but_active": {}, "allocated_but_free": [], "notfound": [], "offline": ['1E09'], "CHPIDs": {'30': ['1E09']}}} } self.assertDictEqual(statistics_usage, expected) @mock.patch("zvmsdk.database.FCPDbOperator.delete_fcp_template") def test_delete_fcp_template(self, mock_db_delete_tmpl): """ Test delete_fcp_template in FCPManager""" self.fcpops.delete_fcp_template('tmpl_id') mock_db_delete_tmpl.assert_called_once_with('tmpl_id') @mock.patch("zvmsdk.database.FCPDbOperator.increase_connections_by_assigner") def test_increase_fcp_connections(self, mock_increase_conn): """Test increase_fcp_connections""" mock_increase_conn.side_effect = [1, 2, 0] fcp_list = ['1a01', '1b01', '1c01'] assigner_id = 'fake_id' expect = {'1a01': 
1, '1b01': 2, '1c01': 0} result = self.fcpops.increase_fcp_connections(fcp_list, assigner_id) self.assertDictEqual(result, expect) @mock.patch("zvmsdk.database.FCPDbOperator.decrease_connections") def test_decrease_fcp_connections(self, mock_decrease_conn): """Test decrease_fcp_connections""" # case1: no exception when call mock_decrease_conn mock_decrease_conn.side_effect = [1, 2, 0] fcp_list = ['1a01', '1b01', '1c01'] expect = {'1a01': 1, '1b01': 2, '1c01': 0} result = self.fcpops.decrease_fcp_connections(fcp_list) self.assertDictEqual(result, expect) # case2: raise exception when call mock_decrease_conn mock_decrease_conn.side_effect = [ 1, exception.SDKObjectNotExistError('fake_msg'), 0] fcp_list = ['1a01', '1b01', '1c01'] expect = {'1a01': 1, '1b01': 0, '1c01': 0} result = self.fcpops.decrease_fcp_connections(fcp_list) self.assertDictEqual(result, expect) class TestFCPVolumeManager(base.SDKTestCase): @classmethod @mock.patch("zvmsdk.volumeop.FCPManager.sync_db", mock.Mock()) def setUpClass(cls): super(TestFCPVolumeManager, cls).setUpClass() cls.volumeops = volumeop.FCPVolumeManager() cls.db_op = database.FCPDbOperator() # tearDownClass deleted to work around bug of 'no such table:fcp' def _insert_data_into_fcp_table(self, fcp_info_list): # insert data into all columns of fcp table with database.get_fcp_conn() as conn: conn.executemany("INSERT INTO fcp " "(fcp_id, assigner_id, connections, " "reserved, wwpn_npiv, wwpn_phy, chpid, " "state, owner, tmpl_id) VALUES " "(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", fcp_info_list) def _insert_data_into_template_table(self, templates_info): # insert data into all columns of template table with database.get_fcp_conn() as conn: conn.executemany("INSERT INTO template " "(id, name, description, is_default) " "VALUES (?, ?, ?, ?)", templates_info) def _insert_data_into_template_fcp_mapping_table(self, template_fcp_mapping): # insert data into all columns of template_fcp_mapping table with database.get_fcp_conn() as conn: 
conn.executemany("INSERT INTO template_fcp_mapping " "(fcp_id, tmpl_id, path) " "VALUES (?, ?, ?)", template_fcp_mapping) def _insert_data_into_template_sp_mapping_table(self, template_sp_mapping): # insert data into all columns of template_sp_mapping table with database.get_fcp_conn() as conn: conn.executemany("INSERT INTO template_sp_mapping " "(sp_name, tmpl_id) " "VALUES (?, ?)", template_sp_mapping) def _delete_from_template_table(self, template_id_list): templates_id = [(tmpl_id,) for tmpl_id in template_id_list] with database.get_fcp_conn() as conn: conn.executemany("DELETE FROM template " "WHERE id=?", templates_id) @mock.patch("zvmsdk.utils.get_smt_userid") @mock.patch("zvmsdk.volumeop.FCPManager._get_all_fcp_info") @mock.patch("zvmsdk.utils.get_lpar_name") def test_get_volume_connector_unreserve(self, get_lpar_name, get_all_fcp_info, get_smt_userid): """Test get_volume_connector when reserve parameter is False""" get_lpar_name.return_value = "fakehos1" get_smt_userid.return_value = "fakesmt" fcp_list = ['opnstk1: FCP device number: A83C', 'opnstk1: Status: Active', 'opnstk1: NPIV world wide port number: ' 'C05076DE3300A83C', 'opnstk1: Channel path ID: 27', 'opnstk1: Physical world wide port number: ' 'C05076DE33002641', 'Owner: FAKEUSER', 'opnstk1: FCP device number: B83C', 'opnstk1: Status: Active', 'opnstk1: NPIV world wide port number: ' 'C05076DE3300B83C', 'opnstk1: Channel path ID: 27', 'opnstk1: Physical world wide port number: ' 'C05076DE33002641', 'Owner: FAKEUSER'] get_all_fcp_info.return_value = fcp_list # insert data into fcp table template_id = 'fakehos1-1111-1111-1111-111111111111' fcp_info_list = [('a83c', 'fakeuser', 0, 1, 'c05076de3300a83c', 'c05076de33002641', '27', 'active', 'owner1', template_id), ('b83c', 'fakeuser', 0, 1, 'c05076de3300b83c', 'c05076de33002641', '27', 'active', 'owner2', template_id)] fcp_id_list = [fcp_info[0] for fcp_info in fcp_info_list] self.db_op.bulk_delete_from_fcp_table(fcp_id_list) 
self._insert_data_into_fcp_table(fcp_info_list) # insert data into template_fcp_mapping table template_fcp = [('a83c', template_id, 0), ('b83c', template_id, 1)] self.db_op.bulk_delete_fcp_from_template(fcp_id_list, template_id) self._insert_data_into_template_fcp_mapping_table(template_fcp) # insert data into template table to add a default template templates = [('fakehos1-1111-1111-1111-111111111111', 'name1', 'desc1', 1), ('fakehos2-1111-1111-1111-111111111111', 'name2', 'desc2', 0)] template_id_list = [tmpl[0] for tmpl in templates] self._insert_data_into_template_table(templates) try: connector = self.volumeops.get_volume_connector( 'fakeuser', False, fcp_template_id=template_id) expected = {'zvm_fcp': ['a83c', 'b83c'], 'wwpns': ['c05076de3300a83c', 'c05076de3300b83c'], 'phy_to_virt_initiators': { 'c05076de3300a83c': 'c05076de33002641', 'c05076de3300b83c': 'c05076de33002641' }, 'host': 'fakehos1_fakeuser', 'fcp_paths': 2, 'fcp_template_id': template_id} self.assertDictEqual(expected, connector) userid, reserved, conn, tmpl_id = self.db_op.get_usage_of_fcp('b83c') self.assertEqual('fakeuser', userid) self.assertEqual(0, conn) self.assertEqual(0, reserved) # because reserve is False, so tmpl_id set to '' self.assertEqual('', tmpl_id) finally: self.db_op.bulk_delete_from_fcp_table(fcp_id_list) self.db_op.bulk_delete_fcp_from_template(fcp_id_list, template_id) self._delete_from_template_table(template_id_list) @mock.patch("zvmsdk.utils.get_smt_userid") @mock.patch("zvmsdk.volumeop.FCPManager._get_all_fcp_info") @mock.patch("zvmsdk.utils.get_lpar_name") def test_get_volume_connector_reserve(self, get_lpar_name, get_all_fcp_info, get_smt_userid): """Test get_volume_connector when reserve parameter is True""" get_lpar_name.return_value = "fakehos1" get_smt_userid.return_value = "fakesmt" fcp_list = ['opnstk1: FCP device number: A83C', 'opnstk1: Status: Free', 'opnstk1: NPIV world wide port number: ' 'C05076DE3300A83C', 'opnstk1: Channel path ID: 27', 'opnstk1: 
Physical world wide port number: ' 'C05076DE33002641', 'Owner: NONE', 'opnstk1: FCP device number: B83C', 'opnstk1: Status: Free', 'opnstk1: NPIV world wide port number: ' 'C05076DE3300B83C', 'opnstk1: Channel path ID: 27', 'opnstk1: Physical world wide port number: ' 'C05076DE33002641', 'Owner: NONE'] get_all_fcp_info.return_value = fcp_list # insert data into fcp table template_id = 'fakehos1-1111-1111-1111-111111111111' # in database, the state in active, but in zvm it is free # get_volume_connector should be able to get them fcp_info_list = [('a83c', '', 0, 0, 'c05076de3300a83c', 'c05076de33002641', '27', 'active', 'owner1', template_id), ('b83c', '', 0, 0, 'c05076de3300b83c', 'c05076de33002641', '27', 'active', 'owner2', template_id)] fcp_id_list = [fcp_info[0] for fcp_info in fcp_info_list] self._insert_data_into_fcp_table(fcp_info_list) # insert data into template_fcp_mapping table template_fcp = [('a83c', template_id, 0), ('b83c', template_id, 1)] self._insert_data_into_template_fcp_mapping_table(template_fcp) # insert data into template table to add a default template templates = [('fakehos1-1111-1111-1111-111111111111', 'name1', 'desc1', 1), ('fakehos2-1111-1111-1111-111111111111', 'name2', 'desc2', 0)] template_id_list = [tmpl[0] for tmpl in templates] self._insert_data_into_template_table(templates) try: connector = self.volumeops.get_volume_connector('fakeuser', True) expected = {'zvm_fcp': ['a83c', 'b83c'], 'wwpns': ['c05076de3300a83c', 'c05076de3300b83c'], 'phy_to_virt_initiators': { 'c05076de3300a83c': 'c05076de33002641', 'c05076de3300b83c': 'c05076de33002641' }, 'host': 'fakehos1_fakeuser', 'fcp_paths': 2, 'fcp_template_id': template_id} self.assertDictEqual(expected, connector) userid, reserved, conn, tmpl_id = self.db_op.get_usage_of_fcp('b83c') self.assertEqual('FAKEUSER', userid) self.assertEqual(0, conn) self.assertEqual(1, reserved) self.assertEqual(template_id, tmpl_id) finally: self.db_op.bulk_delete_from_fcp_table(fcp_id_list) 
self.db_op.bulk_delete_fcp_from_template(fcp_id_list, template_id) self._delete_from_template_table(template_id_list) def test_get_volume_connector_reserve_with_error(self): """The specified FCP Multipath Template doesn't exist, should raise error.""" assigner_id = 'fakeuser' fcp_template_id = '0001' sp_name = 'v7k60' self.assertRaisesRegex(exception.SDKVolumeOperationError, "fcp_template_id 0001 doesn't exist.", self.volumeops.get_volume_connector, assigner_id, True, fcp_template_id, sp_name) @mock.patch("zvmsdk.smtclient.SMTClient.volume_refresh_bootmap") def test_volume_refresh_bootmap(self, mock_volume_refresh_bootmap): fcpchannels = ['5d71'] wwpns = ['5005076802100c1b', '5005076802200c1b'] lun = '0000000000000000' res = self.volumeops.volume_refresh_bootmap(fcpchannels, wwpns, lun) mock_volume_refresh_bootmap.assert_has_calls(res) @mock.patch("zvmsdk.volumeop.FCPManager._get_all_fcp_info") @mock.patch("zvmsdk.utils.check_userid_exist") @mock.patch("zvmsdk.volumeop.FCPVolumeManager._add_disks") @mock.patch("zvmsdk.volumeop.FCPVolumeManager._dedicate_fcp") def test_attach(self, mock_dedicate, mock_add_disk, mock_check, mock_fcp_info): connection_info = {'platform': 'x86_64', 'ip': '1.2.3.4', 'os_version': 'rhel7', 'multipath': 'false', 'target_wwpn': ['20076D8500005182', '20076D8500005183'], 'target_lun': '2222', 'zvm_fcp': ['c123', 'd123'], 'mount_point': '/dev/sdz', 'assigner_id': 'user1'} fcp_list = ['opnstk1: FCP device number: C123', 'opnstk1: Status: Free', 'opnstk1: NPIV world wide port number: ' '20076D8500005182', 'opnstk1: Channel path ID: 59', 'opnstk1: Physical world wide port number: ' '20076D8500005181', 'Owner: NONE', 'opnstk1: FCP device number: D123', 'opnstk1: Status: Active', 'opnstk1: NPIV world wide port number: ' '20076D8500005183', 'opnstk1: Channel path ID: 50', 'opnstk1: Physical world wide port number: ' '20076D8500005185', 'Owner: UNIT0001'] mock_fcp_info.return_value = fcp_list # insert data into tempalte template_id = 
'fakehost-1111-1111-1111-111111111111' fcp_info_list = [('c123', 'user1', 0, 0, '20076D8500005182', '20076D8500005181', '27', 'active', 'owner1', template_id), ('d123', 'user1', 0, 0, '20076D8500005183', '20076D8500005181', '27', 'active', 'owner2', template_id)] fcp_id_list = [fcp_info[0] for fcp_info in fcp_info_list] wwpns = ['20076d8500005182', '20076d8500005183'] self._insert_data_into_fcp_table(fcp_info_list) # insert data into template_fcp_mapping table template_fcp = [('c123', template_id, 0), ('d123', template_id, 1)] self._insert_data_into_template_fcp_mapping_table(template_fcp) try: self.volumeops.attach(connection_info) self.assertEqual(mock_dedicate.call_args_list, [call('c123', 'USER1'), call('d123', 'USER1')]) mock_add_disk.assert_called_once_with(['c123', 'd123'], 'USER1', wwpns, '2222', False, 'rhel7', '/dev/sdz') finally: self.db_op.bulk_delete_from_fcp_table(fcp_id_list) self.db_op.bulk_delete_fcp_from_template(fcp_id_list, template_id) @mock.patch("zvmsdk.volumeop.FCPVolumeManager.get_fcp_usage") @mock.patch("zvmsdk.utils.check_userid_exist") @mock.patch("zvmsdk.volumeop.FCPVolumeManager._do_attach") def test_attach_with_exception(self, mock_do_attach, mock_check_userid, mock_get_fcp_usage): connection_info = {'platform': 'x86_64', 'ip': '1.2.3.4', 'os_version': 'rhel7', 'multipath': 'false', 'target_wwpn': ['20076D8500005182'], 'target_lun': '2222', 'zvm_fcp': ['E83C', 'D83C'], 'mount_point': '/dev/sdz', 'assigner_id': 'user1'} # case1: enter except-block mock_check_userid.return_value = True mock_get_fcp_usage.side_effect = (Exception(), ('user1', 1, 1, 'fake_tmpl_id')) mock_do_attach.side_effect = exception.SDKSMTRequestFailed({}, 'fake_msg') self.assertRaises(exception.SDKSMTRequestFailed, self.volumeops.attach, connection_info) mock_do_attach.assert_called_once() self.assertEqual(mock_get_fcp_usage.call_args_list, [call('e83c'), call('d83c')]) @mock.patch("zvmsdk.volumeop.FCPManager._get_all_fcp_info") 
    @mock.patch("zvmsdk.utils.check_userid_exist")
    @mock.patch("zvmsdk.volumeop.FCPVolumeManager._add_disks")
    @mock.patch("zvmsdk.volumeop.FCPVolumeManager._dedicate_fcp")
    def test_attach_with_root_volume(self, mock_dedicate, mock_add_disk,
                                     mock_check, mock_fcp_info):
        """Attach a root volume: only the DB is updated.

        When is_root_volume is True, attach() must record the
        reservation/connection in the FCP table but must NOT dedicate the
        FCP devices nor add disks to the guest (asserted at the end).
        """
        connection_info = {'platform': 's390x',
                           'ip': '1.2.3.4',
                           'os_version': 'rhel7',
                           'multipath': 'false',
                           'target_wwpn': ['20076D8500005182',
                                           '20076D8500005183'],
                           'target_lun': '2222',
                           'zvm_fcp': ['c123', 'd123'],
                           'mount_point': '/dev/sdz',
                           'assigner_id': 'user2',
                           'is_root_volume': True}
        # Fake smcli-style output describing the two FCP devices.
        fcp_list = ['opnstk1: FCP device number: C123',
                    'opnstk1: Status: Free',
                    'opnstk1: NPIV world wide port number: 20076D8500005182',
                    'opnstk1: Channel path ID: 59',
                    'opnstk1: Physical world wide port number: '
                    '20076D8500005181',
                    'Owner: NONE',
                    'opnstk1: FCP device number: D123',
                    'opnstk1: Status: Active',
                    'opnstk1: NPIV world wide port number: 20076D8500005183',
                    'opnstk1: Channel path ID: 50',
                    'opnstk1: Physical world wide port number: '
                    '20076D8500005185',
                    'Owner: UNIT0001']
        mock_fcp_info.return_value = fcp_list
        # Pre-seed the FCP table: both devices reserved (reserved=1) for
        # user2 with no connections yet and no template.
        fcp_info_list = [('c123', 'user2', 0, 1, 'c05076de3300011c',
                          'c05076de33002641', '27', 'active', 'owner1', ''),
                         ('d123', 'user2', 0, 1, 'c05076de3300011d',
                          'c05076de33002641', '27', 'active', 'owner2', '')]
        fcp_id_list = [fcp_info[0] for fcp_info in fcp_info_list]
        self._insert_data_into_fcp_table(fcp_info_list)
        try:
            self.volumeops.attach(connection_info)
            # Both FCPs: still reserved, and connections bumped 0 -> 1.
            userid, reserved, conns, tmpl_id = \
                self.volumeops.get_fcp_usage('c123')
            self.assertEqual(userid, 'user2')
            self.assertEqual(reserved, 1)
            self.assertEqual(conns, 1)
            self.assertEqual(tmpl_id, '')
            userid, reserved, conns, tmpl_id = \
                self.volumeops.get_fcp_usage('d123')
            self.assertEqual(userid, 'user2')
            self.assertEqual(reserved, 1)
            self.assertEqual(conns, 1)
            self.assertEqual(tmpl_id, '')
            # Root volume path: no dedicate and no disk plumbing on the
            # guest, the bootmap already handles the root disk.
            self.assertFalse(mock_dedicate.called)
            self.assertFalse(mock_add_disk.called)
        finally:
            # Clean up seeded rows regardless of assertion outcome.
            self.db_op.bulk_delete_from_fcp_table(fcp_id_list)
    @mock.patch("zvmsdk.volumeop.FCPManager._get_all_fcp_info")
    @mock.patch("zvmsdk.utils.check_userid_exist")
    @mock.patch("zvmsdk.volumeop.FCPVolumeManager._add_disks")
    @mock.patch("zvmsdk.volumeop.FCPVolumeManager._dedicate_fcp")
    def test_do_attach_with_no_dedicate(self, mock_dedicate, mock_add_disk,
                                        mock_check, mock_fcp_info):
        """Attach when FCPs already have connections: skip dedicate.

        Both FCP devices are seeded with connections=2, so attach() must
        only add the disks (with the new connection) and must not dedicate
        the devices again.
        """
        connection_info = {'platform': 'x86_64',
                           'ip': '1.2.3.4',
                           'os_version': 'rhel7',
                           'multipath': 'false',
                           'target_wwpn': ['20076D8500005182',
                                           '20076D8500005183'],
                           'target_lun': '2222',
                           'zvm_fcp': ['c123', 'd123'],
                           'mount_point': '/dev/sdz',
                           'assigner_id': 'user1'}
        # Fake smcli-style output for the two FCP devices; both unowned.
        fcp_list = ['opnstk1: FCP device number: C123',
                    'opnstk1: Status: Free',
                    'opnstk1: NPIV world wide port number: '
                    '20076D8500005182',
                    'opnstk1: Channel path ID: 59',
                    'opnstk1: Physical world wide port number: '
                    '20076D8500005181',
                    'Owner: NONE',
                    'opnstk1: FCP device number: D123',
                    'opnstk1: Status: Active',
                    'opnstk1: NPIV world wide port number: '
                    '20076D8500005183',
                    'opnstk1: Channel path ID: 50',
                    'opnstk1: Physical world wide port number: '
                    '20076D8500005185',
                    'Owner: NONE']
        mock_fcp_info.return_value = fcp_list
        mock_check.return_value = True
        wwpns = ['20076d8500005182', '20076d8500005183']
        # connections=2 on both rows -> attach() sees existing connections
        # and therefore skips the dedicate step.
        fcp_info_list = [('c123', 'user1', 2, 1, 'c05076de3300011c',
                          'c05076de33002641', '27', 'active', 'owner1', ''),
                         ('d123', 'user1', 2, 1, 'c05076de3300011d',
                          'c05076de33002641', '27', 'active', 'owner2', '')]
        fcp_id_list = [fcp_info[0] for fcp_info in fcp_info_list]
        self._insert_data_into_fcp_table(fcp_info_list)
        try:
            self.volumeops.attach(connection_info)
            # No dedicate; disks added for the (upper-cased) assigner.
            self.assertFalse(mock_dedicate.called)
            mock_add_disk.assert_has_calls([mock.call(['c123', 'd123'],
                                                      'USER1', wwpns, '2222',
                                                      False, 'rhel7',
                                                      '/dev/sdz')])
        finally:
            # Clean up seeded rows regardless of assertion outcome.
            self.db_op.bulk_delete_from_fcp_table(fcp_id_list)

    @mock.patch("zvmsdk.volumeop.FCPVolumeManager.get_fcp_usage")
    @mock.patch("zvmsdk.utils.check_userid_exist")
    @mock.patch("zvmsdk.volumeop.FCPVolumeManager._rollback_do_attach")
    @mock.patch("zvmsdk.volumeop.FCPVolumeManager._dedicate_fcp")
@mock.patch("zvmsdk.volumeop.FCPManager.increase_fcp_connections") def test_do_attach_with_rollback_due_to_dedicate_fcp_failure(self, mock_increase_conn, mock_dedicate, mock_rollback, mock_check, mock_get_fcp_usage): connection_info = {'platform': 'x86_64', 'ip': '1.2.3.4', 'os_version': 'rhel7', 'multipath': 'false', 'target_wwpn': ['20076D8500005182'], 'target_lun': '2222', 'zvm_fcp': ['e83c'], 'mount_point': '/dev/sdz', 'assigner_id': 'user1'} mock_check.return_value = True mock_increase_conn.return_value = {'e83c': 1, 'b83c': 1} mock_get_fcp_usage.return_value = ['user1', 1, 1, 'fake_tmpl_id'] results = {'rs': 0, 'errno': 0, 'strError': '', 'overallRC': 1, 'logEntries': [], 'rc': 0, 'response': ['fake response']} mock_dedicate.side_effect = exception.SDKSMTRequestFailed( results, 'fake error') self.assertRaises(exception.SDKBaseException, self.volumeops.attach, connection_info) mock_rollback.assert_called_once_with(['e83c'], 'USER1', ['20076d8500005182'], '2222', False, 'rhel7', '/dev/sdz') self.assertEqual(mock_get_fcp_usage.call_args_list, [call('e83c')]) @mock.patch("zvmsdk.volumeop.FCPVolumeManager.get_fcp_usage") @mock.patch("zvmsdk.volumeop.FCPManager._get_all_fcp_info") @mock.patch("zvmsdk.utils.check_userid_exist") @mock.patch("zvmsdk.volumeop.FCPVolumeManager._add_disks") @mock.patch("zvmsdk.volumeop.FCPManager.increase_fcp_connections") @mock.patch("zvmsdk.volumeop.FCPVolumeManager._remove_disks") @mock.patch("zvmsdk.volumeop.FCPManager.decrease_fcp_connections") def test_detach_rollback(self, mock_decrease, mock_remove_disk, mock_increase, mock_add_disk, mock_check, mock_fcp_info, get_fcp_usage): connection_info = {'platform': 'x86_64', 'ip': '1.2.3.4', 'os_version': 'rhel7', 'multipath': 'False', 'target_wwpn': ['20076D8500005182'], 'target_lun': '2222', 'zvm_fcp': ['f83c'], 'mount_point': '/dev/sdz', 'assigner_id': 'user1'} fcp_list = ['opnstk1: FCP device number: F83C', 'opnstk1: Status: Free', 'opnstk1: NPIV world wide port number: ' 
'20076D8500005182', 'opnstk1: Channel path ID: 59', 'opnstk1: Physical world wide port number: ' '20076D8500005181', 'Owner: NONE'] get_fcp_usage.return_value = ('user1', 0, 0, '') mock_fcp_info.return_value = fcp_list # this return does not matter mock_check.return_value = True mock_decrease.return_value = {'f83c': 0} results = {'rs': 0, 'errno': 0, 'strError': '', 'overallRC': 1, 'logEntries': [], 'rc': 0, 'response': ['fake response']} mock_remove_disk.side_effect = exception.SDKSMTRequestFailed( results, 'fake error') self.assertRaises(exception.SDKBaseException, self.volumeops.detach, connection_info) # because no fcp dedicated mock_add_disk.assert_called_once_with(['f83c'], 'USER1', ['20076d8500005182'], '2222', False, 'rhel7', '/dev/sdz') @mock.patch("zvmsdk.volumeop.FCPVolumeManager.get_fcp_usage") @mock.patch("zvmsdk.volumeop.FCPManager._get_all_fcp_info") @mock.patch("zvmsdk.utils.check_userid_exist") @mock.patch("zvmsdk.volumeop.FCPVolumeManager._add_disks") @mock.patch("zvmsdk.volumeop.FCPVolumeManager._remove_disks") def test_detach_need_rollback(self, mock_remove_disk, mock_add_disk, mock_check, mock_fcp_info, get_fcp_usage): """Test need_rollback dict was set correctly. 
""" connection_info = {'platform': 'x86_64', 'ip': '1.2.3.4', 'os_version': 'rhel7', 'multipath': 'False', 'target_wwpn': ['20076D8500005181', '20076D8500005182'], 'target_lun': '2222', 'zvm_fcp': ['f83c', 'f84c'], 'mount_point': '/dev/sdz', 'assigner_id': 'user1'} fcp_list = ['opnstk1: FCP device number: F83C', 'opnstk1: Status: Free', 'opnstk1: NPIV world wide port number: ' '20076D8500005181', 'opnstk1: Channel path ID: 59', 'opnstk1: Physical world wide port number: ' '20076D8500005181', 'Owner: NONE', 'opnstk1: FCP device number: F84C', 'opnstk1: Status: Free', 'opnstk1: NPIV world wide port number: ' '20076D8500005182', 'opnstk1: Channel path ID: 59', 'opnstk1: Physical world wide port number: ' '20076D8500005182', 'Owner: NONE'] # set connections of f83c to 1 # left connections of f84c to 0 fcp_info_list = [('f83c', 'user1', 1, 1, 'c05076de3300011c', 'c05076de33002641', '27', 'active', 'owner1', ''), ('f84c', 'user1', 0, 1, 'c05076de3300011d', 'c05076de33002641', '27', 'active', 'owner2', '')] fcp_id_list = [fcp_info[0] for fcp_info in fcp_info_list] self._insert_data_into_fcp_table(fcp_info_list) try: get_fcp_usage.return_value = ('use1', 0, 0, '') mock_fcp_info.return_value = fcp_list # this return does not matter mock_check.return_value = True results = {'rs': 0, 'errno': 0, 'strError': '', 'overallRC': 1, 'logEntries': [], 'rc': 0, 'response': ['fake response']} mock_remove_disk.side_effect = exception.SDKSMTRequestFailed( results, 'fake error') self.assertRaises(exception.SDKBaseException, self.volumeops.detach, connection_info) # because no fcp dedicated mock_add_disk.assert_called_once_with(['f83c', 'f84c'], 'USER1', ['20076d8500005181', '20076d8500005182'], '2222', False, 'rhel7', '/dev/sdz') finally: self.db_op.bulk_delete_from_fcp_table(fcp_id_list) @mock.patch("zvmsdk.volumeop.FCPManager._get_all_fcp_info") @mock.patch("zvmsdk.utils.check_userid_exist") @mock.patch("zvmsdk.volumeop.FCPVolumeManager._remove_disks") 
@mock.patch("zvmsdk.volumeop.FCPVolumeManager._undedicate_fcp") def test_detach(self, mock_undedicate, mock_remove_disk, mock_check, mock_fcp_info): """Test detach API.""" connection_info = {'platform': 'x86_64', 'ip': '1.2.3.4', 'os_version': 'rhel7', 'multipath': 'True', 'target_wwpn': ['20076D8500005182', '20076D8500005183'], 'target_lun': '2222', 'zvm_fcp': ['183c', '283c'], 'mount_point': '/dev/sdz', 'assigner_id': 'user1'} fcp_list = ['opnstk1: FCP device number: 183C', 'opnstk1: Status: Free', 'opnstk1: NPIV world wide port number: 20076D8500005182', 'opnstk1: Channel path ID: 59', 'opnstk1: Physical world wide port number: ' '20076D8500005181', 'Owner: NONE', 'opnstk1: FCP device number: 283C', 'opnstk1: Status: Active', 'opnstk1: NPIV world wide port number: ' '20076D8500005183', 'opnstk1: Channel path ID: 50', 'opnstk1: Physical world wide port number: ' '20076D8500005185', 'Owner: UNIT0001'] mock_fcp_info.return_value = fcp_list mock_check.return_value = True wwpns = ['20076d8500005182', '20076d8500005183'] fcp_info_list = [('183c', 'user1', 0, 1, 'c05076de3300011c', 'c05076de33002641', '27', 'active', 'owner1', ''), ('283c', 'user1', 1, 1, 'c05076de3300011d', 'c05076de33002641', '27', 'active', 'owner2', '')] fcp_id_list = [fcp_info[0] for fcp_info in fcp_info_list] self._insert_data_into_fcp_table(fcp_info_list) try: self.volumeops.detach(connection_info) mock_undedicate.assert_has_calls([mock.call('183c', 'USER1'), mock.call('283c', 'USER1')]) mock_remove_disk.assert_has_calls([mock.call(['183c', '283c'], 'USER1', wwpns, '2222', True, 'rhel7', '/dev/sdz', 0)]) userid, reserved, conns, tmpl_id = self.volumeops.get_fcp_usage('183c') self.assertEqual(userid, 'user1') self.assertEqual(reserved, 1) self.assertEqual(conns, 0) self.assertEqual(tmpl_id, '') userid, reserved, conns, tmpl_id = self.volumeops.get_fcp_usage('283c') self.assertEqual(userid, 'user1') self.assertEqual(reserved, 1) self.assertEqual(conns, 0) self.assertEqual(tmpl_id, '') finally: 
self.db_op.bulk_delete_from_fcp_table(fcp_id_list) @mock.patch("zvmsdk.volumeop.FCPManager._get_all_fcp_info") @mock.patch("zvmsdk.utils.check_userid_exist") @mock.patch("zvmsdk.volumeop.FCPVolumeManager._remove_disks") @mock.patch("zvmsdk.volumeop.FCPVolumeManager._undedicate_fcp") def test_root_volume_detach(self, mock_undedicate, mock_remove_disk, mock_check, mock_fcp_info): """Test detach root volume.""" connection_info = {'platform': 's390x', 'ip': '1.2.3.4', 'os_version': 'rhel7', 'multipath': 'True', 'target_wwpn': ['20076D8500005182', '20076D8500005183'], 'target_lun': '2222', 'zvm_fcp': ['183c', '283c'], 'mount_point': '/dev/sdz', 'assigner_id': 'user1', 'is_root_volume': True} fcp_list = ['opnstk1: FCP device number: 183C', 'opnstk1: Status: Free', 'opnstk1: NPIV world wide port number: 20076D8500005182', 'opnstk1: Channel path ID: 59', 'opnstk1: Physical world wide port number: ' '20076D8500005181', 'Owner: NONE', 'opnstk1: FCP device number: 283C', 'opnstk1: Status: Active', 'opnstk1: NPIV world wide port number: 20076D8500005183', 'opnstk1: Channel path ID: 50', 'opnstk1: Physical world wide port number: ' '20076D8500005185', 'Owner: UNIT0001'] mock_fcp_info.return_value = fcp_list mock_check.return_value = True fcp_info_list = [('183c', 'user1', 0, 1, 'c05076de3300011c', 'c05076de33002641', '27', 'active', 'owner1', ''), ('283c', 'user1', 1, 1, 'c05076de3300011d', 'c05076de33002641', '27', 'active', 'owner2', '')] fcp_id_list = [fcp_info[0] for fcp_info in fcp_info_list] self._insert_data_into_fcp_table(fcp_info_list) try: self.volumeops.detach(connection_info) self.assertFalse(mock_undedicate.called) self.assertFalse(mock_remove_disk.called) finally: self.db_op.bulk_delete_from_fcp_table(fcp_id_list) @mock.patch("zvmsdk.volumeop.FCPManager._get_all_fcp_info") @mock.patch("zvmsdk.utils.check_userid_exist") @mock.patch("zvmsdk.volumeop.FCPVolumeManager._remove_disks") @mock.patch("zvmsdk.volumeop.FCPVolumeManager._undedicate_fcp") def 
test_update_connections_only_detach(self, mock_undedicate, mock_remove_disk, mock_check, mock_fcp_info): """Test only update connections when detach volume.""" connection_info = {'platform': 's390x', 'ip': '1.2.3.4', 'os_version': 'rhel7', 'multipath': 'True', 'target_wwpn': ['20076D8500005182', '20076D8500005183'], 'target_lun': '2222', 'zvm_fcp': ['183c', '283c'], 'mount_point': '/dev/sdz', 'assigner_id': 'user1', 'update_connections_only': True} fcp_list = ['opnstk1: FCP device number: 183C', 'opnstk1: Status: Free', 'opnstk1: NPIV world wide port number: 20076D8500005182', 'opnstk1: Channel path ID: 59', 'opnstk1: Physical world wide port number: ' '20076D8500005181', 'Owner: NONE', 'opnstk1: FCP device number: 283C', 'opnstk1: Status: Active', 'opnstk1: NPIV world wide port number: 20076D8500005183', 'opnstk1: Channel path ID: 50', 'opnstk1: Physical world wide port number: ' '20076D8500005185', 'Owner: UNIT0001'] mock_fcp_info.return_value = fcp_list mock_check.return_value = True fcp_info_list = [('183c', 'user1', 0, 1, 'c05076de3300011c', 'c05076de33002641', '27', 'active', 'owner1', ''), ('283c', 'user1', 1, 1, 'c05076de3300011d', 'c05076de33002641', '27', 'active', 'owner2', '')] fcp_id_list = [fcp_info[0] for fcp_info in fcp_info_list] self._insert_data_into_fcp_table(fcp_info_list) try: self.volumeops.detach(connection_info) self.assertFalse(mock_undedicate.called) self.assertFalse(mock_remove_disk.called) finally: self.db_op.bulk_delete_from_fcp_table(fcp_id_list) @mock.patch("zvmsdk.utils.check_userid_exist") @mock.patch("zvmsdk.volumeop.FCPVolumeManager._remove_disks") @mock.patch("zvmsdk.volumeop.FCPVolumeManager._undedicate_fcp") def test_detach_no_undedicate(self, mock_undedicate, mock_remove_disk, mock_check): """Test no undedidicate action is called when detach.""" connection_info = {'platform': 'x86_64', 'ip': '1.2.3.4', 'os_version': 'rhel7', 'multipath': 'False', 'target_wwpn': ['1111'], 'target_lun': '2222', 'zvm_fcp': ['283c'], 
                           'mount_point': '/dev/sdz',
                           'assigner_id': 'user1'}
        mock_check.return_value = True
        # Seed one FCP with connections=2 so detach() leaves one
        # connection behind and does not undedicate the device.
        fcp_info_list = [('283c', 'user1', 2, 1, 'c05076de3300011d',
                          'c05076de33002641', '27', 'active', 'owner2', '')]
        fcp_id_list = [fcp_info[0] for fcp_info in fcp_info_list]
        self._insert_data_into_fcp_table(fcp_info_list)
        try:
            self.volumeops.detach(connection_info)
            # One connection remains (trailing arg 1), so the FCP must not
            # be undedicated; only the disk is removed from the guest.
            self.assertFalse(mock_undedicate.called)
            mock_remove_disk.assert_called_once_with(['283c'], 'USER1',
                                                     ['1111'], '2222',
                                                     False, 'rhel7',
                                                     '/dev/sdz', 1)
        finally:
            # Clean up seeded rows regardless of assertion outcome.
            self.db_op.bulk_delete_from_fcp_table(fcp_id_list)

    def test_update_statistics_usage(self):
        """ Test for _update_statistics_usage()
        is included in test_get_all_fcp_usage_xxx()
        """
        pass

    def test_update_raw_fcp_usage(self):
        """ Test for _update_raw_fcp_usage()
        is included in test_get_all_fcp_usage_xxx()
        """
        pass

    def test_get_fcp_usage(self):
        """Test get_fcp_usage"""
        template_id = 'fakehost-1111-1111-1111-111111111111'
        # reserved == 1, connections == 2, assigner_id == 'user1'
        fcp_info_list = [('283c', 'user1', 2, 1, 'c05076de33000111',
                          'c05076de33002641', '27', 'active', 'user1',
                          template_id)]
        fcp_id_list = [fcp_info[0] for fcp_info in fcp_info_list]
        # Insert directly via SQL rather than the helper so the test also
        # pins the fcp table's column layout.
        with database.get_fcp_conn() as conn:
            conn.executemany("INSERT INTO fcp (fcp_id, assigner_id, "
                             "connections, reserved, wwpn_npiv, wwpn_phy, "
                             "chpid, state, owner, tmpl_id) VALUES "
                             "(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
                             fcp_info_list)
        try:
            # get_fcp_usage must echo back exactly what was inserted.
            userid, reserved, conns, tmpl_id = \
                self.volumeops.get_fcp_usage('283c')
            self.assertEqual(userid, 'user1')
            self.assertEqual(reserved, 1)
            self.assertEqual(conns, 2)
            self.assertEqual(template_id, tmpl_id)
        finally:
            # Clean up seeded rows regardless of assertion outcome.
            self.db_op.bulk_delete_from_fcp_table(fcp_id_list)

    def test_set_fcp_usage(self):
        """Test set_fcp_usage"""
        template_id = 'fakehost-1111-1111-1111-111111111111'
        # reserved == 1, connections == 2, assigner_id == 'user1'
        fcp_info_list = [('283c', 'user1', 2, 1, 'c05076de33000111',
                          'c05076de33002641', '27', 'active', 'user1',
                          template_id)]
        fcp_id_list = [fcp_info[0] for fcp_info in
fcp_info_list] with database.get_fcp_conn() as conn: conn.executemany("INSERT INTO fcp (fcp_id, assigner_id, " "connections, reserved, wwpn_npiv, wwpn_phy, " "chpid, state, owner, tmpl_id) VALUES " "(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", fcp_info_list) try: # change reserved to 0 and connections to 3 new_tmpl_id = 'newhost-1111-1111-1111-111111111111' self.volumeops.set_fcp_usage('283c', 'user2', 0, 3, new_tmpl_id) userid, reserved, conns, tmpl_id = self.volumeops.get_fcp_usage('283c') self.assertEqual(userid, 'user2') self.assertEqual(reserved, 0) self.assertEqual(conns, 3) self.assertEqual(new_tmpl_id, tmpl_id) finally: self.db_op.bulk_delete_from_fcp_table(fcp_id_list) @mock.patch("zvmsdk.volumeop.FCPVolumeManager._remove_disks") @mock.patch("zvmsdk.database.FCPDbOperator.unreserve_fcps") @mock.patch("zvmsdk.volumeop.FCPVolumeManager._undedicate_fcp") @mock.patch("zvmsdk.database.FCPDbOperator.get_connections_from_fcp") def test_rollback_do_attach(self, mock_get_conn, mock_undedicate, mock_unreserve, mock_rm_disk): """Test _rollback_do_attach""" mock_rm_disk.side_effect = Exception() mock_undedicate.side_effect = [ None, exception.SDKSMTRequestFailed({}, 'msg'), None] mock_get_conn.side_effect = [1, 0, 0, 0] fcp_list = ['1a01', '1b01', '1c01', '1d01'] assigner_id = 'fake_id' target_wwpns = 'tgt_wwpn' target_lun = '1' multipath = True os_version = 'os' mount_point = '/dev/sda' total_connections = 1 self.volumeops._rollback_do_attach(fcp_list, assigner_id, target_wwpns, target_lun, multipath, os_version, mount_point) # verify self.assertEqual(mock_get_conn.call_args_list, [call(fcp_list[0]), call(fcp_list[1]), call(fcp_list[2]), call(fcp_list[3])]) self.assertRaises(Exception, mock_rm_disk, fcp_list, assigner_id, target_wwpns, target_lun, multipath, os_version, mount_point, total_connections) self.assertEqual(mock_undedicate.call_args_list, [call(fcp_list[1], assigner_id), call(fcp_list[2], assigner_id), call(fcp_list[3], assigner_id)]) 
mock_unreserve.assert_called_once_with(fcp_list[1:]) zVMCloudConnector-1.6.3/zvmsdk/tests/unit/test_vmops.py0000664000175000017510000004674614263501130022752 0ustar ruirui00000000000000# Copyright 2017,2022 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import tempfile from zvmsdk import dist from zvmsdk import exception from zvmsdk import vmops from zvmsdk.tests.unit import base class SDKVMOpsTestCase(base.SDKTestCase): def setUp(self): super(SDKVMOpsTestCase, self).setUp() self.vmops = vmops.get_vmops() @mock.patch("zvmsdk.smtclient.SMTClient.get_power_state") def test_get_power_state(self, gps): gps.return_value = 'on' self.vmops.get_power_state('cbi00063') gps.assert_called_with('cbi00063') @mock.patch("zvmsdk.smtclient.SMTClient.get_guest_connection_status") def test_is_reachable(self, ggcs): ggcs.return_value = True ret = self.vmops.is_reachable('cbi00063') self.assertEqual(ret, True) @mock.patch("zvmsdk.smtclient.SMTClient.guest_start") def test_guest_start(self, guest_start): self.vmops.guest_start('cbi00063') guest_start.assert_called_once_with('cbi00063') @mock.patch('zvmsdk.vmops.VMOps.is_reachable') @mock.patch('zvmsdk.vmops.VMOps.wait_for_reachable') @mock.patch("zvmsdk.smtclient.SMTClient.guest_start") def test_guest_start_timeout(self, guest_start, wait, is_up): is_up.return_value = False timeout = 10 self.assertRaises(exception.SDKGuestOperationError, self.vmops.guest_start, 'cbi00063', timeout) guest_start.assert_called_once_with('cbi00063') 
        # (continuation of test_guest_start_timeout from the previous chunk)
        wait.assert_called_once_with('cbi00063', timeout)
        is_up.assert_called_once_with('cbi00063')

    @mock.patch("zvmsdk.smtclient.SMTClient.guest_pause")
    def test_guest_pause(self, guest_pause):
        self.vmops.guest_pause('cbi00063')
        guest_pause.assert_called_once_with('cbi00063')

    @mock.patch("zvmsdk.smtclient.SMTClient.guest_unpause")
    def test_guest_unpause(self, guest_unpause):
        self.vmops.guest_unpause('cbi00063')
        guest_unpause.assert_called_once_with('cbi00063')

    @mock.patch("zvmsdk.smtclient.SMTClient.namelist_add")
    @mock.patch("zvmsdk.smtclient.SMTClient.create_vm")
    def test_create_vm(self, create_vm, namelistadd):
        """create_vm passes all arguments through and registers the userid
        in the namelist."""
        userid = 'fakeuser'
        cpu = 2
        memory = '2g'
        disk_list = []
        user_profile = 'testprof'
        max_cpu = 10
        max_mem = '4G'
        vdevs = ['1234']
        loaddev = {'portname': '5678', 'lun': '0000000000000000'}
        account = "dummy dummy"
        comment_list = ['comment1', 'comment2 is here']
        self.vmops.create_vm(userid, cpu, memory, disk_list, user_profile,
                             max_cpu, max_mem, '', '', '', vdevs, loaddev,
                             account, comment_list, '', '', '', '')
        create_vm.assert_called_once_with(userid, cpu, memory, disk_list,
                                          user_profile, max_cpu, max_mem,
                                          '', '', '', vdevs, loaddev,
                                          account, comment_list,
                                          '', '', '', '')
        namelistadd.assert_called_once_with('TSTNLIST', userid)

    @mock.patch("zvmsdk.smtclient.SMTClient.process_additional_minidisks")
    def test_guest_config_minidisks(self, process_additional_minidisks):
        userid = 'userid'
        disk_list = [{'vdev': '0101',
                      'format': 'ext3',
                      'mntdir': '/mnt/0101'}]
        self.vmops.guest_config_minidisks(userid, disk_list)
        process_additional_minidisks.assert_called_once_with(userid,
                                                             disk_list)

    @mock.patch("zvmsdk.smtclient.SMTClient.get_power_state")
    def test_is_powered_off(self, check_stat):
        check_stat.return_value = 'off'
        ret = self.vmops.is_powered_off('cbi00063')
        self.assertEqual(True, ret)

    @mock.patch("zvmsdk.smtclient.SMTClient.guest_get_kernel_info")
    @mock.patch("zvmsdk.smtclient.SMTClient.guest_get_os_version")
    @mock.patch("zvmsdk.smtclient.SMTClient.get_active_cpu_addrs")
    @mock.patch("zvmsdk.smtclient.SMTClient.get_image_performance_info")
    @mock.patch('zvmsdk.vmops.VMOps.get_power_state')
    def test_get_info(self, gps, gipi, gaca, ggov, ggki):
        # Powered-on guest: info comes from the performance query.
        gps.return_value = 'on'
        gipi.return_value = {'used_memory': u'4872872 KB',
                             'used_cpu_time': u'6911844399 uS',
                             'guest_cpus': u'2',
                             'userid': u'CMABVT',
                             'max_memory': u'8388608 KB'}
        gaca.return_value = [0, 1, 2]
        ggov.return_value = 'RHEL8.4'
        kernel_info = 'Linux 4.18.0-305.el8.s390x s390x'
        ggki.return_value = kernel_info
        vm_info = self.vmops.get_info('fakeid')
        gps.assert_called_once_with('fakeid')
        gipi.assert_called_once_with('fakeid')
        gaca.assert_called_once_with('fakeid')
        ggov.assert_called_once_with('fakeid')
        ggki.assert_called_once_with('fakeid')
        self.assertEqual(vm_info['power_state'], 'on')
        self.assertEqual(vm_info['max_mem_kb'], 8388608)
        self.assertEqual(vm_info['mem_kb'], 4872872)
        self.assertEqual(vm_info['num_cpu'], 2)
        self.assertEqual(vm_info['cpu_time_us'], 6911844399)
        # online cpu count == len(active cpu address list)
        self.assertEqual(vm_info['online_cpu_num'], 3)
        self.assertEqual(vm_info['os_distro'], 'RHEL8.4')
        self.assertEqual(vm_info['kernel_info'], kernel_info)

    @mock.patch("zvmsdk.smtclient.SMTClient.get_image_performance_info")
    @mock.patch('zvmsdk.vmops.VMOps.get_power_state')
    def test_get_info_error(self, gps, gipi):
        # Performance query failure propagates to the caller.
        gps.return_value = 'on'
        gipi.side_effect = exception.ZVMVirtualMachineNotExist(
            zvm_host='fakehost', userid='fakeid')
        self.assertRaises(exception.ZVMVirtualMachineNotExist,
                          self.vmops.get_info, 'fakeid')

    @mock.patch("zvmsdk.smtclient.SMTClient.guest_get_kernel_info")
    @mock.patch("zvmsdk.smtclient.SMTClient.guest_get_os_version")
    @mock.patch("zvmsdk.smtclient.SMTClient.get_active_cpu_addrs")
    @mock.patch("zvmsdk.smtclient.SMTClient.get_user_direct")
    @mock.patch("zvmsdk.smtclient.SMTClient.get_image_performance_info")
    @mock.patch('zvmsdk.vmops.VMOps.get_power_state')
    def test_get_info_shutdown(self, gps, gipi, gud, gaca, ggov, ggki):
        # Powered-off guest: info is derived from the user directory entry
        # (USER statement gives max memory; CPU statements give cpu count).
        gps.return_value = 'off'
        gipi.return_value = None
        gud.return_value = [
            u'USER FAKEUSER DFLTPASS 2048m 2048m G',
            u'INCLUDE PROFILE',
            u'CPU 00 BASE',
            u'CPU 01',
            u'IPL 0100',
            u'NICDEF 1000 TYPE QDIO LAN SYSTEM VSW2 MACID 0E4E8E',
            u'MDISK 0100 3390 34269 3338 OMB1A9 MR',
            u'']
        gaca.return_value = [0, 1, 2]
        ggov.return_value = 'RHEL8.4'
        kernel_info = 'Linux 4.18.0-305.el8.s390x s390x'
        ggki.return_value = kernel_info
        vm_info = self.vmops.get_info('fakeid')
        gps.assert_called_once_with('fakeid')
        gud.assert_called_once_with('fakeid')
        gaca.assert_called_once_with('fakeid')
        ggov.assert_called_once_with('fakeid')
        ggki.assert_called_once_with('fakeid')
        self.assertEqual(vm_info['power_state'], 'off')
        # 2048m from the USER statement -> 2097152 KB
        self.assertEqual(vm_info['max_mem_kb'], 2097152)
        self.assertEqual(vm_info['mem_kb'], 0)
        self.assertEqual(vm_info['num_cpu'], 2)
        self.assertEqual(vm_info['cpu_time_us'], 0)
        self.assertEqual(vm_info['online_cpu_num'], 3)
        self.assertEqual(vm_info['os_distro'], 'RHEL8.4')
        self.assertEqual(vm_info['kernel_info'], kernel_info)

    @mock.patch("zvmsdk.smtclient.SMTClient.guest_get_kernel_info")
    @mock.patch("zvmsdk.smtclient.SMTClient.guest_get_os_version")
    @mock.patch("zvmsdk.smtclient.SMTClient.get_active_cpu_addrs")
    @mock.patch("zvmsdk.smtclient.SMTClient.get_user_direct")
    @mock.patch("zvmsdk.smtclient.SMTClient.get_image_performance_info")
    @mock.patch('zvmsdk.vmops.VMOps.get_power_state')
    def test_get_info_get_uid_failed(self, gps, gipi, gud, gaca, ggov, ggki):
        gps.return_value = 'off'
        gipi.return_value = None
        gud.side_effect = exception.ZVMVirtualMachineNotExist(
            userid='fakeid', zvm_host='fakehost')
        self.assertRaises(exception.ZVMVirtualMachineNotExist,
                          self.vmops.get_info, 'fakeid')

    @mock.patch("zvmsdk.smtclient.SMTClient.get_adapters_info")
    def test_get_adapters_info(self, adapters_info):
        adapters = [{u'lan_owner': u'SYSTEM',
                     u'adapter_address': u'1000',
                     u'lan_name': u'VSC12345',
                     u'adapter_status': u'02',
                     u'mac_address': u'02:55:36:5D:48:57',
                     u'mac_ip_version': u'4',
                     u'mac_ip_address': u'9.152.85.152'}]
        adapters_info.return_value = adapters
        ret = self.vmops.get_adapters_info('fakeid')
        self.assertEqual(ret['adapters'][0]['mac_ip_address'],
                         u'9.152.85.152')

    @mock.patch("zvmsdk.smtclient.SMTClient._get_image_last_access_time")
    @mock.patch("zvmsdk.database.ImageDbOperator.image_query_record")
    @mock.patch("zvmsdk.smtclient.SMTClient.guest_deploy")
    @mock.patch("zvmsdk.smtclient.SMTClient.image_get_os_distro")
    def test_guest_deploy(self, image_get_os_distro, deploy_image_to_vm,
                          img_query, get_atime):
        image_get_os_distro.return_value = 'fake-distro'
        get_atime.return_value = 1581910539.3330014
        img_query.return_value = [{'imageosdistro': 'rhel6.7'}]
        self.vmops.guest_deploy('fakevm', 'fakeimg', '/test/transport.tgz')
        image_get_os_distro.assert_called_once_with('fakeimg')
        deploy_image_to_vm.assert_called_with('fakevm', 'fakeimg',
                                              '/test/transport.tgz', None,
                                              None, False)

    @mock.patch("zvmsdk.smtclient.SMTClient._get_image_last_access_time")
    @mock.patch('zvmsdk.vmops.VMOps.set_hostname')
    @mock.patch("zvmsdk.database.ImageDbOperator.image_query_record")
    @mock.patch("zvmsdk.smtclient.SMTClient.guest_deploy")
    def test_guest_deploy_sethostname(self, deploy_image_to_vm, img_query,
                                      set_hostname, get_atime):
        # When a hostname is given, deploy also sets it using the distro
        # recorded in the image db.
        fake_hostname = 'fakehost'
        img_query.return_value = [{'imageosdistro': 'rhel6.7'}]
        get_atime.return_value = 1581910539.3330014
        self.vmops.guest_deploy('fakevm', 'fakeimg',
                                hostname=fake_hostname)
        deploy_image_to_vm.assert_called_with('fakevm', 'fakeimg', None,
                                              None, None, False)
        img_query.assert_called_once_with('fakeimg')
        set_hostname.assert_called_once_with('fakevm', fake_hostname,
                                             'rhel6.7')

    @mock.patch("zvmsdk.smtclient.SMTClient.guest_capture")
    def test_guest_capture(self, guest_capture):
        # Defaults: rootonly capture at compression level 6.
        self.vmops.guest_capture('fakevm', 'fakeimg')
        guest_capture.assert_called_once_with('fakevm', 'fakeimg',
                                              capture_type='rootonly',
                                              compress_level=6)

    @mock.patch("zvmsdk.smtclient.SMTClient.get_user_direct")
    def test_get_definition_info(self, get_user_direct):
        get_user_direct.return_value = [
            'line1',
            'NICDEF 1000 TYPE QDIO LAN SYSTEM VSWITCH']
        self.vmops.get_definition_info("fake_user_id", nic_coupled='1000')
        get_user_direct.assert_called_with("fake_user_id")

    @mock.patch("zvmsdk.smtclient.SMTClient.delete_vm")
    def test_delete_vm(self, delete_vm):
        userid = 'userid'
        self.vmops.delete_vm(userid)
        delete_vm.assert_called_once_with(userid)

    @mock.patch("zvmsdk.smtclient.SMTClient.execute_cmd")
    def test_execute_cmd(self, execute_cmd):
        userid = 'userid'
        cmdStr = 'ls'
        self.vmops.execute_cmd(userid, cmdStr)
        execute_cmd.assert_called_once_with(userid, cmdStr)

    @mock.patch("zvmsdk.smtclient.SMTClient.guest_stop")
    def test_guest_stop(self, gs):
        userid = 'userid'
        self.vmops.guest_stop(userid)
        gs.assert_called_once_with(userid)

    @mock.patch("zvmsdk.smtclient.SMTClient.guest_stop")
    def test_guest_stop_with_timeout(self, gs):
        userid = 'userid'
        gs.return_value = u'off'
        self.vmops.guest_stop(userid, timeout=300, poll_interval=10)
        gs.assert_called_once_with(userid, timeout=300, poll_interval=10)

    @mock.patch("zvmsdk.smtclient.SMTClient.get_vm_list")
    def test_guest_list(self, get_vm_list):
        self.vmops.guest_list()
        get_vm_list.assert_called_once_with()

    @mock.patch("zvmsdk.smtclient.SMTClient.add_mdisks")
    @mock.patch("zvmsdk.smtclient.SMTClient.get_user_direct")
    def test_create_disks(self, gud, amds):
        # Highest existing MDISK is 101 -> the next start vdev is 0102.
        user_direct = ['USER TEST TEST',
                       'MDISK 100 3390',
                       'MDISK 101 3390']
        gud.return_value = user_direct
        self.vmops.create_disks('userid', [])
        gud.assert_called_once_with('userid')
        amds.assert_called_once_with('userid', [], '0102')

    @mock.patch("zvmsdk.smtclient.SMTClient.add_mdisks")
    @mock.patch("zvmsdk.smtclient.SMTClient.get_user_direct")
    def test_create_disks_200(self, gud, amds):
        # Highest existing MDISK is 200 -> the next start vdev is 0201.
        user_direct = ['USER TEST TEST',
                       'MDISK 100 3390',
                       'MDISK 200 3390']
        gud.return_value = user_direct
        self.vmops.create_disks('userid', [])
        gud.assert_called_once_with('userid')
        amds.assert_called_once_with('userid', [], '0201')

    @mock.patch("zvmsdk.smtclient.SMTClient.remove_mdisks")
    @mock.patch("zvmsdk.smtclient.SMTClient.get_power_state")
    def test_delete_disks(self, gps, rmd):
        gps.return_value = 'off'
        self.vmops.delete_disks('userid', ['101', '102'])
        rmd.assert_called_once_with('userid', ['101', '102'])

    @mock.patch("zvmsdk.smtclient.SMTClient.get_power_state")
    def test_delete_disks_active(self, gps):
        # Deleting disks from a running guest is not implemented.
        gps.return_value = 'on'
        self.assertRaises(exception.SDKFunctionNotImplementError,
                          self.vmops.delete_disks, 'userid', ['101', '102'])
        gps.assert_called_once_with('userid')

    @mock.patch("zvmsdk.smtclient.SMTClient.guest_reboot")
    def test_guest_reboot(self, guest_reboot):
        self.vmops.guest_reboot('cbi00063')
        guest_reboot.assert_called_once_with('cbi00063')

    @mock.patch("zvmsdk.smtclient.SMTClient.guest_reset")
    def test_guest_reset(self, guest_reset):
        self.vmops.guest_reset('cbi00063')
        guest_reset.assert_called_once_with('cbi00063')

    @mock.patch("zvmsdk.smtclient.SMTClient.live_resize_cpus")
    @mock.patch('zvmsdk.vmops.VMOps.get_power_state')
    def test_live_resize_cpus(self, power_state, do_resize):
        userid = 'testuid'
        cpu_cnt = 3
        power_state.return_value = 'on'
        self.vmops.live_resize_cpus(userid, cpu_cnt)
        power_state.assert_called_once_with(userid)
        do_resize.assert_called_once_with(userid, cpu_cnt)

    @mock.patch("zvmsdk.smtclient.SMTClient.live_resize_cpus")
    @mock.patch('zvmsdk.vmops.VMOps.get_power_state')
    def test_live_resize_cpus_guest_inactive(self, power_state, do_resize):
        # Live resize on an inactive guest must be rejected before any
        # resize is attempted.
        userid = 'testuid'
        cpu_cnt = 3
        power_state.return_value = 'off'
        self.assertRaises(exception.SDKConflictError,
                          self.vmops.live_resize_cpus, userid, cpu_cnt)
        power_state.assert_called_once_with(userid)
        do_resize.assert_not_called()

    @mock.patch("zvmsdk.smtclient.SMTClient.resize_cpus")
    def test_resize_cpus(self, do_resize):
        userid = 'testuid'
        cpu_cnt = 3
        self.vmops.resize_cpus(userid, cpu_cnt)
        do_resize.assert_called_once_with(userid, cpu_cnt)

    @mock.patch("zvmsdk.smtclient.SMTClient.live_resize_memory")
    @mock.patch('zvmsdk.vmops.VMOps.get_power_state')
    def test_live_resize_memory(self, power_state, do_resize):
        userid = 'testuid'
        size = '1g'
        power_state.return_value = 'on'
        self.vmops.live_resize_memory(userid, size)
        power_state.assert_called_once_with(userid)
        do_resize.assert_called_once_with(userid, size)

    @mock.patch("zvmsdk.smtclient.SMTClient.live_resize_memory")
    @mock.patch('zvmsdk.vmops.VMOps.get_power_state')
    def test_live_resize_memory_guest_inactive(self, power_state, do_resize):
        userid = 'testuid'
        size = '1g'
        power_state.return_value = 'off'
        self.assertRaises(exception.SDKConflictError,
                          self.vmops.live_resize_memory, userid, size)
        power_state.assert_called_once_with(userid)
        do_resize.assert_not_called()

    @mock.patch("zvmsdk.smtclient.SMTClient.resize_memory")
    def test_resize_memory(self, do_resize):
        userid = 'testuid'
        size = '1g'
        self.vmops.resize_memory(userid, size)
        do_resize.assert_called_once_with(userid, size)

    @mock.patch("zvmsdk.smtclient.SMTClient.live_migrate_move")
    @mock.patch('zvmsdk.vmops.VMOps.get_power_state')
    def test_live_migrate_vm(self, power_state, live_migrate_vm):
        # action == "move" routes to live_migrate_move (parms included).
        userid = 'testuid'
        destination = 'testssi'
        parms = {}
        action = "move"
        power_state.return_value = 'on'
        self.vmops.live_migrate_vm(userid, destination, parms, action)
        power_state.assert_called_once_with(userid)
        live_migrate_vm.assert_called_once_with(userid, destination, parms)

    @mock.patch("zvmsdk.smtclient.SMTClient.live_migrate_test")
    @mock.patch('zvmsdk.vmops.VMOps.get_power_state')
    def test_live_migrate_test(self, power_state, live_migrate_vm):
        # action == "test" routes to live_migrate_test (no parms passed).
        userid = 'testuid'
        destination = 'testssi'
        parms = {}
        action = "test"
        power_state.return_value = 'on'
        self.vmops.live_migrate_vm(userid, destination, parms, action)
        power_state.assert_called_once_with(userid)
        live_migrate_vm.assert_called_once_with(userid, destination)

    @mock.patch('zvmsdk.smtclient.SMTClient.punch_file')
    @mock.patch('zvmsdk.utils.PathUtils.get_guest_temp_path')
    @mock.patch.object(dist.rhel7, 'get_extend_partition_cmds')
    @mock.patch("zvmsdk.dist.LinuxDistManager.get_linux_dist")
    def test_guest_grow_root_volume(self, get_dist, get_dist_cmds, tmp_path,
                                    punch_file):
        userid = "FAKE_USERID"
        os_version = "RHEL7.8"
        get_dist.return_value = dist.rhel7
        get_dist_cmds.return_value = "fake_cmds"
        # Real temp dir so the punched script path actually exists.
        tmp_inst_dir = tempfile.mkdtemp(prefix=userid, dir='/tmp')
        tmp_path.return_value = tmp_inst_dir
        self.vmops.guest_grow_root_volume(userid, os_version)
        get_dist.assert_called_once_with(os_version)
        get_dist_cmds.assert_called_once_with()
        tmp_path.assert_called_once_with(userid)
        punch_file.assert_called_once_with(userid,
                                           ("%s/gpartvol.sh" % tmp_inst_dir),
                                           "X")
zVMCloudConnector-1.6.3/zvmsdk/tests/unit/test_networkops.py0000664000175000017510000003346614112136633024011 0ustar ruirui00000000000000# Copyright 2017,2018 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import shutil

from zvmsdk.tests.unit import base
from zvmsdk import dist
from zvmsdk import networkops


class SDKNetworkOpsTestCase(base.SDKTestCase):
    """Unit tests for NetworkOPS; SMTClient and distro helpers are mocked."""

    def setUp(self):
        # BUG FIX: the base-class setUp was never invoked, so any fixture
        # configured by base.SDKTestCase.setUp was silently skipped for
        # every test in this class (the sibling test classes in this suite
        # all chain to super).
        super(SDKNetworkOpsTestCase, self).setUp()
        self.networkops = networkops.get_networkops()

    @mock.patch('zvmsdk.smtclient.SMTClient.create_nic')
    def test_create_nic(self, create_nic):
        self.networkops.create_nic("fakeid", '1000', 'Fake_nic_id',
                                   active=True)
        create_nic.assert_called_with("fakeid", vdev='1000',
                                      nic_id='Fake_nic_id',
                                      mac_addr=None, active=True)

    @mock.patch('zvmsdk.smtclient.SMTClient.get_vswitch_list')
    def test_get_vswitch_list(self, get_vswitch_list):
        self.networkops.get_vswitch_list()
        get_vswitch_list.assert_called_with()

    @mock.patch('zvmsdk.smtclient.SMTClient.couple_nic_to_vswitch')
    def test_couple_nic_to_vswitch(self, couple_nic_to_vswitch):
        self.networkops.couple_nic_to_vswitch("fake_userid", "nic_vdev",
                                              "fake_VS_name", active=True,
                                              vlan_id=5)
        couple_nic_to_vswitch.assert_called_with("fake_userid", "nic_vdev",
                                                 "fake_VS_name", vlan_id=5,
                                                 active=True)

    @mock.patch('zvmsdk.smtclient.SMTClient.uncouple_nic_from_vswitch')
    def test_uncouple_nic_from_vswitch(self, uncouple_nic_from_vswitch):
        self.networkops.uncouple_nic_from_vswitch("fake_userid", "nic_vdev",
                                                  True)
        uncouple_nic_from_vswitch.assert_called_with("fake_userid",
                                                     "nic_vdev",
                                                     active=True)

    @mock.patch('zvmsdk.smtclient.SMTClient.add_vswitch')
    def test_add_vswitch(self, add_vswitch):
        # All keyword options must be forwarded unchanged; only the second
        # positional argument is re-passed as the 'rdev' keyword.
        self.networkops.add_vswitch("fakename", "fakerdev",
                                    controller='*', connection='CONNECT',
                                    network_type='ETHERNET',
                                    router="NONROUTER", vid='UNAWARE',
                                    port_type='ACCESS', gvrp='GVRP',
                                    queue_mem=8, native_vid=2,
                                    persist=False)
        add_vswitch.assert_called_with("fakename", rdev="fakerdev",
                                       controller='*',
                                       connection='CONNECT',
                                       network_type='ETHERNET',
                                       router="NONROUTER", vid='UNAWARE',
                                       port_type='ACCESS', gvrp='GVRP',
                                       queue_mem=8, native_vid=2,
                                       persist=False)

    @mock.patch('zvmsdk.smtclient.SMTClient.grant_user_to_vswitch')
    def test_grant_user_to_vswitch(self, grant_user):
        self.networkops.grant_user_to_vswitch("vswitch_name", "userid")
        grant_user.assert_called_with("vswitch_name", "userid")

    @mock.patch('zvmsdk.smtclient.SMTClient.revoke_user_from_vswitch')
    def test_revoke_user_from_vswitch(self, revoke_user):
        self.networkops.revoke_user_from_vswitch("vswitch_name", "userid")
        revoke_user.assert_called_with("vswitch_name", "userid")

    @mock.patch('zvmsdk.smtclient.SMTClient.set_vswitch_port_vlan_id')
    def test_set_vswitch_port_vlan_id(self, set_vswitch):
        self.networkops.set_vswitch_port_vlan_id("vswitch_name", "userid",
                                                 "vlan_id")
        set_vswitch.assert_called_with("vswitch_name", "userid", "vlan_id")

    @mock.patch('zvmsdk.smtclient.SMTClient.set_vswitch')
    def test_set_vswitch(self, set_vswitch):
        self.networkops.set_vswitch("vswitch_name", grant_userid='fake_id')
        set_vswitch.assert_called_with("vswitch_name",
                                       grant_userid='fake_id')

    @mock.patch('zvmsdk.smtclient.SMTClient.delete_vswitch')
    def test_delete_vswitch(self, delete_vswitch):
        self.networkops.delete_vswitch("vswitch_name", True)
        delete_vswitch.assert_called_with("vswitch_name", True)

    @mock.patch('zvmsdk.smtclient.SMTClient.delete_nic')
    def test_delete_nic(self, delete_nic):
        self.networkops.delete_nic("userid", "vdev", True)
        delete_nic.assert_called_with("userid", "vdev", active=True)

    @mock.patch('zvmsdk.smtclient.SMTClient.get_nic_info')
    def test_get_nic_info(self, get_nic_info):
        self.networkops.get_nic_info(userid='testid', vswitch='VSWITCH')
        get_nic_info.assert_called_with(userid='testid', nic_id=None,
                                        vswitch='VSWITCH')

    @mock.patch.object(shutil, 'rmtree')
    @mock.patch('zvmsdk.smtclient.SMTClient.execute_cmd')
    @mock.patch('zvmsdk.smtclient.SMTClient.update_guestdb_with_net_set')
    @mock.patch('zvmsdk.smtclient.SMTClient.punch_file')
    @mock.patch('zvmsdk.networkops.NetworkOPS._generate_network_doscript')
    @mock.patch('zvmsdk.smtclient.SMTClient.is_first_network_config')
    @mock.patch('zvmsdk.smtclient.SMTClient.get_guest_temp_path')
    def test_network_configuration(self, temp_path, is_first, doscript,
                                   punch, update_guestdb, execute_cmd,
                                   rmtree):
        """Full flow: generate DOSCRIPT, punch it, record in the guest db,
        then (active=True) run the activation commands on the guest."""
        userid = 'fakeid'
        os_version = 'rhel7.2'
        network_info = []
        network_file_path = '/tmp'
        active_cmds = 'execute command'
        network_doscript = 'file'
        temp_path.return_value = network_file_path
        is_first.return_value = True
        doscript.return_value = (network_doscript, active_cmds)
        rmtree.return_value = None
        self.networkops.network_configuration(userid, os_version,
                                              network_info, active=True)
        temp_path.assert_called_with(userid)
        is_first.assert_called_with(userid)
        doscript.assert_called_with(userid, os_version, network_info,
                                    network_file_path, True, active=True)
        punch.assert_called_with(userid, network_doscript, "X")
        update_guestdb.assert_called_with(userid)
        execute_cmd.assert_called_with(userid, active_cmds)

    @mock.patch('zvmsdk.dist.LinuxDistManager.get_linux_dist')
    @mock.patch.object(dist.rhel7, 'create_network_configuration_files')
    @mock.patch('zvmsdk.networkops.NetworkOPS._create_znetconfig')
    @mock.patch('zvmsdk.networkops.NetworkOPS._add_file')
    @mock.patch('zvmsdk.networkops.NetworkOPS._create_invokeScript')
    @mock.patch('zvmsdk.networkops.NetworkOPS._create_network_doscript')
    def test_generate_network_doscript_not_active(self, doscript,
                                                  invokeScript, add_file,
                                                  znetconfig, config,
                                                  linux_dist):
        """active=False: no activation command string is produced."""
        net_conf_files = [('target1', 'content1')]
        net_cmd_file = [('target2', 'content2')]
        net_conf_cmds = ''
        clean_cmd = ''
        net_enable = ''
        userid = 'fakeid'
        os_version = 'rhel7.2'
        network_info = []
        first = False
        network_file_path = '/tmp'
        files_and_cmds = net_conf_files, net_conf_cmds, clean_cmd, net_enable
        files_map = []
        files_map.append({'target_path': 'target1',
                          'source_file': "0000"})
        files_map.append({'target_path': 'target2',
                          'source_file': "0001"})
        linux_dist.return_value = dist.rhel7
        config.return_value = files_and_cmds
        znetconfig.return_value = net_cmd_file
        add_file.return_value = None
        invokeScript.return_value = None
        doscript.return_value = 'result1'
        r1, r2 = self.networkops._generate_network_doscript(
            userid, os_version, network_info, network_file_path, first,
            active=False)
        linux_dist.assert_called_with(os_version)
        config.assert_called_with(network_file_path, network_info, first,
                                  active=False)
        invokeScript.assert_called_with(network_file_path, clean_cmd,
                                        files_map)
        doscript.assert_called_with(network_file_path)
        self.assertEqual(r1, 'result1')
        # No activation commands when active=False.
        self.assertEqual(r2, '')

    @mock.patch('zvmsdk.dist.LinuxDistManager.get_linux_dist')
    @mock.patch.object(dist.rhel7, 'create_network_configuration_files')
    @mock.patch.object(dist.rhel7, 'create_active_net_interf_cmd')
    @mock.patch('zvmsdk.networkops.NetworkOPS._create_znetconfig')
    @mock.patch('zvmsdk.networkops.NetworkOPS._add_file')
    @mock.patch('zvmsdk.networkops.NetworkOPS._create_invokeScript')
    @mock.patch('zvmsdk.networkops.NetworkOPS._create_network_doscript')
    def test_generate_network_doscript_active(self, doscript, invokeScript,
                                              add_file, znetconfig,
                                              active_cmd, config,
                                              linux_dist):
        """active=True: the distro's activation command is returned too."""
        net_conf_files = [('target1', 'content1')]
        net_cmd_file = [('target2', 'content2')]
        active_net_cmd = 'create_active_net_interf_cmd'
        net_conf_cmds = ''
        clean_cmd = ''
        net_enable = ''
        userid = 'fakeid'
        os_version = 'rhel7.2'
        network_info = []
        first = False
        network_file_path = '/tmp'
        files_and_cmds = net_conf_files, net_conf_cmds, clean_cmd, net_enable
        files_map = []
        files_map.append({'target_path': 'target1',
                          'source_file': "0000"})
        files_map.append({'target_path': 'target2',
                          'source_file': "0001"})
        linux_dist.return_value = dist.rhel7
        config.return_value = files_and_cmds
        active_cmd.return_value = active_net_cmd
        znetconfig.return_value = net_cmd_file
        add_file.return_value = None
        invokeScript.return_value = None
        doscript.return_value = 'result1'
        r1, r2 = self.networkops._generate_network_doscript(
            userid, os_version, network_info, network_file_path, first,
            active=True)
        linux_dist.assert_called_with(os_version)
        config.assert_called_with(network_file_path, network_info, first,
                                  active=True)
        invokeScript.assert_called_with(network_file_path, clean_cmd,
                                        files_map)
        doscript.assert_called_with(network_file_path)
        self.assertEqual(r1, 'result1')
        self.assertEqual(r2, active_net_cmd)

    @mock.patch('zvmsdk.smtclient.SMTClient.query_vswitch')
    def test_vswitch_query(self, query_vswitch):
        self.networkops.vswitch_query("vswitch_name")
        query_vswitch.assert_called_with("vswitch_name")

    @mock.patch.object(shutil, 'rmtree')
    @mock.patch('zvmsdk.smtclient.SMTClient.execute_cmd')
    @mock.patch('zvmsdk.smtclient.SMTClient.punch_file')
    @mock.patch('zvmsdk.networkops.NetworkOPS._add_file')
    @mock.patch('zvmsdk.networkops.NetworkOPS._create_znetconfig')
    @mock.patch.object(dist.rhel7, 'get_network_configuration_files')
    @mock.patch.object(dist.rhel7, 'delete_vdev_info')
    @mock.patch.object(dist.rhel7, 'create_active_net_interf_cmd')
    @mock.patch('zvmsdk.dist.LinuxDistManager.get_linux_dist')
    @mock.patch('zvmsdk.smtclient.SMTClient.get_guest_temp_path')
    def test_delete_network_configuration(self, temp_path, linux_dist,
                                          active_cmd, delete_vdev,
                                          get_netconf_files, znetconfig,
                                          add_file, punch, execute_cmd,
                                          rmtree):
        """Deleting a NIC punches a DEL<vdev>.sh script and, when active,
        executes the distro's activation command on the guest."""
        userid = 'fakeid'
        os_version = 'rhel7.2'
        vdev = '1000'
        net_cmd_file = [('target', 'content')]
        active_net_cmd = 'create_active_net_interf_cmd'
        delete_vdev_info = 'delete_vdev_info'
        get_network_configuration_files = 'network_conf_file'
        network_file_path = '/tmp'
        temp_path.return_value = network_file_path
        linux_dist.return_value = dist.rhel7
        active_cmd.return_value = active_net_cmd
        delete_vdev.return_value = delete_vdev_info
        get_netconf_files.return_value = get_network_configuration_files
        znetconfig.return_value = net_cmd_file
        add_file.return_value = None
        rmtree.return_value = None
        self.networkops.delete_network_configuration(userid, os_version,
                                                     vdev, active=True)
        temp_path.assert_called_with(userid)
        linux_dist.assert_called_with(os_version)
        get_netconf_files.assert_called_with(vdev)
        delete_vdev.assert_called_with(vdev)
        punch.assert_called_with(userid, '/tmp/DEL1000.sh', "X")
        execute_cmd.assert_called_with(userid, active_net_cmd)
@mock.patch('zvmsdk.smtclient.SMTClient.dedicate_OSA') def test_dedicate_OSA(self, dedicate_OSA): self.networkops.dedicate_OSA("fakeid", 'F000', vdev='1000', active=True) dedicate_OSA.assert_called_with("fakeid", 'F000', vdev='1000', active=True) zVMCloudConnector-1.6.3/zvmsdk/tests/unit/test_api.py0000664000175000017510000010403114315210052022333 0ustar ruirui00000000000000# Copyright 2017,2022 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import six from zvmsdk import api from zvmsdk import exception from zvmsdk.tests.unit import base from zvmsdk import vmops class SDKAPITestCase(base.SDKTestCase): """Testcases for compute APIs.""" @classmethod def setUpClass(cls): super(SDKAPITestCase, cls).setUpClass() cls.userid = 'TESTUID' cls.userid_list = ["USERID1", "USERID2"] def setUp(self): super(SDKAPITestCase, self).setUp() vmops.VMOps.check_guests_exist_in_db = mock.MagicMock() patcher = mock.patch('zvmsdk.volumeop.FCPManager.sync_db') self.addCleanup(patcher.stop) self.mock_sync_db = patcher.start() self.api = api.SDKAPI() def test_init_ComputeAPI(self): self.assertTrue(isinstance(self.api, api.SDKAPI)) @mock.patch("zvmsdk.volumeop.VolumeOperatorAPI.edit_fcp_template") def test_edit_fcp_template(self, mock_edit_tmpl): """ Test edit_fcp_template """ tmpl_id = 'fake_id' kwargs = { 'name': 'new_name', 'description': 'new_desc', 'fcp_devices': '1A00-1A03;1B00-1B03', 'host_default': False, 'default_sp_list': ['sp1'], 'min_fcp_paths_count': 2} 
self.api.edit_fcp_template(tmpl_id, **kwargs) mock_edit_tmpl.assert_called_once_with(tmpl_id, **kwargs) @mock.patch("zvmsdk.volumeop.VolumeOperatorAPI.get_fcp_templates") def test_get_fcp_templates(self, mock_get_tmpl): """ Test get_fcp_templates """ tmpl_list = ['fake_id'] assigner_id = 'fake_user' host_default = True default_sp_list = ['fake_sp'] self.api.get_fcp_templates(template_id_list=tmpl_list, assigner_id=assigner_id, default_sp_list=default_sp_list, host_default=host_default) mock_get_tmpl.assert_called_once_with(template_id_list=tmpl_list, assigner_id=assigner_id, default_sp_list=default_sp_list, host_default=host_default) @mock.patch("zvmsdk.volumeop.VolumeOperatorAPI.get_fcp_templates_details") def test_get_fcp_templates_details(self, mock_get_tmpl_details): """ Test get_fcp_templates_details """ tmpl_list = ['fake_id'] self.api.get_fcp_templates_details(template_id_list=tmpl_list, raw=True, statistics=True, sync_with_zvm=False) mock_get_tmpl_details.assert_called_once_with(template_id_list=['fake_id'], raw=True, statistics=True, sync_with_zvm=False) @mock.patch("zvmsdk.volumeop.VolumeOperatorAPI.delete_fcp_template") def test_delete_fcp_template(self, mock_del_tmpl): self.api.delete_fcp_template('fake_id') mock_del_tmpl.assert_called_once_with('fake_id') @mock.patch("zvmsdk.vmops.VMOps.get_power_state") def test_guest_get_power_state_real(self, gstate): self.api.guest_get_power_state_real(self.userid) gstate.assert_called_once_with(self.userid) @mock.patch("zvmsdk.utils.check_userid_exist") @mock.patch("zvmsdk.vmops.VMOps.get_power_state") def test_guest_get_power_state(self, gstate, chk_uid): chk_uid.return_value = True self.api.guest_get_power_state(self.userid) chk_uid.assert_called_once_with(self.userid) gstate.assert_called_once_with(self.userid) chk_uid.reset_mock() gstate.reset_mock() chk_uid.return_value = False self.assertRaises(exception.SDKObjectNotExistError, self.api.guest_get_power_state, self.userid) 
chk_uid.assert_called_once_with(self.userid) gstate.assert_not_called() @mock.patch("zvmsdk.vmops.VMOps.get_info") def test_guest_get_info(self, ginfo): self.api.guest_get_info(self.userid) ginfo.assert_called_once_with(self.userid) @mock.patch("zvmsdk.vmops.VMOps.get_definition_info") def test_guest_get_user_direct_(self, ginfo): ginfo.return_value = {'user_direct': ['CPU 00 BASE', 'USER USERID1 PASSWORD 4096m ']} expected_value = {'user_direct': ['CPU 00 BASE', 'USER USERID1 ****** 4096m ']} result = self.api.guest_get_user_direct(self.userid) ginfo.assert_called_once_with(self.userid) self.assertEqual(result, expected_value) @mock.patch("zvmsdk.vmops.VMOps.get_adapters_info") def test_guest_get_adapters_info(self, adapters_info): self.api.guest_get_adapters_info(self.userid) adapters_info.assert_called_once_with(self.userid) @mock.patch("zvmsdk.vmops.VMOps.guest_deploy") def test_guest_deploy(self, guest_deploy): user_id = 'fakevm' image_name = 'fakeimg' transportfiles = '/tmp/transport.tgz' vdev = '0100' self.api.guest_deploy(user_id, image_name, transportfiles=transportfiles, vdev=vdev) guest_deploy.assert_called_with(user_id.upper(), image_name, transportfiles, None, vdev, None, False) @mock.patch("zvmsdk.imageops.ImageOps.image_import") def test_image_import(self, image_import): image_name = '95a4da37-9f9b-4fb2-841f-f0bb441b7544' url = "file:///install/temp/test.img" image_meta = {'os_version': "rhel6.7"} self.api.image_import(image_name, url, image_meta) image_import.assert_called_once_with(image_name, url, image_meta, remote_host=None) @mock.patch("zvmsdk.imageops.ImageOps.image_export") def test_image_export(self, image_export): image_name = '95a4da37-9f9b-4fb2-841f-f0bb441b7544' dest_url = "file:///install/temp/test.img" self.api.image_export(image_name, dest_url) image_export.assert_called_once_with(image_name, dest_url, None) @mock.patch("zvmsdk.vmops.VMOps.create_vm") def test_guest_create(self, create_vm): vcpus = 1 memory = 1024 disk_list = [] 
user_profile = 'profile' max_cpu = 10 max_mem = '4G' self.api.guest_create(self.userid, vcpus, memory, disk_list, user_profile, max_cpu, max_mem) create_vm.assert_called_once_with(self.userid, vcpus, memory, disk_list, user_profile, max_cpu, max_mem, '', '', '', [], {}, '', None, '', '', '', '') @mock.patch("zvmsdk.vmops.VMOps.create_vm") def test_guest_create_with_account(self, create_vm): vcpus = 1 memory = 1024 disk_list = [] user_profile = 'profile' max_cpu = 10 max_mem = '4G' account = "dummy account" self.api.guest_create(self.userid, vcpus, memory, disk_list, user_profile, max_cpu, max_mem, account=account) create_vm.assert_called_once_with(self.userid, vcpus, memory, disk_list, user_profile, max_cpu, max_mem, '', '', '', [], {}, account, None, '', '', '', '') @mock.patch("zvmsdk.vmops.VMOps.create_vm") def test_guest_create_with_cpupool(self, create_vm): vcpus = 1 memory = 1024 disk_list = [] user_profile = 'profile' max_cpu = 10 max_mem = '4G' cschedule = 'POOL1' self.api.guest_create(self.userid, vcpus, memory, disk_list, user_profile, max_cpu, max_mem, cschedule=cschedule) create_vm.assert_called_once_with(self.userid, vcpus, memory, disk_list, user_profile, max_cpu, max_mem, '', '', '', [], {}, '', None, cschedule, '', '', '') @mock.patch("zvmsdk.vmops.VMOps.create_vm") def test_guest_create_with_share(self, create_vm): vcpus = 1 memory = 1024 disk_list = [] user_profile = 'profile' max_cpu = 10 max_mem = '4G' cshare = 'RELATIVE 125' self.api.guest_create(self.userid, vcpus, memory, disk_list, user_profile, max_cpu, max_mem, cshare=cshare) create_vm.assert_called_once_with(self.userid, vcpus, memory, disk_list, user_profile, max_cpu, max_mem, '', '', '', [], {}, '', None, '', cshare, '', '') @mock.patch("zvmsdk.vmops.VMOps.create_vm") def test_guest_create_with_rdomain(self, create_vm): vcpus = 1 memory = 1024 disk_list = [] user_profile = 'profile' max_cpu = 10 max_mem = '4G' rdomain = 'Z15ONLY' self.api.guest_create(self.userid, vcpus, memory, 
disk_list, user_profile, max_cpu, max_mem, rdomain=rdomain) create_vm.assert_called_once_with(self.userid, vcpus, memory, disk_list, user_profile, max_cpu, max_mem, '', '', '', [], {}, '', None, '', '', rdomain, '') @mock.patch("zvmsdk.vmops.VMOps.create_vm") def test_guest_create_with_pcif(self, create_vm): vcpus = 1 memory = 1024 disk_list = [] user_profile = 'profile' max_cpu = 10 max_mem = '4G' pcif = '100:200' self.api.guest_create(self.userid, vcpus, memory, disk_list, user_profile, max_cpu, max_mem, pcif=pcif) create_vm.assert_called_once_with(self.userid, vcpus, memory, disk_list, user_profile, max_cpu, max_mem, '', '', '', [], {}, '', None, '', '', '', pcif) @mock.patch("zvmsdk.vmops.VMOps.create_vm") def test_guest_create_with_comment(self, create_vm): vcpus = 1 memory = 1024 disk_list = [] user_profile = 'profile' max_cpu = 10 max_mem = '4G' comment_list = ["dummy account", "this is a test"] self.api.guest_create(self.userid, vcpus, memory, disk_list, user_profile, max_cpu, max_mem, comment_list=comment_list) create_vm.assert_called_once_with(self.userid, vcpus, memory, disk_list, user_profile, max_cpu, max_mem, '', '', '', [], {}, '', comment_list, '', '', '', '') @mock.patch("zvmsdk.vmops.VMOps.create_vm") def test_guest_create_with_default_profile(self, create_vm): vcpus = 1 memory = 1024 disk_list = [] user_profile = '' max_cpu = 10 max_mem = '4G' base.set_conf('zvm', 'user_profile', 'abc') self.api.guest_create(self.userid, vcpus, memory, disk_list, user_profile, max_cpu, max_mem) create_vm.assert_called_once_with(self.userid, vcpus, memory, disk_list, 'abc', max_cpu, max_mem, '', '', '', [], {}, '', None, '', '', '', '') @mock.patch("zvmsdk.vmops.VMOps.create_vm") def test_guest_create_with_no_disk_pool(self, create_vm): disk_list = [{'size': '1g', 'is_boot_disk': True, 'disk_pool': 'ECKD: eckdpool1'}, {'size': '1g', 'format': 'ext3'}, {'size': '1g', 'format': 'swap'}] vcpus = 1 memory = 1024 user_profile = 'profile' max_cpu = 10 max_mem = '4G' 
base.set_conf('zvm', 'disk_pool', None) self.assertRaises(exception.SDKInvalidInputFormat, self.api.guest_create, self.userid, vcpus, memory, disk_list, user_profile, max_cpu, max_mem) create_vm.assert_not_called() @mock.patch("zvmsdk.vmops.VMOps.create_vm") def test_guest_create_with_no_disk_pool_swap_only(self, create_vm): disk_list = [{'size': '1g', 'format': 'swap'}] vcpus = 1 memory = 1024 user_profile = 'profile' base.set_conf('zvm', 'disk_pool', None) base.set_conf('zvm', 'swap_force_mdisk', False) self.api.guest_create(self.userid, vcpus, memory, disk_list, user_profile) create_vm.assert_called_once_with(self.userid, vcpus, memory, disk_list, user_profile, 32, '64G', '', '', '', [], {}, '', None, '', '', '', '') @mock.patch("zvmsdk.vmops.VMOps.create_vm") def test_guest_create_no_disk_pool_force_mdisk(self, create_vm): disk_list = [{'size': '1g', 'is_boot_disk': True, 'disk_pool': 'ECKD: eckdpool1'}, {'size': '1g', 'format': 'ext3'}, {'size': '1g', 'format': 'swap'}] vcpus = 1 memory = 1024 user_profile = 'profile' max_cpu = 10 max_mem = '4G' # should be no side effect at all base.set_conf('zvm', 'swap_force_mdisk', True) base.set_conf('zvm', 'disk_pool', None) self.assertRaises(exception.SDKInvalidInputFormat, self.api.guest_create, self.userid, vcpus, memory, disk_list, user_profile, max_cpu, max_mem) create_vm.assert_not_called() @mock.patch("zvmsdk.vmops.VMOps.create_vm") def test_guest_create_no_disk_pool_swap_only_force_mdisk(self, create_vm): disk_list = [{'size': '1g', 'format': 'swap'}] vcpus = 1 memory = 1024 user_profile = 'profile' base.set_conf('zvm', 'disk_pool', None) base.set_conf('zvm', 'swap_force_mdisk', True) self.assertRaises(exception.SDKInvalidInputFormat, self.api.guest_create, self.userid, vcpus, memory, disk_list, user_profile) @mock.patch("zvmsdk.vmops.VMOps.create_vm") def test_guest_create_with_default_max_cpu_memory(self, create_vm): vcpus = 1 memory = 1024 disk_list = [] user_profile = 'profile' 
self.api.guest_create(self.userid, vcpus, memory, disk_list, user_profile) create_vm.assert_called_once_with(self.userid, vcpus, memory, disk_list, user_profile, 32, '64G', '', '', '', [], {}, '', None, '', '', '', '') @mock.patch("zvmsdk.imageops.ImageOps.image_query") def test_image_query(self, image_query): imagekeyword = 'eae09a9f_7958_4024_a58c_83d3b2fc0aab' self.api.image_query(imagekeyword) image_query.assert_called_once_with(imagekeyword) @mock.patch("zvmsdk.vmops.VMOps.delete_vm") @mock.patch("zvmsdk.vmops.VMOps.check_guests_exist_in_db") def test_guest_delete(self, cge, delete_vm): cge.return_value = True self.api.guest_delete(self.userid) cge.assert_called_once_with(self.userid, raise_exc=False) delete_vm.assert_called_once_with(self.userid) @mock.patch("zvmsdk.vmops.VMOps.delete_vm") @mock.patch("zvmsdk.vmops.VMOps.check_guests_exist_in_db") def test_guest_delete_userid_in_lower_case(self, cge, delete_vm): cge.return_value = True self.api.guest_delete('testuid') cge.assert_called_once_with(self.userid, raise_exc=False) delete_vm.assert_called_once_with(self.userid) @mock.patch("zvmsdk.utils.check_userid_exist") @mock.patch("zvmsdk.vmops.VMOps.check_guests_exist_in_db") def test_guest_delete_not_exist(self, cge, cue): cge.return_value = False cue.return_value = False self.api.guest_delete(self.userid) cge.assert_called_once_with(self.userid, raise_exc=False) cue.assert_called_once_with(self.userid) @mock.patch("zvmsdk.utils.check_userid_exist") @mock.patch("zvmsdk.vmops.VMOps.check_guests_exist_in_db") def test_guest_delete_not_exist_in_db(self, cge, cue): cge.return_value = False cue.return_value = True self.assertRaises(exception.SDKObjectNotExistError, self.api.guest_delete, self.userid) cge.assert_called_once_with(self.userid, raise_exc=False) cue.assert_called_once_with(self.userid) @mock.patch("zvmsdk.monitor.ZVMMonitor.inspect_stats") def test_guest_inspect_cpus_list(self, inspect_stats): self.api.guest_inspect_stats(self.userid_list) 
        inspect_stats.assert_called_once_with(self.userid_list)

    @mock.patch("zvmsdk.monitor.ZVMMonitor.inspect_stats")
    def test_guest_inspect_cpus_single(self, inspect_stats):
        # A single userid string must be wrapped into a list before
        # being handed to the monitor layer.
        self.api.guest_inspect_stats(self.userid)
        inspect_stats.assert_called_once_with([self.userid])

    @mock.patch("zvmsdk.monitor.ZVMMonitor.inspect_vnics")
    def test_guest_inspect_vnics_list(self, inspect_vnics):
        # A list of userids is passed through unchanged.
        self.api.guest_inspect_vnics(self.userid_list)
        inspect_vnics.assert_called_once_with(self.userid_list)

    @mock.patch("zvmsdk.monitor.ZVMMonitor.inspect_vnics")
    def test_guest_inspect_vnics_single(self, inspect_vnics):
        # Single userid is normalized to a one-element list.
        self.api.guest_inspect_vnics(self.userid)
        inspect_vnics.assert_called_once_with([self.userid])

    @mock.patch("zvmsdk.vmops.VMOps.guest_stop")
    def test_guest_stop(self, gs):
        # Plain stop: no extra keyword arguments forwarded.
        self.api.guest_stop(self.userid)
        gs.assert_called_once_with(self.userid)

    @mock.patch("zvmsdk.vmops.VMOps.guest_stop")
    def test_guest_stop_with_timeout(self, gs):
        # timeout is forwarded to vmops as a keyword argument.
        self.api.guest_stop(self.userid, timeout=300)
        gs.assert_called_once_with(self.userid, timeout=300)

    @mock.patch("zvmsdk.vmops.VMOps.guest_softstop")
    def test_guest_softstop(self, gss):
        self.api.guest_softstop(self.userid, timeout=300)
        gss.assert_called_once_with(self.userid, timeout=300)

    @mock.patch("zvmsdk.vmops.VMOps.guest_pause")
    def test_guest_pause(self, gp):
        self.api.guest_pause(self.userid)
        gp.assert_called_once_with(self.userid)

    @mock.patch("zvmsdk.vmops.VMOps.guest_unpause")
    def test_guest_unpause(self, gup):
        self.api.guest_unpause(self.userid)
        gup.assert_called_once_with(self.userid)

    @mock.patch("zvmsdk.vmops.VMOps.guest_config_minidisks")
    def test_guest_process_additional_disks(self, config_disks):
        # guest_config_minidisks forwards the disk list untouched.
        disk_list = [{'vdev': '0101',
                      'format': 'ext3',
                      'mntdir': '/mnt/0101'}]
        self.api.guest_config_minidisks(self.userid, disk_list)
        config_disks.assert_called_once_with(self.userid, disk_list)

    @mock.patch("zvmsdk.imageops.ImageOps.image_delete")
    def test_image_delete(self, image_delete):
        image_name = 'eae09a9f_7958_4024_a58c_83d3b2fc0aab'
self.api.image_delete(image_name) image_delete.assert_called_once_with(image_name) def test_set_vswitch(self): self.assertRaises(exception.SDKInvalidInputFormat, self.api.vswitch_set, "vswitch_name", unknown='fake_id') @mock.patch("zvmsdk.vmops.VMOps.create_disks") def test_guest_add_disks(self, cds): disk_list = [{'size': '1g'}] self.api.guest_create_disks(self.userid, disk_list) cds.assert_called_once_with(self.userid, disk_list) @mock.patch("zvmsdk.vmops.VMOps.create_disks") def test_guest_add_disks_no_disk_pool(self, cds): disk_list = [{'size': '1g', 'is_boot_disk': True, 'disk_pool': 'ECKD: eckdpool1'}, {'size': '1g', 'format': 'ext3'}] base.set_conf('zvm', 'disk_pool', None) self.assertRaises(exception.SDKInvalidInputFormat, self.api.guest_create_disks, self.userid, disk_list) cds.ssert_not_called() @mock.patch("zvmsdk.vmops.VMOps.create_disks") def test_guest_add_disks_nothing_to_do(self, cds): self.api.guest_create_disks('userid', []) cds.assert_not_called() @mock.patch("zvmsdk.vmops.VMOps.delete_disks") def test_guest_delete_disks(self, dds): vdev_list = ['0102', '0103'] self.api.guest_delete_disks(self.userid, vdev_list) dds.assert_called_once_with(self.userid, vdev_list) @mock.patch("zvmsdk.vmops.VMOps.live_resize_cpus") def test_guest_live_resize_cpus(self, live_resize_cpus): cpu_cnt = 3 self.api.guest_live_resize_cpus(self.userid, cpu_cnt) live_resize_cpus.assert_called_once_with(self.userid, cpu_cnt) @mock.patch("zvmsdk.vmops.VMOps.resize_cpus") def test_guest_resize_cpus(self, resize_cpus): cpu_cnt = 3 self.api.guest_resize_cpus(self.userid, cpu_cnt) resize_cpus.assert_called_once_with(self.userid, cpu_cnt) @mock.patch("zvmsdk.vmops.VMOps.live_resize_memory") def test_guest_live_resize_mem(self, live_resize_memory): size = "1024m" self.api.guest_live_resize_mem(self.userid, size) live_resize_memory.assert_called_once_with(self.userid, size) @mock.patch("zvmsdk.vmops.VMOps.resize_memory") def test_guest_resize_mem(self, resize_memory): size = "2g" 
self.api.guest_resize_mem(self.userid, size) resize_memory.assert_called_once_with(self.userid, size) @mock.patch("zvmsdk.vmops.VMOps.guest_grow_root_volume") def test_guest_grow_root_volume(self, grow_root_volume): os_version = "RHEL7.8" self.api.guest_grow_root_volume(self.userid, os_version) grow_root_volume.assert_called_once_with(self.userid, os_version) @mock.patch("zvmsdk.networkops.NetworkOPS.grant_user_to_vswitch") def test_vswitch_grant_user(self, guv): self.api.vswitch_grant_user("testvsw", self.userid) guv.assert_called_once_with("testvsw", self.userid) @mock.patch("zvmsdk.volumeop.VolumeOperatorAPI.attach_volume_to_instance") def test_volume_attach(self, mock_attach): connection_info = {'platform': 'x86_64', 'ip': '1.2.3.4', 'os_version': 'rhel7', 'multipath': False, 'target_wwpn': '1111', 'target_lun': '2222', 'zvm_fcp': 'b83c', 'assigner_id': 'user1'} self.api.volume_attach(connection_info) mock_attach.assert_called_once_with(connection_info) @mock.patch("zvmsdk.volumeop.VolumeOperatorAPI.volume_refresh_bootmap") def test_refresh_bootmap(self, mock_attach): fcpchannel = ['5d71'] wwpn = ['5005076802100c1b', '5005076802200c1b'] lun = '01000000000000' wwid = '600507640083826de00000000000605b' fcp_template_id = 'fake_tmpl_id' self.api.volume_refresh_bootmap(fcpchannel, wwpn, lun, wwid, fcp_template_id=fcp_template_id) mock_attach.assert_called_once_with(fcpchannel, wwpn, lun, wwid=wwid, transportfiles=None, guest_networks=None, fcp_template_id=fcp_template_id) @mock.patch("zvmsdk.volumeop.VolumeOperatorAPI." 
"detach_volume_from_instance") def test_volume_detach(self, mock_detach): connection_info = {'platform': 'x86_64', 'ip': '1.2.3.4', 'os_version': 'rhel7', 'multipath': False, 'target_wwpn': '1111', 'target_lun': '2222', 'zvm_fcp': 'b83c', 'assigner_id': 'user1'} self.api.volume_detach(connection_info) mock_detach.assert_called_once_with(connection_info) @mock.patch("zvmsdk.utils.check_userid_exist") @mock.patch("zvmsdk.smtclient.SMTClient.get_adapters_info") @mock.patch("zvmsdk.database.GuestDbOperator.add_guest_registered") @mock.patch("zvmsdk.database.NetworkDbOperator.switch_add_record") def test_guest_register(self, networkdb_add, guestdb_reg, get_adapters_info, chk_usr): networkdb_add.return_value = '' guestdb_reg.return_value = '' adapters = [{'adapter_address': '1000', 'adapter_status': '02', 'lan_owner': 'SYSTEM', 'lan_name': 'VSC11590', 'mac_address': '02:55:36:EF:50:91', 'mac_ip_version': '4', 'mac_ip_address': '1.2.3.4'}] get_adapters_info.return_value = adapters chk_usr.return_value = True meta_data = 'rhel7' net_set = '1' port_macs = {'EF5091': '6e2ecc4f-14a2-4f33-9f12-5ac4a42f97e7', '69FCF1': '389dee5e-7b03-405c-b1e8-7c9c235d1425' } self.api.guest_register(self.userid, meta_data, net_set, port_macs) networkdb_add.assert_called_once_with(self.userid, '1000', '6e2ecc4f-14a2-4f33-9f12' '-5ac4a42f97e7', 'VSC11590') guestdb_reg.assert_called_once_with(self.userid, 'rhel7', '1') get_adapters_info.assert_called_once_with(self.userid) chk_usr.assert_called_once_with(self.userid) @mock.patch("zvmsdk.utils.check_userid_exist") @mock.patch("zvmsdk.smtclient.SMTClient.get_adapters_info") @mock.patch("zvmsdk.database.GuestDbOperator.add_guest_registered") @mock.patch("zvmsdk.database.NetworkDbOperator.switch_add_record") def test_guest_register_invalid_portmacs(self, networkdb_add, guestdb_reg, get_adapters_info, chk_usr): networkdb_add.return_value = '' guestdb_reg.return_value = '' adapters = [{'adapter_address': '1000', 'adapter_status': '02', 'lan_owner': 
'SYSTEM', 'lan_name': 'VSC11590', 'mac_address': '02:55:36:EF:50:91', 'mac_ip_version': '4', 'mac_ip_address': '1.2.3.4'}] get_adapters_info.return_value = adapters chk_usr.return_value = True meta_data = 'rhel7' net_set = '1' port_macs = '6e2ecc4f-14a2-4f33-9f12-5ac4a42f97e7' self.assertRaises(exception.SDKInvalidInputFormat, self.api.guest_register, self.userid, meta_data, net_set, port_macs) @mock.patch("zvmsdk.utils.check_userid_exist") @mock.patch("zvmsdk.smtclient.SMTClient.get_adapters_info") @mock.patch("zvmsdk.database.GuestDbOperator.add_guest_registered") @mock.patch("zvmsdk.database.NetworkDbOperator.switch_add_record") def test_guest_register_no_port_macs(self, networkdb_add, guestdb_reg, get_adapters_info, chk_usr): networkdb_add.return_value = '' guestdb_reg.return_value = '' adapters = [{'adapter_address': '1000', 'adapter_status': '02', 'lan_owner': 'SYSTEM', 'lan_name': 'VSC11590', 'mac_address': '02:55:36:EF:50:91', 'mac_ip_version': '4', 'mac_ip_address': '1.2.3.4'}] get_adapters_info.return_value = adapters chk_usr.return_value = True meta_data = 'rhel7' net_set = '1' self.api.guest_register(self.userid, meta_data, net_set) networkdb_add.assert_called_once_with(self.userid, '1000', None, 'VSC11590') guestdb_reg.assert_called_once_with(self.userid, 'rhel7', '1') get_adapters_info.assert_called_once_with(self.userid) chk_usr.assert_called_once_with(self.userid) @mock.patch("zvmsdk.utils.check_userid_exist") @mock.patch("zvmsdk.smtclient.SMTClient.get_adapters_info") @mock.patch("zvmsdk.database.GuestDbOperator.add_guest_registered") @mock.patch("zvmsdk.database.NetworkDbOperator.switch_add_record") @mock.patch("zvmsdk.database.GuestDbOperator.update_guest_by_userid") @mock.patch("zvmsdk.database.GuestDbOperator.get_comments_by_userid") @mock.patch("zvmsdk.database.GuestDbOperator.get_migrated_guest_list") @mock.patch("zvmsdk.database.GuestDbOperator.get_guest_by_userid") def test_guest_register_guest_in_db(self, get_guest, get_mig_guest, 
get_comments, update_guest, networkdb_add, guestdb_reg, get_adapters_info, chk_usr): get_guest.return_value = 'fake_guest' get_mig_guest.return_value = self.userid + ' other info' get_comments.return_value = {'migrated': 1} update_guest.return_value = '' # Below mocks shall not be called networkdb_add.return_value = '' guestdb_reg.return_value = '' get_adapters_info.return_value = [] chk_usr.return_value = True meta_data = 'rhel7' net_set = '1' self.api.guest_register(self.userid, meta_data, net_set) get_guest.assert_called_once_with(self.userid) get_mig_guest.assert_called_once_with() get_comments.assert_called_once_with(self.userid) update_guest.assert_called_once_with(self.userid, comments={'migrated': 0}) chk_usr.assert_called_once_with(self.userid) networkdb_add.assert_not_called() guestdb_reg.assert_not_called() get_adapters_info.assert_not_called() @mock.patch("zvmsdk.vmops.VMOps.check_guests_exist_in_db") @mock.patch("zvmsdk.database.NetworkDbOperator." "switch_delete_record_for_userid") @mock.patch("zvmsdk.database.GuestDbOperator.delete_guest_by_userid") def test_guest_deregister(self, guestdb_del, networkdb_del, chk_db): guestdb_del.return_value = '' networkdb_del.return_value = '' chk_db.return_value = True self.api.guest_deregister(self.userid) guestdb_del.assert_called_once_with(self.userid) networkdb_del.assert_called_once_with(self.userid) chk_db.assert_called_once_with(self.userid, raise_exc=False) @mock.patch("zvmsdk.vmops.VMOps.check_guests_exist_in_db") @mock.patch("zvmsdk.database.NetworkDbOperator." 
"switch_delete_record_for_userid") @mock.patch("zvmsdk.database.GuestDbOperator.delete_guest_by_userid") def test_guest_deregister_not_exists(self, guestdb_del, networkdb_del, chk_db): guestdb_del.return_value = '' networkdb_del.return_value = '' chk_db.return_value = False self.api.guest_deregister(self.userid) guestdb_del.assert_called_once_with(self.userid) networkdb_del.assert_called_once_with(self.userid) chk_db.assert_called_once_with(self.userid, raise_exc=False) @mock.patch("zvmsdk.hostops.HOSTOps.guest_list") def test_host_get_guest_list(self, guest_list): self.api.host_get_guest_list() guest_list.assert_called_once_with() @mock.patch("zvmsdk.hostops.HOSTOps.diskpool_get_volumes") def test_host_get_diskpool_volumes(self, diskpool_vols): base.set_conf('zvm', 'disk_pool', None) disk_pool = 'ECKD:IAS1PL' result = self.api.host_get_diskpool_volumes(disk_pool) diskpool_vols.assert_called_once_with('ECKD:IAS1PL') # Test disk_pool is None disk_pool = None try: self.api.host_get_diskpool_volumes(disk_pool) except Exception as exc: errmsg = ("Invalid disk_pool input None, disk_pool should be" " configured for sdkserver.") result = errmsg in six.text_type(exc) self.assertEqual(result, True) pass @mock.patch("zvmsdk.hostops.HOSTOps.get_volume_info") def test_host_get_volume_info(self, volume_info): volume = 'VOLUM1' result = self.api.host_get_volume_info(volume) volume_info.assert_called_once_with(volume) # Test volume is None volume = None try: self.api.host_get_volume_info(volume) except Exception as exc: errmsg = ("Invalid volume input None, volume" " must be specified.") result = errmsg in six.text_type(exc) self.assertEqual(result, True) pass @mock.patch("zvmsdk.hostops.HOSTOps.diskpool_get_info") def test_host_diskpool_get_info(self, dp_info): base.set_conf('zvm', 'disk_pool', None) results = self.api.host_diskpool_get_info() self.assertEqual(results['disk_total'], 0) self.assertEqual(results['disk_available'], 0) self.assertEqual(results['disk_used'], 0) 
dp_info.ssert_not_called() zVMCloudConnector-1.6.3/zvmsdk/tests/unit/test_hostops.py0000664000175000017510000001565614263437505023317 0ustar ruirui00000000000000# Copyright 2017,2022 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import six from zvmsdk import config from zvmsdk import hostops from zvmsdk import exception from zvmsdk.tests.unit import base CONF = config.CONF class SDKHostOpsTestCase(base.SDKTestCase): def setUp(self): self._hostops = hostops.get_hostops() @mock.patch("zvmsdk.smtclient.SMTClient.host_get_ssi_info") @mock.patch("zvmsdk.smtclient.SMTClient.get_all_user_direct") def test_guest_list(self, host_get_ssi_info, get_all_user_direct): host_get_ssi_info.return_value = [] self._hostops.guest_list() get_all_user_direct.assert_called_once_with() host_get_ssi_info.assert_called_once() @mock.patch("zvmsdk.smtclient.SMTClient.host_get_ssi_info") @mock.patch("zvmsdk.utils.check_userid_on_others") @mock.patch("zvmsdk.smtclient.SMTClient.get_all_user_direct") def test_guest_list_ssi_host(self, host_get_ssi_info, check_userid_on_others, get_all_user_direct): res_ssi = ['ssi_name = SSI', 'ssi_mode = Stable', 'ssi_pdr = IAS7CM_on_139E'] host_get_ssi_info.return_value = res_ssi self._hostops.guest_list() get_all_user_direct.assert_called_once_with() host_get_ssi_info.assert_called_once() check_userid_on_others.assert_called() @mock.patch("zvmsdk.hostops.HOSTOps.diskpool_get_info") @mock.patch("zvmsdk.smtclient.SMTClient.get_host_info") def 
test_get_host_info(self, get_host_info, diskpool_get_info): get_host_info.return_value = { "zcc_userid": "FAKEUSER", "zvm_host": "FAKENODE", "zhcp": "fakehcp.fake.com", "cec_vendor": "FAKE", "cec_model": "2097", "hypervisor_os": "z/VM 6.1.0", "hypervisor_name": "fakenode", "architecture": "s390x", "lpar_cpu_total": "10", "lpar_cpu_used": "10", "lpar_memory_total": "16G", "lpar_memory_used": "16.0G", "lpar_memory_offline": "0", "ipl_time": "IPL at 03/13/14 21:43:12 EDT", } diskpool_get_info.return_value = { "disk_total": 406105, "disk_used": 367263, "disk_available": 38843, } host_info = self._hostops.get_info() get_host_info.assert_called_once_with() base.set_conf('zvm', 'disk_pool', 'ECKD:TESTPOOL') diskpool = CONF.zvm.disk_pool.split(':')[1] diskpool_get_info.assert_called_once_with(diskpool) self.assertEqual(host_info['vcpus'], 10) self.assertEqual(host_info['hypervisor_version'], 610) self.assertEqual(host_info['disk_total'], 406105) # Test disk_pool is None base.set_conf('zvm', 'disk_pool', None) host_info = self._hostops.get_info() self.assertEqual(host_info['disk_total'], 0) self.assertEqual(host_info['disk_used'], 0) self.assertEqual(host_info['disk_available'], 0) @mock.patch("zvmsdk.smtclient.SMTClient.get_diskpool_info") def test_get_diskpool_info(self, get_diskpool_info): get_diskpool_info.return_value = { "disk_total": "406105.3G", "disk_used": "367262.6G", "disk_available": "38842.7M", } dp_info = self._hostops.diskpool_get_info("fakepool") get_diskpool_info.assert_called_once_with("fakepool") self.assertEqual(dp_info['disk_total'], 406105) self.assertEqual(dp_info['disk_used'], 367263) self.assertEqual(dp_info['disk_available'], 38) @mock.patch("time.time") @mock.patch("zvmsdk.hostops.HOSTOps._cache_enabled") @mock.patch("zvmsdk.smtclient.SMTClient.get_diskpool_volumes") def test_diskpool_get_volumes(self, get_diskpool_vols, cache_enable, mock_time): self._hostops._volumes = {} volumes = {'diskpool_volumes': 'IAS100 IAS101'} 
get_diskpool_vols.return_value = volumes cache_enable.return_value = True mock_time.return_value = 1 diskpool_vols = self._hostops.diskpool_get_volumes("eckd:fakepool") get_diskpool_vols.assert_called_once_with("FAKEPOOL") self.assertEqual(diskpool_vols['diskpool_volumes'], 'IAS100 IAS101') self.assertEqual(self._hostops.disk_pool, "eckd:fakepool") # Test has cache data volumes = {'diskpool_volumes': 'IAS400 IAS501'} base.set_conf('monitor', 'cache_interval', '60') self._hostops._volumes = volumes diskpool_vols = self._hostops.diskpool_get_volumes("eckd:fakepool") self.assertEqual(1, get_diskpool_vols.call_count) self.assertEqual(diskpool_vols['diskpool_volumes'], 'IAS400 IAS501') self.assertEqual(self._hostops.disk_pool, "eckd:fakepool") # Test CONF.zvm.disk_pool has changed volumes = {'diskpool_volumes': 'IAS401 IAS601'} get_diskpool_vols.return_value = volumes base.set_conf('monitor', 'cache_interval', '60') diskpool_vols = self._hostops.diskpool_get_volumes("eckd:fakepool2") self.assertEqual(diskpool_vols['diskpool_volumes'], 'IAS401 IAS601') self.assertEqual(self._hostops.disk_pool, "eckd:fakepool2") @mock.patch("zvmsdk.smtclient.SMTClient.get_volume_info") def test_get_volume_info(self, get_vol_infos): self._hostops._volume_infos = None get_vol_infos.return_value = {'IASFBA': {'volume_type': '9336-ET', 'volume_size': '564718'}, 'IAS1CM': {'volume_type': '3390-09', 'volume_size': '60102'}} volume_info = self._hostops.get_volume_info('IAS1CM') get_vol_infos.assert_called_once_with() self.assertEqual(volume_info['volume_type'], '3390-09') self.assertEqual(volume_info['volume_size'], '60102') # Test cache is not None self._hostops._volume_infos = get_vol_infos.return_value volume_info = self._hostops.get_volume_info('IASFBA') self.assertEqual(volume_info['volume_type'], '9336-ET') self.assertEqual(volume_info['volume_size'], '564718') # Test cache not None, but volume not in the disk_pool try: volume_info = self._hostops.get_volume_info('IASFBB') except 
exception.ZVMNotFound as exc: exc = six.text_type(exc) if "Not found the volume info in " in exc: pass self.assertEqual(2, get_vol_infos.call_count) zVMCloudConnector-1.6.3/zvmsdk/tests/unit/test_utils.py0000664000175000017510000001656014266177632022760 0ustar ruirui00000000000000# Copyright 2017, 2022 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import subprocess from mock import Mock import zvmsdk.utils as zvmutils from zvmsdk.tests.unit import base from zvmsdk import exception class ZVMUtilsTestCases(base.SDKTestCase): def test_convert_to_mb(self): self.assertEqual(2355.2, zvmutils.convert_to_mb('2.3G')) self.assertEqual(20, zvmutils.convert_to_mb('20M')) self.assertEqual(1153433.6, zvmutils.convert_to_mb('1.1T')) @mock.patch.object(zvmutils, 'get_smt_userid') def test_get_namelist(self, gsu): gsu.return_value = 'TUID' self.assertEqual('TSTNLIST', zvmutils.get_namelist()) base.set_conf('zvm', 'namelist', None) gsu.return_value = 'TUID' self.assertEqual('NL00TUID', zvmutils.get_namelist()) gsu.return_value = 'TESTUSER' self.assertEqual('NLSTUSER', zvmutils.get_namelist()) base.set_conf('zvm', 'namelist', 'TSTNLIST') @mock.patch.object(subprocess, 'check_output') def test_get_lpar_name(self, vmcp_query): vmcp_query.return_value = b"IAAS01EF AT BOEM5401" self.assertEqual("BOEM5401", zvmutils.get_lpar_name()) def test_expand_fcp_list_normal(self): fcp_list = "1f10;1f11;1f12;1f13;1f14" expected = {0: set(['1f10']), 1: set(['1f11']), 2: set(['1f12']), 3: 
set(['1f13']), 4: set(['1f14'])} fcp_info = zvmutils.expand_fcp_list(fcp_list) self.assertEqual(expected, fcp_info) def test_expand_fcp_list_with_dash(self): fcp_list = "1f10-1f14" expected = {0: set(['1f10', '1f11', '1f12', '1f13', '1f14'])} fcp_info = zvmutils.expand_fcp_list(fcp_list) self.assertEqual(expected, fcp_info) def test_expand_fcp_list_with_normal_plus_dash(self): fcp_list = "1f10;1f11-1f13;1f17" expected = {0: set(['1f10']), 1: set(['1f11', '1f12', '1f13']), 2: set(['1f17'])} fcp_info = zvmutils.expand_fcp_list(fcp_list) self.assertEqual(expected, fcp_info) def test_expand_fcp_list_with_normal_plus_2dash(self): fcp_list = "1f10;1f11-1f13;1f17-1f1a;1f02" expected = {0: set(['1f10']), 1: set(['1f11', '1f12', '1f13']), 2: set(['1f17', '1f18', '1f19', '1f1a']), 3: set(['1f02'])} fcp_info = zvmutils.expand_fcp_list(fcp_list) self.assertEqual(expected, fcp_info) def test_expand_fcp_list_with_uncontinuous_equal_count(self): fcp_list = "5c70-5c71,5c73-5c74;5d70-5d71,5d73-5d74" expected = {0: set(['5c70', '5c71', '5c73', '5c74']), 1: set(['5d70', '5d71', '5d73', '5d74'])} fcp_info = zvmutils.expand_fcp_list(fcp_list) self.assertEqual(expected, fcp_info) def test_expand_fcp_list_with_4_uncontinuous_equal_count(self): fcp_list = "5c70-5c71,5c73-5c74;5d70-5d71,\ 5d73-5d74;1111-1112,1113-1114;2211-2212,2213-2214" expected = {0: set(['5c70', '5c71', '5c73', '5c74']), 1: set(['5d70', '5d71', '5d73', '5d74']), 2: set(['1111', '1112', '1113', '1114']), 3: set(['2211', '2212', '2213', '2214']), } fcp_info = zvmutils.expand_fcp_list(fcp_list) self.assertEqual(expected, fcp_info) def test_expand_fcp_list_with_uncontinuous_not_equal_count(self): fcp_list = "5c73-5c74;5d70-5d71,5d73-5d74" expected = {0: set(['5c73', '5c74']), 1: set(['5d70', '5d71', '5d73', '5d74'])} fcp_info = zvmutils.expand_fcp_list(fcp_list) self.assertEqual(expected, fcp_info) @mock.patch("zvmsdk.utils.verify_fcp_list_in_hex_format", Mock()) def test_shrink_fcp_list(self): """Test shrink_fcp_list""" # 
Case1: only one FCP in the list. fcp_list = ['1A01'] expected_fcp_str = '1A01' result = zvmutils.shrink_fcp_list(fcp_list) self.assertEqual(expected_fcp_str, result) # Case 2: all the FCPs are continuous. expected_fcp_str = [ '1A01 - 1A0E', # continuous in last 1 digit '1A0E - 1A2E', # continuous in last 2 digits '1AEF - 1B1F'] # continuous in last 3 digits expected_fcp_count = [ 14, # continuous in last 1 digit 33, # continuous in last 2 digits 49] # continuous in last 3 digits for idx, efs in enumerate(expected_fcp_str): fcp_list = list( zvmutils.expand_fcp_list(efs)[0]) result = zvmutils.shrink_fcp_list(fcp_list.copy()) self.assertEqual(efs, result) self.assertEqual(expected_fcp_count[idx], len(fcp_list)) # Case 3: not all the FCPs are continuous. expected_fcp_str = [ '1A01, 1A0E - 1A2E', # case 3.1 '1A0E - 1A2E, 1B01', # case 3.2 '1A05, 1A0E - 1A2E, 1A4A, 1AEF - 1B1F', # case 3.3 '1A0E - 1A2E, 1A4A, 1A5B, 1AEF - 1B1F'] # case 3.4 expected_fcp_count = [ 34, # case 3.1 34, # case 3.2 84, # case 3.3 84 # case 3.4 ] for idx, efs in enumerate(expected_fcp_str): fcp_list = list( zvmutils.expand_fcp_list(efs)[0]) result = zvmutils.shrink_fcp_list(fcp_list.copy()) self.assertEqual(efs, result) self.assertEqual(expected_fcp_count[idx], len(fcp_list)) # Case 4: an empty list. 
fcp_list = [] expected_fcp_str = '' result = zvmutils.shrink_fcp_list(fcp_list) self.assertEqual(expected_fcp_str, result) def test_verify_fcp_list_in_hex_format(self): """Test verify_fcp_list_in_hex_format(fcp_list)""" # case1: not a list object fcp_list = '1A00 - 1A03' self.assertRaisesRegex(exception.SDKInvalidInputFormat, "not a list object", zvmutils.verify_fcp_list_in_hex_format, fcp_list) # case2: FCP(1A0) length != 4 fcp_list = ['1A00', '1A0'] self.assertRaisesRegex(exception.SDKInvalidInputFormat, "non-hex value", zvmutils.verify_fcp_list_in_hex_format, fcp_list) # case3: FCP(1A0G) not a 4-digit hex fcp_list = ['1A00', '1a0G'] self.assertRaisesRegex(exception.SDKInvalidInputFormat, "non-hex value", zvmutils.verify_fcp_list_in_hex_format, fcp_list) # case4: FCP(1A0R) not a 4-digit hex fcp_list = ['1a00', '1A0F'] zvmutils.verify_fcp_list_in_hex_format(fcp_list) zVMCloudConnector-1.6.3/zvmsdk/tests/unit/sdkwsgi/0000775000175000017510000000000014315232035021632 5ustar ruirui00000000000000zVMCloudConnector-1.6.3/zvmsdk/tests/unit/sdkwsgi/handlers/0000775000175000017510000000000014315232035023432 5ustar ruirui00000000000000zVMCloudConnector-1.6.3/zvmsdk/tests/unit/sdkwsgi/handlers/test_vswitch.py0000664000175000017510000001723313575566551026564 0ustar ruirui00000000000000# Copyright 2017,2018 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime import jwt import mock import unittest from zvmsdk import exception from zvmsdk import config from zvmsdk.sdkwsgi.handlers import vswitch from zvmsdk.sdkwsgi import util CONF = config.CONF FAKE_UUID = '00000000-0000-0000-0000-000000000000' def set_conf(section, opt, value): CONF[section][opt] = value class FakeResp(object): def __init__(self): self.body = {} class FakeReq(object): def __init__(self): self.headers = {} self.environ = {} self.__name__ = '' self.response = FakeResp() def __getitem__(self, name): return self.headers class HandlersGuestTest(unittest.TestCase): def setUp(self): set_conf('wsgi', 'auth', 'none') expired_elapse = datetime.timedelta(seconds=100) expired_time = datetime.datetime.utcnow() + expired_elapse payload = jwt.encode({'exp': expired_time}, 'username') self.req = FakeReq() self.req.headers['X-Auth-Token'] = payload @mock.patch.object(vswitch.VswitchAction, 'list') def test_vswitch_list(self, mock_list): mock_list.return_value = '' vswitch.vswitch_list(self.req) self.assertTrue(mock_list.called) @mock.patch.object(vswitch.VswitchAction, 'create') def test_vswitch_create(self, mock_create): mock_create.return_value = {} body_str = """{"vswitch": {"name": "name1", "rdev": "1234 abcd 123F", "port_type": 1, "controller": "*"}}""" self.req.body = body_str vswitch.vswitch_create(self.req) body = util.extract_json(body_str) mock_create.assert_called_once_with(body=body) @mock.patch.object(vswitch.VswitchAction, 'create') def test_vswitch_create_with_userid_controller(self, mock_create): mock_create.return_value = {} body_str = """{"vswitch": {"name": "name1", "rdev": "1234 abcd 123F", "port_type": 1, "controller": "userid01"}}""" self.req.body = body_str vswitch.vswitch_create(self.req) body = util.extract_json(body_str) mock_create.assert_called_once_with(body=body) def test_vswitch_create_invalidname(self): body_str = '{"vswitch": {"name": "", "rdev": "1234"}}' self.req.body = body_str 
self.assertRaises(exception.ValidationError, vswitch.vswitch_create, self.req) def test_vswitch_create_invalid_rdevlist(self): body_str = '{"vswitch": {"name": "name1", "rdev": "12345 sss"}}' self.req.body = body_str self.assertRaises(exception.ValidationError, vswitch.vswitch_create, self.req) @mock.patch.object(util, 'wsgi_path_item') @mock.patch.object(vswitch.VswitchAction, 'delete') def test_vswitch_delete(self, mock_delete, mock_name): mock_delete.return_value = {} mock_name.return_value = 'vsw1' vswitch.vswitch_delete(self.req) mock_delete.assert_called_once_with('vsw1') @mock.patch.object(util, 'wsgi_path_item') @mock.patch.object(vswitch.VswitchAction, 'query') def test_vswitch_query(self, mock_query, mock_name): mock_query.return_value = {} mock_name.return_value = 'vsw1' vswitch.vswitch_query(self.req) mock_query.assert_called_once_with('vsw1') def test_vswitch_create_invalid_connection(self): body_str = """{"vswitch": {"name": "name1", "rdev": "1234", "connection": 3}}""" self.req.body = body_str self.assertRaises(exception.ValidationError, vswitch.vswitch_create, self.req) def test_vswitch_create_invalid_queue_mem(self): body_str = """{"vswitch": {"name": "name1", "rdev": "1234", "queue_mem": 10}}""" self.req.body = body_str self.assertRaises(exception.ValidationError, vswitch.vswitch_create, self.req) def test_vswitch_create_invalid_network_type(self): body_str = """{"vswitch": {"name": "name1", "rdev": "1234", "network_type": 3}}""" self.req.body = body_str self.assertRaises(exception.ValidationError, vswitch.vswitch_create, self.req) def test_vswitch_create_invalid_update(self): body_str = """{"vswitch": {"name": "name1", "rdev": "1234", "update": 4}}""" self.req.body = body_str self.assertRaises(exception.ValidationError, vswitch.vswitch_create, self.req) def test_vswitch_create_invalid_vid(self): body_str = """{"vswitch": {"name": "name1", "rdev": "1234", "vid": -1}}""" self.req.body = body_str self.assertRaises(exception.ValidationError, 
vswitch.vswitch_create, self.req) def test_vswitch_create_invalid_native_vid(self): body_str = """{"vswitch": {"name": "name1", "rdev": "1234", "native_vid": 4096}}""" self.req.body = body_str self.assertRaises(exception.ValidationError, vswitch.vswitch_create, self.req) def test_vswitch_create_invalid_router(self): body_str = """{"vswitch": {"name": "name1", "rdev": "1234", "router": 3}}""" self.req.body = body_str self.assertRaises(exception.ValidationError, vswitch.vswitch_create, self.req) def test_vswitch_create_invalid_grvp(self): body_str = """{"vswitch": {"name": "name1", "rdev": "1234", "gvrp": 3}}""" self.req.body = body_str self.assertRaises(exception.ValidationError, vswitch.vswitch_create, self.req) def test_vswitch_create_invalid_controller(self): body_str = """{"vswitch": {"name": "name1", "rdev": "1234", "controller": "node12345"}}""" self.req.body = body_str self.assertRaises(exception.ValidationError, vswitch.vswitch_create, self.req) @mock.patch.object(util, 'wsgi_path_item') @mock.patch.object(vswitch.VswitchAction, 'update') def test_vswitch_update(self, mock_update, mock_name): mock_name.return_value = 'vsw1' body_str = '{"vswitch": {"grant_userid": "user1"}}' mock_update.return_value = {} self.req.body = body_str vswitch.vswitch_update(self.req) body = util.extract_json(body_str) mock_update.assert_called_once_with('vsw1', body=body) zVMCloudConnector-1.6.3/zvmsdk/tests/unit/sdkwsgi/handlers/test_image.py0000664000175000017510000002176113732306202026134 0ustar ruirui00000000000000# Copyright 2017 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import datetime import jwt import mock import unittest from zvmsdk import exception from zvmsdk import config from zvmsdk.sdkwsgi.handlers import image from zvmsdk.sdkwsgi import util CONF = config.CONF FAKE_UUID = '00000000-0000-0000-0000-000000000000' def set_conf(section, opt, value): CONF[section][opt] = value class FakeResp(object): def __init__(self): self.body = {} class FakeReq(object): def __init__(self): self.headers = {} self.environ = {} self.response = FakeResp() self.__name__ = '' def __getitem__(self, name): return self.headers class HandlersImageTest(unittest.TestCase): def setUp(self): set_conf('wsgi', 'auth', 'none') expired_elapse = datetime.timedelta(seconds=100) expired_time = datetime.datetime.utcnow() + expired_elapse payload = jwt.encode({'exp': expired_time}, 'username') self.req = FakeReq() self.req.headers['X-Auth-Token'] = payload @mock.patch.object(image.ImageAction, 'create') def test_image_create(self, mock_create): body_str = """{"image": {"image_name": "46a4aea3-54b6-4b1c", "url": "file:///tmp/test.img", "image_meta": { "os_version": "rhel7.2", "md5sum": "12345678912345678912345678912345" }, "remotehost": "hostname" } }""" self.req.body = body_str body = { 'image': { 'remotehost': 'hostname', 'image_meta': { 'os_version': 'rhel7.2', 'md5sum': '12345678912345678912345678912345' }, 'url': 'file:///tmp/test.img', 'image_name': '46a4aea3-54b6-4b1c' } } mock_create.return_value = '' image.image_create(self.req) mock_create.assert_called_once_with(body=body) @mock.patch.object(image.ImageAction, 'create') def test_image_create_rhcos(self, mock_create): body_str = """{"image": {"image_name": "46a4aea3-54b6-4b1c", "url": "file:///tmp/test.img", "image_meta": { "os_version": "rhcos4.2", "md5sum": "12345678912345678912345678912345", "disk_type": "DASD" }, "remotehost": "hostname" } }""" self.req.body = body_str body = { 'image': { 
'remotehost': 'hostname', 'image_meta': { 'os_version': 'rhcos4.2', 'md5sum': '12345678912345678912345678912345', "disk_type": "DASD" }, 'url': 'file:///tmp/test.img', 'image_name': '46a4aea3-54b6-4b1c' } } mock_create.return_value = '' image.image_create(self.req) mock_create.assert_called_once_with(body=body) def test_image_create_invalidname(self): body_str = '{"image": {"version": ""}}' self.req.body = body_str self.assertRaises(exception.ValidationError, image.image_create, self.req) def test_image_create_invalid_os_version(self): body_str = """{"image": {"image_name": "46a4aea3-54b6-4b1c", "url": "file:///tmp/test.img", "image_meta": { "os_version": "rhel2.2", "md5sum": "12345678912345678912345678912345" } } }""" self.req.body = body_str self.assertRaises(exception.ValidationError, image.image_create, self.req) def test_image_create_invalid_rhcos_os_version(self): body_str = """{"image": {"image_name": "46a4aea3-54b6-4b1c", "url": "file:///tmp/test.img", "image_meta": { "os_version": "rhcos43", "md5sum": "12345678912345678912345678912345", "disk_type": "DASD" } } }""" self.req.body = body_str self.assertRaises(exception.ValidationError, image.image_create, self.req) def test_image_create_rhcos_invalid_disktype(self): body_str = """{"image": {"image_name": "46a4aea3-54b6-4b1c", "url": "file:///tmp/test.img", "image_meta": { "os_version": "rhcos4.2", "md5sum": "12345678912345678912345678912345", "disk_type": "any" } } }""" self.req.body = body_str self.assertRaises(exception.ValidationError, image.image_create, self.req) def test_image_create_invalid_url(self): # FIXME: need url format validation pass def test_image_create_invalid_image_meta(self): # miss os_version param body_str = """{"image": {"url": "file:///tmp/test.img", "image_meta": { "md5sum": "12345678912345678912345678912345" } } }""" self.req.body = body_str self.assertRaises(exception.ValidationError, image.image_create, self.req) def test_image_create_invalid_image_meta_md5sum(self): # md5sum is 
less than 32 chars body_str = """{"image": {"url": "file://tmp/test.img", "image_meta": { "os_version": "rhel7.2", "md5sum": "2345678912345678912345678912345" } } }""" self.req.body = body_str self.assertRaises(exception.ValidationError, image.image_create, self.req) @mock.patch.object(util, 'wsgi_path_item') @mock.patch.object(image.ImageAction, 'get_root_disk_size') def test_image_get_root_disk_size(self, mock_get, mock_name): mock_name.return_value = 'dummy' mock_get.return_value = '100' image.image_get_root_disk_size(self.req) mock_get.assert_called_once_with(self.req, 'dummy') @mock.patch.object(util, 'wsgi_path_item') @mock.patch.object(image.ImageAction, 'delete') def test_image_delete(self, mock_delete, mock_name): mock_delete.return_value = '' mock_name.return_value = 'dummy' image.image_delete(self.req) mock_delete.assert_called_once_with('dummy') @mock.patch.object(image.ImageAction, 'query') def test_image_query(self, mock_query): mock_query.return_value = '[]' self.req.GET = {} self.req.GET['imagename'] = 'image1' image.image_query(self.req) mock_query.assert_called_once_with(self.req, 'image1') @mock.patch.object(util, 'wsgi_path_item') @mock.patch.object(image.ImageAction, 'export') def test_image_export(self, mock_export, mock_get): mock_export.return_value = '{}' body_str = """{"location":{ "dest_url": "file:///tmp/images/image1", "remote_host": "192.168.12.34" } }""" body = {u'location': {u'dest_url': u'file:///tmp/images/image1', u'remote_host': u'192.168.12.34'}} fake_image_name = '46a4aea3-54b6-4b1c' mock_get.return_value = fake_image_name self.req.body = body_str image.image_export(self.req) mock_export.assert_called_once_with(fake_image_name, body=body) zVMCloudConnector-1.6.3/zvmsdk/tests/unit/sdkwsgi/handlers/__init__.py0000664000175000017510000000000013575566551025555 0ustar ruirui00000000000000zVMCloudConnector-1.6.3/zvmsdk/tests/unit/sdkwsgi/handlers/test_version.py0000664000175000017510000000321213575566551026552 0ustar 
ruirui00000000000000# Copyright 2017,2018 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import json import mock import unittest from zvmsdk.sdkwsgi.handlers import version from zvmsdk import version as sdk_version class HandlersRootTest(unittest.TestCase): def setUp(self): pass def test_version(self): req = mock.Mock() ver_str = {"rc": 0, "overallRC": 0, "errmsg": "", "modID": None, "output": {"api_version": version.APIVERSION, "min_version": version.APIVERSION, "version": sdk_version.__version__, "max_version": version.APIVERSION, }, "rs": 0} res = version.version(req) self.assertEqual('application/json', req.response.content_type) # version_json = json.dumps(ver_res) # version_str = utils.to_utf8(version_json) ver_res = json.loads(req.response.body.decode('utf-8')) self.assertEqual(ver_str, ver_res) self.assertEqual('application/json', res.content_type) zVMCloudConnector-1.6.3/zvmsdk/tests/unit/sdkwsgi/handlers/test_host.py0000664000175000017510000000537414263437505026044 0ustar ruirui00000000000000# Copyright 2017,2021 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the # License for the specific language governing permissions and limitations # under the License. import datetime import jwt import mock import unittest from zvmsdk.sdkwsgi.handlers import host class FakeResp(object): def __init__(self): self.body = {} class FakeReq(object): def __init__(self): self.headers = {} self.environ = {} self.__name__ = '' self.response = FakeResp() def __getitem__(self, name): return self.headers class HandlersHostTest(unittest.TestCase): def setUp(self): expired_elapse = datetime.timedelta(seconds=100) expired_time = datetime.datetime.utcnow() + expired_elapse payload = jwt.encode({'exp': expired_time}, 'username') self.req = FakeReq() self.req.headers['X-Auth-Token'] = payload @mock.patch.object(host.HostAction, 'get_guest_list') def test_host_get_guest_list(self, mock_get_guest_list): mock_get_guest_list.return_value = '' host.host_get_guest_list(self.req) mock_get_guest_list.assert_called_once_with() @mock.patch.object(host.HostAction, 'get_info') def test_host_get_info(self, mock_get_info): mock_get_info.return_value = '' host.host_get_info(self.req) self.assertTrue(mock_get_info.called) @mock.patch.object(host.HostAction, 'diskpool_get_info') def test_host_get_disk_info(self, mock_get_disk_info): mock_get_disk_info.return_value = '' self.req.GET = {} self.req.GET['poolname'] = 'disk1' host.host_get_disk_info(self.req) self.assertTrue(mock_get_disk_info.called) @mock.patch.object(host.HostAction, 'get_diskpool_volumes') def test_host_get_disk_volumes(self, mock_get_disk_vols): mock_get_disk_vols.return_value = '' self.req.GET = {} self.req.GET['poolname'] = 'disk1' host.host_get_diskpool_volumes(self.req) self.assertTrue(mock_get_disk_vols.called) @mock.patch.object(host.HostAction, 'get_volume_info') def test_host_get_volume_info(self, mock_get_vol): mock_get_vol.return_value = '' self.req.GET = {} self.req.GET['volumename'] = 'volum1' host.host_get_volume_info(self.req) self.assertTrue(mock_get_vol.called) 
zVMCloudConnector-1.6.3/zvmsdk/tests/unit/sdkwsgi/handlers/test_guest.py0000664000175000017510000016455014263437505026220 0ustar ruirui00000000000000# Copyright 2017,2018 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import datetime import jwt import mock import unittest import webob.exc from zvmsdk import exception from zvmsdk.sdkwsgi.handlers import guest from zvmsdk.sdkwsgi import util FAKE_USERID = '00000000-0000-0000-0000-000000000000' FAKE_USERID_LIST_STR = 'ab,c,userid1' FAKE_USERID_LIST = ['ab', 'c', 'userid1'] class FakeReqGet(object): def get(self, userid): return FAKE_USERID_LIST_STR def keys(self): return ['userid'] def values(self): return FAKE_USERID_LIST class FakeResp(object): def __init__(self): self.body = {} class FakeReq(object): def __init__(self): self.headers = {} self.environ = {} self.body = {} self.response = FakeResp() self.__name__ = '' def __getitem__(self, name): return self.headers class SDKWSGITest(unittest.TestCase): def setUp(self): expired_elapse = datetime.timedelta(seconds=100) expired_time = datetime.datetime.utcnow() + expired_elapse payload = jwt.encode({'exp': expired_time}, 'username') self.req = FakeReq() self.req.headers['X-Auth-Token'] = payload class GuestActionsTest(SDKWSGITest): @mock.patch.object(util, 'wsgi_path_item') @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_start(self, mock_action, mock_userid): self.req.body = '{"action": "start"}' mock_action.return_value = '' 
mock_userid.return_value = FAKE_USERID guest.guest_action(self.req) mock_action.assert_called_once_with('guest_start', FAKE_USERID, 0) @mock.patch.object(util, 'wsgi_path_item') @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_stop(self, mock_action, mock_userid): self.req.body = '{"action": "stop"}' mock_action.return_value = '' mock_userid.return_value = FAKE_USERID guest.guest_action(self.req) mock_action.assert_called_once_with('guest_stop', FAKE_USERID, timeout=None, poll_interval=None) @mock.patch.object(util, 'wsgi_path_item') @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_stop_with_timeout(self, mock_action, mock_userid): self.req.body = '{"action": "stop", "timeout": 300}' mock_action.return_value = '' mock_userid.return_value = FAKE_USERID guest.guest_action(self.req) mock_action.assert_called_once_with('guest_stop', FAKE_USERID, timeout=300, poll_interval=None) @mock.patch.object(util, 'wsgi_path_item') @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_register(self, mock_action, mock_userid): self.req.body = '{"action": "register_vm",\ "meta": "rhel7",\ "net_set": "1",\ "port_macs": "5abc7819-abec-4deb-9115-2af5da249155"}' mock_action.return_value = '' mock_userid.return_value = FAKE_USERID guest.guest_action(self.req) mock_action.assert_called_once_with('guest_register', FAKE_USERID, "rhel7", "1", "5abc7819-abec-4deb" "-9115-2af5da249155") @mock.patch.object(util, 'wsgi_path_item') @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_register_no_port_macs(self, mock_action, mock_userid): self.req.body = '{"action": "register_vm",\ "meta": "rhel7",\ "net_set": "1"}' mock_action.return_value = '' mock_userid.return_value = FAKE_USERID guest.guest_action(self.req) mock_action.assert_called_once_with('guest_register', FAKE_USERID, "rhel7", "1", None) @mock.patch.object(util, 'wsgi_path_item') 
@mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_deregister(self, mock_action, mock_userid): self.req.body = '{"action": "deregister_vm"}' mock_action.return_value = '' mock_userid.return_value = FAKE_USERID guest.guest_action(self.req) mock_action.assert_called_once_with('guest_deregister', FAKE_USERID) @mock.patch.object(util, 'wsgi_path_item') @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_softstop_with_timeout_poll_interval(self, mock_action, mock_userid): self.req.body = """{"action": "softstop", "timeout": 300, "poll_interval": 15}""" mock_action.return_value = '' mock_userid.return_value = FAKE_USERID guest.guest_action(self.req) mock_action.assert_called_once_with('guest_softstop', FAKE_USERID, timeout=300, poll_interval=15) @mock.patch.object(util, 'wsgi_path_item') @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_get_console_output(self, mock_action, mock_userid): self.req.body = '{"action": "get_console_output"}' mock_action.return_value = '' mock_userid.return_value = FAKE_USERID guest.guest_action(self.req) mock_action.assert_called_once_with('guest_get_console_output', FAKE_USERID) @mock.patch.object(util, 'wsgi_path_item') @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_live_resize_cpus(self, mock_action, mock_userid): self.req.body = '{"action": "live_resize_cpus", "cpu_cnt": 3}' mock_action.return_value = '' mock_userid.return_value = FAKE_USERID guest.guest_action(self.req) mock_action.assert_called_once_with('guest_live_resize_cpus', FAKE_USERID, 3) @mock.patch.object(util, 'wsgi_path_item') @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_live_resize_cpus_missing_param(self, mock_action, mock_userid): self.req.body = '{"action": "live_resize_cpus"}' mock_action.return_value = '' mock_userid.return_value = FAKE_USERID self.assertRaises(exception.ValidationError, guest.guest_action, self.req) 
@mock.patch.object(util, 'wsgi_path_item') @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_live_resize_cpus_invalid_cpu_cnt_1(self, mock_action, mock_userid): self.req.body = '{"action": "live_resize_cpus", "cpu_cnt": 65}' mock_action.return_value = '' mock_userid.return_value = FAKE_USERID self.assertRaises(exception.ValidationError, guest.guest_action, self.req) @mock.patch.object(util, 'wsgi_path_item') @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_live_resize_cpus_invalid_cpu_cnt_2(self, mock_action, mock_userid): self.req.body = '{"action": "live_resize_cpus", "cpu_cnt": 0}' mock_action.return_value = '' mock_userid.return_value = FAKE_USERID self.assertRaises(exception.ValidationError, guest.guest_action, self.req) @mock.patch.object(util, 'wsgi_path_item') @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_live_resize_cpus_invalid_cpu_cnt_type(self, mock_action, mock_userid): self.req.body = '{"action": "live_resize_cpus", "cpu_cnt": "2"}' mock_action.return_value = '' mock_userid.return_value = FAKE_USERID self.assertRaises(exception.ValidationError, guest.guest_action, self.req) @mock.patch.object(util, 'wsgi_path_item') @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_resize_cpus(self, mock_action, mock_userid): self.req.body = '{"action": "resize_cpus", "cpu_cnt": 3}' mock_action.return_value = '' mock_userid.return_value = FAKE_USERID guest.guest_action(self.req) mock_action.assert_called_once_with('guest_resize_cpus', FAKE_USERID, 3) @mock.patch.object(util, 'wsgi_path_item') @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_resize_cpus_missing_param(self, mock_action, mock_userid): self.req.body = '{"action": "resize_cpus"}' mock_action.return_value = '' mock_userid.return_value = FAKE_USERID self.assertRaises(exception.ValidationError, guest.guest_action, self.req) @mock.patch.object(util, 
'wsgi_path_item') @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_resize_cpus_invalid_cpu_cnt_1(self, mock_action, mock_userid): self.req.body = '{"action": "resize_cpus", "cpu_cnt": 65}' mock_action.return_value = '' mock_userid.return_value = FAKE_USERID self.assertRaises(exception.ValidationError, guest.guest_action, self.req) @mock.patch.object(util, 'wsgi_path_item') @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_resize_cpus_invalid_cpu_cnt_2(self, mock_action, mock_userid): self.req.body = '{"action": "resize_cpus", "cpu_cnt": 0}' mock_action.return_value = '' mock_userid.return_value = FAKE_USERID self.assertRaises(exception.ValidationError, guest.guest_action, self.req) @mock.patch.object(util, 'wsgi_path_item') @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_resize_cpus_invalid_cpu_cnt_type(self, mock_action, mock_userid): self.req.body = '{"action": "resize_cpus", "cpu_cnt": "2"}' mock_action.return_value = '' mock_userid.return_value = FAKE_USERID self.assertRaises(exception.ValidationError, guest.guest_action, self.req) @mock.patch.object(util, 'wsgi_path_item') @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_resize_mem(self, mock_action, mock_userid): self.req.body = '{"action": "resize_mem", "size": "4096m"}' mock_action.return_value = '' mock_userid.return_value = FAKE_USERID guest.guest_action(self.req) mock_action.assert_called_once_with('guest_resize_mem', FAKE_USERID, "4096m") @mock.patch.object(util, 'wsgi_path_item') @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_resize_mem_missing_param(self, mock_action, mock_userid): self.req.body = '{"action": "resize_mem"}' mock_action.return_value = '' mock_userid.return_value = FAKE_USERID self.assertRaises(exception.ValidationError, guest.guest_action, self.req) @mock.patch.object(util, 'wsgi_path_item') 
@mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_resize_mem_invalid_mem_1(self, mock_action, mock_userid): self.req.body = '{"action": "resize_mem", "size": "88888M"}' mock_action.return_value = '' mock_userid.return_value = FAKE_USERID self.assertRaises(exception.ValidationError, guest.guest_action, self.req) @mock.patch.object(util, 'wsgi_path_item') @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_resize_mem_invalid_mem_2(self, mock_action, mock_userid): self.req.body = '{"action": "resize_mem", "size": "123"}' mock_action.return_value = '' mock_userid.return_value = FAKE_USERID self.assertRaises(exception.ValidationError, guest.guest_action, self.req) @mock.patch.object(util, 'wsgi_path_item') @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_resize_cpus_invalid_mem_type(self, mock_action, mock_userid): self.req.body = '{"action": "resize_mem", "size": 1024}' mock_action.return_value = '' mock_userid.return_value = FAKE_USERID self.assertRaises(exception.ValidationError, guest.guest_action, self.req) @mock.patch.object(util, 'wsgi_path_item') @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_live_resize_mem(self, mock_action, mock_userid): self.req.body = '{"action": "live_resize_mem", "size": "4G"}' mock_action.return_value = '' mock_userid.return_value = FAKE_USERID guest.guest_action(self.req) mock_action.assert_called_once_with('guest_live_resize_mem', FAKE_USERID, "4G") @mock.patch.object(util, 'wsgi_path_item') @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_grow_root_volume(self, mock_action, mock_userid): self.req.body = """{"action": "grow_root_volume", "os_version": "RHEL7.8"}""" mock_action.return_value = '' mock_userid.return_value = FAKE_USERID guest.guest_action(self.req) mock_action.assert_called_once_with('guest_grow_root_volume', FAKE_USERID, "RHEL7.8") @mock.patch.object(util, 'wsgi_path_item') 
@mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_deploy(self, mock_action, mock_userid): self.req.body = """{"action": "deploy", "image": "image1", "transportfiles": "file1", "remotehost": "test@host1.x.y", "vdev": "1000"}""" mock_action.return_value = '' mock_userid.return_value = FAKE_USERID guest.guest_action(self.req) mock_action.assert_called_once_with('guest_deploy', FAKE_USERID, 'image1', remotehost='test@host1.x.y', transportfiles='file1', vdev='1000', hostname=None, skipdiskcopy=False) @mock.patch.object(util, 'wsgi_path_item') @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_deploy_vdev_None(self, mock_action, mock_userid): self.req.body = """{"action": "deploy", "image": "image1", "transportfiles": "file1", "remotehost": "test@host1.x.y", "vdev": null}""" mock_action.return_value = '' mock_userid.return_value = FAKE_USERID guest.guest_action(self.req) mock_action.assert_called_once_with('guest_deploy', FAKE_USERID, 'image1', remotehost='test@host1.x.y', transportfiles='file1', vdev=None, hostname=None, skipdiskcopy=False) @mock.patch.object(util, 'wsgi_path_item') def test_guest_deploy_missing_param(self, mock_userid): self.req.body = """{"action": "deploy", "transportfiles": "file1", "remotehost": "test@host1.x.y", "vdev": "1000"}""" mock_userid.return_value = FAKE_USERID self.assertRaises(exception.ValidationError, guest.guest_action, self.req) @mock.patch.object(util, 'wsgi_path_item') def test_guest_deploy_invalid_vdev(self, mock_userid): # vdev not string type self.req.body = """{"action": "deploy", "image": "image1", "transportfiles": "file1", "remotehost": "test@host.com.cn", "vdev": 1000}""" mock_userid.return_value = FAKE_USERID self.assertRaises(exception.ValidationError, guest.guest_action, self.req) @mock.patch.object(util, 'wsgi_path_item') def test_guest_deploy_invalid_remotehost(self, mock_userid): self.req.body = """{"action": "deploy", "image": "image1", "transportfiles": 
"file1", "remotehost": ".122.sd..", "vdev": "1000"}""" mock_userid.return_value = FAKE_USERID self.assertRaises(exception.ValidationError, guest.guest_action, self.req) @mock.patch.object(util, 'wsgi_path_item') @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_deploy_with_ip_in_remotehost(self, mock_action, mock_userid): self.req.body = """{"action": "deploy", "image": "image1", "transportfiles": "file1", "remotehost": "test@192.168.99.99", "vdev": "1000"}""" mock_action.return_value = '' mock_userid.return_value = FAKE_USERID guest.guest_action(self.req) mock_action.assert_called_once_with('guest_deploy', FAKE_USERID, 'image1', remotehost='test@192.168.99.99', transportfiles='file1', vdev='1000', hostname=None, skipdiskcopy=False) @mock.patch.object(util, 'wsgi_path_item') @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_deploy_with_fqdn_in_remotehost(self, mock_action, mock_userid): # remote host with Hostname + DomainName in it self.req.body = """{"action": "deploy", "image": "image1", "transportfiles": "file1", "remotehost": "test123@test.xyz.com", "vdev": "1000"}""" mock_action.return_value = '' mock_userid.return_value = FAKE_USERID guest.guest_action(self.req) mock_action.assert_called_once_with('guest_deploy', FAKE_USERID, 'image1', remotehost='test123@test.xyz.com', transportfiles='file1', vdev='1000', hostname=None, skipdiskcopy=False) @mock.patch.object(util, 'wsgi_path_item') def test_guest_deploy_without_username_in_remotehost(self, mock_userid): # remote host without username in it self.req.body = """{"action": "deploy", "image": "image1", "transportfiles": "file1", "remotehost": "@test.xyz.com", "vdev": "1000"}""" mock_userid.return_value = FAKE_USERID self.assertRaises(exception.ValidationError, guest.guest_action, self.req) @mock.patch.object(util, 'wsgi_path_item') def test_guest_deploy_additional_param(self, mock_userid): # A typo in the transportfiles self.req.body = """{"action": 
"deploy", "image": "image1", "transportfiless": "file1", "remotehost": "test@192.168.99.1", "vdev": "1000"}""" mock_userid.return_value = FAKE_USERID self.assertRaises(exception.ValidationError, guest.guest_action, self.req) @mock.patch.object(util, 'wsgi_path_item') def test_guest_invalid_action(self, mock_userid): self.req.body = '{"fake": "None"}' mock_userid.return_value = FAKE_USERID self.assertRaises(webob.exc.HTTPBadRequest, guest.guest_action, self.req) @mock.patch.object(util, 'wsgi_path_item') def test_guest_capture_additional_param(self, mock_userid): # Put wrong parameter compressionlevel, it should be compresslevel self.req.body = """{"action": "capture", "image": "image1", "capture_type": "rootonly", "compression_level": "6"}""" mock_userid.return_value = FAKE_USERID self.assertRaises(exception.ValidationError, guest.guest_action, self.req) @mock.patch.object(util, 'wsgi_path_item') def test_guest_capture_invalid_capturetype(self, mock_userid): # Put compresslevel to be invalid 10 self.req.body = """{"action": "capture", "image": "image1", "capture_type": "rootdisk", "compress_level": "10"}""" mock_userid.return_value = FAKE_USERID self.assertRaises(exception.ValidationError, guest.guest_action, self.req) @mock.patch.object(util, 'wsgi_path_item') def test_guest_capture_invalid_compresslevel(self, mock_userid): # Put capture type to be invalid value self.req.body = """{"action": "capture", "image": "image1", "capture_type": "faketype", "compress_level": 9}""" mock_userid.return_value = FAKE_USERID self.assertRaises(exception.ValidationError, guest.guest_action, self.req) @mock.patch.object(util, 'wsgi_path_item') @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_capture(self, mock_action, mock_userid): self.req.body = """{"action": "capture", "image": "image1", "capture_type": "rootonly", "compress_level": 6}""" mock_action.return_value = '' mock_userid.return_value = FAKE_USERID guest.guest_action(self.req) 
mock_action.assert_called_once_with("guest_capture", FAKE_USERID, "image1", capture_type="rootonly", compress_level=6) class HandlersGuestTest(SDKWSGITest): @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_create(self, mock_create): body_str = '{"guest": {"userid": "name1", "vcpus": 1, "memory": 1}}' self.req.body = body_str mock_create.return_value = '' guest.guest_create(self.req) mock_create.assert_called_once_with('guest_create', 'name1', 1, 1) @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_create_with_profile(self, mock_create): body_str = '''{"guest": {"userid": "name1", "vcpus": 1, "memory": 1, "user_profile": "profile1"}}''' self.req.body = body_str mock_create.return_value = '' guest.guest_create(self.req) mock_create.assert_called_once_with('guest_create', 'name1', 1, 1, user_profile="profile1") def test_guest_create_invalid_userid(self): body_str = '{"guest": {"userid": ""}}' self.req.body = body_str self.assertRaises(exception.ValidationError, guest.guest_create, self.req) @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_create_with_disk_list(self, mock_create): body_str = """{"guest": {"userid": "name1", "vcpus": 1, "memory": 1, "disk_list": [{"size": "1g", "format": "xfs", "disk_pool": "ECKD:poolname"} ]}}""" self.req.body = body_str mock_create.return_value = '' guest.guest_create(self.req) mock_create.assert_called_once_with('guest_create', 'name1', 1, 1, disk_list=[{u'size': u'1g', 'format': 'xfs', 'disk_pool': 'ECKD:poolname'}]) def test_guest_create_invalid_disk_list(self): body_str = """{"guest": {"userid": "name1", "vcpus": 1, "memory": 1, "disk_list": [{"size": 1}]}}""" self.req.body = body_str self.assertRaises(exception.ValidationError, guest.guest_create, self.req) @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_create_with_invalid_format(self, mock_create): body_str = """{"guest": {"userid": "name1", "vcpus": 1, 
"memory": 1, "disk_list": [{"size": "1g", "format": "dummy", "disk_pool": "ECKD:poolname"} ]}}""" self.req.body = body_str self.assertRaises(exception.ValidationError, guest.guest_create, self.req) def test_guest_create_invalid_disk_list_param(self): body_str = """{"guest": {"userid": "name1", "vcpus": 1, "memory": 1, "disk_list": [{"size": "1g", "dummy": 1}]}}""" self.req.body = body_str self.assertRaises(exception.ValidationError, guest.guest_create, self.req) def test_guest_create_invalid_disk_list_poolname(self): body_str = """{"guest": {"userid": "name1", "vcpus": 1, "memory": 1, "disk_list": [{"size": "1g", "disk_pool": "pool"}]}}""" self.req.body = body_str self.assertRaises(exception.ValidationError, guest.guest_create, self.req) def test_guest_create_invalid_cpu(self): body_str = '{"guest": {"userid": "name1", "vcpus": "dummy"}}' self.req.body = body_str self.assertRaises(exception.ValidationError, guest.guest_create, self.req) def test_guest_create_invalid_mem(self): body_str = '{"guest": {"userid": "name1", "memory": "dummy"}}' self.req.body = body_str self.assertRaises(exception.ValidationError, guest.guest_create, self.req) def test_guest_create_false_input(self): body_str = '{"guest": {"userid": "name1", "dummy": "dummy"}}' self.req.body = body_str self.assertRaises(exception.ValidationError, guest.guest_create, self.req) body_str = '{"guest": {"userid": "name1"}, "dummy": "dummy"}' self.req.body = body_str self.assertRaises(exception.ValidationError, guest.guest_create, self.req) @mock.patch.object(guest.VMHandler, 'list') def test_guest_list(self, mock_list): mock_list.return_value = '' guest.guest_list(self.req) mock_list.assert_called_once_with() @mock.patch.object(util, 'wsgi_path_item') @mock.patch.object(guest.VMHandler, 'get_power_state_real') def test_guest_power_state_real(self, mock_get, mock_userid): mock_get.return_value = '' mock_userid.return_value = FAKE_USERID guest.guest_get_power_state_real(self.req) 
mock_get.assert_called_once_with(self.req, FAKE_USERID) @mock.patch.object(util, 'wsgi_path_item') @mock.patch.object(guest.VMHandler, 'get_info') def test_guest_get_info(self, mock_get, mock_userid): mock_get.return_value = '' mock_userid.return_value = FAKE_USERID guest.guest_get_info(self.req) mock_get.assert_called_once_with(self.req, FAKE_USERID) @mock.patch.object(util, 'wsgi_path_item') @mock.patch.object(guest.VMHandler, 'get_user_direct') def test_guest_get_user_direct(self, mock_get, mock_userid): mock_get.return_value = '' mock_userid.return_value = FAKE_USERID guest.guest_get_user_direct(self.req) @mock.patch.object(util, 'wsgi_path_item') @mock.patch.object(guest.VMHandler, 'get_power_state') def test_guest_power_state(self, mock_get, mock_userid): mock_get.return_value = '' mock_userid.return_value = FAKE_USERID guest.guest_get_power_state(self.req) mock_get.assert_called_once_with(self.req, FAKE_USERID) @mock.patch.object(util, 'wsgi_path_item') @mock.patch.object(guest.VMHandler, 'delete') def test_guest_delete(self, mock_delete, mock_userid): mock_delete.return_value = '' mock_userid.return_value = FAKE_USERID guest.guest_delete(self.req) mock_delete.assert_called_once_with(FAKE_USERID) @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_list(self, mock_interface): mock_interface.return_value = '' guest.guest_list(self.req) mock_interface.assert_called_once_with('guest_list') @mock.patch.object(util, 'wsgi_path_item') @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_create_nic(self, mock_create, mock_userid): vdev = '1234' nic_id = "514fec03-0d96-4349-a670-d972805fb579" mac_addr = "02:00:00:11:22:33" body_str = """{"nic": {"vdev": "1234", "nic_id": "514fec03-0d96-4349-a670-d972805fb579", "mac_addr": "02:00:00:11:22:33"} }""" self.req.body = body_str mock_create.return_value = '' mock_userid.return_value = FAKE_USERID guest.guest_create_nic(self.req) 
mock_create.assert_called_once_with('guest_create_nic', FAKE_USERID, active=False, mac_addr=mac_addr, nic_id=nic_id, vdev=vdev) @mock.patch.object(util, 'wsgi_path_item') @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_create_nic_vdev_None(self, mock_create, mock_userid): vdev = None nic_id = "514fec03-0d96-4349-a670-d972805fb579" mac_addr = "02:00:00:11:22:33" body_str = """{"nic": {"vdev": null, "nic_id": "514fec03-0d96-4349-a670-d972805fb579", "mac_addr": "02:00:00:11:22:33"} }""" self.req.body = body_str mock_create.return_value = '' mock_userid.return_value = FAKE_USERID guest.guest_create_nic(self.req) mock_create.assert_called_once_with('guest_create_nic', FAKE_USERID, active=False, mac_addr=mac_addr, nic_id=nic_id, vdev=vdev) def test_guest_create_nic_invalid_vdev(self): body_str = '{"nic": {"vdev": 123}}' self.req.body = body_str self.assertRaises(exception.ValidationError, guest.guest_create_nic, self.req) def test_guest_create_nic_invalid_mac_addr(self): body_str = '{"nic": {"mac_addr": "11:22:33:44:55:6s"}}' self.req.body = body_str self.assertRaises(exception.ValidationError, guest.guest_create_nic, self.req) @mock.patch.object(util, 'wsgi_path_item') @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_create_network_interface(self, mock_interface, mock_userid): os_version = 'rhel6' guest_networks = [{'ip_addr': '192.168.12.34', 'dns_addr': ['9.1.2.3'], 'gateway_addr': '192.168.95.1', 'cidr': '192.168.95.0/24', 'nic_vdev': '1000', 'mac_addr': '02:00:00:12:34:56'}] bstr = """{"interface": {"os_version": "rhel6", "guest_networks": [ {"ip_addr": "192.168.12.34", "dns_addr": ["9.1.2.3"], "gateway_addr": "192.168.95.1", "cidr": "192.168.95.0/24", "nic_vdev": "1000", "mac_addr": "02:00:00:12:34:56"}]}}""" self.req.body = bstr mock_userid.return_value = FAKE_USERID mock_interface.return_value = '' guest.guest_create_network_interface(self.req) mock_interface.assert_called_once_with( 
'guest_create_network_interface', FAKE_USERID, os_version=os_version, guest_networks=guest_networks, active=False) @mock.patch.object(util, 'wsgi_path_item') @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_create_network_interface_OSA(self, mock_interface, mock_userid): os_version = 'rhel6' guest_networks = [{'ip_addr': '192.168.12.34', 'dns_addr': ['9.1.2.3'], 'gateway_addr': '192.168.95.1', 'cidr': '192.168.95.0/24', 'nic_vdev': '1000', 'mac_addr': '02:00:00:12:34:56', 'osa_device': 'AABB'}] bstr = """{"interface": {"os_version": "rhel6", "guest_networks": [ {"ip_addr": "192.168.12.34", "dns_addr": ["9.1.2.3"], "gateway_addr": "192.168.95.1", "cidr": "192.168.95.0/24", "nic_vdev": "1000", "mac_addr": "02:00:00:12:34:56", "osa_device": "AABB"}]}}""" self.req.body = bstr mock_userid.return_value = FAKE_USERID mock_interface.return_value = '' guest.guest_create_network_interface(self.req) mock_interface.assert_called_once_with( 'guest_create_network_interface', FAKE_USERID, os_version=os_version, guest_networks=guest_networks, active=False) @mock.patch.object(util, 'wsgi_path_item') @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_delete_network_interface(self, mock_interface, mock_userid): os_version = 'rhel6' vdev = '1000' bstr = """{"interface": {"os_version": "rhel6", "vdev": "1000"}}""" self.req.body = bstr mock_userid.return_value = FAKE_USERID mock_interface.return_value = '' guest.guest_delete_network_interface(self.req) mock_interface.assert_called_once_with( 'guest_delete_network_interface', FAKE_USERID, os_version, vdev, active=False) @mock.patch.object(util, 'wsgi_path_item') @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_create_network_interface_with_correct_rhcos_version(self, mock_interface, mock_userid): os_version = 'rhcos4' guest_networks = [{'ip_addr': '192.168.12.34', 'dns_addr': ['9.1.2.3'], 'gateway_addr': '192.168.95.1', 'cidr': '192.168.95.0/24', 
'nic_vdev': '1000', 'mac_addr': '02:00:00:12:34:56'}] bstr = """{"interface": {"os_version": "rhcos4", "guest_networks": [ {"ip_addr": "192.168.12.34", "dns_addr": ["9.1.2.3"], "gateway_addr": "192.168.95.1", "cidr": "192.168.95.0/24", "nic_vdev": "1000", "mac_addr": "02:00:00:12:34:56"}]}}""" self.req.body = bstr mock_userid.return_value = FAKE_USERID mock_interface.return_value = '' guest.guest_create_network_interface(self.req) mock_interface.assert_called_once_with( 'guest_create_network_interface', FAKE_USERID, os_version=os_version, guest_networks=guest_networks, active=False) @mock.patch.object(util, 'wsgi_path_item') @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_create_network_interface_with_correct_rhcos_version_1(self, mock_interface, mock_userid): os_version = 'rhcos4.6' guest_networks = [{'ip_addr': '192.168.12.34', 'dns_addr': ['9.1.2.3'], 'gateway_addr': '192.168.95.1', 'cidr': '192.168.95.0/24', 'nic_vdev': '1000', 'mac_addr': '02:00:00:12:34:56'}] bstr = """{"interface": {"os_version": "rhcos4.6", "guest_networks": [ {"ip_addr": "192.168.12.34", "dns_addr": ["9.1.2.3"], "gateway_addr": "192.168.95.1", "cidr": "192.168.95.0/24", "nic_vdev": "1000", "mac_addr": "02:00:00:12:34:56"}]}}""" self.req.body = bstr mock_userid.return_value = FAKE_USERID mock_interface.return_value = '' guest.guest_create_network_interface(self.req) mock_interface.assert_called_once_with( 'guest_create_network_interface', FAKE_USERID, os_version=os_version, guest_networks=guest_networks, active=False) @mock.patch.object(util, 'wsgi_path_item') @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_create_network_interface_with_correct_rhcos_version_2(self, mock_interface, mock_userid): os_version = 'rhcos4.6.8' guest_networks = [{'ip_addr': '192.168.12.34', 'dns_addr': ['9.1.2.3'], 'gateway_addr': '192.168.95.1', 'cidr': '192.168.95.0/24', 'nic_vdev': '1000', 'mac_addr': '02:00:00:12:34:56'}] bstr = """{"interface": 
{"os_version": "rhcos4.6.8", "guest_networks": [ {"ip_addr": "192.168.12.34", "dns_addr": ["9.1.2.3"], "gateway_addr": "192.168.95.1", "cidr": "192.168.95.0/24", "nic_vdev": "1000", "mac_addr": "02:00:00:12:34:56"}]}}""" self.req.body = bstr mock_userid.return_value = FAKE_USERID mock_interface.return_value = '' guest.guest_create_network_interface(self.req) mock_interface.assert_called_once_with( 'guest_create_network_interface', FAKE_USERID, os_version=os_version, guest_networks=guest_networks, active=False) @mock.patch.object(util, 'wsgi_path_item') @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_create_network_interface_with_correct_rhcos_version_3(self, mock_interface, mock_userid): os_version = 'rhcos4.11.11' guest_networks = [{'ip_addr': '192.168.12.34', 'dns_addr': ['9.1.2.3'], 'gateway_addr': '192.168.95.1', 'cidr': '192.168.95.0/24', 'nic_vdev': '1000', 'mac_addr': '02:00:00:12:34:56'}] bstr = """{"interface": {"os_version": "rhcos4.11.11", "guest_networks": [ {"ip_addr": "192.168.12.34", "dns_addr": ["9.1.2.3"], "gateway_addr": "192.168.95.1", "cidr": "192.168.95.0/24", "nic_vdev": "1000", "mac_addr": "02:00:00:12:34:56"}]}}""" self.req.body = bstr mock_userid.return_value = FAKE_USERID mock_interface.return_value = '' guest.guest_create_network_interface(self.req) mock_interface.assert_called_once_with( 'guest_create_network_interface', FAKE_USERID, os_version=os_version, guest_networks=guest_networks, active=False) @mock.patch.object(util, 'wsgi_path_item') @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_create_network_interface_with_correct_rhcos_version_4(self, mock_interface, mock_userid): os_version = 'rhcos4.6.11' guest_networks = [{'ip_addr': '192.168.12.34', 'dns_addr': ['9.1.2.3'], 'gateway_addr': '192.168.95.1', 'cidr': '192.168.95.0/24', 'nic_vdev': '1000', 'mac_addr': '02:00:00:12:34:56'}] bstr = """{"interface": {"os_version": "rhcos4.6.11", "guest_networks": [ {"ip_addr": 
"192.168.12.34", "dns_addr": ["9.1.2.3"], "gateway_addr": "192.168.95.1", "cidr": "192.168.95.0/24", "nic_vdev": "1000", "mac_addr": "02:00:00:12:34:56"}]}}""" self.req.body = bstr mock_userid.return_value = FAKE_USERID mock_interface.return_value = '' guest.guest_create_network_interface(self.req) mock_interface.assert_called_once_with( 'guest_create_network_interface', FAKE_USERID, os_version=os_version, guest_networks=guest_networks, active=False) @mock.patch.object(util, 'wsgi_path_item') @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_create_network_interface_with_wrong_rhcos_version(self, mock_interface, mock_userid): bstr = """{"interface": {"os_version": "rhcos4.", "guest_networks": [ {"ip_addr": "192.168.12.34", "dns_addr": ["9.1.2.3"], "gateway_addr": "192.168.95.1", "cidr": "192.168.95.0/24", "nic_vdev": "1000", "mac_addr": "02:00:00:12:34:56"}]}}""" self.req.body = bstr mock_userid.return_value = FAKE_USERID mock_interface.return_value = '' self.assertRaises(exception.ValidationError, guest.guest_create_network_interface, self.req) @mock.patch.object(util, 'wsgi_path_item') @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_create_network_interface_with_wrong_rhcos_version_1(self, mock_interface, mock_userid): bstr = """{"interface": {"os_version": "rhcos4.a", "guest_networks": [ {"ip_addr": "192.168.12.34", "dns_addr": ["9.1.2.3"], "gateway_addr": "192.168.95.1", "cidr": "192.168.95.0/24", "nic_vdev": "1000", "mac_addr": "02:00:00:12:34:56"}]}}""" self.req.body = bstr mock_userid.return_value = FAKE_USERID mock_interface.return_value = '' self.assertRaises(exception.ValidationError, guest.guest_create_network_interface, self.req) @mock.patch.object(util, 'wsgi_path_item') @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_create_network_interface_with_wrong_rhcos_version_2(self, mock_interface, mock_userid): bstr = """{"interface": {"os_version": "rhcos4.6.a", 
"guest_networks": [ {"ip_addr": "192.168.12.34", "dns_addr": ["9.1.2.3"], "gateway_addr": "192.168.95.1", "cidr": "192.168.95.0/24", "nic_vdev": "1000", "mac_addr": "02:00:00:12:34:56"}]}}""" self.req.body = bstr mock_userid.return_value = FAKE_USERID mock_interface.return_value = '' self.assertRaises(exception.ValidationError, guest.guest_create_network_interface, self.req) @mock.patch.object(util, 'wsgi_path_item') @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_create_network_interface_with_wrong_rhcos_version_3(self, mock_interface, mock_userid): bstr = """{"interface": {"os_version": "rhcos4.111.111", "guest_networks": [ {"ip_addr": "192.168.12.34", "dns_addr": ["9.1.2.3"], "gateway_addr": "192.168.95.1", "cidr": "192.168.95.0/24", "nic_vdev": "1000", "mac_addr": "02:00:00:12:34:56"}]}}""" self.req.body = bstr mock_userid.return_value = FAKE_USERID mock_interface.return_value = '' self.assertRaises(exception.ValidationError, guest.guest_create_network_interface, self.req) @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guests_get_nic_info(self, mock_interface): self.req.GET = {} mock_interface.return_value = '' guest.guests_get_nic_info(self.req) mock_interface.assert_called_once_with( 'guests_get_nic_info', userid=None, nic_id=None, vswitch=None) @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guests_get_nic_info_all(self, mock_interface): userid = 'fakeid' nic_id = 'fake_nic_id' vswitch = 'vswitch' self.req.GET = {} self.req.GET['userid'] = userid self.req.GET['nic_id'] = nic_id self.req.GET['vswitch'] = vswitch mock_interface.return_value = '' guest.guests_get_nic_info(self.req) mock_interface.assert_called_once_with( 'guests_get_nic_info', userid=userid, nic_id=nic_id, vswitch=vswitch) @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guests_get_nic_info_with_userid(self, mock_interface): userid = 'fakeid' self.req.GET = {} self.req.GET['userid'] = 
userid mock_interface.return_value = '' guest.guests_get_nic_info(self.req) mock_interface.assert_called_once_with( 'guests_get_nic_info', userid=userid, nic_id=None, vswitch=None) @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guests_get_nic_info_with_nicid(self, mock_interface): nic_id = 'fake_nic_id' self.req.GET = {} self.req.GET['nic_id'] = nic_id mock_interface.return_value = '' guest.guests_get_nic_info(self.req) mock_interface.assert_called_once_with( 'guests_get_nic_info', userid=None, nic_id=nic_id, vswitch=None) @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guests_get_nic_info_with_vswitch(self, mock_interface): vswitch = 'vswitch' self.req.GET = {} self.req.GET['vswitch'] = vswitch mock_interface.return_value = '' guest.guests_get_nic_info(self.req) mock_interface.assert_called_once_with( 'guests_get_nic_info', userid=None, nic_id=None, vswitch=vswitch) # TODO: move this test to sdk layer instead of API layer # or we can use validation to validate cidr @unittest.skip("we use send_request now.....") @mock.patch.object(util, 'wsgi_path_item') def test_guest_create_network_interface_invalid_cidr(self, mock_userid): # / not in cidr bstr = """{"interface": {"os_version": "rhel6", "guest_networks": [ {"ip_addr": "192.168.12.34", "dns_addr": ["9.1.2.3"], "gateway_addr": "192.168.95.1", "cidr": "192.168.95.0", "nic_vdev": "1000", "mac_addr": "02:00:00:12:34:56"}]}}""" self.req.body = bstr mock_userid.return_value = FAKE_USERID self.assertRaises(webob.exc.HTTPBadRequest, guest.guest_create_network_interface, self.req) @mock.patch.object(util, 'wsgi_path_item') @mock.patch('zvmconnector.connector.ZVMConnector.send_request') def test_guest_create_disks(self, mock_create, mock_userid): disk_list = [{u'size': u'1g', 'disk_pool': 'ECKD:poolname'}] body_str = """{"disk_info": {"disk_list": [{"size": "1g", "disk_pool": "ECKD:poolname"} ]}}""" mock_create.return_value = '' self.req.body = body_str 
mock_userid.return_value = FAKE_USERID guest.guest_create_disks(self.req) mock_create.assert_called_once_with('guest_create_disks', FAKE_USERID, disk_list) @mock.patch('zvmconnector.connector.ZVMConnector.send_request') @mock.patch.object(util, 'wsgi_path_item') def test_guest_config_minidisks(self, mock_userid, mock_config): disk_list = [{'vdev': '0101', 'format': 'ext3', 'mntdir': '/mnt/0101'}] mock_config.return_value = '' body_str = """{"disk_info": {"disk_list": [{"vdev": "0101", "format": "ext3", "mntdir": "/mnt/0101"} ]}}""" self.req.body = body_str mock_userid.return_value = FAKE_USERID guest.guest_config_disks(self.req) mock_config.assert_called_once_with('guest_config_minidisks', FAKE_USERID, disk_list) @mock.patch('zvmconnector.connector.ZVMConnector.send_request') @mock.patch.object(util, 'wsgi_path_item') def test_guest_delete_disks(self, mock_userid, mock_delete): vdev_list = ['0101'] mock_delete.return_value = '' body_str = """{"vdev_info": {"vdev_list": ["0101"]}}""" self.req.body = body_str mock_userid.return_value = FAKE_USERID guest.guest_delete_disks(self.req) mock_delete.assert_called_once_with('guest_delete_disks', FAKE_USERID, vdev_list) @mock.patch.object(util, 'wsgi_path_item') @mock.patch.object(guest.VMHandler, 'get_definition_info') def test_guest_get(self, mock_get, mock_userid): mock_get.return_value = '' mock_userid.return_value = FAKE_USERID guest.guest_get(self.req) mock_get.assert_called_once_with(self.req, FAKE_USERID) @mock.patch.object(guest.VMHandler, 'inspect_stats') def test_guest_get_stats(self, mock_get): self.req.GET = FakeReqGet() mock_get.return_value = '{}' guest.guest_get_stats(self.req) mock_get.assert_called_once_with(self.req, FAKE_USERID_LIST) @mock.patch.object(guest.VMHandler, 'inspect_vnics') def test_guest_get_interface_stats(self, mock_get): self.req.GET = FakeReqGet() mock_get.return_value = '{}' guest.guest_get_interface_stats(self.req) mock_get.assert_called_once_with(self.req, FAKE_USERID_LIST) def 
mock_get_userid_vdev(self, env, param): if param == 'userid': return FAKE_USERID else: return '1000' @mock.patch('zvmconnector.connector.ZVMConnector.send_request') @mock.patch.object(util, 'wsgi_path_item') def test_delete_nic(self, mock_userid, mock_delete): body_str = '{"info": {}}' self.req.body = body_str mock_delete.return_value = {'overallRC': 0} mock_userid.side_effect = self.mock_get_userid_vdev guest.guest_delete_nic(self.req) mock_delete.assert_called_once_with('guest_delete_nic', FAKE_USERID, "1000", active=False) @mock.patch('zvmconnector.connector.ZVMConnector.send_request') @mock.patch.object(util, 'wsgi_path_item') def test_guest_couple_nic(self, mock_userid, mock_couple): body_str = '{"info": {"couple": "true", "vswitch": "vsw1"}}' self.req.body = body_str mock_couple.return_value = '' mock_userid.side_effect = self.mock_get_userid_vdev guest.guest_couple_uncouple_nic(self.req) mock_couple.assert_called_once_with('guest_nic_couple_to_vswitch', FAKE_USERID, "1000", "vsw1", active=False, vlan_id=-1) @mock.patch('zvmconnector.connector.ZVMConnector.send_request') @mock.patch.object(util, 'wsgi_path_item') def test_guest_uncouple_nic(self, mock_userid, mock_uncouple): body_str = '{"info": {"couple": "false"}}' self.req.body = body_str mock_uncouple.return_value = '' mock_userid.side_effect = self.mock_get_userid_vdev guest.guest_couple_uncouple_nic(self.req) mock_uncouple.assert_called_once_with( 'guest_nic_uncouple_from_vswitch', FAKE_USERID, "1000", active=False) @mock.patch.object(util, 'wsgi_path_item') def test_guest_couple_nic_missing_required_1(self, mock_userid): body_str = '{"info": {}}' self.req.body = body_str mock_userid.return_value = FAKE_USERID self.assertRaises(exception.ValidationError, guest.guest_couple_uncouple_nic, self.req) @mock.patch.object(util, 'wsgi_path_item') def test_guest_uncouple_nic_bad_vswitch(self, mock_userid): body_str = '{"info": {"couple": "false", "active": "dummy"}}' self.req.body = body_str 
mock_userid.return_value = FAKE_USERID self.assertRaises(exception.ValidationError, guest.guest_couple_uncouple_nic, self.req) @mock.patch.object(util, 'wsgi_path_item') def test_guest_uncouple_nic_bad_couple(self, mock_userid): body_str = '{"info": {"couple": "couple"}}' self.req.body = body_str mock_userid.return_value = FAKE_USERID self.assertRaises(exception.ValidationError, guest.guest_couple_uncouple_nic, self.req) @mock.patch.object(guest.VMHandler, 'create') def test_guest_create_unauthorized(self, mock_create): body_str = '{"guest": {"userid": "name1", "vcpus": 1, "memory": 1}}' self.req.body = body_str mock_create.side_effect = webob.exc.HTTPBadRequest self.assertRaises(webob.exc.HTTPBadRequest, guest.guest_create, self.req) zVMCloudConnector-1.6.3/zvmsdk/tests/unit/sdkwsgi/handlers/test_volume.py0000664000175000017510000003054414315210052026353 0ustar ruirui00000000000000# Copyright 2017,2022 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import datetime
import jwt
import json
import mock
import unittest

from zvmsdk import config
from zvmsdk.sdkwsgi import util
from zvmsdk.sdkwsgi.handlers import volume

CONF = config.CONF

FAKE_UUID = '00000000-0000-0000-0000-000000000000'


def set_conf(section, opt, value):
    # Test helper: override a zvmsdk CONF option in place.
    CONF[section][opt] = value


class FakeResp(object):
    """Minimal stand-in for a WSGI response object."""
    def __init__(self):
        self.body = {}


class FakeReq(object):
    """Minimal stand-in for a WSGI request object used by the handlers."""
    def __init__(self):
        self.headers = {}
        self.environ = {}
        self.__name__ = ''
        self.response = FakeResp()

    def __getitem__(self, name):
        return self.headers


class VolumeActionTestCase(unittest.TestCase):
    """Unit tests for volume.VolumeAction."""

    def setUp(self):
        self.volume_action = volume.VolumeAction()

    @mock.patch('zvmconnector.connector.ZVMConnector.send_request')
    def test_edit_fcp_template(self, mock_send_request):
        """Test edit_fcp_template()"""
        fcp_template_id = 'fake_id'
        # case1: replace storage_providers with default_sp_list
        kwargs = {
            'fcp_template_id': fcp_template_id,
            'storage_providers': ['sp8', 'sp9']}
        self.volume_action.edit_fcp_template(body=kwargs)
        mock_send_request.assert_called_with(
            'edit_fcp_template', fcp_template_id,
            name=None, description=None,
            fcp_devices=None, host_default=None,
            min_fcp_paths_count=None,
            default_sp_list=kwargs['storage_providers'])
        # case2: host_default use boolean
        kwargs = {
            'fcp_template_id': fcp_template_id,
            'host_default': True,
            'name': 'fake_name'}
        self.volume_action.edit_fcp_template(body=kwargs)
        mock_send_request.assert_called_with(
            'edit_fcp_template', fcp_template_id,
            name=kwargs['name'], host_default=True,
            description=None, fcp_devices=None,
            min_fcp_paths_count=None,
            default_sp_list=None)
        # case3: host_default use string 'true'
        kwargs = {
            'fcp_template_id': fcp_template_id,
            'host_default': 'true',
            'name': 'fake_name'}
        self.volume_action.edit_fcp_template(body=kwargs)
        mock_send_request.assert_called_with(
            'edit_fcp_template', fcp_template_id,
            name=kwargs['name'], host_default=True,
            description=None, fcp_devices=None,
            min_fcp_paths_count=None,
            default_sp_list=None)
        # case4: host_default use string 'FALSE'
        kwargs = {
            'fcp_template_id': fcp_template_id,
            'host_default': 'FALSE',
            'name': 'fake_name'}
        self.volume_action.edit_fcp_template(body=kwargs)
        mock_send_request.assert_called_with(
            'edit_fcp_template', fcp_template_id,
            name=kwargs['name'], host_default=False,
            description=None, fcp_devices=None,
            min_fcp_paths_count=None,
            default_sp_list=None)


class HandlersVolumeTest(unittest.TestCase):
    """Unit tests for the volume WSGI handler functions."""

    def setUp(self):
        # Disable auth and build a request carrying a valid (unexpired)
        # JWT so token validation passes.
        set_conf('wsgi', 'auth', 'none')
        expired_elapse = datetime.timedelta(seconds=100)
        expired_time = datetime.datetime.utcnow() + expired_elapse
        payload = jwt.encode({'exp': expired_time}, 'username')
        self.req = FakeReq()
        self.req.headers['X-Auth-Token'] = payload

    @mock.patch('zvmconnector.connector.ZVMConnector.send_request')
    def test_volume_attach(self, mock_attach):
        mock_attach.return_value = {'overallRC': 0}
        connection_info = {"assigner_id": "username",
                           "zvm_fcp": ["1fc5"],
                           "target_wwpn": ["0x5005076801401234"],
                           "target_lun": "0x0026000000000000",
                           "os_version": "rhel7.2",
                           "multipath": "true",
                           "mount_point": ""}
        body_str = {"info": {"connection": connection_info}}
        self.req.body = json.dumps(body_str)
        volume.volume_attach(self.req)
        mock_attach.assert_called_once_with(
            'volume_attach', connection_info)

    @mock.patch('zvmconnector.connector.ZVMConnector.send_request')
    def test_volume_detach(self, mock_detach):
        mock_detach.return_value = {'overallRC': 0}
        connection_info = {"assigner_id": "username",
                           "zvm_fcp": ["1fc5"],
                           "target_wwpn": ["0x5005076801401234"],
                           "target_lun": "0x0026000000000000",
                           "os_version": "rhel7.2",
                           "multipath": "true",
                           "mount_point": ""}
        body_str = {"info": {"connection": connection_info}}
        self.req.body = json.dumps(body_str)
        volume.volume_detach(self.req)
        mock_detach.assert_called_once_with(
            'volume_detach', connection_info)

    @mock.patch('zvmconnector.connector.ZVMConnector.send_request')
    def test_refresh_volume_bootmap(self, mock_refresh_bootmap):
        mock_refresh_bootmap.return_value = {'overallRC': 0}
        fcpchannels = ['5d71']
        wwpns = ['5005076802100c1b', '5005076802200c1b']
        lun = '0000000000000000'
        wwid = '600507640083826de00000000000605b'
        fcp_template_id = 'fake_fcp_tmpl_id'
        info = {"fcpchannel": fcpchannels,
                "wwpn": wwpns,
                "lun": lun,
                "wwid": wwid,
                "fcp_template_id": fcp_template_id}
        body_str = {"info": info}
        self.req.body = json.dumps(body_str)
        volume.volume_refresh_bootmap(self.req)
        # '' and [] are the defaults for transportfiles/guest_networks.
        mock_refresh_bootmap.assert_called_once_with(
            'volume_refresh_bootmap', fcpchannels, wwpns, lun,
            wwid, '', [], fcp_template_id)

    @mock.patch.object(util, 'wsgi_path_item')
    @mock.patch('zvmconnector.connector.ZVMConnector.send_request')
    def test_get_volume_connector(self, mock_send_request, mock_path_item):
        mock_send_request.return_value = {'overallRC': 0}
        mock_path_item.return_value = 'fakeuser'
        info = {'reserve': True, 'fcp_template_id': 'faketmpl'}
        body_str = {"info": info}
        self.req.body = json.dumps(body_str)
        # to pass validation.query_schema
        self.req.environ['wsgiorg.routing_args'] = (
            (), {'userid': 'fakeuser'})
        volume.get_volume_connector(self.req)
        mock_send_request.assert_called_once_with('get_volume_connector',
                                                  'fakeuser', True,
                                                  'faketmpl', None)

    @mock.patch.object(util, 'wsgi_path_item')
    @mock.patch('zvmconnector.connector.ZVMConnector.send_request')
    def test_get_fcp_usage(self, mock_send_request, mock_path_item):
        mock_send_request.return_value = {'overallRC': 0}
        mock_path_item.return_value = '1a00'
        # to pass validation.query_schema
        self.req.environ['wsgiorg.routing_args'] = (
            (), {'fcp_id': '1a00'})
        volume.get_fcp_usage(self.req)
        mock_send_request.assert_called_once_with('get_fcp_usage', '1a00')

    @mock.patch.object(util, 'wsgi_path_item')
    @mock.patch('zvmconnector.connector.ZVMConnector.send_request')
    def test_set_fcp_usage(self, mock_send_request, mock_path_item):
        mock_send_request.return_value = {'overallRC': 0}
        mock_path_item.return_value = '1a00'
        info = {'userid': 'fakeuser', 'reserved': 1, 'connections': 2,
                'fcp_template_id': 'faketmpl'}
        body_str = {"info": info}
        self.req.body = json.dumps(body_str)
        volume.set_fcp_usage(self.req)
        mock_send_request.assert_called_once_with('set_fcp_usage',
                                                  '1a00', 'fakeuser',
                                                  1, 2, 'faketmpl')

    @mock.patch('zvmconnector.connector.ZVMConnector.send_request')
    def test_create_fcp_template(self, mock_send_request):
        mock_send_request.return_value = {'overallRC': 0}
        body = {'name': 'tmpl name',
                'description': 'desc text',
                'fcp_devices': '1a00-1a0f;1b00, 1b05-1b0f',
                'host_default': 'yes',
                'storage_providers': [],
                'min_fcp_paths_count': 2}
        self.req.body = json.dumps(body)
        volume.create_fcp_template(self.req)
        # 'yes' must be normalized to the boolean True.
        mock_send_request.assert_called_once_with('create_fcp_template',
                                                  'tmpl name',
                                                  description='desc text',
                                                  fcp_devices='1a00-1a0f;1b00, 1b05-1b0f',
                                                  host_default=True,
                                                  default_sp_list=[],
                                                  min_fcp_paths_count=2)

    @mock.patch('zvmconnector.connector.ZVMConnector.send_request')
    def test_create_fcp_template_default_values(self, mock_send_request):
        """Test the default values was set correctly
        when create a FCP Multipath Template."""
        mock_send_request.return_value = {'overallRC': 0}
        body = {'name': 'tmpl name'}
        self.req.body = json.dumps(body)
        volume.create_fcp_template(self.req)
        mock_send_request.assert_called_once_with('create_fcp_template',
                                                  'tmpl name',
                                                  description='',
                                                  fcp_devices='',
                                                  host_default=False,
                                                  default_sp_list=[],
                                                  min_fcp_paths_count=None)

    @mock.patch('zvmsdk.sdkwsgi.handlers.volume.VolumeAction.edit_fcp_template')
    def test_edit_fcp_template(self, mock_va_edit_template):
        """The handler must merge the path template_id into the body."""
        mock_va_edit_template.return_value = 'fake_value'
        body_str = {
            'storage_providers': ['sp8', 'sp9'],
            'host_default': True,
            'name': 'fake_name'}
        self.req.body = json.dumps(body_str)
        # tmpl_id length maxLength is 36 defined in
        # zvmsdk/sdkwsgi/validation/parameter_types.py
        tmpl_id = 'fake_template_id' + '0' * 20
        self.req.environ['wsgiorg.routing_args'] = (
            (), {'template_id': tmpl_id})
        body_str['fcp_template_id'] = tmpl_id
        volume.edit_fcp_template(self.req)
        mock_va_edit_template.assert_called_once_with(body=body_str)
    @mock.patch('zvmconnector.connector.ZVMConnector.send_request')
    def test_get_fcp_templates(self, mock_send_request):
        mock_send_request.return_value = {'overallRC': 0}
        # To pass validation.query_schema
        self.req.environ['wsgiorg.routing_args'] = (
            (), {'template_id_list': ['id1', 'id2']})
        self.req.GET = {'template_id_list': ['id1', 'id2'],
                        'assigner_id': 'fakeuser',
                        'storage_providers': ['v7k']}
        volume.get_fcp_templates(self.req)
        mock_send_request.assert_called_once_with('get_fcp_templates',
                                                  ['id1', 'id2'],
                                                  'fakeuser',
                                                  ['v7k'],
                                                  None)

    @mock.patch('zvmconnector.connector.ZVMConnector.send_request')
    def test_get_fcp_templates_details(self, mock_send_request):
        mock_send_request.return_value = {'overallRC': 0}
        # To pass validation.query_schema
        self.req.environ['wsgiorg.routing_args'] = (
            (), {'template_id_list': ['id1', 'id2']})
        self.req.GET = {'template_id_list': ['id1', 'id2']}
        volume.get_fcp_templates_details(self.req)
        mock_send_request.assert_called_once_with('get_fcp_templates_details',
                                                  ['id1', 'id2'],
                                                  raw=False,
                                                  statistics=True,
                                                  sync_with_zvm=False)

    @mock.patch.object(util, 'wsgi_path_item')
    @mock.patch('zvmconnector.connector.ZVMConnector.send_request')
    def test_delete_fcp_template(self, mock_send_request, mock_path_item):
        mock_send_request.return_value = {'overallRC': 0}
        mock_path_item.return_value = 'fake tmpl id'
        volume.delete_fcp_template(self.req)
        mock_send_request.assert_called_once_with('delete_fcp_template',
                                                  'fake tmpl id')
zVMCloudConnector-1.6.3/zvmsdk/tests/unit/sdkwsgi/test_utils.py0000664000175000017510000000613614315210052024404 0ustar ruirui00000000000000# Copyright 2017 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import unittest

from zvmsdk.sdkwsgi import util


class SDKWsgiUtilsTestCase(unittest.TestCase):
    """Unit tests for zvmsdk.sdkwsgi.util helper functions."""

    def __init__(self, methodName='runTest'):
        super(SDKWsgiUtilsTestCase, self).__init__(methodName)

    def test_get_http_code_from_sdk_return(self):
        """Map SDK overallRC values onto HTTP status codes."""
        msg = {}
        # An overallRC that is already a valid HTTP error passes through.
        msg['overallRC'] = 404
        ret = util.get_http_code_from_sdk_return(msg, default=201)
        self.assertEqual(404, ret)

        msg['overallRC'] = 400
        ret = util.get_http_code_from_sdk_return(msg)
        self.assertEqual(400, ret)

        # Client-side SDK error (100) maps to 400.
        msg['overallRC'] = 100
        ret = util.get_http_code_from_sdk_return(msg, default=200)
        self.assertEqual(400, ret)

        # Success (0) returns the caller-supplied default.
        msg['overallRC'] = 0
        ret = util.get_http_code_from_sdk_return(msg, default=204)
        self.assertEqual(204, ret)

        # Any other RC falls back to 500.
        msg['overallRC'] = 300
        ret = util.get_http_code_from_sdk_return(msg, default=201)
        self.assertEqual(500, ret)

    def test_get_http_code_from_sdk_return_with_already_exist(self):
        """rc=212/rs=36 means 'already exists' and maps to 409."""
        msg = {}
        msg['overallRC'] = 8
        msg['rc'] = 212
        msg['rs'] = 36
        ret = util.get_http_code_from_sdk_return(msg,
            additional_handler=util.handle_already_exists)
        self.assertEqual(409, ret)

        msg['rc'] = 100
        ret = util.get_http_code_from_sdk_return(msg,
            additional_handler=util.handle_already_exists)
        self.assertEqual(500, ret)

    def test_mask_tuple_password(self):
        """X-Auth-Token header values are masked, even when empty."""
        source = [
            ('Content-Type', 'text/html; charset=UTF-8'),
            ('Content-Length', '0'),
            ('X-Auth-Token', 'eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.eyJleHAi'
                             'OjE2NTgyODEwODR9.LiUjn4yK7SNdsdArrgFBRr0wk9L_C'
                             'la7QQFxW94o3aw'),
            ('cache-control', 'no-cache')]
        target = [
            ('Content-Type', 'text/html; charset=UTF-8'),
            ('Content-Length', '0'),
            ('X-Auth-Token', '***'),
            ('cache-control', 'no-cache')]
        ret = util.mask_tuple_password(source)
        self.assertEqual(ret, target)

        source = [
            ('Content-Type', 'text/html; charset=UTF-8'),
            ('Content-Length', '0'),
            ('X-Auth-Token', ''),
            ('cache-control', 'no-cache')]
        target = [
            ('Content-Type', 'text/html; charset=UTF-8'),
            ('Content-Length', '0'),
            ('X-Auth-Token', '***'),
            ('cache-control', 'no-cache')]
        ret = util.mask_tuple_password(source)
        self.assertEqual(ret, target)
zVMCloudConnector-1.6.3/zvmsdk/tests/unit/sdkwsgi/__init__.py0000664000175000017510000000000013575566551023755 0ustar ruirui00000000000000zVMCloudConnector-1.6.3/zvmsdk/tests/unit/sdkwsgi/test_handler.py0000664000175000017510000010622214266177632024701 0ustar ruirui00000000000000# Copyright 2017,2021 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock import testtools import unittest import webob.exc from zvmsdk import exception from zvmsdk.sdkwsgi import handler from zvmsdk.sdkwsgi.handlers import tokens env = {'SERVER_SOFTWARE': 'WSGIServer/0.1 Python/2.7.3', 'SCRIPT_NAME': '', 'REQUEST_METHOD': 'GET', 'SERVER_PROTOCOL': 'HTTP/1.1', 'SERVER_PORT': '8001', 'HTTP_HOST': '127.0.0.1:8001', 'wsgi.version': (1, 0), 'HTTP_ACCEPT': '*/*', 'LESSCLOSE': '/usr/bin/lesspipe %s %s', 'wsgi.run_once': False, 'QUERY_STRING': '', 'wsgi.multiprocess': False, 'SERVER_NAME': 'localhost', 'REMOTE_ADDR': '127.0.0.1', 'wsgi.url_scheme': 'http', 'CONTENT_LENGTH': '', 'wsgi.multithread': True, 'CONTENT_TYPE': 'text/plain', 'REMOTE_HOST': 'localhost'} def dummy(status, headerlist): pass class GuestActionNegativeTest(unittest.TestCase): def setUp(self): self.env = env def test_guest_invalid_resource(self): self.env['PATH_INFO'] = '/guests/1/action' self.env['REQUEST_METHOD'] = 'GET' h = handler.SdkHandler() self.assertRaises(webob.exc.HTTPMethodNotAllowed, h, self.env, dummy) class GuestActionTest(unittest.TestCase): def setUp(self): self.env = env @mock.patch('zvmsdk.sdkwsgi.util.extract_json') def test_guest_start(self, mock_json): mock_json.return_value = {"action": "start"} self.env['PATH_INFO'] = '/guests/1/action' self.env['REQUEST_METHOD'] = 'POST' h = handler.SdkHandler() with mock.patch('zvmsdk.sdkwsgi.handlers.guest.VMAction.start') \ as start: start.return_value = {'overallRC': 0} h(self.env, dummy) start.assert_called_once_with('1', body={}) @mock.patch('zvmsdk.sdkwsgi.util.extract_json') def test_guest_deploy(self, mock_json): mock_json.return_value = {"action": "deploy"} self.env['PATH_INFO'] = '/guests/1/action' self.env['REQUEST_METHOD'] = 'POST' h = handler.SdkHandler() with mock.patch('zvmsdk.sdkwsgi.handlers.guest.VMAction.deploy') \ as deploy: deploy.return_value = {'overallRC': 0} h(self.env, dummy) deploy.assert_called_once_with('1', body={}) @mock.patch('zvmsdk.sdkwsgi.util.extract_json') def 
test_guest_stop(self, mock_json): mock_json.return_value = {"action": "stop"} self.env['PATH_INFO'] = '/guests/1/action' self.env['REQUEST_METHOD'] = 'POST' h = handler.SdkHandler() with mock.patch('zvmsdk.sdkwsgi.handlers.guest.VMAction.stop') \ as stop: stop.return_value = {'overallRC': 0} h(self.env, dummy) stop.assert_called_once_with('1', body={}) @mock.patch('zvmsdk.sdkwsgi.util.extract_json') def test_guest_softstop(self, mock_json): mock_json.return_value = {"action": "softstop"} self.env['PATH_INFO'] = '/guests/1/action' self.env['REQUEST_METHOD'] = 'POST' h = handler.SdkHandler() with mock.patch('zvmsdk.sdkwsgi.handlers.guest.VMAction.softstop') \ as stop: stop.return_value = {'overallRC': 0} h(self.env, dummy) stop.assert_called_once_with('1', body={}) @mock.patch('zvmsdk.sdkwsgi.util.extract_json') def test_guest_get_console_output(self, mock_json): mock_json.return_value = {"action": "get_console_output"} self.env['PATH_INFO'] = '/guests/1/action' self.env['REQUEST_METHOD'] = 'POST' h = handler.SdkHandler() url = 'zvmsdk.sdkwsgi.handlers.guest.VMAction.get_console_output' with mock.patch(url) as get_console_output: get_console_output.return_value = {'overallRC': 0} h(self.env, dummy) get_console_output.assert_called_once_with('1', body={}) class GuestHandlerNegativeTest(unittest.TestCase): def setUp(self): self.env = env def test_guest_invalid_resource(self): self.env['PATH_INFO'] = '/gueba' self.env['REQUEST_METHOD'] = 'GET' h = handler.SdkHandler() self.assertRaises(webob.exc.HTTPNotFound, h, self.env, dummy) def test_guest_list_invalid(self): self.env['PATH_INFO'] = '/guests' self.env['REQUEST_METHOD'] = 'PUT' h = handler.SdkHandler() self.assertRaises(webob.exc.HTTPMethodNotAllowed, h, self.env, dummy) def test_guest_stats_method_invalid(self): self.env['PATH_INFO'] = '/guests/stats' self.env['REQUEST_METHOD'] = 'PUT' h = handler.SdkHandler() self.assertRaises(webob.exc.HTTPMethodNotAllowed, h, self.env, dummy) def 
test_guest_get_info_method_invalid(self): self.env['PATH_INFO'] = '/guests/1/info' self.env['REQUEST_METHOD'] = 'PUT' h = handler.SdkHandler() self.assertRaises(webob.exc.HTTPMethodNotAllowed, h, self.env, dummy) def test_guest_get_info_resource_invalid(self): self.env['PATH_INFO'] = '/guests/1/info1' self.env['REQUEST_METHOD'] = 'PUT' h = handler.SdkHandler() self.assertRaises(webob.exc.HTTPNotFound, h, self.env, dummy) @testtools.skip('temply disable because of volume not support now') def test_guest_volume_invalid_method(self): self.env['PATH_INFO'] = '/guests/1/volumes' self.env['REQUEST_METHOD'] = 'PUT' h = handler.SdkHandler() self.assertRaises(webob.exc.HTTPMethodNotAllowed, h, self.env, dummy) def test_guest_network_interface_invalid_method(self): self.env['PATH_INFO'] = '/guests/1/interface' self.env['REQUEST_METHOD'] = 'PUT' h = handler.SdkHandler() self.assertRaises(webob.exc.HTTPMethodNotAllowed, h, self.env, dummy) class GuestHandlerTest(unittest.TestCase): def setUp(self): self.env = env @mock.patch.object(tokens, 'validate') def test_guest_list(self, mock_validate): self.env['PATH_INFO'] = '/guests' self.env['REQUEST_METHOD'] = 'GET' h = handler.SdkHandler() with mock.patch('zvmsdk.sdkwsgi.handlers.guest.VMHandler.list') \ as list: list.return_value = {'overallRC': 0} h(self.env, dummy) self.assertTrue(list.called) @mock.patch.object(tokens, 'validate') def test_guest_get_info(self, mock_validate): self.env['PATH_INFO'] = '/guests/1/info' self.env['REQUEST_METHOD'] = 'GET' h = handler.SdkHandler() with mock.patch('zvmsdk.sdkwsgi.handlers.guest.VMHandler.get_info') \ as get_info: get_info.return_value = {'overallRC': 0} h(self.env, dummy) get_info.assert_called_once_with(mock.ANY, '1') @mock.patch.object(tokens, 'validate') def test_guest_get_user_direct(self, mock_validate): self.env['PATH_INFO'] = '/guests/1/user_direct' self.env['REQUEST_METHOD'] = 'GET' h = handler.SdkHandler() with mock.patch('zvmsdk.sdkwsgi.handlers.guest.VMHandler.' 
'get_user_direct') as get_user_direct: get_user_direct.return_value = {'overallRC': 0} h(self.env, dummy) get_user_direct.assert_called_once_with(mock.ANY, '1') @mock.patch.object(tokens, 'validate') def test_guest_get(self, mock_validate): self.env['PATH_INFO'] = '/guests/1' self.env['REQUEST_METHOD'] = 'GET' h = handler.SdkHandler() func = 'zvmsdk.sdkwsgi.handlers.guest.VMHandler.get_definition_info' with mock.patch(func) as get: get.return_value = {'overallRC': 0} h(self.env, dummy) get.assert_called_once_with(mock.ANY, '1') @mock.patch('zvmsdk.sdkwsgi.util.extract_json') @mock.patch.object(tokens, 'validate') def test_guest_delete_nic(self, mock_validate, mock_json): mock_json.return_value = '' self.env['PATH_INFO'] = '/guests/1/nic/1000' self.env['REQUEST_METHOD'] = 'DELETE' h = handler.SdkHandler() func = 'zvmsdk.sdkwsgi.handlers.guest.VMHandler.delete_nic' with mock.patch(func) as delete_nic: delete_nic.return_value = {'overallRC': 0} h(self.env, dummy) delete_nic.assert_called_once_with('1', '1000', '') @mock.patch.object(tokens, 'validate') def test_guest_get_power_state_real(self, mock_validate): self.env['PATH_INFO'] = '/guests/1/power_state_real' self.env['REQUEST_METHOD'] = 'GET' h = handler.SdkHandler() func = 'zvmsdk.sdkwsgi.handlers.guest.VMHandler'\ '.get_power_state_real' with mock.patch(func) as get_power: get_power.return_value = {'overallRC': 0} h(self.env, dummy) get_power.assert_called_once_with(mock.ANY, '1') @mock.patch.object(tokens, 'validate') def test_guest_get_power_state(self, mock_validate): self.env['PATH_INFO'] = '/guests/1/power_state' self.env['REQUEST_METHOD'] = 'GET' h = handler.SdkHandler() func = 'zvmsdk.sdkwsgi.handlers.guest.VMHandler'\ '.get_power_state' with mock.patch(func) as get_power: get_power.return_value = {'overallRC': 0} h(self.env, dummy) get_power.assert_called_once_with(mock.ANY, '1') @mock.patch('zvmsdk.sdkwsgi.util.extract_json') @mock.patch.object(tokens, 'validate') def test_guest_create(self, 
mock_validate, mock_json): mock_json.return_value = {} self.env['PATH_INFO'] = '/guests' self.env['REQUEST_METHOD'] = 'POST' h = handler.SdkHandler() func = 'zvmsdk.sdkwsgi.handlers.guest.VMHandler.create' with mock.patch(func) as create: create.return_value = {'overallRC': 0} h(self.env, dummy) self.assertTrue(create.called) @mock.patch('zvmsdk.sdkwsgi.util.extract_json') @mock.patch.object(tokens, 'validate') def test_guest_delete(self, mock_validate, mock_json): mock_json.return_value = {} self.env['PATH_INFO'] = '/guests/1' self.env['REQUEST_METHOD'] = 'DELETE' h = handler.SdkHandler() func = 'zvmsdk.sdkwsgi.handlers.guest.VMHandler.delete' with mock.patch(func) as delete: delete.return_value = {'overallRC': 0} h(self.env, dummy) delete.assert_called_once_with('1') @mock.patch('zvmsdk.sdkwsgi.util.extract_json') @mock.patch.object(tokens, 'validate') def test_guest_create_nic(self, mock_validate, mock_json): mock_json.return_value = {} self.env['PATH_INFO'] = '/guests/1/nic' self.env['REQUEST_METHOD'] = 'POST' h = handler.SdkHandler() func = 'zvmsdk.sdkwsgi.handlers.guest.VMHandler.create_nic' with mock.patch(func) as create_nic: create_nic.return_value = {'overallRC': 0} h(self.env, dummy) create_nic.assert_called_once_with('1', body={}) @mock.patch('zvmsdk.sdkwsgi.util.extract_json') @mock.patch.object(tokens, 'validate') def test_guest_update_nic(self, mock_validate, mock_json): mock_json.return_value = {} self.env['PATH_INFO'] = '/guests/1/nic/1000' self.env['REQUEST_METHOD'] = 'PUT' h = handler.SdkHandler() func = 'zvmsdk.sdkwsgi.handlers.guest.VMHandler.nic_couple_uncouple' with mock.patch(func) as update_nic: update_nic.return_value = {'overallRC': 0} h(self.env, dummy) update_nic.assert_called_once_with('1', '1000', body={}) @mock.patch.object(tokens, 'validate') def test_guest_get_stats_empty_userid_list(self, mock_validate): self.env['wsgiorg.routing_args'] = () self.env['PATH_INFO'] = '/guests/stats' self.env['REQUEST_METHOD'] = 'GET' 
self.env['QUERY_STRING'] = '' h = handler.SdkHandler() func = 'zvmconnector.connector.ZVMConnector.send_request' with mock.patch(func) as get_info: get_info.return_value = {'overallRC': 0} h(self.env, dummy) get_info.assert_called_once_with('guest_inspect_stats', []) @mock.patch.object(tokens, 'validate') def test_guest_get_stats_userid_list(self, mock_validate): self.env['wsgiorg.routing_args'] = () self.env['PATH_INFO'] = '/guests/stats' self.env['REQUEST_METHOD'] = 'GET' self.env['QUERY_STRING'] = 'userid=l1,l2' h = handler.SdkHandler() func = 'zvmconnector.connector.ZVMConnector.send_request' with mock.patch(func) as get_info: get_info.return_value = {'overallRC': 0} h(self.env, dummy) get_info.assert_called_once_with('guest_inspect_stats', ['l1', 'l2']) @mock.patch.object(tokens, 'validate') def test_guest_get_stats_invalid(self, mock_validate): self.env['wsgiorg.routing_args'] = () self.env['PATH_INFO'] = '/guests/stats' self.env['REQUEST_METHOD'] = 'GET' self.env['QUERY_STRING'] = 'userid=l1,l2&userd=l3,l4' h = handler.SdkHandler() self.assertRaises(exception.ValidationError, h, self.env, dummy) @mock.patch.object(tokens, 'validate') def test_guest_get_interface_stats_empty_userid_list(self, mock_validate): self.env['wsgiorg.routing_args'] = () self.env['PATH_INFO'] = '/guests/interfacestats' self.env['REQUEST_METHOD'] = 'GET' self.env['QUERY_STRING'] = '' h = handler.SdkHandler() func = 'zvmconnector.connector.ZVMConnector.send_request' with mock.patch(func) as get_info: get_info.return_value = {'overallRC': 0} h(self.env, dummy) get_info.assert_called_once_with('guest_inspect_vnics', []) @mock.patch.object(tokens, 'validate') def test_guest_get_interface_stats_user_list(self, mock_validate): self.env['wsgiorg.routing_args'] = () self.env['PATH_INFO'] = '/guests/interfacestats' self.env['REQUEST_METHOD'] = 'GET' self.env['QUERY_STRING'] = 'userid=l1,l2' h = handler.SdkHandler() func = 'zvmconnector.connector.ZVMConnector.send_request' with mock.patch(func) 
as get_info: get_info.return_value = {'overallRC': 0} h(self.env, dummy) get_info.assert_called_once_with('guest_inspect_vnics', ['l1', 'l2']) @mock.patch.object(tokens, 'validate') def test_guest_get_interface_stats_invalid(self, mock_validate): self.env['wsgiorg.routing_args'] = () self.env['PATH_INFO'] = '/guests/interfacestats' self.env['REQUEST_METHOD'] = 'GET' self.env['QUERY_STRING'] = 'use=l1,l2' h = handler.SdkHandler() self.assertRaises(exception.ValidationError, h, self.env, dummy) @mock.patch.object(tokens, 'validate') def test_guests_get_nic_info_without_limitation(self, mock_validate): self.env['wsgiorg.routing_args'] = () self.env['PATH_INFO'] = '/guests/nics' self.env['REQUEST_METHOD'] = 'GET' self.env['QUERY_STRING'] = '' h = handler.SdkHandler() func = 'zvmconnector.connector.ZVMConnector.send_request' with mock.patch(func) as guests_get_nic_info: guests_get_nic_info.return_value = {'overallRC': 0} h(self.env, dummy) guests_get_nic_info.assert_called_once_with('guests_get_nic_info', userid=None, nic_id=None, vswitch=None) @mock.patch.object(tokens, 'validate') def test_guests_get_nic_info_with_userid(self, mock_validate): self.env['wsgiorg.routing_args'] = () self.env['PATH_INFO'] = '/guests/nics' self.env['REQUEST_METHOD'] = 'GET' self.env['QUERY_STRING'] = 'userid=test' h = handler.SdkHandler() func = 'zvmconnector.connector.ZVMConnector.send_request' with mock.patch(func) as guests_get_nic_info: guests_get_nic_info.return_value = {'overallRC': 0} h(self.env, dummy) guests_get_nic_info.assert_called_once_with('guests_get_nic_info', userid='test', nic_id=None, vswitch=None) @testtools.skip('temply disable because of volume not support now') @mock.patch('zvmsdk.sdkwsgi.util.extract_json') @mock.patch.object(tokens, 'validate') def test_guest_attach_volume(self, mock_validate, mock_json): mock_json.return_value = {} self.env['wsgiorg.routing_args'] = () self.env['PATH_INFO'] = '/guests/1/volumes' self.env['REQUEST_METHOD'] = 'POST' h = 
handler.SdkHandler() func = 'zvmsdk.sdkwsgi.handlers.volume.VolumeAction.attach' with mock.patch(func) as attach: attach.return_value = {'overallRC': 0} h(self.env, dummy) attach.assert_called_once_with('1', {}) @testtools.skip('temply disable because of volume not support now') @mock.patch('zvmsdk.sdkwsgi.util.extract_json') @mock.patch.object(tokens, 'validate') def test_guest_detach_volume(self, mock_validate, mock_json): mock_json.return_value = {} self.env['wsgiorg.routing_args'] = () self.env['PATH_INFO'] = '/guests/1/volumes' self.env['REQUEST_METHOD'] = 'DELETE' h = handler.SdkHandler() func = 'zvmsdk.sdkwsgi.handlers.volume.VolumeAction.detach' with mock.patch(func) as detach: detach.return_value = {'overallRC': 0} h(self.env, dummy) detach.assert_called_once_with('1', {}) @mock.patch('zvmsdk.sdkwsgi.util.extract_json') @mock.patch.object(tokens, 'validate') def test_guest_delete_network_interface(self, mock_validate, mock_json): mock_json.return_value = {} self.env['PATH_INFO'] = '/guests/1/interface' self.env['REQUEST_METHOD'] = 'DELETE' h = handler.SdkHandler() func = ('zvmsdk.sdkwsgi.handlers.guest.VMHandler.' 'delete_network_interface') with mock.patch(func) as delete_network_interface: delete_network_interface.return_value = {'overallRC': 0} h(self.env, dummy) delete_network_interface.assert_called_once_with('1', body={}) @mock.patch('zvmsdk.sdkwsgi.util.extract_json') @mock.patch.object(tokens, 'validate') def test_guest_create_network_interface(self, mock_validate, mock_json): mock_json.return_value = {} self.env['PATH_INFO'] = '/guests/1/interface' self.env['REQUEST_METHOD'] = 'POST' h = handler.SdkHandler() func = ('zvmsdk.sdkwsgi.handlers.guest.VMHandler.' 
'create_network_interface') with mock.patch(func) as create_network_interface: create_network_interface.return_value = {'overallRC': 0} h(self.env, dummy) create_network_interface.assert_called_once_with('1', body={}) class ImageHandlerNegativeTest(unittest.TestCase): def setUp(self): self.env = env def test_image_create_invalid_method(self): self.env['PATH_INFO'] = '/images' self.env['REQUEST_METHOD'] = 'PUT' h = handler.SdkHandler() self.assertRaises(webob.exc.HTTPMethodNotAllowed, h, self.env, dummy) def test_image_get_root_disk_size_invalid(self): self.env['PATH_INFO'] = '/images/image1/root_size' self.env['REQUEST_METHOD'] = 'GET' h = handler.SdkHandler() self.assertRaises(webob.exc.HTTPNotFound, h, self.env, dummy) class ImageHandlerTest(unittest.TestCase): def setUp(self): self.env = env @mock.patch.object(tokens, 'validate') def test_image_root_disk_size(self, mock_validate): self.env['PATH_INFO'] = '/images/image1/root_disk_size' self.env['REQUEST_METHOD'] = 'GET' h = handler.SdkHandler() func = 'zvmconnector.connector.ZVMConnector.send_request' with mock.patch(func) as get_size: get_size.return_value = {'overallRC': 0} h(self.env, dummy) get_size.assert_called_once_with('image_get_root_disk_size', 'image1') @mock.patch('zvmsdk.sdkwsgi.util.extract_json') @mock.patch.object(tokens, 'validate') def test_image_create(self, mock_validate, mock_json): mock_json.return_value = {} self.env['PATH_INFO'] = '/images' self.env['REQUEST_METHOD'] = 'POST' h = handler.SdkHandler() func = 'zvmsdk.sdkwsgi.handlers.image.ImageAction.create' with mock.patch(func) as create: create.return_value = {'overallRC': 0} h(self.env, dummy) create.assert_called_once_with(body={}) @mock.patch('zvmsdk.sdkwsgi.util.extract_json') @mock.patch.object(tokens, 'validate') def test_image_delete(self, mock_validate, mock_json): mock_json.return_value = {} self.env['PATH_INFO'] = '/images/image1' self.env['REQUEST_METHOD'] = 'DELETE' h = handler.SdkHandler() func = 
'zvmconnector.connector.ZVMConnector.send_request' with mock.patch(func) as delete: delete.return_value = {'overallRC': 0} h(self.env, dummy) delete.assert_called_once_with('image_delete', 'image1') @mock.patch.object(tokens, 'validate') def test_image_query(self, mock_validate): self.env['PATH_INFO'] = '/images' self.env['REQUEST_METHOD'] = 'GET' h = handler.SdkHandler() func = 'zvmconnector.connector.ZVMConnector.send_request' with mock.patch(func) as query: query.return_value = {'overallRC': 0} h(self.env, dummy) query.assert_called_once_with('image_query', None) class HostHandlerNegativeTest(unittest.TestCase): def setUp(self): self.env = env def test_host_get_guest_list_invalid(self): self.env['PATH_INFO'] = '/host1/guest' self.env['REQUEST_METHOD'] = 'GET' h = handler.SdkHandler() self.assertRaises(webob.exc.HTTPNotFound, h, self.env, dummy) def test_host_get_resource_invalid(self): self.env['PATH_INFO'] = '/host1' self.env['REQUEST_METHOD'] = 'GET' h = handler.SdkHandler() self.assertRaises(webob.exc.HTTPNotFound, h, self.env, dummy) def test_host_get_info_invalid(self): self.env['PATH_INFO'] = '/host/inf' self.env['REQUEST_METHOD'] = 'GET' h = handler.SdkHandler() self.assertRaises(webob.exc.HTTPNotFound, h, self.env, dummy) def test_host_get_disk_size_invalid(self): self.env['PATH_INFO'] = '/host/disk_inf/d1' self.env['REQUEST_METHOD'] = 'GET' h = handler.SdkHandler() self.assertRaises(webob.exc.HTTPNotFound, h, self.env, dummy) class HostHandlerTest(unittest.TestCase): def setUp(self): self.env = env @mock.patch.object(tokens, 'validate') def test_host_get_guest_list(self, mock_validate): self.env['PATH_INFO'] = '/host/guests' self.env['REQUEST_METHOD'] = 'GET' h = handler.SdkHandler() with mock.patch('zvmsdk.sdkwsgi.handlers.host.HostAction' '.get_guest_list') as get_guest_list: get_guest_list.return_value = {'overallRC': 0} h(self.env, dummy) self.assertTrue(get_guest_list.called) @mock.patch.object(tokens, 'validate') def test_host_get_info(self, 
mock_validate): self.env['PATH_INFO'] = '/host' self.env['REQUEST_METHOD'] = 'GET' h = handler.SdkHandler() function = 'zvmsdk.sdkwsgi.handlers.host.HostAction.get_info' with mock.patch(function) as get_info: get_info.return_value = {'overallRC': 0} h(self.env, dummy) self.assertTrue(get_info.called) @mock.patch.object(tokens, 'validate') def test_host_get_disk_info(self, mock_validate): self.env['PATH_INFO'] = '/host/diskpool' self.env['REQUEST_METHOD'] = 'GET' h = handler.SdkHandler() function = 'zvmsdk.sdkwsgi.handlers.host.HostAction.diskpool_get_info' with mock.patch(function) as get_disk_info: get_disk_info.return_value = {'overallRC': 0} h(self.env, dummy) get_disk_info.assert_called_once_with(mock.ANY, None) @mock.patch.object(tokens, 'validate') def test_host_get_diskpool_volumes(self, mock_validate): self.env['PATH_INFO'] = '/host/diskpool_volumes' self.env['REQUEST_METHOD'] = 'GET' h = handler.SdkHandler() function = 'zvmsdk.sdkwsgi.handlers.host.HostAction.'\ 'get_diskpool_volumes' with mock.patch(function) as get_diskpool_volumes: get_diskpool_volumes.return_value = {'overallRC': 0} h(self.env, dummy) get_diskpool_volumes.assert_called_once_with(mock.ANY, None) @mock.patch.object(tokens, 'validate') def test_host_get_volume_info(self, mock_validate): self.env['PATH_INFO'] = '/host/volume' self.env['REQUEST_METHOD'] = 'GET' h = handler.SdkHandler() function = 'zvmsdk.sdkwsgi.handlers.host.HostAction.'\ 'get_volume_info' with mock.patch(function) as get_volume_info: get_volume_info.return_value = {'overallRC': 0} h(self.env, dummy) get_volume_info.assert_called_once_with(mock.ANY, None) class VswitchHandlerNegativeTest(unittest.TestCase): def setUp(self): self.env = env def test_vswitch_put_method_invalid(self): self.env['PATH_INFO'] = '/vswitches' self.env['REQUEST_METHOD'] = 'PUT' h = handler.SdkHandler() self.assertRaises(webob.exc.HTTPMethodNotAllowed, h, self.env, dummy) class VswitchHandlerTest(unittest.TestCase): def setUp(self): self.env = env 
# NOTE(review): VswitchHandlerTest request-routing tests: list (GET
# /vswitches), create (POST), delete/query (/vswitches/vsw1), and the start of
# update (PUT). Each patches VswitchAction and asserts the dispatcher invoked
# it with the path parameter. Collapsed line kept byte-identical.
@mock.patch.object(tokens, 'validate') def test_vswitch_list(self, mock_validate): self.env['PATH_INFO'] = '/vswitches' self.env['REQUEST_METHOD'] = 'GET' h = handler.SdkHandler() function = 'zvmsdk.sdkwsgi.handlers.vswitch.VswitchAction.list' with mock.patch(function) as list: list.return_value = {'overallRC': 0} h(self.env, dummy) self.assertTrue(list.called) @mock.patch('zvmsdk.sdkwsgi.util.extract_json') @mock.patch.object(tokens, 'validate') def test_vswitch_create(self, mock_validate, mock_json): mock_json.return_value = {} self.env['PATH_INFO'] = '/vswitches' self.env['REQUEST_METHOD'] = 'POST' h = handler.SdkHandler() function = 'zvmsdk.sdkwsgi.handlers.vswitch.VswitchAction.create' with mock.patch(function) as create: create.return_value = {'overallRC': 0} h(self.env, dummy) self.assertTrue(create.called) @mock.patch.object(tokens, 'validate') def test_vswitch_delete(self, mock_validate): self.env['PATH_INFO'] = '/vswitches/vsw1' self.env['REQUEST_METHOD'] = 'DELETE' h = handler.SdkHandler() function = 'zvmsdk.sdkwsgi.handlers.vswitch.VswitchAction.delete' with mock.patch(function) as delete: delete.return_value = {'overallRC': 0} h(self.env, dummy) delete.assert_called_once_with('vsw1') @mock.patch.object(tokens, 'validate') def test_vswitch_query(self, mock_validate): self.env['PATH_INFO'] = '/vswitches/vsw1' self.env['REQUEST_METHOD'] = 'GET' h = handler.SdkHandler() function = 'zvmsdk.sdkwsgi.handlers.vswitch.VswitchAction.query' with mock.patch(function) as query: query.return_value = {'overallRC': 0} h(self.env, dummy) query.assert_called_once_with('vsw1') @mock.patch('zvmsdk.sdkwsgi.util.extract_json') @mock.patch.object(tokens, 'validate') def test_vswitch_update(self, mock_validate, mock_json): mock_json.return_value = {} self.env['PATH_INFO'] = '/vswitches/vsw1' self.env['REQUEST_METHOD'] = 'PUT' h = handler.SdkHandler() function = 'zvmsdk.sdkwsgi.handlers.vswitch.VswitchAction.update' with mock.patch(function) as update: update.return_value = 
# NOTE(review): tail of test_vswitch_update, then VolumeHandlerTest begins:
# get_volume_connector (GET /volumes/conn/<userid> with reserve info body),
# create_fcp_template (POST /volumes/fcptemplates) and the start of
# get_fcp_templates (GET). Collapsed line kept byte-identical.
{'overallRC': 0} h(self.env, dummy) update.assert_called_once_with('vsw1', body={}) class VolumeHandlerTest(unittest.TestCase): def setUp(self): self.env = env @mock.patch('zvmsdk.sdkwsgi.util.extract_json') @mock.patch.object(tokens, 'validate') def test_get_volume_connector(self, mock_validate, mock_json): mock_json.return_value = { 'info': { 'reserve': True } } self.env['PATH_INFO'] = '/volumes/conn/test0001' self.env['REQUEST_METHOD'] = 'GET' h = handler.SdkHandler() function = 'zvmsdk.sdkwsgi.handlers.volume.VolumeAction.' \ 'get_volume_connector' with mock.patch(function) as get_volume_connector: get_volume_connector.return_value = {'overallRC': 0} h(self.env, dummy) get_volume_connector.assert_called_once_with(mock.ANY, 'test0001', True, None, None) @mock.patch('zvmsdk.sdkwsgi.util.extract_json') @mock.patch.object(tokens, 'validate') def test_create_fcp_template(self, mock_validate, mock_json): mock_json.return_value = {} self.env['PATH_INFO'] = '/volumes/fcptemplates' self.env['REQUEST_METHOD'] = 'POST' h = handler.SdkHandler() function = 'zvmsdk.sdkwsgi.handlers.volume.VolumeAction.' \ 'create_fcp_template' with mock.patch(function) as create_fcp_template: create_fcp_template.return_value = {'overallRC': 0} h(self.env, dummy) create_fcp_template.assert_called_once_with(body={}) @mock.patch('zvmsdk.sdkwsgi.util.extract_json') @mock.patch.object(tokens, 'validate') def test_get_fcp_templates(self, mock_validate, mock_json): mock_json.return_value = {} self.env['PATH_INFO'] = '/volumes/fcptemplates' self.env['REQUEST_METHOD'] = 'GET' h = handler.SdkHandler() function = 'zvmsdk.sdkwsgi.handlers.volume.VolumeAction.' 
# NOTE(review): remaining VolumeHandlerTest FCP-template/usage routing tests.
# Defect to fix at the real source file: test_volume_refresh_bootmap is a
# verbatim copy of test_set_fcp_usage (PUT /volumes/fcp/1a00, patches
# set_fcp_usage) -- it never exercises the refresh-bootmap route. Cannot be
# safely patched in this collapsed extraction artifact, so only flagged here.
# The trailing tar member header and the Apache-2.0 banner of
# test_restclient.py are archive residue embedded in these lines; left as-is.
\ 'get_fcp_templates' with mock.patch(function) as get_fcp_templates: get_fcp_templates.return_value = {'overallRC': 0} h(self.env, dummy) get_fcp_templates.assert_called_once_with(mock.ANY, None, None, None, None) @mock.patch('zvmsdk.sdkwsgi.util.extract_json') @mock.patch.object(tokens, 'validate') def test_get_fcp_templates_details(self, mock_validate, mock_json): mock_json.return_value = {} self.env['PATH_INFO'] = '/volumes/fcptemplates/detail' self.env['REQUEST_METHOD'] = 'GET' h = handler.SdkHandler() function = 'zvmsdk.sdkwsgi.handlers.volume.VolumeAction.' \ 'get_fcp_templates_details' with mock.patch(function) as get_fcp_templates_details: get_fcp_templates_details.return_value = {'overallRC': 0} h(self.env, dummy) get_fcp_templates_details.assert_called_once_with(mock.ANY, None, False, True, False) @mock.patch('zvmsdk.sdkwsgi.util.extract_json') @mock.patch.object(tokens, 'validate') def test_delete_fcp_template(self, mock_validate, mock_json): mock_json.return_value = {} self.env['PATH_INFO'] = '/volumes/fcptemplates/fakeid' self.env['REQUEST_METHOD'] = 'DELETE' h = handler.SdkHandler() function = 'zvmsdk.sdkwsgi.handlers.volume.VolumeAction.' \ 'delete_fcp_template' with mock.patch(function) as delete_fcp_template: delete_fcp_template.return_value = {'overallRC': 0} h(self.env, dummy) delete_fcp_template.assert_called_once_with('fakeid') @mock.patch('zvmsdk.sdkwsgi.util.extract_json') @mock.patch.object(tokens, 'validate') def test_edit_fcp_template(self, mock_validate, mock_json): mock_json.return_value = {} self.env['PATH_INFO'] = '/volumes/fcptemplates/fakeid' self.env['REQUEST_METHOD'] = 'PUT' h = handler.SdkHandler() function = 'zvmsdk.sdkwsgi.handlers.volume.VolumeAction.' 
\ 'edit_fcp_template' with mock.patch(function) as edit_fcp_template: edit_fcp_template.return_value = {'overallRC': 0} h(self.env, dummy) edit_fcp_template.assert_called_once_with(body={'fcp_template_id': 'fakeid'}) @mock.patch('zvmsdk.sdkwsgi.util.extract_json') @mock.patch.object(tokens, 'validate') def test_get_fcp_usage(self, mock_validate, mock_json): mock_json.return_value = {} self.env['PATH_INFO'] = '/volumes/fcp/1a00' self.env['REQUEST_METHOD'] = 'GET' h = handler.SdkHandler() function = 'zvmsdk.sdkwsgi.handlers.volume.VolumeAction.' \ 'get_fcp_usage' with mock.patch(function) as get_fcp_usage: get_fcp_usage.return_value = {'overallRC': 0} h(self.env, dummy) get_fcp_usage.assert_called_once_with(mock.ANY, '1a00') @mock.patch('zvmsdk.sdkwsgi.util.extract_json') @mock.patch.object(tokens, 'validate') def test_set_fcp_usage(self, mock_validate, mock_json): mock_json.return_value = {} self.env['PATH_INFO'] = '/volumes/fcp/1a00' self.env['REQUEST_METHOD'] = 'PUT' h = handler.SdkHandler() function = 'zvmsdk.sdkwsgi.handlers.volume.VolumeAction.' \ 'set_fcp_usage' with mock.patch(function) as set_fcp_usage: set_fcp_usage.return_value = {'overallRC': 0} h(self.env, dummy) set_fcp_usage.assert_called_once_with('1a00', body={}) @mock.patch('zvmsdk.sdkwsgi.util.extract_json') @mock.patch.object(tokens, 'validate') def test_volume_refresh_bootmap(self, mock_validate, mock_json): mock_json.return_value = {} self.env['PATH_INFO'] = '/volumes/fcp/1a00' self.env['REQUEST_METHOD'] = 'PUT' h = handler.SdkHandler() function = 'zvmsdk.sdkwsgi.handlers.volume.VolumeAction.' 
\ 'set_fcp_usage' with mock.patch(function) as set_fcp_usage: set_fcp_usage.return_value = {'overallRC': 0} h(self.env, dummy) set_fcp_usage.assert_called_once_with('1a00', body={}) zVMCloudConnector-1.6.3/zvmsdk/tests/unit/sdkclientcases/0000775000175000017510000000000014315232035023156 5ustar ruirui00000000000000zVMCloudConnector-1.6.3/zvmsdk/tests/unit/sdkclientcases/test_restclient.py0000664000175000017510000015003114267725344026763 0ustar ruirui00000000000000# Copyright 2017, 2022 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
# NOTE(review): head of test_restclient.py -- imports, the FakeResp response
# stub (content is a JSON envelope string), and RESTClientTestCase.setUp plus
# the first guest_list/guest_delete routing tests. Oddity worth confirming at
# the real source: `self.headers.update(self.headers or {})` is a no-op
# self-update -- presumably meant to merge token/extra headers. Code below is
# kept byte-identical.
import json import mock import requests import unittest from zvmconnector import restclient from zvmsdk import config CONF = config.CONF class FakeResp(object): def __init__(self): self.content = '{"output": "{}"}' class RESTClientTestCase(unittest.TestCase): """Testcases for RESTClient.""" def setUp(self): self.client = restclient.RESTClient(ssl_enabled=False) self.fake_userid = 'userid01' self.base_url = 'http://127.0.0.1:8888' self.headers = {'Content-Type': 'application/json'} self.headers.update(self.headers or {}) self.response = FakeResp() self.client_ssl = restclient.RESTClient(ssl_enabled=True) self.base_url_ssl = 'https://127.0.0.1:8888' def test_init_ComputeAPI(self): self.assertTrue(isinstance(self.client, restclient.RESTClient)) def _tmp_token(self): token = '1234567890' return token @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_guest_list(self, get_token, request): method = 'GET' url = '/guests' body = None header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("guest_list") request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_guest_delete(self, get_token, request): method = 'DELETE' url = '/guests/%s' % self.fake_userid body = None header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("guest_delete", self.fake_userid) request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_guest_get_definition_info(self, get_token, request): method = 'GET' url = '/guests/%s' % self.fake_userid body = None header = self.headers full_uri = 
# NOTE(review): RESTClientTestCase guest lifecycle/action tests -- each mocks
# requests.request and _get_token, drives RESTClient.call(api_name, ...), and
# asserts the HTTP method/URI/JSON body the client composed. Defect to fix at
# the real source file: test_guest_deploy is a verbatim copy of
# test_guest_capture (calls "guest_capture" with action 'capture') and never
# exercises guest_deploy. Several tests (live_migrate, register, resize_cpus,
# adapters_info) are placeholder `pass` stubs. Code kept byte-identical.
self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("guest_get_definition_info", self.fake_userid) request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_guest_create(self, get_token, request): # method = 'POST' # url = '/guests' disks = [{'size': '3g', 'is_boot_disk': True}] # body = {'guest': {'userid': self.fake_userid, # 'vcpus': 1, # 'memory': 1024, # 'disk_list': disks, # 'user_profile': 'profile' # }} # body = json.dumps(body) # header = self.headers # full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("guest_create", self.fake_userid, 1, 1024, disk_list=disks, user_profile='profile') # request.assert_called_with(method, full_uri, # data=body, headers=header, # verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_guest_inspect_stats(self, get_token, request): method = 'GET' url = '/guests/stats?userid=%s' % self.fake_userid body = None header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("guest_inspect_stats", self.fake_userid) request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) fake_userid_string = 'userid1, userid2, userid3,userid4' url = '/guests/stats?userid=%s' % fake_userid_string full_uri = self.base_url + url self.client.call("guest_inspect_stats", fake_userid_string) request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_guest_inspect_vnics(self, get_token, request): method = 'GET' url = '/guests/interfacestats?userid=%s' % 
self.fake_userid body = None header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("guest_inspect_vnics", self.fake_userid) request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) fake_userid_string = 'userid1, userid2, userid3,userid4' url = '/guests/interfacestats?userid=%s' % fake_userid_string full_uri = self.base_url + url self.client.call("guest_inspect_vnics", fake_userid_string) request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_guests_get_nic_info(self, get_token, request): method = 'GET' url = '/guests/nics?userid=%s&nic_id=%s&vswitch=%s' % ( self.fake_userid, '1000', 'xcatvsw1') body = None header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("guests_get_nic_info", userid=self.fake_userid, nic_id='1000', vswitch='xcatvsw1') request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_guest_start(self, get_token, request): method = 'POST' url = '/guests/%s/action' % self.fake_userid body = {'action': 'start'} body = json.dumps(body) header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("guest_start", self.fake_userid) request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_guest_stop(self, get_token, request): method = 'POST' url = '/guests/%s/action' % self.fake_userid body = {'action': 'stop'} body = json.dumps(body) header = 
self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("guest_stop", self.fake_userid) request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_guest_softstop(self, get_token, request): method = 'POST' url = '/guests/%s/action' % self.fake_userid body = {'action': 'softstop'} body = json.dumps(body) header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("guest_softstop", self.fake_userid) request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_guest_softstop_parameter_set_zero(self, get_token, request): method = 'POST' url = '/guests/%s/action' % self.fake_userid body = {'action': 'softstop', 'timeout': 0, 'poll_interval': 0} body = json.dumps(body) header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("guest_softstop", self.fake_userid, timeout=0, poll_interval=0) request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_guest_pause(self, get_token, request): method = 'POST' url = '/guests/%s/action' % self.fake_userid body = {'action': 'pause'} body = json.dumps(body) header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("guest_pause", self.fake_userid) request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') 
@mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_guest_unpause(self, get_token, request): method = 'POST' url = '/guests/%s/action' % self.fake_userid body = {'action': 'unpause'} body = json.dumps(body) header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("guest_unpause", self.fake_userid) request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_guest_reboot(self, get_token, request): method = 'POST' url = '/guests/%s/action' % self.fake_userid body = {'action': 'reboot'} body = json.dumps(body) header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("guest_reboot", self.fake_userid) request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_guest_reset(self, get_token, request): method = 'POST' url = '/guests/%s/action' % self.fake_userid body = {'action': 'reset'} body = json.dumps(body) header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("guest_reset", self.fake_userid) request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_guest_get_console_output(self, get_token, request): method = 'POST' url = '/guests/%s/action' % self.fake_userid body = {'action': 'get_console_output'} body = json.dumps(body) header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() 
self.client.call("guest_get_console_output", self.fake_userid) request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_guest_live_migrate(self, get_token, request): pass @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_guest_register(self, get_token, request): pass @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_guest_deregister(self, get_token, request): pass @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_guest_live_resize_cpus(self, get_token, request): pass @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_guest_resize_cpus(self, get_token, request): pass @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_guest_resize_mem(self, get_token, request): method = 'POST' url = '/guests/%s/action' % self.fake_userid body = {'action': 'resize_mem', 'size': '4g'} body = json.dumps(body) header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("guest_resize_mem", self.fake_userid, '4g') request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_guest_live_resize_mem(self, get_token, request): method = 'POST' url = '/guests/%s/action' % self.fake_userid body = {'action': 'live_resize_mem', 'size': '4g'} body = json.dumps(body) header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("guest_live_resize_mem", 
self.fake_userid, '4g') request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_guest_grow_root_volume(self, get_token, request): method = 'POST' url = '/guests/%s/action' % self.fake_userid body = {'action': 'grow_root_volume', 'os_version': 'RHEL7.8'} body = json.dumps(body) header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("guest_grow_root_volume", self.fake_userid, 'RHEL7.8') request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_guest_capture(self, get_token, request): method = 'POST' url = '/guests/%s/action' % self.fake_userid body = {'action': 'capture', 'image': 'image_captured'} body = json.dumps(body) header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("guest_capture", self.fake_userid, 'image_captured') request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_guest_deploy(self, get_token, request): method = 'POST' url = '/guests/%s/action' % self.fake_userid body = {'action': 'capture', 'image': 'image_captured'} body = json.dumps(body) header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("guest_capture", self.fake_userid, 'image_captured') request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def 
test_guest_get_power_state_real(self, get_token, request): method = 'GET' url = '/guests/%s/power_state_real' % self.fake_userid body = None header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("guest_get_power_state_real", self.fake_userid) request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_guest_get_info(self, get_token, request): method = 'GET' url = '/guests/%s/info' % self.fake_userid body = None header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("guest_get_info", self.fake_userid) request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_guest_get_info_ssl(self, get_token, request): method = 'GET' url = '/guests/%s/info' % self.fake_userid body = None header = self.headers full_uri = self.base_url_ssl + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client_ssl.call("guest_get_info", self.fake_userid) request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_guest_get_user_direct(self, get_token, request): method = 'GET' url = '/guests/%s/user_direct' % self.fake_userid body = None header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("guest_get_user_direct", self.fake_userid) request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') 
# NOTE(review): RESTClientTestCase NIC / network-interface / disk tests and the
# volume FCP-template/usage routing tests. volume_attach/detach/
# refresh_bootmap and guest_get_adapters_info are placeholder `pass` stubs.
# The final test_set_fcp_usage method is cut off at the end of this extraction
# chunk (continues past view). Code below is kept byte-identical.
@mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_guest_get_adapters_info(self, get_token, request): pass @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_guest_create_nic(self, get_token, request): # method = 'POST' # url = '/guests/%s/nic' % self.fake_userid body = {'nic': {'vdev': '123', 'nic_id': '1234', 'mac_addr': 'xx:xx:xx:xx:xx:xx', 'active': False}} body = json.dumps(body) # header = self.headers # full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("guest_create_nic", self.fake_userid, vdev='123', nic_id='1234', mac_addr='xx:xx:xx:xx:xx:xx', active=False) request.assert_called_once() # request.assert_called_with(method, full_uri, # data=body, headers=header, # verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_guest_delete_nic(self, get_token, request): method = 'DELETE' url = '/guests/%s/nic/%s' % (self.fake_userid, '123') body = {'active': False} body = json.dumps(body) header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("guest_delete_nic", self.fake_userid, '123', active=False) request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_guest_nic_couple_to_vswitch(self, get_token, request): method = 'PUT' url = '/guests/%s/nic/%s' % (self.fake_userid, '123') body = {'info': {'couple': True, 'vswitch': 'vswitch1', 'active': False}} body = json.dumps(body) header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("guest_nic_couple_to_vswitch", self.fake_userid, '123', 'vswitch1', active=False) 
request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_guest_nic_couple_to_vswitch_vlan_id(self, get_token, request): method = 'PUT' url = '/guests/%s/nic/%s' % (self.fake_userid, '123') body = {'info': {'couple': True, 'vswitch': 'vswitch1', 'vlan_id': 1234, 'active': False}} body = json.dumps(body) header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("guest_nic_couple_to_vswitch", self.fake_userid, '123', 'vswitch1', vlan_id=1234, active=False) request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_guest_nic_uncouple_from_vswitch(self, get_token, request): method = 'PUT' url = '/guests/%s/nic/%s' % (self.fake_userid, '123') body = {'info': {'couple': False, 'active': False}} body = json.dumps(body) header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("guest_nic_uncouple_from_vswitch", self.fake_userid, '123', active=False) request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_guest_create_network_interface(self, get_token, request): method = 'POST' networks = [{'ip_addr': '12.12.12.12'}] url = '/guests/%s/interface' % self.fake_userid body = {'interface': {'os_version': 'rhel7.2', 'guest_networks': networks, 'active': False}} body = json.dumps(body) header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("guest_create_network_interface", self.fake_userid, 
'rhel7.2', networks, active=False) request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_guest_delete_network_interface(self, get_token, request): method = 'DELETE' url = '/guests/%s/interface' % self.fake_userid body = {'interface': {'os_version': 'rhel7.2', 'vdev': '123', 'active': False}} body = json.dumps(body) header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("guest_delete_network_interface", self.fake_userid, 'rhel7.2', '123', active=False) request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_guest_get_power_state(self, get_token, request): method = 'GET' url = '/guests/%s/power_state' % self.fake_userid body = None header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("guest_get_power_state", self.fake_userid) request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_guest_create_disks(self, get_token, request): method = 'POST' disks = [{'size': '3g'}] url = '/guests/%s/disks' % self.fake_userid body = {'disk_info': {'disk_list': disks}} body = json.dumps(body) header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("guest_create_disks", self.fake_userid, disks) request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def 
test_guest_delete_disks(self, get_token, request): method = 'DELETE' vdevs = ['0101', '0102'] url = '/guests/%s/disks' % self.fake_userid body = {'vdev_info': {'vdev_list': vdevs}} body = json.dumps(body) header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("guest_delete_disks", self.fake_userid, vdevs) request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_guest_config_minidisks(self, get_token, request): method = 'PUT' disks = [{'vdev': '0101', 'mntdir': '/mnt/0101'}] url = '/guests/%s/disks' % self.fake_userid body = {'disk_info': {'disk_list': disks}} body = json.dumps(body) header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("guest_config_minidisks", self.fake_userid, disks) request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_volume_attach(self, get_token, request): pass @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_volume_detach(self, get_token, request): pass @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_volume_refresh_bootmap(self, get_token, request): pass @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_get_volume_connector(self, get_token, request): method = 'GET' kwargs = {'reserve': True, 'fcp_template_id': 'fake id', 'storage_provider': 'v7k'} url = '/volumes/conn/fakeuser' header = self.headers body = { "info": { "reserve": True, "fcp_template_id": 'fake id', "storage_provider": "v7k" } } 
full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("get_volume_connector", 'fakeuser', **kwargs) request.assert_called_with(method, full_uri, data=json.dumps(body), headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_get_fcp_templates(self, get_token, request): method = 'GET' template_list = ['fake_template_id' + '0' * 20] url = '/volumes/fcptemplates?template_id_list=%s' % template_list full_uri = self.base_url + url header = self.headers request.return_value = self.response get_token.return_value = self._tmp_token() kwargs = {'template_id_list': template_list} args = [] self.client.call("get_fcp_templates", *args, **kwargs) body = None request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) assigner_id = 'assigner_id' kwargs = {'assigner_id': assigner_id} url = '/volumes/fcptemplates?assigner_id=%s' % assigner_id full_uri = self.base_url + url self.client.call("get_fcp_templates", *args, **kwargs) request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) default_sp_list = 'default_sp_list' kwargs = {'storage_providers': default_sp_list} url = '/volumes/fcptemplates?storage_providers=%s' % default_sp_list full_uri = self.base_url + url self.client.call("get_fcp_templates", *args, **kwargs) request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) host_default = 'host_default' kwargs = {'host_default': host_default} url = '/volumes/fcptemplates?host_default=%s' % host_default full_uri = self.base_url + url self.client.call("get_fcp_templates", *args, **kwargs) request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_get_fcp_templates_details(self, get_token, request): method = 'GET' 
template_list = ['fake_template_id' + '0' * 20] url = '/volumes/fcptemplates/detail?template_id_list=%s&' % template_list url += 'raw=False&statistics=True&sync_with_zvm=False' full_uri = self.base_url + url header = self.headers request.return_value = self.response get_token.return_value = self._tmp_token() kwargs = {'template_id_list': template_list} args = [] self.client.call("get_fcp_templates_details", *args, **kwargs) body = None request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) kwargs = {} url = '/volumes/fcptemplates/detail?' url += 'raw=False&statistics=True&sync_with_zvm=False' full_uri = self.base_url + url self.client.call("get_fcp_templates_details", *args, **kwargs) body = None request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_delete_fcp_template(self, get_token, request): method = 'DELETE' url = '/volumes/fcptemplates/92ac944a-fb7a-11ec-870c-02553600000f' header = self.headers body = None full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("delete_fcp_template", '92ac944a-fb7a-11ec-870c-02553600000f') request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_get_fcp_usage(self, get_token, request): method = 'GET' url = '/volumes/fcp/1a00' header = self.headers body = None full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("get_fcp_usage", '1a00') request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_set_fcp_usage(self, get_token, request): method 
= 'PUT' url = '/volumes/fcp/1a00' header = self.headers body = {'info': {'userid': 'fakeuser', 'reserved': 1, 'connections': 2, 'fcp_template_id': 'f0fd9e40-fb7a-11ec-ba2a-02553600000f'}} body = json.dumps(body) full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("set_fcp_usage", '1a00', 'fakeuser', 1, 2, 'f0fd9e40-fb7a-11ec-ba2a-02553600000f') request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) body = {'info': {'userid': 'fakeuser', 'reserved': 0, 'connections': 0, 'fcp_template_id': ''}} body = json.dumps(body) self.client.call("set_fcp_usage", '1a00', 'fakeuser', 0, 0, '') request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_create_fcp_template(self, get_token, request): method = 'POST' url = '/volumes/fcptemplates' header = self.headers body = {'name': 'tmpl name', 'description': 'desc text', 'fcp_devices': '1a00,1a03-1a04;1b00-1b05', 'host_default': True, 'default_sp_list': ['v5k', 'v7k', 'd8k']} body = json.dumps(body) full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("create_fcp_template", 'tmpl name', description='desc text', fcp_devices='1a00,1a03-1a04;1b00-1b05', host_default=True, default_sp_list=['v5k', 'v7k', 'd8k']) request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) body = {'name': 'tmpl name', 'description': 'desc text', 'fcp_devices': '1a00,1a03-1a04;1b00-1b05'} body = json.dumps(body) self.client.call("create_fcp_template", 'tmpl name', description='desc text', fcp_devices='1a00,1a03-1a04;1b00-1b05') request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def 
test_edit_fcp_template(self, get_token, request): method = 'PUT' fake_tmpl_id = 'fake_template_id' + '0' * 20 url = '/volumes/fcptemplates/%s' % fake_tmpl_id full_uri = self.base_url + url header = self.headers request.return_value = self.response get_token.return_value = self._tmp_token() kwargs = {'name': 'fake_name', 'description': 'desc'} self.client.call("edit_fcp_template", fake_tmpl_id, **kwargs) body = json.dumps(kwargs) request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_host_get_info(self, get_token, request): method = 'GET' url = '/host' body = None header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("host_get_info") request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_host_get_guest_list(self, get_token, request): method = 'GET' url = '/host/guests' body = None header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("host_get_guest_list") request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_host_get_diskpool_volumes(self, get_token, request): method = 'GET' url = '/host/diskpool_volumes?poolname=pool1' body = None header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("host_get_diskpool_volumes", disk_pool='pool1') request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) url = '/host/diskpool_volumes' full_uri = 
self.base_url + url self.client.call("host_get_diskpool_volumes") request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_host_diskpool_get_info(self, get_token, request): # wait host_diskpool_get_info bug fixed pass @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_host_get_volume_info(self, get_token, request): pass @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_host_get_ssi_info(self, get_token, request): pass @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_image_import(self, get_token, request): method = 'POST' image_uri = 'file:///tmp/100.img' image_meta = {'os_version': 'rhel7.2', 'md5sum': 'dummy'} url = '/images' body = {'image': {'image_name': '100.img', 'url': image_uri, 'image_meta': image_meta}} body = json.dumps(body) header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("image_import", '100.img', image_uri, image_meta) request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_image_query(self, get_token, request): method = 'GET' url = '/images' body = None header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("image_query") request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) url = '/images?imagename=test-name1' full_uri = self.base_url + url self.client.call("image_query", imagename='test-name1') request.assert_called_with(method, full_uri, data=body, headers=header, 
verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_image_delete(self, get_token, request): method = 'DELETE' url = '/images/%s' % '100.img' body = None header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("image_delete", '100.img') request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_image_export(self, get_token, request): method = 'PUT' destination = 'file:///tmp/export.img' url = '/images/%s' % '100.img' body = {'location': {'dest_url': destination}} body = json.dumps(body) header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("image_export", '100.img', destination) request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_image_get_root_disk_size(self, get_token, request): method = 'GET' url = '/images/%s/root_disk_size' % '100.img' body = None header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("image_get_root_disk_size", '100.img') request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_file_import(self, get_token, request): pass @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_file_export(self, get_token, request): pass @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def 
test_token_create(self, get_token, request): method = 'POST' url = '/token' body = None header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("token_create") request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_vswitch_get_list(self, get_token, request): method = 'GET' url = '/vswitches' body = None header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("vswitch_get_list") request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_vswitch_create(self, get_token, request): method = 'POST' url = '/vswitches' body = {'vswitch': {'name': 'dummy'}} body = json.dumps(body) header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("vswitch_create", 'dummy') request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_vswitch_delete(self, get_token, request): method = 'DELETE' url = '/vswitches/%s' % 'dummy' body = None header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("vswitch_delete", 'dummy') request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_vswitch_query(self, get_token, request): pass @mock.patch.object(requests, 'request') 
@mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_vswitch_grant_user(self, get_token, request): method = 'PUT' url = '/vswitches/%s' % 'dummy' body = {'vswitch': {'grant_userid': self.fake_userid}} body = json.dumps(body) header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("vswitch_grant_user", 'dummy', self.fake_userid) request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_vswitch_revoke_user(self, get_token, request): method = 'PUT' url = '/vswitches/%s' % 'dummy' body = {'vswitch': {'revoke_userid': self.fake_userid}} body = json.dumps(body) header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("vswitch_revoke_user", 'dummy', self.fake_userid) request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) @mock.patch.object(requests, 'request') @mock.patch('zvmconnector.restclient.RESTClient._get_token') def test_vswitch_set_vlan_id_for_user(self, get_token, request): method = 'PUT' url = '/vswitches/%s' % 'dummy' body = {'vswitch': {'user_vlan_id': {'userid': self.fake_userid, 'vlanid': 'vlan_id'}}} body = json.dumps(body) header = self.headers full_uri = self.base_url + url request.return_value = self.response get_token.return_value = self._tmp_token() self.client.call("vswitch_set_vlan_id_for_user", 'dummy', self.fake_userid, 'vlan_id') request.assert_called_with(method, full_uri, data=body, headers=header, verify=False) zVMCloudConnector-1.6.3/zvmsdk/tests/unit/sdkclientcases/__init__.py0000664000175000017510000000000013575566551025301 0ustar ruirui00000000000000zVMCloudConnector-1.6.3/zvmsdk/tests/unit/base.py0000775000175000017510000000273613575566551021502 0ustar 
ruirui00000000000000# Copyright 2017 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import unittest from zvmsdk import config CONF = config.CONF def set_conf(section, opt, value): CONF[section][opt] = value class SDKTestCase(unittest.TestCase): @classmethod def setUpClass(cls): # This can be used to set up confs before running all cases super(SDKTestCase, cls).setUpClass() cls.old_db_dir = CONF.database.dir set_conf('database', 'dir', '/tmp/') set_conf('zvm', 'disk_pool', 'ECKD:TESTPOOL') set_conf('image', 'sdk_image_repository', '/tmp/') set_conf('zvm', 'namelist', 'TSTNLIST') @classmethod def tearDownClass(cls): super(SDKTestCase, cls).tearDownClass() # Restore the original db path CONF.database.dir = cls.old_db_dir def setUp(self): super(SDKTestCase, self).setUp() def _fake_fun(self, value=None): return lambda *args, **kwargs: value zVMCloudConnector-1.6.3/zvmsdk/tests/unit/__init__.py0000664000175000017510000000000013575566551022302 0ustar ruirui00000000000000zVMCloudConnector-1.6.3/zvmsdk/tests/unit/test_smtclient.py0000664000175000017510000074430614315210052023603 0ustar ruirui00000000000000# Copyright 2017,2022 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import os
import mock
from mock import call
import tempfile
import time
import subprocess

from smtLayer import smt
from zvmsdk import config
from zvmsdk import database
from zvmsdk import dist
from zvmsdk import exception
from zvmsdk import smtclient
from zvmsdk import utils as zvmutils
from zvmsdk.tests.unit import base


CONF = config.CONF


class SDKSMTClientTestCases(base.SDKTestCase):
    """Test cases for smt zvm client."""

    def setUp(self):
        # NOTE(review): does not call super().setUp(); SDKTestCase.setUp only
        # delegates upward today, but consider adding the call — TODO confirm.
        self._smtclient = smtclient.SMTClient()

    def _generate_results(self, overallrc=0, rc=0, rs=0, errno=0, strerror='',
                          logentries=[], response=[]):
        # Helper: build a canned SMT result dict in the shape _request
        # consumes.
        # NOTE(review): mutable default arguments ([]) are shared across
        # calls; safe here only as long as callers never mutate the defaults.
        return {'rc': rc, 'errno': errno, 'strError': strerror,
                'overallRC': overallrc, 'logEntries': logentries,
                'rs': rs, 'response': response}

    @mock.patch.object(smt.SMT, 'request')
    def test_private_request_success(self, request):
        """_request passes the raw request string straight to smt."""
        requestData = "fake request"
        request.return_value = {'overallRC': 0}
        self._smtclient._request(requestData)
        request.assert_called_once_with(requestData)

    @mock.patch.object(smt.SMT, 'request')
    def test_private_request_failed(self, request):
        """A non-zero overallRC raises SDKSMTRequestFailed."""
        requestData = "fake request"
        request.return_value = {'overallRC': 1, 'logEntries': []}
        self.assertRaises(exception.SDKSMTRequestFailed,
                          self._smtclient._request, requestData)

    @mock.patch.object(smt.SMT, 'request')
    def test_private_request_failed_110(self, request):
        """rc -110 is reported as a temporary SMAPI-down internal error."""
        requestData = "fake request"
        request.return_value = {'overallRC': 25, 'rc': -110, 'rs': '0',
                                'logEntries': []}
        try:
            self._smtclient._request(requestData)
        except exception.SDKInternalError as e:
            data = ('This is likely to be caused by temporary z/VM '
                    'SMAPI down issue')
            self.assertTrue(str(e).__contains__(data))
            return
        # Reaching here means no exception was raised: fail loudly.
        raise Exception("should raise exception.SDKInternalError")

    @mock.patch.object(smt.SMT, 'request')
    def test_private_request_failed_596(self, request):
        """rc 596 points the caller at z/VM DIRMAINT error messages."""
        requestData = "fake request"
        request.return_value = {'overallRC': 1, 'rc': 596, 'rs': '1234',
                                'logEntries': []}
        try:
            self._smtclient._request(requestData)
        except exception.SDKSMTRequestFailed as e:
            data = ("Check "
                    " on z/VM DIRMAINT error messages")
            self.assertTrue(str(e).__contains__(data))
            return
        raise Exception("should raise exception.SDKSMTRequestFailed")

    @mock.patch.object(smt.SMT, 'request')
    def test_private_request_failed_396(self, request):
        """rc 396 points the caller at z/VM CP error messages."""
        requestData = "fake request"
        request.return_value = {'overallRC': 1, 'rc': 396, 'rs': '1234',
                                'logEntries': []}
        try:
            self._smtclient._request(requestData)
        except exception.SDKSMTRequestFailed as e:
            data = ("Check "
                    " on z/VM CP error messages")
            self.assertTrue(str(e).__contains__(data))
            return
        raise Exception("should raise exception.SDKSMTRequestFailed")

    # The power-control tests below each assert the exact smcli-style
    # request string ("PowerVM <userid> <action> ...") produced for one
    # public guest operation.

    @mock.patch.object(smtclient.SMTClient, '_request')
    def test_guest_start(self, request):
        """guest_start issues 'PowerVM <id> on'."""
        fake_userid = 'FakeID'
        requestData = "PowerVM FakeID on"
        request.return_value = {'overallRC': 0}
        self._smtclient.guest_start(fake_userid)
        request.assert_called_once_with(requestData)

    @mock.patch.object(smtclient.SMTClient, '_request')
    def test_guest_stop(self, request):
        """guest_stop without options issues a plain 'off'."""
        fake_userid = 'FakeID'
        requestData = "PowerVM FakeID off"
        request.return_value = {'overallRC': 0}
        self._smtclient.guest_stop(fake_userid)
        request.assert_called_once_with(requestData)

    @mock.patch.object(smtclient.SMTClient, '_request')
    def test_guest_stop_with_timeout(self, request):
        """timeout= adds --maxwait."""
        fake_userid = 'FakeID'
        requestData = "PowerVM FakeID off --maxwait 300"
        request.return_value = {'overallRC': 0}
        self._smtclient.guest_stop(fake_userid, timeout=300)
        request.assert_called_once_with(requestData)

    @mock.patch.object(smtclient.SMTClient, '_request')
    def test_guest_stop_with_poll_interval(self, request):
        """poll_interval= adds --poll in addition to --maxwait."""
        fake_userid = 'FakeID'
        rd = "PowerVM FakeID off --maxwait 300 --poll 10"
        request.return_value = {'overallRC': 0}
        self._smtclient.guest_stop(fake_userid, timeout=300,
                                   poll_interval=10)
        request.assert_called_once_with(rd)

    @mock.patch.object(smtclient.SMTClient, '_request')
    def test_guest_softstop(self, request):
        """guest_softstop always waits, with --maxwait/--poll."""
        fake_userid = 'FakeID'
        requestData = "PowerVM FakeID softoff --wait --maxwait 300 --poll 10"
        request.return_value = {'overallRC': 0}
        self._smtclient.guest_softstop(fake_userid, timeout=300,
                                       poll_interval=10)
        request.assert_called_once_with(requestData)

    @mock.patch.object(smtclient.SMTClient, 'get_power_state')
    @mock.patch.object(smtclient.SMTClient, '_request')
    def test_guest_pause(self, request, power_state):
        """guest_pause issues 'pause' when the guest is powered on."""
        power_state.return_value = 'on'
        fake_userid = 'FakeID'
        requestData = "PowerVM FakeID pause"
        request.return_value = {'overallRC': 0}
        self._smtclient.guest_pause(fake_userid)
        request.assert_called_once_with(requestData)

    @mock.patch.object(smtclient.SMTClient, 'get_power_state')
    @mock.patch.object(smtclient.SMTClient, '_request')
    def test_guest_unpause(self, request, power_state):
        """guest_unpause issues 'unpause' when the guest is powered on."""
        power_state.return_value = 'on'
        fake_userid = 'FakeID'
        requestData = "PowerVM FakeID unpause"
        request.return_value = {'overallRC': 0}
        self._smtclient.guest_unpause(fake_userid)
        request.assert_called_once_with(requestData)

    @mock.patch.object(smtclient.SMTClient, '_request')
    def test_get_power_state(self, request):
        """get_power_state parses '<id>: on' out of the status response."""
        fake_userid = 'FakeID'
        requestData = "PowerVM FakeID status"
        request.return_value = {'overallRC': 0,
                                'response': [fake_userid + ': on']}
        status = self._smtclient.get_power_state(fake_userid)
        request.assert_called_once_with(requestData)
        self.assertEqual('on', status)

    # The create_vm tests below each pin the exact 'makevm ...' request
    # string built for one combination of disk_list / CONF settings.

    @mock.patch.object(smtclient.SMTClient, 'add_mdisks')
    @mock.patch.object(smtclient.SMTClient, '_request')
    @mock.patch.object(database.GuestDbOperator, 'add_guest')
    def test_create_vm(self, add_guest, request, add_mdisks):
        """Default share unit yields --commandSetShare "RELATIVE 200"."""
        user_id = 'fakeuser'
        cpu = 2
        memory = 1024
        disk_list = [{'size': '1g',
                      'is_boot_disk': True,
                      'disk_pool': 'ECKD:eckdpool1',
                      'format': 'ext3'}]
        profile = 'osdflt'
        max_cpu = 10
        max_mem = '4G'
        base.set_conf('zvm', 'default_admin_userid', 'lbyuser1 lbyuser2')
        base.set_conf('zvm', 'user_root_vdev', '0100')
        base.set_conf('zvm', 'disk_pool', 'ECKD:TESTPOOL')
        rd = ('makevm fakeuser directory LBYONLY 1024m G --cpus 2 '
              '--profile osdflt --maxCPU 10 --maxMemSize 4G --setReservedMem '
              '--logonby lbyuser1:lbyuser2 --ipl 0100 '
              '--commandSetShare "RELATIVE 200"')
        self._smtclient.create_vm(user_id, cpu, memory, disk_list,
                                  profile, max_cpu, max_mem, '', '', '',
                                  [], {}, '', [])
        request.assert_called_with(rd)
        add_mdisks.assert_called_with(user_id, disk_list)
        add_guest.assert_called_with(user_id)

    @mock.patch.object(smtclient.SMTClient, 'add_mdisks')
    @mock.patch.object(smtclient.SMTClient, '_request')
    @mock.patch.object(database.GuestDbOperator, 'add_guest')
    def test_create_vm_swap_has_diskpool(self, add_guest, request,
                                         add_mdisks):
        """With a disk pool, a swap disk goes through add_mdisks."""
        user_id = 'fakeuser'
        cpu = 2
        memory = 1024
        disk_list = [{'size': '1g',
                      'is_boot_disk': True,
                      'disk_pool': 'ECKD:eckdpool1',
                      'format': 'ext3'},
                     {'size': '1g',
                      'format': 'swap'}]
        profile = 'osdflt'
        max_cpu = 10
        max_mem = '4G'
        base.set_conf('zvm', 'default_admin_userid', 'lbyuser1 lbyuser2')
        base.set_conf('zvm', 'user_root_vdev', '0100')
        base.set_conf('zvm', 'disk_pool', 'ECKD:TESTPOOL')
        base.set_conf('zvm', 'user_default_share_unit', 0)
        rd = ('makevm fakeuser directory LBYONLY 1024m G --cpus 2 '
              '--profile osdflt --maxCPU 10 --maxMemSize 4G --setReservedMem '
              '--logonby lbyuser1:lbyuser2 --ipl 0100')
        self._smtclient.create_vm(user_id, cpu, memory, disk_list,
                                  profile, max_cpu, max_mem, '', '', '',
                                  [], {}, '', [])
        request.assert_called_with(rd)
        add_mdisks.assert_called_with(user_id, disk_list)
        add_guest.assert_called_with(user_id)

    @mock.patch.object(smtclient.SMTClient, 'add_mdisks')
    @mock.patch.object(smtclient.SMTClient, '_request')
    @mock.patch.object(database.GuestDbOperator, 'add_guest')
    def test_create_vm_multi_no_diskpool(self, add_guest, request,
                                         add_mdisks):
        # (body of test_create_vm_multi_no_diskpool — its "def" line is in
        # the previous hunk)
        user_id = 'fakeuser'
        cpu = 2
        memory = 1024
        disk_list = [{'size': '1g',
                      'is_boot_disk': True,
                      'disk_pool': 'ECKD:eckdpool1',
                      'format': 'ext3'},
                     {'size': '1g',
                      'disk_pool': 'ECKD:eckdpool1',
                      'format': 'ext3'},
                     {'size': '1g',
                      'format': 'swap'}]
        profile = 'osdflt'
        max_cpu = 10
        max_mem = '4G'
        base.set_conf('zvm', 'default_admin_userid', 'lbyuser1 lbyuser2')
        base.set_conf('zvm', 'user_root_vdev', '0100')
        base.set_conf('zvm', 'disk_pool', None)
        base.set_conf('zvm', 'user_default_share_unit', 0)
        rd = ('makevm fakeuser directory LBYONLY 1024m G --cpus 2 '
              '--profile osdflt --maxCPU 10 --maxMemSize 4G --setReservedMem '
              '--logonby lbyuser1:lbyuser2 --ipl 0100')
        self._smtclient.create_vm(user_id, cpu, memory, disk_list,
                                  profile, max_cpu, max_mem, '', '', '',
                                  [], {}, '', [])
        request.assert_called_with(rd)
        add_mdisks.assert_called_with(user_id, disk_list)
        add_guest.assert_called_with(user_id)

    # The *_swap_no_diskpool_* tests: with no disk pool configured, a
    # lone swap disk is carved as a VDISK (--vdisk <vdev>:<size>) instead
    # of going through add_mdisks, and create_vm returns the disk_list
    # annotated with the assigned vdev.

    @mock.patch.object(smtclient.SMTClient, 'add_mdisks')
    @mock.patch.object(smtclient.SMTClient, '_request')
    @mock.patch.object(database.GuestDbOperator, 'add_guest')
    def test_create_vm_swap_no_diskpool_512M(self, add_guest, request,
                                             add_mdisks):
        """512M swap without a disk pool becomes --vdisk 0100:512M."""
        user_id = 'fakeuser'
        cpu = 2
        memory = 1024
        disk_list = [{'size': '512M', 'format': 'swap'}]
        profile = 'osdflt'
        max_cpu = 10
        max_mem = '4G'
        base.set_conf('zvm', 'default_admin_userid', 'lbyuser1 lbyuser2')
        base.set_conf('zvm', 'user_root_vdev', '0100')
        base.set_conf('zvm', 'disk_pool', None)
        base.set_conf('zvm', 'user_default_share_unit', 0)
        rd = ('makevm fakeuser directory LBYONLY 1024m G --cpus 2 '
              '--profile osdflt --maxCPU 10 --maxMemSize 4G --setReservedMem '
              '--logonby lbyuser1:lbyuser2 --vdisk 0100:512M')
        r = self._smtclient.create_vm(user_id, cpu, memory, disk_list,
                                      profile, max_cpu, max_mem, '', '', '',
                                      [], {}, '', [])
        request.assert_called_with(rd)
        add_mdisks.assert_not_called()
        add_guest.assert_called_with(user_id)
        expected = [{'size': '512M', 'format': 'swap', 'vdev': '0100'}]
        self.assertEqual(expected, r)

    @mock.patch.object(smtclient.SMTClient, 'add_mdisks')
    @mock.patch.object(smtclient.SMTClient, '_request')
    @mock.patch.object(database.GuestDbOperator, 'add_guest')
    def test_create_vm_swap_no_diskpool_1G(self, add_guest, request,
                                           add_mdisks):
        """1G swap without a disk pool becomes --vdisk 0100:1G."""
        user_id = 'fakeuser'
        cpu = 2
        memory = 1024
        disk_list = [{'size': '1G', 'format': 'swap'}]
        profile = 'osdflt'
        max_cpu = 10
        max_mem = '4G'
        base.set_conf('zvm', 'default_admin_userid', 'lbyuser1 lbyuser2')
        base.set_conf('zvm', 'user_root_vdev', '0100')
        base.set_conf('zvm', 'disk_pool', None)
        base.set_conf('zvm', 'user_default_share_unit', 0)
        rd = ('makevm fakeuser directory LBYONLY 1024m G --cpus 2 '
              '--profile osdflt --maxCPU 10 --maxMemSize 4G --setReservedMem '
              '--logonby lbyuser1:lbyuser2 --vdisk 0100:1G')
        r = self._smtclient.create_vm(user_id, cpu, memory, disk_list,
                                      profile, max_cpu, max_mem, '', '', '',
                                      [], {}, '', [])
        request.assert_called_with(rd)
        add_mdisks.assert_not_called()
        add_guest.assert_called_with(user_id)
        expected = [{'size': '1G', 'format': 'swap', 'vdev': '0100'}]
        self.assertEqual(expected, r)

    @mock.patch.object(smtclient.SMTClient, 'add_mdisks')
    @mock.patch.object(smtclient.SMTClient, '_request')
    @mock.patch.object(database.GuestDbOperator, 'add_guest')
    def test_create_vm_swap_no_diskpool_2048M(self, add_guest, request,
                                              add_mdisks):
        """2048M swap (the 2G boundary expressed in MB) is still a VDISK."""
        user_id = 'fakeuser'
        cpu = 2
        memory = 1024
        disk_list = [{'size': '2048M', 'format': 'swap'}]
        profile = 'osdflt'
        max_cpu = 10
        max_mem = '4G'
        base.set_conf('zvm', 'default_admin_userid', 'lbyuser1 lbyuser2')
        base.set_conf('zvm', 'user_root_vdev', '0100')
        base.set_conf('zvm', 'disk_pool', None)
        base.set_conf('zvm', 'user_default_share_unit', 0)
        rd = ('makevm fakeuser directory LBYONLY 1024m G --cpus 2 '
              '--profile osdflt --maxCPU 10 --maxMemSize 4G --setReservedMem '
              '--logonby lbyuser1:lbyuser2 --vdisk 0100:2048M')
        r = self._smtclient.create_vm(user_id, cpu, memory, disk_list,
                                      profile, max_cpu, max_mem, '', '', '',
                                      [], {}, '', [])
        request.assert_called_with(rd)
        add_mdisks.assert_not_called()
        add_guest.assert_called_with(user_id)
        expected = [{'size': '2048M', 'format': 'swap', 'vdev': '0100'}]
        self.assertEqual(expected, r)

    @mock.patch.object(smtclient.SMTClient, 'add_mdisks')
    @mock.patch.object(smtclient.SMTClient, '_request')
    @mock.patch.object(database.GuestDbOperator, 'add_guest')
    def test_create_vm_swap_no_diskpool_2G(self, add_guest, request,
                                           add_mdisks):
        """2G swap is the largest VDISK size accepted."""
        user_id = 'fakeuser'
        cpu = 2
        memory = 1024
        disk_list = [{'size': '2G', 'format': 'swap'}]
        profile = 'osdflt'
        max_cpu = 10
        max_mem = '4G'
        base.set_conf('zvm', 'default_admin_userid', 'lbyuser1 lbyuser2')
        base.set_conf('zvm', 'user_root_vdev', '0100')
        base.set_conf('zvm', 'disk_pool', None)
        base.set_conf('zvm', 'user_default_share_unit', 0)
        rd = ('makevm fakeuser directory LBYONLY 1024m G --cpus 2 '
              '--profile osdflt --maxCPU 10 --maxMemSize 4G --setReservedMem '
              '--logonby lbyuser1:lbyuser2 --vdisk 0100:2G')
        r = self._smtclient.create_vm(user_id, cpu, memory, disk_list,
                                      profile, max_cpu, max_mem, '', '', '',
                                      [], {}, '', [])
        request.assert_called_with(rd)
        add_mdisks.assert_not_called()
        add_guest.assert_called_with(user_id)
        expected = [{'size': '2G', 'format': 'swap', 'vdev': '0100'}]
        self.assertEqual(expected, r)

    @mock.patch.object(smtclient.SMTClient, 'add_mdisks')
    @mock.patch.object(smtclient.SMTClient, '_request')
    @mock.patch.object(database.GuestDbOperator, 'add_guest')
    def test_create_vm_swap_no_diskpool_4G(self, add_guest, request,
                                           add_mdisks):
        """Swap above 2G without a disk pool is rejected as invalid input."""
        user_id = 'fakeuser'
        cpu = 2
        memory = 1024
        disk_list = [{'size': '4G', 'format': 'swap'}]
        profile = 'osdflt'
        max_cpu = 10
        max_mem = '4G'
        base.set_conf('zvm', 'default_admin_userid', 'lbyuser1 lbyuser2')
        base.set_conf('zvm', 'user_root_vdev', '0100')
        base.set_conf('zvm', 'disk_pool', None)
        base.set_conf('zvm', 'user_default_share_unit', 0)
        self.assertRaises(exception.SDKInvalidInputFormat,
                          self._smtclient.create_vm, user_id, cpu, memory,
                          disk_list, profile, max_cpu, max_mem,
                          '', '', '', [], {}, '', [])
        # (closing assertion of test_create_vm_swap_no_diskpool_4G — its
        # "def" line is in the previous hunk)
        add_mdisks.assert_not_called()

    @mock.patch.object(smtclient.SMTClient, 'add_mdisks')
    @mock.patch.object(smtclient.SMTClient, '_request')
    @mock.patch.object(database.GuestDbOperator, 'add_guest')
    def test_create_vm_swap_no_diskpool_4096M(self, add_guest, request,
                                              add_mdisks):
        """4096M (over the 2G VDISK limit, in MB) is rejected too."""
        user_id = 'fakeuser'
        cpu = 2
        memory = 1024
        disk_list = [{'size': '4096M', 'format': 'swap'}]
        profile = 'osdflt'
        max_cpu = 10
        max_mem = '4G'
        base.set_conf('zvm', 'default_admin_userid', 'lbyuser1 lbyuser2')
        base.set_conf('zvm', 'user_root_vdev', '0100')
        base.set_conf('zvm', 'disk_pool', None)
        base.set_conf('zvm', 'user_default_share_unit', 0)
        self.assertRaises(exception.SDKInvalidInputFormat,
                          self._smtclient.create_vm, user_id, cpu, memory,
                          disk_list, profile, max_cpu, max_mem,
                          '', '', '', [], {}, '', [])
        add_mdisks.assert_not_called()

    @mock.patch.object(smtclient.SMTClient, 'add_mdisks')
    @mock.patch.object(smtclient.SMTClient, '_request')
    @mock.patch.object(database.GuestDbOperator, 'add_guest')
    def test_create_vm_root_no_diskpool_no_disk(self, add_guest, request,
                                                add_mdisks):
        """No disks at all: no --ipl, no --vdisk, no add_mdisks call."""
        user_id = 'fakeuser'
        cpu = 2
        memory = 1024
        disk_list = []
        profile = 'osdflt'
        max_cpu = 10
        max_mem = '4G'
        base.set_conf('zvm', 'default_admin_userid', 'lbyuser1 lbyuser2')
        base.set_conf('zvm', 'user_root_vdev', '0100')
        base.set_conf('zvm', 'disk_pool', None)
        base.set_conf('zvm', 'user_default_share_unit', 0)
        rd = ('makevm fakeuser directory LBYONLY 1024m G --cpus 2 '
              '--profile osdflt --maxCPU 10 --maxMemSize 4G --setReservedMem '
              '--logonby lbyuser1:lbyuser2')
        self._smtclient.create_vm(user_id, cpu, memory, disk_list,
                                  profile, max_cpu, max_mem, '', '', '',
                                  [], {}, '', [])
        request.assert_called_with(rd)
        add_mdisks.assert_not_called()
        add_guest.assert_called_with(user_id)

    @mock.patch.object(smtclient.SMTClient, 'add_mdisks')
    @mock.patch.object(smtclient.SMTClient, '_request')
    @mock.patch.object(database.GuestDbOperator, 'add_guest')
    def test_create_vm_root_no_diskpool(self, add_guest, request,
                                        add_mdisks):
        """A root disk with no pool still goes to add_mdisks, but no --ipl."""
        user_id = 'fakeuser'
        cpu = 2
        memory = 1024
        disk_list = [{'size': '1g', 'format': 'ext3'}]
        profile = 'osdflt'
        max_cpu = 10
        max_mem = '4G'
        base.set_conf('zvm', 'default_admin_userid', 'lbyuser1 lbyuser2')
        base.set_conf('zvm', 'user_root_vdev', '0100')
        base.set_conf('zvm', 'disk_pool', None)
        base.set_conf('zvm', 'user_default_share_unit', 0)
        rd = ('makevm fakeuser directory LBYONLY 1024m G --cpus 2 '
              '--profile osdflt --maxCPU 10 --maxMemSize 4G --setReservedMem '
              '--logonby lbyuser1:lbyuser2')
        self._smtclient.create_vm(user_id, cpu, memory, disk_list,
                                  profile, max_cpu, max_mem, '', '', '',
                                  [], {}, '', [])
        request.assert_called_with(rd)
        add_mdisks.assert_called_with(user_id, disk_list)
        add_guest.assert_called_with(user_id)

    @mock.patch.object(smtclient.SMTClient, 'add_mdisks')
    @mock.patch.object(smtclient.SMTClient, '_request')
    @mock.patch.object(database.GuestDbOperator, 'add_guest')
    def test_create_vm_cms(self, add_guest, request, add_mdisks):
        """Passing ipl 'cms' produces --ipl cms."""
        user_id = 'fakeuser'
        cpu = 2
        memory = 1024
        disk_list = [{'size': '1g',
                      'is_boot_disk': True,
                      'disk_pool': 'ECKD:eckdpool1',
                      'format': 'ext3'}]
        profile = 'osdflt'
        max_cpu = 10
        max_mem = '4G'
        base.set_conf('zvm', 'default_admin_userid', 'lbyuser1 lbyuser2')
        base.set_conf('zvm', 'user_root_vdev', '0100')
        base.set_conf('zvm', 'disk_pool', 'ECKD:TESTPOOL')
        base.set_conf('zvm', 'user_default_share_unit', 0)
        rd = ('makevm fakeuser directory LBYONLY 1024m G --cpus 2 '
              '--profile osdflt --maxCPU 10 --maxMemSize 4G --setReservedMem '
              '--logonby lbyuser1:lbyuser2 --ipl cms')
        self._smtclient.create_vm(user_id, cpu, memory, disk_list,
                                  profile, max_cpu, max_mem, 'cms', '', '',
                                  [], {}, '', [])
        request.assert_called_with(rd)
        add_mdisks.assert_called_with(user_id, disk_list)
        add_guest.assert_called_with(user_id)

    @mock.patch.object(smtclient.SMTClient, 'add_mdisks')
    @mock.patch.object(smtclient.SMTClient, '_request')
    @mock.patch.object(database.GuestDbOperator, 'add_guest')
    def test_create_vm_cms_account(self, add_guest, request, add_mdisks):
        """A non-empty account string is quoted into --account."""
        user_id = 'fakeuser'
        cpu = 2
        memory = 1024
        disk_list = [{'size': '1g',
                      'is_boot_disk': True,
                      'disk_pool': 'ECKD:eckdpool1',
                      'format': 'ext3'}]
        profile = 'osdflt'
        max_cpu = 10
        max_mem = '4G'
        account = "dummy account aaa"
        base.set_conf('zvm', 'default_admin_userid', 'lbyuser1 lbyuser2')
        base.set_conf('zvm', 'user_root_vdev', '0100')
        base.set_conf('zvm', 'disk_pool', 'ECKD:TESTPOOL')
        base.set_conf('zvm', 'user_default_share_unit', 0)
        rd = ('makevm fakeuser directory LBYONLY 1024m G --cpus 2 '
              '--profile osdflt --maxCPU 10 --maxMemSize 4G --setReservedMem '
              '--logonby lbyuser1:lbyuser2 --ipl cms '
              '--account "dummy account aaa"')
        self._smtclient.create_vm(user_id, cpu, memory, disk_list,
                                  profile, max_cpu, max_mem, 'cms', '', '',
                                  [], {}, account, None)
        request.assert_called_with(rd)
        add_mdisks.assert_called_with(user_id, disk_list)
        add_guest.assert_called_with(user_id)

    @mock.patch.object(smtclient.SMTClient, 'add_mdisks')
    @mock.patch.object(smtclient.SMTClient, '_request')
    @mock.patch.object(database.GuestDbOperator, 'add_guest')
    def test_create_vm_cms_comment(self, add_guest, request, add_mdisks):
        """Comment entries are joined with the '$@$@$' separator."""
        user_id = 'fakeuser'
        cpu = 2
        memory = 1024
        disk_list = [{'size': '1g',
                      'is_boot_disk': True,
                      'disk_pool': 'ECKD:eckdpool1',
                      'format': 'ext3'}]
        profile = 'osdflt'
        max_cpu = 10
        max_mem = '4G'
        account = "dummy account aaa"
        comment_list = ["comment1", "comment2 is comment"]
        base.set_conf('zvm', 'default_admin_userid', 'lbyuser1 lbyuser2')
        base.set_conf('zvm', 'user_root_vdev', '0100')
        base.set_conf('zvm', 'disk_pool', 'ECKD:TESTPOOL')
        base.set_conf('zvm', 'user_default_share_unit', 0)
        rd = ('makevm fakeuser directory LBYONLY 1024m G --cpus 2 '
              '--profile osdflt --maxCPU 10 --maxMemSize 4G --setReservedMem '
              '--logonby lbyuser1:lbyuser2 --ipl cms '
              '--account "dummy account aaa" '
              '--comment "comment1$@$@$comment2 is comment$@$@$"')
        self._smtclient.create_vm(user_id, cpu, memory, disk_list,
                                  profile, max_cpu, max_mem, 'cms', '', '',
                                  [], {}, account, comment_list)
request.assert_called_with(rd) add_mdisks.assert_called_with(user_id, disk_list) add_guest.assert_called_with(user_id) @mock.patch.object(smtclient.SMTClient, 'add_mdisks') @mock.patch.object(smtclient.SMTClient, '_request') @mock.patch.object(database.GuestDbOperator, 'add_guest') def test_create_vm_cms_cschedule(self, add_guest, request, add_mdisks): user_id = 'fakeuser' cpu = 2 memory = 1024 disk_list = [{'size': '1g', 'is_boot_disk': True, 'disk_pool': 'ECKD:eckdpool1', 'format': 'ext3'}] profile = 'osdflt' max_cpu = 10 max_mem = '4G' account = "dummy account aaa" base.set_conf('zvm', 'default_admin_userid', 'lbyuser1 lbyuser2') base.set_conf('zvm', 'user_root_vdev', '0100') base.set_conf('zvm', 'disk_pool', 'ECKD:TESTPOOL') base.set_conf('zvm', 'user_default_share_unit', 0) rd = ('makevm fakeuser directory LBYONLY 1024m G --cpus 2 ' '--profile osdflt --maxCPU 10 --maxMemSize 4G --setReservedMem ' '--logonby lbyuser1:lbyuser2 --ipl cms ' '--account "dummy account aaa" ' '--commandSchedule CEEPOOL') self._smtclient.create_vm(user_id, cpu, memory, disk_list, profile, max_cpu, max_mem, 'cms', '', '', [], {}, account, [], 'CEEPOOL') request.assert_called_with(rd) add_mdisks.assert_called_with(user_id, disk_list) add_guest.assert_called_with(user_id) @mock.patch.object(smtclient.SMTClient, 'add_mdisks') @mock.patch.object(smtclient.SMTClient, '_request') @mock.patch.object(database.GuestDbOperator, 'add_guest') def test_create_vm_cms_cshare(self, add_guest, request, add_mdisks): user_id = 'fakeuser' cpu = 2 memory = 1024 disk_list = [{'size': '1g', 'is_boot_disk': True, 'disk_pool': 'ECKD:eckdpool1', 'format': 'ext3'}] profile = 'osdflt' max_cpu = 10 max_mem = '4G' account = "dummy account aaa" base.set_conf('zvm', 'default_admin_userid', 'lbyuser1 lbyuser2') base.set_conf('zvm', 'user_root_vdev', '0100') base.set_conf('zvm', 'disk_pool', 'ECKD:TESTPOOL') rd = ('makevm fakeuser directory LBYONLY 1024m G --cpus 2 ' '--profile osdflt --maxCPU 10 --maxMemSize 4G 
--setReservedMem ' '--logonby lbyuser1:lbyuser2 --ipl cms ' '--account "dummy account aaa" ' '--commandSetShare "RELATIVE 125"') self._smtclient.create_vm(user_id, cpu, memory, disk_list, profile, max_cpu, max_mem, 'cms', '', '', [], {}, account, [], '', 'RELATIVE 125') request.assert_called_with(rd) add_mdisks.assert_called_with(user_id, disk_list) add_guest.assert_called_with(user_id) @mock.patch.object(smtclient.SMTClient, 'add_mdisks') @mock.patch.object(smtclient.SMTClient, '_request') @mock.patch.object(database.GuestDbOperator, 'add_guest') def test_create_vm_cms_cshare_unit(self, add_guest, request, add_mdisks): user_id = 'fakeuser' cpu = 2 memory = 1024 disk_list = [{'size': '1g', 'is_boot_disk': True, 'disk_pool': 'ECKD:eckdpool1', 'format': 'ext3'}] profile = 'osdflt' max_cpu = 10 max_mem = '4G' account = "dummy account aaa" base.set_conf('zvm', 'default_admin_userid', 'lbyuser1 lbyuser2') base.set_conf('zvm', 'user_root_vdev', '0100') base.set_conf('zvm', 'disk_pool', 'ECKD:TESTPOOL') base.set_conf('zvm', 'user_default_share_unit', 100) rd = ('makevm fakeuser directory LBYONLY 1024m G --cpus 2 ' '--profile osdflt --maxCPU 10 --maxMemSize 4G --setReservedMem ' '--logonby lbyuser1:lbyuser2 --ipl cms ' '--account "dummy account aaa" ' '--commandSetShare "RELATIVE 200"') self._smtclient.create_vm(user_id, cpu, memory, disk_list, profile, max_cpu, max_mem, 'cms', '', '', [], {}, account, [], '', '') request.assert_called_with(rd) add_mdisks.assert_called_with(user_id, disk_list) add_guest.assert_called_with(user_id) base.set_conf('zvm', 'user_default_share_unit', 0) @mock.patch.object(smtclient.SMTClient, 'add_mdisks') @mock.patch.object(smtclient.SMTClient, '_request') @mock.patch.object(database.GuestDbOperator, 'add_guest') def test_create_vm_cms_rdomain(self, add_guest, request, add_mdisks): user_id = 'fakeuser' cpu = 2 memory = 1024 disk_list = [{'size': '1g', 'is_boot_disk': True, 'disk_pool': 'ECKD:eckdpool1', 'format': 'ext3'}] profile = 'osdflt' 
max_cpu = 10 max_mem = '4G' account = "dummy account aaa" base.set_conf('zvm', 'default_admin_userid', 'lbyuser1 lbyuser2') base.set_conf('zvm', 'user_root_vdev', '0100') base.set_conf('zvm', 'disk_pool', 'ECKD:TESTPOOL') base.set_conf('zvm', 'user_default_share_unit', 0) rd = ('makevm fakeuser directory LBYONLY 1024m G --cpus 2 ' '--profile osdflt --maxCPU 10 --maxMemSize 4G --setReservedMem ' '--logonby lbyuser1:lbyuser2 --ipl cms ' '--account "dummy account aaa" ' '--commandRDomain Z15ONLY') self._smtclient.create_vm(user_id, cpu, memory, disk_list, profile, max_cpu, max_mem, 'cms', '', '', [], {}, account, [], '', '', 'Z15ONLY') request.assert_called_with(rd) add_mdisks.assert_called_with(user_id, disk_list) add_guest.assert_called_with(user_id) @mock.patch.object(smtclient.SMTClient, 'add_mdisks') @mock.patch.object(smtclient.SMTClient, '_request') @mock.patch.object(database.GuestDbOperator, 'add_guest') def test_create_vm_cms_pcif(self, add_guest, request, add_mdisks): user_id = 'fakeuser' cpu = 2 memory = 1024 disk_list = [{'size': '1g', 'is_boot_disk': True, 'disk_pool': 'ECKD:eckdpool1', 'format': 'ext3'}] profile = 'osdflt' max_cpu = 10 max_mem = '4G' account = "dummy account aaa" base.set_conf('zvm', 'default_admin_userid', 'lbyuser1 lbyuser2') base.set_conf('zvm', 'user_root_vdev', '0100') base.set_conf('zvm', 'disk_pool', 'ECKD:TESTPOOL') base.set_conf('zvm', 'user_default_share_unit', 0) rd = ('makevm fakeuser directory LBYONLY 1024m G --cpus 2 ' '--profile osdflt --maxCPU 10 --maxMemSize 4G --setReservedMem ' '--logonby lbyuser1:lbyuser2 --ipl cms ' '--account "dummy account aaa" ' '--commandPcif 100:200') self._smtclient.create_vm(user_id, cpu, memory, disk_list, profile, max_cpu, max_mem, 'cms', '', '', [], {}, account, [], '', '', '', '100:200') request.assert_called_with(rd) add_mdisks.assert_called_with(user_id, disk_list) add_guest.assert_called_with(user_id) @mock.patch.object(smtclient.SMTClient, 'add_mdisks') 
@mock.patch.object(smtclient.SMTClient, '_request') @mock.patch.object(database.GuestDbOperator, 'add_guest') def test_create_vm_cms_invalid_pcif_1(self, add_guest, request, add_mdisks): user_id = 'fakeuser' cpu = 2 memory = 1024 disk_list = [{'size': '1g', 'is_boot_disk': True, 'disk_pool': 'ECKD:eckdpool1', 'format': 'ext3'}] profile = 'osdflt' max_cpu = 10 max_mem = '4G' base.set_conf('zvm', 'default_admin_userid', 'lbyuser1 lbyuser2') base.set_conf('zvm', 'user_root_vdev', '0100') base.set_conf('zvm', 'disk_pool', 'ECKD:TESTPOOL') base.set_conf('zvm', 'user_default_share_unit', 0) self.assertRaises(exception.SDKInvalidInputFormat, self._smtclient.create_vm, user_id, cpu, memory, disk_list, profile, max_cpu, max_mem, 'cms', '', '', [], {}, '', [], '', '', '', '1000') @mock.patch.object(smtclient.SMTClient, 'add_mdisks') @mock.patch.object(smtclient.SMTClient, '_request') @mock.patch.object(database.GuestDbOperator, 'add_guest') def test_create_vm_cms_invalid_pcif_2(self, add_guest, request, add_mdisks): user_id = 'fakeuser' cpu = 2 memory = 1024 disk_list = [{'size': '1g', 'is_boot_disk': True, 'disk_pool': 'ECKD:eckdpool1', 'format': 'ext3'}] profile = 'osdflt' max_cpu = 10 max_mem = '4G' base.set_conf('zvm', 'default_admin_userid', 'lbyuser1 lbyuser2') base.set_conf('zvm', 'user_root_vdev', '0100') base.set_conf('zvm', 'disk_pool', 'ECKD:TESTPOOL') base.set_conf('zvm', 'user_default_share_unit', 0) self.assertRaises(exception.SDKInvalidInputFormat, self._smtclient.create_vm, user_id, cpu, memory, disk_list, profile, max_cpu, max_mem, 'cms', '', '', [], {}, '', [], '', '', '', '1000:2000:3000') @mock.patch.object(smtclient.SMTClient, 'add_mdisks') @mock.patch.object(smtclient.SMTClient, '_request') @mock.patch.object(database.GuestDbOperator, 'add_guest') def test_create_vm_boot_from_volume(self, add_guest, request, add_mdisks): user_id = 'fakeuser' cpu = 2 memory = 1024 disk_list = [{'size': '1g', 'disk_pool': 'ECKD:eckdpool1', 'format': 'ext3'}] profile = 
'osdflt' max_cpu = 10 max_mem = '4G' base.set_conf('zvm', 'default_admin_userid', 'lbyuser1 lbyuser2') base.set_conf('zvm', 'user_root_vdev', '0100') base.set_conf('zvm', 'disk_pool', 'ECKD:TESTPOOL') base.set_conf('zvm', 'user_default_share_unit', 100) ipl_from = '5c71' dedicate_vdevs = ['5c71', '5d71'] loaddev = {'portname': '5005076802400c1b', 'lun': '0000000000000000'} rd = ('makevm fakeuser directory LBYONLY 1024m G --cpus 2 ' '--profile osdflt --maxCPU 10 --maxMemSize 4G --setReservedMem ' '--logonby lbyuser1:lbyuser2 --ipl 5c71 ' '--dedicate "5c71 5d71" --commandSetShare "RELATIVE 200" ' '--loadportname 5005076802400c1b --loadlun 0000000000000000') self._smtclient.create_vm(user_id, cpu, memory, disk_list, profile, max_cpu, max_mem, ipl_from, '', '', dedicate_vdevs, loaddev, '', []) request.assert_called_with(rd) add_mdisks.assert_called_with(user_id, disk_list) add_guest.assert_called_with(user_id) @mock.patch.object(smtclient.SMTClient, 'add_mdisks') @mock.patch.object(smtclient.SMTClient, '_request') @mock.patch.object(database.GuestDbOperator, 'add_guest') def test_create_vm_boot_from_volume_only(self, add_guest, request, add_mdisks): user_id = 'fakeuser' cpu = 2 memory = 1024 disk_list = [] profile = 'osdflt' max_cpu = 10 max_mem = '4G' base.set_conf('zvm', 'default_admin_userid', 'lbyuser1 lbyuser2') base.set_conf('zvm', 'user_root_vdev', '0100') base.set_conf('zvm', 'disk_pool', 'ECKD:TESTPOOL') base.set_conf('zvm', 'user_default_share_unit', 0) ipl_from = '5c71' dedicate_vdevs = ['5c71', '5d71'] loaddev = {'portname': '5005076802400c1b', 'lun': '0000000000000000'} rd = ('makevm fakeuser directory LBYONLY 1024m G --cpus 2 ' '--profile osdflt --maxCPU 10 --maxMemSize 4G --setReservedMem ' '--logonby lbyuser1:lbyuser2 --ipl 5c71 ' '--dedicate "5c71 5d71" ' '--loadportname 5005076802400c1b --loadlun 0000000000000000') self._smtclient.create_vm(user_id, cpu, memory, disk_list, profile, max_cpu, max_mem, ipl_from, '', '', dedicate_vdevs, loaddev, '', []) 
request.assert_called_with(rd) add_mdisks.assert_not_called() add_guest.assert_called_with(user_id) @mock.patch.object(smtclient.SMTClient, 'add_mdisks') @mock.patch.object(smtclient.SMTClient, '_request') @mock.patch.object(database.GuestDbOperator, 'add_guest') def test_create_vm_cms_with_param(self, add_guest, request, add_mdisks): user_id = 'fakeuser' cpu = 2 memory = 1024 disk_list = [{'size': '1g', 'is_boot_disk': True, 'disk_pool': 'ECKD:eckdpool1', 'format': 'ext3'}] profile = 'osdflt' max_cpu = 10 max_mem = '4G' ipl_param = 'dummy' ipl_loadparam = 'load=1' base.set_conf('zvm', 'default_admin_userid', 'lbyuser1 lbyuser2') base.set_conf('zvm', 'user_root_vdev', '0100') base.set_conf('zvm', 'disk_pool', 'ECKD:TESTPOOL') base.set_conf('zvm', 'user_default_share_unit', 0) rd = ('makevm fakeuser directory LBYONLY 1024m G --cpus 2 ' '--profile osdflt --maxCPU 10 --maxMemSize 4G --setReservedMem ' '--logonby lbyuser1:lbyuser2 --ipl cms --iplParam dummy ' '--iplLoadparam load=1') self._smtclient.create_vm(user_id, cpu, memory, disk_list, profile, max_cpu, max_mem, 'cms', ipl_param, ipl_loadparam, [], {}, '', []) request.assert_called_with(rd) add_mdisks.assert_called_with(user_id, disk_list) add_guest.assert_called_with(user_id) @mock.patch.object(smtclient.SMTClient, '_request') def test_add_mdisk(self, request): userid = 'fakeuser' disk = {'size': '1g', 'disk_pool': 'ECKD:eckdpool1', 'format': 'ext3'} vdev = '0101' rd = ('changevm fakeuser add3390 eckdpool1 0101 1g --mode MR ' '--filesystem ext3') self._smtclient._add_mdisk(userid, disk, vdev), request.assert_called_once_with(rd) def test_add_mdisk_no_disk_pool(self): vdev = '0101' disk = {'size': '1g', 'format': 'ext3'} base.set_conf('zvm', 'disk_pool', None) self.assertRaises(exception.SDKGuestOperationError, self._smtclient._add_mdisk, 'fakeuser', disk, vdev) @mock.patch.object(smtclient.SMTClient, '_request') def test_add_mdisk_format_none(self, request): userid = 'fakeuser' disk = {'size': '1g', 'disk_pool': 
'ECKD:eckdpool1', 'format': 'none'} vdev = '0101' rd = ('changevm fakeuser add3390 eckdpool1 0101 1g --mode MR') self._smtclient._add_mdisk(userid, disk, vdev), request.assert_called_once_with(rd) @mock.patch.object(smtclient.SMTClient, '_request') def test_remove_mdisk(self, request): userid = 'fakeuser' vdev = '0102' rd = 'changevm fakeuser removedisk 0102' self._smtclient._remove_mdisk(userid, vdev), request.assert_called_once_with(rd) @mock.patch.object(smtclient.SMTClient, '_request') def test_guest_authorize_iucv_client(self, request): fake_userid = 'FakeID' client_userid = 'ClientID' requestData = "ChangeVM FakeID punchfile /tmp/FakeID/iucvauth.sh" + \ " --class x" request.return_value = {'overallRC': 0} self._smtclient.guest_authorize_iucv_client(fake_userid, client_userid) request.assert_called_once_with(requestData) self.assertIs(os.path.exists('/tmp/FakeID'), False) @mock.patch.object(database.GuestDbOperator, 'update_guest_by_userid') @mock.patch.object(database.ImageDbOperator, 'image_query_record') @mock.patch.object(smtclient.SMTClient, 'guest_authorize_iucv_client') @mock.patch.object(zvmutils.PathUtils, 'clean_temp_folder') @mock.patch.object(tempfile, 'mkdtemp') @mock.patch.object(zvmutils, 'execute') @mock.patch.object(smtclient.SMTClient, '_request') @mock.patch.object(smtclient.SMTClient, '_get_image_path_by_name') def test_guest_deploy(self, get_image_path, request, execute, mkdtemp, cleantemp, guestauth, image_query, guest_update): base.set_conf("zvm", "user_root_vdev", "0100") execute.side_effect = [(0, ""), (0, ""), (0, "")] mkdtemp.return_value = '/tmp/tmpdir' image_query.return_value = [{'imageosdistro': 'fakeos'}] userid = 'fakeuser' image_name = 'fakeimg' get_image_path.return_value = \ '/var/lib/zvmsdk/images/netboot/rhel7/fakeimg' transportfiles = '/faketran' self._smtclient.guest_deploy(userid, image_name, transportfiles) get_image_path.assert_called_once_with(image_name) unpack_cmd = ['sudo', '/opt/zthin/bin/unpackdiskimage', 
'fakeuser', '0100', '/var/lib/zvmsdk/images/netboot/rhel7/fakeimg/0100'] cp_cmd = ["/usr/bin/cp", '/faketran', '/tmp/tmpdir/faketran'] execute.assert_has_calls([mock.call(unpack_cmd), mock.call(cp_cmd)]) purge_rd = "changevm fakeuser purgerdr" punch_rd = ("changevm fakeuser punchfile " "/tmp/tmpdir/faketran --class X") request.assert_has_calls([mock.call(purge_rd), mock.call(punch_rd)]) mkdtemp.assert_called_with() cleantemp.assert_called_with('/tmp/tmpdir') guestauth.assert_called_once_with(userid, None) guest_update.assert_called_once_with(userid, meta='os_version=fakeos') @mock.patch.object(database.GuestDbOperator, 'update_guest_by_userid') @mock.patch.object(database.ImageDbOperator, 'image_query_record') @mock.patch.object(dist.rhcos4, 'read_coreos_parameter') @mock.patch.object(zvmutils, 'execute') @mock.patch.object(smtclient.SMTClient, '_request') @mock.patch.object(smtclient.SMTClient, '_get_image_path_by_name') def test_guest_deploy_rhcos(self, get_image_path, request, execute, coreos_param, image_query, guest_update): base.set_conf("zvm", "user_root_vdev", "0100") execute.side_effect = [(0, ""), (0, "")] image_query.return_value = [{'imageosdistro': 'RHCOS4', 'comments': u"{'disk_type':'DASD'}" }] userid = 'fakeuser' image_name = 'fakeimg' get_image_path.return_value = \ '/var/lib/zvmsdk/images/netboot/RHCOS4/fakeimg' transportfiles = '/faketran' coreos_param.return_value = \ '10.10.0.29::10.10.0.1:24:fakeuser:enc1000:none:' self._smtclient.guest_deploy_rhcos(userid, image_name, transportfiles) get_image_path.assert_called_once_with(image_name) guest_update.assert_called_once_with(userid, meta='os_version=RHCOS4') @mock.patch.object(database.GuestDbOperator, 'update_guest_by_userid') @mock.patch.object(smtclient.SMTClient, '_get_unpackdiskimage_cmd_rhcos') @mock.patch.object(zvmutils, 'execute') @mock.patch.object(smtclient.SMTClient, '_request') def test_guest_deploy_rhcos_withskip(self, request, execute, unpack, guest_update): base.set_conf("zvm", 
"user_root_vdev", "0100") unpack.return_value = 'command' execute.side_effect = [(0, ""), (0, "")] userid = 'fakeuser' image_name = 'RHCOS4' transportfiles = '/faketran' self._smtclient.guest_deploy_rhcos(userid, image_name, transportfiles, skipdiskcopy=True) execute.assert_called_once_with('command') guest_update.assert_called_once_with(userid, meta='os_version=RHCOS4') @mock.patch.object(database.GuestDbOperator, 'update_guest_by_userid') @mock.patch.object(database.ImageDbOperator, 'image_query_record') @mock.patch.object(dist.rhcos4, 'read_coreos_parameter') @mock.patch.object(zvmutils.PathUtils, 'clean_temp_folder') @mock.patch.object(tempfile, 'mkdtemp') @mock.patch.object(zvmutils, 'execute') @mock.patch.object(smtclient.SMTClient, '_request') @mock.patch.object(smtclient.SMTClient, '_get_image_path_by_name') def test_guest_deploy_rhcos_remote(self, get_image_path, request, execute, mkdtemp, cleantemp, coreos_param, image_query, guest_update): base.set_conf("zvm", "user_root_vdev", "0100") base.set_conf("zvm", "remotehost_sshd_port", "22") execute.side_effect = [(0, ""), (0, "")] mkdtemp.return_value = '/tmp/tmpdir' image_query.return_value = [{'imageosdistro': 'RHCOS4', 'comments': u"{'disk_type':'DASD'}" }] userid = 'fakeuser' image_name = 'fakeimg' get_image_path.return_value = \ '/var/lib/zvmsdk/images/netboot/RHCOS4/fakeimg' transportfiles = '/faketran' remote_host = 'user@1.1.1.1' coreos_param.return_value = \ '10.10.0.29::10.10.0.1:24:fakeuser:enc1000:none:' self._smtclient.guest_deploy_rhcos(userid, image_name, transportfiles, remote_host) get_image_path.assert_called_once_with(image_name) scp_cmd = ["/usr/bin/scp", "-B", "-P", "22", "-o StrictHostKeyChecking=no", "user@1.1.1.1:/faketran", "/tmp/tmpdir/faketran"] execute.assert_has_calls([mock.call(scp_cmd)]) mkdtemp.assert_called_with() cleantemp.assert_called_with('/tmp/tmpdir') guest_update.assert_called_once_with(userid, meta='os_version=RHCOS4') @mock.patch.object(dist.rhcos4, 
'read_coreos_parameter') @mock.patch.object(smtclient.SMTClient, '_get_image_disk_type') @mock.patch.object(smtclient.SMTClient, 'image_get_os_distro') def test_get_unpackdiskimage_cmd_rhcos(self, os_version, image_disk_type, coreos_param): os_version.return_value = 'RHCOS4' image_disk_type.return_value = 'ECKD' coreos_param.return_value = \ '10.10.0.29::10.10.0.1:24:FAKEUSER:enc1000:none:10.10.0.250:' hostname = 'fakehost' userid = 'fakeuser' image_name = 'FakeImg' transportfiles = '/var/lib/nova/instances/fake/ignition.file' image_file = '/var/lib/zvmsdk/images/netboot/RHCOS4/fakeimg/0100' vdev = '1000' cmd = self._smtclient._get_unpackdiskimage_cmd_rhcos(userid, image_name, transportfiles, vdev, image_file, hostname) coreos_param.assert_called_once_with(userid) self.assertEqual(cmd, ['sudo', '/opt/zthin/bin/unpackdiskimage', 'fakeuser', '1000', '/var/lib/zvmsdk/images/netboot/RHCOS4/fakeimg/0100', '/var/lib/nova/instances/fake/ignition.file', 'ECKD', '0.0.1000,0.0.1001,0.0.1002', '10.10.0.29::10.10.0.1:24:fakehost:enc1000:none:' '10.10.0.250:']) @mock.patch.object(dist.rhcos4, 'read_coreos_parameter') @mock.patch.object(smtclient.SMTClient, '_get_image_disk_type') @mock.patch.object(smtclient.SMTClient, 'image_get_os_distro') @mock.patch.object(smtclient.SMTClient, '_get_wwpn_lun') def test_get_unpackdiskimage_cmd_rhcos_SCSI(self, loaddev, os_version, image_disk_type, coreos_param): loaddev.return_value = ('111111', '000000') os_version.return_value = 'RHCOS4' image_disk_type.return_value = 'SCSI' coreos_param.return_value = \ '10.10.0.29::10.10.0.1:24:FAKEUSER:enc1000:none:10.10.0.250:' hostname = 'fakehost' userid = 'fakeuser' image_name = 'FakeImg' transportfiles = '/var/lib/nova/instances/fake/ignition.file' image_file = '/var/lib/zvmsdk/images/netboot/RHCOS4/fakeimg/0100' vdev = '1000' cmd = self._smtclient._get_unpackdiskimage_cmd_rhcos(userid, image_name, transportfiles, vdev, image_file, hostname) coreos_param.assert_called_once_with(userid) 
self.assertEqual(cmd, ['sudo', '/opt/zthin/bin/unpackdiskimage', '1000', '0x111111', '0x000000', '/var/lib/zvmsdk/images/netboot/RHCOS4/fakeimg/0100', '/var/lib/nova/instances/fake/ignition.file', 'SCSI', '0.0.1000,0.0.1001,0.0.1002', '10.10.0.29::10.10.0.1:24:fakehost:enc1000:none:' '10.10.0.250:']) @mock.patch.object(dist.rhcos4, 'read_coreos_parameter') @mock.patch.object(smtclient.SMTClient, '_get_wwpn_lun') def test_get_unpackdiskimage_cmd_rhcos_skipcopy(self, loaddev, coreos_param): coreos_param.return_value = \ '10.10.0.29::10.10.0.1:24:FAKEUSER:enc1000:none:10.10.0.250:' loaddev.return_value = ('111111', '000000') hostname = 'fakehost' userid = 'fakeuser' image_name = 'RHCOS4' transportfiles = '/var/lib/nova/instances/fake/ignition.file' image_file = None vdev = '1000' cmd = self._smtclient._get_unpackdiskimage_cmd_rhcos(userid, image_name, transportfiles, vdev, image_file, hostname, True) coreos_param.assert_called_once_with(userid) loaddev.assert_called_once_with(userid) self.assertEqual(cmd, ['sudo', '/opt/zthin/bin/unpackdiskimage', '1000', '0x111111', '0x000000', '/var/lib/nova/instances/fake/ignition.file', '0.0.1000,0.0.1001,0.0.1002', '10.10.0.29::10.10.0.1:24:fakehost:enc1000:none:' '10.10.0.250:']) @mock.patch.object(zvmutils, 'execute') @mock.patch.object(smtclient.SMTClient, '_request') @mock.patch.object(smtclient.SMTClient, '_get_image_path_by_name') def test_guest_deploy_unpackdiskimage_failed(self, get_image_path, request, execute): base.set_conf("zvm", "user_root_vdev", "0100") userid = 'fakeuser' image_name = 'fakeimg' transportfiles = '/faketran' get_image_path.return_value = \ '/var/lib/zvmsdk/images/netboot/rhel7/fakeimg' unpack_error = ('unpackdiskimage fakeuser start time: ' '2017-08-16-01:29:59.453\nSOURCE USER ID: "fakeuser"\n' 'DISK CHANNEL: "0100"\n' 'IMAGE FILE: "/var/lib/zvmsdk/images/fakeimg"\n\n' 'Image file compression level: 6\n' 'Deploying image to fakeuser\'s disk at channel 100.\n' 'ERROR: Unable to link fakeuser 0100 
disk. ' 'HCPLNM053E FAKEUSER not in CP directory\n' 'HCPDTV040E Device 260C does not exist\n' 'ERROR: Failed to connect disk: fakeuser:0100\n\n' 'IMAGE DEPLOYMENT FAILED.\n' 'A detailed trace can be found at: /var/log/zthin/' 'unpackdiskimage_trace_2017-08-16-01:29:59.453.txt\n' 'unpackdiskimage end time: 2017-08-16-01:29:59.605\n') execute.return_value = (3, unpack_error) self.assertRaises(exception.SDKGuestOperationError, self._smtclient.guest_deploy, userid, image_name, transportfiles) get_image_path.assert_called_once_with(image_name) imagefile = '/var/lib/zvmsdk/images/netboot/rhel7/fakeimg/0100' unpack_cmd = ['sudo', '/opt/zthin/bin/unpackdiskimage', 'fakeuser', '0100', imagefile] execute.assert_has_calls([call(['/usr/bin/hexdump', '-C', '-n', '64', imagefile]), call(unpack_cmd)]) @mock.patch.object(zvmutils.PathUtils, 'clean_temp_folder') @mock.patch.object(tempfile, 'mkdtemp') @mock.patch.object(zvmutils, 'execute') @mock.patch.object(smtclient.SMTClient, '_request') @mock.patch.object(smtclient.SMTClient, '_get_image_path_by_name') def test_guest_deploy_cp_transport_failed(self, get_image_path, request, execute, mkdtemp, cleantemp): base.set_conf("zvm", "user_root_vdev", "0100") cp_error = ("/usr/bin/cp: cannot stat '/faketran': " "No such file or directory\n") execute.side_effect = [(0, "hexdump"), (0, ""), (1, cp_error)] mkdtemp.return_value = '/tmp/tmpdir' userid = 'fakeuser' image_name = 'fakeimg' transportfiles = '/faketran' get_image_path.return_value = \ '/var/lib/zvmsdk/images/netboot/rhel7/fakeimg' self.assertRaises(exception.SDKGuestOperationError, self._smtclient.guest_deploy, userid, image_name, transportfiles) get_image_path.assert_called_once_with(image_name) unpack_cmd = ['sudo', '/opt/zthin/bin/unpackdiskimage', 'fakeuser', '0100', '/var/lib/zvmsdk/images/netboot/rhel7/fakeimg/0100'] cp_cmd = ["/usr/bin/cp", '/faketran', '/tmp/tmpdir/faketran'] execute.assert_has_calls([mock.call(unpack_cmd), mock.call(cp_cmd)]) purge_rd = "changevm 
fakeuser purgerdr" request.assert_called_once_with(purge_rd) mkdtemp.assert_called_with() cleantemp.assert_called_with('/tmp/tmpdir') @mock.patch.object(zvmutils.PathUtils, 'clean_temp_folder') @mock.patch.object(tempfile, 'mkdtemp') @mock.patch.object(zvmutils, 'execute') @mock.patch.object(smtclient.SMTClient, '_request') @mock.patch.object(smtclient.SMTClient, '_get_image_path_by_name') def test_guest_deploy_smt_request_failed(self, get_image_path, request, execute, mkdtemp, cleantemp): base.set_conf("zvm", "user_root_vdev", "0100") get_image_path.return_value = \ '/var/lib/zvmsdk/images/netboot/rhel7/fakeimg' fake_smt_results = {'rs': 8, 'errno': 0, 'strError': 'Failed', 'overallRC': 3, 'rc': 400, 'logEntries': '', 'response': ['(Error) output and error info']} execute.side_effect = [(0, ""), (0, ""), (0, "")] request.side_effect = [None, exception.SDKSMTRequestFailed( fake_smt_results, 'fake error')] mkdtemp.return_value = '/tmp/tmpdir' userid = 'fakeuser' image_name = 'fakeimg' transportfiles = '/faketran' remote_host = "user@1.1.1.1" self.assertRaises(exception.SDKSMTRequestFailed, self._smtclient.guest_deploy, userid, image_name, transportfiles, remote_host) get_image_path.assert_called_once_with(image_name) unpack_cmd = ['sudo', '/opt/zthin/bin/unpackdiskimage', 'fakeuser', '0100', '/var/lib/zvmsdk/images/netboot/rhel7/fakeimg/0100'] scp_cmd = ["/usr/bin/scp", "-B", '-P', '22', '-o StrictHostKeyChecking=no', 'user@1.1.1.1:/faketran', '/tmp/tmpdir/faketran'] execute.assert_has_calls([mock.call(unpack_cmd), mock.call(scp_cmd)]) purge_rd = "changevm fakeuser purgerdr" punch_rd = ("changevm fakeuser punchfile " "/tmp/tmpdir/faketran --class X") request.assert_has_calls([mock.call(purge_rd), mock.call(punch_rd)]) mkdtemp.assert_called_with() cleantemp.assert_called_with('/tmp/tmpdir') @mock.patch.object(database.ImageDbOperator, 'image_query_record') def test_image_get_os_distro(self, image_info): image_info.return_value = [{'image_size_in_bytes': '3072327680', 
'disk_size_units': '3339:CYL', 'md5sum': '370cd177c51e39f0e2e2b\ eecbb88f701', 'comments': "{'disk_type':'DASD'}", 'imagename': '0b3013e1-1356-431c-\ b680-ba06a1768aea', 'imageosdistro': 'RHCOS4', 'type': 'rootonly'}] self.assertEqual(self._smtclient.image_get_os_distro(image_info), 'RHCOS4') @mock.patch.object(database.ImageDbOperator, 'image_query_record') def test_get_image_disk_type_dasd(self, image_info): image_info.return_value = [{'image_size_in_bytes': '3072327680', 'disk_size_units': '3339:CYL', 'md5sum': '370cd177c51e39f0e2e2b\ eecbb88f701', 'comments': "{'disk_type':'DASD'}", 'imagename': '0b3013e1-1356-431c-\ b680-ba06a1768aea', 'imageosdistro': 'RHCOS4', 'type': 'rootonly'}] self.assertEqual(self._smtclient._get_image_disk_type(image_info), 'ECKD') @mock.patch.object(database.ImageDbOperator, 'image_query_record') def test_get_image_disk_type_scsi(self, image_info): image_info.return_value = [{'image_size_in_bytes': '3072327680', 'disk_size_units': '3339:CYL', 'md5sum': '370cd177c51e39f0e2e2b\ eecbb88f701', 'comments': "{'disk_type':'SCSI'}", 'imagename': '0b3013e1-1356-431c-\ b680-ba06a1768aea', 'imageosdistro': 'RHCOS4', 'type': 'rootonly'}] self.assertEqual(self._smtclient._get_image_disk_type(image_info), 'SCSI') @mock.patch.object(database.ImageDbOperator, 'image_query_record') def test_get_image_disk_type_failed(self, image_info): image_info.return_value = [{'image_size_in_bytes': '3072327680', 'disk_size_units': '3339:CYL', 'md5sum': '370cd177c51e39f0e2e2b\ eecbb88f701', 'comments': "{'disk_type':'FCP'}", 'imagename': '0b3013e1-1356-431c-\ b680-ba06a1768aea', 'imageosdistro': 'RHCOS4', 'type': 'rootonly'}] self.assertEqual(self._smtclient._get_image_disk_type(image_info), None) @mock.patch.object(smtclient.SMTClient, '_request') def test_get_adapters_info(self, request): userid = 'FakeID' data = [ "adapter_count=2", "adapter_address=1000", "port_type=1", "extended_port_status=00", "adapter_type=2", "network_device_count=3", "adapter_status=00", 
"device_options=00000000", "router_status=00", "adapter_info_end", "adapter_address=2000", "port_type=0", "extended_port_status=00", "adapter_type=2", "network_device_count=3", "adapter_status=02", "lan_owner=SYSTEM", "lan_name=VSC12345", "device_options=C1000000", "router_status=00", "mac_count=3", "mac_address=01005E000001", "mac_address_type=01", "mac_status=00", "mac_address=0255365D4857", "mac_address_type=00", "mac_status=00", "mac_ip_version=4", "mac_ip_address=9.123.123.123", "mac_address=333300000001", "mac_address_type=01", "mac_status=00", "mac_info_end", "adapter_info_end"] request.return_value = {'response': data} rd = ' '.join(( "SMAPI %s API Virtual_Network_Adapter_Query_Extended" % userid, "--operands", "-k 'image_device_number=*'")) ret = self._smtclient.get_adapters_info('FakeID') request.assert_called_once_with(rd) self.assertEqual(ret[1]['mac_ip_address'], '9.123.123.123') self.assertEqual(ret[1]['mac_ip_version'], '4') self.assertEqual(ret[1]['mac_address'], '02:55:36:5D:48:57') @mock.patch.object(zvmutils, 'get_smt_userid') @mock.patch.object(smtclient.SMTClient, '_request') def test_grant_user_to_vswitch(self, request, userid): userid.return_value = 'FakeHostID' vswitch_name = 'FakeVs' userid = 'FakeID' requestData = ' '.join(( 'SMAPI FakeHostID API Virtual_Network_Vswitch_Set_Extended', "--operands", "-k switch_name=FakeVs", "-k grant_userid=FakeID", "-k persist=YES")) self._smtclient.grant_user_to_vswitch(vswitch_name, userid) request.assert_called_once_with(requestData) @mock.patch.object(zvmutils, 'get_smt_userid') @mock.patch.object(smtclient.SMTClient, '_request') def test_revoke_user_from_vswitch(self, request, userid): userid.return_value = 'FakeHostID' vswitch_name = 'FakeVs' userid = 'FakeID' requestData = ' '.join(( 'SMAPI FakeHostID API Virtual_Network_Vswitch_Set_Extended', "--operands", "-k switch_name=FakeVs", "-k revoke_userid=FakeID", "-k persist=YES")) self._smtclient.revoke_user_from_vswitch(vswitch_name, userid) 
request.assert_called_once_with(requestData) @mock.patch.object(zvmutils, 'get_smt_userid') @mock.patch.object(smtclient.SMTClient, '_request') def test_image_performance_query_single(self, smt_req, get_smt_userid): get_smt_userid.return_value = "SMTUSER" smt_req.return_value = {'rs': 0, 'errno': 0, 'strError': '', 'overallRC': 0, 'logEntries': [], 'rc': 0, 'response': [ 'Virtual server ID: FAKEVM', 'Record version: "1"', 'Guest flags: "0"', 'Used CPU time: "646609178 uS"', 'Elapsed time: "596837441984 uS"', 'Minimum memory: "0 KB"', 'Max memory: "2097152 KB"', 'Shared memory: "302180 KB"', 'Used memory: "302180 KB"', 'Active CPUs in CEC: "44"', 'Logical CPUs in VM: "6"', 'Guest CPUs: "2"', 'Minimum CPU count: "2"', 'Max CPU limit: "10000"', 'Processor share: "100"', 'Samples CPU in use: "371"', ',Samples CPU delay: "116"', 'Samples page wait: "0"', 'Samples idle: "596331"', 'Samples other: "12"', 'Samples total: "596830"', 'Guest name: "FAKEVM "', ''] } pi_info = self._smtclient.image_performance_query('fakevm') self.assertEqual(pi_info['FAKEVM']['used_memory'], "302180 KB") self.assertEqual(pi_info['FAKEVM']['used_cpu_time'], "646609178 uS") self.assertEqual(pi_info['FAKEVM']['elapsed_cpu_time'], "596837441984 uS") self.assertEqual(pi_info['FAKEVM']['min_cpu_count'], "2") self.assertEqual(pi_info['FAKEVM']['max_cpu_limit'], "10000") self.assertEqual(pi_info['FAKEVM']['samples_cpu_in_use'], "371") self.assertEqual(pi_info['FAKEVM']['samples_cpu_delay'], "116") self.assertEqual(pi_info['FAKEVM']['guest_cpus'], "2") self.assertEqual(pi_info['FAKEVM']['userid'], "FAKEVM") self.assertEqual(pi_info['FAKEVM']['max_memory'], "2097152 KB") self.assertEqual(pi_info['FAKEVM']['min_memory'], "0 KB") self.assertEqual(pi_info['FAKEVM']['shared_memory'], "302180 KB") @mock.patch.object(zvmutils, 'get_smt_userid') @mock.patch.object(smtclient.SMTClient, '_request') def test_image_performance_query_single_off(self, smt_req, get_smt_userid): get_smt_userid.return_value = "SMTUSER" 
smt_req.return_value = {'rs': 0, 'errno': 0, 'strError': '', 'overallRC': 0, 'logEntries': [], 'rc': 0, 'response': [] } pi_info = self._smtclient.image_performance_query('fakevm') self.assertDictEqual(pi_info, {}) @mock.patch.object(zvmutils, 'get_smt_userid') @mock.patch.object(smtclient.SMTClient, '_request') def test_image_performance_query_multiple(self, smt_req, get_smt_userid): get_smt_userid.return_value = "SMTUSER" response_list = ['Virtual server ID: fakevm', 'Record version: "1"', 'Guest flags: "0"', 'Used CPU time: "652337849 uS"', 'Elapsed time: "602181110336 uS"', 'Minimum memory: "0 KB"', 'Max memory: "2097152 KB"', 'Shared memory: "302336 KB"', 'Used memory: "302336 KB"', 'Active CPUs in CEC: "44"', 'Logical CPUs in VM: "6"', 'Guest CPUs: "2"', 'Minimum CPU count: "2"', 'Max CPU limit: "10000"', 'Processor share: "100"', 'Samples CPU in use: "375"', ',Samples CPU delay: "116"', 'Samples page wait: "0"', 'Samples idle: "601671"', 'Samples other: "12"', 'Samples total: "602174"', 'Guest name: "FAKEVM "', '', 'Virtual server ID: fakevm2', 'Record version: "1"', 'Guest flags: "0"', 'Used CPU time: "3995650268844 uS"', 'Elapsed time: "3377790094595 uS"', 'Minimum memory: "0 KB"', 'Max memory: "8388608 KB"', 'Shared memory: "8383048 KB"', 'Used memory: "8383048 KB"', 'Active CPUs in CEC: "44"', 'Logical CPUs in VM: "6"', 'Guest CPUs: "4"', 'Minimum CPU count: "4"', 'Max CPU limit: "10000"', 'Processor share: "100"', 'Samples CPU in use: "1966323"', ',Samples CPU delay: "111704"', 'Samples page wait: "0"', 'Samples idle: "4001258"', 'Samples other: "8855"', 'Samples total: "6088140"', 'Guest name: "FAKEVM2 "', ''] smt_req.return_value = {'rs': 0, 'errno': 0, 'strError': '', 'overallRC': 0, 'logEntries': [], 'rc': 0, 'response': response_list } pi_info = self._smtclient.image_performance_query(['fakevm', 'fakevm2']) self.assertEqual(pi_info['FAKEVM']['used_memory'], "302336 KB") self.assertEqual(pi_info['FAKEVM']['used_cpu_time'], "652337849 uS") 
self.assertEqual(pi_info['FAKEVM']['elapsed_cpu_time'], "602181110336 uS") self.assertEqual(pi_info['FAKEVM']['min_cpu_count'], "2") self.assertEqual(pi_info['FAKEVM']['max_cpu_limit'], "10000") self.assertEqual(pi_info['FAKEVM']['samples_cpu_in_use'], "375") self.assertEqual(pi_info['FAKEVM']['samples_cpu_delay'], "116") self.assertEqual(pi_info['FAKEVM']['guest_cpus'], "2") self.assertEqual(pi_info['FAKEVM']['userid'], "FAKEVM") self.assertEqual(pi_info['FAKEVM']['max_memory'], "2097152 KB") self.assertEqual(pi_info['FAKEVM']['min_memory'], "0 KB") self.assertEqual(pi_info['FAKEVM']['shared_memory'], "302336 KB") self.assertEqual(pi_info['FAKEVM2']['used_memory'], "8383048 KB") self.assertEqual(pi_info['FAKEVM2']['used_cpu_time'], "3995650268844 uS") self.assertEqual(pi_info['FAKEVM2']['elapsed_cpu_time'], "3377790094595 uS") self.assertEqual(pi_info['FAKEVM2']['min_cpu_count'], "4") self.assertEqual(pi_info['FAKEVM2']['max_cpu_limit'], "10000") self.assertEqual(pi_info['FAKEVM2']['samples_cpu_in_use'], "1966323") self.assertEqual(pi_info['FAKEVM2']['samples_cpu_delay'], "111704") self.assertEqual(pi_info['FAKEVM2']['guest_cpus'], "4") self.assertEqual(pi_info['FAKEVM2']['userid'], "FAKEVM2") self.assertEqual(pi_info['FAKEVM2']['max_memory'], "8388608 KB") self.assertEqual(pi_info['FAKEVM2']['min_memory'], "0 KB") self.assertEqual(pi_info['FAKEVM2']['shared_memory'], "8383048 KB") @mock.patch.object(zvmutils, 'get_smt_userid') @mock.patch.object(smtclient.SMTClient, '_request') def test_system_image_performance_query(self, smt_req, get_smt_userid): get_smt_userid.return_value = "SMTUSER" response_list = ['Virtual server ID: fakevm', 'Record version: "1"', 'Guest flags: "0"', 'Used CPU time: "652337849 uS"', 'Elapsed time: "602181110336 uS"', 'Minimum memory: "0 KB"', 'Max memory: "2097152 KB"', 'Shared memory: "302336 KB"', 'Used memory: "302336 KB"', 'Active CPUs in CEC: "44"', 'Logical CPUs in VM: "6"', 'Guest CPUs: "2"', 'Minimum CPU count: "2"', 'Max CPU 
limit: "10000"', 'Processor share: "100"', 'Samples CPU in use: "375"', ',Samples CPU delay: "116"', 'Samples page wait: "0"', 'Samples idle: "601671"', 'Samples other: "12"', 'Samples total: "602174"', 'Guest name: "FAKEVM "', '', 'Virtual server ID: fakevm2', 'Record version: "1"', 'Guest flags: "0"', 'Used CPU time: "3995650268844 uS"', 'Elapsed time: "3377790094595 uS"', 'Minimum memory: "0 KB"', 'Max memory: "8388608 KB"', 'Shared memory: "8383048 KB"', 'Used memory: "8383048 KB"', 'Active CPUs in CEC: "44"', 'Logical CPUs in VM: "6"', 'Guest CPUs: "4"', 'Minimum CPU count: "4"', 'Max CPU limit: "10000"', 'Processor share: "100"', 'Samples CPU in use: "1966323"', ',Samples CPU delay: "111704"', 'Samples page wait: "0"', 'Samples idle: "4001258"', 'Samples other: "8855"', 'Samples total: "6088140"', 'Guest name: "FAKEVM2 "', ''] smt_req.return_value = {'rs': 0, 'errno': 0, 'strError': '', 'overallRC': 0, 'logEntries': [], 'rc': 0, 'response': response_list } pi_info = self._smtclient.system_image_performance_query(['fakevm', 'fakevm2']) self.assertEqual(pi_info['FAKEVM']['used_memory'], "302336 KB") self.assertEqual(pi_info['FAKEVM']['used_cpu_time'], "652337849 uS") self.assertEqual(pi_info['FAKEVM']['elapsed_cpu_time'], "602181110336 uS") self.assertEqual(pi_info['FAKEVM']['min_cpu_count'], "2") self.assertEqual(pi_info['FAKEVM']['max_cpu_limit'], "10000") self.assertEqual(pi_info['FAKEVM']['samples_cpu_in_use'], "375") self.assertEqual(pi_info['FAKEVM']['samples_cpu_delay'], "116") self.assertEqual(pi_info['FAKEVM']['guest_cpus'], "2") self.assertEqual(pi_info['FAKEVM']['userid'], "FAKEVM") self.assertEqual(pi_info['FAKEVM']['max_memory'], "2097152 KB") self.assertEqual(pi_info['FAKEVM']['min_memory'], "0 KB") self.assertEqual(pi_info['FAKEVM']['shared_memory'], "302336 KB") self.assertEqual(pi_info['FAKEVM2']['used_memory'], "8383048 KB") self.assertEqual(pi_info['FAKEVM2']['used_cpu_time'], "3995650268844 uS") 
self.assertEqual(pi_info['FAKEVM2']['elapsed_cpu_time'], "3377790094595 uS") self.assertEqual(pi_info['FAKEVM2']['min_cpu_count'], "4") self.assertEqual(pi_info['FAKEVM2']['max_cpu_limit'], "10000") self.assertEqual(pi_info['FAKEVM2']['samples_cpu_in_use'], "1966323") self.assertEqual(pi_info['FAKEVM2']['samples_cpu_delay'], "111704") self.assertEqual(pi_info['FAKEVM2']['guest_cpus'], "4") self.assertEqual(pi_info['FAKEVM2']['userid'], "FAKEVM2") self.assertEqual(pi_info['FAKEVM2']['max_memory'], "8388608 KB") self.assertEqual(pi_info['FAKEVM2']['min_memory'], "0 KB") self.assertEqual(pi_info['FAKEVM2']['shared_memory'], "8383048 KB") @mock.patch.object(zvmutils, 'get_smt_userid') @mock.patch.object(smtclient.SMTClient, '_request') def test_virtual_network_vswitch_query_byte_stats(self, smt_req, get_smt_userid): get_smt_userid.return_value = "SMTUSER" vsw_data = ['vswitch count: 2', '', 'vswitch number: 1', 'vswitch name: XCATVSW1', 'uplink count: 1', 'uplink_conn: 6240', 'uplink_fr_rx: 3658251', 'uplink_fr_rx_dsc: 0', 'uplink_fr_rx_err: 0', 'uplink_fr_tx: 4209828', 'uplink_fr_tx_dsc: 0', 'uplink_fr_tx_err: 0', 'uplink_rx: 498914052', 'uplink_tx: 2615220898', 'bridge_fr_rx: 0', 'bridge_fr_rx_dsc: 0', 'bridge_fr_rx_err: 0', 'bridge_fr_tx: 0', 'bridge_fr_tx_dsc: 0', 'bridge_fr_tx_err: 0', 'bridge_rx: 0', 'bridge_tx: 0', 'nic count: 2', 'nic_id: INST1 0600', 'nic_fr_rx: 573952', 'nic_fr_rx_dsc: 0', 'nic_fr_rx_err: 0', 'nic_fr_tx: 548780', 'nic_fr_tx_dsc: 0', 'nic_fr_tx_err: 4', 'nic_rx: 103024058', 'nic_tx: 102030890', 'nic_id: INST2 0600', 'nic_fr_rx: 17493', 'nic_fr_rx_dsc: 0', 'nic_fr_rx_err: 0', 'nic_fr_tx: 16886', 'nic_fr_tx_dsc: 0', 'nic_fr_tx_err: 4', 'nic_rx: 3111714', 'nic_tx: 3172646', 'vlan count: 0', '', 'vswitch number: 2', 'vswitch name: XCATVSW2', 'uplink count: 1', 'uplink_conn: 6200', 'uplink_fr_rx: 1608681', 'uplink_fr_rx_dsc: 0', 'uplink_fr_rx_err: 0', 'uplink_fr_tx: 2120075', 'uplink_fr_tx_dsc: 0', 'uplink_fr_tx_err: 0', 'uplink_rx: 314326223', 
'uplink_tx: 1503721533', 'bridge_fr_rx: 0', 'bridge_fr_rx_dsc: 0', 'bridge_fr_rx_err: 0', 'bridge_fr_tx: 0', 'bridge_fr_tx_dsc: 0', 'bridge_fr_tx_err: 0', 'bridge_rx: 0', 'bridge_tx: 0', 'nic count: 2', 'nic_id: INST1 1000', 'nic_fr_rx: 34958', 'nic_fr_rx_dsc: 0', 'nic_fr_rx_err: 0', 'nic_fr_tx: 16211', 'nic_fr_tx_dsc: 0', 'nic_fr_tx_err: 0', 'nic_rx: 4684435', 'nic_tx: 3316601', 'nic_id: INST2 1000', 'nic_fr_rx: 27211', 'nic_fr_rx_dsc: 0', 'nic_fr_rx_err: 0', 'nic_fr_tx: 12344', 'nic_fr_tx_dsc: 0', 'nic_fr_tx_err: 0', 'nic_rx: 3577163', 'nic_tx: 2515045', 'vlan count: 0' ] smt_req.return_value = {'rs': 0, 'errno': 0, 'strError': '', 'overallRC': 0, 'logEntries': [], 'rc': 0, 'response': vsw_data } vsw_dict = self._smtclient.virtual_network_vswitch_query_byte_stats() self.assertEqual(2, len(vsw_dict['vswitches'])) self.assertEqual(2, len(vsw_dict['vswitches'][1]['nics'])) self.assertEqual('INST1', vsw_dict['vswitches'][0]['nics'][0]['userid']) self.assertEqual('3577163', vsw_dict['vswitches'][1]['nics'][1]['nic_rx']) @mock.patch.object(smtclient.SMTClient, '_request') def test_get_all_user_direct(self, smt_req): resp = ['vm1', 'vm2', 'vm3'] smt_req.return_value = {'rs': 0, 'errno': 0, 'strError': '', 'overallRC': 0, 'logEntries': [], 'rc': 0, 'response': resp} expect = ['vm1', 'vm2', 'vm3'] rd = 'getvm alldirectory' guest_list = self._smtclient.get_all_user_direct() smt_req.assert_called_once_with(rd) self.assertEqual(guest_list, expect) @mock.patch.object(smtclient.SMTClient, '_request') def test_get_host_info(self, smt_req): resp = ['ZCC USERID: OPNCLOUD', 'z/VM Host: OPNSTK2', 'Architecture: s390x', 'CEC Vendor: IBM', 'CEC Model: 2817', 'Hypervisor OS: z/VM 6.4.0', 'Hypervisor Name: OPNSTK2', 'LPAR CPU Total: 6', 'LPAR CPU Used: 6', 'LPAR Memory Total: 50G', 'LPAR Memory Offline: 0', 'LPAR Memory Used: 36.5G', 'IPL Time: IPL at 07/12/17 22:37:47 EDT'] smt_req.return_value = {'rs': 0, 'errno': 0, 'strError': '', 'overallRC': 0, 'logEntries': [], 'rc': 0, 
'response': resp} expect = {'architecture': 's390x', 'cec_model': '2817', 'cec_vendor': 'IBM', 'hypervisor_name': 'OPNSTK2', 'hypervisor_os': 'z/VM 6.4.0', 'ipl_time': 'IPL at 07/12/17 22:37:47 EDT', 'lpar_cpu_total': '6', 'lpar_cpu_used': '6', 'lpar_memory_offline': '0', 'lpar_memory_total': '50G', 'lpar_memory_used': '36.5G', 'zcc_userid': 'OPNCLOUD', 'zvm_host': 'OPNSTK2'} host_info = self._smtclient.get_host_info() smt_req.assert_called_once_with('getHost general') self.assertDictEqual(host_info, expect) @mock.patch.object(smtclient.SMTClient, '_request') def test_get_diskpool_info(self, smt_req): resp = ['XCATECKD Total: 3623.0G', 'XCATECKD Used: 397.4G', 'XCATECKD Free: 3225.6G'] smt_req.return_value = {'rs': 0, 'errno': 0, 'strError': '', 'overallRC': 0, 'logEntries': [], 'rc': 0, 'response': resp} expect = {'disk_available': '3225.6G', 'disk_total': '3623.0G', 'disk_used': '397.4G'} dp_info = self._smtclient.get_diskpool_info('pool') smt_req.assert_called_once_with('getHost diskpoolspace pool') self.assertDictEqual(dp_info, expect) @mock.patch.object(smtclient.SMTClient, '_request') def test_get_diskpool_volumes(self, smt_req): resp = {'Diskpool Volumes:' 'IAS100 IAS200'} smt_req.return_value = {'rs': 0, 'errno': 0, 'strError': '', 'overallRC': 0, 'logEntries': [], 'rc': 0, 'response': resp} expect = {'diskpool_volumes': 'IAS100 IAS200'} diskpool_vols = self._smtclient.get_diskpool_volumes('fakepool') smt_req.assert_called_once_with('gethost diskpoolvolumes fakepool') self.assertDictEqual(diskpool_vols, expect) @mock.patch.object(smtclient.SMTClient, '_request') def test_get_volume_info(self, smt_req): resp = ['volume name: IASFBA', 'volume_type:9336-ET', 'volume_size:564718', 'volume_name: IAS1CM', 'volume_type:3390-09', 'volume_size:60102'] smt_req.return_value = {'rs': 0, 'errno': 0, 'strError': '', 'overallRC': 0, 'logEntries': [], 'rc': 0, 'response': resp} expect = {'IASFBA': {'volume_type': '9336-ET', 'volume_size': '564718'}, 'IAS1CM': 
{'volume_type': '3390-09', 'volume_size': '60102'}} volume_info = self._smtclient.get_volume_info() smt_req.assert_called_once_with('gethost volumeinfo') self.assertDictEqual(volume_info, expect) @mock.patch.object(zvmutils, 'get_smt_userid') @mock.patch.object(smtclient.SMTClient, '_request') def test_get_vswitch_list(self, request, get_smt_userid): get_smt_userid.return_value = "SMTUSER" request.return_value = {'overallRC': 0, 'response': ['VSWITCH: Name: VSTEST1', 'VSWITCH: Name: VSTEST2', 'VSWITCH: Name: VSTEST3', 'VSWITCH: Name: VSTEST4']} expect = ['VSTEST1', 'VSTEST2', 'VSTEST3', 'VSTEST4'] rd = ' '.join(( "SMAPI SMTUSER API Virtual_Network_Vswitch_Query", "--operands", "-s \'*\'")) list = self._smtclient.get_vswitch_list() request.assert_called_once_with(rd) self.assertEqual(list, expect) @mock.patch.object(zvmutils, 'get_smt_userid') @mock.patch.object(smtclient.SMTClient, '_request') def test_set_vswitch_port_vlan_id(self, request, get_smt_userid): get_smt_userid.return_value = "SMTUSER" request.return_value = {'overallRC': 0} userid = 'FakeID' vswitch_name = 'FakeVS' vlan_id = 'FakeVLAN' rd = ' '.join(( "SMAPI SMTUSER API Virtual_Network_Vswitch_Set_Extended", "--operands", "-k grant_userid=FakeID", "-k switch_name=FakeVS", "-k user_vlan_id=FakeVLAN", "-k persist=YES")) self._smtclient.set_vswitch_port_vlan_id(vswitch_name, userid, vlan_id) request.assert_called_once_with(rd) @mock.patch.object(zvmutils, 'get_smt_userid') @mock.patch.object(smtclient.SMTClient, '_request') def test_add_vswitch(self, request, get_smt_userid): get_smt_userid.return_value = 'SMTUSER' request.return_value = {'overallRC': 0} rd = ' '.join(( "SMAPI SMTUSER API Virtual_Network_Vswitch_Create_Extended", "--operands", "-k switch_name=fakename", "-k real_device_address='111 222'", "-k connection_value=CONNECT", "-k queue_memory_limit=5", "-k transport_type=ETHERNET", "-k vlan_id=10", "-k persist=NO", "-k port_type=ACCESS", "-k gvrp_value=GVRP", "-k native_vlanid=None", "-k 
routing_value=NONROUTER")) self._smtclient.add_vswitch("fakename", rdev="111 222", controller='*', connection='CONNECT', network_type='ETHERNET', router="NONROUTER", vid='10', port_type='ACCESS', gvrp='GVRP', queue_mem=5, native_vid=None, persist=False) request.assert_called_with(rd) @mock.patch.object(zvmutils, 'get_smt_userid') @mock.patch.object(smtclient.SMTClient, '_request') def test_set_vswitch(self, request, get_smt_userid): get_smt_userid.return_value = "SMTUSER" request.return_value = {'overallRC': 0} rd = ' '.join(( "SMAPI SMTUSER API Virtual_Network_Vswitch_Set_Extended", "--operands", "-k switch_name=fake_vs", "-k real_device_address='1000 1003'")) self._smtclient.set_vswitch("fake_vs", real_device_address='1000 1003') request.assert_called_with(rd) @mock.patch.object(zvmutils, 'execute') def test_refresh_bootmap_return_value(self, execute): base.set_conf('volume', 'min_fcp_paths_count', 2) fcpchannels = ['5d71'] wwpns = ['5005076802100c1b', '5005076802200c1b'] lun = '0000000000000000' wwid = '600507640083826de00000000000605b' execute.side_effect = [(0, "")] self._smtclient.volume_refresh_bootmap(fcpchannels, wwpns, lun, wwid=wwid, min_fcp_paths_count=2) refresh_bootmap_cmd = ['sudo', '/opt/zthin/bin/refresh_bootmap', '--fcpchannel=5d71', '--wwpn=5005076802100c1b,5005076802200c1b', '--lun=0000000000000000', '--wwid=600507640083826de00000000000605b', '--minfcp=2'] execute.assert_called_once_with(refresh_bootmap_cmd, timeout=1200) @mock.patch.object(zvmutils, 'execute') def test_refresh_bootmap_return_value_withskip(self, execute): fcpchannels = ['5d71'] wwpns = ['5005076802100c1b', '5005076802200c1b'] lun = '0000000000000000' wwid = '600507640083826de00000000000605b' execute.side_effect = [(0, "")] self._smtclient.volume_refresh_bootmap(fcpchannels, wwpns, lun, wwid=wwid, min_fcp_paths_count=2) refresh_bootmap_cmd = ['sudo', '/opt/zthin/bin/refresh_bootmap', '--fcpchannel=5d71', '--wwpn=5005076802100c1b,5005076802200c1b', '--lun=0000000000000000', 
'--wwid=600507640083826de00000000000605b', '--minfcp=2'] execute.assert_called_once_with(refresh_bootmap_cmd, timeout=1200) @mock.patch.object(zvmutils, 'get_smt_userid') @mock.patch.object(smtclient.SMTClient, '_request') def test_set_vswitch_with_errorcode(self, request, get_smt_userid): get_smt_userid.return_value = "SMTUSER" results = {'rs': 0, 'errno': 0, 'strError': '', 'overallRC': 1, 'logEntries': [], 'rc': 0, 'response': ['fake response']} request.side_effect = exception.SDKSMTRequestFailed( results, 'fake error') self.assertRaises(exception.SDKSMTRequestFailed, self._smtclient.set_vswitch, "vswitch_name", grant_userid='fake_id') @mock.patch.object(zvmutils, 'get_smt_userid') @mock.patch.object(smtclient.SMTClient, '_request') def test_delete_vswitch(self, request, get_smt_userid): get_smt_userid.return_value = "SMTUSER" request.return_value = {'rs': 0, 'errno': 0, 'strError': '', 'overallRC': 0, 'logEntries': [], 'rc': 0, 'response': ['fake response']} switch_name = 'FakeVS' rd = ' '.join(( "SMAPI SMTUSER API Virtual_Network_Vswitch_Delete_Extended", "--operands", "-k switch_name=FakeVS", "-k persist=YES")) self._smtclient.delete_vswitch(switch_name, True) request.assert_called_once_with(rd) @mock.patch.object(zvmutils, 'get_smt_userid') @mock.patch.object(smtclient.SMTClient, '_request') def test_delete_vswitch_with_errorcode(self, request, get_smt_userid): get_smt_userid.return_value = "SMTUSER" results = {'rs': 0, 'errno': 0, 'strError': '', 'overallRC': 1, 'logEntries': [], 'rc': 0, 'response': ['fake response']} request.side_effect = exception.SDKSMTRequestFailed( results, 'fake error') self.assertRaises(exception.SDKSMTRequestFailed, self._smtclient.delete_vswitch, "vswitch_name", True) @mock.patch.object(zvmutils, 'get_smt_userid') @mock.patch.object(smtclient.SMTClient, '_request') def test_delete_vswitch_not_exist(self, request, get_smt_userid): get_smt_userid.return_value = "SMTUSER" results = {'rs': 40, 'errno': 0, 'strError': '', 'overallRC': 
1, 'logEntries': [], 'rc': 212, 'response': ['fake response']} request.side_effect = exception.SDKSMTRequestFailed( results, 'fake error') switch_name = 'FakeVS' rd = ' '.join(( "SMAPI SMTUSER API Virtual_Network_Vswitch_Delete_Extended", "--operands", "-k switch_name=FakeVS", "-k persist=YES")) self._smtclient.delete_vswitch(switch_name, True) request.assert_called_once_with(rd) @mock.patch.object(database.NetworkDbOperator, 'switch_select_table') def test_get_available_vdev(self, switch_select_table): switch_select_table.return_value = [ {'userid': 'fake_id', 'interface': '1003', 'switch': None, 'port': None, 'comments': None}, {'userid': 'fake_id', 'interface': '1006', 'switch': None, 'port': None, 'comments': None}] result = self._smtclient._get_available_vdev('fake_id', vdev='1009') switch_select_table.assert_called_with() self.assertEqual(result, '1009') @mock.patch.object(database.NetworkDbOperator, 'switch_select_table') def test_get_available_vdev_without_vdev(self, switch_select_table): switch_select_table.return_value = [ {'userid': 'FAKE_ID', 'interface': '1003', 'switch': None, 'port': None, 'comments': None}, {'userid': 'FAKE_ID', 'interface': '2003', 'switch': None, 'port': None, 'comments': None}] result = self._smtclient._get_available_vdev('fake_id', vdev=None) switch_select_table.assert_called_with() self.assertEqual(result, '2006') @mock.patch.object(database.NetworkDbOperator, 'switch_select_table') def test_get_available_vdev_with_used_vdev(self, switch_select_table): switch_select_table.return_value = [ {'userid': 'FAKE_ID', 'interface': '1003', 'switch': None, 'port': None, 'comments': None}, {'userid': 'FAKE_ID', 'interface': '1006', 'switch': None, 'port': None, 'comments': None}] self.assertRaises(exception.SDKConflictError, self._smtclient._get_available_vdev, 'fake_id', vdev='1004') @mock.patch.object(smtclient.SMTClient, '_get_available_vdev') @mock.patch.object(smtclient.SMTClient, '_create_nic') def test_create_nic(self, create_nic, 
get_vdev): userid = 'fake_id' get_vdev.return_value = '1009' self._smtclient.create_nic(userid, vdev='1009', nic_id='nic_id') create_nic.assert_called_with(userid, '1009', nic_id="nic_id", mac_addr=None, active=False) get_vdev.assert_called_with(userid, vdev='1009') @mock.patch.object(smtclient.SMTClient, '_get_available_vdev') @mock.patch.object(smtclient.SMTClient, '_create_nic') def test_create_nic_without_vdev(self, create_nic, get_vdev): userid = 'fake_id' get_vdev.return_value = '2006' self._smtclient.create_nic(userid, nic_id='nic_id') create_nic.assert_called_with(userid, '2006', nic_id='nic_id', mac_addr=None, active=False) get_vdev.assert_called_with(userid, vdev=None) @mock.patch.object(smtclient.SMTClient, '_get_available_vdev') def test_create_nic_with_used_vdev(self, get_vdev): get_vdev.side_effect = exception.SDKConflictError('network', rs=6, vdev='1004', userid='fake_id', msg="error") self.assertRaises(exception.SDKConflictError, self._smtclient.create_nic, 'fake_id', nic_id="nic_id", vdev='1004') @mock.patch.object(database.NetworkDbOperator, 'switch_add_record') @mock.patch.object(smtclient.SMTClient, '_request') @mock.patch.object(smtclient.SMTClient, 'get_power_state') def test_private_create_nic_active(self, power_state, request, add_record): request.return_value = {'overallRC': 0} power_state.return_value = 'on' self._smtclient._create_nic("fakenode", "fake_vdev", nic_id="fake_nic", mac_addr='11:22:33:44:55:66', active=True) add_record.assert_called_once_with("fakenode", "fake_vdev", port="fake_nic") rd1 = ' '.join(( 'SMAPI fakenode API Virtual_Network_Adapter_Create_Extended_DM', "--operands", "-k image_device_number=fake_vdev", "-k adapter_type=QDIO", "-k mac_id=445566")) rd2 = ' '.join(( 'SMAPI fakenode API Virtual_Network_Adapter_Create_Extended', "--operands", "-k image_device_number=fake_vdev", "-k adapter_type=QDIO")) # make sure only those 2 APIs called, no retry triggered self.assertEqual(2, request.call_count) 
request.assert_any_call(rd1) request.assert_any_call(rd2) @mock.patch.object(time, "sleep") @mock.patch.object(smtclient.SMTClient, '_create_nic_inactive_exception') @mock.patch.object(database.NetworkDbOperator, 'switch_add_record') @mock.patch.object(smtclient.SMTClient, '_request') @mock.patch.object(smtclient.SMTClient, 'get_power_state') def test_private_create_nic_active_retry(self, power_state, request, add_record, cnie, ts): results = {'rc': 400, 'rs': 12, 'logEntries': ''} request.side_effect = exception.SDKSMTRequestFailed(results, "fake error") power_state.return_value = 'on' self._smtclient._create_nic("fakenode", "fake_vdev", nic_id="fake_nic", mac_addr='11:22:33:44:55:66', active=False) add_record.assert_called_once_with("fakenode", "fake_vdev", port="fake_nic") cnie.assert_called_once_with(mock.ANY, 'fakenode', 'fake_vdev') rd1 = ' '.join(( 'SMAPI fakenode API Virtual_Network_Adapter_Create_Extended_DM', "--operands", "-k image_device_number=fake_vdev", "-k adapter_type=QDIO", "-k mac_id=445566")) request.assert_any_call(rd1) self.assertEqual(5, request.call_count) self.assertEqual(4, ts.call_count) @mock.patch.object(smtclient.SMTClient, '_request') def test_get_user_direct(self, req): req.return_value = {'response': 'OK'} resp = self._smtclient.get_user_direct('user1') req.assert_called_once_with('getvm user1 directory') self.assertEqual(resp, 'OK') @mock.patch.object(database.NetworkDbOperator, 'switch_delete_record_for_nic') @mock.patch.object(smtclient.SMTClient, '_request') @mock.patch.object(smtclient.SMTClient, 'get_power_state') @mock.patch.object(database.NetworkDbOperator, 'switch_select_record_for_userid') def test_delete_nic(self, select_rec, power_state, request, delete_nic): select_rec.return_value = [{"interface": "1000", "comments": None}] power_state.return_value = 'on' userid = 'FakeID' vdev = '1000' rd1 = ' '.join(( "SMAPI FakeID API Virtual_Network_Adapter_Delete_DM", "--operands", '-v 1000')) rd2 = ' '.join(( "SMAPI FakeID API 
Virtual_Network_Adapter_Delete", "--operands", '-v 1000')) self._smtclient.delete_nic(userid, vdev, True) request.assert_any_call(rd1) request.assert_any_call(rd2) delete_nic.assert_called_with(userid, vdev) @mock.patch.object(smtclient.SMTClient, '_undedicate_nic') @mock.patch.object(smtclient.SMTClient, 'get_power_state') @mock.patch.object(database.NetworkDbOperator, 'switch_select_record_for_userid') def test_delete_nic_OSA(self, select_rec, power_state, undedicate_nic): select_rec.return_value = [{"interface": "1000", "comments": "OSA=F000"}] power_state.return_value = 'on' userid = 'FakeID' vdev = '1000' self._smtclient.delete_nic(userid, vdev, True) undedicate_nic.assert_called_with(userid, vdev, active=True) @mock.patch.object(smtclient.SMTClient, 'get_user_direct') @mock.patch.object(smtclient.SMTClient, '_lock_user_direct') @mock.patch.object(smtclient.SMTClient, '_replace_user_direct') @mock.patch.object(smtclient.SMTClient, '_couple_nic') def test_couple_nic_to_vswitch_no_vlan(self, couple_nic, replace, lock, get_user): replace_data = ["USER ABC", "NICDEF 1000 DEVICE 3", "NICDEF 1000 LAN SYSTEM VS1"] get_user.return_value = ["USER ABC", "NICDEF 1000 DEVICE 3"] self._smtclient.couple_nic_to_vswitch("fake_userid", "1000", "VS1", active=True) lock.assert_called_with("fake_userid") replace.assert_called_with("fake_userid", replace_data) couple_nic.assert_called_with("fake_userid", "1000", "VS1", active=True) @mock.patch.object(smtclient.SMTClient, 'get_user_direct') @mock.patch.object(smtclient.SMTClient, '_lock_user_direct') @mock.patch.object(smtclient.SMTClient, '_replace_user_direct') @mock.patch.object(smtclient.SMTClient, '_couple_nic') def test_couple_nic_to_vswitch_vlan(self, couple_nic, replace, lock, get_user): replace_data = ["USER ABC", "NICDEF 1000 DEVICE 3", "NICDEF 1000 LAN SYSTEM VS1 VLAN 55 PORTTYPE ACCESS"] get_user.return_value = ["USER ABC", "NICDEF 1000 DEVICE 3"] self._smtclient.couple_nic_to_vswitch("fake_userid", "1000", "VS1", 
active=True, vlan_id=55) lock.assert_called_with("fake_userid") replace.assert_called_with("fake_userid", replace_data) couple_nic.assert_called_with("fake_userid", "1000", "VS1", active=True) @mock.patch.object(smtclient.SMTClient, '_request') @mock.patch.object(smtclient.SMTClient, 'get_user_direct') @mock.patch.object(smtclient.SMTClient, '_lock_user_direct') @mock.patch.object(smtclient.SMTClient, '_replace_user_direct') @mock.patch.object(smtclient.SMTClient, '_couple_nic') def test_couple_nic_to_vswitch_vlan_fail(self, couple_nic, replace, lock, get_user, request): replace_data = ["USER ABC", "NICDEF 1000 DEVICE 3", "NICDEF 1000 LAN SYSTEM VS1 VLAN 55 PORTTYPE ACCESS"] results = {'rs': 0, 'errno': 0, 'strError': '', 'overallRC': 1, 'logEntries': [], 'rc': 0, 'response': ['fake response']} get_user.return_value = ["USER ABC", "NICDEF 1000 DEVICE 3"] replace.side_effect = exception.SDKSMTRequestFailed(results, "fake error") request.return_value = {'overallRC': 0} self.assertRaises(exception.SDKGuestOperationError, self._smtclient.couple_nic_to_vswitch, "fake_userid", "1000", "VS1", active=True, vlan_id=55) lock.assert_called_with("fake_userid") replace.assert_called_with("fake_userid", replace_data) request.assert_called_with("SMAPI fake_userid API Image_Unlock_DM ") @mock.patch.object(smtclient.SMTClient, '_request') @mock.patch.object(smtclient.SMTClient, 'get_user_direct') @mock.patch.object(smtclient.SMTClient, '_lock_user_direct') @mock.patch.object(smtclient.SMTClient, '_replace_user_direct') @mock.patch.object(smtclient.SMTClient, '_couple_nic') def test_couple_nic_to_vswitch_not_actually_called(self, couple_nic, replace, lock, get_user, request): # If user direct NICDEF already LAN SYSTEM , # skip the Image_Replace_DM and couple nic actions get_user.return_value = ["USER ABC", "NICDEF 1000 DEVICE 3 LAN SYSTEM VS1"] request.return_value = {'overallRC': 0} self._smtclient.couple_nic_to_vswitch("fake_userid", "1000", "VS1", active=True, vlan_id=55) 
lock.assert_not_called() replace.assert_not_called() couple_nic.assert_not_called() @mock.patch.object(smtclient.SMTClient, 'get_user_direct') @mock.patch.object(smtclient.SMTClient, '_lock_user_direct') @mock.patch.object(smtclient.SMTClient, '_replace_user_direct') @mock.patch.object(smtclient.SMTClient, '_couple_nic') def test_couple_nic_to_vswitch_port_type(self, couple_nic, replace, lock, get_user): replace_data = ["USER ABC", "NICDEF 1000 DEVICE 3", "NICDEF 1000 LAN SYSTEM VS1 VLAN 8 PORTTYPE TRUNK"] get_user.return_value = ["USER ABC", "NICDEF 1000 DEVICE 3"] self._smtclient.couple_nic_to_vswitch("fake_userid", "1000", "VS1", active=True, vlan_id=8, port_type='TRUNK') lock.assert_called_with("fake_userid") replace.assert_called_with("fake_userid", replace_data) couple_nic.assert_called_with("fake_userid", "1000", "VS1", active=True) @mock.patch.object(smtclient.SMTClient, 'get_user_direct') @mock.patch.object(smtclient.SMTClient, '_lock_user_direct') @mock.patch.object(smtclient.SMTClient, '_replace_user_direct') @mock.patch.object(smtclient.SMTClient, '_couple_nic') def test_couple_nic_to_vswitch_nic_in_lowercase(self, couple_nic, replace, lock, get_user): replace_data = ["USER ABC", "NICDEF 1E00 DEVICE 3", "NICDEF 1E00 LAN SYSTEM VS1 VLAN 8 PORTTYPE TRUNK"] get_user.return_value = ["USER ABC", "NICDEF 1E00 DEVICE 3"] self._smtclient.couple_nic_to_vswitch("fake_userid", "1e00", "VS1", active=True, vlan_id=8, port_type='TRUNK') lock.assert_called_with("fake_userid") replace.assert_called_with("fake_userid", replace_data) couple_nic.assert_called_with("fake_userid", "1e00", "VS1", active=True) @mock.patch.object(smtclient.SMTClient, '_uncouple_nic') def test_uncouple_nic_from_vswitch(self, uncouple_nic): self._smtclient.uncouple_nic_from_vswitch("fake_userid", "fakevdev", False) uncouple_nic.assert_called_with("fake_userid", "fakevdev", active=False) @mock.patch.object(database.NetworkDbOperator, 'switch_update_record_with_switch') 
@mock.patch.object(smtclient.SMTClient, '_request') @mock.patch.object(smtclient.SMTClient, 'get_power_state') def test_couple_nic(self, power_state, request, update_switch): request.return_value = {'overallRC': 0} power_state.return_value = 'on' userid = 'FakeID' vdev = 'FakeVdev' vswitch_name = 'FakeVS' requestData2 = ' '.join(( 'SMAPI FakeID', "API Virtual_Network_Adapter_Connect_Vswitch", "--operands", "-v FakeVdev", "-n FakeVS")) self._smtclient._couple_nic(userid, vdev, vswitch_name, active=True) update_switch.assert_called_with(userid, vdev, vswitch_name) request.assert_any_call(requestData2) @mock.patch.object(database.NetworkDbOperator, 'switch_update_record_with_switch') @mock.patch.object(smtclient.SMTClient, '_request') @mock.patch.object(smtclient.SMTClient, 'get_power_state') def test_uncouple_nic(self, power_state, request, update_switch): request.return_value = {'overallRC': 0} power_state.return_value = 'on' userid = 'FakeID' vdev = 'FakeVdev' requestData1 = ' '.join(( 'SMAPI FakeID', "API Virtual_Network_Adapter_Disconnect_DM", "--operands", "-v FakeVdev")) requestData2 = ' '.join(( 'SMAPI FakeID', "API Virtual_Network_Adapter_Disconnect", "--operands", "-v FakeVdev")) self._smtclient._uncouple_nic(userid, vdev, active=True) update_switch.assert_called_with(userid, vdev, None) request.assert_any_call(requestData1) request.assert_any_call(requestData2) @mock.patch.object(database.GuestDbOperator, 'get_migrated_guest_info_list') @mock.patch.object(database.GuestDbOperator, 'get_guest_list') def test_get_vm_list(self, db_list, migrated_list): db_list.return_value = [(u'9a5c9689-d099-46bb-865f-0c01c384f58c', u'TEST0', u'', u''), (u'3abe0ac8-90b5-4b00-b624-969c184b8158', u'TEST1', u'comm1', u''), (u'aa252ca5-03aa-4407-9c2e-d9737ddb8d24', u'TEST2', u'comm2', u'meta2')] migrated_list.return_value = [] userid_list = self._smtclient.get_vm_list() db_list.assert_called_once() migrated_list.assert_called_once() self.assertListEqual(sorted(userid_list), 
sorted(['TEST0', 'TEST1', 'TEST2'])) @mock.patch.object(database.GuestDbOperator, 'get_migrated_guest_info_list') @mock.patch.object(database.GuestDbOperator, 'get_guest_list') def test_get_vm_list_exclude_migrated(self, db_list, migrated_list): db_list.return_value = [(u'9a5c9689-d099-46bb-865f-0c01c384f58c', u'TEST0', u'', u''), (u'3abe0ac8-90b5-4b00-b624-969c184b8158', u'TEST1', u'comm1', u''), (u'aa252ca5-03aa-4407-9c2e-d9737ddb8d24', u'TEST2', u'{"migrated": 1}', u'meta2')] migrated_list.return_value = [(u'aa252ca5-03aa-4407-9c2e-d9737ddb8d24', u'TEST2', u'{"migrated": 1}', u'meta2')] userid_list = self._smtclient.get_vm_list() db_list.assert_called_once() migrated_list.assert_called_once() self.assertListEqual(sorted(userid_list), sorted(['TEST0', 'TEST1'])) @mock.patch.object(smtclient.SMTClient, '_request') def test_delete_userid(self, request): rd = 'deletevm fuser1 directory' self._smtclient.delete_userid('fuser1') request.assert_called_once_with(rd) @mock.patch.object(smtclient.SMTClient, '_request') def test_execute_cmd(self, request): rd = 'cmdVM fuser1 CMD \'ls\'' self._smtclient.execute_cmd('fuser1', 'ls') request.assert_called_once_with(rd) @mock.patch.object(smtclient.SMTClient, '_request') def test_delete_userid_not_exist(self, request): rd = 'deletevm fuser1 directory' results = {'rc': 400, 'rs': 4, 'logEntries': ''} request.side_effect = exception.SDKSMTRequestFailed(results, "fake error") self._smtclient.delete_userid('fuser1') request.assert_called_once_with(rd) @mock.patch.object(smtclient.SMTClient, '_request') def test_delete_userid_too_slow(self, request): rd1 = 'deletevm fuser1 directory' rd2 = 'PowerVM fuser1 off --maxwait 30 --poll 10' results = {'rc': 596, 'rs': 6831, 'logEntries': ''} request.side_effect = exception.SDKSMTRequestFailed(results, "fake error") self._smtclient.delete_userid('fuser1') request.assert_has_calls([mock.call(rd1), mock.call(rd2), mock.call(rd1)]) @mock.patch.object(smtclient.SMTClient, '_request') def 
test_delete_userid_with_vdisk_warning(self, request): rd = 'deletevm fuser1 directory' results = {'rc': 596, 'rs': 3543, 'logEntries': ''} request.side_effect = exception.SDKSMTRequestFailed(results, "fake error") self._smtclient.delete_userid('fuser1') request.assert_called_once_with(rd) @mock.patch.object(smtclient.SMTClient, '_request') def test_delete_userid_with_service_machine_error(self, request): rd = 'deletevm fuser1 directory' results = {'rc': 596, 'rs': 2119, 'logEntries': ''} request.side_effect = exception.SDKSMTRequestFailed(results, "fake error") self._smtclient.delete_userid('fuser1') request.assert_called_once_with(rd) @mock.patch.object(smtclient.SMTClient, '_request') def test_delete_userid_failed(self, request): rd = 'deletevm fuser1 directory' results = {'rc': 400, 'rs': 104, 'logEntries': ''} request.side_effect = exception.SDKSMTRequestFailed(results, "fake error") self.assertRaises(exception.SDKSMTRequestFailed, self._smtclient.delete_userid, 'fuser1') request.assert_called_once_with(rd) @mock.patch.object(subprocess, 'check_output') def test_get_disk_size_units_rhcos(self, check): image_path = 'test_path' check.return_value = b'3072327680' size = self._smtclient._get_disk_size_units_rhcos(image_path) self.assertEqual(size, '4168:CYL') @mock.patch.object(subprocess, 'check_output') def test_get_disk_size_units_rhcos_execute_error(self, check): image_path = 'test_path' check.return_value = b"fdisk error" self.assertRaises(exception.SDKImageOperationError, self._smtclient._get_disk_size_units_rhcos, image_path) check.side_effect = subprocess.CalledProcessError(1, "fake error") self.assertRaises(exception.SDKImageOperationError, self._smtclient._get_disk_size_units_rhcos, image_path) check.side_effect = Exception("fake error") self.assertRaises(exception.SDKInternalError, self._smtclient._get_disk_size_units_rhcos, image_path) @mock.patch.object(os, 'rename') @mock.patch.object(database.ImageDbOperator, 'image_add_record') 
    @mock.patch.object(smtclient.SMTClient, '_get_image_size')
    @mock.patch.object(smtclient.SMTClient, '_get_disk_size_units')
    @mock.patch.object(smtclient.SMTClient, '_get_md5sum')
    @mock.patch.object(smtclient.FilesystemBackend, 'image_import')
    @mock.patch.object(zvmutils.PathUtils, 'create_import_image_repository')
    @mock.patch.object(database.ImageDbOperator, 'image_query_record')
    def test_image_import(self, image_query, create_path, image_import,
                          get_md5sum, disk_size_units, image_size,
                          image_add_record, rename):
        # Happy path: the image file is imported into the repository, its
        # md5sum verified, and a DB record added with queried size info.
        image_name = 'testimage'
        url = 'file:///tmp/testdummyimg'
        image_meta = {'os_version': 'rhel6.5',
                      'md5sum': 'c73ce117eef8077c3420bfc8f473ac2f'}
        import_image_fpath = '/home/netboot/rhel6.5/testimage/testdummyimg'
        final_image_fpath = '/home/netboot/rhel6.5/testimage/0100'
        image_query.return_value = []
        create_path.return_value = '/home/netboot/rhel6.5/testimage'
        get_md5sum.return_value = 'c73ce117eef8077c3420bfc8f473ac2f'
        disk_size_units.return_value = '3338:CYL'
        image_size.return_value = '512000'
        self._smtclient.image_import(image_name, url, image_meta)
        image_query.assert_called_once_with(image_name)
        image_import.assert_called_once_with(image_name, url,
                                             import_image_fpath,
                                             remote_host=None)
        get_md5sum.assert_called_once_with(import_image_fpath)
        disk_size_units.assert_called_once_with(final_image_fpath)
        image_size.assert_called_once_with(final_image_fpath)
        image_add_record.assert_called_once_with(image_name, 'rhel6.5',
            'c73ce117eef8077c3420bfc8f473ac2f', '3338:CYL', '512000',
            'rootonly', comments=None)

    # RHCOS variant: 'disk_type' is required metadata and is stored in
    # the DB record's comments field.
    @mock.patch.object(os, 'rename')
    @mock.patch.object(database.ImageDbOperator, 'image_add_record')
    @mock.patch.object(smtclient.SMTClient, '_get_image_size')
    @mock.patch.object(smtclient.SMTClient, '_get_disk_size_units_rhcos')
    @mock.patch.object(smtclient.SMTClient, '_get_md5sum')
    @mock.patch.object(smtclient.FilesystemBackend, 'image_import')
    @mock.patch.object(zvmutils.PathUtils, 'create_import_image_repository')
    @mock.patch.object(database.ImageDbOperator,
                       'image_query_record')
    def test_image_import_rhcos(self, image_query, create_path, image_import,
                                get_md5sum, disk_size_units, image_size,
                                image_add_record, rename):
        image_name = 'testimage'
        url = 'file:///tmp/testdummyimg'
        image_meta = {'os_version': 'rhcos4.2',
                      'md5sum': 'c73ce117eef8077c3420bfc8f473ac2f',
                      'disk_type': 'DASD'}
        import_image_fpath = '/home/netboot/rhcos4.2/testimage/testdummyimg'
        final_image_fpath = '/home/netboot/rhcos4.2/testimage/0100'
        image_query.return_value = []
        create_path.return_value = '/home/netboot/rhcos4.2/testimage'
        get_md5sum.return_value = 'c73ce117eef8077c3420bfc8f473ac2f'
        disk_size_units.return_value = '3338:CYL'
        image_size.return_value = '512000'
        self._smtclient.image_import(image_name, url, image_meta)
        image_query.assert_called_once_with(image_name)
        image_import.assert_called_once_with(image_name, url,
                                             import_image_fpath,
                                             remote_host=None)
        get_md5sum.assert_called_once_with(import_image_fpath)
        disk_size_units.assert_called_once_with(final_image_fpath)
        image_size.assert_called_once_with(final_image_fpath)
        image_add_record.assert_called_once_with(image_name, 'rhcos4.2',
            'c73ce117eef8077c3420bfc8f473ac2f', '3338:CYL', '512000',
            'rootonly', comments="{'disk_type': 'DASD'}")

    # Importing over an existing DB record fails before touching the path.
    @mock.patch.object(smtclient.SMTClient, '_get_image_path_by_name')
    @mock.patch.object(database.ImageDbOperator, 'image_query_record')
    def test_image_import_image_already_exist(self, image_query,
                                              get_image_path):
        image_name = 'testimage'
        url = 'file:///tmp/testdummyimg'
        image_meta = {'os_version': 'rhel6.5',
                      'md5sum': 'c73ce117eef8077c3420bfc8f473ac2f'}
        image_query.return_value = [(u'testimage', u'rhel6.5',
                                     u'c73ce117eef8077c3420bfc8f473ac2f',
                                     u'3338:CYL', u'5120000', u'netboot',
                                     None)]
        self.assertRaises(exception.SDKImageOperationError,
                          self._smtclient.image_import,
                          image_name, url, image_meta)
        image_query.assert_called_once_with(image_name)
        get_image_path.assert_not_called()

    @mock.patch.object(smtclient.SMTClient, '_get_md5sum')
    @mock.patch.object(smtclient.FilesystemBackend, 'image_import')
    @mock.patch.object(database.ImageDbOperator, 'image_query_record')
    def test_image_import_invalid_md5sum(self, image_query, image_import,
                                         get_md5sum):
        # A checksum mismatch between metadata and the imported file
        # must abort the import.
        image_name = 'testimage'
        url = 'file:///tmp/testdummyimg'
        image_meta = {'os_version': 'rhel6.5',
                      'md5sum': 'c73ce117eef8077c3420bfc8f473ac2f'}
        image_query.return_value = []
        get_md5sum.return_value = 'c73ce117eef8077c3420bfc000000'
        self.assertRaises(exception.SDKImageOperationError,
                          self._smtclient.image_import,
                          image_name, url, image_meta)

    # Asking for the last access time of a nonexistent path raises.
    @mock.patch.object(smtclient.SMTClient, '_get_image_path_by_name')
    def test_get_image_access_time_image_not_exist(self, image_path):
        image_name = "testimage"
        image_path.return_value = '/tmp/notexistpath/'
        self.assertRaises(exception.SDKImageOperationError,
                          self._smtclient._get_image_last_access_time,
                          image_name)

    # RHCOS import requires a valid 'disk_type'; missing or bad values
    # both raise before any repository work happens.
    @mock.patch.object(zvmutils.PathUtils, 'create_import_image_repository')
    @mock.patch.object(database.ImageDbOperator, 'image_query_record')
    def test_image_import_rhcos_nodisktype(self, image_query, create_path):
        image_name = 'testimage'
        url = 'file:///tmp/testdummyimg'
        image_meta = {'os_version': 'rhcos4.2',
                      'md5sum': 'c73ce117eef8077c3420bfc8f473ac2f'}
        self.assertRaises(exception.SDKImageOperationError,
                          self._smtclient.image_import,
                          image_name, url, image_meta)
        image_meta = {'os_version': 'rhcos4.2',
                      'md5sum': 'c73ce117eef8077c3420bfc8f473ac2f',
                      'disk_type': 'error'}
        self.assertRaises(exception.SDKImageOperationError,
                          self._smtclient.image_import,
                          image_name, url, image_meta)

    # image_query() decorates DB rows with the filesystem access time.
    @mock.patch.object(smtclient.SMTClient, '_get_image_last_access_time')
    @mock.patch.object(database.ImageDbOperator, 'image_query_record')
    def test_image_query(self, image_query, access_time):
        image_name = "testimage"
        fake_access_time = 1581910539.3330014
        access_time.return_value = fake_access_time
        image_query.return_value = [{'imagename': 'testimage'}]
        image_info = self._smtclient.image_query(image_name)
        image_query.assert_called_once_with(image_name)
        self.assertEqual(image_info[0]['last_access_time'],
                         fake_access_time)

    # No DB record -> empty list, not an error.
    @mock.patch.object(smtclient.SMTClient, '_get_image_last_access_time')
    @mock.patch.object(database.ImageDbOperator, 'image_query_record')
    def test_image_query_image_not_exist(self, image_query, access_time):
        image_name = "testimage"
        fake_access_time = 1581910539.3330014
        access_time.return_value = fake_access_time
        image_query.return_value = None
        image_info = self._smtclient.image_query(image_name)
        self.assertEqual(image_info, [])

    # image_name=None queries all records; access time is still refreshed.
    @mock.patch.object(smtclient.SMTClient, '_get_image_last_access_time')
    @mock.patch.object(database.ImageDbOperator, 'image_query_record')
    def test_image_query_none_imagename(self, image_query, access_time):
        image_name = None
        fake_access_time = 1581910539.3330014
        image_query.return_value = [{u'image_size_in_bytes': u'719045489',
                                     u'last_access_time': 1581910539.3330014,
                                     u'disk_size_units': u'3339:CYL',
                                     u'md5sum': u'157e2a2c3be1d49ef6e69324',
                                     u'comments': None,
                                     u'imagename': u'testimage',
                                     u'imageosdistro': u'rhel7',
                                     u'type': u'rootonly'}]
        access_time.return_value = fake_access_time
        image_info = self._smtclient.image_query(image_name)
        image_query.assert_called_once_with(image_name)
        self.assertEqual(image_info[0]['last_access_time'],
                         fake_access_time)

    # image_delete() removes the file and then the DB record.
    @mock.patch.object(database.ImageDbOperator, 'image_delete_record')
    @mock.patch.object(smtclient.SMTClient, '_delete_image_file')
    def test_image_delete(self, delete_file, delete_db_record):
        image_name = 'testimage'
        self._smtclient.image_delete(image_name)
        delete_file.assert_called_once_with(image_name)
        delete_db_record.assert_called_once_with(image_name)

    @mock.patch.object(smtclient.SMTClient, 'image_get_root_disk_size')
    def test_image_get_root_disk_size(self, query_disk_size_units):
        image_name = 'testimage'
        self._smtclient.image_get_root_disk_size(image_name)
        query_disk_size_units.assert_called_once_with(image_name)

    @mock.patch.object(database.ImageDbOperator, 'image_query_record')
    @mock.patch.object(smtclient.FilesystemBackend, 'image_export')
    def test_image_export(self, image_export, image_query):
        # image_export() returns a summary dict built from the DB record
        # plus the destination URL.
        image_name = u'testimage'
        dest_url = 'file:///path/to/exported/image'
        remote_host = 'nova@9.x.x.x'
        image_query.return_value = [
            {'imagename': u'testimage',
             'imageosdistro': u'rhel6.5',
             'md5sum': u'c73ce117eef8077c3420bfc8f473ac2f',
             'disk_size_units': u'3338:CYL',
             'image_size_in_bytes': u'5120000',
             'type': u'rootonly',
             'comments': None}]
        expect_return = {
            'image_name': u'testimage',
            'image_path': u'file:///path/to/exported/image',
            'os_version': u'rhel6.5',
            'md5sum': u'c73ce117eef8077c3420bfc8f473ac2f',
            'comments': None
        }
        real_return = self._smtclient.image_export(image_name, dest_url,
                                                   remote_host=remote_host)
        image_query.assert_called_once_with(image_name)
        self.assertDictEqual(real_return, expect_return)

    # vdev generation: base address plus index.
    def test_generate_vdev(self):
        base = '0100'
        idx = 1
        vdev = self._smtclient._generate_vdev(base, idx)
        self.assertEqual(vdev, '0101')

    # NIC id expansion: base device plus the next two consecutive devices.
    def test_generate_generate_increasing_nic_id(self):
        nic_id_base = "1000"
        nic_id = self._smtclient._generate_increasing_nic_id(nic_id_base)
        self.assertEqual(nic_id, '0.0.1000,0.0.1001,0.0.1002')

    # 'FFFF' cannot be incremented twice within the device range.
    def test_generate_generate_increasing_nic_id_failed(self):
        self.assertRaises(exception.SDKInvalidInputFormat,
                          self._smtclient._generate_increasing_nic_id,
                          'FFFF')

    # Without explicit vdevs, disks are assigned 0100, 0101, ...
    @mock.patch.object(smtclient.SMTClient, '_add_mdisk')
    def test_add_mdisks(self, add_mdisk):
        userid = 'fakeuser'
        disk_list = [{'size': '1g', 'is_boot_disk': True,
                      'disk_pool': 'ECKD:eckdpool1'},
                     {'size': '200000', 'disk_pool': 'FBA:fbapool1',
                      'format': 'ext3'}]
        self._smtclient.add_mdisks(userid, disk_list)
        add_mdisk.assert_any_call(userid, disk_list[0], '0100')
        add_mdisk.assert_any_call(userid, disk_list[1], '0101')

    # A disk without a disk_pool (and no configured default) is an error.
    def test_add_mdisks_no_disk_pool(self):
        disk_list = [{'size': '1g', 'is_boot_disk': True,
                      'disk_pool': 'ECKD:eckdpool1'},
                     {'size': '200000', 'format': 'ext3'}]
        self.assertRaises(exception.SDKGuestOperationError,
                          self._smtclient.add_mdisks, 'fakeuser',
                          disk_list)

    # An explicit vdev on one disk overrides the auto-assigned address.
    @mock.patch.object(smtclient.SMTClient, '_add_mdisk')
    def test_add_mdisks_with_1dev(self, add_mdisk):
        userid = 'fakeuser'
        disk_list = [{'size': '1g', 'is_boot_disk': True,
                      'disk_pool': 'ECKD:eckdpool1'},
                     {'size': '200000', 'disk_pool': 'FBA:fbapool1',
                      'format': 'ext3', 'vdev': '0200'}]
        self._smtclient.add_mdisks(userid, disk_list)
        add_mdisk.assert_any_call(userid, disk_list[0], '0100')
        add_mdisk.assert_any_call(userid, disk_list[1], '0200')

    # Explicit vdevs on every disk are used as-is.
    @mock.patch.object(smtclient.SMTClient, '_add_mdisk')
    def test_add_mdisks_with_2dev(self, add_mdisk):
        userid = 'fakeuser'
        disk_list = [{'size': '1g', 'is_boot_disk': True,
                      'disk_pool': 'ECKD:eckdpool1', 'vdev': '0200'},
                     {'size': '200000', 'disk_pool': 'FBA:fbapool1',
                      'format': 'ext3', 'vdev': '0300'}]
        self._smtclient.add_mdisks(userid, disk_list)
        add_mdisk.assert_any_call(userid, disk_list[0], '0200')
        add_mdisk.assert_any_call(userid, disk_list[1], '0300')

    # dedicate_device() translates its arguments into a changevm request.
    @mock.patch.object(smtclient.SMTClient, '_request')
    def test_dedicate_device(self, request):
        fake_userid = 'FakeID'
        vaddr = 'vaddr'
        raddr = 'raddr'
        mode = 1
        requestData = "changevm FakeID dedicate vaddr raddr 1"
        request.return_value = {'overallRC': 0}
        self._smtclient.dedicate_device(fake_userid, vaddr, raddr, mode)
        request.assert_called_once_with(requestData)

    # get_fcp_info_by_status() issues a 'getvm ... fcpinfo all YES'
    # request; the sample response covers one Active and one Free FCP.
    @mock.patch.object(smtclient.SMTClient, '_request')
    def test_get_fcp_info_by_status(self, request):
        fake_userid = 'FakeID'
        requestData = "getvm FakeID fcpinfo all YES"
        fcp_info = ['FCP device number: 5C01',
                    'Status: Active',
                    'NPIV world wide port number: C05076DDF7001111',
                    'Channel path ID: 55',
                    'Physical world wide port number: C05076DDF7001111',
                    'Owner: BJCB0007',
                    'FCP device number: 5C02',
                    'Status: Free',
                    'NPIV world wide port number: C05076DDF7002222',
                    'Channel path ID: 55',
                    'Physical world wide port number: C05076DDF7002222',
                    'Owner: NONE']
        request.return_value = {'overallRC': 0, 'response': fcp_info}
        self._smtclient.get_fcp_info_by_status(fake_userid)
        request.assert_called_once_with(requestData)
    @mock.patch.object(smtclient.SMTClient, '_request')
    def test_undedicate_device(self, request):
        # undedicate_device() translates into a changevm undedicate request.
        fake_userid = 'FakeID'
        vaddr = 'vaddr'
        requestData = "changevm FakeID undedicate vaddr"
        request.return_value = {'overallRC': 0}
        self._smtclient.undedicate_device(fake_userid, vaddr)
        request.assert_called_once_with(requestData)

    # remove_mdisks() removes each vdev in the list individually.
    @mock.patch.object(smtclient.SMTClient, '_remove_mdisk')
    def test_remove_mdisks(self, remove_mdisk):
        userid = 'fakeuser'
        vdev_list = ['102', '103']
        self._smtclient.remove_mdisks(userid, vdev_list)
        remove_mdisk.assert_any_call(userid, vdev_list[0])
        remove_mdisk.assert_any_call(userid, vdev_list[1])

    # Per-guest data is extracted from the image_performance_query result.
    @mock.patch.object(smtclient.SMTClient, 'image_performance_query')
    def test_get_image_performance_info(self, ipq):
        ipq.return_value = {
            u'FAKEVM': {
                'used_memory': u'5222192 KB',
                'used_cpu_time': u'25640530229 uS',
                'guest_cpus': u'2',
                'userid': u'FKAEVM',
                'max_memory': u'8388608 KB'}}
        info = self._smtclient.get_image_performance_info('FAKEVM')
        self.assertEqual(info['used_memory'], '5222192 KB')

    # Unknown guest -> None, not an exception.
    @mock.patch.object(smtclient.SMTClient, 'image_performance_query')
    def test_get_image_performance_info_not_exist(self, ipq):
        ipq.return_value = {}
        info = self._smtclient.get_image_performance_info('fakevm')
        self.assertEqual(info, None)

    # A vdev outside the occupied range is valid...
    def test_is_vdev_valid_true(self):
        vdev = '1009'
        vdev_info = ['1003', '1006']
        result = self._smtclient._is_vdev_valid(vdev, vdev_info)
        self.assertEqual(result, True)

    # ...while one inside the occupied range is not.
    def test_is_vdev_valid_False(self):
        vdev = '2002'
        vdev_info = ['2000', '2004']
        result = self._smtclient._is_vdev_valid(vdev, vdev_info)
        self.assertEqual(result, False)

    # Console output: each spool id from the response is read back via
    # vmur and the chunks are concatenated.
    @mock.patch.object(zvmutils, 'execute')
    @mock.patch.object(smtclient.SMTClient, '_request')
    def test_get_user_console_output(self, req, execu):
        req.return_value = self._generate_results(
            response=['cons: 0001 0002'])
        execu.side_effect = [(0, 'first line\n'), (0, 'second line\n')]
        cons_log = self._smtclient.get_user_console_output('fakeuser')
        req.assert_called_once_with('getvm fakeuser consoleoutput')
        execu.assert_any_call('sudo /usr/sbin/vmur re -t -O 0001')
        execu.assert_any_call('sudo /usr/sbin/vmur re -t -O 0002')
        self.assertEqual(cons_log, 'first line\nsecond line\n')

    # A failed consoleoutput request propagates to the caller.
    @mock.patch.object(smtclient.SMTClient, '_request')
    def test_get_user_console_output_request_failed(self, req):
        req.side_effect = exception.SDKSMTRequestFailed({}, 'err')
        self.assertRaises(exception.SDKSMTRequestFailed,
                          self._smtclient.get_user_console_output,
                          'fakeuser')

    @mock.patch.object(smtclient.SMTClient, '_request')
    def test_guest_reboot(self, req):
        req.return_value = self._generate_results()
        self._smtclient.guest_reboot('fakeuser')
        req.assert_called_once_with('PowerVM fakeuser reboot')

    @mock.patch.object(smtclient.SMTClient, '_request')
    def test_guest_reset(self, req):
        req.return_value = self._generate_results()
        self._smtclient.guest_reset('fakeuser')
        req.assert_called_once_with('PowerVM fakeuser reset')

    # rs=1 with a 'reachable' response line means the guest is reachable.
    @mock.patch.object(smtclient.SMTClient, '_request')
    def test_get_guest_connection_status(self, req):
        result = self._generate_results(rs=1,
                                        response=['testuid: reachable'])
        req.return_value = result
        is_reachable = self._smtclient.get_guest_connection_status('testuid')
        self.assertTrue(is_reachable)

    # get_nic_info() is a thin pass-through to the network DB.
    @mock.patch.object(database.NetworkDbOperator, 'switch_select_record')
    def test_get_nic_info(self, select):
        self._smtclient.get_nic_info(userid='testid', nic_id='fake_nic')
        select.assert_called_with(userid='testid', nic_id='fake_nic',
                                  vswitch=None)

    # RHEL7-style guest: the root device is found via /dev/disk/by-path.
    @mock.patch.object(smtclient.SMTClient, 'execute_cmd')
    def test_guest_capture_get_capture_devices_rh7(self, execcmd):
        userid = 'fakeid'
        execcmd.side_effect = [['/dev/disk/by-path/ccw-0.0.0100-part1'],
                               ['/dev/dasda1'],
                               ['0.0.0100(ECKD) at ( 94: 0) is dasda'
                                ' : active at blocksize: 4096,'
                                ' 600840 blocks, 2347 MB']]
        result = self._smtclient._get_capture_devices(userid)
        self.assertEqual(result, ['0100'])

    # Ubuntu-style guest: identified via the kernel command line instead.
    @mock.patch.object(smtclient.SMTClient, 'execute_cmd')
    def test_guest_capture_get_capture_devices_ubuntu(self, execcmd):
        userid = 'fakeid'
execcmd.side_effect = [['UUID=8320ec9d-c2b5-439f-b0a0-cede08afe957' ' allow_lun_scan=0 crashkernel=128M' ' BOOT_IMAGE=0'], ['/dev/dasda1'], ['0.0.0100(ECKD) at ( 94: 0) is dasda' ' : active at blocksize: 4096,' ' 600840 blocks, 2347 MB']] result = self._smtclient._get_capture_devices(userid) self.assertEqual(result, ['0100']) @mock.patch.object(smtclient.SMTClient, 'execute_cmd') def test_guest_capture_get_os_version_rh7(self, execcmd): userid = 'fakeid' execcmd.side_effect = [['/etc/os-release', '/etc/redhat-release', '/etc/system-release'], ['NAME="Red Hat Enterprise Linux Server"', 'VERSION="7.0 (Maipo)"', 'ID="rhel"', 'ID_LIKE="fedora"', 'VERSION_ID="7.0"', 'PRETTY_NAME="Red Hat Enterprise Linux' ' Server 7.0 (Maipo)"', 'ANSI_COLOR="0;31"', 'CPE_NAME="cpe:/o:redhat:enterprise_linux:' '7.0:GA:server"', 'HOME_URL="https://www.redhat.com/"']] result = self._smtclient.guest_get_os_version(userid) self.assertEqual(result, 'rhel7.0') @mock.patch.object(smtclient.SMTClient, 'execute_cmd') def test_guest_capture_get_os_version_rhel67_sles11(self, execcmd): userid = 'fakeid' execcmd.side_effect = [['/etc/redhat-release', '/etc/system-release'], ['Red Hat Enterprise Linux Server release 6.7' ' (Santiago)']] result = self._smtclient.guest_get_os_version(userid) self.assertEqual(result, 'rhel6.7') @mock.patch.object(smtclient.SMTClient, 'execute_cmd') def test_guest_capture_get_os_version_ubuntu(self, execcmd): userid = 'fakeid' execcmd.side_effect = [['/etc/lsb-release', '/etc/os-release'], ['NAME="Ubuntu"', 'VERSION="16.04 (Xenial Xerus)"', 'ID=ubuntu', 'ID_LIKE=debian', 'PRETTY_NAME="Ubuntu 16.04"', 'VERSION_ID="16.04"', 'HOME_URL="http://www.ubuntu.com/"', 'SUPPORT_URL="http://help.ubuntu.com/"', 'BUG_REPORT_URL="http://bugs.launchpad.net' '/ubuntu/"', 'UBUNTU_CODENAME=xenial']] result = self._smtclient.guest_get_os_version(userid) self.assertEqual(result, 'ubuntu16.04') @mock.patch.object(database.GuestDbOperator, 'get_guest_metadata_with_userid') def 
test_get_os_version_from_userid_with_os_info(self, db_list): db_list.return_value = [(u'os_version=rhel8.3', u'')] userid = 'TEST1' os_version = self._smtclient.get_os_version_from_userid(userid) db_list.assert_called_with(userid) self.assertEqual('RHEL8.3', os_version) @mock.patch.object(database.GuestDbOperator, 'get_guest_metadata_with_userid') def test_get_os_version_from_userid_without_os_info(self, db_list): db_list.return_value = [(u'comm1', u'0')] userid = 'TEST0' os_version = self._smtclient.get_os_version_from_userid(userid) db_list.assert_called_with(userid) self.assertEqual('UNKNOWN', os_version) @mock.patch.object(database.GuestDbOperator, 'get_guest_metadata_with_userid') def test_get_os_version_from_userid_exception(self, db_list): msg = 'test' db_list.side_effect = exception.SDKGuestOperationError(rs=1, msg=msg) userid = 'test123' self.assertRaises(exception.SDKGuestOperationError, self._smtclient.get_os_version_from_userid, userid) db_list.assert_called_with(userid) @mock.patch.object(smtclient.SMTClient, 'get_os_version_from_userid') @mock.patch.object(smtclient.SMTClient, 'get_power_state') @mock.patch.object(smtclient.SMTClient, 'guest_start') @mock.patch.object(smtclient.SMTClient, 'get_user_direct') @mock.patch.object(database.ImageDbOperator, 'image_add_record') @mock.patch.object(zvmutils.PathUtils, 'clean_temp_folder') @mock.patch.object(smtclient.SMTClient, '_get_image_size') @mock.patch.object(smtclient.SMTClient, '_get_disk_size_units') @mock.patch.object(smtclient.SMTClient, '_get_md5sum') @mock.patch.object(zvmutils, 'execute') @mock.patch.object(zvmutils.PathUtils, 'mkdir_if_not_exist') @mock.patch.object(smtclient.SMTClient, 'guest_stop') @mock.patch.object(smtclient.SMTClient, '_get_capture_devices') @mock.patch.object(smtclient.SMTClient, 'guest_get_os_version') @mock.patch.object(smtclient.SMTClient, 'execute_cmd') @mock.patch.object(smtclient.SMTClient, 'get_guest_connection_status') def 
test_guest_capture_good_path_unreachable_poweron( self, guest_connection_status, execcmd, get_os_version, get_capture_devices, guest_stop, mkdir, execute, md5sum, disk_size_units, imagesize, rm_folder, image_add_record, get_user_direct, guest_start, get_power_state, get_os_mock): userid = 'fakeid' image_name = 'fakeimage' get_user_direct.return_value = ['USER TEST1234 LBYONLY 4096m 64G G', 'COMMAND SET VCONFIG MODE LINUX', 'COMMAND DEFINE CPU 00 TYPE IFL', 'MDISK 0100 3390 0001 14564 IAS114 MR'] execcmd.return_value = ['/'] image_temp_dir = '/'.join([CONF.image.sdk_image_repository, 'staging', 'UNKNOWN', image_name]) image_file_path = '/'.join((image_temp_dir, '0100')) cmd1 = ['sudo', '/opt/zthin/bin/creatediskimage', userid, '0100', image_file_path, '--compression', '6'] execute.side_effect = [(0, ''), (0, '')] image_final_dir = '/'.join((CONF.image.sdk_image_repository, 'netboot', 'UNKNOWN', image_name)) image_final_path = '/'.join((image_final_dir, '0100')) cmd2 = ['mv', image_file_path, image_final_path] md5sum.return_value = '547396211b558490d31e0de8e15eef0c' disk_size_units.return_value = '1000:CYL' imagesize.return_value = '1024000' guest_connection_status.return_value = False get_power_state.return_value = 'on' get_os_mock.return_value = 'UNKNOWN' self._smtclient.guest_capture(userid, image_name) guest_connection_status.assert_called_with(userid) execcmd.assert_not_called() get_os_version.assert_not_called() get_capture_devices.assert_not_called() guest_stop.assert_called_once_with(userid) execute.assert_has_calls([mock.call(cmd1), mock.call(cmd2)]) mkdir.assert_has_calls([mock.call(image_temp_dir)], [mock.call(image_final_dir)]) rm_folder.assert_called_once_with(image_temp_dir) md5sum.assert_called_once_with(image_final_path) disk_size_units.assert_called_once_with(image_final_path) imagesize.assert_called_once_with(image_final_path) image_add_record.assert_called_once_with(image_name, 'UNKNOWN', '547396211b558490d31e0de8e15eef0c', '1000:CYL', '1024000', 
'rootonly') get_user_direct.assert_called_once_with(userid) guest_start.assert_called_once_with(userid) get_power_state.assert_called_once_with(userid) get_os_mock.assert_called_once_with(userid) @mock.patch.object(smtclient.SMTClient, 'get_os_version_from_userid') @mock.patch.object(smtclient.SMTClient, 'get_power_state') @mock.patch.object(smtclient.SMTClient, 'guest_start') @mock.patch.object(smtclient.SMTClient, 'get_user_direct') @mock.patch.object(database.ImageDbOperator, 'image_add_record') @mock.patch.object(zvmutils.PathUtils, 'clean_temp_folder') @mock.patch.object(smtclient.SMTClient, '_get_image_size') @mock.patch.object(smtclient.SMTClient, '_get_disk_size_units') @mock.patch.object(smtclient.SMTClient, '_get_md5sum') @mock.patch.object(zvmutils, 'execute') @mock.patch.object(zvmutils.PathUtils, 'mkdir_if_not_exist') @mock.patch.object(smtclient.SMTClient, 'guest_stop') @mock.patch.object(smtclient.SMTClient, '_get_capture_devices') @mock.patch.object(smtclient.SMTClient, 'guest_get_os_version') @mock.patch.object(smtclient.SMTClient, 'execute_cmd') @mock.patch.object(smtclient.SMTClient, 'get_guest_connection_status') def test_guest_capture_good_path_poweroff(self, guest_connection_status, execcmd, get_os_version, get_capture_devices, guest_stop, mkdir, execute, md5sum, disk_size_units, imagesize, rm_folder, image_add_record, get_user_direct, guest_start, get_power_state, get_os_mock): userid = 'fakeid' image_name = 'fakeimage' get_user_direct.return_value = ['USER TEST1234 LBYONLY 4096m 64G G', 'COMMAND SET VCONFIG MODE LINUX', 'COMMAND DEFINE CPU 00 TYPE IFL', 'MDISK 0100 3390 0001 14564 IAS114 MR'] execcmd.return_value = ['/'] image_temp_dir = '/'.join([CONF.image.sdk_image_repository, 'staging', 'UNKNOWN', image_name]) image_file_path = '/'.join((image_temp_dir, '0100')) cmd1 = ['sudo', '/opt/zthin/bin/creatediskimage', userid, '0100', image_file_path, '--compression', '6'] execute.side_effect = [(0, ''), (0, '')] image_final_dir = 
'/'.join((CONF.image.sdk_image_repository, 'netboot', 'UNKNOWN', image_name)) image_final_path = '/'.join((image_final_dir, '0100')) cmd2 = ['mv', image_file_path, image_final_path] md5sum.return_value = '547396211b558490d31e0de8e15eef0c' disk_size_units.return_value = '1000:CYL' imagesize.return_value = '1024000' guest_connection_status.return_value = False get_power_state.return_value = 'off' get_os_mock.return_value = 'UNKNOWN' self._smtclient.guest_capture(userid, image_name) guest_connection_status.assert_called_with(userid) execcmd.assert_not_called() get_os_version.assert_not_called() get_capture_devices.assert_not_called() guest_stop.assert_not_called() execute.assert_has_calls([mock.call(cmd1), mock.call(cmd2)]) mkdir.assert_has_calls([mock.call(image_temp_dir)], [mock.call(image_final_dir)]) rm_folder.assert_called_once_with(image_temp_dir) md5sum.assert_called_once_with(image_final_path) disk_size_units.assert_called_once_with(image_final_path) imagesize.assert_called_once_with(image_final_path) image_add_record.assert_called_once_with(image_name, 'UNKNOWN', '547396211b558490d31e0de8e15eef0c', '1000:CYL', '1024000', 'rootonly') get_user_direct.assert_called_once_with(userid) get_power_state.assert_called_once_with(userid) guest_start.assert_not_called() get_os_mock.assert_called_once_with(userid) @mock.patch.object(smtclient.SMTClient, 'get_os_version_from_userid') @mock.patch.object(smtclient.SMTClient, 'get_power_state') @mock.patch.object(smtclient.SMTClient, 'guest_start') @mock.patch.object(smtclient.SMTClient, 'get_user_direct') @mock.patch.object(database.ImageDbOperator, 'image_add_record') @mock.patch.object(zvmutils.PathUtils, 'clean_temp_folder') @mock.patch.object(smtclient.SMTClient, '_get_image_size') @mock.patch.object(smtclient.SMTClient, '_get_disk_size_units') @mock.patch.object(smtclient.SMTClient, '_get_md5sum') @mock.patch.object(zvmutils, 'execute') @mock.patch.object(zvmutils.PathUtils, 'mkdir_if_not_exist') 
@mock.patch.object(smtclient.SMTClient, 'guest_stop') @mock.patch.object(smtclient.SMTClient, '_get_capture_devices') @mock.patch.object(smtclient.SMTClient, 'guest_get_os_version') @mock.patch.object(smtclient.SMTClient, 'execute_cmd') @mock.patch.object(smtclient.SMTClient, 'get_guest_connection_status') def test_guest_capture_good_path_poweroff_os(self, guest_connection_status, execcmd, get_os_version, get_capture_devices, guest_stop, mkdir, execute, md5sum, disk_size_units, imagesize, rm_folder, image_add_record, get_user_direct, guest_start, get_power_state, get_os_mock): userid = 'fakeid' image_name = 'fakeimage' get_user_direct.return_value = ['USER TEST1234 LBYONLY 4096m 64G G', 'COMMAND SET VCONFIG MODE LINUX', 'COMMAND DEFINE CPU 00 TYPE IFL', 'MDISK 0100 3390 0001 14564 IAS114 MR'] execcmd.return_value = ['/'] image_temp_dir = '/'.join([CONF.image.sdk_image_repository, 'staging', 'RHEL8.3', image_name]) image_file_path = '/'.join((image_temp_dir, '0100')) cmd1 = ['sudo', '/opt/zthin/bin/creatediskimage', userid, '0100', image_file_path, '--compression', '6'] execute.side_effect = [(0, ''), (0, '')] image_final_dir = '/'.join((CONF.image.sdk_image_repository, 'netboot', 'RHEL8.3', image_name)) image_final_path = '/'.join((image_final_dir, '0100')) cmd2 = ['mv', image_file_path, image_final_path] md5sum.return_value = '547396211b558490d31e0de8e15eef0c' disk_size_units.return_value = '1000:CYL' imagesize.return_value = '1024000' guest_connection_status.return_value = False get_power_state.return_value = 'off' get_os_mock.return_value = 'RHEL8.3' self._smtclient.guest_capture(userid, image_name) guest_connection_status.assert_called_with(userid) execcmd.assert_not_called() get_os_version.assert_not_called() get_capture_devices.assert_not_called() guest_stop.assert_not_called() execute.assert_has_calls([mock.call(cmd1), mock.call(cmd2)]) mkdir.assert_has_calls([mock.call(image_temp_dir)], [mock.call(image_final_dir)]) 
rm_folder.assert_called_once_with(image_temp_dir) md5sum.assert_called_once_with(image_final_path) disk_size_units.assert_called_once_with(image_final_path) imagesize.assert_called_once_with(image_final_path) image_add_record.assert_called_once_with(image_name, 'RHEL8.3', '547396211b558490d31e' '0de8e15eef0c', '1000:CYL', '1024000', 'rootonly') get_user_direct.assert_called_once_with(userid) get_power_state.assert_called_once_with(userid) guest_start.assert_not_called() get_os_mock.assert_called_once_with(userid) @mock.patch.object(smtclient.SMTClient, 'get_os_version_from_userid') @mock.patch.object(smtclient.SMTClient, 'get_power_state') @mock.patch.object(smtclient.SMTClient, 'guest_start') @mock.patch.object(smtclient.SMTClient, 'get_user_direct') @mock.patch.object(database.ImageDbOperator, 'image_add_record') @mock.patch.object(zvmutils.PathUtils, 'clean_temp_folder') @mock.patch.object(smtclient.SMTClient, '_get_image_size') @mock.patch.object(smtclient.SMTClient, '_get_disk_size_units') @mock.patch.object(smtclient.SMTClient, '_get_md5sum') @mock.patch.object(zvmutils, 'execute') @mock.patch.object(zvmutils.PathUtils, 'mkdir_if_not_exist') @mock.patch.object(smtclient.SMTClient, 'guest_stop') @mock.patch.object(smtclient.SMTClient, '_get_capture_devices') @mock.patch.object(smtclient.SMTClient, 'guest_get_os_version') @mock.patch.object(smtclient.SMTClient, 'execute_cmd') @mock.patch.object(smtclient.SMTClient, 'get_guest_connection_status') def test_guest_capture_poweroff_with_force_disk(self, guest_connection_status, execcmd, get_os_version, get_capture_devices, guest_stop, mkdir, execute, md5sum, disk_size_units, imagesize, rm_folder, image_add_record, get_user_direct, guest_start, get_power_state, get_os_mock): CONF.zvm.force_capture_disk = '0100' userid = 'fakeid' image_name = 'fakeimage' get_user_direct.return_value = ['USER TEST1234 LBYONLY 4096m 64G G', 'COMMAND SET VCONFIG MODE LINUX', 'COMMAND DEFINE CPU 00 TYPE IFL', 'MDISK 0100 3390 0001 14564 
IAS114 MR'] execcmd.return_value = ['/'] image_temp_dir = '/'.join([CONF.image.sdk_image_repository, 'staging', 'UNKNOWN', image_name]) image_file_path = '/'.join((image_temp_dir, '0100')) cmd1 = ['sudo', '/opt/zthin/bin/creatediskimage', userid, '0100', image_file_path, '--compression', '6'] execute.side_effect = [(0, ''), (0, '')] image_final_dir = '/'.join((CONF.image.sdk_image_repository, 'netboot', 'UNKNOWN', image_name)) image_final_path = '/'.join((image_final_dir, '0100')) cmd2 = ['mv', image_file_path, image_final_path] md5sum.return_value = '547396211b558490d31e0de8e15eef0c' disk_size_units.return_value = '1000:CYL' imagesize.return_value = '1024000' guest_connection_status.return_value = False get_power_state.return_value = 'off' get_os_mock.return_value = 'UNKNOWN' self._smtclient.guest_capture(userid, image_name) guest_connection_status.assert_called_with(userid) execcmd.assert_not_called() get_os_version.assert_not_called() get_capture_devices.assert_not_called() guest_stop.assert_not_called() get_user_direct.assert_not_called() execute.assert_has_calls([mock.call(cmd1), mock.call(cmd2)]) mkdir.assert_has_calls([mock.call(image_temp_dir)], [mock.call(image_final_dir)]) rm_folder.assert_called_once_with(image_temp_dir) md5sum.assert_called_once_with(image_final_path) disk_size_units.assert_called_once_with(image_final_path) imagesize.assert_called_once_with(image_final_path) image_add_record.assert_called_once_with(image_name, 'UNKNOWN', '547396211b558490d31e0de8e15eef0c', '1000:CYL', '1024000', 'rootonly') guest_start.assert_not_called() get_power_state.assert_called_with(userid) get_os_mock.assert_called_with(userid) @mock.patch.object(smtclient.SMTClient, 'get_os_version_from_userid') @mock.patch.object(smtclient.SMTClient, 'get_power_state') @mock.patch.object(smtclient.SMTClient, 'guest_start') @mock.patch.object(smtclient.SMTClient, 'get_user_direct') @mock.patch.object(database.ImageDbOperator, 'image_add_record') 
@mock.patch.object(zvmutils.PathUtils, 'clean_temp_folder') @mock.patch.object(smtclient.SMTClient, '_get_image_size') @mock.patch.object(smtclient.SMTClient, '_get_disk_size_units') @mock.patch.object(smtclient.SMTClient, '_get_md5sum') @mock.patch.object(zvmutils, 'execute') @mock.patch.object(zvmutils.PathUtils, 'mkdir_if_not_exist') @mock.patch.object(smtclient.SMTClient, 'guest_stop') @mock.patch.object(smtclient.SMTClient, '_get_capture_devices') @mock.patch.object(smtclient.SMTClient, 'guest_get_os_version') @mock.patch.object(smtclient.SMTClient, 'execute_cmd') @mock.patch.object(smtclient.SMTClient, 'get_guest_connection_status') def test_guest_capture_poweroff_with_device_ass(self, guest_connection_status, execcmd, get_os_version, get_capture_devices, guest_stop, mkdir, execute, md5sum, disk_size_units, imagesize, rm_folder, image_add_record, get_user_direct, guest_start, get_power_state, get_os_mock): userid = 'fakeid' image_name = 'fakeimage' get_user_direct.return_value = ['USER TEST1234 LBYONLY 4096m 64G G', 'COMMAND SET VCONFIG MODE LINUX', 'COMMAND DEFINE CPU 00 TYPE IFL', 'MDISK 0100 3390 0001 14564 IAS114 MR'] execcmd.return_value = ['/'] image_temp_dir = '/'.join([CONF.image.sdk_image_repository, 'staging', 'UNKNOWN', image_name]) image_file_path = '/'.join((image_temp_dir, '0100')) cmd1 = ['sudo', '/opt/zthin/bin/creatediskimage', userid, '0100', image_file_path, '--compression', '6'] execute.side_effect = [(0, ''), (0, '')] image_final_dir = '/'.join((CONF.image.sdk_image_repository, 'netboot', 'UNKNOWN', image_name)) image_final_path = '/'.join((image_final_dir, '0100')) cmd2 = ['mv', image_file_path, image_final_path] md5sum.return_value = '547396211b558490d31e0de8e15eef0c' disk_size_units.return_value = '1000:CYL' imagesize.return_value = '1024000' guest_connection_status.return_value = False get_power_state.return_value = 'off' get_os_mock.return_value = 'UNKNOWN' self._smtclient.guest_capture(userid, image_name, 
capture_device_assign='0100') guest_connection_status.assert_called_with(userid) execcmd.assert_not_called() get_os_version.assert_not_called() get_capture_devices.assert_not_called() guest_stop.assert_not_called() get_user_direct.assert_not_called() execute.assert_has_calls([mock.call(cmd1), mock.call(cmd2)]) mkdir.assert_has_calls([mock.call(image_temp_dir)], [mock.call(image_final_dir)]) rm_folder.assert_called_once_with(image_temp_dir) md5sum.assert_called_once_with(image_final_path) disk_size_units.assert_called_once_with(image_final_path) imagesize.assert_called_once_with(image_final_path) image_add_record.assert_called_once_with(image_name, 'UNKNOWN', '547396211b558490d31e0' 'de8e15eef0c', '1000:CYL', '1024000', 'rootonly') guest_start.assert_not_called() get_power_state.assert_called_with(userid) get_os_mock.assert_called_with(userid) @mock.patch.object(smtclient.SMTClient, 'get_os_version_from_userid') @mock.patch.object(smtclient.SMTClient, 'guest_start') @mock.patch.object(database.ImageDbOperator, 'image_add_record') @mock.patch.object(zvmutils.PathUtils, 'clean_temp_folder') @mock.patch.object(smtclient.SMTClient, '_get_image_size') @mock.patch.object(smtclient.SMTClient, '_get_disk_size_units') @mock.patch.object(smtclient.SMTClient, '_get_md5sum') @mock.patch.object(zvmutils, 'execute') @mock.patch.object(zvmutils.PathUtils, 'mkdir_if_not_exist') @mock.patch.object(smtclient.SMTClient, 'guest_softstop') @mock.patch.object(smtclient.SMTClient, '_get_capture_devices') @mock.patch.object(smtclient.SMTClient, 'guest_get_os_version') @mock.patch.object(smtclient.SMTClient, 'execute_cmd') @mock.patch.object(smtclient.SMTClient, 'get_guest_connection_status') def test_guest_capture_good_path_poweron(self, guest_connection_status, execcmd, get_os_version, get_capture_devices, softstop, mkdir, execute, md5sum, disk_size_units, imagesize, rm_folder, image_add_record, guest_start, get_os_mock): userid = 'fakeid' image_name = 'fakeimage' execcmd.return_value = 
['/'] get_os_version.return_value = 'rhel7.0' get_capture_devices.return_value = ['0100'] image_temp_dir = '/'.join([CONF.image.sdk_image_repository, 'staging', 'rhel7.0', image_name]) image_file_path = '/'.join((image_temp_dir, '0100')) cmd1 = ['sudo', '/opt/zthin/bin/creatediskimage', userid, '0100', image_file_path, '--compression', '6'] execute.side_effect = [(0, ''), (0, '')] image_final_dir = '/'.join((CONF.image.sdk_image_repository, 'netboot', 'rhel7.0', image_name)) image_final_path = '/'.join((image_final_dir, '0100')) cmd2 = ['mv', image_file_path, image_final_path] md5sum.return_value = '547396211b558490d31e0de8e15eef0c' disk_size_units.return_value = '1000:CYL' imagesize.return_value = '1024000' guest_connection_status.return_value = True self._smtclient.guest_capture(userid, image_name) guest_connection_status.assert_called_with(userid) execcmd.assert_called_once_with(userid, 'pwd') get_os_version.assert_called_once_with(userid) get_capture_devices.assert_called_once_with(userid, 'rootonly') softstop.assert_called_once_with(userid) execute.assert_has_calls([mock.call(cmd1), mock.call(cmd2)]) mkdir.assert_has_calls([mock.call(image_temp_dir)], [mock.call(image_final_dir)]) rm_folder.assert_called_once_with(image_temp_dir) md5sum.assert_called_once_with(image_final_path) disk_size_units.assert_called_once_with(image_final_path) imagesize.assert_called_once_with(image_final_path) image_add_record.assert_called_once_with(image_name, 'rhel7.0', '547396211b558490d31e0de8e15eef0c', '1000:CYL', '1024000', 'rootonly') guest_start.assert_called_once_with(userid) get_os_mock.assert_not_called() @mock.patch.object(smtclient.SMTClient, 'get_os_version_from_userid') @mock.patch.object(smtclient.SMTClient, 'guest_get_os_version') @mock.patch.object(smtclient.SMTClient, 'execute_cmd') @mock.patch.object(smtclient.SMTClient, 'get_guest_connection_status') def test_guest_capture_error_path(self, guest_connection_status, execcmd, get_os_version, get_os_mock): userid = 
'fakeid' image_name = 'fakeimage' result = {'rs': 101, 'errno': 0, 'strError': '', 'overallRC': 2, 'rc': 4, 'response': ['(Error) ULTVMU0315E IUCV socket error' ' sending command to FP1T0006. cmd: pwd, ' 'rc: 4, rs: 101, out: ERROR: ERROR connecting' ' socket:', 'Network is unreachable', 'Return' ' code 4, Reason code 101.']} guest_connection_status.return_value = True execcmd.side_effect = exception.SDKSMTRequestFailed(result, 'err') self.assertRaises(exception.SDKGuestOperationError, self._smtclient.guest_capture, userid, image_name) guest_connection_status.assert_called_with(userid) execcmd.assert_called_once_with(userid, 'pwd') get_os_version.assert_not_called() get_os_mock.assert_not_called() @mock.patch.object(database.GuestDbOperator, 'get_guest_by_userid') def test_is_first_network_config_true(self, db_list): db_list.return_value = [u'9a5c9689-d099-46bb-865f-0c01c384f58c', u'TEST', u'', 0] result = self._smtclient.is_first_network_config('TEST') db_list.assert_called_once_with('TEST') self.assertTrue(result) @mock.patch.object(database.GuestDbOperator, 'get_guest_by_userid') def test_is_first_network_config_false(self, db_list): db_list.return_value = [u'9a5c9689-d099-46bb-865f-0c01c384f58c', u'TEST', u'', 1] result = self._smtclient.is_first_network_config('TEST') db_list.assert_called_once_with('TEST') self.assertFalse(result) @mock.patch.object(database.GuestDbOperator, 'update_guest_by_userid') def test_update_guestdb_with_net_set(self, update): self._smtclient.update_guestdb_with_net_set('TEST') update.assert_called_once_with('TEST', net_set='1') @mock.patch.object(zvmutils, 'get_smt_userid') @mock.patch.object(smtclient.SMTClient, '_request') def test_query_vswitch_NotExist(self, req, get_id): get_id.return_value = "SMTUSER" req.side_effect = exception.SDKSMTRequestFailed( {'rc': 212, 'rs': 40}, 'err') self.assertRaises(exception.SDKObjectNotExistError, self._smtclient.query_vswitch, 'testvs') @mock.patch.object(zvmutils, 'get_smt_userid') 
@mock.patch.object(smtclient.SMTClient, '_request') def test_query_vswitch_RequestFailed(self, req, get_id): get_id.return_value = "SMTUSER" req.side_effect = exception.SDKSMTRequestFailed( {'rc': 1, 'rs': 1}, 'err') self.assertRaises(exception.SDKSMTRequestFailed, self._smtclient.query_vswitch, 'testvs') @mock.patch.object(zvmutils, 'get_smt_userid') @mock.patch.object(smtclient.SMTClient, '_request') def test_query_OSA_RequestFailed(self, req, get_id): get_id.return_value = "SMTUSER" req.side_effect = exception.SDKSMTRequestFailed( {'rc': 1, 'rs': 1}, 'err') self.assertRaises(exception.SDKSMTRequestFailed, self._smtclient._query_OSA) @mock.patch.object(zvmutils, 'get_smt_userid') @mock.patch.object(smtclient.SMTClient, '_request') def test_query_OSA_NoOSA(self, req, get_id): get_id.return_value = "SMTUSER" req.side_effect = exception.SDKSMTRequestFailed( {'rc': 4, 'rs': 4}, 'err') result = self._smtclient._query_OSA() get_id.assert_called_once_with() self.assertEqual(result, {}) @mock.patch.object(zvmutils, 'get_smt_userid') @mock.patch.object(smtclient.SMTClient, '_request') def test_query_OSA(self, req, get_id): get_id.return_value = "SMTUSER" osa_info = [ "OSA Address: 0440", "OSA Status: FREE", "OSA Type: OSA", "CHPID Address: 10", "Agent Status: NO", "OSA Address: 0441", "OSA Status: FREE", "OSA Type: OSA", "CHPID Address: 10", "Agent Status: NO", "OSA Address: 4000", "OSA Status: ATTACHED TCPIP", "OSA Type: OSA", "CHPID Address: 3B", "Agent Status: NO", "OSA Address: FB1D", "OSA Status: FREE", "OSA Type: HIPER", "CHPID Address: FB", "Agent Status: NO", ] req.return_value = {'response': osa_info} expected = {'OSA': {'FREE': ['0440', '0441'], 'BOXED': [], 'OFFLINE': [], 'ATTACHED': [('TCPIP', '4000')]}, 'HIPER': {'FREE': ['FB1D'], 'BOXED': [], 'OFFLINE': [], 'ATTACHED': []}} result = self._smtclient._query_OSA() get_id.assert_called_once_with() self.assertEqual(result.keys(), expected.keys()) self.assertEqual(result['OSA'], expected['OSA']) 
self.assertEqual(result['HIPER'], expected['HIPER']) @mock.patch.object(smtclient.SMTClient, '_query_OSA') def test_is_OSA_free_noOSA(self, query_osa): query_osa.return_value = {'HIPER': {}} result = self._smtclient._is_OSA_free('0100') query_osa.assert_called_once_with() self.assertFalse(result) @mock.patch.object(smtclient.SMTClient, '_query_OSA') def test_is_OSA_free_noFree(self, query_osa): query_osa.return_value = {'OSA': {'FREE': []}} result = self._smtclient._is_OSA_free('0100') query_osa.assert_called_once_with() self.assertFalse(result) @mock.patch.object(smtclient.SMTClient, '_query_OSA') def test_is_OSA_free_notallFree(self, query_osa): query_osa.return_value = {'OSA': {'FREE': ['0100', '0101']}} result = self._smtclient._is_OSA_free('0100') query_osa.assert_called_once_with() self.assertFalse(result) @mock.patch.object(smtclient.SMTClient, '_query_OSA') def test_is_OSA_free_OK_num(self, query_osa): query_osa.return_value = {'OSA': {'FREE': ['0100', '0101', '0102']}} result = self._smtclient._is_OSA_free('0100') query_osa.assert_called_once_with() self.assertTrue(result) @mock.patch.object(smtclient.SMTClient, '_query_OSA') def test_is_OSA_free_OK_character(self, query_osa): query_osa.return_value = {'OSA': {'FREE': ['0AA0', '0AA1', '0AA2']}} result = self._smtclient._is_OSA_free('AA0') query_osa.assert_called_once_with() self.assertTrue(result) @mock.patch.object(smtclient.SMTClient, '_get_available_vdev') @mock.patch.object(smtclient.SMTClient, '_is_OSA_free') @mock.patch.object(smtclient.SMTClient, '_dedicate_OSA') def test_dedicate_OSA(self, attach_osa, OSA_free, get_vdev): OSA_free.return_value = True get_vdev.return_value = '1000' result = self._smtclient.dedicate_OSA('userid', 'OSA_device', vdev='nic_vdev', active=True) get_vdev.assert_called_once_with('userid', vdev='nic_vdev') OSA_free.assert_called_once_with('OSA_device') attach_osa.assert_called_once_with('userid', 'OSA_device', '1000', active=True) self.assertEqual(result, '1000') 
    @mock.patch.object(smtclient.SMTClient, '_get_available_vdev')
    @mock.patch.object(smtclient.SMTClient, '_is_OSA_free')
    def test_dedicate_OSA_notFree(self, OSA_free, get_vdev):
        """Dedicating a non-free OSA device raises SDKConflictError."""
        OSA_free.return_value = False
        get_vdev.return_value = '1000'
        self.assertRaises(exception.SDKConflictError,
                          self._smtclient.dedicate_OSA,
                          'userid', 'OSA_device', 'nic_vdev', active=True)

    @mock.patch.object(database.NetworkDbOperator, 'switch_add_record')
    @mock.patch.object(smtclient.SMTClient, '_request')
    def test_private_dedicate_OSA_notActive(self, request, add_rec):
        """Inactive dedicate: three Dedicate_DM requests (vdev 1000-1002)
        and one DB record for the base vdev."""
        request_response = ['', '', '', '', '', '']
        request.side_effect = request_response
        self._smtclient._dedicate_OSA('userid', 'f000', '1000',
                                      active=False)
        request.assert_any_call('SMAPI userid API Image_Device_Dedicate_DM '
                                "--operands -v 1000 -r f000")
        request.assert_any_call('SMAPI userid API Image_Device_Dedicate_DM '
                                "--operands -v 1001 -r f001")
        request.assert_any_call('SMAPI userid API Image_Device_Dedicate_DM '
                                "--operands -v 1002 -r f002")
        add_rec.assert_called_once_with('userid', '1000',
                                        comments='OSA=f000')

    @mock.patch.object(smtclient.SMTClient, '_request')
    def test_private_dedicate_OSA_notActive_Fail_Input(self, request):
        """Inactive dedicate fails on the 3rd device (rc=404/rs=4):
        the already-dedicated vdevs 1000/1001 are rolled back."""
        request_response = ['', '']
        request_response.append(exception.SDKSMTRequestFailed(
            {'rc': 404, 'rs': 4}, 'err'))
        request_response.append(exception.SDKSMTRequestFailed(
            {'rc': 1, 'rs': 1}, 'err'))
        request_response.append(exception.SDKSMTRequestFailed(
            {'rc': 404, 'rs': 8}, 'err'))
        request.side_effect = request_response
        self.assertRaises(exception.SDKConflictError,
                          self._smtclient._dedicate_OSA,
                          'userid', 'f000', '1000', active=False)
        request.assert_any_call('SMAPI userid API Image_Device_Dedicate_DM '
                                "--operands -v 1000 -r f000")
        request.assert_any_call('SMAPI userid API Image_Device_Dedicate_DM '
                                "--operands -v 1001 -r f001")
        request.assert_any_call('SMAPI userid API Image_Device_Dedicate_DM '
                                "--operands -v 1002 -r f002")
        # Rollback: undedicate the two vdevs that had succeeded.
        request.assert_any_call('SMAPI userid API Image_Device_Undedicate_DM '
                                "--operands -v 1001")
        request.assert_any_call('SMAPI userid API Image_Device_Undedicate_DM '
                                "--operands -v 1000")

    @mock.patch.object(smtclient.SMTClient, '_request')
    def test_private_dedicate_OSA_notActive_Fail_Lock(self, request):
        """Inactive dedicate fails with a directory-lock error
        (rc=404/rs=12): rollback still undedicates vdevs 1000/1001."""
        request_response = ['', '']
        request_response.append(exception.SDKSMTRequestFailed(
            {'rc': 404, 'rs': 12}, 'err'))
        request_response.append(exception.SDKSMTRequestFailed(
            {'rc': 1, 'rs': 1}, 'err'))
        request_response.append('')
        request.side_effect = request_response
        self.assertRaises(exception.SDKConflictError,
                          self._smtclient._dedicate_OSA,
                          'userid', 'f000', '1000', active=False)
        request.assert_any_call('SMAPI userid API Image_Device_Dedicate_DM '
                                "--operands -v 1000 -r f000")
        request.assert_any_call('SMAPI userid API Image_Device_Dedicate_DM '
                                "--operands -v 1001 -r f001")
        request.assert_any_call('SMAPI userid API Image_Device_Dedicate_DM '
                                "--operands -v 1002 -r f002")
        request.assert_any_call('SMAPI userid API Image_Device_Undedicate_DM '
                                "--operands -v 1001")
        request.assert_any_call('SMAPI userid API Image_Device_Undedicate_DM '
                                "--operands -v 1000")

    @mock.patch.object(database.NetworkDbOperator, 'switch_add_record')
    @mock.patch.object(smtclient.SMTClient, '_request')
    @mock.patch.object(smtclient.SMTClient, 'get_power_state')
    def test_private_dedicate_OSA_Active(self, power_state, request,
                                         add_rec):
        """Active dedicate on a running guest: Dedicate_DM (directory)
        plus live Dedicate for each of the three vdevs."""
        power_state.return_value = 'on'
        request_response = ['', '', '', '', '', '']
        request.side_effect = request_response
        self._smtclient._dedicate_OSA('userid', 'f000', '1000', active=True)
        request.assert_any_call('SMAPI userid API Image_Device_Dedicate_DM '
                                "--operands -v 1000 -r f000")
        request.assert_any_call('SMAPI userid API Image_Device_Dedicate_DM '
                                "--operands -v 1001 -r f001")
        request.assert_any_call('SMAPI userid API Image_Device_Dedicate_DM '
                                "--operands -v 1002 -r f002")
        request.assert_any_call('SMAPI userid API Image_Device_Dedicate '
                                "--operands -v 1000 -r f000")
        request.assert_any_call('SMAPI userid API Image_Device_Dedicate '
                                "--operands -v 1001 -r f001")
        request.assert_any_call('SMAPI userid API Image_Device_Dedicate '
                                "--operands -v 1002 -r f002")
        add_rec.assert_called_once_with('userid', '1000',
                                        comments='OSA=f000')

    @mock.patch.object(smtclient.SMTClient, '_request')
    @mock.patch.object(smtclient.SMTClient, 'get_power_state')
    def test_private_dedicate_OSA_Active_Fail(self, power_state, request):
        """Active dedicate fails live with a non-input error (rc=300):
        both the directory and the live dedications are rolled back and
        the original SDKSMTRequestFailed is re-raised."""
        power_state.return_value = 'on'
        request_response = ['', '', '', '', '']
        request_response.append(exception.SDKSMTRequestFailed(
            {'rc': 300, 'rs': 0}, 'err'))
        request_response.append(exception.SDKSMTRequestFailed(
            {'rc': 404, 'rs': 8}, 'err'))
        request_response.append(exception.SDKSMTRequestFailed(
            {'rc': 400, 'rs': 8}, 'err'))
        request_response.append('')
        request_response.append(exception.SDKSMTRequestFailed(
            {'rc': 204, 'rs': 8}, 'err'))
        request_response.append(exception.SDKSMTRequestFailed(
            {'rc': 200, 'rs': 8}, 'err'))
        request.side_effect = request_response
        self.assertRaises(exception.SDKSMTRequestFailed,
                          self._smtclient._dedicate_OSA,
                          'userid', 'f000', '1000', active=True)
        request.assert_any_call('SMAPI userid API Image_Device_Dedicate_DM '
                                "--operands -v 1000 -r f000")
        request.assert_any_call('SMAPI userid API Image_Device_Dedicate_DM '
                                "--operands -v 1001 -r f001")
        request.assert_any_call('SMAPI userid API Image_Device_Dedicate_DM '
                                "--operands -v 1002 -r f002")
        request.assert_any_call('SMAPI userid API Image_Device_Undedicate_DM '
                                "--operands -v 1000")
        request.assert_any_call('SMAPI userid API Image_Device_Undedicate_DM '
                                "--operands -v 1001")
        request.assert_any_call('SMAPI userid API Image_Device_Undedicate_DM '
                                "--operands -v 1002")
        request.assert_any_call('SMAPI userid API Image_Device_Dedicate '
                                "--operands -v 1000 -r f000")
        request.assert_any_call('SMAPI userid API Image_Device_Dedicate '
                                "--operands -v 1001 -r f001")
        request.assert_any_call('SMAPI userid API Image_Device_Dedicate '
                                "--operands -v 1002 -r f002")
        request.assert_any_call('SMAPI userid API Image_Device_Undedicate '
                                "--operands -v 1000")
        request.assert_any_call('SMAPI userid API Image_Device_Undedicate '
                                "--operands -v 1001")

    @mock.patch.object(smtclient.SMTClient, '_request')
    @mock.patch.object(smtclient.SMTClient, 'get_power_state')
    def test_private_dedicate_OSA_Active_Fail_Input(self, power, request):
        """Active dedicate fails live with an input error (rc=204/rs=8):
        rollback happens and SDKConflictError is raised."""
        power.return_value = 'on'
        request_response = ['', '', '', '', '']
        request_response.append(exception.SDKSMTRequestFailed(
            {'rc': 204, 'rs': 8}, 'err'))
        request_response.append('')
        request_response.append('')
        request_response.append('')
        request_response.append(exception.SDKSMTRequestFailed(
            {'rc': 204, 'rs': 8}, 'err'))
        request_response.append(exception.SDKSMTRequestFailed(
            {'rc': 200, 'rs': 8}, 'err'))
        request.side_effect = request_response
        self.assertRaises(exception.SDKConflictError,
                          self._smtclient._dedicate_OSA,
                          'userid', 'f000', '1000', active=True)
        request.assert_any_call('SMAPI userid API Image_Device_Dedicate_DM '
                                "--operands -v 1000 -r f000")
        request.assert_any_call('SMAPI userid API Image_Device_Dedicate_DM '
                                "--operands -v 1001 -r f001")
        request.assert_any_call('SMAPI userid API Image_Device_Dedicate_DM '
                                "--operands -v 1002 -r f002")
        request.assert_any_call('SMAPI userid API Image_Device_Undedicate_DM '
                                "--operands -v 1000")
        request.assert_any_call('SMAPI userid API Image_Device_Undedicate_DM '
                                "--operands -v 1001")
        request.assert_any_call('SMAPI userid API Image_Device_Undedicate_DM '
                                "--operands -v 1002")
        request.assert_any_call('SMAPI userid API Image_Device_Dedicate '
                                "--operands -v 1000 -r f000")
        request.assert_any_call('SMAPI userid API Image_Device_Dedicate '
                                "--operands -v 1001 -r f001")
        request.assert_any_call('SMAPI userid API Image_Device_Dedicate '
                                "--operands -v 1002 -r f002")
        request.assert_any_call('SMAPI userid API Image_Device_Undedicate '
                                "--operands -v 1000")
        request.assert_any_call('SMAPI userid API Image_Device_Undedicate '
                                "--operands -v 1001")

    @mock.patch.object(smtclient.SMTClient, '_request_with_error_ignored')
    def test_namelist_add(self,
req):
        """Name_List_Add is issued with the target userid."""
        self._smtclient.namelist_add('tnlist', 'testid')
        rd = "SMAPI tnlist API Name_List_Add --operands -n testid"
        req.assert_called_once_with(rd)

    @mock.patch.object(smtclient.SMTClient, '_request_with_error_ignored')
    def test_namelist_remove(self, req):
        """Name_List_Remove is issued with the target userid."""
        self._smtclient.namelist_remove('tnlist', 'testid')
        rd = "SMAPI tnlist API Name_List_Remove --operands -n testid"
        req.assert_called_once_with(rd)

    @mock.patch.object(smtclient.SMTClient, '_request_with_error_ignored')
    def test_namelist_query(self, req):
        """Name_List_Query returns the raw response list."""
        req.return_value = {'response': ['t1', 't2']}
        resp = self._smtclient.namelist_query('tnlist')
        rd = "SMAPI tnlist API Name_List_Query"
        req.assert_called_once_with(rd)
        self.assertEqual(['t1', 't2'], resp)

    @mock.patch.object(smtclient.SMTClient, '_request')
    def test_namelist_query_err(self, req):
        """A failed Name_List_Query degrades to an empty list."""
        req.side_effect = exception.SDKSMTRequestFailed({}, 'err')
        resp = self._smtclient.namelist_query('tnlist')
        rd = "SMAPI tnlist API Name_List_Query"
        req.assert_called_once_with(rd)
        self.assertEqual([], resp)

    @mock.patch.object(smtclient.SMTClient, '_request_with_error_ignored')
    def test_namelist_destroy(self, req):
        """Name_List_Destroy is issued for the whole list."""
        self._smtclient.namelist_destroy('tnlist')
        rd = "SMAPI tnlist API Name_List_Destroy"
        req.assert_called_once_with(rd)

    @mock.patch.object(smtclient.SMTClient, 'get_user_direct')
    def test_private_get_defined_cpu_addrs(self, get_user_direct):
        """CPU addresses and the MACHINE max-CPU count are parsed from
        the user directory entry."""
        get_user_direct.return_value = ['USER TESTUID LBYONLY 1024m 64G G',
                                        'INCLUDE OSDFLT',
                                        'CPU 00 BASE',
                                        'CPU 0A',
                                        'IPL 0100',
                                        'MACHINE ESA 32',
                                        'NICDEF 1000 TYPE QDIO LAN '
                                        'SYSTEM XCATVSW2 DEVICES 3',
                                        'MDISK 0100 3390 52509 1100 OMB1AB MR',
                                        '']
        (max_cpus, defined_addrs) = self._smtclient._get_defined_cpu_addrs(
            'TESTUID')
        get_user_direct.assert_called_once_with('TESTUID')
        self.assertEqual(max_cpus, 32)
        self.assertEqual(defined_addrs, ['00', '0A'])

    @mock.patch.object(smtclient.SMTClient, 'get_user_direct')
    def test_private_get_defined_cpu_addrs_no_max_cpu(self,
                                                      get_user_direct):
        """Without a MACHINE statement the max CPU count falls back to 0."""
        get_user_direct.return_value = ['USER TESTUID LBYONLY 1024m 64G G',
                                        'INCLUDE OSDFLT',
                                        'CPU 00 BASE',
                                        'CPU 0A',
                                        'IPL 0100',
                                        'NICDEF 1000 TYPE QDIO LAN '
                                        'SYSTEM XCATVSW2 DEVICES 3',
                                        'MDISK 0100 3390 52509 1100 OMB1AB MR',
                                        '']
        (max_cpus, defined_addrs) = self._smtclient._get_defined_cpu_addrs(
            'TESTUID')
        get_user_direct.assert_called_once_with('TESTUID')
        self.assertEqual(max_cpus, 0)
        self.assertEqual(defined_addrs, ['00', '0A'])

    def test_private_get_available_cpu_addrs(self):
        """All addresses below the max that are not in use are returned."""
        used = ['00', '01', '1A', '1F']
        max = 32
        avail_expected = ['02', '03', '04', '05', '06', '07', '08', '09',
                          '0A', '0B', '0C', '0D', '0E', '0F', '10', '11',
                          '12', '13', '14', '15', '16', '17', '18', '19',
                          '1B', '1C', '1D', '1E']
        avail_addrs = self._smtclient._get_available_cpu_addrs(used, max)
        avail_addrs.sort()
        self.assertListEqual(avail_addrs, avail_expected)

    @mock.patch.object(smtclient.SMTClient, 'execute_cmd')
    def test_private_get_active_cpu_addrs(self, exec_cmd):
        """Decimal lscpu addresses are converted to 2-digit hex."""
        active_cpus = [('# The following is the parsable format, which can '
                        'be fed to other'),
                       ('# programs. Each different item in every column has '
                        'an unique ID'),
                       '# starting from zero.',
                       '# Address',
                       '0', '3', '10', '19']
        exec_cmd.return_value = active_cpus
        addrs = self._smtclient.get_active_cpu_addrs('TESTUID')
        exec_cmd.assert_called_once_with('TESTUID', "lscpu --parse=ADDRESS")
        addrs.sort()
        self.assertListEqual(addrs, ['00', '03', '0A', '13'])

    @mock.patch.object(smtclient.SMTClient, '_request')
    @mock.patch.object(smtclient.SMTClient, 'execute_cmd')
    @mock.patch.object(smtclient.SMTClient, '_get_available_cpu_addrs')
    @mock.patch.object(smtclient.SMTClient, 'resize_cpus')
    @mock.patch.object(smtclient.SMTClient, 'get_active_cpu_addrs')
    def test_live_resize_cpus(self, get_active, resize, get_avail,
                              exec_cmd, request):
        """Live resize from 2 to 4 CPUs: definition is resized, then the
        new CPUs are defined and rescanned inside the running guest."""
        userid = 'testuid'
        count = 4
        get_active.return_value = ['00', '01']
        resize.return_value = (1, ['02', '03'], 32)
        avail_lst = ['02', '03', '04', '05', '06', '07', '08', '09',
                     '0A', '0B', '0C', '0D', '0E', '0F', '10', '11',
                     '12', '13', '14', '15', '16', '17', '18', '19',
                     '1A', '1B',
'1C', '1D', '1E', '1F']
        get_avail.return_value = avail_lst
        self._smtclient.live_resize_cpus(userid, count)
        get_active.assert_called_once_with(userid)
        resize.assert_called_once_with(userid, count)
        get_avail.assert_called_once_with(['00', '01'], 32)
        cmd_def_cpu = "vmcp def cpu 02 03"
        cmd_rescan_cpu = "chcpu -r"
        exec_cmd.assert_has_calls([mock.call(userid, cmd_def_cpu),
                                   mock.call(userid, cmd_rescan_cpu)])
        request.assert_not_called()

    @mock.patch.object(smtclient.SMTClient, '_request')
    @mock.patch.object(smtclient.SMTClient, 'execute_cmd')
    @mock.patch.object(smtclient.SMTClient, '_get_available_cpu_addrs')
    @mock.patch.object(smtclient.SMTClient, 'resize_cpus')
    @mock.patch.object(smtclient.SMTClient, 'get_active_cpu_addrs')
    def test_live_resize_cpus_equal_active(self, get_active, resize,
                                           get_avail, exec_cmd, request):
        """Active CPU count already matches the target: nothing is done
        in the running guest."""
        userid = 'testuid'
        count = 4
        get_active.return_value = ['00', '01', '02', '03']
        resize.return_value = (1, ['02', '03'], 32)
        self._smtclient.live_resize_cpus(userid, count)
        get_active.assert_called_once_with(userid)
        resize.assert_called_once_with(userid, count)
        get_avail.assert_not_called()
        exec_cmd.assert_not_called()
        request.assert_not_called()

    @mock.patch.object(smtclient.SMTClient, '_request')
    @mock.patch.object(smtclient.SMTClient, 'execute_cmd')
    @mock.patch.object(smtclient.SMTClient, '_get_available_cpu_addrs')
    @mock.patch.object(smtclient.SMTClient, 'resize_cpus')
    @mock.patch.object(smtclient.SMTClient, 'get_active_cpu_addrs')
    def test_live_resize_cpus_less_active(self, get_active, resize,
                                          get_avail, exec_cmd, request):
        """More CPUs active than requested: live shrink is refused with
        SDKConflictError before anything is changed."""
        userid = 'testuid'
        count = 4
        get_active.return_value = ['00', '01', '02', '03', '04']
        self.assertRaises(exception.SDKConflictError,
                          self._smtclient.live_resize_cpus, userid, count)
        get_active.assert_called_once_with(userid)
        resize.assert_not_called()
        get_avail.assert_not_called()
        exec_cmd.assert_not_called()
        request.assert_not_called()

    @mock.patch.object(smtclient.SMTClient, '_request')
    @mock.patch.object(smtclient.SMTClient, 'execute_cmd')
    @mock.patch.object(smtclient.SMTClient, '_get_available_cpu_addrs')
    @mock.patch.object(smtclient.SMTClient, 'resize_cpus')
    @mock.patch.object(smtclient.SMTClient, 'get_active_cpu_addrs')
    def test_live_resize_cpus_revert_definition_equal(self, get_active,
                                                      resize, get_avail,
                                                      exec_cmd, request):
        # Test case: active update failed, definition not updated
        userid = 'testuid'
        count = 4
        get_active.return_value = ['00', '01']
        resize.return_value = (0, [], 32)
        avail_lst = ['02', '03', '04', '05', '06', '07', '08', '09',
                     '0A', '0B', '0C', '0D', '0E', '0F', '10', '11',
                     '12', '13', '14', '15', '16', '17', '18', '19',
                     '1A', '1B', '1C', '1D', '1E', '1F']
        get_avail.return_value = avail_lst
        exec_cmd.side_effect = [exception.SDKSMTRequestFailed({}, 'err'),
                                ""]
        self.assertRaises(exception.SDKGuestOperationError,
                          self._smtclient.live_resize_cpus, userid, count)
        get_active.assert_called_once_with(userid)
        resize.assert_called_once_with(userid, count)
        get_avail.assert_called_once_with(['00', '01'], 32)
        exec_cmd.assert_called_once_with(userid, "vmcp def cpu 02 03")
        # Definition was untouched (action 0), so nothing to revert.
        request.assert_not_called()

    @mock.patch.object(smtclient.SMTClient, '_request')
    @mock.patch.object(smtclient.SMTClient, 'execute_cmd')
    @mock.patch.object(smtclient.SMTClient, '_get_available_cpu_addrs')
    @mock.patch.object(smtclient.SMTClient, 'resize_cpus')
    @mock.patch.object(smtclient.SMTClient, 'get_active_cpu_addrs')
    def test_live_resize_cpus_revert_added_cpus(self, get_active, resize,
                                                get_avail, exec_cmd,
                                                request):
        """Live update fails after CPUs were added to the directory:
        the added CPU definitions must be deleted again."""
        userid = 'testuid'
        count = 4
        base.set_conf('zvm', 'user_default_share_unit', 100)
        get_active.return_value = ['00', '01']
        resize.return_value = (1, ['01', '02', '03'], 32)
        avail_lst = ['02', '03', '04', '05', '06', '07', '08', '09',
                     '0A', '0B', '0C', '0D', '0E', '0F', '10', '11',
                     '12', '13', '14', '15', '16', '17', '18', '19',
                     '1A', '1B', '1C', '1D', '1E', '1F']
        get_avail.return_value = avail_lst
        exec_cmd.side_effect = [exception.SDKSMTRequestFailed({}, 'err'),
                                ""]
        self.assertRaises(exception.SDKGuestOperationError,
                          self._smtclient.live_resize_cpus, userid, count)
        get_active.assert_called_once_with(userid)
        resize.assert_called_once_with(userid, count)
        get_avail.assert_called_once_with(['00', '01'], 32)
        exec_cmd.assert_called_once_with(userid, "vmcp def cpu 02 03")
        # Revert: delete the CPUs that had been added and restore SHARE.
        rd = ("SMAPI testuid API Image_Definition_Delete_DM --operands "
              "-k CPU=CPUADDR=01 -k CPU=CPUADDR=02 -k CPU=CPUADDR=03")
        rd2 = ("SMAPI testuid API Image_Definition_Update_DM --operands "
               "-k SHARE=RELATIVE=200")
        calls = [call(rd), call(rd2)]
        request.assert_has_calls(calls)

    @mock.patch.object(smtclient.SMTClient, '_request')
    @mock.patch.object(smtclient.SMTClient, 'execute_cmd')
    @mock.patch.object(smtclient.SMTClient, '_get_available_cpu_addrs')
    @mock.patch.object(smtclient.SMTClient, 'resize_cpus')
    @mock.patch.object(smtclient.SMTClient, 'get_active_cpu_addrs')
    def test_live_resize_cpus_revert_deleted_cpus(self, get_active, resize,
                                                  get_avail, exec_cmd,
                                                  request):
        """Live update fails after CPUs were removed from the directory:
        the removed CPU definitions must be re-created."""
        userid = 'testuid'
        count = 4
        get_active.return_value = ['00', '01']
        resize.return_value = (2, ['04', '0A'], 32)
        avail_lst = ['02', '03', '04', '05', '06', '07', '08', '09',
                     '0A', '0B', '0C', '0D', '0E', '0F', '10', '11',
                     '12', '13', '14', '15', '16', '17', '18', '19',
                     '1A', '1B', '1C', '1D', '1E', '1F']
        get_avail.return_value = avail_lst
        exec_cmd.side_effect = [exception.SDKSMTRequestFailed({}, 'err'),
                                ""]
        self.assertRaises(exception.SDKGuestOperationError,
                          self._smtclient.live_resize_cpus, userid, count)
        get_active.assert_called_once_with(userid)
        resize.assert_called_once_with(userid, count)
        get_avail.assert_called_once_with(['00', '01'], 32)
        exec_cmd.assert_called_once_with(userid, "vmcp def cpu 02 03")
        rd = ("SMAPI testuid API Image_Definition_Create_DM --operands "
              "-k CPU=CPUADDR=04 -k CPU=CPUADDR=0A")
        rd2 = ("SMAPI testuid API Image_Definition_Update_DM --operands "
               "-k SHARE=RELATIVE=200")
        calls = [call(rd), call(rd2)]
        request.assert_has_calls(calls)

    @mock.patch.object(smtclient.SMTClient, '_request')
    @mock.patch.object(smtclient.SMTClient, 'execute_cmd')
    @mock.patch.object(smtclient.SMTClient, '_get_available_cpu_addrs')
    @mock.patch.object(smtclient.SMTClient, 'resize_cpus')
    @mock.patch.object(smtclient.SMTClient, 'get_active_cpu_addrs')
    def test_live_resize_cpus_revert_failed(self, get_active, resize,
                                            get_avail, exec_cmd, request):
        """Both the live update and the directory revert fail: the
        original SDKGuestOperationError still propagates."""
        userid = 'testuid'
        count = 4
        get_active.return_value = ['00', '01']
        resize.return_value = (2, ['04', '0A'], 32)
        avail_lst = ['02', '03', '04', '05', '06', '07', '08', '09',
                     '0A', '0B', '0C', '0D', '0E', '0F', '10', '11',
                     '12', '13', '14', '15', '16', '17', '18', '19',
                     '1A', '1B', '1C', '1D', '1E', '1F']
        get_avail.return_value = avail_lst
        exec_cmd.side_effect = [exception.SDKSMTRequestFailed({}, 'err'),
                                ""]
        request.side_effect = [exception.SDKSMTRequestFailed({}, 'err'),
                               ""]
        self.assertRaises(exception.SDKGuestOperationError,
                          self._smtclient.live_resize_cpus, userid, count)
        get_active.assert_called_once_with(userid)
        resize.assert_called_once_with(userid, count)
        get_avail.assert_called_once_with(['00', '01'], 32)
        exec_cmd.assert_called_once_with(userid, "vmcp def cpu 02 03")
        rd = ("SMAPI testuid API Image_Definition_Create_DM --operands "
              "-k CPU=CPUADDR=04 -k CPU=CPUADDR=0A")
        request.assert_called_once_with(rd)

    @mock.patch.object(smtclient.SMTClient, '_request')
    @mock.patch.object(smtclient.SMTClient, 'execute_cmd')
    @mock.patch.object(smtclient.SMTClient, '_get_available_cpu_addrs')
    @mock.patch.object(smtclient.SMTClient, 'resize_cpus')
    @mock.patch.object(smtclient.SMTClient, 'get_active_cpu_addrs')
    def test_live_resize_cpus_rescan_failed(self, get_active, resize,
                                            get_avail, exec_cmd, request):
        """'vmcp def cpu' succeeds but the in-guest rescan (chcpu -r)
        fails: SDKGuestOperationError, no directory revert needed."""
        userid = 'testuid'
        count = 4
        get_active.return_value = ['00', '01']
        resize.return_value = (2, ['04', '0A'], 32)
        avail_lst = ['02', '03', '04', '05', '06', '07', '08', '09',
                     '0A', '0B', '0C', '0D', '0E', '0F', '10', '11',
                     '12', '13', '14', '15', '16', '17', '18', '19',
                     '1A', '1B', '1C', '1D', '1E', '1F']
        get_avail.return_value = avail_lst
        exec_cmd.side_effect = ["",
                                exception.SDKSMTRequestFailed({}, 'err')]
        self.assertRaises(exception.SDKGuestOperationError,
                          self._smtclient.live_resize_cpus, userid, count)
        get_active.assert_called_once_with(userid)
        resize.assert_called_once_with(userid, count)
        get_avail.assert_called_once_with(['00', '01'], 32)
        cmd_def_cpu = "vmcp def cpu 02 03"
        cmd_rescan_cpu = "chcpu -r"
        exec_cmd.assert_has_calls([mock.call(userid, cmd_def_cpu),
                                   mock.call(userid, cmd_rescan_cpu)])
        request.assert_not_called()

    @mock.patch.object(smtclient.SMTClient, '_request')
    @mock.patch.object(smtclient.SMTClient, 'execute_cmd')
    @mock.patch.object(smtclient.SMTClient, '_get_available_cpu_addrs')
    @mock.patch.object(smtclient.SMTClient, 'resize_cpus')
    @mock.patch.object(smtclient.SMTClient, 'get_active_cpu_addrs')
    def test_live_resize_cpus_redhat(self, get_active, resize, get_avail,
                                     exec_cmd, request):
        """RHEL guest: after the rescan, uname is checked but no extra
        'chcpu -e' is needed (RHEL onlines rescanned CPUs itself)."""
        userid = 'testuid'
        count = 4
        get_active.return_value = ['00', '01']
        resize.return_value = (1, ['02', '03'], 32)
        avail_lst = ['02', '03', '04', '05', '06', '07', '08', '09',
                     '0A', '0B', '0C', '0D', '0E', '0F', '10', '11',
                     '12', '13', '14', '15', '16', '17', '18', '19',
                     '1A', '1B', '1C', '1D', '1E', '1F']
        get_avail.return_value = avail_lst
        exec_cmd.side_effect = [[''], [''],
                                ['Linux rhel82-ext4-eckd 4.18.0-193.el8.'
                                 's390x #1 SMP Fri Mar 27 14:43:09 UTC'
                                 ' 2020 s390x s390x s390x GNU/Linux ']]
        self._smtclient.live_resize_cpus(userid, count)
        get_active.assert_called_once_with(userid)
        resize.assert_called_once_with(userid, count)
        get_avail.assert_called_once_with(['00', '01'], 32)
        cmd_def_cpu = "vmcp def cpu 02 03"
        cmd_rescan_cpu = "chcpu -r"
        cmd_uname = "uname -a"
        exec_cmd.assert_has_calls([mock.call(userid, cmd_def_cpu),
                                   mock.call(userid, cmd_rescan_cpu),
                                   mock.call(userid, cmd_uname)])
        request.assert_not_called()

    @mock.patch.object(smtclient.SMTClient, '_request')
    @mock.patch.object(smtclient.SMTClient, 'execute_cmd')
    @mock.patch.object(smtclient.SMTClient, '_get_available_cpu_addrs')
    @mock.patch.object(smtclient.SMTClient, 'resize_cpus')
    @mock.patch.object(smtclient.SMTClient, 'get_active_cpu_addrs')
    def test_live_resize_cpus_ubuntu(self, get_active, resize, get_avail,
                                     exec_cmd, request):
        """Ubuntu guest: the new CPUs additionally need an explicit
        'chcpu -e' to be brought online."""
        userid = 'testuid'
        count = 4
        get_active.return_value = ['00', '01']
        resize.return_value = (1, ['02', '03'], 32)
        avail_lst = ['02', '03', '04', '05', '06', '07', '08', '09',
                     '0A', '0B', '0C', '0D', '0E', '0F', '10', '11',
                     '12', '13', '14', '15', '16', '17', '18', '19',
                     '1A', '1B', '1C', '1D', '1E', '1F']
        get_avail.return_value = avail_lst
        exec_cmd.side_effect = [[''], [''],
                                ['Linux ubuntu20-ext4-eckd 5.4.0-37-generic'
                                 ' #41-Ubuntu SMP Wed Jun 3 17:53:50 UTC '
                                 '2020 s390x s390x s390x GNU/Linux'],
                                ['']]
        self._smtclient.live_resize_cpus(userid, count)
        get_active.assert_called_once_with(userid)
        resize.assert_called_once_with(userid, count)
        get_avail.assert_called_once_with(['00', '01'], 32)
        cmd_def_cpu = "vmcp def cpu 02 03"
        cmd_rescan_cpu = "chcpu -r"
        cmd_uname = "uname -a"
        cmd_chcpu = "chcpu -e 02,03"
        exec_cmd.assert_has_calls([mock.call(userid, cmd_def_cpu),
                                   mock.call(userid, cmd_rescan_cpu),
                                   mock.call(userid, cmd_uname),
                                   mock.call(userid, cmd_chcpu)])
        request.assert_not_called()

    @mock.patch.object(smtclient.SMTClient, '_request')
    @mock.patch.object(smtclient.SMTClient, '_get_available_cpu_addrs')
    @mock.patch.object(smtclient.SMTClient, '_get_defined_cpu_addrs')
    def test_resize_cpus_equal_count(self, get_defined, get_avail,
                                     request):
        """Definition already has the requested CPU count: no-op."""
        userid = 'testuid'
        count = 2
        get_defined.return_value = (32, ['00', '01'])
        return_data = self._smtclient.resize_cpus(userid, count)
        self.assertTupleEqual(return_data, (0, [], 32))
        get_defined.assert_called_once_with(userid)
        get_avail.assert_not_called()
        request.assert_not_called()

    @mock.patch.object(smtclient.SMTClient, '_request')
    @mock.patch.object(smtclient.SMTClient, '_get_available_cpu_addrs')
    @mock.patch.object(smtclient.SMTClient, '_get_defined_cpu_addrs')
    def test_resize_cpus_add(self, get_defined, get_avail, request):
        """Grow from 2 to 4 defined CPUs: the two lowest free addresses
        are added and the SHARE is scaled to the new count."""
        userid = 'testuid'
        count = 4
        get_defined.return_value = (32, ['00', '01'])
        avail_lst = ['02', '03', '04', '05', '06', '07', '08', '09',
                     '0A', '0B', '0C', '0D', '0E', '0F', '10', '11',
                     '12', '13', '14', '15', '16', '17', '18', '19',
                     '1A', '1B', '1C', '1D', '1E', '1F']
        get_avail.return_value = avail_lst
        return_data = self._smtclient.resize_cpus(userid, count)
        self.assertTupleEqual(return_data, (1, ['02', '03'], 32))
        get_defined.assert_called_once_with(userid)
        get_avail.assert_called_once_with(['00', '01'], 32)
        rd = ("SMAPI testuid API Image_Definition_Update_DM --operands "
              "-k CPU=CPUADDR=02 -k CPU=CPUADDR=03 -k SHARE=RELATIVE=400")
        request.assert_called_once_with(rd)

    @mock.patch.object(smtclient.SMTClient, '_request')
    @mock.patch.object(smtclient.SMTClient, '_get_available_cpu_addrs')
    @mock.patch.object(smtclient.SMTClient, '_get_defined_cpu_addrs')
    def test_resize_cpus_delete(self, get_defined, get_avail, request):
        """Shrink from 6 to 4 defined CPUs: the two highest addresses
        are removed from the definition."""
        userid = 'testuid'
        count = 4
        get_defined.return_value = (32, ['00', '1A', '02', '01', '11',
                                         '10'])
        return_data = self._smtclient.resize_cpus(userid, count)
        self.assertTupleEqual(return_data, (2, ['11', '1A'], 32))
        get_defined.assert_called_once_with(userid)
        get_avail.assert_not_called()
        rd = ("SMAPI testuid API Image_Definition_Delete_DM --operands "
              "-k 
CPU=CPUADDR=11 -k CPU=CPUADDR=1A") rd2 = ("SMAPI testuid API Image_Definition_Update_DM --operands " "-k SHARE=RELATIVE=400") calls = [call(rd), call(rd2)] request.assert_has_calls(calls) @mock.patch.object(smtclient.SMTClient, '_request') @mock.patch.object(smtclient.SMTClient, '_get_available_cpu_addrs') @mock.patch.object(smtclient.SMTClient, '_get_defined_cpu_addrs') def test_resize_cpus_max_not_defined(self, get_defined, get_avail, request): userid = 'testuid' count = 4 get_defined.return_value = (0, ['00', '01']) self.assertRaises(exception.SDKConflictError, self._smtclient.resize_cpus, userid, count) get_defined.assert_called_once_with(userid) get_avail.assert_not_called() request.assert_not_called() @mock.patch.object(smtclient.SMTClient, '_request') @mock.patch.object(smtclient.SMTClient, '_get_available_cpu_addrs') @mock.patch.object(smtclient.SMTClient, '_get_defined_cpu_addrs') def test_resize_cpus_req_exceeds_max(self, get_defined, get_avail, request): userid = 'testuid' count = 40 get_defined.return_value = (32, ['00', '01']) self.assertRaises(exception.SDKConflictError, self._smtclient.resize_cpus, userid, count) get_defined.assert_called_once_with(userid) get_avail.assert_not_called() request.assert_not_called() @mock.patch.object(smtclient.SMTClient, '_request') @mock.patch.object(smtclient.SMTClient, '_get_available_cpu_addrs') @mock.patch.object(smtclient.SMTClient, '_get_defined_cpu_addrs') def test_resize_cpus_add_failed(self, get_defined, get_avail, request): userid = 'testuid' count = 4 get_defined.return_value = (32, ['00', '01']) avail_lst = ['02', '03', '04', '05', '06', '07', '08', '09', '0A', '0B', '0C', '0D', '0E', '0F', '10', '11', '12', '13', '14', '15', '16', '17', '18', '19', '1A', '1B', '1C', '1D', '1E', '1F'] get_avail.return_value = avail_lst request.side_effect = exception.SDKSMTRequestFailed({}, 'err') self.assertRaises(exception.SDKGuestOperationError, self._smtclient.resize_cpus, userid, count) 
get_defined.assert_called_once_with(userid) get_avail.assert_called_once_with(['00', '01'], 32) rd = ("SMAPI testuid API Image_Definition_Update_DM --operands " "-k CPU=CPUADDR=02 -k CPU=CPUADDR=03 -k SHARE=RELATIVE=400") request.assert_called_once_with(rd) @mock.patch.object(smtclient.SMTClient, '_request') @mock.patch.object(smtclient.SMTClient, '_get_available_cpu_addrs') @mock.patch.object(smtclient.SMTClient, '_get_defined_cpu_addrs') def test_resize_cpus_delete_failed(self, get_defined, get_avail, request): userid = 'testuid' count = 4 get_defined.return_value = (32, ['00', '01', '02', '03', '04', '05']) request.side_effect = exception.SDKSMTRequestFailed({}, 'err') self.assertRaises(exception.SDKGuestOperationError, self._smtclient.resize_cpus, userid, count) get_defined.assert_called_once_with(userid) get_avail.assert_not_called() rd = ("SMAPI testuid API Image_Definition_Delete_DM --operands " "-k CPU=CPUADDR=04 -k CPU=CPUADDR=05") request.assert_called_once_with(rd) @mock.patch.object(smtclient.SMTClient, '_get_defined_memory') @mock.patch.object(smtclient.SMTClient, '_replace_user_direct') def test_resize_memory_reserved_not_defined(self, replace_def, get_defined): userid = 'testuid' size = '2g' sample_definition = [u'USER TESTUID LBYONLY 4096M 64G G', u'INCLUDE OSDFLT', u'CPU 00 BASE', u'IPL 0100', u'MDISK 0100 3390 5501 5500 OMB1BA MR', u''] get_defined.return_value = (4096, 65536, -1, sample_definition) self.assertRaises(exception.SDKConflictError, self._smtclient.resize_memory, userid, size) replace_def.assert_not_called() @mock.patch.object(smtclient.SMTClient, '_get_defined_memory') @mock.patch.object(smtclient.SMTClient, '_replace_user_direct') def test_resize_memory_exceed_max_size(self, replace_def, get_defined): userid = 'testuid' size = '65g' sample_definition = [u'USER TESTUID LBYONLY 4096M 64G G', u'INCLUDE OSDFLT', u'COMMAND DEF STOR RESERVED 61440M', u'CPU 00 BASE', u'IPL 0100', u'MDISK 0100 3390 5501 5500 OMB1BA MR', u''] 
get_defined.return_value = (4096, 65536, 61440, sample_definition) self.assertRaises(exception.SDKConflictError, self._smtclient.resize_memory, userid, size) replace_def.assert_not_called() @mock.patch.object(smtclient.SMTClient, '_get_defined_memory') @mock.patch.object(smtclient.SMTClient, '_replace_user_direct') def test_resize_memory_equal_size(self, replace_def, get_defined): userid = 'testuid' size = '4g' sample_definition = [u'USER TESTUID LBYONLY 4096M 64G G', u'INCLUDE OSDFLT', u'COMMAND DEF STOR RESERVED 61440M', u'CPU 00 BASE', u'IPL 0100', u'MDISK 0100 3390 5501 5500 OMB1BA MR', u''] get_defined.return_value = (4096, 65536, 61440, sample_definition) (action, defined_mem, max_mem, user_direct) = \ self._smtclient.resize_memory(userid, size) self.assertEqual(action, 0) replace_def.assert_not_called() @mock.patch.object(smtclient.SMTClient, '_get_defined_memory') @mock.patch.object(smtclient.SMTClient, '_lock_user_direct') @mock.patch.object(smtclient.SMTClient, '_replace_user_direct') def test_resize_memory_increase(self, replace_def, lock_def, get_def): userid = 'testuid' size = '10240M' sample_definition = [u'USER TESTUID LBYONLY 4096M 64G G', u'INCLUDE OSDFLT', u'COMMAND DEF STOR RESERVED 61440M', u'CPU 00 BASE', u'IPL 0100', u'MDISK 0100 3390 5501 5500 OMB1BA MR', u''] get_def.return_value = (4096, 65536, 61440, sample_definition) (action, defined_mem, max_mem, user_direct) = \ self._smtclient.resize_memory(userid, size) self.assertEqual(action, 1) get_def.assert_called_once_with(userid) lock_def.assert_called_once_with(userid) new_entry = ("USER TESTUID LBYONLY 10240M 64G G\n" "INCLUDE OSDFLT\n" "COMMAND DEF STOR RESERVED 55296M\n" "CPU 00 BASE\n" "IPL 0100\n" "MDISK 0100 3390 5501 5500 OMB1BA MR\n") replace_def.assert_called_once_with(userid, new_entry) @mock.patch.object(smtclient.SMTClient, '_get_defined_memory') @mock.patch.object(smtclient.SMTClient, '_lock_user_direct') @mock.patch.object(smtclient.SMTClient, '_replace_user_direct') def 
test_resize_memory_decrease(self, replace_def, lock_def, get_def): userid = 'testuid' size = '2g' sample_definition = [u'USER TESTUID LBYONLY 4096M 64G G', u'INCLUDE OSDFLT', u'COMMAND DEF STOR RESERVED 61440M', u'CPU 00 BASE', u'IPL 0100', u'MDISK 0100 3390 5501 5500 OMB1BA MR', u''] get_def.return_value = (4096, 65536, 61440, sample_definition) (action, defined_mem, max_mem, user_direct) = \ self._smtclient.resize_memory(userid, size) self.assertEqual(action, 1) get_def.assert_called_once_with(userid) lock_def.assert_called_once_with(userid) new_entry = ("USER TESTUID LBYONLY 2048M 64G G\n" "INCLUDE OSDFLT\n" "COMMAND DEF STOR RESERVED 63488M\n" "CPU 00 BASE\n" "IPL 0100\n" "MDISK 0100 3390 5501 5500 OMB1BA MR\n") replace_def.assert_called_once_with(userid, new_entry) @mock.patch.object(smtclient.SMTClient, '_get_defined_memory') @mock.patch.object(smtclient.SMTClient, '_lock_user_direct') @mock.patch.object(smtclient.SMTClient, '_replace_user_direct') def test_resize_memory_conf_max_stor_reserved_in_G(self, replace_def, lock_def, get_def): userid = 'testuid' size = '32768M' base.set_conf('zvm', 'user_default_max_reserved_memory', '64G') sample_definition = [u'USER TESTUID LBYONLY 65536M 128G G', u'INCLUDE OSDFLT', u'COMMAND DEF STOR RESERVED 65536M', u'CPU 00 BASE', u'IPL 0100', u'MDISK 0100 3390 5501 5500 OMB1BA MR', u''] get_def.return_value = (1024, 131072, 65536, sample_definition) (action, defined_mem, max_mem, user_direct) = \ self._smtclient.resize_memory(userid, size) self.assertEqual(action, 1) get_def.assert_called_once_with(userid) lock_def.assert_called_once_with(userid) new_entry = ("USER TESTUID LBYONLY 32768M 128G G\n" "INCLUDE OSDFLT\n" "COMMAND DEF STOR RESERVED 65536M\n" "CPU 00 BASE\n" "IPL 0100\n" "MDISK 0100 3390 5501 5500 OMB1BA MR\n") replace_def.assert_called_once_with(userid, new_entry) @mock.patch.object(smtclient.SMTClient, '_get_defined_memory') @mock.patch.object(smtclient.SMTClient, '_lock_user_direct') 
@mock.patch.object(smtclient.SMTClient, '_replace_user_direct') def test_resize_memory_conf_max_stor_reserved_in_M(self, replace_def, lock_def, get_def): userid = 'testuid' size = '32768M' base.set_conf('zvm', 'user_default_max_reserved_memory', '65536M') sample_definition = [u'USER TESTUID LBYONLY 65536M 128G G', u'INCLUDE OSDFLT', u'COMMAND DEF STOR RESERVED 65536M', u'CPU 00 BASE', u'IPL 0100', u'MDISK 0100 3390 5501 5500 OMB1BA MR', u''] get_def.return_value = (1024, 131072, 65536, sample_definition) (action, defined_mem, max_mem, user_direct) = \ self._smtclient.resize_memory(userid, size) self.assertEqual(action, 1) get_def.assert_called_once_with(userid) lock_def.assert_called_once_with(userid) new_entry = ("USER TESTUID LBYONLY 32768M 128G G\n" "INCLUDE OSDFLT\n" "COMMAND DEF STOR RESERVED 65536M\n" "CPU 00 BASE\n" "IPL 0100\n" "MDISK 0100 3390 5501 5500 OMB1BA MR\n") replace_def.assert_called_once_with(userid, new_entry) @mock.patch.object(smtclient.SMTClient, '_get_defined_memory') @mock.patch.object(smtclient.SMTClient, '_lock_user_direct') @mock.patch.object(smtclient.SMTClient, '_replace_user_direct') def test_resize_memory_default_max_stor_reserved(self, replace_def, lock_def, get_def): userid = 'testuid' size = '4096M' base.set_conf('zvm', 'user_default_max_reserved_memory', '128G') sample_definition = [u'USER TESTUID LBYONLY 1024M 256G G', u'INCLUDE OSDFLT', u'COMMAND DEF STOR RESERVED 131072M', u'CPU 00 BASE', u'IPL 0100', u'MDISK 0100 3390 5501 5500 OMB1BA MR', u''] get_def.return_value = (1024, 262144, 131072, sample_definition) (action, defined_mem, max_mem, user_direct) = \ self._smtclient.resize_memory(userid, size) self.assertEqual(action, 1) get_def.assert_called_once_with(userid) lock_def.assert_called_once_with(userid) new_entry = ("USER TESTUID LBYONLY 4096M 256G G\n" "INCLUDE OSDFLT\n" "COMMAND DEF STOR RESERVED 131072M\n" "CPU 00 BASE\n" "IPL 0100\n" "MDISK 0100 3390 5501 5500 OMB1BA MR\n") replace_def.assert_called_once_with(userid, 
new_entry) @mock.patch.object(smtclient.SMTClient, '_get_defined_memory') @mock.patch.object(smtclient.SMTClient, '_lock_user_direct') @mock.patch.object(smtclient.SMTClient, '_replace_user_direct') def test_resize_memory_lock_failed(self, replace_def, lock_def, get_def): userid = 'testuid' size = '2g' sample_definition = [u'USER TESTUID LBYONLY 4096M 64G G', u'INCLUDE OSDFLT', u'COMMAND DEF STOR RESERVED 61440M', u'CPU 00 BASE', u'IPL 0100', u'MDISK 0100 3390 5501 5500 OMB1BA MR', u''] get_def.return_value = (4096, 65536, 61440, sample_definition) lock_def.side_effect = exception.SDKSMTRequestFailed({}, 'err') self.assertRaises(exception.SDKGuestOperationError, self._smtclient.resize_memory, userid, size) get_def.assert_called_once_with(userid) lock_def.assert_called_once_with(userid) replace_def.assert_not_called() @mock.patch.object(smtclient.SMTClient, '_get_defined_memory') @mock.patch.object(smtclient.SMTClient, '_lock_user_direct') @mock.patch.object(smtclient.SMTClient, '_replace_user_direct') def test_resize_memory_replace_failed(self, replace_def, lock_def, get_def): userid = 'testuid' size = '2g' sample_definition = [u'USER TESTUID LBYONLY 4096M 64G G', u'INCLUDE OSDFLT', u'COMMAND DEF STOR RESERVED 61440M', u'CPU 00 BASE', u'IPL 0100', u'MDISK 0100 3390 5501 5500 OMB1BA MR', u''] get_def.return_value = (4096, 65536, 61440, sample_definition) replace_def.side_effect = exception.SDKSMTRequestFailed({}, 'err') self.assertRaises(exception.SDKGuestOperationError, self._smtclient.resize_memory, userid, size) get_def.assert_called_once_with(userid) lock_def.assert_called_once_with(userid) new_entry = ("USER TESTUID LBYONLY 2048M 64G G\n" "INCLUDE OSDFLT\n" "COMMAND DEF STOR RESERVED 63488M\n" "CPU 00 BASE\n" "IPL 0100\n" "MDISK 0100 3390 5501 5500 OMB1BA MR\n") replace_def.assert_called_once_with(userid, new_entry) @mock.patch.object(smtclient.SMTClient, 'get_user_direct') def test_get_defined_memory(self, get_user_direct): userid = 'testuid' sample_definition 
= [u'USER TESTUID LBYONLY 4096M 64G G', u'INCLUDE OSDFLT', u'COMMAND DEF STOR RESERVED 61440M', u'CPU 00 BASE', u'IPL 0100', u'MDISK 0100 3390 5501 5500 OMB1BA MR', u''] get_user_direct.return_value = sample_definition (defined_mem, max_mem, reserved_mem, user_direct) = \ self._smtclient._get_defined_memory(userid) self.assertEqual(defined_mem, 4096) self.assertEqual(max_mem, 65536) self.assertEqual(reserved_mem, 61440) self.assertListEqual(user_direct, sample_definition) @mock.patch.object(smtclient.SMTClient, 'get_user_direct') def test_get_defined_memory_reserved_not_defined(self, get_user_direct): userid = 'testuid' sample_definition = [u'USER TESTUID LBYONLY 4096M 64G G', u'INCLUDE OSDFLT', u'CPU 00 BASE', u'IPL 0100', u'MDISK 0100 3390 5501 5500 OMB1BA MR', u''] get_user_direct.return_value = sample_definition (defined_mem, max_mem, reserved_mem, user_direct) = \ self._smtclient._get_defined_memory(userid) self.assertEqual(defined_mem, 4096) self.assertEqual(max_mem, 65536) self.assertEqual(reserved_mem, -1) self.assertListEqual(user_direct, sample_definition) @mock.patch.object(smtclient.SMTClient, 'get_user_direct') def test_get_defined_memory_user_more_than_6_fields(self, get_user_direct): userid = 'testuid' sample_definition = [u'USER TESTUID LBYONLY 4096M 64G G 64', u'INCLUDE OSDFLT', u'COMMAND DEF STOR RESERVED 61440M', u'CPU 00 BASE', u'IPL 0100', u'MDISK 0100 3390 5501 5500 OMB1BA MR', u''] get_user_direct.return_value = sample_definition (defined_mem, max_mem, reserved_mem, user_direct) = \ self._smtclient._get_defined_memory(userid) self.assertEqual(defined_mem, 4096) self.assertEqual(max_mem, 65536) self.assertEqual(reserved_mem, 61440) self.assertListEqual(user_direct, sample_definition) @mock.patch.object(smtclient.SMTClient, 'get_user_direct') def test_get_defined_memory_user_less_than_6_fields(self, get_user_direct): userid = 'testuid' sample_definition = [u'USER TESTUID LBYONLY 4096M 64G', u'INCLUDE OSDFLT', u'COMMAND DEF STOR RESERVED 
61440M', u'CPU 00 BASE', u'IPL 0100', u'MDISK 0100 3390 5501 5500 OMB1BA MR', u''] get_user_direct.return_value = sample_definition (defined_mem, max_mem, reserved_mem, user_direct) = \ self._smtclient._get_defined_memory(userid) self.assertEqual(defined_mem, -1) self.assertEqual(max_mem, -1) self.assertEqual(reserved_mem, -1) self.assertListEqual(user_direct, sample_definition) @mock.patch.object(smtclient.SMTClient, '_request') def test_replace_user_direct_err(self, req): userid = 'testuid' user_entry = [u'USER TESTUID LBYONLY 4096M 64G G', u'INCLUDE OSDFLT', u'COMMAND DEF STOR RESERVED 61440M', u'CPU 00 BASE', u'IPL 0100', u'MDISK 0100 3390 5501 5500 OMB1BA MR', u''] req.side_effect = [exception.SDKSMTRequestFailed({}, 'err'), ""] self.assertRaises(exception.SDKSMTRequestFailed, self._smtclient._replace_user_direct, userid, user_entry) @mock.patch.object(smtclient.SMTClient, 'execute_cmd') def test_get_active_memory(self, execute_cmd): userid = 'testuid' sample_lsmem = [u'Address Range Size (MB) \ State Removable Device', u'==================================================\ =============================', u'0x0000000000000000-0x000000000fffffff 256 \ online no 0-1', u'0x0000000010000000-0x000000003fffffff 768 \ online yes 2-7', u'0x0000000040000000-0x000000007fffffff 1024 \ online no 8-15', u'0x0000000080000000-0x00000000ffffffff 2048 \ online yes 16-31', u'0x0000000100000000-0x0000000fffffffff 61440 \ offline - 32-511', u'', u'Memory device size : 128 MB', u'Memory block size : 256 MB', u'Total online memory : 4096 MB', u'Total offline memory: 61440 MB' ] execute_cmd.return_value = sample_lsmem active_mem = self._smtclient._get_active_memory(userid) self.assertEqual(active_mem, 4096) @mock.patch.object(smtclient.SMTClient, 'execute_cmd') def test_get_active_memory_different_lsmem_output(self, execute_cmd): userid = 'testuid' sample_lsmem = [u'RANGE SIZE \ STATE REMOVABLE BLOCK', u'0x0000000000000000-0x000000000fffffff 256M \ online no 0', 
u'0x0000000010000000-0x000000004fffffff 1G \ online yes 1-4', u'0x0000000050000000-0x000000008fffffff 1G \ online no 5-8', u'0x0000000090000000-0x00000000cfffffff 1G \ online yes 9-12' u'0x00000000d0000000-0x00000000ffffffff 768M \ online no 13-15' u'0x0000000100000000-0x0000000fffffffff 60G \ offline - 16-255' u'Memory block size: 256M', u'Total online memory: 32G', u'Total offline memory: 32G' ] execute_cmd.return_value = sample_lsmem active_mem = self._smtclient._get_active_memory(userid) self.assertEqual(active_mem, 32768) @mock.patch.object(smtclient.SMTClient, '_get_active_memory') @mock.patch.object(smtclient.SMTClient, 'resize_memory') def test_live_resize_memory_less(self, resize_mem, get_active_mem): userid = 'testuid' req_mem = "1g" get_active_mem.return_value = 2048 self.assertRaises(exception.SDKConflictError, self._smtclient.live_resize_memory, userid, req_mem) resize_mem.assert_not_called() @mock.patch.object(smtclient.SMTClient, '_get_active_memory') @mock.patch.object(smtclient.SMTClient, 'resize_memory') @mock.patch.object(smtclient.SMTClient, 'execute_cmd') def test_live_resize_memory_equal(self, exec_cmd, resize_mem, get_active_mem): userid = 'testuid' req_mem = "2g" get_active_mem.return_value = 2048 resize_mem.return_value = (1, 2048, 65536, []) self._smtclient.live_resize_memory(userid, req_mem) resize_mem.assert_called_once_with(userid, req_mem) exec_cmd.assert_not_called() @mock.patch.object(smtclient.SMTClient, '_get_active_memory') @mock.patch.object(smtclient.SMTClient, 'resize_memory') @mock.patch.object(smtclient.SMTClient, 'execute_cmd') @mock.patch.object(smtclient.SMTClient, '_revert_user_direct') def test_live_resize_memory_more(self, revert, exec_cmd, resize_mem, get_active_mem): userid = 'testuid' req_mem = "4096m" get_active_mem.return_value = 2048 resize_mem.return_value = (1, 2048, 65536, []) exec_cmd.side_effect = ['', ''] self._smtclient.live_resize_memory(userid, req_mem) resize_mem.assert_called_once_with(userid, req_mem) 
def_standby_cmd = "vmcp def storage standby 2048M" online_mem_cmd = "chmem -e 2048M" exec_cmd.assert_has_calls([mock.call(userid, def_standby_cmd), mock.call(userid, online_mem_cmd)]) revert.assert_not_called() @mock.patch.object(smtclient.SMTClient, '_get_active_memory') @mock.patch.object(smtclient.SMTClient, 'resize_memory') @mock.patch.object(smtclient.SMTClient, 'execute_cmd') @mock.patch.object(smtclient.SMTClient, '_revert_user_direct') def test_live_resize_memory_standby_failed(self, revert, exec_cmd, resize_mem, get_active_mem): userid = 'testuid' req_mem = "4096m" get_active_mem.return_value = 2048 sample_direct = [u'USER TESTUID LBYONLY 2048M 64G G', u'INCLUDE OSDFLT', u'COMMAND DEF STOR RESERVED 61440M', u'CPU 00 BASE', u'IPL 0100', u'MDISK 0100 3390 5501 5500 OMB1BA MR', u''] resize_mem.return_value = (1, 2048, 65536, sample_direct) exec_cmd.side_effect = exception.SDKSMTRequestFailed({}, 'fake err') self.assertRaises(exception.SDKGuestOperationError, self._smtclient.live_resize_memory, userid, req_mem) resize_mem.assert_called_once_with(userid, req_mem) def_standby_cmd = "vmcp def storage standby 2048M" exec_cmd.assert_called_with(userid, def_standby_cmd) revert.assert_called_once_with(userid, sample_direct) @mock.patch.object(smtclient.SMTClient, '_get_active_memory') @mock.patch.object(smtclient.SMTClient, 'resize_memory') @mock.patch.object(smtclient.SMTClient, 'execute_cmd') @mock.patch.object(smtclient.SMTClient, '_revert_user_direct') def test_live_resize_memory_standby_failed_no_revert(self, revert, exec_cmd, resize_mem, get_active_mem): userid = 'testuid' req_mem = "4096m" get_active_mem.return_value = 2048 sample_direct = [u'USER TESTUID LBYONLY 4096M 64G G', u'INCLUDE OSDFLT', u'COMMAND DEF STOR RESERVED 61440M', u'CPU 00 BASE', u'IPL 0100', u'MDISK 0100 3390 5501 5500 OMB1BA MR', u''] resize_mem.return_value = (0, 4096, 65536, sample_direct) exec_cmd.side_effect = [exception.SDKSMTRequestFailed({}, 'fake err'), ''] 
self.assertRaises(exception.SDKGuestOperationError, self._smtclient.live_resize_memory, userid, req_mem) resize_mem.assert_called_once_with(userid, req_mem) def_standby_cmd = "vmcp def storage standby 2048M" exec_cmd.assert_called_with(userid, def_standby_cmd) revert.assert_not_called() @mock.patch.object(smtclient.SMTClient, '_get_active_memory') @mock.patch.object(smtclient.SMTClient, 'resize_memory') @mock.patch.object(smtclient.SMTClient, 'execute_cmd') @mock.patch.object(smtclient.SMTClient, '_revert_user_direct') def test_live_resize_memory_online_failed(self, revert, exec_cmd, resize_mem, get_active_mem): userid = 'testuid' req_mem = "4096m" get_active_mem.return_value = 2048 sample_direct = [u'USER TESTUID LBYONLY 4096M 64G G', u'INCLUDE OSDFLT', u'COMMAND DEF STOR RESERVED 61440M', u'CPU 00 BASE', u'IPL 0100', u'MDISK 0100 3390 5501 5500 OMB1BA MR', u''] resize_mem.return_value = (1, 4096, 65536, sample_direct) exec_cmd.side_effect = ['', exception.SDKSMTRequestFailed({}, 'fake err'), ''] self.assertRaises(exception.SDKGuestOperationError, self._smtclient.live_resize_memory, userid, req_mem) resize_mem.assert_called_once_with(userid, req_mem) def_standby_cmd = "vmcp def storage standby 2048M" online_mem_cmd = "chmem -e 2048M" revert_standby_cmd = "vmcp def storage standby 0M" exec_cmd.assert_has_calls([mock.call(userid, def_standby_cmd), mock.call(userid, online_mem_cmd), mock.call(userid, revert_standby_cmd)]) revert.assert_called_once_with(userid, sample_direct) @mock.patch.object(smtclient.SMTClient, '_get_active_memory') @mock.patch.object(smtclient.SMTClient, 'resize_memory') @mock.patch.object(smtclient.SMTClient, 'execute_cmd') def test_live_resize_memory_exceed_max_stor_reserved(self, exec_cmd, resize_mem, get_active_mem): userid = 'testuid' req_mem = "252g" get_active_mem.return_value = 2048 sample_direct = [u'USER TESTUID LBYONLY 258048M 256G G', u'INCLUDE OSDFLT', u'COMMAND DEF STOR RESERVED 4096M', u'CPU 00 BASE', u'IPL 0100', u'MDISK 0100 3390 
5501 5500 OMB1BA MR', u''] resize_mem.return_value = (1, 258048, 262144, sample_direct) self.assertRaises(exception.SDKConflictError, self._smtclient.live_resize_memory, userid, req_mem) resize_mem.assert_not_called() def test_guest_deploy_rhcos_no_ignition(self): userid = 'testuid' image_name = "test_image" transportfiles = None self.assertRaises(exception.SDKGuestOperationError, self._smtclient.guest_deploy_rhcos, userid, image_name, transportfiles) def test_is_rhcos(self): os_version = "rhel" output = self._smtclient.is_rhcos(os_version) self.assertFalse(output) os_version = "rhcos4.2" output = self._smtclient.is_rhcos(os_version) self.assertTrue(output) @mock.patch.object(smtclient.SMTClient, '_request') def test_host_get_ssi_info(self, req): res = ['ssi_name = ICIC2SSI', 'ssi_mode = Stable', 'ssi_pdr = IAS7CM_on_139E', 'cross_system_timeouts = Enabled', 'output.ssiInfoCount = 4', '', 'member_slot = 1', 'member_system_id = BOEIAAS7', 'member_state = Joined', 'member_pdr_heartbeat = 12/28/2021_05:10:21', 'member_received_heartbeat = 12/28/2021_05:10:21', '', 'member_slot = 2', 'member_system_id = BOEIAAS8', 'member_state = Joined', 'member_pdr_heartbeat = 12/28/2021_05:10:36', 'member_received_heartbeat = 12/28/2021_05:10:36', ''] ssi_res = {'overallRC': 0, 'rc': 0, 'rs': 0, 'errno': 0, 'strError': '', 'response': res, 'logEntries': []} not_ssi_res = {'overallRC': 4, 'rc': 4, 'rs': 3008, 'errno': 0, 'strError': 'Failed', 'response': ['not a member of an SSI cluster']} bad_res = {'overallRC': 8, 'rc': 8, 'rs': 3002, 'errno': 0, 'strError': 'Failed', 'response': ['Invalid parameter name']} req.side_effect = [ssi_res, exception.SDKSMTRequestFailed(not_ssi_res, 'err'), exception.SDKSMTRequestFailed(bad_res, 'err')] # case 1: host in SSI cluster, returns the SSI info result = self._smtclient.host_get_ssi_info() self.assertEqual(result, res) # case 2: host no in SSI cluster, returns [] result = self._smtclient.host_get_ssi_info() self.assertEqual(result, []) # case 3: 
error self.assertRaises(exception.SDKSMTRequestFailed, self._smtclient.host_get_ssi_info) zVMCloudConnector-1.6.3/zvmsdk/tests/unit/test_dist.py0000664000175000017510000006042314266177632022560 0ustar ruirui00000000000000# Copyright 2017,2021 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import mock import os from jinja2 import Template from zvmsdk import dist from zvmsdk import smtclient from zvmsdk.tests.unit import base class RHEL7TestCase(base.SDKTestCase): @classmethod def setUpClass(cls): super(RHEL7TestCase, cls).setUpClass() cls.os_version = 'redhat7.2' def setUp(self): super(RHEL7TestCase, self).setUp() self.dist_manager = dist.LinuxDistManager() self.linux_dist = self.dist_manager.get_linux_dist(self.os_version)() def test_create_network_configuration_files(self): guest_networks = [{'ip_addr': '192.168.95.10', 'dns_addr': ['9.0.2.1', '9.0.3.1'], 'gateway_addr': '192.168.95.1', 'cidr': "192.168.95.0/24", 'nic_vdev': '1000', 'mtu': 1600}] file_path = '/etc/sysconfig/network-scripts/' first = False files_and_cmds = self.linux_dist.create_network_configuration_files( file_path, guest_networks, first, active=False) (net_conf_files, net_conf_cmds, clean_cmd, net_enable_cmd) = files_and_cmds cfg_str = net_conf_files[0][1].split('\n') self.assertEqual('DEVICE="enccw0.0.1000"', cfg_str[0]) self.assertEqual('BROADCAST="192.168.95.255"', cfg_str[2]) self.assertEqual('GATEWAY="192.168.95.1"', cfg_str[3]) self.assertEqual('IPADDR="192.168.95.10"', cfg_str[4]) 
self.assertEqual('MTU="1600"', cfg_str[11]) self.assertEqual('DNS1="9.0.2.1"', cfg_str[12]) self.assertEqual('DNS2="9.0.3.1"', cfg_str[13]) @mock.patch('jinja2.Template.render') @mock.patch('zvmsdk.dist.LinuxDist.get_template') def test_get_volume_attach_configuration_cmds(self, get_template, template_render): """ RHEL7 """ fcp_list = '1fc5 2fc5' wwpns = '0x5005076812341234 0x5005076812345678' lun = '0x0026000000000000' multipath = True mount_point = '/dev/sdz' get_template.return_value = Template('fake template {{fcp}}') self.linux_dist.get_volume_attach_configuration_cmds(fcp_list, wwpns, lun, multipath, mount_point) # check function called assertions get_template.assert_called_once_with("volumeops", "rhel7_attach_volume.j2") template_render.assert_called_once_with(fcp_list='1fc5 2fc5', wwpns=wwpns, lun='0x0026000000000000', target_filename='sdz') @mock.patch('jinja2.Template.render') @mock.patch('zvmsdk.dist.LinuxDist.get_template') def test_get_volume_detach_configuration_cmds_1(self, get_template, template_render): """ RHEL7 """ fcp_list = '1fc5 2fc5' wwpns = '0x5005076812341234 0x5005076812345678' lun = '0x0026000000000000' multipath = True mount_point = '/dev/sdz' get_template.return_value = Template('fake template {{fcp}}') # connections == 2 self.linux_dist.get_volume_detach_configuration_cmds(fcp_list, wwpns, lun, multipath, mount_point, 2) get_template.assert_called_once_with("volumeops", "rhel7_detach_volume.j2") template_render.assert_called_once_with(fcp_list='1fc5 2fc5', wwpns=wwpns, lun='0x0026000000000000', target_filename='sdz', is_last_volume=0) @mock.patch('jinja2.Template.render') @mock.patch('zvmsdk.dist.LinuxDist.get_template') def test_get_volume_detach_configuration_cmds_2(self, get_template, template_render): """ RHEL7 """ fcp_list = '1fc5 2fc5' wwpns = '0x5005076812341234 0x5005076812345678' lun = '0x0026000000000000' multipath = True mount_point = '/dev/sdz' get_template.return_value = Template('fake template {{fcp}}') # connections < 1 
self.linux_dist.get_volume_detach_configuration_cmds(fcp_list, wwpns, lun, multipath, mount_point, 0) get_template.assert_called_once_with("volumeops", "rhel7_detach_volume.j2") template_render.assert_called_once_with(fcp_list='1fc5 2fc5', wwpns=wwpns, lun='0x0026000000000000', target_filename='sdz', is_last_volume=1) def test_set_zfcp_config_files(self): """ RHEL7, same to rhel6""" pass class RHEL8TestCase(base.SDKTestCase): @classmethod def setUpClass(cls): super(RHEL8TestCase, cls).setUpClass() cls.os_version = 'redhat8.1' def setUp(self): super(RHEL8TestCase, self).setUp() self.dist_manager = dist.LinuxDistManager() self.linux_dist = self.dist_manager.get_linux_dist(self.os_version)() def test_create_network_configuration_files(self): guest_networks = [{'ip_addr': '192.168.95.10', 'dns_addr': ['9.0.2.1', '9.0.3.1'], 'gateway_addr': '192.168.95.1', 'cidr': "192.168.95.0/24", 'nic_vdev': '1000', 'mtu': 8000}] file_path = '/etc/sysconfig/network-scripts/' first = False files_and_cmds = self.linux_dist.create_network_configuration_files( file_path, guest_networks, first, active=False) (net_conf_files, net_conf_cmds, clean_cmd, net_enable_cmd) = files_and_cmds cfg_str = net_conf_files[0][1].split('\n') self.assertEqual('DEVICE="enc1000"', cfg_str[0]) self.assertEqual('BROADCAST="192.168.95.255"', cfg_str[2]) self.assertEqual('GATEWAY="192.168.95.1"', cfg_str[3]) self.assertEqual('IPADDR="192.168.95.10"', cfg_str[4]) self.assertEqual('MTU="8000"', cfg_str[11]) self.assertEqual('DNS1="9.0.2.1"', cfg_str[12]) self.assertEqual('DNS2="9.0.3.1"', cfg_str[13]) @mock.patch('jinja2.Template.render') @mock.patch('zvmsdk.dist.LinuxDist.get_template') def test_get_volume_attach_configuration_cmds(self, get_template, template_render): """ RHEL8 """ fcp_list = '1fc5 2fc5' wwpns = '0x5005076812341234 0x5005076812345678' lun = '0x0026000000000000' multipath = True mount_point = '/dev/sdz' get_template.return_value = Template('fake template {{fcp}}') 
self.linux_dist.get_volume_attach_configuration_cmds(fcp_list, wwpns, lun, multipath, mount_point) # check function called assertions get_template.assert_called_once_with("volumeops", "rhel8_attach_volume.j2") template_render.assert_called_once_with(fcp_list='1fc5 2fc5', wwpns=wwpns, lun='0x0026000000000000', target_filename='sdz') @mock.patch('jinja2.Template.render') @mock.patch('zvmsdk.dist.LinuxDist.get_template') def test_get_volume_detach_configuration_cmds_1(self, get_template, template_render): """ RHEL8 """ fcp_list = '1fc5 2fc5' wwpns = '0x5005076812341234 0x5005076812345678' lun = '0x0026000000000000' multipath = True mount_point = '/dev/sdz' get_template.return_value = Template('fake template {{fcp}}') # connections == 2 self.linux_dist.get_volume_detach_configuration_cmds(fcp_list, wwpns, lun, multipath, mount_point, 2) get_template.assert_called_once_with("volumeops", "rhel8_detach_volume.j2") template_render.assert_called_once_with(fcp_list='1fc5 2fc5', wwpns=wwpns, lun='0x0026000000000000', target_filename='sdz', is_last_volume=0) @mock.patch('jinja2.Template.render') @mock.patch('zvmsdk.dist.LinuxDist.get_template') def test_get_volume_detach_configuration_cmds_2(self, get_template, template_render): """ RHEL8 """ fcp_list = '1fc5 2fc5' wwpns = '0x5005076812341234 0x5005076812345678' lun = '0x0026000000000000' multipath = True mount_point = '/dev/sdz' get_template.return_value = Template('fake template {{fcp}}') # connections == 0 self.linux_dist.get_volume_detach_configuration_cmds(fcp_list, wwpns, lun, multipath, mount_point, 0) get_template.assert_called_once_with("volumeops", "rhel8_detach_volume.j2") template_render.assert_called_once_with(fcp_list='1fc5 2fc5', wwpns=wwpns, lun='0x0026000000000000', target_filename='sdz', is_last_volume=1) class RHCOS4TestCase(base.SDKTestCase): @classmethod def setUpClass(cls): super(RHCOS4TestCase, cls).setUpClass() cls.os_version = 'rhcos4' os.makedirs("/tmp/FakeID") def setUp(self): super(RHCOS4TestCase, 
self).setUp() self.dist_manager = dist.LinuxDistManager() self.linux_dist = self.dist_manager.get_linux_dist(self.os_version)() self._smtclient = smtclient.SMTClient() @mock.patch.object(smtclient.SMTClient, 'get_guest_path') def test_create_coreos_parameter(self, guest_path): network_info = [{'nic_vdev': '1000', 'ip_addr': '10.10.0.217', 'gateway_addr': '10.10.0.1', 'dns_addr': ['10.10.0.250', '10.10.0.51'], 'mac_addr': 'fa:16:3e:7a:1b:87', 'cidr': '10.10.0.0/24', 'nic_id': 'adca70f3-8509-44d4-92d4-2c1c14b3f25e', 'mtu': '1000'}] userid = "FakeID" guest_path.return_value = "/tmp/FakeID" res = self.linux_dist.create_coreos_parameter(network_info, userid) self.assertEqual(res, "10.10.0.217::10.10.0.1:24:FakeID:enc1000:none:" "10.10.0.250:10.10.0.51;1000") @mock.patch.object(smtclient.SMTClient, 'get_guest_path') def test_read_coreos_parameter(self, guest_path): guest_path.return_value = "/tmp/FakeID" userid = "FakeID" network_info = [{'nic_vdev': '1000', 'ip_addr': '10.10.0.217', 'gateway_addr': '10.10.0.1', 'dns_addr': ['10.10.0.250', '10.10.0.51'], 'mac_addr': 'fa:16:3e:7a:1b:87', 'cidr': '10.10.0.0/24', 'nic_id': 'adca70f3-8509-44d4-92d4-2c1c14b3f25e', 'mtu': '1000'}] self.linux_dist.create_coreos_parameter_temp_file(network_info, userid) param = self.linux_dist.read_coreos_parameter(userid) self.assertEqual(param, '10.10.0.217::10.10.0.1:24:FakeID:' 'enc1000:none:10.10.0.250:10.10.0.51;1000') class SLESTestCase(base.SDKTestCase): @classmethod def setUpClass(cls): super(SLESTestCase, cls).setUpClass() def setUp(self): super(SLESTestCase, self).setUp() self.dist_manager = dist.LinuxDistManager() self.sles11_dist = self.dist_manager.get_linux_dist('sles11')() self.sles12_dist = self.dist_manager.get_linux_dist('sles12')() self.sles15_dist = self.dist_manager.get_linux_dist('sles15')() def test_create_network_configuration_files(self): guest_networks = [{'ip_addr': '192.168.95.10', 'gateway_addr': '192.168.95.1', 'cidr': "192.168.95.0/24", 'nic_vdev': '1000', 'mtu': 
8000}] file_path = '/etc/sysconfig/network/' first = False files_and_cmds = self.sles15_dist.create_network_configuration_files( file_path, guest_networks, first, active=False) (net_conf_files, net_conf_cmds, clean_cmd, net_enable_cmd) = files_and_cmds cfg_str = net_conf_files[0][1].split('\n') self.assertEqual("BOOTPROTO='static'", cfg_str[0]) self.assertEqual("IPADDR='192.168.95.10'", cfg_str[1]) self.assertEqual("NETMASK='255.255.255.0'", cfg_str[2]) self.assertEqual("BROADCAST='192.168.95.255'", cfg_str[3]) self.assertEqual("NAME='OSA Express Network card (1000)'", cfg_str[5]) self.assertEqual("MTU='8000'", cfg_str[6]) @mock.patch('jinja2.Template.render') @mock.patch('zvmsdk.dist.LinuxDist.get_template') def test_get_volume_attach_configuration_cmds(self, get_template, template_render): """ SLES """ fcp_list = '1fc5 2fc5' wwpns = '0x5005076812341234 0x5005076812345678' lun = '0x0026000000000000' multipath = True mount_point = '/dev/sdz' get_template.return_value = Template('fake template {{fcp}}') self.sles15_dist.get_volume_attach_configuration_cmds(fcp_list, wwpns, lun, multipath, mount_point) # check function called assertions get_template.assert_called_once_with("volumeops", "sles_attach_volume.j2") template_render.assert_called_once_with(fcp_list='1fc5 2fc5', wwpns=wwpns, lun='0x0026000000000000', target_filename='sdz') @mock.patch('jinja2.Template.render') @mock.patch('zvmsdk.dist.LinuxDist.get_template') def test_get_volume_detach_configuration_cmds_1(self, get_template, template_render): """ SLES """ fcp_list = '1fc5 2fc5' wwpns = '0x5005076812341234 0x5005076812345678' lun = '0x0026000000000000' multipath = True mount_point = '/dev/sdz' get_template.return_value = Template('fake template {{fcp}}') # connections == 2 self.sles15_dist.get_volume_detach_configuration_cmds(fcp_list, wwpns, lun, multipath, mount_point, 2) get_template.assert_called_once_with( "volumeops", "sles_detach_volume.j2") template_render.assert_called_once_with(fcp_list='1fc5 
2fc5', wwpns=wwpns, lun='0x0026000000000000', target_filename='sdz', is_last_volume=0) @mock.patch('jinja2.Template.render') @mock.patch('zvmsdk.dist.LinuxDist.get_template') def test_get_volume_detach_configuration_cmds_2(self, get_template, template_render): """ SLES """ fcp_list = '1fc5 2fc5' wwpns = '0x5005076812341234 0x5005076812345678' lun = '0x0026000000000000' multipath = True mount_point = '/dev/sdz' get_template.return_value = Template('fake template {{fcp}}') # connections == 0 and is_last_volume shoud be 1 self.sles15_dist.get_volume_detach_configuration_cmds(fcp_list, wwpns, lun, multipath, mount_point, 0) get_template.assert_called_once_with( "volumeops", "sles_detach_volume.j2") template_render.assert_called_once_with(fcp_list='1fc5 2fc5', wwpns=wwpns, lun='0x0026000000000000', target_filename='sdz', is_last_volume=1) class UBUNTUTestCase(base.SDKTestCase): @classmethod def setUpClass(cls): super(UBUNTUTestCase, cls).setUpClass() def setUp(self): super(UBUNTUTestCase, self).setUp() self.dist_manager = dist.LinuxDistManager() self.linux_dist = self.dist_manager.get_linux_dist('ubuntu16')() self.ubuntu20_dist = self.dist_manager.get_linux_dist('ubuntu20')() class UBUNTU20TestCase(base.SDKTestCase): @classmethod def setUpClass(cls): super(UBUNTU20TestCase, cls).setUpClass() def setUp(self): super(UBUNTU20TestCase, self).setUp() self.dist_manager = dist.LinuxDistManager() self.linux_dist = self.dist_manager.get_linux_dist('ubuntu20')() def test_create_network_configuration_files(self): guest_networks = [{'ip_addr': '192.168.95.10', 'dns_addr': ['9.0.2.1', '9.0.3.1'], 'gateway_addr': '192.168.95.1', 'cidr': "192.168.95.0/24", 'nic_vdev': '1000', 'mtu': 6000}] file_path = '/etc/netplan/' first = True files_and_cmds = self.linux_dist.create_network_configuration_files( file_path, guest_networks, first, active=False) (net_conf_files, net_conf_cmds, clean_cmd, net_enable_cmd) = files_and_cmds ret = net_conf_files[0][1] expect = {'network': {'ethernets': 
{'enc1000': {'addresses': ['192.168.95.10/24'], 'gateway4': '192.168.95.1', 'mtu': '6000', 'nameservers': {'addresses': ['9.0.2.1', '9.0.3.1']} } }, 'version': 2 } } self.assertEqual(ret, expect) @mock.patch('jinja2.Template.render') @mock.patch('zvmsdk.dist.LinuxDist.get_template') def test_get_volume_attach_configuration_cmds(self, get_template, template_render): """ UBUNTU """ fcp_list = '1fc5 2fc5' wwpns = '0x5005076812341234 0x5005076812345678' lun = '0x0026000000000000' multipath = True mount_point = '/dev/sdz' get_template.return_value = Template('fake template {{fcp}}') self.linux_dist.get_volume_attach_configuration_cmds(fcp_list, wwpns, lun, multipath, mount_point) # check function called assertions get_template.assert_called_once_with("volumeops", "ubuntu_attach_volume.j2") template_render.assert_called_once_with(fcp_list='1fc5 2fc5', wwpns=wwpns, lun='0x0026000000000000', lun_id=38, target_filename='sdz') @mock.patch('jinja2.Template.render') @mock.patch('zvmsdk.dist.LinuxDist.get_template') def test_get_volume_detach_configuration_cmds_1(self, get_template, template_render): """ UBUNTU """ fcp_list = '1fc5 2fc5' wwpns = '0x5005076812341234 0x5005076812345678' lun = '0x0100000000000000' multipath = True mount_point = '/dev/sdz' get_template.return_value = Template('fake template {{fcp}}') # connections == 2 self.linux_dist.get_volume_detach_configuration_cmds(fcp_list, wwpns, lun, multipath, mount_point, 2) get_template.assert_called_once_with( "volumeops", "ubuntu_detach_volume.j2") template_render.assert_called_once_with(fcp_list='1fc5 2fc5', wwpns=wwpns, lun='0x0100000000000000', lun_id='0x0100000000000000', target_filename='sdz', is_last_volume=0) @mock.patch('jinja2.Template.render') @mock.patch('zvmsdk.dist.LinuxDist.get_template') def test_get_volume_detach_configuration_cmds_2(self, get_template, template_render): """ UBUNTU """ fcp_list = '1fc5 2fc5' wwpns = '0x5005076812341234 0x5005076812345678' lun = '0x0100000000000000' multipath = True 
mount_point = '/dev/sdz' get_template.return_value = Template('fake template {{fcp}}') # connections == 0 self.linux_dist.get_volume_detach_configuration_cmds(fcp_list, wwpns, lun, multipath, mount_point, 0) get_template.assert_called_once_with("volumeops", "ubuntu_detach_volume.j2") template_render.assert_called_once_with(fcp_list='1fc5 2fc5', wwpns=wwpns, lun='0x0100000000000000', lun_id='0x0100000000000000', target_filename='sdz', is_last_volume=1) zVMCloudConnector-1.6.3/zvmsdk/tests/unit/test_monitor.py0000664000175000017510000005410113672563714023300 0ustar ruirui00000000000000# Copyright 2017 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from zvmsdk import monitor from zvmsdk.tests.unit import base CPUMEM_SAMPLE1 = { 'userid': 'USERID1', 'guest_cpus': '1', 'used_cpu_time': '6185838 uS', 'elapsed_cpu_time': '35232895 uS', 'min_cpu_count': '2', 'max_cpu_limit': '10000', 'samples_cpu_in_use': '0', 'samples_cpu_delay': '0', 'used_memory': '290232 KB', 'max_memory': '2097152 KB', 'min_memory': '0 KB', 'shared_memory': '5222192 KB', } CPUMEM_SAMPLE2 = { 'userid': 'USERID2', 'guest_cpus': '3', 'used_cpu_time': '14293629 uS', 'elapsed_cpu_time': '4868976371 uS', 'min_cpu_count': '3', 'max_cpu_limit': '10000', 'samples_cpu_in_use': '0', 'samples_cpu_delay': '0', 'used_memory': '305020 KB', 'max_memory': '2097152 KB', 'min_memory': '0 KB', 'shared_memory': '5222190 KB', } MEM_KEYS = ['used_mem_kb', 'max_mem_kb', 'min_mem_kb', 'shared_mem_kb'] SMCLI_VSW_NIC_DATA = {'vswitches': [ {'vswitch_name': 'TESTVSW1', 'nics': [ {'nic_fr_rx_dsc': '0', 'nic_fr_rx_err': '0', 'nic_fr_tx_err': '4', 'userid': 'USERID1', 'nic_rx': '103024058', 'nic_fr_rx': '573952', 'nic_fr_tx': '548780', 'vdev': '0600', 'nic_fr_tx_dsc': '0', 'nic_tx': '102030890'}, {'nic_fr_rx_dsc': '0', 'nic_fr_rx_err': '0', 'nic_fr_tx_err': '4', 'userid': 'USERID2', 'nic_rx': '3111714', 'nic_fr_rx': '17493', 'nic_fr_tx': '16886', 'vdev': '0600', 'nic_fr_tx_dsc': '0', 'nic_tx': '3172646'}]}, {'vswitch_name': 'TESTVSW2', 'nics': [ {'nic_fr_rx_dsc': '0', 'nic_fr_rx_err': '0', 'nic_fr_tx_err': '0', 'userid': 'USERID1', 'nic_rx': '4684435', 'nic_fr_rx': '34958', 'nic_fr_tx': '16211', 'vdev': '1000', 'nic_fr_tx_dsc': '0', 'nic_tx': '3316601'}, {'nic_fr_rx_dsc': '0', 'nic_fr_rx_err': '0', 'nic_fr_tx_err': '0', 'userid': 'USERID2', 'nic_rx': '3577163', 'nic_fr_rx': '27211', 'nic_fr_tx': '12344', 'vdev': '1000', 'nic_fr_tx_dsc': '0', 'nic_tx': '2515045'}]}], 'vswitch_count': 2} INST_NICS_SAMPLE1 = [ {'nic_fr_rx': 573952, 'nic_fr_rx_dsc': 0, 'nic_fr_rx_err': 0, 'nic_fr_tx': 548780, 'nic_fr_tx_dsc': 0, 'nic_fr_tx_err': 4, 'nic_rx': 103024058, 'nic_tx': 
102030890, 'nic_vdev': '0600', 'vswitch_name': 'TESTVSW1'}, {'nic_fr_rx': 34958, 'nic_fr_rx_dsc': 0, 'nic_fr_rx_err': 0, 'nic_fr_tx': 16211, 'nic_fr_tx_dsc': 0, 'nic_fr_tx_err': 0, 'nic_rx': 4684435, 'nic_tx': 3316601, 'nic_vdev': '1000', 'vswitch_name': 'TESTVSW2'} ] INST_NICS_SAMPLE2 = [ {'nic_fr_rx': 17493, 'nic_fr_rx_dsc': 0, 'nic_fr_rx_err': 0, 'nic_fr_tx': 16886, 'nic_fr_tx_dsc': 0, 'nic_fr_tx_err': 4, 'nic_rx': 3111714, 'nic_tx': 3172646, 'nic_vdev': '0600', 'vswitch_name': 'TESTVSW1'}, {'nic_fr_rx': 27211, 'nic_fr_rx_dsc': 0, 'nic_fr_rx_err': 0, 'nic_fr_tx': 12344, 'nic_fr_tx_dsc': 0, 'nic_fr_tx_err': 0, 'nic_rx': 3577163, 'nic_tx': 2515045, 'nic_vdev': '1000', 'vswitch_name': 'TESTVSW2'} ] class SDKMonitorTestCase(base.SDKTestCase): def setUp(self): self._monitor = monitor.ZVMMonitor() @mock.patch("zvmsdk.monitor.MeteringCache.get") @mock.patch("zvmsdk.smtclient.SMTClient.get_power_state") @mock.patch("zvmsdk.monitor.ZVMMonitor._cache_enabled") def test_private_get_inspect_data_cache_hit_single(self, cache_enabled, get_ps, cache_get): cache_get.return_value = CPUMEM_SAMPLE1 rdata = self._monitor._get_inspect_data('cpumem', ['USERID1']) self.assertEqual(list(rdata.keys()), ['USERID1']) self.assertEqual(sorted(list(rdata['USERID1'].keys())), sorted(CPUMEM_SAMPLE1.keys())) self.assertEqual(rdata['USERID1']['guest_cpus'], '1') self.assertEqual(rdata['USERID1']['used_cpu_time'], '6185838 uS') self.assertEqual(rdata['USERID1']['used_memory'], '290232 KB') self.assertEqual(rdata['USERID1']['shared_memory'], '5222192 KB') get_ps.assert_not_called() cache_enabled.assert_not_called() @mock.patch("zvmsdk.monitor.MeteringCache.get") @mock.patch("zvmsdk.smtclient.SMTClient.get_power_state") @mock.patch("zvmsdk.monitor.ZVMMonitor._cache_enabled") def test_private_get_inspect_data_cache_hit_multi(self, cache_enabled, get_ps, cache_get): cache_get.side_effect = [CPUMEM_SAMPLE1, CPUMEM_SAMPLE2] rdata = self._monitor._get_inspect_data('cpumem', ['USERID1', 'USERID2']) 
self.assertEqual(sorted(rdata.keys()), ['USERID1', 'USERID2']) self.assertEqual(sorted(rdata['USERID1'].keys()), sorted(CPUMEM_SAMPLE1.keys())) self.assertEqual(rdata['USERID1']['guest_cpus'], '1') self.assertEqual(rdata['USERID1']['used_cpu_time'], '6185838 uS') self.assertEqual(rdata['USERID1']['used_memory'], '290232 KB') self.assertEqual(rdata['USERID1']['shared_memory'], '5222192 KB') self.assertEqual(rdata['USERID2']['guest_cpus'], '3') self.assertEqual(rdata['USERID2']['used_cpu_time'], '14293629 uS') self.assertEqual(rdata['USERID2']['used_memory'], '305020 KB') self.assertEqual(rdata['USERID2']['shared_memory'], '5222190 KB') get_ps.assert_not_called() cache_enabled.assert_not_called() @mock.patch("zvmsdk.monitor.MeteringCache.get") @mock.patch("zvmsdk.smtclient.SMTClient.get_power_state") @mock.patch("zvmsdk.monitor.ZVMMonitor._update_cpumem_data") def test_private_get_inspect_data_cache_miss_single(self, update_cpumem_data, get_ps, cache_get): cache_get.return_value = None get_ps.return_value = 'on' update_cpumem_data.return_value = { 'USERID1': CPUMEM_SAMPLE1, 'USERID2': CPUMEM_SAMPLE2 } rdata = self._monitor._get_inspect_data('cpumem', ['userid1']) get_ps.assert_called_once_with('userid1') update_cpumem_data.assert_called_once_with(['userid1']) self.assertEqual(sorted(rdata.keys()), sorted(['USERID1', 'USERID2'])) self.assertEqual(sorted(rdata['USERID1'].keys()), sorted(CPUMEM_SAMPLE1.keys())) self.assertEqual(rdata['USERID1']['guest_cpus'], '1') self.assertEqual(rdata['USERID1']['used_cpu_time'], '6185838 uS') self.assertEqual(rdata['USERID1']['used_memory'], '290232 KB') @mock.patch("zvmsdk.monitor.MeteringCache.get") @mock.patch("zvmsdk.smtclient.SMTClient.get_power_state") @mock.patch("zvmsdk.monitor.ZVMMonitor._update_cpumem_data") def test_private_get_inspect_data_cache_miss_multi(self, update_cpumem_data, get_ps, cache_get): cache_get.side_effect = [{ 'userid': 'USERID1', 'guest_cpus': '1', 'used_cpu_time': '7185838 uS', 'elapsed_cpu_time': 
'35232895 uS', 'min_cpu_count': '2', 'max_cpu_limit': '10000', 'samples_cpu_in_use': '0', 'samples_cpu_delay': '0', 'used_memory': '390232 KB', 'max_memory': '2097152 KB', 'min_memory': '0 KB', 'shared_memory': '4222192 KB', }, None] get_ps.return_value = 'on' update_cpumem_data.return_value = { 'USERID1': CPUMEM_SAMPLE1, 'USERID2': CPUMEM_SAMPLE2 } rdata = self._monitor._get_inspect_data('cpumem', ['userid1', 'userid2']) get_ps.assert_called_once_with('userid2') update_cpumem_data.assert_called_once_with(['userid1', 'userid2']) self.assertEqual(sorted(rdata.keys()), sorted(['USERID1', 'USERID2'])) self.assertEqual(sorted(rdata['USERID1'].keys()), sorted(CPUMEM_SAMPLE1.keys())) self.assertEqual(rdata['USERID1']['guest_cpus'], '1') self.assertEqual(rdata['USERID1']['used_cpu_time'], '6185838 uS') self.assertEqual(rdata['USERID1']['used_memory'], '290232 KB') self.assertEqual(rdata['USERID1']['shared_memory'], '5222192 KB') @mock.patch("zvmsdk.monitor.MeteringCache.get") @mock.patch("zvmsdk.smtclient.SMTClient.get_power_state") @mock.patch("zvmsdk.monitor.ZVMMonitor._update_cpumem_data") def test_private_get_inspect_data_guest_off(self, update_cpumem_data, get_ps, cache_get): cache_get.return_value = None get_ps.return_value = 'off' rdata = self._monitor._get_inspect_data('cpumem', ['userid1']) get_ps.assert_called_once_with('userid1') update_cpumem_data.assert_not_called() self.assertEqual(rdata, {}) @mock.patch("zvmsdk.monitor.MeteringCache.get") @mock.patch("zvmsdk.smtclient.SMTClient.get_power_state") @mock.patch("zvmsdk.monitor.ZVMMonitor._update_nic_data") def test_private_get_inspect_data_vnics(self, update_nic_data, get_ps, cache_get): cache_get.return_value = None get_ps.return_value = 'on' update_nic_data.return_value = {'USERID1': INST_NICS_SAMPLE1, 'USERID2': INST_NICS_SAMPLE2 } rdata = self._monitor._get_inspect_data('vnics', ['USERID1']) get_ps.assert_called_once_with('USERID1') update_nic_data.assert_called_once_with() self.assertEqual(rdata, 
{'USERID1': INST_NICS_SAMPLE1, 'USERID2': INST_NICS_SAMPLE2 }) @mock.patch("zvmsdk.smtclient.SMTClient.system_image_performance_query") @mock.patch("zvmsdk.monitor.ZVMMonitor._cache_enabled") @mock.patch("zvmsdk.smtclient.SMTClient.get_vm_list") @mock.patch("zvmsdk.smtclient.SMTClient.namelist_query") def test_private_update_cpumem_data_cache_enabled(self, namelist_query, get_vm_list, cache_enabled, image_performance_query): cache_enabled.return_value = True namelist_query.return_value = ['USERID1', 'USERID2'] get_vm_list.return_value = ['USERID1', 'USERID2'] image_performance_query.return_value = { 'USERID1': CPUMEM_SAMPLE1, 'USERID2': CPUMEM_SAMPLE2 } rdata = self._monitor._update_cpumem_data(['userid1']) image_performance_query.assert_called_once_with('TSTNLIST') namelist_query.assert_called_once_with('TSTNLIST') get_vm_list.assert_called_once_with() self.assertEqual(sorted(rdata.keys()), sorted(['USERID1', 'USERID2'])) self.assertEqual(rdata['USERID1']['guest_cpus'], '1') self.assertEqual(rdata['USERID1']['used_cpu_time'], '6185838 uS') self.assertEqual(rdata['USERID1']['used_memory'], '290232 KB') self.assertEqual( self._monitor._cache._cache['cpumem']['data']['USERID2']['guest_cpus'], '3') @mock.patch("zvmsdk.smtclient.SMTClient.system_image_performance_query") @mock.patch("zvmsdk.smtclient.SMTClient.namelist_add") @mock.patch("zvmsdk.smtclient.SMTClient.get_vm_list") @mock.patch("zvmsdk.smtclient.SMTClient.namelist_query") @mock.patch("zvmsdk.monitor.ZVMMonitor._cache_enabled") def test_private_update_cpumem_data_cache_not_in_namelist(self, cache_enabled, namelist_query, get_vm_list, namelist_add, image_performance_query): cache_enabled.return_value = True namelist_query.return_value = ['USERID1'] get_vm_list.return_value = ['USERID1', 'USERID2'] image_performance_query.return_value = { 'USERID1': CPUMEM_SAMPLE1, 'USERID2': CPUMEM_SAMPLE2, } rdata = self._monitor._update_cpumem_data(['USERID1', 'USERID2']) 
image_performance_query.assert_called_once_with('TSTNLIST') namelist_query.assert_called_once_with('TSTNLIST') get_vm_list.assert_called_once_with() namelist_add.assert_called_once_with('TSTNLIST', 'USERID2') self.assertEqual(sorted(rdata.keys()), sorted(['USERID1', 'USERID2'])) self.assertEqual(rdata['USERID1']['guest_cpus'], '1') self.assertEqual(rdata['USERID1']['used_cpu_time'], '6185838 uS') self.assertEqual(rdata['USERID1']['used_memory'], '290232 KB') self.assertEqual( self._monitor._cache._cache['cpumem']['data']['USERID2']['guest_cpus'], '3') @mock.patch("zvmsdk.smtclient.SMTClient.get_vm_list") @mock.patch("zvmsdk.monitor.ZVMMonitor._cache_enabled") @mock.patch("zvmsdk.smtclient.SMTClient.system_image_performance_query") @mock.patch("zvmsdk.smtclient.SMTClient.namelist_query") def test_private_update_cpumem_data_cache_disabled(self, namelist_query, image_perform_query, cache_enabled, get_vm_list): namelist_query.return_value = ['USERID1', 'USERID2'] cache_enabled.return_value = False image_perform_query.return_value = { 'USERID1': CPUMEM_SAMPLE1 } get_vm_list.return_value = ['USERID1', 'USERID2'] rdata = self._monitor._update_cpumem_data(['userid1']) namelist_query.assert_called_once_with('TSTNLIST') get_vm_list.assert_called_once_with() image_perform_query.assert_called_once_with('TSTNLIST') self.assertEqual(list(rdata.keys()), ['USERID1']) self.assertEqual(sorted(rdata['USERID1'].keys()), sorted(CPUMEM_SAMPLE1.keys())) self.assertEqual(rdata['USERID1']['guest_cpus'], '1') self.assertEqual(rdata['USERID1']['used_cpu_time'], '6185838 uS') self.assertEqual(rdata['USERID1']['used_memory'], '290232 KB') self.assertEqual( list(self._monitor._cache._cache['cpumem']['data'].keys()), []) @mock.patch("zvmsdk.monitor.ZVMMonitor._get_inspect_data") def test_inspect_stats_single(self, _get_inspect_data): _get_inspect_data.return_value = { 'USERID1': CPUMEM_SAMPLE1, 'USERID2': CPUMEM_SAMPLE2 } rdata = self._monitor.inspect_stats(['USERID1']) 
_get_inspect_data.assert_called_once_with('cpumem', ['USERID1']) self.assertEqual(sorted(rdata.keys()), sorted(['USERID1'])) self.assertEqual(rdata['USERID1']['guest_cpus'], 1) self.assertEqual(rdata['USERID1']['used_cpu_time_us'], 6185838) self.assertEqual(rdata['USERID1']['elapsed_cpu_time_us'], 35232895) self.assertEqual(rdata['USERID1']['min_cpu_count'], 2) self.assertEqual(rdata['USERID1']['max_cpu_limit'], 10000) self.assertEqual(rdata['USERID1']['used_mem_kb'], 290232) self.assertEqual(rdata['USERID1']['max_mem_kb'], 2097152) self.assertEqual(rdata['USERID1']['min_mem_kb'], 0) self.assertEqual(rdata['USERID1']['shared_mem_kb'], 5222192) @mock.patch("zvmsdk.monitor.ZVMMonitor._get_inspect_data") def test_inspect_stats_multi(self, _get_inspect_data): _get_inspect_data.return_value = { 'USERID1': CPUMEM_SAMPLE1, 'USERID2': CPUMEM_SAMPLE2 } rdata = self._monitor.inspect_stats(['USERID1', 'USERID2']) _get_inspect_data.assert_called_once_with('cpumem', ['USERID1', 'USERID2']) self.assertEqual(sorted(rdata.keys()), sorted(['USERID1', 'USERID2'])) self.assertEqual(rdata['USERID1']['guest_cpus'], 1) self.assertEqual(rdata['USERID1']['used_cpu_time_us'], 6185838) self.assertEqual(rdata['USERID1']['elapsed_cpu_time_us'], 35232895) self.assertEqual(rdata['USERID1']['min_cpu_count'], 2) self.assertEqual(rdata['USERID1']['max_cpu_limit'], 10000) self.assertEqual(rdata['USERID2']['guest_cpus'], 3) self.assertEqual(rdata['USERID2']['used_cpu_time_us'], 14293629) self.assertEqual(rdata['USERID2']['elapsed_cpu_time_us'], 4868976371) self.assertEqual(rdata['USERID2']['min_cpu_count'], 3) self.assertEqual(rdata['USERID2']['max_cpu_limit'], 10000) self.assertEqual(rdata['USERID1']['used_mem_kb'], 290232) self.assertEqual(rdata['USERID1']['max_mem_kb'], 2097152) self.assertEqual(rdata['USERID1']['min_mem_kb'], 0) self.assertEqual(rdata['USERID1']['shared_mem_kb'], 5222192) self.assertEqual(rdata['USERID2']['used_mem_kb'], 305020) self.assertEqual(rdata['USERID2']['max_mem_kb'], 
2097152) self.assertEqual(rdata['USERID2']['min_mem_kb'], 0) self.assertEqual(rdata['USERID2']['shared_mem_kb'], 5222190) @mock.patch("zvmsdk.monitor.ZVMMonitor._get_inspect_data") def test_inspect_stats_single_off_or_not_exist(self, _get_inspect_data): _get_inspect_data.return_value = { 'USERID2': CPUMEM_SAMPLE2 } rdata = self._monitor.inspect_stats(['userid1']) _get_inspect_data.assert_called_once_with('cpumem', ['userid1']) self.assertEqual(rdata, {}) @mock.patch("zvmsdk.monitor.ZVMMonitor._get_inspect_data") def test_inspect_stats_multi_off_or_not_exist(self, _get_inspect_data): _get_inspect_data.return_value = { 'USERID1': CPUMEM_SAMPLE1 } rdata = self._monitor.inspect_stats(['USERID1', 'USERID2']) _get_inspect_data.assert_called_once_with('cpumem', ['USERID1', 'USERID2']) self.assertEqual(sorted(rdata.keys()), sorted(['USERID1'])) self.assertEqual(rdata['USERID1']['guest_cpus'], 1) self.assertEqual(rdata['USERID1']['used_cpu_time_us'], 6185838) self.assertEqual(rdata['USERID1']['elapsed_cpu_time_us'], 35232895) self.assertEqual(rdata['USERID1']['min_cpu_count'], 2) self.assertEqual(rdata['USERID1']['max_cpu_limit'], 10000) self.assertEqual(rdata['USERID1']['used_mem_kb'], 290232) self.assertEqual(rdata['USERID1']['max_mem_kb'], 2097152) self.assertEqual(rdata['USERID1']['min_mem_kb'], 0) self.assertEqual(rdata['USERID1']['shared_mem_kb'], 5222192) @mock.patch("zvmsdk.smtclient.SMTClient" ".virtual_network_vswitch_query_byte_stats") @mock.patch("zvmsdk.monitor.ZVMMonitor._cache_enabled") def test_private_update_nic_data(self, cache_enabled, smcli_iuo_query): smcli_iuo_query.return_value = SMCLI_VSW_NIC_DATA cache_enabled.return_value = True nics_dict = self._monitor._update_nic_data() self.assertEqual(sorted(["USERID1", "USERID2"]), sorted(nics_dict.keys())) self.assertEqual(nics_dict['USERID1'], INST_NICS_SAMPLE1) self.assertEqual(nics_dict['USERID2'], INST_NICS_SAMPLE2) self.assertEqual(self._monitor._cache.get('vnics', 'USERID1'), INST_NICS_SAMPLE1) 
self.assertEqual(self._monitor._cache.get('vnics', 'USERID2'), INST_NICS_SAMPLE2) @mock.patch("zvmsdk.smtclient.SMTClient" ".virtual_network_vswitch_query_byte_stats") @mock.patch("zvmsdk.monitor.ZVMMonitor._cache_enabled") def test_private_update_nic_data_cache_disabled(self, cache_enabled, smcli_iuo_query): smcli_iuo_query.return_value = SMCLI_VSW_NIC_DATA cache_enabled.return_value = False nics_dict = self._monitor._update_nic_data() self.assertEqual(sorted(["USERID1", "USERID2"]), sorted(nics_dict.keys())) self.assertEqual(self._monitor._cache.get('vnics', 'USERID1'), None) self.assertEqual(self._monitor._cache.get('vnics', 'USERID2'), None) zVMCloudConnector-1.6.3/zvmsdk/tests/unit/test_database.py0000775000175000017510000026170114315210052023341 0ustar ruirui00000000000000# Copyright 2017, 2022 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock
import uuid
import random

from zvmsdk import utils
from zvmsdk import config
from zvmsdk import database
from zvmsdk import exception
from zvmsdk import log
from zvmsdk.tests.unit import base


CONF = config.CONF
LOG = log.LOG


class NetworkDbOperatorTestCase(base.SDKTestCase):
    """Tests for database.NetworkDbOperator (the 'switch' table)."""

    @classmethod
    def setUpClass(cls):
        super(NetworkDbOperatorTestCase, cls).setUpClass()
        cls.db_op = database.NetworkDbOperator()
        cls.userid = 'FAKEUSER'
        # (userid, interface, port) rows shared by several tests below
        cls.rec_list = [('ID01', '1000', 'port_id01'),
                        ('ID01', '2000', 'port_id02'),
                        ('ID02', '1000', 'port_id02'),
                        ('ID03', '1000', 'port_id03')]

    @classmethod
    def tearDownClass(cls):
        # Drop the test table so a later run starts from a clean database.
        with database.get_network_conn() as conn:
            conn.execute("DROP TABLE switch")
        super(NetworkDbOperatorTestCase, cls).tearDownClass()

    @mock.patch.object(database.NetworkDbOperator, '_create_switch_table')
    def test__init__(self, create_table):
        # The constructor must (re)create the switch table exactly once.
        self.db_op.__init__()
        create_table.assert_called_once_with()

    def test_switch_add_record(self):
        interface = '1000'
        port = None
        # insert a record without port
        self.db_op.switch_add_record(self.userid, interface, port)

        # query
        switch_record = self.db_op.switch_select_table()
        expected = [{'userid': self.userid, 'interface': interface,
                     'switch': None, 'port': port, 'comments': None}]
        self.assertEqual(expected, switch_record)

        # clean test switch
        self.db_op.switch_delete_record_for_userid(self.userid)

        port = 'testport'
        # insert a record with port
        self.db_op.switch_add_record(self.userid, interface, port)

        # query
        switch_record = self.db_op.switch_select_table()
        expected = [{'userid': self.userid, 'interface': interface,
                     'switch': None, 'port': port, 'comments': None}]
        self.assertEqual(expected, switch_record)

        # clean test switch
        self.db_op.switch_delete_record_for_userid(self.userid)
        switch_record = self.db_op.switch_select_table()
        expected = []
        self.assertEqual(expected, switch_record)

    def test_switch_add_record_migrated(self):
        # A migrated record is inserted with its switch already set.
        interface = '1000'
        switch = 'XCATVSW1'
        self.db_op.switch_add_record_migrated(self.userid, interface,
                                              switch)
# query switch_record = self.db_op.switch_select_table() expected = [{'userid': self.userid, 'interface': interface, 'switch': switch, 'port': None, 'comments': None}] self.assertEqual(expected, switch_record) # clean test switch self.db_op.switch_delete_record_for_userid(self.userid) @mock.patch.object(database.NetworkDbOperator, '_get_switch_by_user_interface') def test_switch_update_record_with_switch_fail(self, get_record): get_record.return_value = None interface = '1000' switch = 'testswitch' self.assertRaises(exception.SDKObjectNotExistError, self.db_op.switch_update_record_with_switch, self.userid, interface, switch) def test_switch_update_record_with_switch(self): interface = '1000' port = 'testport' switch = 'testswitch' # insert a record first self.db_op.switch_add_record(self.userid, interface, port) # update record with switch info self.db_op.switch_update_record_with_switch(self.userid, interface, switch) # query switch_record = self.db_op.switch_select_table() expected = [{'userid': self.userid, 'interface': interface, 'switch': switch, 'port': port, 'comments': None}] self.assertEqual(expected, switch_record) switch = None # update record to remove switch info self.db_op.switch_update_record_with_switch(self.userid, interface, switch) # query switch_record = self.db_op.switch_select_table() expected = [{'userid': self.userid, 'interface': interface, 'switch': switch, 'port': port, 'comments': None}] self.assertEqual(expected, switch_record) # clean test switch self.db_op.switch_delete_record_for_userid(self.userid) switch_record = self.db_op.switch_select_table() expected = [] self.assertEqual(expected, switch_record) def test_switch_delete_record_for_userid(self): # insert multiple records for (userid, interface, port) in self.rec_list: self.db_op.switch_add_record(userid, interface, port) self.addCleanup(self.db_op.switch_delete_record_for_userid, userid) # delete specific records userid = 'ID01' self.db_op.switch_delete_record_for_userid(userid) 
        # query: specific records removed
        switch_record = self.db_op.switch_select_record_for_userid(userid)
        expected = []
        self.assertEqual(expected, switch_record)

        # query: the other records still exist
        switch_record = self.db_op.switch_select_record_for_userid('ID02')
        expected = [{'userid': 'ID02', 'interface': '1000', 'switch': None,
                     'port': 'port_id02', 'comments': None}]
        self.assertEqual(expected, switch_record)
        switch_record = self.db_op.switch_select_record_for_userid('ID03')
        expected = [{'userid': 'ID03', 'interface': '1000', 'switch': None,
                     'port': 'port_id03', 'comments': None}]
        self.assertEqual(expected, switch_record)

    def test_switch_delete_record_for_nic(self):
        # insert multiple records
        for (userid, interface, port) in self.rec_list:
            self.db_op.switch_add_record(userid, interface, port)
            self.addCleanup(self.db_op.switch_delete_record_for_userid,
                            userid)

        # query: specific record in the table
        record = {'userid': 'ID01', 'interface': '1000', 'switch': None,
                  'port': 'port_id01', 'comments': None}
        switch_record = self.db_op.switch_select_table()
        self.assertEqual(record in switch_record, True)

        # delete one specific record
        userid = 'ID01'
        interface = '1000'
        self.db_op.switch_delete_record_for_nic(userid, interface)

        # query: specific record not in the table
        switch_record = self.db_op.switch_select_table()
        self.assertEqual(record not in switch_record, True)

        # clean test switch
        self.db_op.switch_delete_record_for_userid('ID01')
        self.db_op.switch_delete_record_for_userid('ID02')
        self.db_op.switch_delete_record_for_userid('ID03')
        switch_record = self.db_op.switch_select_table()
        expected = []
        self.assertEqual(expected, switch_record)

    def test_switch_select_table(self):
        # empty table
        switch_record = self.db_op.switch_select_table()
        expected = []
        self.assertEqual(expected, switch_record)

        # insert multiple records
        for (userid, interface, port) in self.rec_list:
            self.db_op.switch_add_record(userid, interface, port)
            self.addCleanup(self.db_op.switch_delete_record_for_userid,
                            userid)

        # query: specific record in the table
        record = [{'userid': 'ID01', 'interface': '1000', 'switch': None,
                   'port': 'port_id01', 'comments': None},
                  {'userid': 'ID01', 'interface': '2000', 'switch': None,
                   'port': 'port_id02', 'comments': None},
                  {'userid': 'ID02', 'interface': '1000', 'switch': None,
                   'port': 'port_id02', 'comments': None},
                  {'userid': 'ID03', 'interface': '1000', 'switch': None,
                   'port': 'port_id03', 'comments': None}]
        switch_record = self.db_op.switch_select_table()
        self.assertEqual(record, switch_record)

        # clean test switch
        self.db_op.switch_delete_record_for_userid('ID01')
        self.db_op.switch_delete_record_for_userid('ID02')
        self.db_op.switch_delete_record_for_userid('ID03')
        switch_record = self.db_op.switch_select_table()
        expected = []
        self.assertEqual(expected, switch_record)

    def test_switch_select_record_for_userid(self):
        # insert multiple records
        for (userid, interface, port) in self.rec_list:
            self.db_op.switch_add_record(userid, interface, port)
            self.addCleanup(self.db_op.switch_delete_record_for_userid,
                            userid)

        # query: specific record in the table
        record = [{'userid': 'ID01', 'interface': '1000', 'switch': None,
                   'port': 'port_id01', 'comments': None},
                  {'userid': 'ID01', 'interface': '2000', 'switch': None,
                   'port': 'port_id02', 'comments': None}]
        switch_record = self.db_op.switch_select_record_for_userid('ID01')
        self.assertEqual(record, switch_record)

        # clean test switch
        self.db_op.switch_delete_record_for_userid('ID01')
        self.db_op.switch_delete_record_for_userid('ID02')
        self.db_op.switch_delete_record_for_userid('ID03')
        switch_record = self.db_op.switch_select_table()
        expected = []
        self.assertEqual(expected, switch_record)

    def test_switch_select_record(self):
        # insert multiple records
        for (userid, interface, port) in self.rec_list:
            self.db_op.switch_add_record(userid, interface, port)
            self.addCleanup(self.db_op.switch_delete_record_for_userid,
                            userid)

        # all record
        record = [{'userid': 'ID01', 'interface': '1000',
                   'switch': 'switch01',
                   'port': 'port_id01', 'comments': None},
                  {'userid': 'ID01', 'interface': '2000',
                   'switch': 'switch01',
                   'port': 'port_id02', 'comments': None},
                  {'userid': 'ID02', 'interface': '1000',
                   'switch': 'switch02',
                   'port': 'port_id02', 'comments': None},
                  {'userid': 'ID03', 'interface': '1000',
                   'switch': 'switch02',
                   'port': 'port_id03', 'comments': None}]
        # update record with switch info
        self.db_op.switch_update_record_with_switch('ID01', '1000',
                                                    'switch01')
        self.db_op.switch_update_record_with_switch('ID01', '2000',
                                                    'switch01')
        self.db_op.switch_update_record_with_switch('ID02', '1000',
                                                    'switch02')
        self.db_op.switch_update_record_with_switch('ID03', '1000',
                                                    'switch02')
        # query with the different supported filters: no filter,
        # by userid, by nic_id, by vswitch, and by (nic_id, vswitch)
        switch_record = self.db_op.switch_select_record()
        self.assertEqual(record, switch_record)
        switch_record = self.db_op.switch_select_record(userid='ID01')
        self.assertEqual([record[0], record[1]], switch_record)
        switch_record = self.db_op.switch_select_record(nic_id='port_id02')
        self.assertEqual([record[1], record[2]], switch_record)
        switch_record = self.db_op.switch_select_record(vswitch='switch02')
        self.assertEqual([record[2], record[3]], switch_record)
        switch_record = self.db_op.switch_select_record(nic_id='port_id02',
                                                        vswitch='switch02')
        self.assertEqual([record[2]], switch_record)

        # clean test switch
        self.db_op.switch_delete_record_for_userid('ID01')
        self.db_op.switch_delete_record_for_userid('ID02')
        self.db_op.switch_delete_record_for_userid('ID03')
        switch_record = self.db_op.switch_select_table()
        expected = []
        self.assertEqual(expected, switch_record)


class FCPDbOperatorTestCase(base.SDKTestCase):
    """Tests for database.FCPDbOperator and the FCP related tables."""

    @classmethod
    def setUpClass(cls):
        super(FCPDbOperatorTestCase, cls).setUpClass()
        cls.db_op = database.FCPDbOperator()

    # tearDownClass deleted to work around bug of 'no such table:fcp'

    def get_path_of_fcp(self, fcp_id, fcp_template_id):
        # Helper: read the 'path' column of one (fcp, template) mapping.
        with database.get_fcp_conn() as conn:
            result = conn.execute("SELECT path FROM template_fcp_mapping "
                                  "WHERE fcp_id=? and tmpl_id=?",
                                  (fcp_id, fcp_template_id))
            path_info = result.fetchone()
            return path_info['path']

    def _insert_data_into_fcp_table(self, fcp_info_list):
        # insert data into all columns of fcp table
        with database.get_fcp_conn() as conn:
            conn.executemany("INSERT INTO fcp "
                             "(fcp_id, assigner_id, connections, "
                             "reserved, wwpn_npiv, wwpn_phy, chpid, "
                             "state, owner, tmpl_id) VALUES "
                             "(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
                             fcp_info_list)

    def _insert_data_into_template_table(self, templates_info):
        # insert data into all columns of template table
        with database.get_fcp_conn() as conn:
            conn.executemany(
                "INSERT INTO template "
                "(id, name, description, is_default, min_fcp_paths_count) "
                "VALUES (?, ?, ?, ?, ?)",
                templates_info)

    def _insert_data_into_template_fcp_mapping_table(self,
                                                     template_fcp_mapping):
        # insert data into all columns of template_fcp_mapping table
        with database.get_fcp_conn() as conn:
            conn.executemany("INSERT INTO template_fcp_mapping "
                             "(fcp_id, tmpl_id, path) "
                             "VALUES (?, ?, ?)",
                             template_fcp_mapping)

    def _insert_data_into_template_sp_mapping_table(self,
                                                    template_sp_mapping):
        # insert data into all columns of template_sp_mapping table
        with database.get_fcp_conn() as conn:
            conn.executemany("INSERT INTO template_sp_mapping "
                             "(sp_name, tmpl_id) "
                             "VALUES (?, ?)",
                             template_sp_mapping)

    def _prepare_fcp_info_for_a_test_fcp_template(self):
        """ Prepare FCP device info for test

        1. create a FCP Multipath Template with fcp_devices
        2. set some of the fcp_devices as inuse

        Note:
        Remember to do cleanup after using the func

        Example code-block:
            try:
                tid = self._prepare_fcp_info_for_a_test_fcp_template()
                # do some thing with tid
                test_my_function()
            finally:
                # clean up by call _purge_fcp_db()
                _purge_fcp_db()
        """
        # a. create_fcp_template
        # b. bulk_insert_zvm_fcp_info_into_fcp_table
        #    insert '1A00-1A03;1B00-1B03'
        # c. reserve_fcps
        #    set assigner_id and tmpl_id
        #    ('1a01', '1b03')
        # d. increase_connections
        #    set connections
        #    ('1a01', '1b03')
        # random suffix keeps concurrent/repeated runs from colliding
        tmpl_id = 'fake_id_' + str(random.randint(100000, 999999))
        kwargs = {
            'name': 'new_name',
            'description': 'new_desc',
            'fcp_devices': '1A00-1A03;1B00-1B03',
            'host_default': False,
            'default_sp_list': []}
        self.db_op.create_fcp_template(
            tmpl_id,
            kwargs['name'],
            kwargs['description'],
            utils.expand_fcp_list(kwargs['fcp_devices']),
            host_default=kwargs['host_default'],
            default_sp_list=kwargs['default_sp_list'],
            min_fcp_paths_count=2)
        fcp_info = [
            ('1a01', 'wwpn_npiv_1', 'wwpn_phy_1', '27', 'active', 'user1'),
            ('1b03', 'wwpn_npiv_1', 'wwpn_phy_1', '27', 'active', 'user1')]
        # set FCP ('1a01', '1b03') as inuse
        try:
            self.db_op.bulk_insert_zvm_fcp_info_into_fcp_table(fcp_info)
        except exception.SDKGuestOperationError as ex:
            # rows may already exist from an earlier run; that is fine
            if 'UNIQUE constraint failed' in str(ex):
                pass
            else:
                raise
        reserve_info = (('1a01', '1b03'), 'user1', tmpl_id)
        self.db_op.reserve_fcps(*reserve_info)
        self.increase_connections('1a01')
        self.increase_connections('1b03')
        return tmpl_id

    @staticmethod
    def increase_connections(fcp_id):
        """Increase the connections by 1 of a given FCP device"""
        with database.get_fcp_conn() as conn:
            result = conn.execute("SELECT * FROM fcp WHERE "
                                  "fcp_id=?", (fcp_id,))
            fcp_info = result.fetchone()
            if not fcp_info:
                msg = 'FCP device %s does not exist in FCP DB.' % fcp_id
                LOG.error(msg)
                obj_desc = "FCP device %s" % fcp_id
                raise exception.SDKObjectNotExistError(obj_desc=obj_desc,
                                                       modID='volume')
            connections = fcp_info['connections'] + 1
            conn.execute("UPDATE fcp SET connections=? "
                         "WHERE fcp_id=?", (connections, fcp_id))
            # check the result
            result = conn.execute("SELECT connections FROM fcp "
                                  "WHERE fcp_id=?", (fcp_id,))
            connections = result.fetchone()['connections']
            return connections

    @staticmethod
    def _purge_fcp_db():
        """ Delete all records in the fcp related tables """
        with database.get_fcp_conn() as conn:
            conn.execute("DELETE FROM fcp")
            conn.execute("DELETE FROM template")
            conn.execute("DELETE FROM template_fcp_mapping")
            conn.execute("DELETE FROM template_sp_mapping")

    #########################################################
    #               Test cases for Table fcp                #
    #########################################################
    def test_unreserve_fcps(self):
        """Test API unreserve_fcps"""
        # pre create data in FCP DB for test
        template_id = 'fakehost-1111-1111-1111-111111111111'
        fcp_info_list = [('1111', '', 0, 0, 'c05076de33000111',
                          'c05076de33002641', '27', 'active', 'user1',
                          template_id),
                         ('2222', '', 0, 1, 'c05076de33000222',
                          'c05076de33002641', '27', 'active', 'user1',
                          template_id),
                         ('3333', '', 0, 1, 'c05076de33000333',
                          'c05076de33002641', '27', 'active', 'user1',
                          template_id)]
        fcp_id_list = [fcp_info[0] for fcp_info in fcp_info_list]
        # delete dirty data from other test cases
        self.db_op.bulk_delete_from_fcp_table(fcp_id_list)
        self._insert_data_into_fcp_table(fcp_info_list)
        # test API function
        try:
            self.db_op.unreserve_fcps(fcp_id_list)
            userid, reserved, conn, tmpl_id = self.db_op.get_usage_of_fcp('1111')
            # check default values of (assigner_id, connections) are correct
            self.assertEqual('', userid)
            self.assertEqual(0, conn)
            # check reserved value
            self.assertEqual(0, reserved)
            # tmpl_id set to ''
            self.assertEqual('', tmpl_id)
            userid, reserved, conn, tmpl_id = self.db_op.get_usage_of_fcp('2222')
            self.assertEqual('', userid)
            self.assertEqual(0, conn)
            self.assertEqual(0, reserved)
            self.assertEqual('', tmpl_id)
            userid, reserved, conn, tmpl_id = self.db_op.get_usage_of_fcp('3333')
            self.assertEqual('', userid)
            self.assertEqual(0, conn)
            self.assertEqual(0,
                             reserved)
            self.assertEqual('', tmpl_id)
        finally:
            self.db_op.bulk_delete_from_fcp_table(fcp_id_list)

    def test_reserve_fcps(self):
        """Test API reserve_fcps"""
        # placeholder: not implemented yet
        pass

    def test_bulk_insert(self):
        """Test API bulk_insert_zvm_fcp_info_into_fcp_table"""
        pass

    def test_bulk_delete(self):
        """Test API bulk_delete_from_fcp_table"""
        pass

    def test_bulk_update_fcp_info(self):
        """Test API bulk_update_zvm_fcp_info_in_fcp_table"""
        pass

    def test_bulk_update_fcp_state(self):
        """Test API bulk_update_state_in_fcp_table"""
        pass

    def test_get_all_fcps_of_assigner(self):
        """Test API get_all_fcps_of_assigner with assigner_id parameter"""
        # pre create data in FCP DB for test
        template_id = 'fakehost-1111-1111-1111-111111111111'
        # NOTE: the bare string below is a no-op expression kept as
        # in-line documentation of the fcp_info_list item layout.
        """ format of item in fcp_info_list:
            (fcp_id, assigner_id, connections, reserved,
             wwpn_npiv, wwpn_phy, chpid, state, owner, tmpl_id)
        """
        fcp_info_list = [('1111', 'user1', 0, 0, 'c05076de33000111',
                          'c05076de33002641', '27', 'active', 'owner1',
                          template_id),
                         ('2222', 'user2', 0, 0, 'c05076de33000222',
                          'c05076de33002641', '27', 'active', 'owner2',
                          template_id)]
        fcp_id_list = [fcp_info[0] for fcp_info in fcp_info_list]
        # delete dirty data from other test cases
        self.db_op.bulk_delete_from_fcp_table(fcp_id_list)
        # insert new test data
        self._insert_data_into_fcp_table(fcp_info_list)
        # test API function
        try:
            # case 1, the assigner not specified
            res = self.db_op.get_all_fcps_of_assigner()
            # Format of return is like:
            # [(fcp_id, userid, connections, reserved, wwpn_npiv, wwpn_phy,
            #   chpid, state, owner, tmpl_id), (...)].
            self.assertEqual(len(res), 2)
            self.assertEqual(len(res[0]), 10)
            # connections == 0
            self.assertEqual(res[0][2], 0)
            # case 2, specify an assigner_id
            res = self.db_op.get_all_fcps_of_assigner(assigner_id='user2')
            self.assertEqual(len(res), 1)
            self.assertEqual(len(res[0]), 10)
            self.assertEqual(res[0][1], 'user2')
        finally:
            self.db_op.bulk_delete_from_fcp_table(fcp_id_list)

    def test_get_all_fcps_exception(self):
        """Test API get_all_fcps_of_assigner when no data in DB"""
        self.assertRaises(exception.SDKObjectNotExistError,
                          self.db_op.get_all_fcps_of_assigner,
                          None)

    def test_get_usage_of_fcp(self):
        """Test API get_usage_of_fcp"""
        # pre create data in FCP DB for test
        template_id = 'fakehost-1111-1111-1111-111111111111'
        fcp_info_list = [('1111', '', 2, 1, 'c05076de33000111',
                          'c05076de33002641', '27', 'active', 'user1',
                          template_id)]
        fcp_id_list = [fcp_info[0] for fcp_info in fcp_info_list]
        # delete dirty data from other test cases
        self.db_op.bulk_delete_from_fcp_table(fcp_id_list)
        # insert new test data
        self._insert_data_into_fcp_table(fcp_info_list)
        try:
            userid, reserved, conn, tmpl_id = self.db_op.get_usage_of_fcp('1111')
            self.assertEqual('', userid)
            self.assertEqual(2, conn)
            self.assertEqual(1, reserved)
            self.assertEqual(template_id, tmpl_id)
        finally:
            self.db_op.bulk_delete_from_fcp_table(fcp_id_list)

    def test_update_usage_of_fcp(self):
        """Test API update_usage_of_fcp"""
        # pre create data in FCP DB for test
        template_id = 'fakehost-1111-1111-1111-111111111111'
        fcp_info_list = [('1111', 'user2', 1, 1, 'c05076de33000111',
                          'c05076de33002641', '27', 'active', 'user1',
                          template_id)]
        fcp_id_list = [fcp_info[0] for fcp_info in fcp_info_list]
        # delete dirty data from other test cases
        self.db_op.bulk_delete_from_fcp_table(fcp_id_list)
        # insert new test data
        self._insert_data_into_fcp_table(fcp_info_list)
        try:
            # update reserved to 1, connection to 2, assigner_id to user2
            new_tmpl_id = 'newhost-1111-1111-1111-111111111111'
            self.db_op.update_usage_of_fcp('1111', 'user2', 1, 2,
                                           new_tmpl_id)
            userid, reserved, conn, tmpl_id = self.db_op.get_usage_of_fcp('1111')
            self.assertEqual('user2', userid)
            self.assertEqual(1, reserved)
            self.assertEqual(2, conn)
            self.assertEqual(new_tmpl_id, tmpl_id)
        finally:
            self.db_op.bulk_delete_from_fcp_table(fcp_id_list)

    def test_decrease_connections_of_not_exist_fcp(self):
        """Test API decrease_connections when fcp_id not exist"""
        self.assertRaises(exception.SDKObjectNotExistError,
                          self.db_op.decrease_connections, 'xxxx')

    def test_decrease_connections_no_connections(self):
        """Test API decrease_connections when connections is 0"""
        template_id = 'fakehost-1111-1111-1111-111111111111'
        fcp_info_list = [('1111', '', 0, 1, 'c05076de33000111',
                          'c05076de33002641', '27', 'active', 'user1',
                          template_id)]
        fcp_id_list = [fcp_info[0] for fcp_info in fcp_info_list]
        # delete dirty data from other test cases
        self.db_op.bulk_delete_from_fcp_table(fcp_id_list)
        # insert new test data
        self._insert_data_into_fcp_table(fcp_info_list)
        # decrease when connections == 0
        self.assertRaises(exception.SDKObjectNotExistError,
                          self.db_op.decrease_connections, '1111')
        self.db_op.bulk_delete_from_fcp_table(fcp_id_list)

    def test_decrease_connections(self):
        """Test API decrease_connections"""
        # pre create data in FCP DB for test
        template_id = 'fakehost-1111-1111-1111-111111111111'
        fcp_info_list = [('1111', '', 2, 1, 'c05076de33000111',
                          'c05076de33002641', '27', 'active', 'owner1',
                          template_id)]
        fcp_id_list = [fcp_info[0] for fcp_info in fcp_info_list]
        # delete dirty data from other test cases
        self.db_op.bulk_delete_from_fcp_table(fcp_id_list)
        # insert new test data
        self._insert_data_into_fcp_table(fcp_info_list)
        try:
            self.db_op.decrease_connections('1111')
            userid, reserved, conn, tmpl_id = self.db_op.get_usage_of_fcp('1111')
            self.assertEqual('', userid)
            self.assertEqual(1, conn)
            self.assertEqual(1, reserved)
            self.assertEqual(template_id, tmpl_id)
        finally:
            self.db_op.bulk_delete_from_fcp_table(fcp_id_list)

    def test_get_connections_from_fcp(self):
        """Test API get_connections_from_fcp"""
        # pre create data in FCP DB for test
        template_id = 'fakehost-1111-1111-1111-111111111111'
        fcp_info_list = [('1111', '', 2, 1, 'c05076de33000111',
                          'c05076de33002641', '27', 'active', 'user1',
                          template_id),
                         ('2222', '', 3, 1, 'c05076de33000222',
                          'c05076de33002641', '27', 'active', 'user1',
                          '')]
        fcp_id_list = [fcp_info[0] for fcp_info in fcp_info_list]
        # delete dirty data from other test cases
        self.db_op.bulk_delete_from_fcp_table(fcp_id_list)
        # insert new test data
        self._insert_data_into_fcp_table(fcp_info_list)
        try:
            conn = self.db_op.get_connections_from_fcp('1111')
            self.assertEqual(2, conn)
            conn = self.db_op.get_connections_from_fcp('2222')
            self.assertEqual(3, conn)
        finally:
            self.db_op.bulk_delete_from_fcp_table(fcp_id_list)

    def test_get_all(self):
        pass

    def test_get_inuse_fcp_device_by_fcp_template(self):
        """ Test get_inuse_fcp_device_by_fcp_template """
        try:
            # prepare test data by set inuse FCP ('1a01', '1b03')
            tmpl_id = self._prepare_fcp_info_for_a_test_fcp_template()
            expected = {'1a01', '1b03'}
            fcps = self.db_op.get_inuse_fcp_device_by_fcp_template(tmpl_id)
            result = {f['fcp_id'] for f in fcps}
            self.assertEqual(expected, result)
        finally:
            self._purge_fcp_db()

    #########################################################
    #      Test cases for Table template_fcp_mapping        #
    #########################################################
    def test_update_path_of_fcp_device(self):
        """Test API update_path_of_fcp_device"""
        # pre create data in FCP DB for test
        template_id = 'fakehost-1111-1111-1111-111111111111'
        template_fcp = [('1111', template_id, 1),
                        ('2222', template_id, 2)]
        fcp_id_list = [fcp_info[0] for fcp_info in template_fcp]
        # delete dirty data from other test cases
        self.db_op.bulk_delete_fcp_from_template(fcp_id_list, template_id)
        self._insert_data_into_template_fcp_mapping_table(template_fcp)
        try:
            # move FCP 1111 to path 3 and read the column back
            self.db_op.update_path_of_fcp_device((3, '1111', template_id))
            path = self.get_path_of_fcp('1111', template_id)
            self.assertEqual(3, path)
        finally:
            self.db_op.bulk_delete_fcp_from_template(fcp_id_list,
                                                     template_id)

    def test_bulk_delete_fcp_from_template(self):
        """Test API bulk_delete_fcp_from_template"""
        pass

    def test_get_path_count(self):
        pass

    def test_get_fcp_list_of_template(self):
        pass

    def test_bulk_delete_fcp_device_from_fcp_template(self):
        """ Test bulk_delete_fcp_device_from_fcp_template """
        try:
            # prepare test data by create a template
            # with FCP devices as 1A00-1A03;1B00-1B03
            tmpl_id = self._prepare_fcp_info_for_a_test_fcp_template()
            # after deleting 1a02/1b02 only these should remain
            expected = {'1a00', '1a01', '1a03',
                        '1b00', '1b01', '1b03'}
            rec = ((tmpl_id, '1a02'),
                   (tmpl_id, '1b02'))
            self.db_op.bulk_delete_fcp_device_from_fcp_template(rec)
            # verify
            _, fcp_detail = self.db_op.get_fcp_templates_details([tmpl_id])
            fcp_in_db = {f['fcp_id'] for f in fcp_detail}
            self.assertEqual(expected, fcp_in_db)
        finally:
            self._purge_fcp_db()

    def test_bulk_insert_fcp_device_into_fcp_template(self):
        """ Test bulk_insert_fcp_device_into_fcp_template """
        try:
            # prepare test data by create a template
            # with FCP devices as 1A00-1A03;1B00-1B03
            tmpl_id = self._prepare_fcp_info_for_a_test_fcp_template()
            expected = {
                '1a00', '1a01', '1a02', '1a03', '1a04',
                '1b00', '1b01', '1b02', '1b03', '1b04'}
            # bulk insert FCPs 1a04,1b04
            rec = ((tmpl_id, '1a04', 0),
                   (tmpl_id, '1b04', 1))
            self.db_op.bulk_insert_fcp_device_into_fcp_template(rec)
            # verify
            _, fcp_detail = self.db_op.get_fcp_templates_details([tmpl_id])
            fcp_in_db = {f['fcp_id'] for f in fcp_detail}
            self.assertEqual(expected, fcp_in_db)
        finally:
            self._purge_fcp_db()

    #########################################################
    #            Test cases for Table template              #
    #########################################################
    def test_fcp_template_exist_in_db(self):
        pass

    def test_update_basic_info_of_fcp_template(self):
        """ Test update_basic_info_of_fcp_template """
        try:
            # prepare test data by create 2 templates
            # with is_default as False
            tmpl_id_1 = self._prepare_fcp_info_for_a_test_fcp_template()
            tmpl_id_2 = self._prepare_fcp_info_for_a_test_fcp_template()
            # case1:
            # set tmpl_id_1 is_default as True
            expected_1 = ('name1', 'desc1', True, 2, tmpl_id_1)
            self.db_op.update_basic_info_of_fcp_template(expected_1)
            # verify
            info_1 = self.db_op.get_fcp_templates_details([tmpl_id_1])[0][0]
            result_1 = (
                info_1['name'], info_1['description'],
                bool(info_1['is_default']),
                info_1['min_fcp_paths_count'],
                tmpl_id_1)
            self.assertEqual(expected_1, result_1)
            info_2 = self.db_op.get_fcp_templates_details([tmpl_id_2])[0][0]
            self.assertEqual(False, bool(info_2['is_default']))
            # case2:
            # set tmpl_id_2 is_default as True
            expected_2 = ('name2', 'desc2', True, 2, tmpl_id_2)
            self.db_op.update_basic_info_of_fcp_template(expected_2)
            # verify
            info_2 = self.db_op.get_fcp_templates_details([tmpl_id_2])[0][0]
            result_2 = (
                info_2['name'], info_2['description'],
                bool(info_2['is_default']),
                info_2['min_fcp_paths_count'],
                tmpl_id_2)
            self.assertEqual(expected_2, result_2)
            info_1 = self.db_op.get_fcp_templates_details([tmpl_id_1])[0][0]
            self.assertEqual(False, bool(info_1['is_default']))
            # case3:
            # set both tmpl_id_1 and tmpl_id_2 as False for is_default
            expected_1 = ('name1', 'desc1', False, 2, tmpl_id_1)
            expected_2 = ('name2', 'desc2', False, 4, tmpl_id_2)
            self.db_op.update_basic_info_of_fcp_template(expected_1)
            self.db_op.update_basic_info_of_fcp_template(expected_2)
            info_1 = self.db_op.get_fcp_templates_details([tmpl_id_1])[0][0]
            self.assertEqual(False, bool(info_1['is_default']))
            info_2 = self.db_op.get_fcp_templates_details([tmpl_id_2])[0][0]
            self.assertEqual(False, bool(info_2['is_default']))
        finally:
            self._purge_fcp_db()

    #########################################################
    #        Test cases for Table template_sp_mapping       #
    #########################################################
    def test_sp_name_exist_in_db(self):
        pass

    def test_bulk_set_sp_default_by_fcp_template(self):
        """ Test bulk_set_sp_default_by_fcp_template """
        try:
            # create 1st template(tmpl_id_1)
            # with default_sp_list=['SP1', 'SP2']
            tmpl_id_1 = self._prepare_fcp_info_for_a_test_fcp_template()
            self.db_op.edit_fcp_template(tmpl_id_1,
                                         default_sp_list=['SP1', 'SP2'])
            # create 2nd template(tmpl_id_2)
            # with default_sp_list=['SP3', 'SP4']
            tmpl_id_2 = self._prepare_fcp_info_for_a_test_fcp_template()
            self.db_op.edit_fcp_template(tmpl_id_2,
                                         default_sp_list=['SP3', 'SP4'])
            # set tmpl_id_1 with ['SP3', 'SP4']
            self.db_op.bulk_set_sp_default_by_fcp_template(
                tmpl_id_1, ['SP3', 'SP4'])
            info = self.db_op.get_fcp_templates_details([tmpl_id_1])[0]
            result = {r['sp_name'].upper() for r in info}
            expected = {'SP3', 'SP4'}
            self.assertEqual(expected, result)
        finally:
            self._purge_fcp_db()

    #########################################################
    #        Test cases related to multiple tables          #
    #########################################################
    def test_get_allocated_fcps_from_assigner(self):
        """Test API get_allocated_fcps_from_assigner"""
        # prepare data for FCP Multipath Template "1111;2222"
        # insert test data into table template_fcp_mapping
        template_id = 'fakehost-1111-1111-1111-111111111111'
        template_fcp = [('1111', template_id, 0),
                        ('2222', template_id, 1)]
        self._insert_data_into_template_fcp_mapping_table(template_fcp)
        # insert test data into table fcp
        fcp_info_list = [('1111', 'user1', 0, 0, 'c05076de33000111',
                          'c05076de33002641', '27', 'active', 'owner1',
                          template_id),
                         ('2222', 'user1', 0, 0, 'c05076de33000222',
                          'c05076de33002641', '27', 'active', 'owner1',
                          template_id)]
        fcp_id_list = [fcp_info[0] for fcp_info in fcp_info_list]
        # delete dirty data from other test cases
        self.db_op.bulk_delete_from_fcp_table(fcp_id_list)
        # insert new test data
        self._insert_data_into_fcp_table(fcp_info_list)
        try:
            # case1: reserved and connections == 0
            fcp_list = self.db_op.get_allocated_fcps_from_assigner(
                'user1', template_id)
            self.assertEqual(0, len(fcp_list))
            # case2: reserved == 0 and connections != 0
            # increase connections to 1
            self.increase_connections('1111')
            fcp_list = self.db_op.get_allocated_fcps_from_assigner(
                'user1',
                template_id)
            self.assertEqual(1, len(fcp_list))
            # case3: reserved != 0 and connections == 0
            self.db_op.reserve_fcps(['2222'], 'user2', template_id)
            fcp_list = self.db_op.get_allocated_fcps_from_assigner(
                'user2', template_id)
            self.assertEqual(1, len(fcp_list))
            # case4: reserve !=0 and connections != 0
            self.db_op.update_usage_of_fcp('1111', 'user2', 1, 1,
                                           template_id)
            self.db_op.update_usage_of_fcp('2222', 'user2', 1, 1,
                                           template_id)
            fcp_list = self.db_op.get_allocated_fcps_from_assigner(
                'user2', template_id)
            self.assertEqual(2, len(fcp_list))
        finally:
            self.db_op.bulk_delete_from_fcp_table(fcp_id_list)
            self.db_op.bulk_delete_fcp_from_template(fcp_id_list,
                                                     template_id)

    def test_get_reserved_fcps_from_assigner(self):
        # prepare data for FCP Multipath Template "1111;2222"
        # insert test data into table fcp
        template_id = 'fakehost-1111-1111-1111-111111111111'
        fcp_info_list = [('1111', 'user1', 0, 0, 'c05076de33000111',
                          'c05076de33002641', '27', 'active', 'owner1',
                          template_id),
                         ('2222', 'user1', 0, 0, 'c05076de33000222',
                          'c05076de33002641', '27', 'active', 'owner1',
                          template_id)]
        fcp_id_list = [fcp_info[0] for fcp_info in fcp_info_list]
        # delete dirty data from other test cases
        self.db_op.bulk_delete_from_fcp_table(fcp_id_list)
        # insert new test data
        self._insert_data_into_fcp_table(fcp_info_list)
        # insert test data into table template_fcp_mapping
        template_fcp = [('1111', template_id, 0),
                        ('2222', template_id, 1)]
        # delete dirty data from other test cases
        self.db_op.bulk_delete_fcp_from_template(fcp_id_list, template_id)
        self._insert_data_into_template_fcp_mapping_table(template_fcp)
        try:
            # case1: reserved and connections == 0
            fcp_list = self.db_op.get_reserved_fcps_from_assigner(
                'user1', template_id)
            self.assertEqual(0, len(fcp_list))
            # case2: reserved == 0 and connections != 0
            # increase connections to 1
            self.increase_connections('1111')
            fcp_list = self.db_op.get_reserved_fcps_from_assigner(
                'user1', template_id)
            self.assertEqual(0, len(fcp_list))
            # case3: reserved != 0 and connections == 0
            # set reserved to 1
            self.db_op.reserve_fcps(['2222'], 'user2', template_id)
            fcp_list = self.db_op.get_reserved_fcps_from_assigner(
                'user2', template_id)
            self.assertEqual(1, len(fcp_list))
            # case4: reserve !=0 and connections != 0
            # set reserved to 1
            self.db_op.reserve_fcps(fcp_id_list, 'user2', template_id)
            # set connections to 1
            self.db_op.update_usage_of_fcp('1111', 'user2', 1, 1,
                                           template_id)
            self.db_op.update_usage_of_fcp('2222', 'user2', 1, 1,
                                           template_id)
            # NOTE(review): this calls get_allocated_fcps_from_assigner
            # although the test targets get_reserved_fcps_from_assigner;
            # looks like a copy-paste leftover -- confirm intent.
            fcp_list = self.db_op.get_allocated_fcps_from_assigner(
                'user2', template_id)
            self.assertEqual(2, len(fcp_list))
        finally:
            self.db_op.bulk_delete_from_fcp_table(fcp_id_list)
            self.db_op.bulk_delete_fcp_from_template(fcp_id_list,
                                                     template_id)

    def test_get_fcp_devices_with_same_index(self):
        '''Test get_fcp_devices_with_same_index

           get_fcp_pair_with_same_index() only returns
           two possible values:
           case 1
              an empty list(i.e. []) if no fcp exist in DB
           case 2
              randomly choose a pair of below combinations:
              [1a00,1b00] ,[1a01,1b01] ,[1a02,1b02]...
              rather than below combinations:
              [1a00,1b02] ,[1a03,1b00]
              [1a02], [1b03]
           case 3
              an empty list(i.e.
              []) if no expected pair found
        '''
        # prepare data for FCP Multipath Template "1111;2222"
        template_id = 'fakehost-1111-1111-1111-111111111111'
        # insert test data into table fcp
        # Usage in test data:
        #   1a00 usage is connections == 2, reserved == 0
        #   1a02 usage is connections == 1, reserved == 1
        #   1b00 usage is connections == 0, reserved == 1
        #   others are connections ==0, reserved == 0
        # State in test data:
        #   1a01, 1a03, 1a04, 1b01, 1b03 are free
        #   1a00, 1b04 are active
        #   others are ''
        # WWPNs in test data:
        #   1b02 wwpns are empty, others are normal
        fcp_info_list = [('1a00', '', 2, 0, 'c05076de33000a00',
                          'c05076de33002641', '27', 'active', 'owner1', ''),
                         ('1a01', '', 0, 0, 'c05076de33000a01',
                          'c05076de33002641', '27', 'free', 'owner1', ''),
                         ('1a02', '', 1, 1, 'c05076de33000a02',
                          'c05076de33002641', '27', '', 'owner1', ''),
                         ('1a03', '', 0, 0, 'c05076de33000a03',
                          'c05076de33002641', '27', 'free', 'owner1', ''),
                         ('1a04', '', 0, 0, 'c05076de33000a04',
                          'c05076de33002641', '27', 'free', 'owner1', ''),
                         ('1b00', '', 0, 1, 'c05076de33000b00',
                          'c05076de33002642', '30', '', 'owner1', ''),
                         ('1b01', '', 0, 0, 'c05076de33000b01',
                          'c05076de33002642', '30', 'free', 'owner1', ''),
                         ('1b02', '', 0, 0, '', '', '30', 'notfound',
                          'owner1', ''),
                         ('1b03', '', 0, 0, 'c05076de33000b03',
                          'c05076de33002642', '30', 'free', 'owner1', ''),
                         ('1b04', '', 0, 0, 'c05076de33000b04',
                          'c05076de33002642', '30', 'active', 'owner1', '')]
        fcp_id_list = [fcp_info[0] for fcp_info in fcp_info_list]
        # delete dirty data from other test cases
        self.db_op.bulk_delete_from_fcp_table(fcp_id_list)
        self._insert_data_into_fcp_table(fcp_info_list)
        # insert test data into table template_fcp_mapping
        template_fcp = [('1a00', template_id, 0),
                        ('1a01', template_id, 0),
                        ('1a02', template_id, 0),
                        ('1a03', template_id, 0),
                        ('1a04', template_id, 0),
                        ('1b00', template_id, 1),
                        ('1b01', template_id, 1),
                        ('1b02', template_id, 1),
                        ('1b03', template_id, 1),
                        ('1b04', template_id, 1)]
        self.db_op.bulk_delete_fcp_from_template(fcp_id_list,
                                                 template_id)
        self._insert_data_into_template_fcp_mapping_table(template_fcp)
        try:
            # test case1
            fcp_list = self.db_op.get_fcp_devices_with_same_index('fakeid')
            self.assertEqual([], fcp_list)
            # test case2
            # expected result:
            # it can not return 1a04 because
            # it does not have 1bxx with same index
            expected_results = {('1a01', '1b01'), ('1a03', '1b03')}
            result = set()
            # run several times so both random pairs are observed
            for i in range(10):
                fcp_list = self.db_op.get_fcp_devices_with_same_index(
                    template_id)
                result.add(tuple([fcp[0] for fcp in fcp_list]))
            self.assertEqual(result, expected_results)
            # test case3:
            self.db_op.reserve_fcps(['1a01', '1b03'], '', template_id)
            # after reserve_fcps, no available pair records with same index
            fcp_list = self.db_op.get_fcp_devices_with_same_index(
                template_id)
            self.assertEqual(fcp_list, [])
        finally:
            self.db_op.bulk_delete_from_fcp_table(fcp_id_list)
            self.db_op.bulk_delete_fcp_from_template(fcp_id_list,
                                                     template_id)

    def test_get_fcp_devices(self):
        '''Test API get_fcp_devices

           get_fcp_pair() only returns the following possible values:
           e.g. When FCP DB contains FCPs from 2 paths
           case 1
              randomly choose one available FCP per path:
              [1a03,1b00] ,[1a02,1b01], [1a02,1b02]...
              [1a00,1b01] ,[1a01,1b02], ...
           case 2
              if CONF.volume.min_fcp_paths_count is enabled,
              (such as, min_fcp_paths_count = 1)
              then it may also return a single FCP
              (such as, [1a02], [1b03], ...)
           case 3
              an empty list(i.e. []) if no expected pair found
        '''
        # prepare data for FCP Multipath Template "1a00-1a04;1b00-1b04"
        template_id = 'fakehost-1111-1111-1111-111111111111'
        # insert test data into table fcp
        # Usage in test data:
        #   1a00 usage is connections == 2, reserved == 0
        #   1a02 usage is connections == 1, reserved == 1
        #   1b00 usage is connections == 0, reserved == 1
        #   others are connections ==0, reserved == 0
        # State in test data:
        #   1a01, 1a03, 1a04, 1b01, 1b03 are free
        #   1a00, 1b04 are active
        #   others are ''
        # WWPNs in test data:
        #   1b02 wwpns are empty, others are normal
        fcp_info_list = [('1a00', '', 2, 0, 'c05076de33000a00',
                          'c05076de33002641', '27', 'active', 'owner1', ''),
                         ('1a01', '', 0, 0, 'c05076de33000a01',
                          'c05076de33002641', '27', 'free', 'owner1', ''),
                         ('1a02', '', 1, 1, 'c05076de33000a02',
                          'c05076de33002641', '27', '', 'owner1', ''),
                         ('1a03', '', 0, 0, 'c05076de33000a03',
                          'c05076de33002641', '27', 'free', 'owner1', ''),
                         ('1a04', '', 0, 0, 'c05076de33000a04',
                          'c05076de33002641', '27', 'free', 'owner1', ''),
                         ('1b00', '', 0, 1, 'c05076de33000b00',
                          'c05076de33002642', '30', '', 'owner1', ''),
                         ('1b01', '', 0, 0, 'c05076de33000b01',
                          'c05076de33002642', '30', 'free', 'owner1', ''),
                         ('1b02', '', 0, 0, '', '', '30', 'notfound',
                          'owner1', ''),
                         ('1b03', '', 0, 0, 'c05076de33000b03',
                          'c05076de33002642', '30', 'free', 'owner1', ''),
                         ('1b04', '', 0, 0, 'c05076de33000b04',
                          'c05076de33002642', '30', 'active', 'owner1', '')]
        fcp_id_list = [fcp_info[0] for fcp_info in fcp_info_list]
        # delete dirty data from other test cases
        self.db_op.bulk_delete_from_fcp_table(fcp_id_list)
        # insert new test data
        self._insert_data_into_fcp_table(fcp_info_list)
        # insert test data into table template_fcp_mapping
        template_fcp = [('1a00', template_id, 0),
                        ('1a01', template_id, 0),
                        ('1a02', template_id, 0),
                        ('1a03', template_id, 0),
                        ('1a04', template_id, 0),
                        ('1b00', template_id, 1),
                        ('1b01', template_id, 1),
                        ('1b02', template_id, 1),
                        ('1b03', template_id, 1),
                        ('1b04', template_id, 1)]
        # delete dirty data from
other test cases self.db_op.bulk_delete_fcp_from_template(fcp_id_list, template_id) # insert new test data self._insert_data_into_template_fcp_mapping_table(template_fcp) # insert date to template table template_info = [(template_id, 'name', 'desc', False, -1)] self._insert_data_into_template_table(template_info) try: # expected result all_possible_pairs = { ('1a01', '1b01'), ('1a01', '1b03'), ('1a03', '1b01'), ('1a03', '1b03'), ('1a04', '1b01'), ('1a04', '1b03') } # exhaustion to get all possible pairs result = set() for i in range(300): fcp_list = self.db_op.get_fcp_devices(template_id) # fcp_list include fcp_id, wwpn_npiv, wwpn_phy # we test fcp_id only result.add(tuple([fcp[0] for fcp in fcp_list])) self.assertEqual(result, all_possible_pairs) # test case2: no available fcp device in one path # reserve all fcp devices in path 0 self.db_op.reserve_fcps(['1a01', '1a03', '1a04'], '', template_id) # expected result for i in range(10): fcp_list = self.db_op.get_fcp_devices(template_id) self.assertEqual(fcp_list, []) # test case3: min_fcp_paths_count was set to 1 # set min_fcp_paths_count to 1 self.db_op.edit_fcp_template(template_id, min_fcp_paths_count=1) all_possible_pairs = {('1b01',), ('1b03',)} result = set() for i in range(10): fcp_list = self.db_op.get_fcp_devices(template_id) result.add(tuple([fcp[0] for fcp in fcp_list])) self.assertEqual(result, all_possible_pairs) finally: self.db_op.bulk_delete_from_fcp_table(fcp_id_list) self.db_op.bulk_delete_fcp_from_template(fcp_id_list, template_id) self.db_op.delete_fcp_template(template_id) def test_create_fcp_template_with_name_and_desc(self): """Create a FCP Multipath Template only with name and description, other parameters are all default values""" fcp_template_id = 'fake_tmpl_id' name = 'tmpl_1' description = 'this is the description.' 
fcp_devices_by_path = [] host_default = False default_sp_list = None self.db_op.create_fcp_template(fcp_template_id, name, description, fcp_devices_by_path, host_default, default_sp_list) actual_tmpl = self.db_op.get_fcp_templates([fcp_template_id]) self.assertEqual(actual_tmpl[0]['id'], fcp_template_id) self.db_op.delete_fcp_template(fcp_template_id) def test_validate_min_fcp_paths_count_with_fcp_and_minCount_no_err(self): fcp_devices = '1a10;1b10;1c10;1d10' min_fcp_paths_count = 4 fcp_template_id = 'fcp_tmpl_1' self.db_op._validate_min_fcp_paths_count(fcp_devices, min_fcp_paths_count, fcp_template_id) @mock.patch("zvmsdk.database.FCPDbOperator.get_min_fcp_paths_count_from_db") def test_validate_min_fcp_paths_count_only_with_fcp_error(self, get_min_fcp_paths_count_from_db): fcp_devices = '1a10;1b10' fcp_template_id = 'fcc_tmpl_1' get_min_fcp_paths_count_from_db.return_value = 4 self.assertRaisesRegex(exception.SDKConflictError, 'min_fcp_paths_count 4 is larger than fcp device path count 2', self.db_op._validate_min_fcp_paths_count, fcp_devices, None, fcp_template_id) @mock.patch("zvmsdk.database.FCPDbOperator.get_path_count") def test_validate_min_fcp_paths_count_only_with_minCount_error(self, get_path_count): fcp_devices = None min_fcp_paths_count = 4 fcp_template_id = 'fcc_tmpl_1' get_path_count.return_value = 2 self.assertRaisesRegex(exception.SDKConflictError, 'min_fcp_paths_count 4 is larger than fcp device path count 2', self.db_op._validate_min_fcp_paths_count, fcp_devices, min_fcp_paths_count, fcp_template_id) @mock.patch("zvmsdk.database.FCPDbOperator.get_path_count") @mock.patch("zvmsdk.database.FCPDbOperator.get_min_fcp_paths_count_from_db") def test_get_min_fcp_paths_count_not_set_minCount(self, get_min_fcp_paths_count_from_db, get_path_count): get_path_count.return_value = 2 get_min_fcp_paths_count_from_db.return_value = -1 ret = self.db_op.get_min_fcp_paths_count('template_id') self.assertEqual(2, ret) 
@mock.patch("zvmsdk.database.FCPDbOperator.get_path_count") @mock.patch("zvmsdk.database.FCPDbOperator.get_min_fcp_paths_count_from_db") def test_get_min_fcp_paths_count_with_minCount(self, get_min_fcp_paths_count_from_db, get_path_count): get_path_count.return_value = 2 get_min_fcp_paths_count_from_db.return_value = 4 ret = self.db_op.get_min_fcp_paths_count('template_id') self.assertEqual(4, ret) def test_get_min_fcp_paths_count_with_non_template(self): self.assertRaisesRegex(exception.SDKObjectNotExistError, 'min_fcp_paths_count from fcp_template_id', self.db_op.get_min_fcp_paths_count, None) @mock.patch("zvmsdk.database.FCPDbOperator.get_min_fcp_paths_count_from_db") def test_get_min_fcp_paths_count_with_none_mincount(self, get_min_fcp_paths_count_from_db): get_min_fcp_paths_count_from_db.return_value = None self.assertRaisesRegex(exception.SDKObjectNotExistError, 'min_fcp_paths_count from fcp_template_id', self.db_op.get_min_fcp_paths_count, 'fake_fcp_template_id') def test_edit_fcp_template(self): """ Test edit_fcp_template() """ tmpl_id = 'fake_id_0000' kwargs = { 'name': 'new_name', 'description': 'new_desc', 'fcp_devices': '1A00-1A03;1B00-1B03', 'host_default': False, 'default_sp_list': [] } try: # case1: # validate: FCP Multipath Template obj_desc = ("FCP Multipath Template {}".format(tmpl_id)) with self.assertRaises(exception.SDKObjectNotExistError) as cm: self.db_op.edit_fcp_template(tmpl_id, **kwargs) # The following 3 assertions are the same # (Pdb) pp str(cm.exception) # 'FCP Multipath Template fake_id_0000 does not exist.' self.assertIn(obj_desc, cm.exception.message) self.assertIn(obj_desc, str(cm.exception)) self.assertRaisesRegex(exception.SDKObjectNotExistError, obj_desc, self.db_op.edit_fcp_template, tmpl_id, **kwargs) # case2: # validate: add or delete path from FCP Multipath Template # preparation: # a. create_fcp_template # b. bulk_insert_zvm_fcp_info_into_fcp_table # insert '1A00-1A03;1B00-1B03' # c. 
reserve_fcps # set assigner_id and tmpl_id # ('1a01', '1b03') # d. increase_connections # set connections # ('1a01', '1b03') kwargs['fcp_devices'] = '1A00-1A03;1B00-1B03' self.db_op.create_fcp_template( tmpl_id, kwargs['name'], kwargs['description'], utils.expand_fcp_list(kwargs['fcp_devices']), host_default=kwargs['host_default'], default_sp_list=kwargs['default_sp_list']) fcp_info = [ ('1a01', 'wwpn_npiv_1', 'wwpn_phy_1', '27', 'active', 'user1'), ('1b03', 'wwpn_npiv_1', 'wwpn_phy_1', '27', 'active', 'user1')] # set FCP ('1a01', '1b03') as inuse self.db_op.bulk_insert_zvm_fcp_info_into_fcp_table(fcp_info) reserve_info = (('1a01', '1b03'), 'user1', tmpl_id) self.db_op.reserve_fcps(*reserve_info) self.increase_connections('1a01') self.increase_connections('1b03') # add path kwargs['fcp_devices'] = '1A00-1A03;1B00-1B03;1c00' detail = "Adding or deleting a FCP device path" self.assertRaisesRegex(exception.SDKConflictError, detail, self.db_op.edit_fcp_template, tmpl_id, **kwargs) # delete path kwargs['fcp_devices'] = '1A00-1A03' self.assertRaisesRegex(exception.SDKConflictError, detail, self.db_op.edit_fcp_template, tmpl_id, **kwargs) # case3 # validate: not allowed to remove inuse FCP # Prepare 2 templates with the same FCP devices # 1a01, 1b03 are allocated from template 'fake_id_0000' # 1a02, 1b02 are allocated from template 'fake_id_1111' self.db_op.create_fcp_template( 'fake_id_1111', "fake_name", 'fake_desc', utils.expand_fcp_list('1A00-1A03;1B00-1B03'), host_default=False, default_sp_list=[]) fcp_info = [ ('1a02', 'wwpn_npiv_1', 'wwpn_phy_1', '27', 'active', 'user1'), ('1b02', 'wwpn_npiv_1', 'wwpn_phy_1', '27', 'active', 'user1')] self.db_op.bulk_insert_zvm_fcp_info_into_fcp_table(fcp_info) reserve_info = (('1a02', '1b02'), 'user1', 'fake_id_1111') self.db_op.reserve_fcps(*reserve_info) self.increase_connections('1a02') self.increase_connections('1b02') # case-3.1: # delete (1a01,1b03) from 'fake_id_0000' must fail fcp_device_list = '1A00,1A02-1A03;1B00-1B02' 
not_allow_for_del = {'1a01', '1b03'} detail = ("The FCP devices ({}) are missing from the FCP device list." .format(utils.shrink_fcp_list(list(not_allow_for_del)))) with self.assertRaises(exception.SDKConflictError) as cm: self.db_op.edit_fcp_template('fake_id_0000', fcp_devices=fcp_device_list) self.assertIn(detail, str(cm.exception)) # case-3.2: # delete (1a01,1b03) from 'fake_id_1111' must success self.db_op.edit_fcp_template('fake_id_1111', fcp_devices=fcp_device_list) # case4 # DML: table template_fcp_mapping # (based on the preparation done in case2) # a. insert fcp device : 1a05-1a07, 1b05-1b07 # b. remove fcp device : 1a02, 1b02 # c. update fcp path : # change 1a01,1a03 from path0 to path1 # change 1b01,1b03 from path1 to path0 kwargs['fcp_devices'] = '1A00,1B01,1B03;1B00,1A01,1A03' self.db_op.edit_fcp_template( tmpl_id, fcp_devices=kwargs['fcp_devices']) expected = utils.expand_fcp_list(kwargs['fcp_devices']) _, fcp_detail = self.db_op.get_fcp_templates_details([tmpl_id]) fcp_in_db = {0: set(), 1: set()} for row in fcp_detail: fcp_in_db[row['path']].add(row['fcp_id']) self.assertEqual(expected, fcp_in_db) # case5 # DML: table(template and template_sp_mapping) # (based on the preparation done in case2) kwargs['name'] = 'test_name' kwargs['description'] = 'test_desc' kwargs['host_default'] = True kwargs['default_sp_list'] = ['SP1', 'SP2'] tmpl_basic = self.db_op.edit_fcp_template(tmpl_id, **kwargs) expected = {'fcp_template': { 'id': tmpl_id, 'name': kwargs['name'], 'description': kwargs['description'], 'host_default': kwargs['host_default'], 'storage_providers': kwargs['default_sp_list'], 'min_fcp_paths_count': 2 }} self.assertEqual(expected, tmpl_basic) finally: # clean up self._purge_fcp_db() def test_get_fcp_templates(self): """test get_fcp_templates""" self._purge_fcp_db() try: # prepare test data tmpl_id_1 = 'fake_id_0001' kwargs1 = { 'name': 'new_name1', 'description': 'new_desc1', 'fcp_devices': '1A00-1A03;1B00-1B03', 'host_default': False, 
'default_sp_list': [] } tmpl_id_2 = 'fake_id_0002' kwargs2 = { 'name': 'new_name2', 'description': 'new_desc2', 'fcp_devices': '1C00-1C03;1D00-1D03', 'host_default': True, 'default_sp_list': ['fake_sp'] } self.db_op.create_fcp_template( tmpl_id_1, kwargs1['name'], kwargs1['description'], utils.expand_fcp_list(kwargs1['fcp_devices']), host_default=kwargs1['host_default'], default_sp_list=kwargs1['default_sp_list']) self.db_op.create_fcp_template( tmpl_id_2, kwargs2['name'], kwargs2['description'], utils.expand_fcp_list(kwargs2['fcp_devices']), host_default=kwargs2['host_default'], default_sp_list=kwargs2['default_sp_list']) # case1: get_fcp_templates by template_id_list expected_1 = (tmpl_id_1, 'new_name1', 'new_desc1', False, None) info_1 = self.db_op.get_fcp_templates([tmpl_id_1])[0] result_1 = ( info_1[0], info_1[1], info_1[2], bool(info_1[3]), info_1[5]) self.assertEqual(expected_1, result_1) # case2: get_fcp_templates without parameter, will get all # templates info expected_2 = (tmpl_id_2, 'new_name2', 'new_desc2', True, 'fake_sp') info_all = self.db_op.get_fcp_templates() self.assertEqual(2, len(info_all)) result_1 = ( info_all[0][0], info_all[0][1], info_all[0][2], bool(info_all[0][3]), info_all[0][5]) result_2 = ( info_all[1][0], info_all[1][1], info_all[1][2], bool(info_all[1][3]), info_all[1][5]) self.assertEqual(expected_1, result_1) self.assertEqual(expected_2, result_2) finally: self._purge_fcp_db() def test_get_host_default_fcp_template(self): """test get_host_default_fcp_template""" try: # prepare test data tmpl_id_1 = 'fake_id_0001' kwargs1 = { 'name': 'new_name1', 'description': 'new_desc1', 'fcp_devices': '1A00-1A03;1B00-1B03', 'host_default': True, 'default_sp_list': [] } tmpl_id_2 = 'fake_id_0002' kwargs2 = { 'name': 'new_name2', 'description': 'new_desc2', 'fcp_devices': '1C00-1C03;1D00-1D03', 'host_default': False, 'default_sp_list': ['fake_sp'] } self.db_op.create_fcp_template( tmpl_id_1, kwargs1['name'], kwargs1['description'], 
utils.expand_fcp_list(kwargs1['fcp_devices']), host_default=kwargs1['host_default'], default_sp_list=kwargs1['default_sp_list']) self.db_op.create_fcp_template( tmpl_id_2, kwargs2['name'], kwargs2['description'], utils.expand_fcp_list(kwargs2['fcp_devices']), host_default=kwargs2['host_default'], default_sp_list=kwargs2['default_sp_list']) # get by host_default=True info_1 = self.db_op.get_host_default_fcp_template()[0] expected_1 = (tmpl_id_1, 'new_name1', 'new_desc1', True, -1, None) result_1 = ( info_1[0], info_1[1], info_1[2], bool(info_1[3]), info_1[4], info_1[5]) self.assertEqual(expected_1, result_1) # get by host_default=False info_2 = self.db_op.get_host_default_fcp_template(False)[0] expected_2 = (tmpl_id_2, 'new_name2', 'new_desc2', False, 'fake_sp') result_2 = ( info_2[0], info_2[1], info_2[2], bool(info_2[3]), info_2[5]) self.assertEqual(expected_2, result_2) finally: self._purge_fcp_db() def test_get_sp_default_fcp_template(self): """test get_sp_default_fcp_template""" try: # prepare test data tmpl_id_1 = 'fake_id_0001' kwargs1 = { 'name': 'new_name1', 'description': 'new_desc1', 'fcp_devices': '1A00-1A03;1B00-1B03', 'host_default': False, 'default_sp_list': ['v7k60'] } tmpl_id_2 = 'fake_id_0002' kwargs2 = { 'name': 'new_name2', 'description': 'new_desc2', 'fcp_devices': '1C00-1C03;1D00-1D03', 'host_default': True, 'default_sp_list': ['ds8k'] } self.db_op.create_fcp_template( tmpl_id_1, kwargs1['name'], kwargs1['description'], utils.expand_fcp_list(kwargs1['fcp_devices']), host_default=kwargs1['host_default'], default_sp_list=kwargs1['default_sp_list'], min_fcp_paths_count=2) self.db_op.create_fcp_template( tmpl_id_2, kwargs2['name'], kwargs2['description'], utils.expand_fcp_list(kwargs2['fcp_devices']), host_default=kwargs2['host_default'], default_sp_list=kwargs2['default_sp_list'], min_fcp_paths_count=2) # case1: get by one sp info_1 = self.db_op.get_sp_default_fcp_template(['v7k60'])[0] expected_1 = (tmpl_id_1, 'new_name1', 'new_desc1', False, 
'v7k60') result_1 = ( info_1[0], info_1[1], info_1[2], bool(info_1[3]), info_1[5]) self.assertEqual(expected_1, result_1) # case2: get by 'all' sp expected_2 = (tmpl_id_2, 'new_name2', 'new_desc2', True, 'ds8k') info_all = self.db_op.get_sp_default_fcp_template(['all']) self.assertEqual(2, len(info_all)) result_1 = ( info_all[0][0], info_all[0][1], info_all[0][2], bool(info_all[0][3]), info_all[0][5]) result_2 = ( info_all[1][0], info_all[1][1], info_all[1][2], bool(info_all[1][3]), info_all[1][5]) self.assertEqual(expected_1, result_1) self.assertEqual(expected_2, result_2) finally: self._purge_fcp_db() def test_get_fcp_template_by_assigner_id(self): """test get_fcp_template_by_assigner_id""" try: # prepare test data, template_1 has assigner_id='user1' tmpl_id_1 = self._prepare_fcp_info_for_a_test_fcp_template() # template_2 does not have assigner_id tmpl_id_2 = 'fake_id_0002' kwargs2 = { 'name': 'new_name2', 'description': 'new_desc2', 'fcp_devices': '1C00-1C03;1D00-1D03', 'host_default': True, 'default_sp_list': ['fake_sp'] } self.db_op.create_fcp_template( tmpl_id_2, kwargs2['name'], kwargs2['description'], utils.expand_fcp_list(kwargs2['fcp_devices']), host_default=kwargs2['host_default'], default_sp_list=kwargs2['default_sp_list']) info_1 = self.db_op.get_fcp_template_by_assigner_id('user1')[0] expected_1 = (tmpl_id_1, 'new_name', 'new_desc', False, 2, None) result_1 = ( info_1[0], info_1[1], info_1[2], bool(info_1[3]), info_1[4], info_1[5]) self.assertEqual(expected_1, result_1) finally: self._purge_fcp_db() def test_get_fcp_templates_details(self): """test get_fcp_templates_details""" try: # prepare test data, template_1 has assigner_id='user1' tmpl_id_1 = 'fake_id_' + str(random.randint(100000, 999999)) kwargs1 = { 'name': 'new_name1', 'description': 'new_desc1', 'fcp_devices': '1A00', 'host_default': False, 'default_sp_list': [] } # template_2 does not have assigner_id tmpl_id_2 = 'fake_id_' + str(random.randint(100000, 999999)) kwargs2 = { 'name': 
'new_name2', 'description': 'new_desc2', 'fcp_devices': '1C00-1C01;1D00-1D01', 'host_default': True, 'default_sp_list': ['fake_sp'] } self.db_op.create_fcp_template( tmpl_id_1, kwargs1['name'], kwargs1['description'], utils.expand_fcp_list(kwargs1['fcp_devices']), host_default=kwargs1['host_default'], default_sp_list=kwargs1['default_sp_list']) self.db_op.create_fcp_template( tmpl_id_2, kwargs2['name'], kwargs2['description'], utils.expand_fcp_list(kwargs2['fcp_devices']), host_default=kwargs2['host_default'], default_sp_list=kwargs2['default_sp_list']) fcp_info = [ ('1a00', 'wwpn_npiv_1', 'wwpn_phy_1', '27', 'active', 'user1')] try: self.db_op.bulk_insert_zvm_fcp_info_into_fcp_table(fcp_info) except exception.SDKGuestOperationError as ex: if 'UNIQUE constraint failed' in str(ex): pass else: raise reserve_info = (('1a00'), 'user1', tmpl_id_1) self.db_op.reserve_fcps(*reserve_info) # case1: get all templates detail result = self.db_op.get_fcp_templates_details() tmpl_result = result[0] fcp_result = result[1] # should include 2 templates info self.assertEqual(2, len(tmpl_result)) expected_template_info_1 = (tmpl_id_1, 'new_name1', 'new_desc1', False, -1) template_info_1 = (tmpl_result[0][0], tmpl_result[0][1], tmpl_result[0][2], bool(tmpl_result[0][3]), tmpl_result[0][4]) self.assertEqual(template_info_1, expected_template_info_1) expected_template_info_2 = (tmpl_id_2, 'new_name2', 'new_desc2', True, -1) template_info_2 = (tmpl_result[1][0], tmpl_result[1][1], tmpl_result[1][2], bool(tmpl_result[1][3]), tmpl_result[1][4]) self.assertEqual(template_info_2, expected_template_info_2) # should include 5 fcps info of the two templates self.assertEqual(5, len(fcp_result)) # case2: get by template_id_list result = self.db_op.get_fcp_templates_details([tmpl_id_1]) # should include 1 template info self.assertEqual(1, len(result[0])) expected_template_info_1 = (tmpl_id_1, 'new_name1', 'new_desc1', False, -1, None) template_info_1 = (tmpl_result[0][0], tmpl_result[0][1], 
tmpl_result[0][2], bool(tmpl_result[0][3]), tmpl_result[0][4], tmpl_result[0][5]) self.assertEqual(template_info_1, expected_template_info_1) # should include 1 fcp info of the template self.assertEqual(1, len(result[1])) fcp_in_db = result[1] expected = ('1a00', tmpl_id_1, 0, '', 0, 1, 'wwpn_npiv_1', 'wwpn_phy_1', 27, 'active', 'user1', tmpl_id_1) i = 0 for fcp in fcp_in_db: self.assertEqual(fcp[i], expected[i]) i += 1 finally: self._purge_fcp_db() def test_delete_fcp_template(self): try: # case1: delete a in-use template tmpl_id_1 = self._prepare_fcp_info_for_a_test_fcp_template() self.assertRaises(exception.SDKConflictError, self.db_op.delete_fcp_template, tmpl_id_1) # case2: normal case self.db_op.unreserve_fcps(['1A01', '1B03']) self.db_op.delete_fcp_template(tmpl_id_1) # case3: delete a non-exist template obj_desc = ("FCP Multipath Template {}".format(tmpl_id_1)) with self.assertRaises(exception.SDKObjectNotExistError) as cm: self.db_op.delete_fcp_template(tmpl_id_1) # The following 3 assertions are the same # 'FCP Multipath Template fake_id_0000 does not exist.' 
self.assertIn(obj_desc, cm.exception.message) self.assertIn(obj_desc, str(cm.exception)) self.assertRaisesRegex(exception.SDKObjectNotExistError, obj_desc, self.db_op.delete_fcp_template, tmpl_id_1) finally: self._purge_fcp_db() class GuestDbOperatorTestCase(base.SDKTestCase): @classmethod def setUpClass(cls): super(GuestDbOperatorTestCase, cls).setUpClass() cls.db_op = database.GuestDbOperator() cls.userid = 'FAKEUSER' @classmethod def tearDownClass(cls): with database.get_guest_conn() as conn: conn.execute("DROP TABLE guests") super(GuestDbOperatorTestCase, cls).tearDownClass() @mock.patch.object(uuid, 'uuid4') def test_add_guest(self, get_uuid): meta = 'fakemeta=1, fakemeta2=True' get_uuid.return_value = u'ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c' self.db_op.add_guest(self.userid, meta=meta) # Query, the guest should in table guests = self.db_op.get_guest_list() self.assertEqual(1, len(guests)) self.assertListEqual([(u'ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c', u'FAKEUSER', u'fakemeta=1, fakemeta2=True', 0, u'')], guests) self.db_op.delete_guest_by_id('ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c') @mock.patch.object(uuid, 'uuid4') def test_add_guest_registered(self, get_uuid): meta = 'fakemeta=1, fakemeta2=True' net = 1 get_uuid.return_value = u'ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c' self.db_op.add_guest_registered(self.userid, meta, net) # Query, the guest should in table guests = self.db_op.get_guest_list() self.assertEqual(1, len(guests)) self.assertListEqual([(u'ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c', u'FAKEUSER', u'fakemeta=1, fakemeta2=True', 1, None)], guests) self.db_op.delete_guest_by_id('ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c') @mock.patch.object(uuid, 'uuid4') def test_add_guest_twice_error(self, get_uuid): meta = 'fakemeta=1, fakemeta2=True' get_uuid.return_value = u'ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c' self.db_op.add_guest(self.userid, meta=meta) # Add same user the second time self.assertRaises(exception.SDKGuestOperationError, self.db_op.add_guest, self.userid) 
self.db_op.delete_guest_by_id('ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c') @mock.patch.object(uuid, 'uuid4') def test_delete_guest_by_id(self, get_uuid): meta = 'fakemeta=1, fakemeta2=True' get_uuid.return_value = u'ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c' self.db_op.add_guest(self.userid, meta=meta) # Delete self.db_op.delete_guest_by_id('ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c') guests = self.db_op.get_guest_list() self.assertListEqual([], guests) def test_delete_guest_by_id_not_exist(self): self.db_op.delete_guest_by_id('Fakeid') @mock.patch.object(uuid, 'uuid4') def test_delete_guest_by_userid(self, get_uuid): meta = 'fakemeta=1, fakemeta2=True' get_uuid.return_value = u'ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c' self.db_op.add_guest(self.userid, meta=meta) # Delete self.db_op.delete_guest_by_userid(self.userid) guests = self.db_op.get_guest_list() self.assertListEqual([], guests) @mock.patch.object(uuid, 'uuid4') def test_get_guest_metadata_with_userid_exist(self, get_uuid): meta = 'os_version=rhel8.3' get_uuid.return_value = u'ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c' self.db_op.add_guest(self.userid, meta=meta) # Delete guest = self.db_op.get_guest_metadata_with_userid(self.userid) self.assertListEqual([(u'os_version=rhel8.3',)], guest) self.db_op.delete_guest_by_id('ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c') guests = self.db_op.get_guest_metadata_with_userid(self.userid) self.assertListEqual([], guests) @mock.patch.object(uuid, 'uuid4') def test_get_guest_metadata_with_userid_no_exist(self, get_uuid): meta = u'' get_uuid.return_value = u'ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c' self.db_op.add_guest(self.userid, meta=meta) guest = self.db_op.get_guest_metadata_with_userid(self.userid) self.assertListEqual([(u'',)], guest) # Delete self.db_op.delete_guest_by_id('ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c') def test_delete_guest_by_userid_not_exist(self): self.db_op.delete_guest_by_id(self.userid) @mock.patch.object(uuid, 'uuid4') def test_get_guest_by_userid(self, get_uuid): meta = 
'fakemeta=1, fakemeta2=True' get_uuid.return_value = u'ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c' self.db_op.add_guest(self.userid, meta=meta) # get guest guest = self.db_op.get_guest_by_userid(self.userid) self.assertEqual((u'ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c', u'FAKEUSER', u'fakemeta=1, fakemeta2=True', 0, u''), guest) self.db_op.delete_guest_by_id('ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c') @mock.patch.object(uuid, 'uuid4') def test_get_metadata_by_userid(self, get_uuid): meta = 'fakemeta=1, fakemeta2=True' get_uuid.return_value = u'ad8f352e-4c9e-4335-aafa-4f4eb2fcc77d' self.db_op.add_guest(self.userid, meta=meta) # get metadata metadata = self.db_op.get_metadata_by_userid(self.userid) self.assertEqual(meta, metadata) self.db_op.delete_guest_by_id('ad8f352e-4c9e-4335-aafa-4f4eb2fcc77d') def test_get_guest_by_userid_not_exist(self): guest = self.db_op.get_guest_by_userid(self.userid) self.assertEqual(None, guest) @mock.patch.object(uuid, 'uuid4') def test_get_guest_by_id(self, get_uuid): meta = 'fakemeta=1, fakemeta2=True' get_uuid.return_value = u'ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c' self.db_op.add_guest(self.userid, meta=meta) # get guest guest = self.db_op.get_guest_by_id( 'ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c') self.assertEqual((u'ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c', u'FAKEUSER', u'fakemeta=1, fakemeta2=True', 0, u''), guest) self.db_op.delete_guest_by_id('ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c') def test_get_guest_by_id_not_exist(self): guest = self.db_op.get_guest_by_id( 'aa8f352e-4c9e-4335-aafa-4f4eb2fcc77c') self.assertEqual(None, guest) @mock.patch.object(uuid, 'uuid4') def test_update_guest_by_id(self, get_uuid): meta = 'fakemeta=1, fakemeta2=True' get_uuid.return_value = u'ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c' self.db_op.add_guest(self.userid, meta=meta) # Update self.db_op.update_guest_by_id( 'ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c', meta='newmeta', net_set='1', comments='newcomment') guest = self.db_op.get_guest_by_id( 
'ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c') self.assertEqual((u'ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c', u'FAKEUSER', u'newmeta', 1, u'newcomment'), guest) self.db_op.delete_guest_by_id('ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c') @mock.patch.object(uuid, 'uuid4') def test_update_guest_by_id_wrong_input(self, get_uuid): meta = 'fakemeta=1, fakemeta2=True' get_uuid.return_value = u'ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c' self.db_op.add_guest(self.userid, meta=meta) # Update self.assertRaises(exception.SDKInternalError, self.db_op.update_guest_by_id, 'ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c') self.db_op.delete_guest_by_id('ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c') def test_update_guest_by_id_not_exist(self): self.assertRaises(exception.SDKObjectNotExistError, self.db_op.update_guest_by_id, 'ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c', meta='newmeta') @mock.patch.object(uuid, 'uuid4') def test_update_guest_by_id_null_value(self, get_uuid): meta = 'fakemeta=1, fakemeta2=True' get_uuid.return_value = u'ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c' self.db_op.add_guest(self.userid, meta=meta) # Update self.db_op.update_guest_by_id( 'ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c', meta='', comments='') guest = self.db_op.get_guest_by_id( 'ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c') self.assertEqual((u'ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c', u'FAKEUSER', u'', 0, u''), guest) self.db_op.delete_guest_by_id('ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c') @mock.patch.object(uuid, 'uuid4') def test_update_guest_by_userid(self, get_uuid): meta = 'fakemeta=1, fakemeta2=True' get_uuid.return_value = u'ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c' self.db_op.add_guest(self.userid, meta=meta) # Update self.db_op.update_guest_by_userid( self.userid, meta='newmetauserid', net_set='1', comments={'newcommentuserid': '1'}) guest = self.db_op.get_guest_by_userid(self.userid) self.assertEqual((u'ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c', u'FAKEUSER', u'newmetauserid', 1, u'{"newcommentuserid": "1"}'), guest) 
self.db_op.delete_guest_by_id('ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c') @mock.patch.object(uuid, 'uuid4') def test_update_guest_by_userid_wrong_input(self, get_uuid): meta = 'fakemeta=1, fakemeta2=True' get_uuid.return_value = u'ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c' self.db_op.add_guest(self.userid, meta=meta) # Update self.assertRaises(exception.SDKInternalError, self.db_op.update_guest_by_userid, self.userid) self.db_op.delete_guest_by_id('ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c') def test_update_guest_by_userid_not_exist(self): self.assertRaises(exception.SDKObjectNotExistError, self.db_op.update_guest_by_userid, self.userid, meta='newmeta') @mock.patch.object(uuid, 'uuid4') def test_update_guest_by_userid_null_value(self, get_uuid): meta = 'fakemeta=1, fakemeta2=True' get_uuid.return_value = u'ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c' self.db_op.add_guest(self.userid, meta=meta) # Update self.db_op.update_guest_by_userid( self.userid, meta='', comments='') guest = self.db_op.get_guest_by_userid(self.userid) self.assertEqual((u'ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c', u'FAKEUSER', u'', 0, u'""'), guest) self.db_op.delete_guest_by_id('ad8f352e-4c9e-4335-aafa-4f4eb2fcc77c') class ImageDbOperatorTestCase(base.SDKTestCase): @classmethod def setUpClass(cls): super(ImageDbOperatorTestCase, cls).setUpClass() cls.db_op = database.ImageDbOperator() @classmethod def tearDownClass(cls): with database.get_image_conn() as conn: conn.execute("DROP TABLE image") super(ImageDbOperatorTestCase, cls).tearDownClass() def test_image_add_query_delete_record(self): imagename = 'test' imageosdistro = 'rhel6.5' md5sum = 'c73ce117eef8077c3420bfc8f473ac2f' disk_size_units = '3338:CYL' image_size_in_bytes = '5120000' type = 'netboot' # Add an record self.db_op.image_add_record( imagename, imageosdistro, md5sum, disk_size_units, image_size_in_bytes, type) # Query the record image_record = self.db_op.image_query_record(imagename) self.assertEqual(1, len(image_record)) self.assertListEqual( 
[{'imagename': u'test', 'imageosdistro': u'rhel6.5', 'md5sum': u'c73ce117eef8077c3420bfc8f473ac2f', 'disk_size_units': u'3338:CYL', 'image_size_in_bytes': u'5120000', 'type': u'netboot', 'comments': None}], image_record) # Delete it self.db_op.image_delete_record(imagename) self.assertRaises(exception.SDKObjectNotExistError, self.db_op.image_query_record, imagename) def test_image_add_record_with_existing_imagename(self): imagename = 'test' imageosdistro = 'rhel6.5' md5sum = 'c73ce117eef8077c3420bfc8f473ac2f' disk_size_units = '3338:CYL' image_size_in_bytes = '5120000' type = 'netboot' # Add an record self.db_op.image_add_record( imagename, imageosdistro, md5sum, disk_size_units, image_size_in_bytes, type) self.assertRaises( exception.SDKDatabaseException, self.db_op.image_add_record, imagename, imageosdistro, md5sum, disk_size_units, image_size_in_bytes, type) self.db_op.image_delete_record(imagename) def test_image_query_record_multiple_image(self): imagename1 = 'testimage1' imagename2 = 'testimage2' imageosdistro = 'rhel6.5' md5sum = 'c73ce117eef8077c3420bfc8f473ac2f' disk_size_units = '3338:CYL' image_size_in_bytes = '5120000' type = 'netboot' # Add two records self.db_op.image_add_record( imagename1, imageosdistro, md5sum, disk_size_units, image_size_in_bytes, type) self.db_op.image_add_record( imagename2, imageosdistro, md5sum, disk_size_units, image_size_in_bytes, type) image_records = self.db_op.image_query_record() self.assertEqual(2, len(image_records)) self.assertListEqual( [{'imagename': u'testimage1', 'imageosdistro': u'rhel6.5', 'md5sum': u'c73ce117eef8077c3420bfc8f473ac2f', 'disk_size_units': u'3338:CYL', 'image_size_in_bytes': u'5120000', 'type': u'netboot', 'comments': None}, {'imagename': u'testimage2', 'imageosdistro': u'rhel6.5', 'md5sum': u'c73ce117eef8077c3420bfc8f473ac2f', 'disk_size_units': u'3338:CYL', 'image_size_in_bytes': u'5120000', 'type': u'netboot', 'comments': None}], image_records) # Clean up the images 
def get_monitor():
    """Return the process-wide ZVMMonitor singleton, creating it lazily."""
    global _MONITOR
    if _MONITOR is not None:
        return _MONITOR
    _MONITOR = ZVMMonitor()
    return _MONITOR
    def _get_inspect_data(self, type, uid_list):
        """Return metering data of the given type for the listed userids.

        :param type: cache type, 'cpumem' or 'vnics'
        :param uid_list: list of userids to inspect
        :returns: dict keyed by userid; invalid userids are skipped and the
                  result may contain data for guests beyond uid_list when a
                  fresh query is performed (callers filter as needed)
        """
        inspect_data = {}
        update_needed = False
        for uid in uid_list:
            # Skip names that are not valid z/VM userids.
            if not zvmutils.valid_userid(uid):
                continue
            cache_data = self._cache.get(type, uid)
            if cache_data is not None:
                inspect_data[uid] = cache_data
            else:
                # A powered-on guest with no cache entry means the cache is
                # stale: drop any partial result and requery everything.
                if self._smtclient.get_power_state(uid) == 'on':
                    update_needed = True
                    inspect_data = {}
                    break
        # If all data are found in cache, just return
        if not update_needed:
            return inspect_data
        # Call client to query latest data
        rdata = {}
        if type == 'cpumem':
            rdata = self._update_cpumem_data(uid_list)
        elif type == 'vnics':
            rdata = self._update_nic_data()
        return rdata
else: rdata = self._smtclient.system_image_performance_query( self._namelist) return rdata def _update_nic_data(self): nics = {} vsw_dict = self._smtclient.virtual_network_vswitch_query_byte_stats() with zvmutils.expect_invalid_resp_data(): for vsw in vsw_dict['vswitches']: for nic in vsw['nics']: userid = nic['userid'] nic_entry = { 'vswitch_name': vsw['vswitch_name'], 'nic_vdev': nic['vdev'], 'nic_fr_rx': int(nic['nic_fr_rx']), 'nic_fr_tx': int(nic['nic_fr_tx']), 'nic_fr_rx_dsc': int(nic['nic_fr_rx_dsc']), 'nic_fr_tx_dsc': int(nic['nic_fr_tx_dsc']), 'nic_fr_rx_err': int(nic['nic_fr_rx_err']), 'nic_fr_tx_err': int(nic['nic_fr_tx_err']), 'nic_rx': int(nic['nic_rx']), 'nic_tx': int(nic['nic_tx'])} if nics.get(userid, None) is None: nics[userid] = [nic_entry] else: nics[userid].append(nic_entry) # Update cache if enabled if self._cache_enabled(): self._cache.refresh('vnics', nics) return nics class MeteringCache(object): """Cache for metering data.""" def __init__(self, types): self._cache = {} self._types = types self._lock = threading.RLock() self._reset(types) def _reset(self, types): with zvmutils.acquire_lock(self._lock): for type in types: self._cache[type] = {'expiration': time.time(), 'data': {}, } def _get_ctype_cache(self, ctype): return self._cache[ctype] def set(self, ctype, key, data): """Set or update cache content. 
:param ctype: cache type :param key: the key to be set value :param data: cache data """ with zvmutils.acquire_lock(self._lock): target_cache = self._get_ctype_cache(ctype) target_cache['data'][key] = data def get(self, ctype, key): with zvmutils.acquire_lock(self._lock): target_cache = self._get_ctype_cache(ctype) if (time.time() > target_cache['expiration']): return None else: return target_cache['data'].get(key, None) def delete(self, ctype, key): with zvmutils.acquire_lock(self._lock): target_cache = self._get_ctype_cache(ctype) if key in target_cache['data']: del target_cache['data'][key] def clear(self, ctype='all'): with zvmutils.acquire_lock(self._lock): if ctype == 'all': self._reset() else: target_cache = self._get_ctype_cache(ctype) target_cache['data'] = {} def refresh(self, ctype, data): with zvmutils.acquire_lock(self._lock): self.clear(ctype) target_cache = self._get_ctype_cache(ctype) target_cache['expiration'] = (time.time() + float(CONF.monitor.cache_interval)) for (k, v) in data.items(): self.set(ctype, k, v) zVMCloudConnector-1.6.3/zvmsdk/vmactions/0000775000175000017510000000000014315232035020041 5ustar ruirui00000000000000zVMCloudConnector-1.6.3/zvmsdk/vmactions/__init__.py0000664000175000017510000000000013672563714022160 0ustar ruirui00000000000000zVMCloudConnector-1.6.3/zvmsdk/vmactions/templates/0000775000175000017510000000000014315232035022037 5ustar ruirui00000000000000zVMCloudConnector-1.6.3/zvmsdk/vmactions/templates/grow_root_volume.j20000664000175000017510000002061114263437505025717 0ustar ruirui00000000000000#!/bin/bash ############################################################################### # Copyright 2021 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
# Validate that the root multipath device has a supported partition layout.
# Supported layouts: exactly one partition (plain root filesystem), or two
# partitions where the second is an LVM physical volume.
# Reads globals:  $mpathx (multipath device name), $part_prefix.
# Sets global:    $partition_count (used later by the main script).
# Exits 1 on any unsupported layout or probing failure.
function check_partition_layout {
    # Check whether the partition count and layout is supported
    partition_count=`lsblk /dev/mapper/$mpathx -l | awk 'NR>1 && $6=="part" {print $0}' | wc -l`
    if [[ $? -ne 0 ]]; then
        echo "Failed to get partition number with lsblk /dev/mapper/$mpathx cmd."
        echo "Aborting extending partition. "
        exit 1
    fi
    if [[ $partition_count -eq 1 ]]; then
        echo "One partition found on /dev/mapper/$mpathx."
    elif [[ $partition_count -eq 2 ]]; then
        echo "Two partitions found on /dev/mapper/$mpathx. Will extend the second partition."
        # check lvm tool installed
        output=`which pvdisplay`
        if [[ $? -ne 0 ]]; then
            echo "Error: lvm tool is not installed while two partitions found."
            echo "abort extending partition."
            exit 1
        fi
        # continue to check the second partition is a LVM physical volume
        output=`pvdisplay /dev/mapper/${part_prefix}2`
        rc=$?
        if [[ $rc -ne 0 ]]; then
            echo "Error: the second partition is not a LVM partition. output: $output."
            echo "Aborting extending partition"
            exit 1
        fi
    else
        echo "Error: $partition_count partitions found on /dev/mapper/$mpathx."
        echo "Only one or two partitions is supported on the root volume, aborting extending partition."
        exit 1
    fi
}

# Grow the filesystem on a single-partition (non-LVM) root disk after the
# partition itself has been extended.  Detects xfs vs ext and calls the
# matching resize tool.  Always terminates the script (exit 0 on success,
# exit 1 on failure).  Reads globals: $mpath, $mpathx.
function resize_single_rootfs {
    # handle the filesystem resize of single-nonlvm-root scenario
    echo "Start to resize root file system."
    # get the root partition device
    if [[ ${mpath:(-1)} == 'p' ]]; then
        # handle the 36005076802880052a000000000001069p1 case
        # NOTE(review): keeps the trailing 'p' so the partition node below
        # becomes .../${mpath}1 (e.g. ...069p1) — confirm against udev naming.
        mpathx=$mpath
    fi
    # get the root partition type
    root_partition="/dev/mapper/${mpathx}1"
    rootfs_type=`blkid -o export $root_partition | grep TYPE= | awk -F'=' '{print $2}'`
    if [[ $rootfs_type == 'xfs' ]]; then
        # resize with xfs_growfs
        out=`xfs_growfs / 2>&1`
        rc=$?
        if [[ $rc -ne 0 ]]; then
            echo "Failed to resize root file system, RC: $rc, Output: $out."
            exit 1
        else
            echo "Root file system resized successfully."
            exit 0
        fi
    else
        # ext file system, resize fs with resize2fs
        out=`resize2fs /dev/mapper/${mpathx}1 2>&1`
        rc=$?
        if [[ $rc -ne 0 ]]; then
            echo "Failed to resize root file system, RC: $rc, Output: $out."
            exit 1
        else
            # In some scenario, the resize2fs does not recognize the new partition size
            # Sample: "The filesystem is already 2621184 (4k) blocks long. Nothing to do!"
            # So add some retry here.
            if [[ $out =~ "Nothing to do!" ]]; then
                echo "Doing some retry for resize2fs."
                # Back-off schedule: retry with increasing sleeps up to ~60s.
                sleepTimes=".001 .01 .1 .5 1 2 3 5 8 15 22 34 60"
                for seconds in $sleepTimes; do
                    sleep $seconds
                    out=`resize2fs /dev/mapper/${mpathx}1 2>&1`
                    rc=$?
                    if [[ $rc -ne 0 ]]; then
                        echo "Failed to resize root file system, RC: $rc, Output: $out."
                        exit 1
                    fi
                    if [[ ! ($out =~ "Nothing to do!") ]]; then
                        break # successful - leave loop
                    fi
                done
                if [[ $out =~ "Nothing to do!" ]]; then
                    echo "Failed to resize root file system!"
                    exit 1
                fi
            fi
            echo "Root file system resized successfully."
            exit 0
        fi
    fi
}
exit 0 fi } # check multipathd service running multipathd_status=`systemctl status multipathd | grep "active (running)"` if [[ $? -ne 0 ]]; then echo "multipathd service is inactive, abort extending the root partition." exit 1 fi # In some scenario, the df output the wrong multipath name echo "Reloading multipath mapping." multipath -r &> /dev/null # check parted tool is installed output=`which parted` if [[ $? -ne 0 ]]; then echo "parted is not installed, please install this tool in the base image." echo "abort extending the root partition." exit 1 fi # get root partition mpath=`lsblk -s -l -o NAME,MOUNTPOINT | egrep ' /$' | awk '{print $1}'` rc=$? # sample mpath # on RHEL7 and RHEL8 # depends on user_friendly_names value in /etc/multipath.conf # if value is yes: # mpatha1 # else value is no (by defalut): # 36005076802880052a000000000001069 or # 36005076802880052a000000000001069p1 (sometimes with an additional 'p' before '1') if [[ $rc -ne 0 || "$mpath" == "" ]]; then echo "Unable to find a multipath root partition with lsblk -s." exit 1 fi # remove the last number 1 mpath=${mpath%1*} part_prefix=$mpath # handle the 36005076802880052a000000000001069p1 case if [[ ${mpath:(-1)} == 'p' ]]; then mpathx=${mpath%p*} else mpathx=$mpath fi echo "root device found : $mpathx." # Ensure the $mpathx path exists if [[ ! -r "/dev/mapper/$mpathx" ]]; then echo "/dev/mapper/$mpathx does not exist, abort extending partition." exit 1 fi # check partition number check_partition_layout # Check partition size and volume size (only for the one single / scenario) if [[ $partition_count -eq 1 ]]; then # Sample lsblk: # lsblk /dev/mapper/mpathb -l # NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT # mpathb 253:0 0 10G 0 mpath # mpathb1 253:1 0 10G 0 part / part_size=`lsblk /dev/mapper/$mpathx -l | awk 'NR>1 && $6=="part" {print $4}'` vol_size=`lsblk /dev/mapper/$mpathx -l | awk 'NR>1 && $6=="mpath" {print $4}'` echo "Partition size: $part_size, volume size: $vol_size." 
if [[ "$part_size" == "$vol_size" ]]; then echo "Partition size equals to the root volume size, no need to extend partition." exit 0 fi fi # sample dm_name: dm-0 dm_name=`readlink /dev/mapper/$mpathx | awk -F'/' '{print $2}'` echo "Find root partition on dm path: $dm_name" # get the devices name sdx to be used by growpart sds=`ls /sys/devices/virtual/block/$dm_name/slaves` part_idx=$partition_count # grow partition success=0 for sdN in $sds do out=`parted -s /dev/$sdN resizepart $part_idx 100% 2>&1` if [[ $? -eq 0 ]]; then success=1 else echo "/dev/$sdN resize failed with parted command, error: $out." continue fi done if [[ $success == 0 ]]; then echo "All slaves of $mpathx failed to be resized, abort extending the root partition." exit 1 fi # continue to resize multipath partition out=`parted -s /dev/mapper/$mpathx resizepart $part_idx 100% 2>&1` rc=$? # On RHEL8 the parted for mpathx would return 1 even if the partition is resized successfully. # so cann't decide whether this is success or not based on the rc and just print all info out. echo "Partition extended. RC: $rc, Output: $out." # partprobe - inform the OS of partition table changes out=`partprobe 2>&1` rc=$? if [[ $rc -ne 0 ]]; then echo "Failed to partprobe, RC: $rc, Output: $out." fi # resize the / for single root partition scenario # extend the lvm volume group for LVM scenario if [[ $partition_count -eq 1 ]]; then resize_single_rootfs else extend_lvm fizVMCloudConnector-1.6.3/zvmsdk/api.py0000775000175000017510000030522414315210052017165 0ustar ruirui00000000000000# Copyright 2017,2022 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
def check_guest_exist(check_index=0):
    """Decorator factory: verify the target guest(s) exist in the database.

    Before the wrapped SDKAPI method runs, the positional argument at
    ``check_index`` (a single userid or a list of userids) is upper-cased,
    substituted back into the argument tuple, and passed to
    ``self._vmops.check_guests_exist_in_db`` for validation.

    :param check_index: The parameter index of userid(s), default as 0
    """
    def outer(f):
        @six.wraps(f)
        def inner(self, *args, **kw):
            userids = args[check_index]
            if isinstance(userids, list):
                # convert all userids to upper case
                userids = [uid.upper() for uid in userids]
                # Rebuild args with the upper-cased list in place.
                new_args = (args[:check_index] + (userids,) +
                            args[check_index + 1:])
            else:
                # convert the userid to upper case
                userids = userids.upper()
                new_args = (args[:check_index] + (userids,) +
                            args[check_index + 1:])
                # Validation helper always takes a list.
                userids = [userids]
            self._vmops.check_guests_exist_in_db(userids)
            return f(self, *new_args, **kw)
        return inner
    return outer
:param check_index: The parameter index of fcp, default as 0 """ def outer(f): @six.wraps(f) def inner(self, *args, **kw): fcp = args[check_index] self._volumeop.check_fcp_exist_in_db(fcp) return f(self, *args, **kw) return inner return outer class SDKAPI(object): """Compute action interfaces.""" def __init__(self, **kwargs): self._vmops = vmops.get_vmops() self._smtclient = smtclient.get_smtclient() self._hostops = hostops.get_hostops() self._networkops = networkops.get_networkops() self._imageops = imageops.get_imageops() self._monitor = monitor.get_monitor() self._volumeop = volumeop.get_volumeop() self._GuestDbOperator = database.GuestDbOperator() self._NetworkDbOperator = database.NetworkDbOperator() @check_guest_exist() def guest_start(self, userid, timeout=0): """Power on a virtual machine. :param str userid: the id of the virtual machine to be power on :param int timeout: the timeout of waiting virtual machine reachable default as 0, which mean not wait for virtual machine reachable status :returns: None """ action = "start guest '%s'" % userid with zvmutils.log_and_reraise_sdkbase_error(action): self._vmops.guest_start(userid, timeout) @check_guest_exist() def guest_stop(self, userid, **kwargs): """Power off a virtual machine. :param str userid: the id of the virtual machine to be power off :param dict kwargs: - timeout=: Integer, time to wait for vm to be deactivate, the recommended value is 300 - poll_interval= Integer, how often to signal guest while waiting for it to be deactivate, the recommended value is 20 :returns: None """ action = "stop guest '%s'" % userid with zvmutils.log_and_reraise_sdkbase_error(action): self._vmops.guest_stop(userid, **kwargs) @check_guest_exist() def guest_softstop(self, userid, **kwargs): """Issue a shutdown command to shutdown the OS in a virtual machine and then log the virtual machine off z/VM.. 
:param str userid: the id of the virtual machine to be power off :param dict kwargs: - timeout=: Integer, time to wait for vm to be deactivate, the recommended value is 300 - poll_interval= Integer, how often to signal guest while waiting for it to be deactivate, the recommended value is 20 :returns: None """ action = "soft stop guest '%s'" % userid with zvmutils.log_and_reraise_sdkbase_error(action): self._vmops.guest_softstop(userid, **kwargs) @check_guest_exist() def guest_reboot(self, userid): """Reboot a virtual machine :param str userid: the id of the virtual machine to be reboot :returns: None """ action = "reboot guest '%s'" % userid with zvmutils.log_and_reraise_sdkbase_error(action): self._vmops.guest_reboot(userid) @check_guest_exist() def guest_reset(self, userid): """reset a virtual machine :param str userid: the id of the virtual machine to be reset :returns: None """ action = "reset guest '%s'" % userid with zvmutils.log_and_reraise_sdkbase_error(action): self._vmops.guest_reset(userid) @check_guest_exist() def guest_pause(self, userid): """Pause a virtual machine. :param str userid: the id of the virtual machine to be paused :returns: None """ action = "pause guest '%s'" % userid with zvmutils.log_and_reraise_sdkbase_error(action): self._vmops.guest_pause(userid) @check_guest_exist() def guest_unpause(self, userid): """Unpause a virtual machine. :param str userid: the id of the virtual machine to be unpaused :returns: None """ action = "unpause guest '%s'" % userid with zvmutils.log_and_reraise_sdkbase_error(action): self._vmops.guest_unpause(userid) @check_guest_exist() def guest_get_power_state(self, userid): """Returns power state.""" if not zvmutils.check_userid_exist(userid.upper()): LOG.error("User directory of '%s' does not exist " "although it is in DB. The guest could have been " "deleted out of z/VM Cloud Connector." 
% userid) raise exception.SDKObjectNotExistError( obj_desc=("Guest '%s'" % userid), modID='guest', rs=3) action = "get power state of guest '%s'" % userid with zvmutils.log_and_reraise_sdkbase_error(action): return self._vmops.get_power_state(userid) @check_guest_exist() def guest_get_info(self, userid): """Get the status of a virtual machine. :param str userid: the id of the virtual machine :returns: Dictionary contains: power_state: (str) the running state, one of on | off max_mem_kb: (int) the maximum memory in KBytes allowed mem_kb: (int) the memory in KBytes used by the instance num_cpu: (int) the number of virtual CPUs for the instance cpu_time_us: (int) the CPU time used in microseconds """ action = "get info of guest '%s'" % userid with zvmutils.log_and_reraise_sdkbase_error(action): return self._vmops.get_info(userid) def guest_get_power_state_real(self, userid): """Returns power state of a virtual machine from hypervisor.""" action = "get power state of guest '%s' from hypervisor" % userid with zvmutils.log_and_reraise_sdkbase_error(action): return self._vmops.get_power_state(userid) def guest_get_adapters_info(self, userid): """Get the network information of a virtual machine. this userid may not in zCC. 
:param str userid: the id of the virtual machine :returns: Dictionary contains: ip: (str) the IP address of the virtual machine mac: (str) the MAC address of the virtual machine """ action = "get network info of guest '%s'" % userid with zvmutils.log_and_reraise_sdkbase_error(action): return self._vmops.get_adapters_info(userid) def guest_get_user_direct(self, userid): """Get user direct of the specified guest vm :param str userid: the user id of the guest vm :returns: Dictionary describing user direct and check info result :rtype: dict """ action = "get the user direct of guest '%s'" % userid with zvmutils.log_and_reraise_sdkbase_error(action): inst_info = self._vmops.get_definition_info(userid) user_direct = inst_info['user_direct'] item = -1 new_info = "" for info in user_direct: item += 1 # replace password with ****** if info.startswith('USER') or info.startswith('IDENTITY'): fields = info.split() for i in range(len(fields)): if i != 2: new_info += (fields[i] + ' ') else: new_info += ('******' + ' ') user_direct[item] = new_info break inst_info['user_direct'] = user_direct return inst_info def guest_list(self): """list names of all the VMs on this host. :returns: names of the vm on this host, in a list. """ action = "list guests on host" with zvmutils.log_and_reraise_sdkbase_error(action): return self._vmops.guest_list() def host_get_info(self): """ Retrieve host information including host, memory, disk etc. :returns: Dictionary describing resources """ action = "get host information" with zvmutils.log_and_reraise_sdkbase_error(action): return self._hostops.get_info() def host_get_diskpool_volumes(self, disk_pool=None): """ Retrieve diskpool volumes. :param str disk_pool: the disk pool info. It use ':' to separate disk pool type and pool name, eg "ECKD:eckdpool" or "FBA:fbapool" :returns: Dictionary describing disk pool usage info """ # disk_pool is optional. 
disk_pool default to None because # it is more convenient for users to just type function name when # they want to get the disk pool info of CONF.zvm.disk_pool. # The default value of CONF.zvm.disk_pool is None, if it's configured, # the format must be "ECKD:eckdpool" or "FBA:fbapool". disk_pool = disk_pool or CONF.zvm.disk_pool if disk_pool is None: # Support disk_pool not configured, return empty list return {} if ':' not in disk_pool: msg = ('Invalid input parameter disk_pool, expect ":" in' 'disk_pool, eg. ECKD:eckdpool') LOG.error(msg) raise exception.SDKInvalidInputFormat(msg) diskpool_type = disk_pool.split(':')[0].upper() if diskpool_type not in ('ECKD', 'FBA'): msg = ('Invalid disk pool type found in disk_pool, expect' 'disk_pool like ECKD:eckdpool or FBA:fbapool') LOG.error(msg) raise exception.SDKInvalidInputFormat(msg) action = "get the volumes of disk pool: '%s'" % disk_pool with zvmutils.log_and_reraise_sdkbase_error(action): return self._hostops.diskpool_get_volumes(disk_pool) def host_get_volume_info(self, volume=None): """ Retrieve volume information. :param str volume: the volume name to identify the DASD device. It's 1 to 6 hexadecimal characters. :returns: Dictionary describing the volume description info """ volume_name = volume if volume_name is None: errmsg = ("Invalid volume input None, volume must be specified.") LOG.error(errmsg) raise exception.SDKInvalidInputFormat(msg=errmsg) action = "get information of the volume: '%s'" % volume_name with zvmutils.log_and_reraise_sdkbase_error(action): return self._hostops.get_volume_info(volume_name.upper()) def host_get_guest_list(self): """list names of all the VMs on the host. :returns: names of the vm on this hypervisor, in a list. """ action = "list guests on the host" with zvmutils.log_and_reraise_sdkbase_error(action): return self._hostops.guest_list() def host_diskpool_get_info(self, disk_pool=None): """ Retrieve diskpool information. :param str disk_pool: the disk pool info. 
It use ':' to separate disk pool type and pool name, eg "ECKD:eckdpool" or "FBA:fbapool" :returns: Dictionary describing disk pool usage info """ # disk_pool is optional. disk_pool default to None because # it is more convenient for users to just type function name when # they want to get the disk pool info of CONF.zvm.disk_pool. # The default value of CONF.zvm.disk_pool is None, if it's configured, # the format must be "ECKD:eckdpool" or "FBA:fbapool". disk_pool = disk_pool or CONF.zvm.disk_pool if disk_pool is None: # Return 0 directly if disk_pool not configured return {'disk_total': 0, 'disk_used': 0, 'disk_available': 0} if ':' not in disk_pool: msg = ('Invalid input parameter disk_pool, expect ":" in' 'disk_pool, eg. ECKD:eckdpool') LOG.error(msg) raise exception.SDKInvalidInputFormat(msg) diskpool_type = disk_pool.split(':')[0].upper() diskpool_name = disk_pool.split(':')[1] if diskpool_type not in ('ECKD', 'FBA'): msg = ('Invalid disk pool type found in disk_pool, expect' 'disk_pool like ECKD:eckdpool or FBA:fbapool') LOG.error(msg) raise exception.SDKInvalidInputFormat(msg) action = "get information of disk pool: '%s'" % disk_pool with zvmutils.log_and_reraise_sdkbase_error(action): return self._hostops.diskpool_get_info(diskpool_name) def image_delete(self, image_name): """Delete image from image repository :param image_name: the name of the image to be deleted """ try: self._imageops.image_delete(image_name) except exception.SDKBaseException: LOG.error("Failed to delete image '%s'" % image_name) raise def image_get_root_disk_size(self, image_name): """Get the root disk size of the image :param image_name: the image name in image Repository :returns: the disk size in units CYL or BLK """ try: return self._imageops.image_get_root_disk_size(image_name) except exception.SDKBaseException: LOG.error("Failed to get root disk size units of image '%s'" % image_name) raise def image_import(self, image_name, url, image_meta, remote_host=None): """Import image to 
zvmsdk image repository :param image_name: image name that can be uniquely identify an image :param str url: image url to specify the location of image such as http://netloc/path/to/file.tar.gz.0 https://netloc/path/to/file.tar.gz.0 file:///path/to/file.tar.gz.0 :param dict image_meta: a dictionary to describe the image info, such as md5sum, os_version. For example: {'os_version': 'rhel6.2', 'md5sum': ' 46f199c336eab1e35a72fa6b5f6f11f5', 'disk_type': 'DASD'} :param string remote_host: if the image url schema is file, the remote_host is used to indicate where the image comes from, the format is username@IP eg. nova@192.168.99.1, the default value is None, it indicate the image is from a local file system. If the image url schema is http/https, this value will be useless """ try: self._imageops.image_import(image_name, url, image_meta, remote_host=remote_host) except exception.SDKBaseException: LOG.error("Failed to import image '%s'" % image_name) raise def image_query(self, imagename=None): """Get the list of image info in image repository :param imagename: Used to retrieve the specified image info, if not specified, all images info will be returned :returns: A list that contains the specified or all images info """ try: return self._imageops.image_query(imagename) except exception.SDKBaseException: LOG.error("Failed to query image") raise def image_export(self, image_name, dest_url, remote_host=None): """Export the image to the specified location :param image_name: image name that can be uniquely identify an image :param dest_url: the location of exported image, eg. file:///opt/images/export.img, now only support export to remote server or local server's file system :param remote_host: the server that the image will be export to, if remote_host is None, the image will be stored in the dest_path in local server, the format is username@IP eg. 
nova@9.x.x.x :returns a dictionary that contains the exported image info { 'image_name': the image_name that exported 'image_path': the image_path after exported 'os_version': the os version of the exported image 'md5sum': the md5sum of the original image 'comments': the comments of the original image } """ try: return self._imageops.image_export(image_name, dest_url, remote_host) except exception.SDKBaseException: LOG.error("Failed to export image '%s'" % image_name) raise @check_guest_exist() def guest_deploy(self, userid, image_name, transportfiles=None, remotehost=None, vdev=None, hostname=None, skipdiskcopy=False): """ Deploy the image to vm. :param userid: (str) the user id of the vm :param image_name: (str) If the skipdiskcopy is False, this would be used as the name of image that used to deploy the vm; Otherwise, this value should be the os version. :param transportfiles: (str) the files that used to customize the vm :param remotehost: the server where the transportfiles located, the format is username@IP, eg nova@192.168.99.1 :param vdev: (str) the device that image will be deploy to :param hostname: (str) the hostname of the vm. This parameter will be ignored if transportfiles present. :param skipdiskcopy: (bool) whether to skip the disk copy process. If True, the os version should be specified in the parameter image_name. 
""" action = ("deploy image '%(img)s' to guest '%(vm)s'" % {'img': image_name, 'vm': userid}) with zvmutils.log_and_reraise_sdkbase_error(action): self._vmops.guest_deploy(userid, image_name, transportfiles, remotehost, vdev, hostname, skipdiskcopy) @check_guest_exist() def guest_capture(self, userid, image_name, capture_type='rootonly', compress_level=6): """ Capture the guest to generate a image :param userid: (str) the user id of the vm :param image_name: (str) the unique image name after capture :param capture_type: (str) the type of capture, the value can be: rootonly: indicate just root device will be captured alldisks: indicate all the devices of the userid will be captured :param compress_level: the compression level of the image, default is 6 """ action = ("capture guest '%(vm)s' to generate image '%(img)s'" % {'vm': userid, 'img': image_name}) with zvmutils.log_and_reraise_sdkbase_error(action): self._vmops.guest_capture(userid, image_name, capture_type=capture_type, compress_level=compress_level) @check_guest_exist() def guest_create_nic(self, userid, vdev=None, nic_id=None, mac_addr=None, active=False): """ Create the nic for the vm, add NICDEF record into the user direct. :param str userid: the user id of the vm :param str vdev: nic device number, 1- to 4- hexadecimal digits :param str nic_id: nic identifier :param str mac_addr: mac address, it is only be used when changing the guest's user direct. 
Format should be xx:xx:xx:xx:xx:xx, and x is a hexadecimal digit :param bool active: whether add a nic on active guest system :returns: nic device number, 1- to 4- hexadecimal digits :rtype: str """ if mac_addr is not None: if not zvmutils.valid_mac_addr(mac_addr): raise exception.SDKInvalidInputFormat( msg=("Invalid mac address, format should be " "xx:xx:xx:xx:xx:xx, and x is a hexadecimal digit")) return self._networkops.create_nic(userid, vdev=vdev, nic_id=nic_id, mac_addr=mac_addr, active=active) @check_guest_exist() def guest_delete_nic(self, userid, vdev, active=False): """ delete the nic for the vm :param str userid: the user id of the vm :param str vdev: nic device number, 1- to 4- hexadecimal digits :param bool active: whether delete a nic on active guest system """ self._networkops.delete_nic(userid, vdev, active=active) @check_guest_exist() def guest_get_definition_info(self, userid, **kwargs): """Get definition info for the specified guest vm, also could be used to check specific info. :param str userid: the user id of the guest vm :param dict kwargs: Dictionary used to check specific info in user direct. Valid keywords for kwargs: nic_coupled=, where is the virtual device number of the nic to be checked the couple status. 
        :returns: Dictionary describing user direct and check info result
        :rtype: dict
        """
        action = "get the definition info of guest '%s'" % userid
        with zvmutils.log_and_reraise_sdkbase_error(action):
            return self._vmops.get_definition_info(userid, **kwargs)

    # NOTE(review): this bare string sits above the def instead of inside
    # it, so it is NOT the function's __doc__ (it is a no-op statement);
    # kept in place to preserve the original statement order.
    """Parse the nics' info from the user directory

    :param user_direct: (str) the user directory info to be parsed
    """
    def _parse_nic_info(self, user_direct):
        # Scan every NICDEF statement in the user directory and collect
        # per-nic attributes (vswitch, mac, vlan id) keyed by the nic's
        # device number.
        nics_info = {}
        for nic_info in user_direct:
            if nic_info.startswith('NICDEF'):
                split_info = nic_info.split()
                nic_id = split_info[1].strip()
                count = 2
                one_nic = nics_info.get(nic_id, {})
                while count < len(split_info):
                    if split_info[count] == 'LAN':
                        # 'LAN <owner> <name>': the vswitch name is two
                        # tokens after the keyword.
                        one_nic['vswitch'] = split_info[count + 2].strip()
                        count += 3
                        continue
                    elif split_info[count] == 'MACID':
                        one_nic['mac'] = split_info[count + 1].strip()
                        count += 2
                        continue
                    elif split_info[count] == 'VLAN':
                        one_nic['vid'] = split_info[count + 1].strip()
                        count += 2
                        continue
                    else:
                        count += 1
                nics_info[nic_id] = one_nic
        return nics_info

    def guest_register(self, userid, meta, net_set, port_macs=None):
        """Register vm by inserting or updating DB for e.g. migration
        and onboarding.

        :param userid: (str) the userid of the vm to be relocated or tested
        :param meta: (str) the metadata of the vm to be relocated or tested
        :param net_set: (str) the net_set of the vm, default is 1.
        :param port_macs: (dict) the virtual interface port id maps with
               mac id.
               Format: {macid1: portid1, macid2: portid2}.
               For example:
               {'EF5091': '6e2ecc4f-14a2-4f33-9f12-5ac4a42f97e7',
                '69FCF1': '389dee5e-7b03-405c-b1e8-7c9c235d1425'}
        """
        if port_macs is not None and not isinstance(port_macs, dict):
            msg = ('Invalid input parameter port_macs, expect dict')
            LOG.error(msg)
            raise exception.SDKInvalidInputFormat(msg)
        userid = userid.upper()
        if not zvmutils.check_userid_exist(userid):
            LOG.error("User directory '%s' does not exist." % userid)
            raise exception.SDKObjectNotExistError(
                obj_desc=("Guest '%s'" % userid), modID='guest')
        else:
            action = "query the guest in database."
            with zvmutils.log_and_reraise_sdkbase_error(action):
                guest = self._GuestDbOperator.get_guest_by_userid(userid)
            if guest is not None:
                # The below handling is for migration
                action = "list all guests in database which has been migrated."
                with zvmutils.log_and_reraise_sdkbase_error(action):
                    guests = self._GuestDbOperator.get_migrated_guest_list()
                if userid in str(guests):
                    """change comments for vm"""
                    comments = self._GuestDbOperator.get_comments_by_userid(
                        userid)
                    comments['migrated'] = 0
                    action = "update guest '%s' in database" % userid
                    with zvmutils.log_and_reraise_sdkbase_error(action):
                        self._GuestDbOperator.update_guest_by_userid(userid,
                            comments=comments)
                    LOG.info("Guest %s comments updated." % userid)
                # We just return no matter onboarding or migration
                # since the guest exists
                return
            # add one record for new vm for both onboarding and migration,
            # and even others later.
            action = "add guest '%s' to database" % userid
            with zvmutils.log_and_reraise_sdkbase_error(action):
                self._GuestDbOperator.add_guest_registered(userid, meta,
                                                           net_set)
            # We need to query and add vswitch to the database.
            action = "add switches of guest '%s' to database" % userid
            # The result of get_adpaters_info:
            # [{'adapter_address': '1000', 'adapter_status': '02',
            #   'lan_owner': 'SYSTEM', 'lan_name': 'VSC11590',
            #   'mac_address': '02:55:36:00:00:10', 'mac_ip_version': '4',
            #   'mac_ip_address': '9.152.85.95'}]
            adapters_info = self._smtclient.get_adapters_info(userid)
            for adapter in adapters_info:
                interface = adapter.get('adapter_address')
                switch = adapter.get('lan_name')
                port = None
                if port_macs is not None:
                    if adapter.get('mac_address'):
                        # The last three bytes of the MAC (colons removed,
                        # upper-cased) form the MACID used as lookup key.
                        mac = ''.join(
                            adapter.get('mac_address').split(':'))[6:].upper()
                        if mac in port_macs.keys():
                            port = port_macs[mac]
                    if port is None:
                        LOG.warning("Port not found for nic %s, %s." %
                                    (interface, port_macs))
                    else:
                        LOG.info("Port found for nic %s." % interface)
                with zvmutils.log_and_reraise_sdkbase_error(action):
                    self._NetworkDbOperator.switch_add_record(
                        userid, interface, port, switch)
        LOG.info("Guest %s registered." % userid)

    # Deregister the guest (not delete), this function has no relationship
    # with migration.
    def guest_deregister(self, userid):
        """DB operation for deregister vm for offboard (dismiss) request.

        :param userid: (str) the userid of the vm to be deregistered
        """
        userid = userid.upper()
        # We don't check if the VM exists in the LPAR or zCC DB, just delete
        # it from DB anyway, cause there could be the case that the VM is
        # deleted outside of zCC e.g. smcli, and the DB record is still
        # there.
        if not self._vmops.check_guests_exist_in_db(userid, raise_exc=False):
            LOG.warning("User directory '%s' does not exist in guest DB."
                        "But let's still delete it as there is also switch"
                        " table" % userid)
        action = "delete switches of guest '%s' from database" % userid
        with zvmutils.log_and_reraise_sdkbase_error(action):
            self._NetworkDbOperator.switch_delete_record_for_userid(userid)
        action = "delete guest '%s' from database" % userid
        with zvmutils.log_and_reraise_sdkbase_error(action):
            self._GuestDbOperator.delete_guest_by_userid(userid)
        LOG.info("Guest %s deregistered." % userid)

    @check_guest_exist()
    def guest_live_migrate(self, userid, dest_zcc_userid, destination,
                           parms, lgr_action):
        """Move an eligible, running z/VM(R) virtual machine transparently
        from one z/VM system to another within an SSI cluster.

        :param userid: (str) the userid of the vm to be relocated or tested
        :param dest_zcc_userid: (str) the userid of zcc on destination. If
               None, no any userid is set into the guest.
        :param destination: (str) the system ID of the z/VM system to which
               the specified vm will be relocated or tested.
        :param parms: (dict) a dictionary of options for relocation.
               It has one dictionary that contains some of the below keys:
               {'maxtotal': i, 'maxquiesce': i, 'immediate': str}
               In which,
               'maxtotal': indicates the maximum total time (in seconds)
               that the command issuer is willing to wait for the entire
               relocation to complete, or -1 to indicate there is no limit
               for time.
               'maxquiesce': indicates the maximum quiesce time for this
               relocation. This is the amount of time (in seconds) a
               virtual machine may be stopped during a relocation attempt,
               or -1 to indicate there is no limit for time.
               'immediate': if present, immediate=YES is set, which causes
               the VMRELOCATE command to do one early pass through virtual
               machine storage and then go directly to the quiesce stage.
        :param lgr_action: (str) indicates the action is move or test for
               vm.
        """
        if lgr_action.lower() == 'move':
            if dest_zcc_userid is None or dest_zcc_userid.strip() == '':
                msg = "dest_zcc_userid is empty so it will not be set " \
                      "during LGR."
                LOG.info(msg)
            # Live_migrate the guest
            operation = "Move guest '%s' to SSI '%s'" % (userid,
                                                         destination)
            with zvmutils.log_and_reraise_sdkbase_error(operation):
                self._vmops.live_migrate_vm(userid, destination,
                                            parms, lgr_action)
            comments = self._GuestDbOperator.get_comments_by_userid(userid)
            comments['migrated'] = 1
            action = "update guest '%s' in database" % userid
            with zvmutils.log_and_reraise_sdkbase_error(action):
                self._GuestDbOperator.update_guest_by_userid(userid,
                    comments=comments)
            # Skip IUCV authorization for RHCOS guests
            is_rhcos = 'rhcos' in self._GuestDbOperator.get_guest_by_userid(
                userid)[2].lower()
            if is_rhcos:
                LOG.debug("Skip IUCV authorization when migrating RHCOS "
                          "guests: %s" % userid)
            # Add authorization for new zcc.
            # This should be done after migration succeeds.
            # If the dest_zcc_userid is empty, nothing will be done because
            # this should be a onboarded guest and no permission to do it.
            if (dest_zcc_userid is not None and
                    dest_zcc_userid.strip() != '' and
                    not is_rhcos):
                cmd = ('echo -n %s > /etc/iucv_authorized_userid\n' %
                       dest_zcc_userid)
                # NOTE(review): execute_cmd's return value is compared to
                # 0 here; confirm it returns a numeric rc, not output.
                rc = self._smtclient.execute_cmd(userid, cmd)
                if rc != 0:
                    err_msg = ("Add authorization for new zcc failed")
                    LOG.error(err_msg)
        if lgr_action.lower() == 'test':
            operation = "Test move guest '%s' to SSI '%s'" % (userid,
                                                              destination)
            with zvmutils.log_and_reraise_sdkbase_error(operation):
                self._vmops.live_migrate_vm(userid, destination,
                                            parms, lgr_action)

    def guest_create(self, userid, vcpus, memory, disk_list=None,
                     user_profile='',
                     max_cpu=CONF.zvm.user_default_max_cpu,
                     max_mem=CONF.zvm.user_default_max_memory,
                     ipl_from='', ipl_param='', ipl_loadparam='',
                     dedicate_vdevs=None, loaddev={}, account='',
                     comment_list=None, cschedule='', cshare='',
                     rdomain='', pcif=''):
        """Create a vm in z/VM.

        :param userid: (str) the userid of the vm to be created
        :param vcpus: (int) amount of vcpus
        :param memory: (int) size of memory in MB
        :param disk_list: (dict) a list of disks info for the guest. It has
               one dictionary that contain some of the below keys for each
               disk, the root disk should be the first element in the list,
               the format is:
               {'size': str, 'format': str, 'is_boot_disk': bool,
                'disk_pool': str}
               In which,
               'size': case insensitive, the unit can be in Megabytes (M),
               Gigabytes (G), or number of cylinders/blocks, eg 512M, 1g or
               just 2000.
               'format': can be ext2, ext3, ext4, xfs and none.
               'is_boot_disk': For root disk, this key must be set to
               indicate the image that will be deployed on this disk.
               'disk_pool': optional, if not specified, the disk will be
               created by using the value from configure file, the format
               is ECKD:eckdpoolname or FBA:fbapoolname.
               For example:
               [{'size': '1g', 'is_boot_disk': True,
                 'disk_pool': 'ECKD:eckdpool1'},
                {'size': '200000', 'disk_pool': 'FBA:fbapool1',
                 'format': 'ext3'},
                {'size': '1g', 'format': 'ext3'}]
               In this case it will create one disk 0100 (in case the vdev
               for root disk is 0100) with size 1g from ECKD disk pool
               eckdpool1 for guest, then set IPL 0100 in guest's user
               directory, and it will create 0101 with 200000 blocks from
               FBA disk pool fbapool1, and formated with ext3. As for the
               third case, if the disk_pool isn't configured in configure
               file, the default value is None, the disk_pool here is None,
               report error. If it's configured, such as ECKD:eckdpool2, it
               will create 0102 with size 1g from ECKD diskpool eckdpool2
               for guest.
        :param user_profile: (str) the profile for the guest
        :param max_cpu: (int) the maximum number of virtual cpu this user
               can define. The value should be a decimal value between 1
               and 64.
        :param max_mem: (str) the maximum size of memory the user can
               define. The value should be specified by 1-4 bits of number
               suffixed by either M (Megabytes) or G (Gigabytes). And the
               number should be an integer.
        :param ipl_from: (str) where to ipl the guest from, it can be given
               by guest input param, e.g CMS.
        :param ipl_param: the param to use when IPL for as PARM
        :param ipl_loadparam: the param to use when IPL for as LOADPARM
        :param dedicate_vdevs: (list) the list of device vdevs to dedicate
               to the guest.
        :param loaddev: (dict) the loaddev parms to add in the guest
               directory. Current supported key includes: 'portname',
               'lun'. Both the 'portname' and 'lun' can specify only one
               one- to eight-byte hexadecimal value in the range of
               0-FFFFFFFFFFFFFFFF. The format should be:
               {'portname': str, 'lun': str}
        :param account: (str) account string, see
               https://www.ibm.com/docs/en/zvm/6.4?topic=SSB27U_6.4.0/
               com.ibm.zvm.v640.hcpa5/daccoun.htm#daccoun
        :param comment_list: (array) a list of comment string
        :param cschedule: a command input for schedule cpu pool
        :param cshare: a command input for share settings
        :param rdomain: a command input for relocation domain
        :param pcif: a command input for pci function
        """
        # NOTE(review): 'loaddev={}' above is a mutable default argument;
        # it is only read (never mutated) in this method so the shared-
        # state pitfall does not trigger here, but confirm downstream
        # create_vm expects a dict before changing the default to None.
        dedicate_vdevs = dedicate_vdevs or []

        userid = userid.upper()

        if disk_list:
            # special case for swap disk, for boot from volume, might add
            # swap disk but not disk pool given, then we use vdisk instead
            swap_only = False
            if len(disk_list) == 1:
                disk = disk_list[0]
                if 'format' in disk and disk['format'].lower() == 'swap':
                    swap_only = True

            for disk in disk_list:
                if not isinstance(disk, dict):
                    errmsg = ('Invalid "disk_list" input, it should be a '
                              'dictionary. Details could be found in doc.')
                    LOG.error(errmsg)
                    raise exception.SDKInvalidInputFormat(msg=errmsg)

                # 'size' is required for each disk
                if 'size' not in disk.keys():
                    errmsg = ('Invalid "disk_list" input, "size" is required '
                              'for each disk.')
                    LOG.error(errmsg)
                    raise exception.SDKInvalidInputFormat(msg=errmsg)

                # check disk_pool
                disk_pool = disk.get('disk_pool') or CONF.zvm.disk_pool
                if not swap_only:
                    if disk_pool is None:
                        errmsg = ("Invalid disk_pool input, disk_pool should"
                                  " be configured for sdkserver.")
                        LOG.error(errmsg)
                        raise exception.SDKInvalidInputFormat(msg=errmsg)
                    # 'disk_pool' format check
                    if ':' not in disk_pool or (disk_pool.split(':')[0].upper()
                            not in ['ECKD', 'FBA']):
                        errmsg = ("Invalid disk_pool input, its format must be"
                                  " ECKD:eckdpoolname or FBA:fbapoolname")
                        LOG.error(errmsg)
                        raise exception.SDKInvalidInputFormat(msg=errmsg)
                else:
                    # in this case, it's swap only, and we will check whether
                    # no VDISK is allowed, if not allow, then return error
                    if disk_pool is None and CONF.zvm.swap_force_mdisk:
                        errmsg = ("Invalid disk_pool input, disk_pool should"
                                  " be configured for sdkserver and use"
                                  " VDISK as swap disk is not configured."
                                  " check CONF.zvm.swap_force_mdisk for"
                                  " additional information.")
                        LOG.error(errmsg)
                        raise exception.SDKInvalidInputFormat(msg=errmsg)

                # 'format' value check
                if ('format' in disk.keys()) and (disk['format'].lower()
                        not in ('ext2', 'ext3', 'ext4', 'swap', 'xfs',
                                'none')):
                    errmsg = ("Invalid disk_pool input, supported 'format' "
                              "includes 'ext2', 'ext3', 'ext4', 'xfs', "
                              "'swap', 'none'")
                    LOG.error(errmsg)
                    raise exception.SDKInvalidInputFormat(msg=errmsg)

        if dedicate_vdevs and not isinstance(dedicate_vdevs, list):
            errmsg = ('Invalid "dedicate_vdevs" input, it should be a '
                      'list. Details could be found in doc.')
            LOG.error(errmsg)
            raise exception.SDKInvalidInputFormat(msg=errmsg)

        if loaddev and not isinstance(loaddev, dict):
            errmsg = ('Invalid "loaddev" input, it should be a '
                      'dictionary. Details could be found in doc.')
            LOG.error(errmsg)
            raise exception.SDKInvalidInputFormat(msg=errmsg)

        if not user_profile or len(user_profile) == 0:
            user_profile = CONF.zvm.user_profile

        action = "create guest '%s'" % userid
        with zvmutils.log_and_reraise_sdkbase_error(action):
            return self._vmops.create_vm(userid, vcpus, memory, disk_list,
                                         user_profile, max_cpu, max_mem,
                                         ipl_from, ipl_param, ipl_loadparam,
                                         dedicate_vdevs, loaddev, account,
                                         comment_list, cschedule, cshare,
                                         rdomain, pcif)

    @check_guest_exist()
    def guest_live_resize_cpus(self, userid, cpu_cnt):
        """Live resize virtual cpus of guests.

        :param userid: (str) the userid of the guest to be live resized
        :param cpu_cnt: (int) the number of virtual cpus that the guest
               should have in active state after live resize. The value
               should be an integer between 1 and 64.
        """
        action = "live resize guest '%s' to have '%i' virtual cpus" % (
            userid, cpu_cnt)
        LOG.info("Begin to %s" % action)
        with zvmutils.log_and_reraise_sdkbase_error(action):
            self._vmops.live_resize_cpus(userid, cpu_cnt)
        LOG.info("%s successfully." % action)

    @check_guest_exist()
    def guest_resize_cpus(self, userid, cpu_cnt):
        """Resize virtual cpus of guests.

        :param userid: (str) the userid of the guest to be resized
        :param cpu_cnt: (int) the number of virtual cpus that the guest
               should have defined in user directory after resize. The
               value should be an integer between 1 and 64.
        """
        action = "resize guest '%s' to have '%i' virtual cpus" % (
            userid, cpu_cnt)
        LOG.info("Begin to %s" % action)
        with zvmutils.log_and_reraise_sdkbase_error(action):
            self._vmops.resize_cpus(userid, cpu_cnt)
        LOG.info("%s successfully." % action)

    @check_guest_exist()
    def guest_live_resize_mem(self, userid, size):
        """Live resize memory of guests.

        :param userid: (str) the userid of the guest to be live resized
        :param size: (str) the memory size that the guest should have in
               available status after live resize.
               The value should be specified by 1-4 bits of number suffixed
               by either M (Megabytes) or G (Gigabytes). And the number
               should be an integer.
        """
        action = "live resize guest '%s' to have '%s' memory" % (
            userid, size)
        LOG.info("Begin to %s" % action)
        with zvmutils.log_and_reraise_sdkbase_error(action):
            self._vmops.live_resize_memory(userid, size)
        LOG.info("%s successfully." % action)

    @check_guest_exist()
    def guest_resize_mem(self, userid, size):
        """Resize memory of guests.

        :param userid: (str) the userid of the guest to be resized
        :param size: (str) the memory size that the guest should have
               defined in user directory after resize. The value should be
               specified by 1-4 bits of number suffixed by either
               M (Megabytes) or G (Gigabytes). And the number should be an
               integer.
        """
        action = "resize guest '%s' to have '%s' memory" % (userid, size)
        LOG.info("Begin to %s" % action)
        with zvmutils.log_and_reraise_sdkbase_error(action):
            self._vmops.resize_memory(userid, size)
        LOG.info("%s successfully." % action)

    @check_guest_exist()
    def guest_create_disks(self, userid, disk_list):
        """Add disks to an existing guest vm.

        :param userid: (str) the userid of the vm to be created
        :param disk_list: (list) a list of disks info for the guest. It has
               one dictionary that contain some of the below keys for each
               disk, the root disk should be the first element in the list,
               the format is:
               {'size': str, 'format': str, 'is_boot_disk': bool,
                'disk_pool': str}
               In which,
               'size': case insensitive, the unit can be in Megabytes (M),
               Gigabytes (G), or number of cylinders/blocks, eg 512M, 1g or
               just 2000.
               'format': optional, can be ext2, ext3, ext4, xfs, if not
               specified, the disk will not be formatted.
               'is_boot_disk': For root disk, this key must be set to
               indicate the image that will be deployed on this disk.
               'disk_pool': optional, if not specified, the disk will be
               created by using the value from configure file, the format
               is ECKD:eckdpoolname or FBA:fbapoolname.
               For example:
               [{'size': '1g', 'is_boot_disk': True,
                 'disk_pool': 'ECKD:eckdpool1'},
                {'size': '200000', 'disk_pool': 'FBA:fbapool1',
                 'format': 'ext3'},
                {'size': '1g', 'format': 'ext3'}]
               In this case it will create one disk 0100 (in case the vdev
               for root disk is 0100) with size 1g from ECKD disk pool
               eckdpool1 for guest, then set IPL 0100 in guest's user
               directory, and it will create 0101 with 200000 blocks from
               FBA disk pool fbapool1, and formated with ext3. As for the
               third case, if the disk_pool isn't configured in configure
               file, the default value is None, the disk_pool here is None,
               report error. If it's configured, such as ECKD:eckdpool2, it
               will create 0102 with size 1g from ECKD diskpool eckdpool2
               for guest.
        """
        if disk_list == [] or disk_list is None:
            # nothing to do
            LOG.debug("No disk specified when calling guest_create_disks, "
                      "nothing happened")
            return

        for disk in disk_list:
            if not isinstance(disk, dict):
                errmsg = ('Invalid "disk_list" input, it should be a '
                          'dictionary. Details could be found in doc.')
                LOG.error(errmsg)
                raise exception.SDKInvalidInputFormat(msg=errmsg)
            # check disk_pool
            disk_pool = disk.get('disk_pool') or CONF.zvm.disk_pool
            if disk_pool is None:
                errmsg = ("Invalid disk_pool input, it should be configured"
                          " for sdkserver.")
                LOG.error(errmsg)
                raise exception.SDKInvalidInputFormat(msg=errmsg)

        action = "create disks '%s' for guest '%s'" % (str(disk_list),
                                                       userid)
        with zvmutils.log_and_reraise_sdkbase_error(action):
            return self._vmops.create_disks(userid, disk_list)

    @check_guest_exist()
    def guest_delete_disks(self, userid, disk_vdev_list):
        """Delete disks from an existing guest vm.

        :param userid: (str) the userid of the vm to be deleted
        :param disk_vdev_list: (list) the vdev list of disks to be deleted,
               for example: ['0101', '0102']
        """
        action = "delete disks '%s' from guest '%s'" % (str(disk_vdev_list),
                                                        userid)
        with zvmutils.log_and_reraise_sdkbase_error(action):
            self._vmops.delete_disks(userid, disk_vdev_list)

    @check_guest_exist()
    def guest_nic_couple_to_vswitch(self, userid, nic_vdev, vswitch_name,
                                    active=False, vlan_id=-1):
        """Couple nic device to specified vswitch.

        :param str userid: the user's name who owns the nic
        :param str nic_vdev: nic device number, 1- to 4- hexadecimal digits
        :param str vswitch_name: the name of the vswitch
        :param bool active: whether make the change on active guest system
        :param str vlan_id: the VLAN ID of the NIC
        """
        self._networkops.couple_nic_to_vswitch(userid, nic_vdev,
                                               vswitch_name, active=active,
                                               vlan_id=vlan_id)

    @check_guest_exist()
    def guest_nic_uncouple_from_vswitch(self, userid, nic_vdev,
                                        active=False):
        """Disconnect nic device with network.

        :param str userid: the user's name who owns the nic
        :param str nic_vdev: nic device number, 1- to 4- hexadecimal digits
        :param bool active: whether make the change on active guest system
        """
        self._networkops.uncouple_nic_from_vswitch(userid, nic_vdev,
                                                   active=active)

    def vswitch_get_list(self):
        """Get the vswitch list.

        :returns: vswitch name list
        :rtype: list
        """
        return self._networkops.get_vswitch_list()

    def vswitch_create(self, name, rdev=None, controller='*',
                       connection='CONNECT', network_type='ETHERNET',
                       router="NONROUTER", vid='UNAWARE',
                       port_type='ACCESS', gvrp='GVRP', queue_mem=8,
                       native_vid=1, persist=True):
        """Create vswitch.

        :param str name: the vswitch name
        :param str rdev: the real device number, a maximum of three
               devices, all 1-4 characters in length, delimited by blanks.
               'NONE' may also be specified
        :param str controller: the vswitch's controller, it could be the
               userid controlling the real device, or '*' to specifies that
               any available controller may be used
        :param str connection:
               - CONnect: Activate the real device connection.
               - DISCONnect: Do not activate the real device connection.
               - NOUPLINK: The vswitch will never have connectivity through
                 the UPLINK port
        :param str network_type: Specifies the transport mechanism to be
               used for the vswitch, as follows: IP, ETHERNET
        :param str router:
               - NONrouter: The OSA-Express device identified in
                 real_device_address= will not act as a router to the
                 vswitch
               - PRIrouter: The OSA-Express device identified in
                 real_device_address= will act as a primary router to the
                 vswitch
               - Note: If the network_type is ETHERNET, this value must be
                 unspecified, otherwise, if this value is unspecified,
                 default is NONROUTER
        :param str/int vid: the VLAN ID. This can be any of the following
               values: UNAWARE, AWARE or 1-4094
        :param str port_type:
               - ACCESS: The default porttype attribute for guests
                 authorized for the virtual switch. The guest is unaware of
                 VLAN IDs and sends and receives only untagged traffic
               - TRUNK: The default porttype attribute for guests
                 authorized for the virtual switch. The guest is VLAN aware
                 and sends and receives tagged traffic for those VLANs to
                 which the guest is authorized. If the guest is also
                 authorized to the natvid, untagged traffic sent or
                 received by the guest is associated with the native
                 VLAN ID (natvid) of the virtual switch.
        :param str gvrp:
               - GVRP: Indicates that the VLAN IDs in use on the virtual
                 switch should be registered with GVRP-aware switches on
                 the LAN. This provides dynamic VLAN registration and VLAN
                 registration removal for networking switches. This
                 eliminates the need to manually configure the individual
                 port VLAN assignments.
               - NOGVRP: Do not register VLAN IDs with GVRP-aware switches
                 on the LAN. When NOGVRP is specified VLAN port assignments
                 must be configured manually
        :param int queue_mem: a number between 1 and 8, specifying the QDIO
               buffer size in megabytes.
        :param int native_vid: the native vlan id, 1-4094 or None
        :param bool persist: whether create the vswitch in the permanent
               configuration for the system
        """
        if ((queue_mem < 1) or (queue_mem > 8)):
            errmsg = ('API vswitch_create: Invalid "queue_mem" input, '
                      'it should be 1-8')
            raise exception.SDKInvalidInputFormat(msg=errmsg)

        if isinstance(vid, int) or vid.upper() != 'UNAWARE':
            # native_vid only matters for a VLAN-aware vswitch.
            if ((native_vid is not None) and
                    ((native_vid < 1) or (native_vid > 4094))):
                errmsg = ('API vswitch_create: Invalid "native_vid" input, '
                          'it should be 1-4094 or None')
                raise exception.SDKInvalidInputFormat(msg=errmsg)

        if network_type.upper() == 'ETHERNET':
            # ETHERNET transport must not carry a router role (see the
            # 'router' parameter note in the docstring).
            router = None

        self._networkops.add_vswitch(name, rdev=rdev,
                                     controller=controller,
                                     connection=connection,
                                     network_type=network_type,
                                     router=router, vid=vid,
                                     port_type=port_type, gvrp=gvrp,
                                     queue_mem=queue_mem,
                                     native_vid=native_vid,
                                     persist=persist)

    @check_guest_exist()
    def guest_get_console_output(self, userid):
        """Get the console output of the guest virtual machine.

        :param str userid: the user id of the vm
        :returns: console log string
        :rtype: str
        """
        action = "get the console output of guest '%s'" % userid
        with zvmutils.log_and_reraise_sdkbase_error(action):
            output = self._vmops.get_console_output(userid)

        return output

    def guest_delete(self, userid):
        """Delete guest.

        :param userid: the user id of the vm
        """
        # check guest exist in database or not
        userid = userid.upper()
        if not self._vmops.check_guests_exist_in_db(userid,
                                                    raise_exc=False):
            if zvmutils.check_userid_exist(userid):
                # Guest exists in z/VM but has no DB record (not managed
                # by this SDK): refuse to delete it.
                LOG.error("Guest '%s' does not exist in guests database" %
                          userid)
                raise exception.SDKObjectNotExistError(
                    obj_desc=("Guest '%s'" % userid), modID='guest')
            else:
                LOG.debug("The guest %s does not exist." % userid)
                return

        action = "delete guest '%s'" % userid
        with zvmutils.log_and_reraise_sdkbase_error(action):
            return self._vmops.delete_vm(userid)

    @check_guest_exist()
    def guest_inspect_stats(self, userid_list):
        """Get the statistics including cpu and mem of the guests.

        :param userid_list: a single userid string or a list of guest
               userids
        :returns: dictionary describing the cpu statistics of the vm in
               the form
               {'UID1': {'guest_cpus': xx, 'used_cpu_time_us': xx,
                         'elapsed_cpu_time_us': xx, 'min_cpu_count': xx,
                         'max_cpu_limit': xx, 'samples_cpu_in_use': xx,
                         'samples_cpu_delay': xx, 'used_mem_kb': xx,
                         'max_mem_kb': xx, 'min_mem_kb': xx,
                         'shared_mem_kb': xx},
                'UID2': {'guest_cpus': xx, 'used_cpu_time_us': xx,
                         'elapsed_cpu_time_us': xx, 'min_cpu_count': xx,
                         'max_cpu_limit': xx, 'samples_cpu_in_use': xx,
                         'samples_cpu_delay': xx, 'used_mem_kb': xx,
                         'max_mem_kb': xx, 'min_mem_kb': xx,
                         'shared_mem_kb': xx}}
               for the guests that are shutdown or not exist, no data
               returned in the dictionary
        """
        if not isinstance(userid_list, list):
            userid_list = [userid_list]
        action = "get the statistics of guest '%s'" % str(userid_list)
        with zvmutils.log_and_reraise_sdkbase_error(action):
            return self._monitor.inspect_stats(userid_list)

    @check_guest_exist()
    def guest_inspect_vnics(self, userid_list):
        """Get the vnics statistics of the guest virtual machines.

        :param userid_list: a single userid string or a list of guest
               userids
        :returns: dictionary describing the vnics statistics of the vm in
               the form
               {'UID1': [{'vswitch_name': xx, 'nic_vdev': xx,
                          'nic_fr_rx': xx, 'nic_fr_tx': xx,
                          'nic_fr_rx_dsc': xx, 'nic_fr_tx_dsc': xx,
                          'nic_fr_rx_err': xx, 'nic_fr_tx_err': xx,
                          'nic_rx': xx, 'nic_tx': xx}, ],
                'UID2': [{'vswitch_name': xx, 'nic_vdev': xx,
                          'nic_fr_rx': xx, 'nic_fr_tx': xx,
                          'nic_fr_rx_dsc': xx, 'nic_fr_tx_dsc': xx,
                          'nic_fr_rx_err': xx, 'nic_fr_tx_err': xx,
                          'nic_rx': xx, 'nic_tx': xx}, ]}
               for the guests that are shutdown or not exist, no data
               returned in the dictionary
        """
        if not isinstance(userid_list, list):
            userid_list = [userid_list]
        action = "get the vnics statistics of guest '%s'" % str(userid_list)
        with zvmutils.log_and_reraise_sdkbase_error(action):
            return self._monitor.inspect_vnics(userid_list)

    @check_guest_exist(check_index=1)
    def vswitch_grant_user(self, vswitch_name, userid):
        """Set vswitch to grant user.

        :param str vswitch_name: the name of the vswitch
        :param str userid: the user id of the vm
        """
        self._networkops.grant_user_to_vswitch(vswitch_name, userid)

    def vswitch_revoke_user(self, vswitch_name, userid):
        """Revoke user for vswitch.

        :param str vswitch_name: the name of the vswitch
        :param str userid: the user id of the vm
        """
        self._networkops.revoke_user_from_vswitch(vswitch_name, userid)

    @check_guest_exist(check_index=1)
    def vswitch_set_vlan_id_for_user(self, vswitch_name, userid, vlan_id):
        """Set vlan id for user when connecting to the vswitch.

        :param str vswitch_name: the name of the vswitch
        :param str userid: the user id of the vm
        :param int vlan_id: the VLAN id
        """
        self._networkops.set_vswitch_port_vlan_id(vswitch_name, userid,
                                                  vlan_id)

    @check_guest_exist()
    def guest_config_minidisks(self, userid, disk_info):
        """Punch the script that used to process additional disks to vm.

        :param str userid: the user id of the vm
        :param disk_info: a list contains disks info for the guest. It
               contains dictionaries that describes disk info for each
               disk. Each dictionary has 3 keys, format is required, vdev
               and mntdir are optional. For example, if vdev is not
               specified, it will start from the next vdev of
               CONF.zvm.user_root_vdev, eg.
               if CONF.zvm.user_root_vdev is 0100, zvmsdk will use 0101 as
               the vdev for first additional disk in disk_info, and if
               mntdir is not specified, zvmsdk will use /mnt/ephemeral0 as
               the mount point of first additional disk.
               Here are some examples:
               [{'vdev': '0101', 'format': 'ext3',
                 'mntdir': '/mnt/ephemeral0'}]
               In this case, the zvmsdk will treat 0101 as additional
               disk's vdev, and it's formatted with ext3, and will be
               mounted to /mnt/ephemeral0.
               [{'format': 'ext3'}, {'format': 'ext4'}]
               In this case, if CONF.zvm.user_root_vdev is 0100, zvmsdk
               will configure the first additional disk as 0101, mount it
               to /mnt/ephemeral0 with ext3, and configure the second
               additional disk 0102, mount it to /mnt/ephemeral1 with
               ext4.
        """
        action = "config disks for userid '%s'" % userid
        with zvmutils.log_and_reraise_sdkbase_error(action):
            self._vmops.guest_config_minidisks(userid, disk_info)

    @check_guest_exist()
    def guest_grow_root_volume(self, userid, os_version):
        """Punch script to guest to grow root partition and extend root
        file system.
        Note:
        1. Only multipath SCSI disk is supported.
        2. Only one partition is supported.
        3. xfs file system is not supported.

        :param str userid: the user id of the vm
        :param str os_version: operating system version of the guest
        """
        return self._vmops.guest_grow_root_volume(userid, os_version)

    def vswitch_set(self, vswitch_name, **kwargs):
        """Change the configuration of an existing virtual switch.

        :param str vswitch_name: the name of the virtual switch
        :param dict kwargs:
               - grant_userid=<value>: A userid to be added to the access
                 list
               - user_vlan_id=<value>: user VLAN ID. Support following
                 ways:
                 1. As single values between 1 and 4094. A maximum of four
                 values may be specified, separated by blanks.
                 Example: 1010 2020 3030 4040
                 2. As a range of two numbers, separated by a dash (-).
                 A maximum of two ranges may be specified.
Example: 10-12 20-22 - revoke_userid=: A userid to be removed from the access list - real_device_address=: The real device address or the real device address and OSA Express port number of a QDIO OSA Express device to be used to create the switch to the virtual adapter. If using a real device and an OSA Express port number, specify the real device number followed by a period(.), the letter 'P' (or 'p'), followed by the port number as a hexadecimal number. A maximum of three device addresses, all 1-7 characters in length, may be specified, delimited by blanks. 'None' may also be specified - port_name=: The name used to identify the OSA Expanded adapter. A maximum of three port names, all 1-8 characters in length, may be specified, delimited by blanks. - controller_name=: One of the following: 1. The userid controlling the real device. A maximum of eight userids, all 1-8 characters in length, may be specified, delimited by blanks. 2. '*': Specifies that any available controller may be used - connection_value=: One of the following values: CONnect: Activate the real device connection. DISCONnect: Do not activate the real device connection. - queue_memory_limit=: A number between 1 and 8 specifying the QDIO buffer size in megabytes. - routing_value=: Specifies whether the OSA-Express QDIO device will act as a router to the virtual switch, as follows: NONrouter: The OSA-Express device identified in real_device_address= will not act as a router to the vswitch PRIrouter: The OSA-Express device identified in real_device_address= will act as a primary router to the vswitch - port_type=: Specifies the port type, ACCESS or TRUNK - persist=: one of the following values: NO: The vswitch is updated on the active system, but is not updated in the permanent configuration for the system. YES: The vswitch is updated on the active system and also in the permanent configuration for the system. If not specified, the default is NO. 
- gvrp_value=: GVRP or NOGVRP - mac_id=: A unique identifier (up to six hexadecimal digits) used as part of the vswitch MAC address - uplink=: One of the following: NO: The port being enabled is not the vswitch's UPLINK port. YES: The port being enabled is the vswitch's UPLINK port. - nic_userid=: One of the following: 1. The userid of the port to/from which the UPLINK port will be connected or disconnected. If a userid is specified, then nic_vdev= must also be specified 2. '*': Disconnect the currently connected guest port to/from the special virtual switch UPLINK port. (This is equivalent to specifying NIC NONE on CP SET VSWITCH). - nic_vdev=: The virtual device to/from which the the UPLINK port will be connected/disconnected. If this value is specified, nic_userid= must also be specified, with a userid. - lacp=: One of the following values: ACTIVE: Indicates that the virtual switch will initiate negotiations with the physical switch via the link aggregation control protocol (LACP) and will respond to LACP packets sent by the physical switch. INACTIVE: Indicates that aggregation is to be performed, but without LACP. - Interval=: The interval to be used by the control program (CP) when doing load balancing of conversations across multiple links in the group. This can be any of the following values: 1 - 9990: Indicates the number of seconds between load balancing operations across the link aggregation group. OFF: Indicates that no load balancing is done. - group_rdev=: The real device address or the real device address and OSA Express port number of a QDIO OSA Express devcie to be affected within the link aggregation group associated with this vswitch. If using a real device and an OSA Express port number, specify the real device number followed by a period (.), the letter 'P' (or 'p'), followed by the port number as a hexadecimal number. A maximum of eight device addresses all 1-7 characters in length, may be specified, delimited by blanks. 
Note: If a real device address is specified, this device will be added to the link aggregation group associated with this vswitch. (The link aggregation group will be created if it does not already exist.) - iptimeout=: A number between 1 and 240 specifying the length of time in minutes that a remote IP address table entry remains in the IP address table for the virtual switch. - port_isolation=: ON or OFF - promiscuous=: One of the following: NO: The userid or port on the grant is not authorized to use the vswitch in promiscuous mode YES: The userid or port on the grant is authorized to use the vswitch in promiscuous mode. - MAC_protect=: ON, OFF or UNSPECified - VLAN_counters=: ON or OFF """ for k in kwargs.keys(): if k not in constants.SET_VSWITCH_KEYWORDS: errmsg = ('API vswitch_set: Invalid keyword %s' % k) raise exception.SDKInvalidInputFormat(msg=errmsg) self._networkops.set_vswitch(vswitch_name, **kwargs) def vswitch_delete(self, vswitch_name, persist=True): """ Delete vswitch. :param str name: the vswitch name :param bool persist: whether delete the vswitch from the permanent configuration for the system """ self._networkops.delete_vswitch(vswitch_name, persist) def get_volume_connector(self, userid, reserve=False, fcp_template_id=None, sp_name=None): """Get connector information of the guest for attaching to volumes. This API is for Openstack Cinder driver only now. Connector information is a dictionary representing the machine that will be making the connection as follows:: { 'zvm_fcp': fcp 'wwpns': [wwpn] 'host': host 'phy_to_virt_initiators': {}, 'fcp_paths': 0, 'fcp_template_id': fcp_template_id } This information will be used by IBM storwize FC driver in Cinder. 
:param str userid: the user id of the guest :param boolean reserve: the flag to reserve FCP device :param str fcp_template_id: the FCP Multipath Template id which FCP devices are allocated by :param str sp_name: the storage provider name """ return self._volumeop.get_volume_connector( userid, reserve, fcp_template_id, sp_name) def get_fcp_templates(self, template_id_list=None, assigner_id=None, default_sp_list= None, host_default=None): """Get template base info :param template_id_list: (list) a list of template id, if it is None, get FCP Multipath Templates with other parameter :param assigner_id: (str) a string of VM userid :param host_default: (boolean) whether or not get host default fcp template :param default_sp_list: (list) a list of storage provider, to get the list of storage provider's default FCP Multipath Templates :return: (dict) the base info of template example: { templates: [ { name: t1, id: template1_id, description: description, host_default: 0, sp_default: [sp1] }, { name: t2, id: template2_id, description: description, host_default: 1, sp_default: [sp1, sp2] } ] } """ # pass in template_id_list and default_sp_list is string: # "['36439338-db14-11ec-bb41-0201018b1dd2']" # convert to list if template_id_list and not isinstance(template_id_list, list): template_id_list = ast.literal_eval(template_id_list) if default_sp_list and not isinstance(default_sp_list, list): default_sp_list = ast.literal_eval(default_sp_list) return self._volumeop.get_fcp_templates( template_id_list=template_id_list, assigner_id=assigner_id, default_sp_list=default_sp_list, host_default=host_default) def get_fcp_templates_details(self, template_id_list=None, raw=False, statistics=True, sync_with_zvm=False): """Get FCP Multipath Templates detail info. 
:param template_list: (list) if is None, will get all the templates on the host :return: (dict) the raw and/or statistic data of temlate_list FCP devices if sync_with_zvm: self.fcp_mgr._sync_db_with_zvm() if FCP DB is NOT empty and raw=True statistics=True { "fcp_templates":[ { "id":"36439338-db14-11ec-bb41-0201018b1dd2", "name":"default_template", "description":"This is Default template", "is_default":True, "sp_name":[ "sp4", "v7k60" ], "raw":{ # (fcp_id, template_id, assigner_id, connections, # reserved, wwpn_npiv, wwpn_phy, chpid, state, owner, # tmpl_id) "0":[ [ "1a0f", "36439338-db14-11ec-bb41-0201018b1dd2", "HLP0000B", 0, 0, "c05076de3300038b", "c05076de33002e41", "27", "free", "none", "36439338-db14-11ec-bb41-0201018b1dd2" ], [ "1a0e", "36439338-db14-11ec-bb41-0201018b1dd2", "", 0, 0, "c05076de330003a2", "c05076de33002e41", "27", "free", "none", "36439338-db14-11ec-bb41-0201018b1dd2" ] ], "1":[ [ "1c0d", "36439338-db14-11ec-bb41-0201018b1dd2", "", 0, 0, "c05076de33000353", "c05076de33002641", "32", "free", "none", "36439338-db14-11ec-bb41-0201018b1dd2" ] ] }, "statistics":{ "0":{ "total":"1A0E - 1A0F", "available":"1A0E - 1A0F", "allocated":"", "reserve_only":"", "connection_only":"", "unallocated_but_active":[ ], "allocated_but_free":"", "notfound":"", "offline":"", "CHPIDs":{ "27":"1A0E - 1A0F" } }, "1":{ "total":"1C0D", "available":"1C0D", "allocated":"", "reserve_only":"", "connection_only":"", "unallocated_but_active":[ ], "allocated_but_free":"", "notfound":"", "offline":"", "CHPIDs":{ "32":"1C0D" } } } } ] } """ # pass in template_id_list is string: # "['36439338-db14-11ec-bb41-0201018b1dd2']" # convert to list if template_id_list and not isinstance(template_id_list, list): template_id_list = ast.literal_eval(template_id_list) return self._volumeop.get_fcp_templates_details( template_id_list=template_id_list, raw=raw, statistics=statistics, sync_with_zvm=sync_with_zvm) def delete_fcp_template(self, template_id): return 
self._volumeop.delete_fcp_template(template_id) @check_fcp_exist() def get_fcp_usage(self, fcp): """API for getting FCP usage in database manually. :param str userid: the user id of the guest :param str fcp: the fcp ID of FCP device :returns: list describing reserved,connections values of the FCP in database. For example, ['fakeid', 1, 3, 'b7ad5cba-f225-11ec-a5cf-02553600000f'] means the userid is fakeid, reserved value is 1, connections is 3, fcp_template_id is 'b7ad5cba-f225-11ec-a5cf-02553600000f'. """ return self._volumeop.get_fcp_usage(fcp) @check_fcp_exist() def set_fcp_usage(self, fcp, userid, reserved, connections, fcp_template_id): """API for setting FCP usage in database manually. :param str userid: the user id of the guest :param str fcp: the fcp ID of FCP device :param int reserved: the value set to reserved value of FCP database :param int connections: the value set to connections value of FCP database :param str fcp_template_id: the ID of the FCP Multipath Template. """ return self._volumeop.set_fcp_usage(userid, fcp, reserved, connections, fcp_template_id) def create_fcp_template(self, name, description: str = '', fcp_devices: str = '', host_default: bool = False, default_sp_list: list = [], min_fcp_paths_count: int = None): """API for creating a FCP Multipath Template in database. :param str name: the name of the template :param str description: the description for the template :param str fcp_devices: a fcp list is composed of fcp device IDs, range indicator '-', and split indicator ';'. :param bool host_default: this template is default to this host or not :param list default_sp_list: the list of storage providers that will use this FCP Multipath Template as default FCP Multipath Template. If None, it means no storage provider would use this FCP Multipath Template as default. :param min_fcp_paths_count: The minimum number of FCP paths that should be defined to a vm when attachinga data volume to a vm or BFV (deploying a vm from SCSI image). 
""" return self._volumeop.create_fcp_template( name, description, fcp_devices, host_default=host_default, default_sp_list=default_sp_list, min_fcp_paths_count=min_fcp_paths_count) def edit_fcp_template(self, fcp_template_id, name=None, description=None, fcp_devices=None, host_default=None, default_sp_list=None, min_fcp_paths_count: int = None): """ Edit a FCP Multipath Template. The kwargs values are pre-validated in two places: validate kwargs types in zvmsdk/sdkwsgi/schemas/volume.py set a kwarg as None if not passed by user in zvmsdk/sdkwsgi/handlers/volume.py If any kwarg is None, the kwarg will not be updated. :param fcp_template_id: template id :param name: template name :param description: template desc :param fcp_devices: FCP devices divided into different paths by semicolon Format: "fcp-devices-from-path0;fcp-devices-from-path1;..." Example: "0011-0013;0015;0017-0018", :param host_default: (bool) :param default_sp_list: (list) Example: ["SP1", "SP2"] :param min_fcp_paths_count: min fcp paths count :return: Example { 'fcp_template': { 'name': 'bjcb-test-template', 'id': '36439338-db14-11ec-bb41-0201018b1dd2', 'description': 'This is Default template', 'is_default': True, 'sp_name': ['sp4', 'v7k60'] } } """ return self._volumeop.edit_fcp_template( fcp_template_id, name=name, description=description, fcp_devices=fcp_devices, host_default=host_default, default_sp_list=default_sp_list, min_fcp_paths_count=min_fcp_paths_count) def volume_attach(self, connection_info): """ Attach a volume to a guest. It's prerequisite to active multipath feature on the guest before utilizing persistent volumes. :param dict connection_info: - alias: of type string. A constant valid alias of the volume after it being attached onto the guest, i.e. '/dev/vda'. Because the system generating device name could change after each rebooting, it's necessary to have a constant name to represent the volume in its life time. - protocol: of type string. 
The protocol by which the volume is connected to the guest. The only one supported now is 'fc' which implies FibreChannel. - fcps: of type list. The address of the FCP devices used by the guest to connect to the volume. They should belong to different channel path IDs in order to work properly. - wwpns: of type list. The WWPN values through which the volume can be accessed, excluding prefixing '0x'. - dedicate: of type list. The address of the FCP devices which will be dedicated to the guest before accessing the volume. They should belong to different channel path IDs in order to work properly. """ self._volumeop.attach_volume_to_instance(connection_info) def volume_refresh_bootmap(self, fcpchannels, wwpns, lun, wwid='', transportfiles=None, guest_networks=None, fcp_template_id=None): """ Refresh a volume's bootmap info. :param list of fcpchannels :param list of wwpns :param string lun :param wwid: (str) the wwid of the target volume :param transportfiles: (str) the files that used to customize the vm :param list guest_networks: a list of network info for the guest. It has one dictionary that contain some of the below keys for each network, the format is: {'ip_addr': (str) IP address or None, 'dns_addr': (list) dns addresses or None, 'gateway_addr': (str) gateway address or None, 'cidr': (str) cidr format, 'nic_vdev': (str)nic VDEV, 1- to 4- hexadecimal digits or None, 'nic_id': (str) nic identifier or None, 'mac_addr': (str) mac address or None, it is only be used when changing the guest's user direct. Format should be xx:xx:xx:xx:xx:xx, and x is a hexadecimal digit 'osa_device': (str) OSA address or None, 'hostname': (str) Optional. 
The hostname of the guest} Example for guest_networks: [{'ip_addr': '192.168.95.10', 'dns_addr': ['9.0.2.1', '9.0.3.1'], 'gateway_addr': '192.168.95.1', 'cidr': "192.168.95.0/24", 'nic_vdev': '1000', 'mac_addr': '02:00:00:12:34:56', 'hostname': 'instance-00001'}, {'ip_addr': '192.168.96.10', 'dns_addr': ['9.0.2.1', '9.0.3.1'], 'gateway_addr': '192.168.96.1', 'cidr': "192.168.96.0/24", 'nic_vdev': '1003}], :param fcp_template_id """ return self._volumeop.volume_refresh_bootmap(fcpchannels, wwpns, lun, wwid=wwid, transportfiles=transportfiles, guest_networks=guest_networks, fcp_template_id=fcp_template_id) def volume_detach(self, connection_info): """ Detach a volume from a guest. It's prerequisite to active multipath feature on the guest before utilizing persistent volumes. :param dict connection_info: A dict comprised of a list of information used to establish host-volume connection, including: - alias: of type string. A constant valid alias of the volume after it being attached onto the guest, i.e. '/dev/vda'. Because the system generating device name could change after each rebooting, it's necessary to have a constant name to represent the volume in its life time. - protocol: of type string. The protocol by which the volume is connected to the guest. The only one supported now is 'fc' which implies FibreChannel. - fcps: of type list. The address of the FCP devices used by the guest to connect to the volume. - wwpns: of type list. The WWPN values through which the volume can be accessed, excluding prefixing '0x'. - dedicate: of type list. The address of the FCP devices which will be undedicated from the guest after removing the volume. """ self._volumeop.detach_volume_from_instance(connection_info) @check_guest_exist() def guest_create_network_interface(self, userid, os_version, guest_networks, active=False): """ Create network interface(s) for the guest inux system. It will create the nic for the guest, add NICDEF record into the user direct. 
It will also construct network interface configuration files and punch the files to the guest. These files will take effect when initializing and configure guest. :param str userid: the user id of the guest :param str os_version: operating system version of the guest :param list guest_networks: a list of network info for the guest. It has one dictionary that contain some of the below keys for each network, the format is: {'ip_addr': (str) IP address or None, 'dns_addr': (list) dns addresses or None, 'gateway_addr': (str) gateway address or None, 'cidr': (str) cidr format, 'nic_vdev': (str)nic VDEV, 1- to 4- hexadecimal digits or None, 'nic_id': (str) nic identifier or None, 'mac_addr': (str) mac address or None, it is only be used when changing the guest's user direct. Format should be xx:xx:xx:xx:xx:xx, and x is a hexadecimal digit 'osa_device': (str) OSA address or None, 'hostname': (str) Optional. The hostname of the vm.} Example for guest_networks: [{'ip_addr': '192.168.95.10', 'dns_addr': ['9.0.2.1', '9.0.3.1'], 'gateway_addr': '192.168.95.1', 'cidr': "192.168.95.0/24", 'nic_vdev': '1000', 'mac_addr': '02:00:00:12:34:56', 'hostname': 'instance-00001'}, {'ip_addr': '192.168.96.10', 'dns_addr': ['9.0.2.1', '9.0.3.1'], 'gateway_addr': '192.168.96.1', 'cidr': "192.168.96.0/24", 'nic_vdev': '1003}] :param bool active: whether add a nic on active guest system :returns: guest_networks list, including nic_vdev for each network :rtype: list """ if len(guest_networks) == 0: errmsg = ("API guest_create_network_interface: " "Network information is required but not provided") raise exception.SDKInvalidInputFormat(msg=errmsg) for network in guest_networks: vdev = nic_id = mac_addr = ip_addr = OSA = None if 'nic_vdev' in network.keys(): vdev = network['nic_vdev'] if 'osa_device' in network.keys(): OSA = network['osa_device'] if 'nic_id' in network.keys(): nic_id = network['nic_id'] if (('mac_addr' in network.keys()) and (network['mac_addr'] is not None)): mac_addr = 
network['mac_addr'] if not zvmutils.valid_mac_addr(mac_addr): errmsg = ("API guest_create_network_interface: " "Invalid mac address, format should be " "xx:xx:xx:xx:xx:xx, and x is a hexadecimal " "digit") raise exception.SDKInvalidInputFormat(msg=errmsg) if (('ip_addr' in network.keys()) and (network['ip_addr'] is not None)): ip_addr = network['ip_addr'] if not netaddr.valid_ipv4(ip_addr): errmsg = ("API guest_create_network_interface: " "Invalid management IP address, it should be " "the value between 0.0.0.0 and 255.255.255.255") raise exception.SDKInvalidInputFormat(msg=errmsg) if (('dns_addr' in network.keys()) and (network['dns_addr'] is not None)): if not isinstance(network['dns_addr'], list): raise exception.SDKInvalidInputTypes( 'guest_config_network', str(list), str(type(network['dns_addr']))) for dns in network['dns_addr']: if not netaddr.valid_ipv4(dns): errmsg = ("API guest_create_network_interface: " "Invalid dns IP address, it should be the " "value between 0.0.0.0 and 255.255.255.255") raise exception.SDKInvalidInputFormat(msg=errmsg) if (('gateway_addr' in network.keys()) and (network['gateway_addr'] is not None)): if not netaddr.valid_ipv4( network['gateway_addr']): errmsg = ("API guest_create_network_interface: " "Invalid gateway IP address, it should be " "the value between 0.0.0.0 and 255.255.255.255") raise exception.SDKInvalidInputFormat(msg=errmsg) if (('cidr' in network.keys()) and (network['cidr'] is not None)): if not zvmutils.valid_cidr(network['cidr']): errmsg = ("API guest_create_network_interface: " "Invalid CIDR, format should be a.b.c.d/n, and " "a.b.c.d is IP address, n is the value " "between 0-32") raise exception.SDKInvalidInputFormat(msg=errmsg) try: if OSA is None: used_vdev = self._networkops.create_nic(userid, vdev=vdev, nic_id=nic_id, mac_addr=mac_addr, active=active) else: used_vdev = self._networkops.dedicate_OSA(userid, OSA, vdev=vdev, active=active) network['nic_vdev'] = used_vdev except exception.SDKBaseException: 
LOG.error(('Failed to create nic on vm %s') % userid) raise try: self._networkops.network_configuration(userid, os_version, guest_networks, active=active) except exception.SDKBaseException: LOG.error(('Failed to set network configuration file on vm %s') % userid) raise return guest_networks def guests_get_nic_info(self, userid=None, nic_id=None, vswitch=None): """ Retrieve nic information in the network database according to the requirements, the nic information will include the guest name, nic device number, vswitch name that the nic is coupled to, nic identifier and the comments. :param str userid: the user id of the vm :param str nic_id: nic identifier :param str vswitch: the name of the vswitch :returns: list describing nic information, format is [ (userid, interface, vswitch, nic_id, comments), (userid, interface, vswitch, nic_id, comments) ], such as [ ('VM01', '1000', 'xcatvsw2', '1111-2222', None), ('VM02', '2000', 'xcatvsw3', None, None) ] :rtype: list """ action = "get nic information" with zvmutils.log_and_reraise_sdkbase_error(action): return self._networkops.get_nic_info(userid=userid, nic_id=nic_id, vswitch=vswitch) def vswitch_query(self, vswitch_name): """Check the virtual switch status :param str vswitch_name: the name of the virtual switch :returns: Dictionary describing virtual switch info :rtype: dict """ action = "get virtual switch information" with zvmutils.log_and_reraise_sdkbase_error(action): return self._networkops.vswitch_query(vswitch_name) @check_guest_exist() def guest_delete_network_interface(self, userid, os_version, vdev, active=False): """ delete the nic and network configuration for the vm :param str userid: the user id of the guest :param str os_version: operating system version of the guest :param str vdev: nic device number, 1- to 4- hexadecimal digits :param bool active: whether delete a nic on active guest system """ self._networkops.delete_nic(userid, vdev, active=active) 
self._networkops.delete_network_configuration(userid, os_version, vdev, active=active) def host_get_ssi_info(self): """Get z/VM host SSI information. :returns: If current z/VM host is an SSI cluster member, returns a list of SSI cluster info, format is: ['ssi_name = SSI', 'ssi_mode = Stable', 'ssi_pdr = IAS7CM_on_139E', 'cross_system_timeouts = Enabled', 'output.ssiInfoCount = 4', '', 'member_slot = 1', 'member_system_id = BOEIAAS7', 'member_state = Joined', 'member_pdr_heartbeat = 12/28/2021_05:10:21', 'member_received_heartbeat = 12/28/2021_05:10:21', '', 'member_slot = 2', 'member_system_id = BOEIAAS8', 'member_state = Joined', 'member_pdr_heartbeat = 12/28/2021_05:10:36', 'member_received_heartbeat = 12/28/2021_05:10:36', ''] otherwise, return []. :rtype: list """ return self._hostops.host_get_ssi_info() zVMCloudConnector-1.6.3/zvmsdk/__init__.py0000775000175000017510000000000013575566551020164 0ustar ruirui00000000000000zVMCloudConnector-1.6.3/zvmsdk/vmops.py0000775000175000017510000005237214315210052017563 0ustar ruirui00000000000000# Copyright 2017,2022 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import os import six import shutil from zvmsdk import config from zvmsdk import dist from zvmsdk import exception from zvmsdk import log from zvmsdk import smtclient from zvmsdk import database from zvmsdk import utils as zvmutils _VMOPS = None CONF = config.CONF LOG = log.LOG def get_vmops(): global _VMOPS if _VMOPS is None: _VMOPS = VMOps() return _VMOPS class VMOps(object): def __init__(self): self._smtclient = smtclient.get_smtclient() self._dist_manager = dist.LinuxDistManager() self._pathutils = zvmutils.PathUtils() self._namelist = zvmutils.get_namelist() self._GuestDbOperator = database.GuestDbOperator() self._ImageDbOperator = database.ImageDbOperator() def get_power_state(self, userid): """Get power status of a z/VM instance.""" return self._smtclient.get_power_state(userid) def _get_cpu_num_from_user_dict(self, dict_info): cpu_num = 0 for inf in dict_info: if 'CPU ' in inf: cpu_num += 1 return cpu_num def _get_max_memory_from_user_dict(self, dict_info): with zvmutils.expect_invalid_resp_data(): mem = dict_info[0].split(' ')[4] return zvmutils.convert_to_mb(mem) * 1024 def get_info(self, userid): power_stat = self.get_power_state(userid) perf_info = self._smtclient.get_image_performance_info(userid) # Get the online CPU number, OS distro and kernel version try: act_cpus = self._smtclient.get_active_cpu_addrs(userid) act_cpus_num = len(act_cpus) LOG.debug('Online cpu info: %s, %d' % (act_cpus, act_cpus_num)) except exception.SDKSMTRequestFailed as err: msg = ('Failed to execute command on capture source vm %(vm)s ' 'to get online cpu number with error %(err)s' % {'vm': userid, 'err': err.results['response'][0]}) LOG.error(msg) act_cpus_num = 0 try: os_distro = self._smtclient.guest_get_os_version(userid) kernel_info = self._smtclient.guest_get_kernel_info(userid) LOG.debug('OS and kernel info: %s, %s' % (os_distro, kernel_info)) except exception.SDKSMTRequestFailed as err: msg = ('Failed to execute command on capture source vm %(vm)s ' 'to get OS distro 
with error %(err)s' % {'vm': userid, 'err': err.results['response'][0]}) LOG.error(msg) os_distro = '' kernel_info = '' if perf_info: try: max_mem_kb = int(perf_info['max_memory'].split()[0]) mem_kb = int(perf_info['used_memory'].split()[0]) num_cpu = int(perf_info['guest_cpus']) cpu_time_us = int(perf_info['used_cpu_time'].split()[0]) except (ValueError, TypeError, IndexError, AttributeError, KeyError) as err: LOG.error('Parse performance_info encounter error: %s', str(perf_info)) raise exception.SDKInternalError(msg=str(err), modID='guest') return {'power_state': power_stat, 'max_mem_kb': max_mem_kb, 'mem_kb': mem_kb, 'num_cpu': num_cpu, 'cpu_time_us': cpu_time_us, 'online_cpu_num': act_cpus_num, 'os_distro': os_distro, 'kernel_info': kernel_info} else: # virtual machine in shutdown state or not exists dict_info = self._smtclient.get_user_direct(userid) return { 'power_state': power_stat, 'max_mem_kb': self._get_max_memory_from_user_dict(dict_info), 'mem_kb': 0, 'num_cpu': self._get_cpu_num_from_user_dict(dict_info), 'cpu_time_us': 0, 'online_cpu_num': act_cpus_num, 'os_distro': os_distro, 'kernel_info': kernel_info} def get_adapters_info(self, userid): adapters_info = self._smtclient.get_adapters_info(userid) if not adapters_info: msg = 'Get network information failed on: %s' % userid LOG.error(msg) raise exception.SDKInternalError(msg=msg, modID='guest') return {'adapters': adapters_info} def instance_metadata(self, instance, content, extra_md): pass def add_instance_metadata(self): pass def is_reachable(self, userid): """Reachable through IUCV communication channel.""" return self._smtclient.get_guest_connection_status(userid) def wait_for_reachable(self, userid, timeout=CONF.guest.reachable_timeout): """Return until guest reachable or timeout.""" def _check_reachable(): if not self.is_reachable(userid): raise exception.SDKRetryException() zvmutils.looping_call(_check_reachable, 5, 0, 5, timeout, exception.SDKRetryException) def guest_start(self, userid, 
timeout=0): """"Power on z/VM instance.""" LOG.info("Begin to power on vm %s", userid) self._smtclient.guest_start(userid) if timeout > 0: self.wait_for_reachable(userid, timeout) if not self.is_reachable(userid): msg = ("compute node is not able to connect to the virtual " "machine in %d seconds" % timeout) raise exception.SDKGuestOperationError(rs=16, userid=userid, msg=msg) LOG.info("Complete power on vm %s", userid) def guest_stop(self, userid, **kwargs): LOG.info("Begin to power off vm %s", userid) self._smtclient.guest_stop(userid, **kwargs) LOG.info("Complete power off vm %s", userid) def guest_softstop(self, userid, **kwargs): LOG.info("Begin to soft power off vm %s", userid) self._smtclient.guest_softstop(userid, **kwargs) LOG.info("Complete soft power off vm %s", userid) def guest_pause(self, userid): LOG.info("Begin to pause vm %s", userid) self._smtclient.guest_pause(userid) LOG.info("Complete pause vm %s", userid) def guest_unpause(self, userid): LOG.info("Begin to unpause vm %s", userid) self._smtclient.guest_unpause(userid) LOG.info("Complete unpause vm %s", userid) def guest_reboot(self, userid): """Reboot a guest vm.""" LOG.info("Begin to reboot vm %s", userid) self._smtclient.guest_reboot(userid) LOG.info("Complete reboot vm %s", userid) def guest_reset(self, userid): """Reset z/VM instance.""" LOG.info("Begin to reset vm %s", userid) self._smtclient.guest_reset(userid) LOG.info("Complete reset vm %s", userid) def live_migrate_vm(self, userid, destination, parms, action): """Move an eligible, running z/VM(R) virtual machine transparently from one z/VM system to another within an SSI cluster.""" # Check guest state is 'on' state = self.get_power_state(userid) if state != 'on': LOG.error("Failed to live migrate guest %s, error: " "guest is inactive, cann't perform live migrate." 
% userid) raise exception.SDKConflictError(modID='guest', rs=1, userid=userid) # Do live migrate if action.lower() == 'move': LOG.info("Moving the specific vm %s", userid) self._smtclient.live_migrate_move(userid, destination, parms) LOG.info("Complete move vm %s", userid) if action.lower() == 'test': LOG.info("Testing the eligiblity of specific vm %s", userid) self._smtclient.live_migrate_test(userid, destination) def create_vm(self, userid, cpu, memory, disk_list, user_profile, max_cpu, max_mem, ipl_from, ipl_param, ipl_loadparam, dedicate_vdevs, loaddev, account, comment_list, cschedule, cshare, rdomain, pcif): """Create z/VM userid into user directory for a z/VM instance.""" LOG.info("Creating the user directory for vm %s", userid) info = self._smtclient.create_vm(userid, cpu, memory, disk_list, user_profile, max_cpu, max_mem, ipl_from, ipl_param, ipl_loadparam, dedicate_vdevs, loaddev, account, comment_list, cschedule, cshare, rdomain, pcif) # add userid into smapi namelist self._smtclient.namelist_add(self._namelist, userid) return info def create_disks(self, userid, disk_list): LOG.info("Beging to create disks for vm: %(userid)s, list: %(list)s", {'userid': userid, 'list': disk_list}) user_direct = self._smtclient.get_user_direct(userid) exist_disks = [] for ent in user_direct: if ent.strip().startswith('MDISK'): md_vdev = ent.split()[1].strip() exist_disks.append(md_vdev) if exist_disks: start_vdev = hex(int(max(exist_disks), 16) + 1)[2:].rjust(4, '0') else: start_vdev = None info = self._smtclient.add_mdisks(userid, disk_list, start_vdev) LOG.info("Complete create disks for vm: %s", userid) return info def delete_disks(self, userid, vdev_list): LOG.info("Begin to delete disk on vm: %(userid), vdev list: %(list)s", {'userid': userid, 'list': vdev_list}) # not support delete disks when guest is active if self._smtclient.get_power_state(userid) == 'on': func = 'delete disks when guest is active' raise exception.SDKFunctionNotImplementError(func) 
self._smtclient.remove_mdisks(userid, vdev_list) LOG.info("Complete delete disks for vm: %s", userid) def guest_config_minidisks(self, userid, disk_info): LOG.info("Begin to configure disks on vm: %(userid), info: %(info)s", {'userid': userid, 'info': disk_info}) if disk_info != []: self._smtclient.process_additional_minidisks(userid, disk_info) LOG.info("Complete configure disks for vm: %s", userid) else: LOG.info("No disk to handle on %s." % userid) def guest_grow_root_volume(self, userid, os_version): """ Punch the grow partition script to the target guest. """ # firstly check if user wants to extend the volume if CONF.guest.extend_partition_fs.lower() != 'true': return LOG.debug('Begin to punch grow partition commands to guest: %s', userid) linuxdist = self._dist_manager.get_linux_dist(os_version)() # get configuration commands config_cmds = linuxdist.get_extend_partition_cmds() # Creating tmp file with these cmds temp_folder = self._pathutils.get_guest_temp_path(userid) file_path = os.path.join(temp_folder, 'gpartvol.sh') LOG.debug('Creating file %s to contain root partition extension ' 'commands' % file_path) with open(file_path, "w") as f: f.write(config_cmds) try: self._smtclient.punch_file(userid, file_path, "X") finally: LOG.debug('Removing the folder %s ', temp_folder) shutil.rmtree(temp_folder) def is_powered_off(self, instance_name): """Return True if the instance is powered off.""" return self._smtclient.get_power_state(instance_name) == 'off' def delete_vm(self, userid): """Delete z/VM userid for the instance.""" LOG.info("Begin to delete vm %s", userid) self._smtclient.delete_vm(userid) LOG.info("Complete delete vm %s", userid) def execute_cmd(self, userid, cmdStr): """Execute commands on the guest vm.""" LOG.debug("executing cmd: %s", cmdStr) return self._smtclient.execute_cmd(userid, cmdStr) def set_hostname(self, userid, hostname, os_version): """Punch a script that used to set the hostname of the guest. 
:param str guest: the user id of the guest :param str hostname: the hostname of the guest :param str os_version: version of guest operation system """ tmp_path = self._pathutils.get_guest_temp_path(userid) if not os.path.exists(tmp_path): os.makedirs(tmp_path) tmp_file = tmp_path + '/hostname.sh' lnxdist = self._dist_manager.get_linux_dist(os_version)() lines = lnxdist.generate_set_hostname_script(hostname) with open(tmp_file, 'w') as f: f.writelines(lines) requestData = "ChangeVM " + userid + " punchfile " + \ tmp_file + " --class x" LOG.debug("Punch script to guest %s to set hostname" % userid) try: self._smtclient._request(requestData) except exception.SDKSMTRequestFailed as err: msg = ("Failed to punch set_hostname script to userid '%s'. SMT " "error: %s" % (userid, err.format_message())) LOG.error(msg) raise exception.SDKSMTRequestFailed(err.results, msg) finally: self._pathutils.clean_temp_folder(tmp_path) def guest_deploy(self, userid, image_name, transportfiles=None, remotehost=None, vdev=None, hostname=None, skipdiskcopy=False): LOG.info("Begin to deploy image on vm %s", userid) if not skipdiskcopy: os_version = self._smtclient.image_get_os_distro(image_name) else: os_version = image_name if not self._smtclient.is_rhcos(os_version): self._smtclient.guest_deploy(userid, image_name, transportfiles, remotehost, vdev, skipdiskcopy) # punch scripts to set hostname if (transportfiles is None) and hostname: self.set_hostname(userid, hostname, os_version) else: self._smtclient.guest_deploy_rhcos(userid, image_name, transportfiles, remotehost, vdev, hostname, skipdiskcopy) def guest_capture(self, userid, image_name, capture_type='rootonly', compress_level=6): LOG.info("Begin to capture vm %(userid), image name is %(name)s", {'userid': userid, 'name': image_name}) self._smtclient.guest_capture(userid, image_name, capture_type=capture_type, compress_level=compress_level) LOG.info("Complete capture image on vm %s", userid) def guest_list(self): return 
self._smtclient.get_vm_list() def get_definition_info(self, userid, **kwargs): check_command = ["nic_coupled"] direct_info = self._smtclient.get_user_direct(userid) info = {} info['user_direct'] = direct_info for k, v in kwargs.items(): if k in check_command: if (k == 'nic_coupled'): info['nic_coupled'] = False nstr = "NICDEF %s TYPE QDIO LAN SYSTEM" % v for inf in direct_info: if nstr in inf: info['nic_coupled'] = True break else: raise exception.SDKInvalidInputFormat( msg=("invalid check option for user direct: %s") % k) return info def get_console_output(self, userid): def append_to_log(log_data, log_path): LOG.debug('log_data: %(log_data)r, log_path: %(log_path)r', {'log_data': log_data, 'log_path': log_path}) with open(log_path, 'a+') as fp: fp.write(log_data) return log_path LOG.info("Begin to capture console log on vm %s", userid) log_size = CONF.guest.console_log_size * 1024 console_log = self._smtclient.get_user_console_output(userid) log_path = self._pathutils.get_console_log_path(userid) # TODO: need consider shrink log file size append_to_log(console_log, log_path) log_fp = open(log_path, 'rb') try: log_data, remaining = zvmutils.last_bytes(log_fp, log_size) log_data = bytes.decode(log_data) except Exception as err: msg = ("Failed to truncate console log, error: %s" % six.text_type(err)) LOG.error(msg) raise exception.SDKInternalError(msg) if remaining > 0: LOG.info('Truncated console log returned, %d bytes ignored' % remaining) LOG.info("Complete get console output on vm %s", userid) return log_data def check_guests_exist_in_db(self, userids, raise_exc=True): if not isinstance(userids, list): # convert userid string to list userids = [userids] all_userids = self.guest_list() userids_not_in_db = list(set(userids) - set(all_userids)) if userids_not_in_db: if raise_exc: # log and raise exception userids_not_in_db = ' '.join(userids_not_in_db) LOG.error("Guest '%s' does not exist in guests database" % userids_not_in_db) raise 
exception.SDKObjectNotExistError( obj_desc=("Guest '%s'" % userids_not_in_db), modID='guest') else: return False else: userids_migrated = self._GuestDbOperator.get_migrated_guest_list() userids_in_migrated = list(set(userids) & set(userids_migrated)) # case1 userid has been migrated. if userids_in_migrated: if raise_exc: migrated_userids = ' '.join(userids_in_migrated) LOG.error("Guest(s) '%s' has been migrated." % migrated_userids) raise exception.SDKObjectNotExistError( obj_desc=("Guest(s) '%s'" % migrated_userids), modID='guest') else: return False flag = True for uid in userids: # case2 userid has been shudown and started on other host. if zvmutils.check_userid_on_others(uid): flag = False comment = self._GuestDbOperator.get_comments_by_userid(uid) comment['migrated'] = 1 action = "update guest '%s' in database" % uid with zvmutils.log_and_reraise_sdkbase_error(action): self._GuestDbOperator.update_guest_by_userid( uid, comments=comment) return flag def live_resize_cpus(self, userid, count): # Check power state is 'on' state = self.get_power_state(userid) if state != 'on': LOG.error("Failed to live resize cpus of guest %s, error: " "guest is inactive, cann't perform live resize." % userid) raise exception.SDKConflictError(modID='guest', rs=1, userid=userid) # Do live resize self._smtclient.live_resize_cpus(userid, count) LOG.info("Complete live resize cpu on vm %s", userid) def resize_cpus(self, userid, count): LOG.info("Begin to resize cpu on vm %s", userid) # Do resize self._smtclient.resize_cpus(userid, count) LOG.info("Complete resize cpu on vm %s", userid) def live_resize_memory(self, userid, memory): # Check power state is 'on' state = self.get_power_state(userid) if state != 'on': LOG.error("Failed to live resize memory of guest %s, error: " "guest is inactive, cann't perform live resize." 
% userid) raise exception.SDKConflictError(modID='guest', rs=1, userid=userid) # Do live resize self._smtclient.live_resize_memory(userid, memory) LOG.info("Complete live resize memory on vm %s", userid) def resize_memory(self, userid, memory): LOG.info("Begin to resize memory on vm %s", userid) # Do resize self._smtclient.resize_memory(userid, memory) LOG.info("Complete resize memory on vm %s", userid) zVMCloudConnector-1.6.3/zvmsdk/hostops.py0000664000175000017510000001503514263437505020126 0ustar ruirui00000000000000# Copyright 2017,2022 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import time

from zvmsdk import config
from zvmsdk import constants as const
from zvmsdk import exception
from zvmsdk import log
from zvmsdk import smtclient
from zvmsdk import utils as zvmutils


_HOSTOPS = None
CONF = config.CONF
LOG = log.LOG


def get_hostops():
    # Lazy module-level singleton accessor for HOSTOps.
    global _HOSTOPS
    if _HOSTOPS is None:
        _HOSTOPS = HOSTOps()
    return _HOSTOPS


class HOSTOps(object):
    """Host-level operations: host/hypervisor inventory, guest list and
    disk-pool capacity queries, with a time-based cache for volume data."""

    def __init__(self):
        self._smtclient = smtclient.get_smtclient()
        # Cache: volume_name -> info dict returned by get_volume_info().
        self._volume_infos = {}
        # Cached result of get_diskpool_volumes() for self.disk_pool.
        self._volumes = None
        # Epoch seconds after which the cached volume list is stale.
        self.cache_expiration = time.time()
        self.disk_pool = None

    def get_info(self):
        """Build a dict of host inventory data (cpu, memory, hypervisor
        version, disk pool usage) from the SMT host info."""
        inv_info = self._smtclient.get_host_info()
        host_info = {}

        with zvmutils.expect_invalid_resp_data(inv_info):
            host_info['zcc_userid'] = inv_info['zcc_userid']
            host_info['zvm_host'] = inv_info['zvm_host']
            host_info['vcpus'] = int(inv_info['lpar_cpu_total'])
            host_info['vcpus_used'] = int(inv_info['lpar_cpu_used'])
            host_info['cpu_info'] = {}
            host_info['cpu_info'] = {'architecture': const.ARCHITECTURE,
                                     'cec_model': inv_info['cec_model'], }
            mem_mb = zvmutils.convert_to_mb(inv_info['lpar_memory_total'])
            host_info['memory_mb'] = mem_mb
            mem_mb_used = zvmutils.convert_to_mb(inv_info['lpar_memory_used'])
            host_info['memory_mb_used'] = mem_mb_used
            host_info['hypervisor_type'] = const.HYPERVISOR_TYPE
            # e.g. "z/VM 7.2.0" -> ['7', '2', '0'] -> 720
            verl = inv_info['hypervisor_os'].split()[1].split('.')
            version = int(''.join(verl))
            host_info['hypervisor_version'] = version
            host_info['hypervisor_hostname'] = inv_info['hypervisor_name']
            host_info['ipl_time'] = inv_info['ipl_time']

        disk_pool = CONF.zvm.disk_pool
        if disk_pool is None:
            # No disk pool configured: report zero capacity.
            dp_info = {'disk_total': 0, 'disk_used': 0, 'disk_available': 0}
        else:
            # disk_pool format is '<type>:<poolname>'.
            diskpool_name = disk_pool.split(':')[1]
            dp_info = self.diskpool_get_info(diskpool_name)
        host_info.update(dp_info)

        return host_info

    def guest_list(self):
        """Return userids of guests owned by this host."""
        guest_list = self._smtclient.get_all_user_direct()
        with zvmutils.expect_invalid_resp_data(guest_list):
            # If the z/VM is an SSI cluster member, it could get
            # guests on other z/VMs in the same SSI cluster, need
            # get rid of these guests.
            if self._smtclient.host_get_ssi_info():
                new_guest_list = []
                for userid in guest_list:
                    if not zvmutils.check_userid_on_others(userid):
                        new_guest_list.append(userid)
                guest_list = new_guest_list
        return guest_list

    def _cache_enabled(self):
        # Caching is active only for a positive interval.
        return CONF.monitor.cache_interval > 0

    def diskpool_get_volumes(self, disk_pool):
        """Return the volumes of the given disk pool, served from the
        cache when enabled, current, and for the same pool."""
        pool_name = disk_pool.split(':')[1].upper()
        if self._cache_enabled():
            if (time.time() > self.cache_expiration):
                self._volumes = None

            if self._volumes:
                if disk_pool == self.disk_pool:
                    return self._volumes
            self._volumes = self._smtclient.get_diskpool_volumes(pool_name)
            # NOTE(review): expiration is cache_interval * 10 -- looks
            # deliberate (volume data changes rarely) but confirm.
            self.cache_expiration = time.time() + \
                float(CONF.monitor.cache_interval * 10)
            self.disk_pool = disk_pool
            return self._volumes
        else:
            self._volumes = self._smtclient. \
                    get_diskpool_volumes(pool_name)
            self.disk_pool = disk_pool
            return self._volumes

    def get_volume_info(self, volume_name):
        """Return type/size info for one volume, refreshing the whole
        volume-info cache on a miss.

        :raises ZVMNotFound: when the volume is unknown after refresh
        """
        update_needed = False
        with zvmutils.expect_invalid_resp_data():
            if self._volume_infos is not None:
                volume_info = self._volume_infos.get(volume_name)
                if not volume_info:
                    update_needed = True
                else:
                    return volume_info
            else:
                update_needed = True
            if update_needed:
                # results of get_volume_info() is the format like:
                # {'IAS100': { 'volume_type': '3390-54',
                #  'volume_size': '60102'},
                #  'IAS101': { 'volume_type': '3390-09',
                #  'volume_size': '60102'}}
                self._volume_infos = self._smtclient.get_volume_info()
                volume_info = self._volume_infos.get(volume_name)
                if not volume_info:
                    msg = ("Not found the volume info for the"
                           " volume %(volume)s: make sure the volume"
                           " is in the disk_pool configured for sdkserver.") \
                           % {'volume': volume_name}
                    raise exception.ZVMNotFound(msg=msg)
                else:
                    return volume_info

    def diskpool_get_info(self, pool):
        """Return pool capacity figures rounded to whole gigabytes.

        :raises SDKInternalError: when a size string has no G/M suffix
        """
        dp_info = self._smtclient.get_diskpool_info(pool)
        with zvmutils.expect_invalid_resp_data(dp_info):
            for k in list(dp_info.keys()):
                s = dp_info[k].strip().upper()
                if s.endswith('G'):
                    # Round "<int>.<frac>G" to the nearest whole GB.
                    sl = s[:-1].split('.')
                    n1, n2 = int(sl[0]), int(sl[1])
                    if n2 >= 5:
                        n1 += 1
                    dp_info[k] = n1
                elif s.endswith('M'):
                    # NOTE(review): s[:-3] assumes the value looks like
                    # '<int>.<one digit>M'; a plain '<int>M' would be
                    # mis-parsed here -- confirm the SMT output format.
                    n_mb = int(s[:-3])
                    n_gb, n_ad = n_mb // 1024, n_mb % 1024
                    if n_ad >= 512:
                        n_gb += 1
                    dp_info[k] = n_gb
                else:
                    exp = "ending with a 'G' or 'M'"
                    errmsg = ("Invalid diskpool size format: %(invalid)s; "
                              "Expected: %(exp)s") % {'invalid': s,
                                                      'exp': exp}
                    LOG.error(errmsg)
                    raise exception.SDKInternalError(msg=errmsg)

        return dp_info

    def host_get_ssi_info(self):
        # Pass-through to smtclient; truthy when host is an SSI member.
        return self._smtclient.host_get_ssi_info()
zVMCloudConnector-1.6.3/zvmsdk/log.py0000775000175000017510000000510413575566551017210 0ustar  ruirui00000000000000# Copyright 2017,2018 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging import os from zvmsdk import config class Logger(): def __init__(self, logger): # create a logger self.logger = logging.getLogger(logger) self.log_level = logging.INFO def getlog(self): return self.logger def setup(self, log_dir, log_level, log_file_name='zvmsdk.log'): # make sure target directory exists if not os.path.exists(log_dir): if os.access(log_dir, os.W_OK): os.makedirs(log_dir) else: log_dir = '/tmp/' # Setup log level self.updateloglevel(log_level) self.logger.setLevel(self.log_level) # create a handler for the file log_file = os.path.join(log_dir, log_file_name) fh = logging.FileHandler(log_file) fh.setLevel(self.log_level) # set the formate of the handler formatter = logging.Formatter( '[%(asctime)s] [%(levelname)s] %(message)s', '%Y-%m-%d %H:%M:%S') fh.setFormatter(formatter) # add handler in the logger self.logger.addHandler(fh) def updateloglevel(self, level): log_level = level.upper() if log_level in ('LOGGING.INFO', 'INFO'): log_level = logging.INFO elif log_level in ('LOGGING.DEBUG', 'DEBUG'): log_level = logging.DEBUG elif log_level in ('LOGGING.WARN', 'WARN'): log_level = logging.WARN elif log_level in ('LOGGING.ERROR', 'ERROR'): log_level = logging.ERROR elif log_level in ('LOGGING.CRITICAL', 'CRITICAL'): log_level = logging.CRITICAL else: # default to logging.INFO log_level = logging.INFO self.log_level = log_level def getloglevel(self): return self.log_level def setup_log(): global LOGGER LOGGER.setup(log_dir=config.CONF.logging.log_dir, log_level=config.CONF.logging.log_level) LOGGER = Logger('ZVMSDK') LOG = LOGGER.getlog() zVMCloudConnector-1.6.3/zvmsdk/config.py0000775000175000017510000007156414315210052017670 0ustar ruirui00000000000000# Copyright 2017-2020 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


import os

from six.moves import configparser


class Opt(object):
    """Declaration of one configuration option: name, config-file
    section, value type, default, required flag and help text."""

    def __init__(self, opt_name, section='default', opt_type='str',
                 help='', default=None, required=False):
        # NOTE(review): 'help' shadows the builtin but is kept for
        # compatibility with the existing keyword callers below.
        self.name = opt_name
        self.section = section
        self.opt_type = opt_type
        self.default = default
        self.required = required
        self.help = help


# Full catalogue of zvmsdk options; consumed by ConfigOpts below.
zvm_opts = [
    # logging options
    Opt('log_dir',
        section='logging',
        default='/var/log/zvmsdk/',
        help='''
Directory where log file to be put into.

SDK has a set of logs to help administrator to debug
and aduit actions performed through SDK. Edit this option
if you want to put logs into specified place.

Please ensure the service running on the consume which
consumes SDK has the authorization to write to the path.
'''),
    Opt('log_level',
        section='logging',
        default='logging.INFO',
        help='''
Level of the log.

SDK utilize python logging package to help admin debug
or analyze issues. it's recommend to set this value
to logging.DEBUG to get more detailed logs and set it to
logging.INFO(default) in normal situation.

recommend values:
logging.ERROR: level above ERROR will be written to log file.
logging.WARNINS: level above WARNING(ERROR, WARNING)
will be written to log file.
logging.INFO: level above INFO(ERROR, WARNING, INFO)
will be written to log file.
logging.DEBUG: All log level (ERROR, WARNING, INFO, DEBUG)
will be written to log file.
'''),
    # zvm options
    Opt('default_nic_vdev',
        section='zvm',
        default='1000',
        help='''
Virtual device number for default NIC address.
This value is the first NIC virtual device number, each NIC needs 3 numbers for control/read/write, so by default the first NIC's address is 1000, the second one is 1003 etc. Possible values: An integer value in hex format, between 0 and 65536 (x'FFFF'). It should not conflict with other device numbers in the z/VM guest's configuration, for example device numbers of the root or ephemeral or persistent disks. Sample NIC definitions in the z/VM user directory: NICDEF 1000 TYPE QDIO LAN SYSTEM MACID NICDEF 1003 TYPE QDIO LAN SYSTEM MACID ''' ), Opt('user_default_share_unit', section='zvm', opt_type='int', default=100, help=''' The default SHARE settings configuration. The recommend value of SHARE. From z/VM doc, SHARE is relative value of virtual machine and if you set SHARE to 100 while virtual CPUs are 4, then each vCPU get 25 entitlement. So the mechanism currently is: 1) If a share is given, set SHARE value to the VM 2) If no SHARE is given during creation, check user_default_share_unit 3) If user_default_share_unit is 0, do nothing 4) If user_default_share_unit it not 0(current default is 100), then insert statement `SHARE RELATIVE user_default_share_unit*vCPU` into user direct, for example, with user_default_share_unit=100, 4 vCPU will create `SHARE RELATIVE 400`. This align the best practice of z/VM recommendation. '''), Opt('default_admin_userid', section='zvm', help=''' Default LOGONBY userid(s) for the cloud. This is a set of z/VM userid(s) which are allowed to logon using the LOGONBY keyword to the guests created by the z/VM SDK solution, compatible with the LBYONLY keyword of the user directory statement. This value is only used when a guest is created. If you change this value, existing guests' directory entries are not automatically updated with the new value. When an ESM is installed, this parameter only governs when the ESM defers to CP's processing. Usage note: The default is empty string with nothing set. 
'' is an invalid value and it will cause VM deploying failed. Thus, DO NOT set default_admin_userid=''. When a non-empty string is provided, blank chars will be used as delimiter, you can use LOGONBY xxx command to log on the guest using the corresponding admin userid's password. For example, when you set this value to 'oper1 oper2 oper3 jones', it means you can use any one of 'oper1', 'oper2', 'oper3', 'jones' as an admin user. see the z/VM CP Planning and Administration for additional information. Possible values: A maximum of 8 blank-delimited strings. Each non-blank string must be a valid z/VM userid. e.g 'oper1 oper2' is a valid value. 'o1 o2 o3 o4 o5 o6 o7 o8 o9' is NOT a valid value. '''), # FIXME: remove this option when switch to smt Opt('user_default_password', section='zvm'), Opt('disk_pool', section='zvm', default=None, required=False, help=''' zVM disk pool and type for root/ephemeral disks. The option is combined by 2 parts and use : as separator. The first part is the type of disks in the disk pool. The disks in one disk pool must in same type (ECKD or FBA). Possible values of the disk pool type: A string, either ECKD or FBA. The second part is the volume group name defined in your directory manager on your z/VM system, which will be used for allocating disks for new guest. A dollar sign ($) is not allowed in the name. Sample disk_pool values: ECKD:diskpo1 FBA:testpool '''), Opt('user_profile', section='zvm', required=True, help=''' PROFILE name to use when creating a z/VM guest. When SDK deploys an guest on z/VM, it can include some common statements from a PROFILE definition. This PROFILE must already be included in your z/VM user directory. Possible values: An 8 character name of a PROFILE that is already defined in the z/VM user directory. '''), Opt('user_root_vdev', section='zvm', default='0100', help=''' Virtual device number for root disk. When SDK deploys an guest, it creates a root disk and potentially several data disks. 
This value is the virtual device number of the root disk. Possible values: An integer value in hex format, between 0 and 65536 (x'FFFF'). It should not conflict with other device numbers in the z/VM guest's configuration, for example device numbers of the NICs or ephemeral or persistent disks. Sample root disk in user directory: MDISK 0100 '''), Opt('user_default_max_cpu', section='zvm', default=32, opt_type='int', help=''' The default maximum number of virtual processers the user can define. This value is used as the default value for maximum vcpu number when create a guest with no max_cpu specified. The number must be a decimal value between 1 and 64. '''), Opt('user_default_max_memory', section='zvm', default='64G', help=''' The default maximum size of memory the user can define. This value is used as the default value for maximum memory size when create a guest with no max_mem specified. The value can be specified by 1-4 bits of number suffixed by either M (Megabytes) or G (Gigabytes) and the number must be a whole number, values such as 4096.8M or 32.5G are not supported. The value should be adjusted based on your system capacity. '''), Opt('user_default_max_reserved_memory', section='zvm', default='64G', help=''' The default maximum size of reserved memory in a vm's direct entry. This value is used as the default value for maximum reserved memory size for a guest. The value can be specified by 1-4 bits of number suffixed by either M (Megabytes) or G (Gigabytes) and the number must be a whole number, values such as 4096.8M or 32.5G are not supported. The value should be adjusted based on your system capacity. '''), Opt('namelist', section='zvm', help=''' The name of a list containing names of virtual servers to be queried. The list which contains the userid list by default is named: VSMWORK1 NAMELIST, see DMSSICNF COPY key: NameListFileIdAny. The list has to be accessible to the SMAPI servers. The length of namelist must no longer than 64. 
'''), Opt('swap_force_mdisk', section='zvm', default=False, help=''' For swap disk to create from mdisk instead of vdisk. In boot from volume case, there might be no disk pool at all, then the only choice is to use vdisk (or using FCP LUN which is complicated), if customer doesn't want vdisk, then set this value to `True` so VDISK will not be used and in turn it will fail check. '''), Opt('remotehost_sshd_port', section='zvm', default='22', help=''' The port number of remotehost sshd. '''), Opt('bypass_smapiout', section='zvm', default=False, help=''' Only used for SMAPIOUT is not ready. '''), # image options Opt('default_compress_level', section='image', default='6', opt_type='str', help=''' Default compress level for captured image. '''), Opt('sdk_image_repository', section='image', default='/var/lib/zvmsdk/images', help=''' Directory to store sdk images. SDK image repository to store the imported images and the staging images that is in snapshotting. Once snapshot finished, the image will be removed to the netboot directory accordingly. Two kinds of image repository looks like: /var/lib/zvmsdk/images/netboot// /var/lib/zvmsdk/images/staging// '''), # file options Opt('file_repository', section='file', default='/var/lib/zvmsdk/files', help=''' Directory to store sdk imported or exported files. SDK file repository to store the imported files and the files that will be exported, the imported files will be put into /imported the files to be exported will be put into /exported '''), # network options Opt('my_ip', section='network', required=True, help=''' IP address of the Linux machine which is running SDK on. Some remote copy operations need to be performed during guest creation, this option tell the SDK the host ip which can be used to perform copy from and copy to operations. '''), # guest options Opt('console_log_size', section='guest', default=100, opt_type='int', help=''' The maximum allowed console log size, in kilobytes. 
Console logs might be transferred to sdk user, this option controls how large each file can be. A smaller size may mean more calls will be needed to transfer large consoles, which may not be desirable for performance reasons. '''), Opt('extend_partition_fs', section='guest', default='True', help=''' Whether to automatically extend the partition and filesystem of guest. If set to True, when deploying an image to a larger disk, zvmsdk automatically extends the last partition and the file system to use up the whole disk. If do not want to do the extend action automaictly, you must set this option to be False. '''), Opt('reachable_timeout', section='guest', default=180, opt_type='int', help=''' The maximum time waiting until the guest reachable after started. When starting a guest, specify the timeout value will check the guest status untils it becames reachable or timeout. '''), Opt('softstop_timeout', section='guest', default=120, opt_type='int', help=''' The maximum time waiting until the guest shut down. Sometimes, the shutdown action will take a bit lone time to complete. If you want to make sure the guest in shut-down status after executing action of softstop, this will help. '''), Opt('softstop_interval', section='guest', default=10, opt_type='int', help=''' The interval time between 2 retries, in seconds. This will take effect only when you set softstop_retries item. What's more, the value of softstop_timeout/softstop_interval is the times retried. '''), # monitor options Opt('cache_interval', section='monitor', default=300, opt_type='int', help=''' Cached monitor data update interval This is used to prevent excessive effort spent retrieving the monitor data by calling the SDK backend utilities. When this cache is enabled, a inspect call will only call the SDK backend utilities when the inspected guest's info does not exist in the cache or when the cache data is expired. 
And when an cache update is needed, all the existing guests' data will be retrieved in a single call to the backend. When this value is below or equal to zero, the cache will be disabled and each inspect call will need to call the backend utilities to get the inspected guest's monitor data. ''' ), # wsgi options # this option is used when sending http request # to sdk wsgi, default to none so no token validation # will be used. Opt('auth', section='wsgi', default='none', opt_type='str', help=''' Whether auth will be used. When sending http request from outside to running zvmsdk, Client will be requested to input username/password in order to authorize the call. Set this to 'none' indicated no auth will be used and 'auth' means username and password need to be specified. Possible value: 'none': no auth will be required 'auth': need auth, currently pyjwt is used to return a token to caller if the username and password is correct. ''', ), Opt('token_validation_period', section='wsgi', default=3600, opt_type='int', help=''' How long the token is valid. If a token auth is used, the token return to user will be expired after the period passed. This ensure an user who get this token will not be authorized all the time, a new token need to be recreated after certain time period. ''', ), Opt('token_path', section='wsgi', default='/etc/zvmsdk/token.dat', opt_type='str', help=''' file path that contains admin-token to access sdk http server. Admin-token in order to get a user-token from zvm sdk, and the user-token will be used to validate request before user-token expire. ''' ), Opt('max_concurrent_deploy_capture', section='wsgi', default=20, opt_type='int', help=''' The max total number of concurrent deploy and capture requests allowed in a single z/VM Cloud Connector process. If more requests than this value are revieved concurrently, the z/VM Cloud Connector would reject the requests and return error to avoid resource exhaustion. . 
''' ), # Daemon server options Opt('bind_addr', section='sdkserver', default='127.0.0.1', opt_type='str', help=''' The IP address that the SDK server is listen on. When the SDK server deamon starts, it will try to bind to this address and port bind_port, and wait for the SDK client connection to handle API request. ''' ), Opt('bind_port', section='sdkserver', opt_type='int', default=2000, help=''' The port that the SDK server is listen on. This will work as a pair with bind_addr when the SDK server daemon starts, more info can be found in that configuration description. ''' ), Opt('request_queue_size', section='sdkserver', opt_type='int', default=128, help=''' The size of request queue in SDK server. SDK server maintains a queue to keep all the accepted but not handled requests, and the SDK server workers fetch requests from this queue. To some extend, this queue size decides the max socket opened in SDK server. This value should be adjusted according to the system resource. ''' ), Opt('max_worker_count', section='sdkserver', opt_type='int', default=64, help=''' The maximum number of worker thread in SDK server to handle client requests. These worker threads would work concurrently to handle requests from client. This value should be adjusted according to the system resource and workload. ''' ), # database options Opt('dir', section='database', default='/var/lib/zvmsdk/databases/', opt_type='str', help=''' Directory to store database. SDK databases are used to store a set of tables which contain the information of network, volume, image, etc. This option is used to tell SDK where to store the database files, make sure the process running SDK is able to read write and execute the directory. ''' ), # volume options Opt('fcp_list', section='volume', default='', opt_type='str', help=''' volume fcp list. SDK will only use the fcp devices in the scope of this value. 
''' ), Opt('refresh_bootmap_timeout', section='volume', default=1200, opt_type='int', help=''' The timeout value for waiting refresh_bootmap execution, in seconds. The default value is 1200 seconds, if the execution of refresh_bootmap reached the timeout, the process of refresh_bootmap will be stopped. ''' ), Opt('punch_script_execution_timeout', section='volume', default=1800, opt_type='int', help=''' The timeout value for waiting attach/detach punch scripts execution, in seconds. The default value is 1800 seconds, if the execution of punch scripts reached the timeout, the attach/detach will fail. ''' ), Opt('get_fcp_pair_with_same_index', section='volume', default='0', opt_type='int', help=''' fcp pair selection algorithm fcp_list example: fa00-fa02; fb00-fb02 If use get_fcp_pair_with_same_index, then fcp pair is randomly selected from below combinations. [fa00,fb00],[fa01,fb01],[fa02,fb02] If use get_fcp_pair, then fcp pair is randomly selected from below combinations. [fa00,fb00],[fa01,fb00],[fa02,fb00] [fa00,fb01],[fa01,fb01],[fa02,fb01] [fa00,fb02],[fa01,fb02],[fa02,fb02] Possible value: 0 : use get_fcp_pair. this is the default 1 : use get_fcp_pair_with_same_index ''' ), Opt('force_capture_disk', section='zvm', required=False, opt_type='str', default=None, help=''' Virtual device number for capture function. This value identity the virtual device number for capture image when z/VM guest is power off. Possible values: An string value identify disk number like '0100'. If this value has been configured, capture image function will use this value as disk info to capture with first priority when z/VM guest is power off. This value don't work if z/VM guest status is power on. 
Sample root disk in user directory: MDISK 0100 '''), # tests options Opt('images', section='tests', opt_type='str', ), Opt('userid_prefix', section='tests', default='tst', opt_type='str', ), Opt('ip_addr_list', section='tests', default='192.168.0.2 192.168.0.3 192.168.0.4 192.168.0.5 192.168.0.6', opt_type='str', ), Opt('vswitch', section='tests', opt_type='str', ), Opt('gateway_v4', section='tests'), Opt('cidr', section='tests'), Opt('restapi_url', section='tests', default='http://127.0.0.1:8888'), Opt('zvm_fcp', section='tests'), Opt('target_wwpn', section='tests'), Opt('target_lun', section='tests'), Opt('mount_point', section='tests'), ] class ConfigOpts(object): def __init__(self): self.dicts = {} def get_config_dicts_default(self, opts): _dict = {} for opt in opts: sec = opt.section if _dict.get(sec) is None: _dict[sec] = {} _dict[sec][opt.name] = {'required': opt.required, 'default': opt.default, 'type': opt.opt_type, 'help': opt.help} return _dict def register(self, opts): # Register the defined options and parse to dict self.dicts = self.get_config_dicts_default(opts) return self.clear_and_to_dict() def config(self): # Load config file and merge with default definitions # read config file override_dicts = self.read_config_file_to_dicts() # overwrite default value try: self.dicts = self.merge(self.dicts, override_dicts) except ImportError: pass # Check config value self._check_value(self.dicts) # Clear unused attributes of each option, and parse to our defined dict return self.clear_and_to_dict() def read_config_file_to_dicts(self): configs = {} read_file = self.find_config_file(project="zvmsdk") if read_file is None: raise ConfFileMissingError() else: cf = configparser.ConfigParser() cf.read(read_file) # read each section and option to dict secs = cf.sections() for sec in secs: configs[sec] = {} # get all options of the section in a list opts = cf.options(sec) for opt in opts: val = cf.get(sec, opt) configs[sec][opt] = val return configs def merge(self, 
defaults, override): # merge the defaults and overridden # The overridden options would only have 'default' set in the # resulted dicts r = {} for k, v in defaults.items(): if k in override: if isinstance(v, dict) and isinstance(override[k], dict): r[k] = self.merge(v, override[k]) elif isinstance(v, dict): if override[k] is not None: v['default'] = override[k] r[k] = v else: r[k] = override[k] else: r[k] = v return r def clear_and_to_dict(self): # This function would clear the dict to remove the unused keys # ('required', 'default', 'type', 'help'), set the opt value to # the final value merged in 'default'. # And then, convert the python dict to our defined Dict object clear_dict = {} pydict = self.dicts for k1, v1 in pydict.items(): r_con = {} for k2, v2 in v1.items(): r_con[k2] = v2['default'] clear_dict[k1] = r_con return self.toDict(clear_dict) def _check_value(self, conf): for k1, v1 in conf.items(): for k2, v2 in v1.items(): # Check required options if v2['required'] and (v2['default'] is None): raise RequiredOptMissingError(k1, k2) # Convert type if v2['type'] == 'int': v2['default'] = int(v2['default']) # Check format if (k2 == "disk_pool") and (v2['default'] is not None): self._check_zvm_disk_pool(v2['default']) # check user_default_max_memory if (k2 == "user_default_max_memory") and ( v2['default'] is not None): self._check_user_default_max_memory(v2['default']) # check user_default_max_reserved_memory if (k2 == "user_default_max_reserved_memory") and ( v2['default'] is not None): self._check_user_default_max_reserved_memory(v2['default']) # check user_default_max_cpu if (k2 == "user_default_max_cpu") and ( v2['default'] is not None): self._check_user_default_max_cpu(v2['default']) def _check_zvm_disk_pool(self, value): disks = value.split(':') if (len(disks) != 2) or (disks[0].upper() not in ['FBA', 'ECKD']) or ( disks[1] == ''): raise OptFormatError("zvm", "disk_pool", value) def _check_user_default_max_memory(self, value): suffix = value[-1].upper() 
size = value[:-1] if (suffix not in ['G', 'M']) or (len(size) > 4) or ( size.strip('0123456789') != ''): raise OptFormatError("zvm", "user_default_max_memory", value) def _check_user_default_max_reserved_memory(self, value): suffix = value[-1].upper() size = value[:-1] if (suffix not in ['G', 'M']) or (len(size) > 4) or ( size.strip('0123456789') != ''): raise OptFormatError("zvm", "user_default_max_reserved_memory", value) def _check_user_default_max_cpu(self, value): if (value < 1) or (value > 64): raise OptFormatError("zvm", "user_default_max_cpu", value) def toDict(self, d): D = Dict() for k, v in d.items(): D[k] = self.toDict(v) if isinstance(v, dict) else v return D def _fixpath(self, p): """Apply tilde expansion and absolutization to a path.""" return os.path.abspath(os.path.expanduser(p)) def _get_config_dirs(self): """Return a list of directories where config files may be located. following directories are returned:: ./ ../etc ~/ /etc/zvmsdk/ """ _cwd = os.path.split(os.path.abspath(__file__))[0] _pdir = os.path.split(_cwd)[0] _etcdir = ''.join((_pdir, '/', 'etc/')) cfg_dirs = [ self._fixpath(_cwd), self._fixpath('/etc/zvmsdk/'), self._fixpath('/etc/'), self._fixpath('~'), self._fixpath(_etcdir), ] return [x for x in cfg_dirs if x] def _search_dirs(self, dirs, basename, extension=""): """Search a list of directories for a given filename or directory name. Iterator over the supplied directories, returning the first file found with the supplied name and extension. :param dirs: a list of directories :param basename: the filename :param extension: the file extension, for example '.conf' :returns: the path to a matching file, or None """ for d in dirs: path = os.path.join(d, '%s%s' % (basename, extension)) if os.path.exists(path): return path return None def find_config_file(self, project=None, extension='.conf'): """Return the config file. 
:param project: "zvmsdk" :param extension: the type of the config file """ cfg_dirs = self._get_config_dirs() config_files = self._search_dirs(cfg_dirs, project, extension) return config_files class Dict(dict): ''' Simple dict but support access as x.y style. ''' def __init__(self, names=(), values=(), **kw): super(Dict, self).__init__(**kw) for k, v in zip(names, values): self[k] = v def __getattr__(self, key): try: return self[key] except KeyError: raise AttributeError(r"'CONF' object has no attribute '%s'" % key) def __setattr__(self, key, value): self[key] = value class RequiredOptMissingError(Exception): """Raised if an option is required but no value is supplied by the user.""" def __init__(self, grp_name, opt_name): self.grp_name = grp_name self.opt_name = opt_name def __str__(self): return "value required for option %s - %s" % (self.grp_name, self.opt_name) class ConfFileMissingError(Exception): """Raised if the configuration file zvmsdk.conf cann't be found.""" def __init__(self): message = "zvmsdk.conf is not found." super(ConfFileMissingError, self).__init__(message) class OptFormatError(Exception): """Raised if an option is required but no value is supplied by the user.""" def __init__(self, grp_name, opt_name, value): self.grp_name = grp_name self.opt_name = opt_name self.value = value def __str__(self): return "value %s for option %s - %s is invalid" % (self.value, self.grp_name, self.opt_name) CONFOPTS = ConfigOpts() CONF = CONFOPTS.register(zvm_opts) def load_config(): global CONF CONF = CONFOPTS.config() zVMCloudConnector-1.6.3/zvmsdk/version.py0000664000175000017510000000154414263437505020114 0ustar ruirui00000000000000# Copyright 2017 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import sys __all__ = ['__version__'] __version__ = '1.6.3' # Check supported Python versions _PYTHON_M = sys.version_info[0] _PYTHON_N = sys.version_info[1] if _PYTHON_M == 2 and _PYTHON_N < 7: raise RuntimeError('On Python 2, zvm sdk requires Python 2.7') zVMCloudConnector-1.6.3/zvmsdk/configdrive.py0000775000175000017510000001232113575566551020735 0ustar ruirui00000000000000# Copyright 2017 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os import dist import tarfile import shutil import stat from zvmsdk import config CONF = config.CONF _DEFAULT_MODE = stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO def _generate_vdev(base, offset): """Generate virtual device number based on base vdev :param base: base virtual device number, string of 4 bit hex. :param offset: offset to base, integer. 
""" vdev = hex(int(base, 16) + offset)[2:] return vdev.rjust(4, '0') def get_cfg_str(network_interface_info, os_version): ip_v4 = network_interface_info['ip_addr'] address_read = network_interface_info['nic_vdev'] broadcast_v4 = network_interface_info['broadcast_v4'] gateway_v4 = network_interface_info['gateway_v4'] netmask_v4 = network_interface_info['netmask_v4'] nic_vdev = network_interface_info['nic_vdev'] subchannels = ','.join(('0.0.' + nic_vdev, '0.0.' + _generate_vdev(nic_vdev, 1), '0.0.' + _generate_vdev(nic_vdev, 2))) linuxdist = dist.LinuxDistManager().get_linux_dist(os_version)() device_num = 0 device_name = linuxdist.get_device_name(device_num) cfg_str = 'DEVICE=' + device_name + '\n' cfg_str += 'BOOTPROTO=static\n' cfg_str += 'BROADCAST=' + broadcast_v4 + '\n' cfg_str += 'GATEWAY=' + gateway_v4 + '\n' cfg_str += 'IPADDR=' + ip_v4 + '\n' cfg_str += 'NETMASK=' + netmask_v4 + '\n' cfg_str += 'NETTYPE=qeth\n' cfg_str += 'ONBOOT=yes\n' cfg_str += 'PORTNAME=PORT' + address_read + '\n' cfg_str += 'OPTIONS=\"layer2=1\"\n' cfg_str += 'SUBCHANNELS=' + subchannels + '\n' return cfg_str def generate_net_file(network_interface_info, net_file_path, os_version): cfg_str = get_cfg_str(network_interface_info, os_version) generate_file(cfg_str, net_file_path) def get_znetconfig_str(os_version): linuxdist = dist.LinuxDistManager().get_linux_dist(os_version)() udev_settle = linuxdist.get_znetconfig_contents() znetconfig = '\n'.join(('# !/bin/sh', udev_settle)) znetconfig += '\nrm -rf /tmp/znetconfig.sh\n' return znetconfig def generate_znetconfig_file(znetconfig_path, os_version): znetconfig = get_znetconfig_str(os_version) generate_file(znetconfig, znetconfig_path) def get_meta_data_str(): meta_data = '{\"files\":[{\"path\":' +\ '\"/etc/sysconfig/network-scripts/ifcfg-enccw0.0.1000\", ' meta_data += '\"content_path\": \"/content/0000\"},' +\ '{\"path\": \"/tmp/znetconfig.sh\", \"content_path\":' +\ ' \"/content/0001\"}], ' meta_data += '\"uuid\": 
\"4ec7a80d-201a-4c17-afbc-b0a93b66133b\", ' meta_data += '\"availability_zone\": \"nova\", ' meta_data += '\"hostname\": \"eckdrh72.5.novalocal\", ' meta_data += '\"launch_index\": 0, ' meta_data += '\"project_id\": \"94f8dc6644f24785a1383959dbba3f9e\", ' meta_data += '\"name\": \"eckdrh72.5\"}' return meta_data def generate_meta_data(meta_data_path): meta_data = get_meta_data_str() generate_file(meta_data, meta_data_path) def generate_file(file_content, path): f = open(path, 'w') f.write(file_content) f.close() def create_config_drive(network_interface_info, os_version): """Generate config driver for zVM guest vm. :param dict network_interface_info: Required keys: ip_addr - (str) IP address nic_vdev - (str) VDEV of the nic gateway_v4 - IPV4 gateway broadcast_v4 - IPV4 broadcast address netmask_v4 - IPV4 netmask :param str os_version: operating system version of the guest """ temp_path = CONF.guest.temp_path if not os.path.exists(temp_path): os.mkdir(temp_path) cfg_dir = os.path.join(temp_path, 'openstack') if os.path.exists(cfg_dir): shutil.rmtree(cfg_dir) content_dir = os.path.join(cfg_dir, 'content') latest_dir = os.path.join(cfg_dir, 'latest') os.mkdir(cfg_dir) os.mkdir(content_dir) os.mkdir(latest_dir) net_file = os.path.join(content_dir, '0000') generate_net_file(network_interface_info, net_file, os_version) znetconfig_file = os.path.join(content_dir, '0001') generate_znetconfig_file(znetconfig_file, os_version) meta_data_path = os.path.join(latest_dir, 'meta_data.json') generate_meta_data(meta_data_path) network_data_path = os.path.join(latest_dir, 'network_data.json') generate_file('{}', network_data_path) vendor_data_path = os.path.join(latest_dir, 'vendor_data.json') generate_file('{}', vendor_data_path) tar_path = os.path.join(temp_path, 'cfgdrive.tgz') tar = tarfile.open(tar_path, "w:gz") os.chdir(temp_path) tar.add('openstack') tar.close() return tar_path zVMCloudConnector-1.6.3/zvmsdk/returncode.py0000664000175000017510000004751214315210052020566 
0ustar ruirui00000000000000# Copyright 2017,2021 IBM Corp. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Each error corresponds to a dictionary contains: 'overallRC', 'modID', 'rc', 'rs'. -- 'overallRC' is used to indicate the overall return code, all the errors are classified into group with different overallRC value. -- 'modID' is used to indicate which module this error happens in. The available Module and their modID value to use is listed in the following: ModName ModID ------ -- SMT 1 GUEST 10 NETWORK 20 VOLUME 30 IMAGE 40 MONITOR 50 SDKSERVER 100 SDKWSGI 120 SDKGENERAL 400 -- 'rc' is used together with rs to indicate the specific error. If 'rs' is enough to indicate the error, 'rc' would be set same to the overallRC. -- 'rs' is used to indicate the specific error. It is defined specificly by each module. Different error inside a module should use different 'rc'/'rs' combination. ------------------------------------------------------------------------------- ErrorCode General Classification ------------------------------------------------------------------------------- ErrorClass overallRC modID rc rs Description SMT 1-99 1 xx xx Used by SMT, refer to smtlayer/msgs.py Invalid input 100 SDKGEN 100 1 SDK API parameter number error 100 SDKGEN 100 2 SDK API input type error 100 SDKGEN 100 3 SDK API parameter format error ( value not in expected format or range) Socket Error 101 MODRC 101 x The SDK server or client socket error. 
Other Module Error 300 MODRC 300 x The module-specific error during SDK API handling that not belongs to other general module-shared error. Invalid API name 400 MODRC 400 x The SDK server received a invalid API name. REST API Req Err 400 MODRC 400 X The SDKWSGI detects an exception Object Not Exist 404 MODRC 404 1 The operated object does not exist, eg, the guest/vswitch/ image/volume. Conflict 409 MODRC 409 x The status of the to-be-updated object is conflict with the update request. Object Deleted 410 MODRC 410 1 The operated object has been deleted and not exist any more. This can be used for some module that support deleted=1 in DB. Internal error 500 MODRC 500 1 The SDK module got unexpected error, eg, typeerror, keyerror, etc. SDK server would take all the exceptions not belonging to SDKBaseexception as InternalError .Such error generally means bug report, SDK code should avoid using this return code. Service Unavailable 503 MODRC 503 1 The SDK REST reject deploy/capture requests because of the concurrent capture/deploy running exceeds the maximum number. Not Implementation 501 MODRC 501 1 The requested SDK function has not been implemented """ # ----------------------------------------------------------------------------- # Detail Module RC definition of each error # ----------------------------------------------------------------------------- ModRCs = { 'smt': 1, 'guest': 10, 'network': 20, 'volume': 30, 'image': 40, 'monitor': 50, 'file': 60, 'sdkserver': 100, 'sdkwsgi': 120, # The 'zvmsdk' is used as the default module if module is not specified # when raising exception 'zvmsdk': 400 } errors = { # Each entry defined here corresponds to a kind of error indicated by the # following list of info: # 1. the dict of 'overallRC', 'rc' # 2. the dict containing all the possible rs and its error message # 3. The general error description. 
This should be used for doc generation # Invalid input error 'input': [{'overallRC': 100, 'modID': ModRCs['zvmsdk'], 'rc': 100}, {1: ("Invalid API arg count, API: %(api)s, %(expected)d expected" " while %(provided)d provided."), 2: ("Invalid API arg type, API: %(api)s, expected types: " "'%(expected)s', input types: '%(inputtypes)s'"), 3: ("Invalid API arg format, error: %(msg)s"), 4: ("Missing required option: %(msg)s"), }, "Invalid API Input", ], # General Errors for each module, same overallRC = 300 # Guest Operation failed 'guest': [{'overallRC': 300, 'modID': ModRCs['guest'], 'rc': 300}, {1: "Database operation failed, error: %(msg)s", 2: "Failed to add mdisks when creating guest, error: %(msg)s", 3: ("Failed to deploy image to userid: '%(userid)s', " "unpackdiskimage failed with rc: %(unpack_rc)d, " "error: %(err)s"), 4: ("Failed to deploy image to userid: '%(userid)s', " "copy configure drive failed: %(err_info)s"), 5: ("Failed to capture userid %(userid)s to generate image, " "error: %(msg)s"), 6: ("Failed to resize cpus of guest: '%(userid)s', " "error: update cpu definition in user entry failed with " "smt error: '%(err)s'."), 7: ("Failed to live resize cpus of guest: '%(userid)s', " "error: define new cpu to active failed with smt error: " "'%(err)s'."), 8: ("Failed to live resize cpus of guest: '%(userid)s', " "error: rescan cpus to hot-plug new defined cpus failed: " "'%(err)s'."), 9: ("Failed to resize memory of guest: '%(userid)s', " "error: lock user entry failed with " "smt error: '%(err)s'."), 10: ("Failed to resize memory of guest: '%(userid)s', " "error: replace user entry failed with " "smt error: '%(err)s'."), 11: ("Failed to live resize memory of guest: '%(userid)s', " "error: define standby memory failed with " "smt error: '%(err)s'."), 12: ("Failed to deploy image to userid: '%(userid)s', " "get unpackdiskimage cmd failed: %(err)s"), 13: ("Failed to deploy image to userid: '%(userid)s', " "ignition file is required when deploying RHCOS 
image"), 14: ("Failed to deploy image to userid: '%(userid)s', %(msg)s"), 15: ("Failed to live resize cpus of guest: '%(userid)s', " "error: enable new defined cpus failed: '%(err)s'."), 16: ("Failed to start the guest: '%(userid)s', %(msg)s") }, "Operation on Guest failed" ], # Network Operation failed 'network': [{'overallRC': 300, 'modID': ModRCs['network'], 'rc': 300}, {1: "Database operation failed, error: %(msg)s", 2: "ZVMSDK network error: %(msg)s", 3: ("Failed to couple nic %(nic)s to vswitch %(vswitch)s " "on the active guest system, error: %(couple_err)s, and " "failed to revoke user direct's changes, " "error: %(revoke_err)s "), 4: ("Failed to create nic %(nic)s for %(userid)s on the " "active guest system, error: %(create_err)s, and " "failed to revoke user direct's changes, " "error: %(revoke_err)s "), 5: ("Failed to actively change network setting for user " "%(userid)s, error: %(msg)s") }, "Operation on Network failed" ], # Image Operation failed 'image': [{'overallRC': 300, 'modID': ModRCs['image'], 'rc': 300}, {1: "Database operation failed, error: %(msg)s", 2: "No image schema found for %(schema)s", 3: "Image import error: Failed to calculate the md5sum of the" " image", 4: "Image import error: The md5sum after import is not same as" " source image, it is possible that the image has been " "broken during import", 5: "Image import error: Failed to get the root disk size units" " of the image via hexdump", 6: "Image import error: The header of image does not contain" " built-in disk size units", 7: "Image import error: The image's disk type is not valid." 
" Currently only FBA or CKD type image is supported", 8: "Image import error: Failed to get the physical size of" " image in bytes", 9: "Import image from http server failed with reason %(err)s", 10: "Image import error: Copying image file from remote" " filesystem failed with error %(err)s", 11: "The specified remote_host %(rh)s format invalid", 12: "Import image from local file system failed with error" " %(err)s", 13: "Image import error: image name %(img)s already exist in " "image database", 14: "Image import error: %(msg)s", 20: "The image record of %(img)s does not exist", 21: "Image Export error: Failed to copy image file to remote " "host with reason: %(msg)s", 22: "Export image to local file system failed: %(err)s", 23: "Image file of %(img)s does not exist, " "so failed to get its timestamp.", }, "Operation on Image failed" ], # Volume Operation failed 'volume': [{'overallRC': 300, 'modID': ModRCs['volume'], 'rc': 300}, {1: "Database operation failed, error: %(msg)s", 3: "Volume %(vol)s has already been attached on instance " "%(inst)s", 4: "Volume %(vol)s is not attached on instance %(inst)s", 5: "Refresh bootmap fails, error code: %(errcode)s and " "reason: %(errmsg)s", 6: "IUCV failed to get authorization from instance " "%(userid)s with reason %(msg)s", 7: "Refresh bootmap timeout with reason %(msg)s", 8: "Failed to attach volume to instance " "%(userid)s with reason %(msg)s", 9: "Failed to detach volume from instance " "%(userid)s with reason %(msg)s", 10: "Failed to refresh bootmap for RHCOS: " "transportfiles are required", 11: "Failed to get volume connector of %(userid)s " "because %(msg)s", }, "Operation on Volume failed" ], # Monitor Operation failed 'monitor': [{'overallRC': 300, 'modID': ModRCs['monitor'], 'rc': 300}, {1: "Database operation failed, error: %(msg)s", }, "Operation on Monitor failed" ], # File Operation failed 'file': [{'overallRC': 300, 'modID': ModRCs['file'], 'rc': 300}, {1: "File import operation failed", 2: "File export 
operation failed"}, "Operation on file failed" ], # REST API Request error (Only used by sdkwsgi) # 'modID' would be set to ModRC['sdkwsgi'] 'RESTAPI': [{'overallRC': 400, 'modID': ModRCs['sdkwsgi'], 'rc': 400}, {1: "Invalid request", }, "REST API Request error" ], # Object not exist # Used when the operated object does not exist. # 'modID' would be set to each module rc when raise the exception # 'rs' is always 1 'notExist': [{'overallRC': 404, 'modID': None, 'rc': 404}, {1: "%(obj_desc)s does not exist.", 2: "Not found error: '%(msg)s'", 3: ("%(obj_desc)s does not exist in directory " "although it is in DB. The guest could have been " "deleted out of z/VM Cloud Connector.")}, "The operated object does not exist" ], 'alreadyExist': [{'overallRC': 409, 'modID': None, 'rc': 409}, {1: "%(obj_desc)s already exists."} ], # Conflict Error (The to-be-updated object status conflict) 'conflict': [{'overallRC': 409, 'modID': None, 'rc': 409}, {1: "Guest '%(userid)s' is not in active status.", 2: ("Failed to live resize cpus of guest: '%(userid)s', " "error: current active cpu count: '%(active)i' is " "greater than requested count: '%(req)i'."), 3: ("Failed to resize cpus of guest: '%(userid)s', " "error: maximum number of cpus is not defined in user " "directory."), 4: ("Failed to resize cpus of guest: '%(userid)s', " "error: the requested number of cpus: '%(req)i' exceeds " "the maximum number of cpus allowed: '%(max)i'."), 5: ("Failed to set vswitch %(vsw)s, error: %(msg)s"), 6: ("Failed to create nic %(vdev)s for guest %(userid)s, " "error: %(msg)s"), 7: ("Failed to create nic %(vdev)s for guest %(userid)s, " "error: %(obj)s is locked"), 8: ("Failed to delete nic %(vdev)s for guest %(userid)s, " "error: %(msg)s"), 9: ("Failed to delete nic %(vdev)s for guest %(userid)s, " "error: %(obj)s is locked"), 10: ("Failed to couple nic %(vdev)s of guest %(userid)s " "with vswitch %(vsw)s, error: %(msg)s"), 11: ("Failed to couple nic %(vdev)s of guest %(userid)s " "with vswitch 
%(vsw)s, error: %(obj)s is locked"), 12: ("Failed to uncouple nic %(vdev)s of guest %(userid)s " "error: %(msg)s"), 13: ("Failed to uncouple nic %(vdev)s of guest %(userid)s " "error: %(obj)s is locked"), 14: ("Failed to dedicate OSA %(osa)s to guest %(userid)s " "error: %(msg)s"), 15: ("Failed to dedicate OSA %(osa)s to guest %(userid)s " "error: %(obj)s is locked"), 16: ("Failed to delete dedicated device from guest " "%(userid)s %(vdev)s, error: %(msg)s"), 17: ("Failed to delete dedicated device from guest " "%(userid)s %(vdev)s, error: %(obj)s is locked"), 18: ("Failed to live resize memory of guest: '%(userid)s', " "error: current active memory size: '%(active)i'm is " "greater than requested size: '%(req)i'm."), 19: ("Failed to resize memory of guest: '%(userid)s', " "error: user definition is not in expected format, " "cann't get the defined/max/reserved storage."), 20: ("Failed to resize memory of guest: '%(userid)s', " "error: the requested memory size: '%(req)im' exceeds " "the maximum memory size defined: '%(max)im'."), 21: ("Failed to live resize memory of guest: %(userid)s, " "error: the memory size to be increased: '%(inc)im' " "is greater than the maximum reserved memory size: " "'%(max)im'."), 22: ("Failed to delete FCP Multipath Template, " "error: %(msg)s"), 23: ("Failed to create or update FCP Multipath Template, " "error: %(msg)s"), 24: ("Failed to edit FCP Multipath Template, " "error: %(msg)s") }, "The operated object status conflict" ], # Object deleted. # The operated object has been deleted and not exist any more. # This can be used for some module that support deleted=1 in DB. 'deleted': [{'overallRC': 410, 'modID': None, 'rc': 410}, {}, "The operated object is deleted" ], # Internal error # Module Internal error, rc is not defined here, it will be set when raising # exception. when module id is not specified, the 'zvmsdk' module rc will be # used. 
'internal': [{'overallRC': 500, 'modID': None, 'rc': 500}, {1: "Unexpected internal error in ZVM SDK, error: %(msg)s"}, "ZVM SDK Internal Error" ], # Service Unavailable # The SDK REST reject deploy/capture requests because of the concurrent # capture/deploy running exceeds the maximum number. 'serviceUnavail': [{'overallRC': 503, 'modID': ModRCs['sdkwsgi'], 'rc': 503}, {1: "Max concurrent deploy/capture requests received, " "request is rejected. %(req)s", }, "z/VM Cloud Connector service is unavailable" ], # Service not support # The requested function has not been implemented in current release, # the 'modID' would be set to each module rc when raise the exception # 'rs' is always 1 'serviceNotSupport': [{'overallRC': 501, 'modID': None, 'rc': 501}, {1: "The requested function: %(func)s has not been " "implemented in current release", }, "z/VM Cloud Connector function not implemented" ], } # smt internal error # This const defines the list of smt errors that should be converted to # internal error in SDK layer. # Each element in the list is a tuple consisting the 'overallRC', 'rc', # list of 'rs' # when the value is 'None', it means always match. SMT_INTERNAL_ERROR = [(4, 4, range(1, 18)), (2, 2, [99, ]), (25, None, None), (99, 99, [416, 417]) ]