tempest-2014.1.dev4108.gf22b6cc/ 0000775 0001750 0001750 00000000000 12332757136 016100 5 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/ 0000775 0001750 0001750 00000000000 12332757136 017561 5 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/config.py 0000664 0001750 0001750 00000126143 12332757070 021404 0 ustar chuck chuck 0000000 0000000 # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import logging as std_logging
import os
from oslo.config import cfg
from tempest.openstack.common import log as logging
def register_opt_group(conf, opt_group, options):
    """Attach *opt_group* to *conf* and register every option under it.

    :param conf: an oslo.config ConfigOpts instance (normally cfg.CONF).
    :param opt_group: the cfg.OptGroup to register.
    :param options: iterable of option definitions to place in the group.
    """
    conf.register_group(opt_group)
    group_name = opt_group.name
    for single_opt in options:
        conf.register_opt(single_opt, group=group_name)
# ---------------------------------------------------------------------------
# Option group definitions.  Each *_group OptGroup object is paired with a
# list of option definitions; register_opts() later attaches every pair to
# the global cfg.CONF object.  These are declarative only -- changing a
# default here changes tempest's runtime configuration.
# ---------------------------------------------------------------------------

identity_group = cfg.OptGroup(name='identity',
                              title="Keystone Configuration Options")

# [identity] -- Keystone endpoints plus the primary / alternate / admin
# credential sets used by the tests.
IdentityGroup = [
    cfg.StrOpt('catalog_type',
               default='identity',
               help="Catalog type of the Identity service."),
    cfg.BoolOpt('disable_ssl_certificate_validation',
                default=False,
                help="Set to True if using self-signed SSL certificates."),
    cfg.StrOpt('uri',
               default=None,
               help="Full URI of the OpenStack Identity API (Keystone), v2"),
    cfg.StrOpt('uri_v3',
               help='Full URI of the OpenStack Identity API (Keystone), v3'),
    cfg.StrOpt('auth_version',
               default='v2',
               help="Identity API version to be used for authentication "
                    "for API tests."),
    cfg.StrOpt('region',
               default='RegionOne',
               help="The identity region name to use. Also used as the other "
                    "services' region name unless they are set explicitly. "
                    "If no such region is found in the service catalog, the "
                    "first found one is used."),
    cfg.StrOpt('endpoint_type',
               default='publicURL',
               choices=['public', 'admin', 'internal',
                        'publicURL', 'adminURL', 'internalURL'],
               help="The endpoint type to use for the identity service."),
    cfg.StrOpt('username',
               default=None,
               help="Username to use for Nova API requests."),
    cfg.StrOpt('tenant_name',
               default=None,
               help="Tenant name to use for Nova API requests."),
    cfg.StrOpt('admin_role',
               default='admin',
               help="Role required to administrate keystone."),
    cfg.StrOpt('password',
               default=None,
               help="API key to use when authenticating.",
               secret=True),
    cfg.StrOpt('domain_name',
               default=None,
               help="Domain name for authentication (Keystone V3)."
                    "The same domain applies to user and project"),
    cfg.StrOpt('alt_username',
               default=None,
               help="Username of alternate user to use for Nova API "
                    "requests."),
    cfg.StrOpt('alt_tenant_name',
               default=None,
               help="Alternate user's Tenant name to use for Nova API "
                    "requests."),
    cfg.StrOpt('alt_password',
               default=None,
               help="API key to use when authenticating as alternate user.",
               secret=True),
    cfg.StrOpt('alt_domain_name',
               default=None,
               help="Alternate domain name for authentication (Keystone V3)."
                    "The same domain applies to user and project"),
    cfg.StrOpt('admin_username',
               default=None,
               help="Administrative Username to use for "
                    "Keystone API requests."),
    cfg.StrOpt('admin_tenant_name',
               default=None,
               help="Administrative Tenant name to use for Keystone API "
                    "requests."),
    cfg.StrOpt('admin_password',
               default=None,
               help="API key to use when authenticating as admin.",
               secret=True),
    cfg.StrOpt('admin_domain_name',
               default=None,
               help="Admin domain name for authentication (Keystone V3)."
                    "The same domain applies to user and project"),
]

identity_feature_group = cfg.OptGroup(name='identity-feature-enabled',
                                      title='Enabled Identity Features')

# [identity-feature-enabled] -- feature toggles for the identity service.
IdentityFeatureGroup = [
    cfg.BoolOpt('trust',
                default=True,
                help='Does the identity service have delegation and '
                     'impersonation enabled'),
    cfg.BoolOpt('api_v2',
                default=True,
                help='Is the v2 identity API enabled'),
    cfg.BoolOpt('api_v3',
                default=True,
                help='Is the v3 identity API enabled'),
]

compute_group = cfg.OptGroup(name='compute',
                             title='Compute Service Options')

# [compute] -- Nova endpoints, test images/flavors and SSH access settings.
ComputeGroup = [
    cfg.BoolOpt('allow_tenant_isolation',
                default=False,
                help="Allows test cases to create/destroy tenants and "
                     "users. This option enables isolated test cases and "
                     "better parallel execution, but also requires that "
                     "OpenStack Identity API admin credentials are known."),
    cfg.StrOpt('image_ref',
               default="{$IMAGE_ID}",
               help="Valid primary image reference to be used in tests."),
    cfg.StrOpt('image_ref_alt',
               default="{$IMAGE_ID_ALT}",
               help="Valid secondary image reference to be used in tests."),
    cfg.StrOpt('flavor_ref',
               default="1",
               help="Valid primary flavor to use in tests."),
    cfg.StrOpt('flavor_ref_alt',
               default="2",
               help='Valid secondary flavor to be used in tests.'),
    cfg.StrOpt('image_ssh_user',
               default="root",
               help="User name used to authenticate to an instance."),
    cfg.StrOpt('image_ssh_password',
               default="password",
               help="Password used to authenticate to an instance."),
    cfg.StrOpt('image_alt_ssh_user',
               default="root",
               help="User name used to authenticate to an instance using "
                    "the alternate image."),
    cfg.StrOpt('image_alt_ssh_password',
               default="password",
               help="Password used to authenticate to an instance using "
                    "the alternate image."),
    cfg.IntOpt('build_interval',
               default=10,
               help="Time in seconds between build status checks."),
    cfg.IntOpt('build_timeout',
               default=300,
               help="Timeout in seconds to wait for an instance to build."),
    cfg.BoolOpt('run_ssh',
                default=False,
                help="Should the tests ssh to instances?"),
    cfg.StrOpt('ssh_auth_method',
               default='keypair',
               help="Auth method used for authenticate to the instance. "
                    "Valid choices are: keypair, configured, adminpass. "
                    "keypair: start the servers with an ssh keypair. "
                    "configured: use the configured user and password. "
                    "adminpass: use the injected adminPass. "
                    "disabled: avoid using ssh when it is an option."),
    cfg.StrOpt('ssh_connect_method',
               default='fixed',
               help="How to connect to the instance? "
                    "fixed: using the first ip belongs the fixed network "
                    "floating: creating and using a floating ip"),
    cfg.StrOpt('ssh_user',
               default='root',
               help="User name used to authenticate to an instance."),
    cfg.IntOpt('ping_timeout',
               default=120,
               help="Timeout in seconds to wait for ping to "
                    "succeed."),
    cfg.IntOpt('ssh_timeout',
               default=300,
               help="Timeout in seconds to wait for authentication to "
                    "succeed."),
    cfg.IntOpt('ready_wait',
               default=0,
               help="Additional wait time for clean state, when there is "
                    "no OS-EXT-STS extension available"),
    cfg.IntOpt('ssh_channel_timeout',
               default=60,
               help="Timeout in seconds to wait for output from ssh "
                    "channel."),
    cfg.StrOpt('fixed_network_name',
               default='private',
               help="Visible fixed network name "),
    cfg.StrOpt('network_for_ssh',
               default='public',
               help="Network used for SSH connections."),
    cfg.IntOpt('ip_version_for_ssh',
               default=4,
               help="IP version used for SSH connections."),
    cfg.BoolOpt('use_floatingip_for_ssh',
                default=True,
                help="Does SSH use Floating IPs?"),
    cfg.StrOpt('catalog_type',
               default='compute',
               help="Catalog type of the Compute service."),
    cfg.StrOpt('region',
               default='',
               help="The compute region name to use. If empty, the value "
                    "of identity.region is used instead. If no such region "
                    "is found in the service catalog, the first found one is "
                    "used."),
    cfg.StrOpt('endpoint_type',
               default='publicURL',
               choices=['public', 'admin', 'internal',
                        'publicURL', 'adminURL', 'internalURL'],
               help="The endpoint type to use for the compute service."),
    cfg.StrOpt('catalog_v3_type',
               default='computev3',
               help="Catalog type of the Compute v3 service."),
    cfg.StrOpt('path_to_private_key',
               default=None,
               help="Path to a private key file for SSH access to remote "
                    "hosts"),
    cfg.StrOpt('volume_device_name',
               default='vdb',
               help="Expected device name when a volume is attached to "
                    "an instance"),
    cfg.IntOpt('shelved_offload_time',
               default=0,
               help='Time in seconds before a shelved instance is eligible '
                    'for removing from a host. -1 never offload, 0 offload '
                    'when shelved. This time should be the same as the time '
                    'of nova.conf, and some tests will run for as long as the '
                    'time.')
]

compute_features_group = cfg.OptGroup(name='compute-feature-enabled',
                                      title="Enabled Compute Service Features")

# [compute-feature-enabled] -- which optional Nova capabilities the target
# cloud supports; tests skip themselves based on these flags.
ComputeFeaturesGroup = [
    cfg.BoolOpt('api_v3',
                default=True,
                help="If false, skip all nova v3 tests."),
    cfg.BoolOpt('disk_config',
                default=True,
                help="If false, skip disk config tests"),
    cfg.ListOpt('api_extensions',
                default=['all'],
                help='A list of enabled compute extensions with a special '
                     'entry all which indicates every extension is enabled'),
    cfg.ListOpt('api_v3_extensions',
                default=['all'],
                help='A list of enabled v3 extensions with a special entry all'
                     ' which indicates every extension is enabled'),
    cfg.BoolOpt('change_password',
                default=False,
                help="Does the test environment support changing the admin "
                     "password?"),
    cfg.BoolOpt('resize',
                default=False,
                help="Does the test environment support resizing?"),
    cfg.BoolOpt('pause',
                default=True,
                help="Does the test environment support pausing?"),
    cfg.BoolOpt('suspend',
                default=True,
                help="Does the test environment support suspend/resume?"),
    cfg.BoolOpt('live_migration',
                default=False,
                help="Does the test environment support live migration "
                     "available?"),
    cfg.BoolOpt('block_migration_for_live_migration',
                default=False,
                help="Does the test environment use block devices for live "
                     "migration"),
    cfg.BoolOpt('block_migrate_cinder_iscsi',
                default=False,
                help="Does the test environment block migration support "
                     "cinder iSCSI volumes"),
    cfg.BoolOpt('vnc_console',
                default=False,
                help='Enable VNC console. This configuration value should '
                     'be same as [nova.vnc]->vnc_enabled in nova.conf')
]

compute_admin_group = cfg.OptGroup(name='compute-admin',
                                   title="Compute Admin Options")

# [compute-admin] -- dedicated admin credentials for Nova; when left unset
# they fall back to the identity admin account (see
# TempestConfigPrivate._set_attrs).
ComputeAdminGroup = [
    cfg.StrOpt('username',
               default=None,
               help="Administrative Username to use for Nova API requests."),
    cfg.StrOpt('tenant_name',
               default=None,
               help="Administrative Tenant name to use for Nova API "
                    "requests."),
    cfg.StrOpt('password',
               default=None,
               help="API key to use when authenticating as admin.",
               secret=True),
    cfg.StrOpt('domain_name',
               default=None,
               help="Domain name for authentication as admin (Keystone V3)."
                    "The same domain applies to user and project"),
]

image_group = cfg.OptGroup(name='image',
                           title="Image Service Options")

# [image] -- Glance endpoint settings.
ImageGroup = [
    cfg.StrOpt('catalog_type',
               default='image',
               help='Catalog type of the Image service.'),
    cfg.StrOpt('region',
               default='',
               help="The image region name to use. If empty, the value "
                    "of identity.region is used instead. If no such region "
                    "is found in the service catalog, the first found one is "
                    "used."),
    cfg.StrOpt('endpoint_type',
               default='publicURL',
               choices=['public', 'admin', 'internal',
                        'publicURL', 'adminURL', 'internalURL'],
               help="The endpoint type to use for the image service."),
    cfg.StrOpt('http_image',
               default='http://download.cirros-cloud.net/0.3.1/'
                       'cirros-0.3.1-x86_64-uec.tar.gz',
               help='http accessible image')
]

image_feature_group = cfg.OptGroup(name='image-feature-enabled',
                                   title='Enabled image service features')

# [image-feature-enabled] -- which Glance API versions are available.
ImageFeaturesGroup = [
    cfg.BoolOpt('api_v2',
                default=True,
                help="Is the v2 image API enabled"),
    cfg.BoolOpt('api_v1',
                default=True,
                help="Is the v1 image API enabled"),
]

network_group = cfg.OptGroup(name='network',
                             title='Network Service Options')

# [network] -- Neutron endpoints and tenant network allocation settings.
NetworkGroup = [
    cfg.StrOpt('catalog_type',
               default='network',
               help='Catalog type of the Neutron service.'),
    cfg.StrOpt('region',
               default='',
               help="The network region name to use. If empty, the value "
                    "of identity.region is used instead. If no such region "
                    "is found in the service catalog, the first found one is "
                    "used."),
    cfg.StrOpt('endpoint_type',
               default='publicURL',
               choices=['public', 'admin', 'internal',
                        'publicURL', 'adminURL', 'internalURL'],
               help="The endpoint type to use for the network service."),
    cfg.StrOpt('tenant_network_cidr',
               default="10.100.0.0/16",
               help="The cidr block to allocate tenant ipv4 subnets from"),
    cfg.IntOpt('tenant_network_mask_bits',
               default=28,
               help="The mask bits for tenant ipv4 subnets"),
    cfg.StrOpt('tenant_network_v6_cidr',
               default="2003::/64",
               help="The cidr block to allocate tenant ipv6 subnets from"),
    cfg.IntOpt('tenant_network_v6_mask_bits',
               default=96,
               help="The mask bits for tenant ipv6 subnets"),
    cfg.BoolOpt('tenant_networks_reachable',
                default=False,
                help="Whether tenant network connectivity should be "
                     "evaluated directly"),
    cfg.StrOpt('public_network_id',
               default="",
               help="Id of the public network that provides external "
                    "connectivity"),
    cfg.StrOpt('public_router_id',
               default="",
               help="Id of the public router that provides external "
                    "connectivity"),
    cfg.IntOpt('build_timeout',
               default=300,
               help="Timeout in seconds to wait for network operation to "
                    "complete."),
    cfg.IntOpt('build_interval',
               default=10,
               help="Time in seconds between network operation status "
                    "checks."),
]

network_feature_group = cfg.OptGroup(name='network-feature-enabled',
                                     title='Enabled network service features')

# [network-feature-enabled] -- optional Neutron capabilities.
NetworkFeaturesGroup = [
    cfg.BoolOpt('ipv6',
                default=True,
                help="Allow the execution of IPv6 tests"),
    cfg.ListOpt('api_extensions',
                default=['all'],
                help='A list of enabled network extensions with a special '
                     'entry all which indicates every extension is enabled'),
]

queuing_group = cfg.OptGroup(name='queuing',
                             title='Queuing Service')

# [queuing] -- Marconi (queuing) service settings.
QueuingGroup = [
    cfg.StrOpt('catalog_type',
               default='queuing',
               help='Catalog type of the Queuing service.'),
]

volume_group = cfg.OptGroup(name='volume',
                            title='Block Storage Options')

# [volume] -- Cinder endpoints, backends and default volume parameters.
VolumeGroup = [
    cfg.IntOpt('build_interval',
               default=10,
               help='Time in seconds between volume availability checks.'),
    cfg.IntOpt('build_timeout',
               default=300,
               help='Timeout in seconds to wait for a volume to become'
                    'available.'),
    cfg.StrOpt('catalog_type',
               default='volume',
               help="Catalog type of the Volume Service"),
    cfg.StrOpt('region',
               default='',
               help="The volume region name to use. If empty, the value "
                    "of identity.region is used instead. If no such region "
                    "is found in the service catalog, the first found one is "
                    "used."),
    cfg.StrOpt('endpoint_type',
               default='publicURL',
               choices=['public', 'admin', 'internal',
                        'publicURL', 'adminURL', 'internalURL'],
               help="The endpoint type to use for the volume service."),
    cfg.StrOpt('backend1_name',
               default='BACKEND_1',
               help="Name of the backend1 (must be declared in cinder.conf)"),
    cfg.StrOpt('backend2_name',
               default='BACKEND_2',
               help="Name of the backend2 (must be declared in cinder.conf)"),
    cfg.StrOpt('storage_protocol',
               default='iSCSI',
               help='Backend protocol to target when creating volume types'),
    cfg.StrOpt('vendor_name',
               default='Open Source',
               help='Backend vendor to target when creating volume types'),
    cfg.StrOpt('disk_format',
               default='raw',
               help='Disk format to use when copying a volume to image'),
    cfg.IntOpt('volume_size',
               default=1,
               help='Default size in GB for volumes created by volumes tests'),
]

volume_feature_group = cfg.OptGroup(name='volume-feature-enabled',
                                    title='Enabled Cinder Features')

# [volume-feature-enabled] -- optional Cinder capabilities.
VolumeFeaturesGroup = [
    cfg.BoolOpt('multi_backend',
                default=False,
                help="Runs Cinder multi-backend test (requires 2 backends)"),
    cfg.BoolOpt('backup',
                default=True,
                help='Runs Cinder volumes backup test'),
    cfg.BoolOpt('snapshot',
                default=True,
                help='Runs Cinder volume snapshot test'),
    cfg.ListOpt('api_extensions',
                default=['all'],
                help='A list of enabled volume extensions with a special '
                     'entry all which indicates every extension is enabled'),
    cfg.BoolOpt('api_v1',
                default=True,
                help="Is the v1 volume API enabled"),
    cfg.BoolOpt('api_v2',
                default=True,
                help="Is the v2 volume API enabled"),
]

object_storage_group = cfg.OptGroup(name='object-storage',
                                    title='Object Storage Service Options')

# [object-storage] -- Swift endpoints, sync timeouts and test roles.
ObjectStoreGroup = [
    cfg.StrOpt('catalog_type',
               default='object-store',
               help="Catalog type of the Object-Storage service."),
    cfg.StrOpt('region',
               default='',
               help="The object-storage region name to use. If empty, the "
                    "value of identity.region is used instead. If no such "
                    "region is found in the service catalog, the first found "
                    "one is used."),
    cfg.StrOpt('endpoint_type',
               default='publicURL',
               choices=['public', 'admin', 'internal',
                        'publicURL', 'adminURL', 'internalURL'],
               help="The endpoint type to use for the object-store service."),
    cfg.IntOpt('container_sync_timeout',
               default=120,
               help="Number of seconds to time on waiting for a container "
                    "to container synchronization complete."),
    cfg.IntOpt('container_sync_interval',
               default=5,
               help="Number of seconds to wait while looping to check the "
                    "status of a container to container synchronization"),
    cfg.StrOpt('operator_role',
               default='Member',
               help="Role to add to users created for swift tests to "
                    "enable creating containers"),
    cfg.StrOpt('reseller_admin_role',
               default='ResellerAdmin',
               help="User role that has reseller admin"),
]

object_storage_feature_group = cfg.OptGroup(
    name='object-storage-feature-enabled',
    title='Enabled object-storage features')

# [object-storage-feature-enabled] -- optional Swift capabilities.
ObjectStoreFeaturesGroup = [
    cfg.ListOpt('discoverable_apis',
                default=['all'],
                help="A list of the enabled optional discoverable apis. "
                     "A single entry, all, indicates that all of these "
                     "features are expected to be enabled"),
]

database_group = cfg.OptGroup(name='database',
                              title='Database Service Options')

# [database] -- Trove (database) service settings.
DatabaseGroup = [
    cfg.StrOpt('catalog_type',
               default='database',
               help="Catalog type of the Database service."),
    cfg.StrOpt('db_flavor_ref',
               default="1",
               help="Valid primary flavor to use in database tests."),
]

orchestration_group = cfg.OptGroup(name='orchestration',
                                   title='Orchestration Service Options')

# [orchestration] -- Heat endpoints and stack-building test parameters.
OrchestrationGroup = [
    cfg.StrOpt('catalog_type',
               default='orchestration',
               help="Catalog type of the Orchestration service."),
    cfg.StrOpt('region',
               default='',
               help="The orchestration region name to use. If empty, the "
                    "value of identity.region is used instead. If no such "
                    "region is found in the service catalog, the first found "
                    "one is used."),
    cfg.StrOpt('endpoint_type',
               default='publicURL',
               choices=['public', 'admin', 'internal',
                        'publicURL', 'adminURL', 'internalURL'],
               help="The endpoint type to use for the orchestration service."),
    cfg.BoolOpt('allow_tenant_isolation',
                default=False,
                help="Allows test cases to create/destroy tenants and "
                     "users. This option enables isolated test cases and "
                     "better parallel execution, but also requires that "
                     "OpenStack Identity API admin credentials are known."),
    cfg.IntOpt('build_interval',
               default=1,
               help="Time in seconds between build status checks."),
    cfg.IntOpt('build_timeout',
               default=1200,
               help="Timeout in seconds to wait for a stack to build."),
    cfg.StrOpt('instance_type',
               default='m1.micro',
               help="Instance type for tests. Needs to be big enough for a "
                    "full OS plus the test workload"),
    cfg.StrOpt('image_ref',
               default=None,
               help="Name of heat-cfntools enabled image to use when "
                    "launching test instances."),
    cfg.StrOpt('keypair_name',
               default=None,
               help="Name of existing keypair to launch servers with."),
    cfg.IntOpt('max_template_size',
               default=524288,
               help="Value must match heat configuration of the same name."),
    cfg.IntOpt('max_resources_per_stack',
               default=1000,
               help="Value must match heat configuration of the same name."),
]

telemetry_group = cfg.OptGroup(name='telemetry',
                               title='Telemetry Service Options')

# [telemetry] -- Ceilometer endpoint settings.
TelemetryGroup = [
    cfg.StrOpt('catalog_type',
               default='metering',
               help="Catalog type of the Telemetry service."),
    cfg.StrOpt('endpoint_type',
               default='publicURL',
               choices=['public', 'admin', 'internal',
                        'publicURL', 'adminURL', 'internalURL'],
               help="The endpoint type to use for the telemetry service."),
]

dashboard_group = cfg.OptGroup(name="dashboard",
                               title="Dashboard options")

# [dashboard] -- Horizon URLs used by the dashboard scenario tests.
DashboardGroup = [
    cfg.StrOpt('dashboard_url',
               default='http://localhost/',
               help="Where the dashboard can be found"),
    cfg.StrOpt('login_url',
               default='http://localhost/auth/login/',
               help="Login page for the dashboard"),
]

data_processing_group = cfg.OptGroup(name="data_processing",
                                     title="Data Processing options")

# [data_processing] -- Sahara (data processing) service settings.
DataProcessingGroup = [
    cfg.StrOpt('catalog_type',
               default='data_processing',
               help="Catalog type of the data processing service."),
    cfg.StrOpt('endpoint_type',
               default='publicURL',
               choices=['public', 'admin', 'internal',
                        'publicURL', 'adminURL', 'internalURL'],
               help="The endpoint type to use for the data processing "
                    "service."),
]

boto_group = cfg.OptGroup(name='boto',
                          title='EC2/S3 options')

# [boto] -- EC2/S3 compatibility-layer test settings (boto client).
BotoGroup = [
    cfg.StrOpt('ec2_url',
               default="http://localhost:8773/services/Cloud",
               help="EC2 URL"),
    cfg.StrOpt('s3_url',
               default="http://localhost:8080",
               help="S3 URL"),
    cfg.StrOpt('aws_secret',
               default=None,
               help="AWS Secret Key",
               secret=True),
    cfg.StrOpt('aws_access',
               default=None,
               help="AWS Access Key"),
    cfg.StrOpt('aws_zone',
               default="nova",
               help="AWS Zone for EC2 tests"),
    cfg.StrOpt('s3_materials_path',
               default="/opt/stack/devstack/files/images/"
                       "s3-materials/cirros-0.3.0",
               help="S3 Materials Path"),
    cfg.StrOpt('ari_manifest',
               default="cirros-0.3.0-x86_64-initrd.manifest.xml",
               help="ARI Ramdisk Image manifest"),
    cfg.StrOpt('ami_manifest',
               default="cirros-0.3.0-x86_64-blank.img.manifest.xml",
               help="AMI Machine Image manifest"),
    cfg.StrOpt('aki_manifest',
               default="cirros-0.3.0-x86_64-vmlinuz.manifest.xml",
               help="AKI Kernel Image manifest"),
    cfg.StrOpt('instance_type',
               default="m1.tiny",
               help="Instance type"),
    cfg.IntOpt('http_socket_timeout',
               default=3,
               help="boto Http socket timeout"),
    cfg.IntOpt('num_retries',
               default=1,
               help="boto num_retries on error"),
    cfg.IntOpt('build_timeout',
               default=60,
               help="Status Change Timeout"),
    cfg.IntOpt('build_interval',
               default=1,
               help="Status Change Test Interval"),
]

stress_group = cfg.OptGroup(name='stress', title='Stress Test Options')

# [stress] -- parameters for the stress-test framework.
StressGroup = [
    cfg.StrOpt('nova_logdir',
               default=None,
               help='Directory containing log files on the compute nodes'),
    cfg.IntOpt('max_instances',
               default=16,
               help='Maximum number of instances to create during test.'),
    cfg.StrOpt('controller',
               default=None,
               help='Controller host.'),
    # new stress options
    cfg.StrOpt('target_controller',
               default=None,
               help='Controller host.'),
    cfg.StrOpt('target_ssh_user',
               default=None,
               help='ssh user.'),
    cfg.StrOpt('target_private_key_path',
               default=None,
               help='Path to private key.'),
    cfg.StrOpt('target_logfiles',
               default=None,
               help='regexp for list of log files.'),
    cfg.IntOpt('log_check_interval',
               default=60,
               help='time (in seconds) between log file error checks.'),
    cfg.IntOpt('default_thread_number_per_action',
               default=4,
               help='The number of threads created while stress test.'),
    cfg.BoolOpt('leave_dirty_stack',
                default=False,
                help='Prevent the cleaning (tearDownClass()) between'
                     ' each stress test run if an exception occurs'
                     ' during this run.'),
    cfg.BoolOpt('full_clean_stack',
                default=False,
                help='Allows a full cleaning process after a stress test.'
                     ' Caution : this cleanup will remove every objects of'
                     ' every tenant.')
]

scenario_group = cfg.OptGroup(name='scenario', title='Scenario Test Options')

# [scenario] -- image files and credentials used by scenario tests.
ScenarioGroup = [
    cfg.StrOpt('img_dir',
               default='/opt/stack/new/devstack/files/images/'
                       'cirros-0.3.1-x86_64-uec',
               help='Directory containing image files'),
    cfg.StrOpt('qcow2_img_file',
               default='cirros-0.3.1-x86_64-disk.img',
               help='QCOW2 image file name'),
    cfg.StrOpt('ami_img_file',
               default='cirros-0.3.1-x86_64-blank.img',
               help='AMI image file name'),
    cfg.StrOpt('ari_img_file',
               default='cirros-0.3.1-x86_64-initrd',
               help='ARI image file name'),
    cfg.StrOpt('aki_img_file',
               default='cirros-0.3.1-x86_64-vmlinuz',
               help='AKI image file name'),
    cfg.StrOpt('ssh_user',
               default='cirros',
               help='ssh username for the image file'),
    cfg.IntOpt(
        'large_ops_number',
        default=0,
        help="specifies how many resources to request at once. Used "
             "for large operations testing.")
]

service_available_group = cfg.OptGroup(name="service_available",
                                       title="Available OpenStack Services")

# [service_available] -- which OpenStack services the target cloud runs;
# entire test suites are skipped when a service is marked unavailable.
ServiceAvailableGroup = [
    cfg.BoolOpt('cinder',
                default=True,
                help="Whether or not cinder is expected to be available"),
    cfg.BoolOpt('neutron',
                default=False,
                help="Whether or not neutron is expected to be available"),
    cfg.BoolOpt('glance',
                default=True,
                help="Whether or not glance is expected to be available"),
    cfg.BoolOpt('swift',
                default=True,
                help="Whether or not swift is expected to be available"),
    cfg.BoolOpt('nova',
                default=True,
                help="Whether or not nova is expected to be available"),
    cfg.BoolOpt('heat',
                default=False,
                help="Whether or not Heat is expected to be available"),
    cfg.BoolOpt('ceilometer',
                default=True,
                help="Whether or not Ceilometer is expected to be available"),
    cfg.BoolOpt('horizon',
                default=True,
                help="Whether or not Horizon is expected to be available"),
    cfg.BoolOpt('sahara',
                default=False,
                help="Whether or not Sahara is expected to be available"),
    cfg.BoolOpt('ironic',
                default=False,
                help="Whether or not Ironic is expected to be available"),
    cfg.BoolOpt('trove',
                default=False,
                help="Whether or not Trove is expected to be available"),
    cfg.BoolOpt('marconi',
                default=False,
                help="Whether or not Marconi is expected to be available"),
]

debug_group = cfg.OptGroup(name="debug",
                           title="Debug System")

# [debug] -- diagnostic helpers for test development.
DebugGroup = [
    cfg.BoolOpt('enable',
                default=True,
                help="Enable diagnostic commands"),
    cfg.StrOpt('trace_requests',
               default='',
               help="""A regex to determine which requests should be traced.
This is a regex to match the caller for rest client requests to be able to
selectively trace calls out of specific classes and methods. It largely
exists for test development, and is not expected to be used in a real deploy
of tempest. This will be matched against the discovered ClassName:method
in the test environment.
Expected values for this field are:
* ClassName:test_method_name - traces one test_method
* ClassName:setUp(Class) - traces specific setup functions
* ClassName:tearDown(Class) - traces specific teardown functions
* ClassName:_run_cleanups - traces the cleanup functions
If nothing is specified, this feature is not enabled. To trace everything
specify .* as the regex.
""")
]

input_scenario_group = cfg.OptGroup(name="input-scenario",
                                    title="Filters and values for"
                                          " input scenarios")

# [input-scenario] -- regex filters that turn discovered images/flavors
# into scenario test parameters.
InputScenarioGroup = [
    cfg.StrOpt('image_regex',
               default='^cirros-0.3.1-x86_64-uec$',
               help="Matching images become parameters for scenario tests"),
    cfg.StrOpt('flavor_regex',
               default='^m1.nano$',
               help="Matching flavors become parameters for scenario tests"),
    cfg.StrOpt('non_ssh_image_regex',
               default='^.*[Ww]in.*$',
               help="SSH verification in tests is skipped"
                    "for matching images"),
    cfg.StrOpt('ssh_user_regex',
               default="[[\"^.*[Cc]irros.*$\", \"root\"]]",
               help="List of user mapped to regex "
                    "to matching image names."),
]

baremetal_group = cfg.OptGroup(name='baremetal',
                               title='Baremetal provisioning service options')

# [baremetal] -- Ironic endpoints and provisioning timeouts.
BaremetalGroup = [
    cfg.StrOpt('catalog_type',
               default='baremetal',
               help="Catalog type of the baremetal provisioning service"),
    cfg.BoolOpt('driver_enabled',
                default=False,
                help="Whether the Ironic nova-compute driver is enabled"),
    cfg.StrOpt('endpoint_type',
               default='publicURL',
               choices=['public', 'admin', 'internal',
                        'publicURL', 'adminURL', 'internalURL'],
               help="The endpoint type to use for the baremetal provisioning "
                    "service"),
    cfg.IntOpt('active_timeout',
               default=300,
               help="Timeout for Ironic node to completely provision"),
    cfg.IntOpt('association_timeout',
               default=10,
               help="Timeout for association of Nova instance and Ironic "
                    "node"),
    cfg.IntOpt('power_timeout',
               default=20,
               help="Timeout for Ironic power transitions."),
    cfg.IntOpt('unprovision_timeout',
               default=20,
               help="Timeout for unprovisioning an Ironic node.")
]

cli_group = cfg.OptGroup(name='cli', title="cli Configuration Options")

# [cli] -- settings for the python client CLI tests.
CLIGroup = [
    cfg.BoolOpt('enabled',
                default=True,
                help="enable cli tests"),
    cfg.StrOpt('cli_dir',
               default='/usr/local/bin',
               help="directory where python client binaries are located"),
    cfg.BoolOpt('has_manage',
                default=True,
                help=("Whether the tempest run location has access to the "
                      "*-manage commands. In a pure blackbox environment "
                      "it will not.")),
    cfg.IntOpt('timeout',
               default=15,
               help="Number of seconds to wait on a CLI timeout"),
]

negative_group = cfg.OptGroup(name='negative', title="Negative Test Options")

# [negative] -- negative (fuzz) test generator configuration.
NegativeGroup = [
    cfg.StrOpt('test_generator',
               default='tempest.common.' +
                       'generator.negative_generator.NegativeTestGenerator',
               help="Test generator class for all negative tests"),
]
def register_opts():
    """Register every tempest option group on the global cfg.CONF object.

    The pairs below are registered in the same order as the historical
    one-call-per-group implementation.
    """
    all_groups = [
        (compute_group, ComputeGroup),
        (compute_features_group, ComputeFeaturesGroup),
        (identity_group, IdentityGroup),
        (identity_feature_group, IdentityFeatureGroup),
        (image_group, ImageGroup),
        (image_feature_group, ImageFeaturesGroup),
        (network_group, NetworkGroup),
        (network_feature_group, NetworkFeaturesGroup),
        (queuing_group, QueuingGroup),
        (volume_group, VolumeGroup),
        (volume_feature_group, VolumeFeaturesGroup),
        (object_storage_group, ObjectStoreGroup),
        (object_storage_feature_group, ObjectStoreFeaturesGroup),
        (database_group, DatabaseGroup),
        (orchestration_group, OrchestrationGroup),
        (telemetry_group, TelemetryGroup),
        (dashboard_group, DashboardGroup),
        (data_processing_group, DataProcessingGroup),
        (boto_group, BotoGroup),
        (compute_admin_group, ComputeAdminGroup),
        (stress_group, StressGroup),
        (scenario_group, ScenarioGroup),
        (service_available_group, ServiceAvailableGroup),
        (debug_group, DebugGroup),
        (baremetal_group, BaremetalGroup),
        (input_scenario_group, InputScenarioGroup),
        (cli_group, CLIGroup),
        (negative_group, NegativeGroup),
    ]
    for group, options in all_groups:
        register_opt_group(cfg.CONF, group, options)
# this should never be called outside of this class
class TempestConfigPrivate(object):
    """Provides OpenStack configuration information.

    Locates and parses the tempest configuration file, registers every
    option group on cfg.CONF, and mirrors each group as an attribute of
    this object.  Instantiate only through TempestConfigProxy / CONF.
    """

    # Default config location: the "etc" directory one level above the
    # tempest package this module lives in (i.e. <repo root>/etc).
    DEFAULT_CONFIG_DIR = os.path.join(
        os.path.abspath(os.path.dirname(os.path.dirname(__file__))),
        "etc")

    DEFAULT_CONFIG_FILE = "tempest.conf"

    def _set_attrs(self):
        # Mirror each registered option group as an attribute of self.
        # Groups whose names contain a hyphen cannot be read via attribute
        # access on cfg.CONF, hence the item-style lookups below.
        self.compute = cfg.CONF.compute
        self.compute_feature_enabled = cfg.CONF['compute-feature-enabled']
        self.identity = cfg.CONF.identity
        self.identity_feature_enabled = cfg.CONF['identity-feature-enabled']
        self.image = cfg.CONF.image
        self.image_feature_enabled = cfg.CONF['image-feature-enabled']
        self.network = cfg.CONF.network
        self.network_feature_enabled = cfg.CONF['network-feature-enabled']
        self.volume = cfg.CONF.volume
        self.volume_feature_enabled = cfg.CONF['volume-feature-enabled']
        self.object_storage = cfg.CONF['object-storage']
        self.object_storage_feature_enabled = cfg.CONF[
            'object-storage-feature-enabled']
        self.database = cfg.CONF.database
        self.orchestration = cfg.CONF.orchestration
        self.queuing = cfg.CONF.queuing
        self.telemetry = cfg.CONF.telemetry
        self.dashboard = cfg.CONF.dashboard
        self.data_processing = cfg.CONF.data_processing
        self.boto = cfg.CONF.boto
        self.compute_admin = cfg.CONF['compute-admin']
        self.stress = cfg.CONF.stress
        self.scenario = cfg.CONF.scenario
        self.service_available = cfg.CONF.service_available
        self.debug = cfg.CONF.debug
        self.baremetal = cfg.CONF.baremetal
        self.input_scenario = cfg.CONF['input-scenario']
        self.cli = cfg.CONF.cli
        self.negative = cfg.CONF.negative
        # Fall back to the identity admin credentials when no dedicated
        # compute-admin account was configured.
        if not self.compute_admin.username:
            self.compute_admin.username = self.identity.admin_username
            self.compute_admin.password = self.identity.admin_password
            self.compute_admin.tenant_name = self.identity.admin_tenant_name
        # Default the Keystone v3 domain options to the admin domain name
        # so the various credential sets share a domain unless overridden.
        cfg.CONF.set_default('domain_name', self.identity.admin_domain_name,
                             group='identity')
        cfg.CONF.set_default('alt_domain_name',
                             self.identity.admin_domain_name,
                             group='identity')
        cfg.CONF.set_default('domain_name', self.identity.admin_domain_name,
                             group='compute-admin')

    def __init__(self, parse_conf=True):
        """Initialize a configuration from a conf directory and conf file.

        :param parse_conf: when False, no config file is read and option
            defaults are used (needed by the config up-to-date checker --
            see the comment below).
        """
        super(TempestConfigPrivate, self).__init__()
        config_files = []
        failsafe_path = "/etc/tempest/" + self.DEFAULT_CONFIG_FILE

        # Environment variables override defaults...
        conf_dir = os.environ.get('TEMPEST_CONFIG_DIR',
                                  self.DEFAULT_CONFIG_DIR)
        conf_file = os.environ.get('TEMPEST_CONFIG', self.DEFAULT_CONFIG_FILE)

        path = os.path.join(conf_dir, conf_file)

        # Fall back to the system-wide location when the computed path
        # does not exist.
        if not os.path.isfile(path):
            path = failsafe_path

        # only parse the config file if we expect one to exist. This is needed
        # to remove an issue with the config file up to date checker.
        if parse_conf:
            config_files.append(path)

        cfg.CONF([], project='tempest', default_config_files=config_files)
        logging.setup('tempest')
        LOG = logging.getLogger('tempest')
        LOG.info("Using tempest config file %s" % path)
        # Options must be registered before _set_attrs() reads them back.
        register_opts()
        self._set_attrs()
        if parse_conf:
            cfg.CONF.log_opt_values(LOG, std_logging.DEBUG)
class TempestConfigProxy(object):
    """Lazy proxy that builds the private config on first attribute access."""

    _config = None

    def __getattr__(self, attr):
        # Defer parsing the config file until something actually needs it.
        if self._config is None:
            self._config = TempestConfigPrivate()
        return getattr(self._config, attr)
CONF = TempestConfigProxy()
tempest-2014.1.dev4108.gf22b6cc/tempest/scenario/ 0000775 0001750 0001750 00000000000 12332757136 021364 5 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/scenario/test_server_basic_ops.py 0000664 0001750 0001750 00000011327 12332757070 026326 0 ustar chuck chuck 0000000 0000000 # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.common.utils import data_utils
from tempest import config
from tempest.openstack.common import log as logging
from tempest.scenario import manager
from tempest.scenario import utils as test_utils
from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
load_tests = test_utils.load_tests_input_scenario_utils
class TestServerBasicOps(manager.OfficialClientTest):

    """
    This smoke test case follows this basic set of operations:

     * Create a keypair for use in launching an instance
     * Create a security group to control network access in instance
     * Add simple permissive rules to the security group
     * Launch an instance
     * Pause/unpause the instance
     * Suspend/resume the instance
     * Terminate the instance
    """

    def setUp(self):
        super(TestServerBasicOps, self).setUp()
        # Setup image and flavor the test instance.
        # Support both configured and injected (input scenario) values.
        if not hasattr(self, 'image_ref'):
            self.image_ref = CONF.compute.image_ref
        if not hasattr(self, 'flavor_ref'):
            self.flavor_ref = CONF.compute.flavor_ref
        self.image_utils = test_utils.ImageUtils()
        if not self.image_utils.is_flavor_enough(self.flavor_ref,
                                                 self.image_ref):
            raise self.skipException(
                '{image} does not fit in {flavor}'.format(
                    image=self.image_ref, flavor=self.flavor_ref
                )
            )
        # Only attempt SSH when it is enabled in the config AND the image
        # is known to be reachable over SSH.
        self.run_ssh = CONF.compute.run_ssh and \
            self.image_utils.is_sshable_image(self.image_ref)
        self.ssh_user = self.image_utils.ssh_user(self.image_ref)
        LOG.debug('Starting test for i:{image}, f:{flavor}. '
                  'Run ssh: {ssh}, user: {ssh_user}'.format(
                      image=self.image_ref, flavor=self.flavor_ref,
                      ssh=self.run_ssh, ssh_user=self.ssh_user))

    def add_keypair(self):
        # Keypair used for key-based SSH authentication to the instance.
        self.keypair = self.create_keypair()

    def create_security_group(self):
        sg_name = data_utils.rand_name('secgroup-smoke')
        sg_desc = sg_name + " description"
        self.secgroup = self.compute_client.security_groups.create(sg_name,
                                                                   sg_desc)
        self.assertEqual(self.secgroup.name, sg_name)
        self.assertEqual(self.secgroup.description, sg_desc)
        self.set_resource('secgroup', self.secgroup)

        # Add rules to the security group
        self._create_loginable_secgroup_rule_nova(secgroup_id=self.secgroup.id)

    def boot_instance(self):
        # Create server with image and flavor from input scenario
        create_kwargs = {
            'key_name': self.keypair.id
        }
        instance = self.create_server(image=self.image_ref,
                                      flavor=self.flavor_ref,
                                      create_kwargs=create_kwargs)
        self.set_resource('instance', instance)

    def terminate_instance(self):
        instance = self.get_resource('instance')
        instance.delete()
        self.remove_resource('instance')

    def verify_ssh(self):
        if self.run_ssh:
            # Obtain a floating IP
            floating_ip = self.compute_client.floating_ips.create()
            # Release the floating IP on test exit so it is not leaked,
            # even when the SSH check below fails (previously this IP was
            # never cleaned up).
            self.addCleanup(self.compute_client.floating_ips.delete,
                            floating_ip)
            # Attach a floating IP
            instance = self.get_resource('instance')
            instance.add_floating_ip(floating_ip)
            # Check ssh
            try:
                linux_client = self.get_remote_client(
                    server_or_ip=floating_ip.ip,
                    # Reuse the SSH user already resolved in setUp()
                    # instead of querying the image utils a second time.
                    username=self.ssh_user,
                    private_key=self.keypair.private_key)
                linux_client.validate_authentication()
            except Exception:
                LOG.exception('ssh to server failed')
                self._log_console_output()
                raise

    @test.services('compute', 'network')
    def test_server_basicops(self):
        self.add_keypair()
        self.create_security_group()
        self.boot_instance()
        self.verify_ssh()
        self.terminate_instance()
tempest-2014.1.dev4108.gf22b6cc/tempest/scenario/test_network_advanced_server_ops.py 0000664 0001750 0001750 00000020651 12332757070 030563 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from tempest.common import debug
from tempest.common.utils import data_utils
from tempest import config
from tempest.openstack.common import log as logging
from tempest.scenario import manager
from tempest.test import services
CONF = config.CONF
LOG = logging.getLogger(__name__)
class TestNetworkAdvancedServerOps(manager.NetworkScenarioTest):

    """
    This test case checks VM connectivity after some advanced
    instance operations executed:

     * Stop/Start an instance
     * Reboot an instance
     * Rebuild an instance
     * Pause/Unpause an instance
     * Suspend/Resume an instance
     * Resize an instance
    """

    @classmethod
    def setUpClass(cls):
        super(TestNetworkAdvancedServerOps, cls).setUpClass()
        cls.check_preconditions()
        # The servers must be reachable either directly on the tenant
        # network or through a floating IP on the public network.
        if not (CONF.network.tenant_networks_reachable
                or CONF.network.public_network_id):
            msg = ('Either tenant_networks_reachable must be "true", or '
                   'public_network_id must be defined.')
            cls.enabled = False
            raise cls.skipException(msg)

    def cleanup_wrapper(self, resource):
        # Scope resource cleanup to this test class.
        self.cleanup_resource(resource, self.__class__.__name__)

    def setUp(self):
        super(TestNetworkAdvancedServerOps, self).setUp()
        # Build a dedicated keypair, security group, network, subnet and
        # router, then boot the server on that network and give it a
        # floating IP.
        key_name = data_utils.rand_name('keypair-smoke-')
        self.keypair = self.create_keypair(name=key_name)
        self.addCleanup(self.cleanup_wrapper, self.keypair)
        security_group = \
            self._create_security_group_neutron(tenant_id=self.tenant_id)
        self.addCleanup(self.cleanup_wrapper, security_group)
        network = self._create_network(self.tenant_id)
        self.addCleanup(self.cleanup_wrapper, network)
        router = self._get_router(self.tenant_id)
        self.addCleanup(self.cleanup_wrapper, router)
        subnet = self._create_subnet(network)
        self.addCleanup(self.cleanup_wrapper, subnet)
        subnet.add_to_router(router.id)
        public_network_id = CONF.network.public_network_id
        create_kwargs = {
            'nics': [
                {'net-id': network.id},
            ],
            'key_name': self.keypair.name,
            'security_groups': [security_group.name],
        }
        # The previous name template contained a leftover, never
        # substituted '%d' printf placeholder ('server-smoke-%d-') that
        # ended up verbatim in the server name.
        server_name = data_utils.rand_name('server-smoke-')
        self.server = self.create_server(name=server_name,
                                         create_kwargs=create_kwargs)
        self.addCleanup(self.cleanup_wrapper, self.server)
        self.floating_ip = self._create_floating_ip(self.server,
                                                    public_network_id)
        self.addCleanup(self.cleanup_wrapper, self.floating_ip)

    def _check_tenant_network_connectivity(self, server,
                                           username,
                                           private_key,
                                           should_connect=True):
        # Verify SSH reachability of every fixed IP of the server over
        # the tenant network; skipped when that network is unreachable
        # from the test runner.
        if not CONF.network.tenant_networks_reachable:
            msg = 'Tenant networks not configured to be reachable.'
            LOG.info(msg)
            return
        # The target login is assumed to have been configured for
        # key-based authentication by cloud-init.
        try:
            for net_name, ip_addresses in server.networks.iteritems():
                for ip_address in ip_addresses:
                    self._check_vm_connectivity(ip_address,
                                                username,
                                                private_key,
                                                should_connect=should_connect)
        except Exception:
            LOG.exception('Tenant network connectivity check failed')
            self._log_console_output(servers=[server])
            debug.log_ip_ns()
            raise

    def _check_public_network_connectivity(self, floating_ip,
                                           username,
                                           private_key,
                                           should_connect=True):
        # Verify SSH reachability of the floating IP from the test runner.
        # The target login is assumed to have been configured for
        # key-based authentication by cloud-init.
        try:
            self._check_vm_connectivity(floating_ip, username, private_key,
                                        should_connect=should_connect)
        except Exception:
            LOG.exception("Public network connectivity check failed")
            debug.log_ip_ns()
            raise

    def _check_network_connectivity(self, should_connect=True):
        # Check both the tenant network and the floating IP path.
        username = CONF.compute.image_ssh_user
        private_key = self.keypair.private_key
        self._check_tenant_network_connectivity(self.server,
                                                username,
                                                private_key,
                                                should_connect=should_connect)
        floating_ip = self.floating_ip.floating_ip_address
        self._check_public_network_connectivity(floating_ip,
                                                username,
                                                private_key,
                                                should_connect=should_connect)

    def _wait_server_status_and_check_network_connectivity(self):
        # Wait for the server to come back ACTIVE, then re-verify
        # connectivity.
        self.status_timeout(self.compute_client.servers, self.server.id,
                            'ACTIVE')
        self._check_network_connectivity()

    @services('compute', 'network')
    def test_server_connectivity_stop_start(self):
        self.server.stop()
        self.status_timeout(self.compute_client.servers, self.server.id,
                            'SHUTOFF')
        # A stopped server must NOT be reachable.
        self._check_network_connectivity(should_connect=False)
        self.server.start()
        self._wait_server_status_and_check_network_connectivity()

    @services('compute', 'network')
    def test_server_connectivity_reboot(self):
        self.server.reboot()
        self._wait_server_status_and_check_network_connectivity()

    @services('compute', 'network')
    def test_server_connectivity_rebuild(self):
        image_ref_alt = CONF.compute.image_ref_alt
        self.server.rebuild(image_ref_alt)
        self._wait_server_status_and_check_network_connectivity()

    @testtools.skipUnless(CONF.compute_feature_enabled.pause,
                          'Pause is not available.')
    @services('compute', 'network')
    def test_server_connectivity_pause_unpause(self):
        self.server.pause()
        self.status_timeout(self.compute_client.servers, self.server.id,
                            'PAUSED')
        self._check_network_connectivity(should_connect=False)
        self.server.unpause()
        self._wait_server_status_and_check_network_connectivity()

    @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
                          'Suspend is not available.')
    @services('compute', 'network')
    def test_server_connectivity_suspend_resume(self):
        self.server.suspend()
        self.status_timeout(self.compute_client.servers, self.server.id,
                            'SUSPENDED')
        self._check_network_connectivity(should_connect=False)
        self.server.resume()
        self._wait_server_status_and_check_network_connectivity()

    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
                          'Resize is not available.')
    @services('compute', 'network')
    def test_server_connectivity_resize(self):
        # (The duplicate re-assignment of resize_flavor after the skip
        # check was removed; it was a no-op.)
        resize_flavor = CONF.compute.flavor_ref_alt
        if resize_flavor == CONF.compute.flavor_ref:
            msg = "Skipping test - flavor_ref and flavor_ref_alt are identical"
            raise self.skipException(msg)
        self.server.resize(resize_flavor)
        self.status_timeout(self.compute_client.servers, self.server.id,
                            'VERIFY_RESIZE')
        self.server.confirm_resize()
        self._wait_server_status_and_check_network_connectivity()
tempest-2014.1.dev4108.gf22b6cc/tempest/scenario/test_load_balancer_basic.py 0000664 0001750 0001750 00000032241 12332757070 026703 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 Mirantis.inc
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import urllib
from tempest.api.network import common as net_common
from tempest import config
from tempest import exceptions
from tempest.scenario import manager
from tempest import test
config = config.CONF
class TestLoadBalancerBasic(manager.NetworkScenarioTest):

    """
    This test checks basic load balancing.

    The following is the scenario outline:
    1. Create an instance
    2. SSH to the instance and start two servers
    3. Create a load balancer with two members and with ROUND_ROBIN algorithm
       associate the VIP with a floating ip
    4. Send 10 requests to the floating ip and check that they are shared
       between the two servers and that both of them get equal portions
       of the requests
    """

    @classmethod
    def check_preconditions(cls):
        # Skip the whole class unless the LBaaS extension is enabled and
        # the servers are reachable either on the tenant network or via
        # a public (floating IP) network.
        super(TestLoadBalancerBasic, cls).check_preconditions()
        # NOTE: module-level 'config' is rebound to config.CONF in this
        # file, so 'config.network' is the network option group.
        cfg = config.network
        if not test.is_extension_enabled('lbaas', 'network'):
            msg = 'LBaaS Extension is not enabled'
            cls.enabled = False
            raise cls.skipException(msg)
        if not (cfg.tenant_networks_reachable or cfg.public_network_id):
            msg = ('Either tenant_networks_reachable must be "true", or '
                   'public_network_id must be defined.')
            cls.enabled = False
            raise cls.skipException(msg)

    @classmethod
    def setUpClass(cls):
        super(TestLoadBalancerBasic, cls).setUpClass()
        cls.check_preconditions()
        # Class-level bookkeeping for servers, pool members and IPs.
        cls.servers_keypairs = {}
        cls.members = []
        cls.floating_ips = {}
        cls.server_ips = {}
        # Backend ports: 80 maps to the "www" inetd service, 88 to
        # "kerberos" (see _start_servers).
        cls.port1 = 80
        cls.port2 = 88

    def setUp(self):
        super(TestLoadBalancerBasic, self).setUp()
        self.server_ips = {}
        self._create_security_group()

    def cleanup_wrapper(self, resource):
        # Scope resource cleanup to this test class.
        self.cleanup_resource(resource, self.__class__.__name__)

    def _create_security_group(self):
        # One security group with ingress rules for both backend ports.
        self.security_group = self._create_security_group_neutron(
            tenant_id=self.tenant_id)
        self._create_security_group_rules_for_port(self.port1)
        self._create_security_group_rules_for_port(self.port2)
        self.addCleanup(self.cleanup_wrapper, self.security_group)

    def _create_security_group_rules_for_port(self, port):
        # Allow ingress TCP traffic to exactly one port.
        rule = {
            'direction': 'ingress',
            'protocol': 'tcp',
            'port_range_min': port,
            'port_range_max': port,
        }
        self._create_security_group_rule(
            client=self.network_client,
            secgroup=self.security_group,
            tenant_id=self.tenant_id,
            **rule)

    def _create_server(self, name):
        # Boot one backend server on the first tenant network.  When the
        # tenant network is not directly reachable, attach a floating IP
        # so the test runner can SSH to the instance; the reachable
        # address is recorded in self.server_ips.
        keypair = self.create_keypair(name='keypair-%s' % name)
        self.addCleanup(self.cleanup_wrapper, keypair)
        security_groups = [self.security_group.name]
        net = self._list_networks(tenant_id=self.tenant_id)[0]
        create_kwargs = {
            'nics': [
                {'net-id': net['id']},
            ],
            'key_name': keypair.name,
            'security_groups': security_groups,
        }
        server = self.create_server(name=name,
                                    create_kwargs=create_kwargs)
        self.addCleanup(self.cleanup_wrapper, server)
        self.servers_keypairs[server.id] = keypair
        if (config.network.public_network_id and not
                config.network.tenant_networks_reachable):
            public_network_id = config.network.public_network_id
            floating_ip = self._create_floating_ip(
                server, public_network_id)
            self.addCleanup(self.cleanup_wrapper, floating_ip)
            self.floating_ips[floating_ip] = server
            self.server_ips[server.id] = floating_ip.floating_ip_address
        else:
            self.server_ips[server.id] = server.networks[net['name']][0]
        self.assertTrue(self.servers_keypairs)
        return server

    def _create_servers(self):
        # Two backends, named "server1" and "server2".
        for count in range(2):
            self._create_server(name=("server%s" % (count + 1)))
        self.assertEqual(len(self.servers_keypairs), 2)

    def _start_servers(self):
        """
        Start two backends

        1. SSH to the instance
        2. Start two http backends listening on ports 80 and 88 respectively
        In case there are two instances, each backend is created on a separate
        instance.

        The backends are the inetd services. To start them we need to edit
        /etc/inetd.conf in the following way:
        www stream tcp nowait root /bin/sh sh /home/cirros/script_name

        Where /home/cirros/script_name is a path to a script which
        echoes the responses:
        echo -e 'HTTP/1.0 200 OK\r\n\r\nserver_name

        If we want the server to listen on port 88, then we use
        "kerberos" instead of "www".
        """
        for server_id, ip in self.server_ips.iteritems():
            private_key = self.servers_keypairs[server_id].private_key
            server_name = self.compute_client.servers.get(server_id).name
            ssh_client = self.get_remote_client(
                server_or_ip=ip,
                private_key=private_key)
            ssh_client.validate_authentication()
            # Create service for inetd.  The response script echoes an
            # HTTP 200 header followed by the server name, so the client
            # can tell which backend answered.  (The heavy escaping is
            # deliberate: the string passes through this format, the
            # remote shell and a nested "echo -e".)
            create_script = """sudo sh -c "echo -e \\"echo -e 'HTTP/1.0 """ \
                            """200 OK\\\\\\r\\\\\\n\\\\\\r\\\\\\n""" \
                            """%(server)s'\\" >>/home/cirros/%(script)s\""""

            cmd = create_script % {
                'server': server_name,
                'script': 'script1'}
            ssh_client.exec_command(cmd)
            # Configure inetd
            configure_inetd = """sudo sh -c "echo -e \\"%(service)s """ \
                              """stream tcp nowait root /bin/sh sh """ \
                              """/home/cirros/%(script)s\\" >> """ \
                              """/etc/inetd.conf\""""
            # "www" stands for port 80
            cmd = configure_inetd % {'service': 'www',
                                     'script': 'script1'}
            ssh_client.exec_command(cmd)

            if len(self.server_ips) == 1:
                # Only one instance available: host the second backend on
                # the same box, responding as "server2" on port 88.
                cmd = create_script % {'server': 'server2',
                                       'script': 'script2'}
                ssh_client.exec_command(cmd)
                # "kerberos" stands for port 88
                cmd = configure_inetd % {'service': 'kerberos',
                                         'script': 'script2'}
                ssh_client.exec_command(cmd)

            # Get PIDs of inetd
            pids = ssh_client.get_pids('inetd')
            if pids != ['']:
                # If there are any inetd processes, reload them
                kill_cmd = "sudo kill -HUP %s" % ' '.join(pids)
                ssh_client.exec_command(kill_cmd)
            else:
                # In other case start inetd
                start_inetd = "sudo /usr/sbin/inetd /etc/inetd.conf"
                ssh_client.exec_command(start_inetd)

    def _check_connection(self, check_ip, port=80):
        # Poll (busy-wait) until an HTTP GET against check_ip:port
        # returns 200, or ping_timeout seconds have elapsed.
        def try_connect(ip, port):
            try:
                resp = urllib.urlopen("http://{0}:{1}/".format(ip, port))
                if resp.getcode() == 200:
                    return True
                return False
            except IOError:
                return False
        timeout = config.compute.ping_timeout
        start = time.time()
        while not try_connect(check_ip, port):
            if (time.time() - start) > timeout:
                message = "Timed out trying to connect to %s" % check_ip
                raise exceptions.TimeoutException(message)

    def _create_pool(self):
        """Create a pool with ROUND_ROBIN algorithm."""
        # get tenant subnet and verify there's only one
        subnet = self._list_subnets(tenant_id=self.tenant_id)[0]
        self.subnet = net_common.DeletableSubnet(client=self.network_client,
                                                 **subnet)
        self.pool = super(TestLoadBalancerBasic, self)._create_pool(
            lb_method='ROUND_ROBIN',
            protocol='HTTP',
            subnet_id=self.subnet.id)
        self.addCleanup(self.cleanup_wrapper, self.pool)
        self.assertTrue(self.pool)

    def _create_members(self):
        """
        Create two members.

        In case there is only one server, create both members with the same ip
        but with different ports to listen on.
        """
        for server_id, ip in self.server_ips.iteritems():
            if len(self.server_ips) == 1:
                member1 = self._create_member(address=ip,
                                              protocol_port=self.port1,
                                              pool_id=self.pool.id)
                self.addCleanup(self.cleanup_wrapper, member1)
                member2 = self._create_member(address=ip,
                                              protocol_port=self.port2,
                                              pool_id=self.pool.id)
                self.addCleanup(self.cleanup_wrapper, member2)
                self.members.extend([member1, member2])
            else:
                member = self._create_member(address=ip,
                                             protocol_port=self.port1,
                                             pool_id=self.pool.id)
                self.addCleanup(self.cleanup_wrapper, member)
                self.members.append(member)
        self.assertTrue(self.members)

    def _assign_floating_ip_to_vip(self, vip):
        # Expose the VIP on the public network through a floating IP
        # bound to the VIP's neutron port.
        public_network_id = config.network.public_network_id
        port_id = vip.port_id
        floating_ip = self._create_floating_ip(vip, public_network_id,
                                               port_id=port_id)
        self.addCleanup(self.cleanup_wrapper, floating_ip)
        self.floating_ips.setdefault(vip.id, [])
        self.floating_ips[vip.id].append(floating_ip)

    def _create_load_balancer(self):
        # Pool + members + VIP; then wait until neutron reports the VIP
        # ACTIVE before sending any traffic at it.
        self._create_pool()
        self._create_members()
        self.vip = self._create_vip(protocol='HTTP',
                                    protocol_port=80,
                                    subnet_id=self.subnet.id,
                                    pool_id=self.pool.id)
        self.addCleanup(self.cleanup_wrapper, self.vip)
        self.status_timeout(NeutronRetriever(self.network_client,
                                             self.network_client.vip_path,
                                             net_common.DeletableVip),
                            self.vip.id,
                            expected_status='ACTIVE')
        if (config.network.public_network_id and not
                config.network.tenant_networks_reachable):
            self._assign_floating_ip_to_vip(self.vip)
            self.vip_ip = self.floating_ips[
                self.vip.id][0]['floating_ip_address']
        else:
            self.vip_ip = self.vip.address

    def _check_load_balancing(self):
        """
        1. Send 100 requests on the floating ip associated with the VIP
        2. Check that the requests are shared between
        the two servers and that both of them get equal portions
        of the requests
        """
        self._check_connection(self.vip_ip)
        resp = self._send_requests(self.vip_ip)
        # ROUND_ROBIN over two members must split 100 requests exactly
        # 50/50 between the two backends.
        self.assertEqual(set(["server1\n", "server2\n"]), set(resp))
        self.assertEqual(50, resp.count("server1\n"))
        self.assertEqual(50, resp.count("server2\n"))

    def _send_requests(self, vip_ip):
        # Collect the body of 100 sequential GETs; each body is the name
        # of the backend that served it (see _start_servers).
        resp = []
        for count in range(100):
            resp.append(
                urllib.urlopen(
                    "http://{0}/".format(vip_ip)).read())
        return resp

    @test.skip_because(bug='1295165')
    @test.attr(type='smoke')
    @test.services('compute', 'network')
    def test_load_balancer_basic(self):
        # NOTE: only one server is created here; _start_servers then runs
        # both backends on it (ports 80 and 88).
        self._create_server('server1')
        self._start_servers()
        self._create_load_balancer()
        self._check_load_balancing()
class NeutronRetriever(object):
    """
    Helper class to make possible handling neutron objects returned by GET
    requests as attribute dicts.

    When the get() method is called, the returned dictionary is wrapped into
    a corresponding DeletableResource class which provides attribute access
    to dictionary values.

    Usage:
        This retriever is used to allow using status_timeout from
        tempest.manager with Neutron objects.
    """

    def __init__(self, network_client, path, resource):
        # network_client: client used to issue the GET request
        # path: URL template with a single '%s' placeholder for the id
        # resource: wrapper class (e.g. a DeletableResource subclass)
        #           instantiated with the response body as keyword args
        self.network_client = network_client
        self.path = path
        self.resource = resource

    def get(self, thing_id):
        """Fetch the object with the given id and wrap it as a resource.

        The GET response is a single-key dict (e.g. {'vip': {...}}); the
        inner body is what gets wrapped.
        """
        obj = self.network_client.get(self.path % thing_id)
        # list() is required for Python 3 compatibility: dict.values()
        # returns a non-indexable view there, so obj.values()[0] (the
        # previous Python-2-only idiom) raises TypeError.
        body = list(obj.values())[0]
        return self.resource(client=self.network_client, **body)
tempest-2014.1.dev4108.gf22b6cc/tempest/scenario/orchestration/ 0000775 0001750 0001750 00000000000 12332757136 024250 5 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/scenario/orchestration/test_autoscaling.py 0000664 0001750 0001750 00000010727 12332757070 030176 0 ustar chuck chuck 0000000 0000000 # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import heatclient.exc as heat_exceptions
import time
from tempest import config
from tempest.scenario import manager
from tempest import test
CONF = config.CONF
class AutoScalingTest(manager.OrchestrationScenarioTest):
    """Heat autoscaling scenario: scale a server group up and back down."""

    def setUp(self):
        super(AutoScalingTest, self).setUp()
        if not CONF.orchestration.image_ref:
            raise self.skipException("No image available to test")
        self.client = self.orchestration_client

    def assign_keypair(self):
        # Use the configured keypair when one is set; otherwise create a
        # throwaway one for this stack.
        self.stack_name = self._stack_rand_name()
        if CONF.orchestration.keypair_name:
            self.keypair_name = CONF.orchestration.keypair_name
        else:
            self.keypair = self.create_keypair()
            self.keypair_name = self.keypair.id

    def launch_stack(self):
        # Create the autoscaling stack on the default network, using the
        # test_autoscaling.yaml template that lives next to this file.
        net = self._get_default_network()
        self.parameters = {
            'KeyName': self.keypair_name,
            'InstanceType': CONF.orchestration.instance_type,
            'ImageId': CONF.orchestration.image_ref,
            'StackStart': str(time.time()),
            'Subnet': net['subnets'][0]
        }

        # create the stack
        self.template = self._load_template(__file__, 'test_autoscaling.yaml')
        self.client.stacks.create(
            stack_name=self.stack_name,
            template=self.template,
            parameters=self.parameters)

        self.stack = self.client.stacks.get(self.stack_name)
        self.stack_identifier = '%s/%s' % (self.stack_name, self.stack.id)

        # if a keypair was set, do not delete the stack on exit to allow
        # for manual post-mortems
        if not CONF.orchestration.keypair_name:
            self.set_resource('stack', self.stack)

    @test.skip_because(bug="1257575")
    @test.attr(type='slow')
    @test.services('orchestration', 'compute')
    def test_scale_up_then_down(self):
        # Launch the stack, wait for creation, then watch the scaling
        # group grow 1 -> 3 servers and shrink back to 1 as the template's
        # memory alarms fire; finally delete the stack.
        self.assign_keypair()
        self.launch_stack()

        sid = self.stack_identifier
        timeout = CONF.orchestration.build_timeout
        interval = 10  # seconds between scale checks

        self.assertEqual('CREATE', self.stack.action)
        # wait for create to complete.
        self.status_timeout(self.client.stacks, sid, 'COMPLETE',
                            error_status='FAILED')

        self.stack.get()
        self.assertEqual('CREATE_COMPLETE', self.stack.stack_status)

        # the resource SmokeServerGroup is implemented as a nested
        # stack, so servers can be counted by counting the resources
        # inside that nested stack
        resource = self.client.resources.get(sid, 'SmokeServerGroup')
        nested_stack_id = resource.physical_resource_id

        def server_count():
            # the number of servers is the number of resources
            # in the nested stack; cached on self so assertScale can
            # report the last observed count on failure.
            self.server_count = len(
                self.client.resources.list(nested_stack_id))
            return self.server_count

        def assertScale(from_servers, to_servers):
            # Poll until the group reaches to_servers (or time out).
            test.call_until_true(lambda: server_count() == to_servers,
                                 timeout, interval)
            self.assertEqual(to_servers, self.server_count,
                             'Failed scaling from %d to %d servers. '
                             'Current server count: %s' % (
                                 from_servers, to_servers,
                                 self.server_count))

        # he marched them up to the top of the hill
        assertScale(1, 2)
        assertScale(2, 3)

        # and he marched them down again
        assertScale(3, 2)
        assertScale(2, 1)

        # delete stack on completion
        self.stack.delete()
        self.status_timeout(self.client.stacks, sid, 'COMPLETE',
                            error_status='FAILED',
                            not_found_exception=heat_exceptions.NotFound)

        try:
            self.stack.get()
            self.assertEqual('DELETE_COMPLETE', self.stack.stack_status)
        except heat_exceptions.NotFound:
            # The stack may disappear entirely once deleted; that also
            # counts as a successful delete.
            pass
tempest-2014.1.dev4108.gf22b6cc/tempest/scenario/orchestration/__init__.py 0000664 0001750 0001750 00000000000 12332757070 026344 0 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/scenario/orchestration/test_autoscaling.yaml 0000664 0001750 0001750 00000014353 12332757070 030507 0 ustar chuck chuck 0000000 0000000 HeatTemplateFormatVersion: '2012-12-12'
Description: |
Template which tests autoscaling and load balancing
Parameters:
KeyName:
Type: String
InstanceType:
Type: String
ImageId:
Type: String
Subnet:
Type: String
StackStart:
Description: Epoch seconds when the stack was launched
Type: Number
ConsumeStartSeconds:
Description: Seconds after invocation when memory should be consumed
Type: Number
Default: '60'
ConsumeStopSeconds:
Description: Seconds after StackStart when memory should be released
Type: Number
Default: '420'
ScaleUpThreshold:
Description: Memory percentage threshold to scale up on
Type: String
Default: '70'
ScaleDownThreshold:
Description: Memory percentage threshold to scale down on
Type: String
Default: '60'
ConsumeMemoryLimit:
Description: Memory percentage threshold to consume
Type: Number
Default: '71'
Resources:
SmokeServerGroup:
Type: AWS::AutoScaling::AutoScalingGroup
Properties:
AvailabilityZones: {'Fn::GetAZs': ''}
LaunchConfigurationName: {Ref: LaunchConfig}
MinSize: '1'
MaxSize: '3'
VPCZoneIdentifier: [{Ref: Subnet}]
SmokeServerScaleUpPolicy:
Type: AWS::AutoScaling::ScalingPolicy
Properties:
AdjustmentType: ChangeInCapacity
AutoScalingGroupName: {Ref: SmokeServerGroup}
Cooldown: '60'
ScalingAdjustment: '1'
SmokeServerScaleDownPolicy:
Type: AWS::AutoScaling::ScalingPolicy
Properties:
AdjustmentType: ChangeInCapacity
AutoScalingGroupName: {Ref: SmokeServerGroup}
Cooldown: '60'
ScalingAdjustment: '-1'
MEMAlarmHigh:
Type: AWS::CloudWatch::Alarm
Properties:
AlarmDescription: Scale-up if MEM > ScaleUpThreshold% for 10 seconds
MetricName: MemoryUtilization
Namespace: system/linux
Statistic: Average
Period: '10'
EvaluationPeriods: '1'
Threshold: {Ref: ScaleUpThreshold}
AlarmActions: [{Ref: SmokeServerScaleUpPolicy}]
Dimensions:
- Name: AutoScalingGroupName
Value: {Ref: SmokeServerGroup}
ComparisonOperator: GreaterThanThreshold
MEMAlarmLow:
Type: AWS::CloudWatch::Alarm
Properties:
AlarmDescription: Scale-down if MEM < ScaleDownThreshold% for 10 seconds
MetricName: MemoryUtilization
Namespace: system/linux
Statistic: Average
Period: '10'
EvaluationPeriods: '1'
Threshold: {Ref: ScaleDownThreshold}
AlarmActions: [{Ref: SmokeServerScaleDownPolicy}]
Dimensions:
- Name: AutoScalingGroupName
Value: {Ref: SmokeServerGroup}
ComparisonOperator: LessThanThreshold
CfnUser:
Type: AWS::IAM::User
SmokeKeys:
Type: AWS::IAM::AccessKey
Properties:
UserName: {Ref: CfnUser}
SmokeSecurityGroup:
Type: AWS::EC2::SecurityGroup
Properties:
GroupDescription: Standard firewall rules
SecurityGroupIngress:
- {IpProtocol: tcp, FromPort: '22', ToPort: '22', CidrIp: 0.0.0.0/0}
- {IpProtocol: tcp, FromPort: '80', ToPort: '80', CidrIp: 0.0.0.0/0}
LaunchConfig:
Type: AWS::AutoScaling::LaunchConfiguration
Metadata:
AWS::CloudFormation::Init:
config:
files:
/etc/cfn/cfn-credentials:
content:
Fn::Replace:
- $AWSAccessKeyId: {Ref: SmokeKeys}
$AWSSecretKey: {'Fn::GetAtt': [SmokeKeys, SecretAccessKey]}
- |
AWSAccessKeyId=$AWSAccessKeyId
AWSSecretKey=$AWSSecretKey
mode: '000400'
owner: root
group: root
/root/watch_loop:
content:
Fn::Replace:
- _hi_: {Ref: MEMAlarmHigh}
_lo_: {Ref: MEMAlarmLow}
- |
#!/bin/bash
while :
do
/opt/aws/bin/cfn-push-stats --watch _hi_ --mem-util
/opt/aws/bin/cfn-push-stats --watch _lo_ --mem-util
sleep 4
done
mode: '000700'
owner: root
group: root
/root/consume_memory:
content:
Fn::Replace:
- StackStart: {Ref: StackStart}
ConsumeStopSeconds: {Ref: ConsumeStopSeconds}
ConsumeStartSeconds: {Ref: ConsumeStartSeconds}
ConsumeMemoryLimit: {Ref: ConsumeMemoryLimit}
- |
#!/usr/bin/env python
import psutil
import time
import datetime
import sys
a = []
sleep_until_consume = ConsumeStartSeconds
stack_start = StackStart
consume_stop_time = stack_start + ConsumeStopSeconds
memory_limit = ConsumeMemoryLimit
if sleep_until_consume > 0:
sys.stdout.flush()
time.sleep(sleep_until_consume)
while psutil.virtual_memory().percent < memory_limit:
sys.stdout.flush()
a.append(' ' * 10**5)
time.sleep(0.1)
sleep_until_exit = consume_stop_time - time.time()
if sleep_until_exit > 0:
time.sleep(sleep_until_exit)
mode: '000700'
owner: root
group: root
Properties:
ImageId: {Ref: ImageId}
InstanceType: {Ref: InstanceType}
KeyName: {Ref: KeyName}
SecurityGroups: [{Ref: SmokeSecurityGroup}]
UserData:
Fn::Base64:
Fn::Replace:
- ConsumeStopSeconds: {Ref: ConsumeStopSeconds}
ConsumeStartSeconds: {Ref: ConsumeStartSeconds}
ConsumeMemoryLimit: {Ref: ConsumeMemoryLimit}
- |
#!/bin/bash -v
/opt/aws/bin/cfn-init
# report on memory consumption every 4 seconds
/root/watch_loop &
# wait ConsumeStartSeconds then ramp up memory consumption
# until it is over ConsumeMemoryLimit%
# then exits ConsumeStopSeconds seconds after stack launch
/root/consume_memory > /root/consume_memory.log &
tempest-2014.1.dev4108.gf22b6cc/tempest/scenario/test_large_ops.py 0000664 0001750 0001750 00000004516 12332757070 024753 0 ustar chuck chuck 0000000 0000000 # Copyright 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.common.utils import data_utils
from tempest import config
from tempest.openstack.common import log as logging
from tempest.scenario import manager
from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
class TestLargeOpsScenario(manager.NetworkScenarioTest):

    """Exercise bulk server operations.

    Scenario:
    * As a regular user, boot a batch of instances with a single
      nova call, and repeat that three times in a row.
    * TODO: same thing for cinder
    """

    @classmethod
    def setUpClass(cls):
        cls.set_network_resources()
        super(TestLargeOpsScenario, cls).setUpClass()

    def _wait_for_server_status(self, status):
        # Block until every tracked server reaches the requested status.
        for instance in self.servers:
            self.status_timeout(
                self.compute_client.servers, instance.id, status)

    def nova_boot(self):
        server_name = data_utils.rand_name('scenario-server-')
        compute = self.compute_client
        secgroup = self._create_security_group_nova()
        self.servers = compute.servers.create(
            name=server_name, image=self.image,
            flavor=CONF.compute.flavor_ref,
            min_count=CONF.scenario.large_ops_number,
            security_groups=[secgroup.name])
        # needed because of bug 1199788
        self.servers = [s for s in compute.servers.list()
                        if server_name in s.name]
        for s in self.servers:
            self.set_resource(s.name, s)
        self._wait_for_server_status('ACTIVE')

    @test.services('compute', 'image')
    def test_large_ops_scenario(self):
        # A batch size below one means the operator disabled this test.
        if CONF.scenario.large_ops_number < 1:
            return
        self.glance_image_create()
        self.nova_boot()
        self.nova_boot()
        self.nova_boot()
tempest-2014.1.dev4108.gf22b6cc/tempest/scenario/test_security_groups_basic_ops.py 0000664 0001750 0001750 00000044324 12332757070 030271 0 ustar chuck chuck 0000000 0000000 # Copyright 2013 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import clients
from tempest.common import debug
from tempest.common.utils import data_utils
from tempest import config
from tempest.openstack.common import log as logging
from tempest.scenario import manager
from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
class TestSecurityGroupsBasicOps(manager.NetworkScenarioTest):

    """
    This test suite assumes that Nova has been configured to
    boot VM's with Neutron-managed networking, and attempts to
    verify cross tenant connectivity as follows

    ssh:
        in order to overcome "ip namespace", each tenant has an "access point"
        VM with floating-ip open to incoming ssh connection allowing network
        commands (ping/ssh) to be executed from within the
        tenant-network-namespace
        Tempest host performs key-based authentication to the ssh server via
        floating IP address

    connectivity test is done by pinging destination server via source server
    ssh connection.
    success - ping returns
    failure - ping_timeout reached

    setup:
        for primary tenant:
            1. create a network&subnet
            2. create a router (if public router isn't configured)
            3. connect tenant network to public network via router
            4. create an access point:
                a. a security group open to incoming ssh connection
                b. a VM with a floating ip
            5. create a general empty security group (same as "default", but
               without rules allowing in-tenant traffic)

    tests:
        1. _verify_network_details
        2. _verify_mac_addr: for each access point verify that
           (subnet, fix_ip, mac address) are as defined in the port list
        3. _test_in_tenant_block: test that in-tenant traffic is disabled
           without rules allowing it
        4. _test_in_tenant_allow: test that in-tenant traffic is enabled
           once an appropriate rule has been created
        5. _test_cross_tenant_block: test that cross-tenant traffic is
           disabled without a rule allowing it on destination tenant
        6. _test_cross_tenant_allow:
            * test that cross-tenant traffic is enabled once an appropriate
              rule has been created on destination tenant.
            * test that reverse traffic is still blocked
            * test that reverse traffic is enabled once an appropriate rule
              has been created on source tenant

    assumptions:
        1. alt_tenant/user existed and is different from primary_tenant/user
        2. Public network is defined and reachable from the Tempest host
        3. Public router can either be:
            * defined, in which case all tenants networks can connect
              directly to it, and cross tenant check will be done on the
              private IP of the destination tenant
            or
            * not defined (empty string), in which case each tenant will
              have its own router connected to the public network
    """

    class TenantProperties():
        """
        helper class to save tenant details
            id
            credentials
            network
            subnet
            security groups
            servers
            access point
        """

        def __init__(self, credentials):
            self.manager = clients.OfficialClientManager(credentials)
            # Credentials from manager are filled with both names and IDs
            self.creds = self.manager.credentials
            self.network = None
            self.subnet = None
            self.router = None
            self.security_groups = {}
            self.servers = list()

        def set_network(self, network, subnet, router):
            self.network = network
            self.subnet = subnet
            self.router = router

        def _get_tenant_credentials(self):
            # FIXME(andreaf) Unused method
            return self.creds

    @classmethod
    def check_preconditions(cls):
        super(TestSecurityGroupsBasicOps, cls).check_preconditions()
        # NOTE: tenant ids are plain strings, so they must be compared by
        # value (==), not by identity (is); the previous identity check
        # could report two distinct-but-equal ids as different objects and
        # let the suite run against a single tenant.
        if (cls.alt_creds is None) or \
                (cls.tenant_id == cls.alt_creds.tenant_id):
            msg = 'No alt_tenant defined'
            cls.enabled = False
            raise cls.skipException(msg)
        if not (CONF.network.tenant_networks_reachable or
                CONF.network.public_network_id):
            msg = ('Either tenant_networks_reachable must be "true", or '
                   'public_network_id must be defined.')
            cls.enabled = False
            raise cls.skipException(msg)

    @classmethod
    def setUpClass(cls):
        super(TestSecurityGroupsBasicOps, cls).setUpClass()
        cls.alt_creds = cls.alt_credentials()
        cls.alt_manager = clients.OfficialClientManager(cls.alt_creds)
        # Credentials from the manager are filled with both IDs and Names
        cls.alt_creds = cls.alt_manager.credentials
        cls.check_preconditions()
        # TODO(mnewby) Consider looking up entities as needed instead
        # of storing them as collections on the class.
        cls.floating_ips = {}
        cls.tenants = {}
        creds = cls.credentials()
        cls.primary_tenant = cls.TenantProperties(creds)
        cls.alt_tenant = cls.TenantProperties(cls.alt_creds)
        for tenant in [cls.primary_tenant, cls.alt_tenant]:
            cls.tenants[tenant.creds.tenant_id] = tenant
        # Access via floating ip only when no shared public router exists.
        cls.floating_ip_access = not CONF.network.public_router_id

    def cleanup_wrapper(self, resource):
        self.cleanup_resource(resource, self.__class__.__name__)

    def setUp(self):
        super(TestSecurityGroupsBasicOps, self).setUp()
        self._deploy_tenant(self.primary_tenant)
        self._verify_network_details(self.primary_tenant)
        self._verify_mac_addr(self.primary_tenant)

    def _create_tenant_keypairs(self, tenant_id):
        # Create and register a keypair used for ssh to the access point.
        keypair = self.create_keypair(
            name=data_utils.rand_name('keypair-smoke-'))
        self.addCleanup(self.cleanup_wrapper, keypair)
        self.tenants[tenant_id].keypair = keypair

    def _create_tenant_security_groups(self, tenant):
        # "access": permits inbound ssh; "default": intentionally empty.
        access_sg = self._create_empty_security_group(
            namestart='secgroup_access-',
            tenant_id=tenant.creds.tenant_id
        )
        self.addCleanup(self.cleanup_wrapper, access_sg)

        # don't use default secgroup since it allows in-tenant traffic
        def_sg = self._create_empty_security_group(
            namestart='secgroup_general-',
            tenant_id=tenant.creds.tenant_id
        )
        self.addCleanup(self.cleanup_wrapper, def_sg)
        tenant.security_groups.update(access=access_sg, default=def_sg)
        ssh_rule = dict(
            protocol='tcp',
            port_range_min=22,
            port_range_max=22,
            direction='ingress',
        )
        rule = self._create_security_group_rule(secgroup=access_sg,
                                                **ssh_rule)
        self.addCleanup(self.cleanup_wrapper, rule)

    def _verify_network_details(self, tenant):
        # Checks that we see the newly created network/subnet/router via
        # checking the result of list_[networks,routers,subnets]
        # Check that (router, subnet) couple exist in port_list
        seen_nets = self._list_networks()
        seen_names = [n['name'] for n in seen_nets]
        seen_ids = [n['id'] for n in seen_nets]
        self.assertIn(tenant.network.name, seen_names)
        self.assertIn(tenant.network.id, seen_ids)

        seen_subnets = [(n['id'], n['cidr'], n['network_id'])
                        for n in self._list_subnets()]
        mysubnet = (tenant.subnet.id, tenant.subnet.cidr, tenant.network.id)
        self.assertIn(mysubnet, seen_subnets)

        seen_routers = self._list_routers()
        seen_router_ids = [n['id'] for n in seen_routers]
        seen_router_names = [n['name'] for n in seen_routers]
        self.assertIn(tenant.router.name, seen_router_names)
        self.assertIn(tenant.router.id, seen_router_ids)

        myport = (tenant.router.id, tenant.subnet.id)
        router_ports = [(i['device_id'], i['fixed_ips'][0]['subnet_id']) for i
                        in self.network_client.list_ports()['ports']
                        if i['device_owner'] == 'network:router_interface']
        self.assertIn(myport, router_ports)

    def _create_server(self, name, tenant, security_groups=None):
        """
        creates a server and assigns to security group
        """
        self._set_compute_context(tenant)
        if security_groups is None:
            security_groups = [tenant.security_groups['default'].name]
        create_kwargs = {
            'nics': [
                {'net-id': tenant.network.id},
            ],
            'key_name': tenant.keypair.name,
            'security_groups': security_groups,
            'tenant_id': tenant.creds.tenant_id
        }
        server = self.create_server(name=name, create_kwargs=create_kwargs)
        self.addCleanup(self.cleanup_wrapper, server)
        return server

    def _create_tenant_servers(self, tenant, num=1):
        # Boot `num` plain servers in the tenant's "default" (empty) group.
        for i in range(num):
            name = 'server-{tenant}-gen-{num}-'.format(
                tenant=tenant.creds.tenant_name,
                num=i
            )
            name = data_utils.rand_name(name)
            server = self._create_server(name, tenant)
            tenant.servers.append(server)

    def _set_access_point(self, tenant):
        """
        creates a server in a secgroup with rule allowing external ssh
        in order to access tenant internal network
        workaround ip namespace
        """
        secgroups = [sg.name for sg in tenant.security_groups.values()]
        name = 'server-{tenant}-access_point-'.format(
            tenant=tenant.creds.tenant_name)
        name = data_utils.rand_name(name)
        server = self._create_server(name, tenant,
                                     security_groups=secgroups)
        tenant.access_point = server
        self._assign_floating_ips(server)

    def _assign_floating_ips(self, server):
        public_network_id = CONF.network.public_network_id
        floating_ip = self._create_floating_ip(server, public_network_id)
        self.addCleanup(self.cleanup_wrapper, floating_ip)
        self.floating_ips.setdefault(server, floating_ip)

    def _create_tenant_network(self, tenant):
        network, subnet, router = self._create_networks(tenant.creds.tenant_id)
        for r in [network, router, subnet]:
            self.addCleanup(self.cleanup_wrapper, r)
        tenant.set_network(network, subnet, router)

    def _set_compute_context(self, tenant):
        # Point self.compute_client at this tenant's nova client.
        self.compute_client = tenant.manager.compute_client
        return self.compute_client

    def _deploy_tenant(self, tenant_or_id):
        """
        creates:
            network
            subnet
            router (if public not defined)
            access security group
            access-point server
        """
        if not isinstance(tenant_or_id, self.TenantProperties):
            tenant = self.tenants[tenant_or_id]
            tenant_id = tenant_or_id
        else:
            tenant = tenant_or_id
            tenant_id = tenant.creds.tenant_id
        self._set_compute_context(tenant)
        self._create_tenant_keypairs(tenant_id)
        self._create_tenant_network(tenant)
        self._create_tenant_security_groups(tenant)
        self._set_access_point(tenant)

    def _get_server_ip(self, server, floating=False):
        """
        returns the ip (floating/internal) of a server
        """
        if floating:
            server_ip = self.floating_ips[server].floating_ip_address
        else:
            server_ip = None
            network_name = self.tenants[server.tenant_id].network.name
            if network_name in server.networks:
                server_ip = server.networks[network_name][0]
        return server_ip

    def _connect_to_access_point(self, tenant):
        """
        create ssh connection to tenant access point
        """
        access_point_ssh = \
            self.floating_ips[tenant.access_point].floating_ip_address
        private_key = tenant.keypair.private_key
        access_point_ssh = self._ssh_to_server(access_point_ssh,
                                               private_key=private_key)
        return access_point_ssh

    def _check_connectivity(self, access_point, ip, should_succeed=True):
        # Assert ping reachability (or lack of it) from the access point,
        # dumping network debug info on failure before re-raising.
        if should_succeed:
            msg = "Timed out waiting for %s to become reachable" % ip
        else:
            msg = "%s is reachable" % ip
        try:
            self.assertTrue(self._check_remote_connectivity(access_point, ip,
                                                            should_succeed),
                            msg)
        except Exception:
            debug.log_net_debug()
            raise

    def _test_in_tenant_block(self, tenant):
        access_point_ssh = self._connect_to_access_point(tenant)
        for server in tenant.servers:
            self._check_connectivity(access_point=access_point_ssh,
                                     ip=self._get_server_ip(server),
                                     should_succeed=False)

    def _test_in_tenant_allow(self, tenant):
        ruleset = dict(
            protocol='icmp',
            remote_group_id=tenant.security_groups['default'].id,
            direction='ingress'
        )
        rule = self._create_security_group_rule(
            secgroup=tenant.security_groups['default'],
            **ruleset
        )
        self.addCleanup(self.cleanup_wrapper, rule)
        access_point_ssh = self._connect_to_access_point(tenant)
        for server in tenant.servers:
            self._check_connectivity(access_point=access_point_ssh,
                                     ip=self._get_server_ip(server))

    def _test_cross_tenant_block(self, source_tenant, dest_tenant):
        """
        if public router isn't defined, then dest_tenant access is via
        floating-ip
        """
        access_point_ssh = self._connect_to_access_point(source_tenant)
        ip = self._get_server_ip(dest_tenant.access_point,
                                 floating=self.floating_ip_access)
        self._check_connectivity(access_point=access_point_ssh, ip=ip,
                                 should_succeed=False)

    def _test_cross_tenant_allow(self, source_tenant, dest_tenant):
        """
        check for each direction:
        creating rule for tenant incoming traffic enables only 1way traffic
        """
        ruleset = dict(
            protocol='icmp',
            direction='ingress'
        )
        rule_s2d = self._create_security_group_rule(
            secgroup=dest_tenant.security_groups['default'],
            **ruleset
        )
        self.addCleanup(self.cleanup_wrapper, rule_s2d)
        access_point_ssh = self._connect_to_access_point(source_tenant)
        ip = self._get_server_ip(dest_tenant.access_point,
                                 floating=self.floating_ip_access)
        self._check_connectivity(access_point_ssh, ip)

        # test that reverse traffic is still blocked
        self._test_cross_tenant_block(dest_tenant, source_tenant)

        # allow reverse traffic and check
        rule_d2s = self._create_security_group_rule(
            secgroup=source_tenant.security_groups['default'],
            **ruleset
        )
        self.addCleanup(self.cleanup_wrapper, rule_d2s)

        access_point_ssh_2 = self._connect_to_access_point(dest_tenant)
        ip = self._get_server_ip(source_tenant.access_point,
                                 floating=self.floating_ip_access)
        self._check_connectivity(access_point_ssh_2, ip)

    def _verify_mac_addr(self, tenant):
        """
        verify that VM (tenant's access point) has the same ip,mac as listed
        in port list
        """
        access_point_ssh = self._connect_to_access_point(tenant)
        mac_addr = access_point_ssh.get_mac_address()
        mac_addr = mac_addr.strip().lower()
        # Get the fixed_ips and mac_address fields of all ports. Select
        # only those two columns to reduce the size of the response.
        port_list = self.network_client.list_ports(
            fields=['fixed_ips', 'mac_address'])['ports']
        port_detail_list = [
            (port['fixed_ips'][0]['subnet_id'],
             port['fixed_ips'][0]['ip_address'],
             port['mac_address'].lower())
            for port in port_list if port['fixed_ips']
        ]
        server_ip = self._get_server_ip(tenant.access_point)
        subnet_id = tenant.subnet.id
        self.assertIn((subnet_id, server_ip, mac_addr), port_detail_list)

    @test.attr(type='smoke')
    @test.services('compute', 'network')
    def test_cross_tenant_traffic(self):
        try:
            # deploy new tenant
            self._deploy_tenant(self.alt_tenant)
            self._verify_network_details(self.alt_tenant)
            self._verify_mac_addr(self.alt_tenant)

            # cross tenant check
            source_tenant = self.primary_tenant
            dest_tenant = self.alt_tenant
            self._test_cross_tenant_block(source_tenant, dest_tenant)
            self._test_cross_tenant_allow(source_tenant, dest_tenant)
        except Exception:
            for tenant in self.tenants.values():
                self._log_console_output(servers=tenant.servers)
            raise

    @test.attr(type='smoke')
    @test.services('compute', 'network')
    def test_in_tenant_traffic(self):
        try:
            self._create_tenant_servers(self.primary_tenant, num=1)

            # in-tenant check
            self._test_in_tenant_block(self.primary_tenant)
            self._test_in_tenant_allow(self.primary_tenant)
        except Exception:
            for tenant in self.tenants.values():
                self._log_console_output(servers=tenant.servers)
            raise
tempest-2014.1.dev4108.gf22b6cc/tempest/scenario/test_dashboard_basic_ops.py 0000664 0001750 0001750 00000005123 12332757070 026744 0 ustar chuck chuck 0000000 0000000 # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import urllib
import urllib2
from lxml import html
from tempest import config
from tempest.scenario import manager
from tempest import test
CONF = config.CONF
class TestDashboardBasicOps(manager.OfficialClientTest):

    """
    This is a basic scenario test:
    * checks that the login page is available
    * logs in as a regular user
    * checks that the user home page loads without error
    """

    @classmethod
    def setUpClass(cls):
        cls.set_network_resources()
        super(TestDashboardBasicOps, cls).setUpClass()

        if not CONF.service_available.horizon:
            raise cls.skipException("Horizon support is required")

    def check_login_page(self):
        # Fetch the dashboard root page and verify the login form is
        # being served.  NOTE(review): the heading literal was mangled in
        # this copy of the file (the markup tags were stripped, splitting
        # the string across lines); "<h3>Log In</h3>" restores the heading
        # the login template renders — confirm against the Horizon theme
        # in use.
        response = urllib2.urlopen(CONF.dashboard.dashboard_url)
        self.assertIn("<h3>Log In</h3>", response.read())

    def user_login(self):
        # Keep cookies across requests so the session survives the login.
        self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor())
        response = self.opener.open(CONF.dashboard.dashboard_url).read()

        # Grab the CSRF token and default region
        csrf_token = html.fromstring(response).xpath(
            '//input[@name="csrfmiddlewaretoken"]/@value')[0]
        region = html.fromstring(response).xpath(
            '//input[@name="region"]/@value')[0]

        # Prepare login form request
        req = urllib2.Request(CONF.dashboard.login_url)
        req.add_header('Content-type', 'application/x-www-form-urlencoded')
        req.add_header('Referer', CONF.dashboard.dashboard_url)
        params = {'username': CONF.identity.username,
                  'password': CONF.identity.password,
                  'region': region,
                  'csrfmiddlewaretoken': csrf_token}
        self.opener.open(req, urllib.urlencode(params))

    def check_home_page(self):
        # A logged-in user's landing page contains the 'Overview' panel.
        response = self.opener.open(CONF.dashboard.dashboard_url)
        self.assertIn('Overview', response.read())

    @test.services('dashboard')
    def test_basic_scenario(self):
        self.check_login_page()
        self.user_login()
        self.check_home_page()
tempest-2014.1.dev4108.gf22b6cc/tempest/scenario/manager.py 0000664 0001750 0001750 00000122752 12332757070 023356 0 ustar chuck chuck 0000000 0000000 # Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import six
import subprocess
import netaddr
from neutronclient.common import exceptions as exc
from novaclient import exceptions as nova_exceptions
from tempest.api.network import common as net_common
from tempest import auth
from tempest import clients
from tempest.common import isolated_creds
from tempest.common.utils import data_utils
from tempest.common.utils.linux import remote_client
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log
import tempest.test
CONF = config.CONF
LOG = log.getLogger(__name__)
# NOTE(afazekas): Workaround for the stdout logging
# Attach a NullHandler to the nova and cinder client library loggers —
# presumably so their records are not echoed to stdout and no
# "no handlers could be found" warning is emitted; confirm against the
# client libraries' logging setup.
LOG_nova_client = logging.getLogger('novaclient.client')
LOG_nova_client.addHandler(log.NullHandler())
LOG_cinder_client = logging.getLogger('cinderclient.client')
LOG_cinder_client.addHandler(log.NullHandler())
class OfficialClientTest(tempest.test.BaseTestCase):
    """
    Official Client test base class for scenario testing.

    Official Client tests are tests that have the following characteristics:

     * Test basic operations of an API, typically in an order that
       a regular user would perform those operations
     * Test only the correct inputs and action paths -- no fuzz or
       random input data is sent, only valid inputs.
     * Use only the default client tool for calling an API
    """

    @classmethod
    def setUpClass(cls):
        """Build the official client manager and expose per-service clients
        plus the shared resource-tracking collections on the class."""
        super(OfficialClientTest, cls).setUpClass()
        cls.isolated_creds = isolated_creds.IsolatedCreds(
            cls.__name__, tempest_client=False,
            network_resources=cls.network_resources)

        cls.manager = clients.OfficialClientManager(
            credentials=cls.credentials())
        cls.compute_client = cls.manager.compute_client
        cls.image_client = cls.manager.image_client
        cls.baremetal_client = cls.manager.baremetal_client
        cls.identity_client = cls.manager.identity_client
        cls.network_client = cls.manager.network_client
        cls.volume_client = cls.manager.volume_client
        cls.object_storage_client = cls.manager.object_storage_client
        cls.orchestration_client = cls.manager.orchestration_client
        cls.data_processing_client = cls.manager.data_processing_client
        cls.resource_keys = {}
        cls.os_resources = []

    @classmethod
    def _get_credentials(cls, get_creds, ctype):
        # Isolated (per-test) credentials when tenant isolation is on,
        # otherwise the statically configured credentials of type `ctype`.
        if CONF.compute.allow_tenant_isolation:
            creds = get_creds()
        else:
            creds = auth.get_default_credentials(ctype)
        return creds

    @classmethod
    def credentials(cls):
        return cls._get_credentials(cls.isolated_creds.get_primary_creds,
                                    'user')

    @classmethod
    def alt_credentials(cls):
        return cls._get_credentials(cls.isolated_creds.get_alt_creds,
                                    'alt_user')

    @classmethod
    def admin_credentials(cls):
        return cls._get_credentials(cls.isolated_creds.get_admin_creds,
                                    'identity_admin')

    @staticmethod
    def cleanup_resource(resource, test_name):
        """Delete `resource` and wait until the deletion is observable."""
        LOG.debug("Deleting %r from shared resources of %s" %
                  (resource, test_name))
        try:
            # OpenStack resources are assumed to have a delete()
            # method which destroys the resource...
            resource.delete()
        except Exception as e:
            # If the resource is already missing, mission accomplished.
            # add status code as workaround for bug 1247568
            if (e.__class__.__name__ == 'NotFound' or
                    (hasattr(e, 'status_code') and e.status_code == 404)):
                return
            raise

        def is_deletion_complete():
            # Deletion testing is only required for objects whose
            # existence cannot be checked via retrieval.
            if isinstance(resource, dict):
                return True
            try:
                resource.get()
            except Exception as e:
                # Clients are expected to return an exception
                # called 'NotFound' if retrieval fails.
                if e.__class__.__name__ == 'NotFound':
                    return True
                raise
            return False

        # Block until resource deletion has completed or timed-out
        tempest.test.call_until_true(is_deletion_complete, 10, 1)

    @classmethod
    def tearDownClass(cls):
        # NOTE(jaypipes): Because scenario tests are typically run in a
        # specific order, and because test methods in scenario tests
        # generally create resources in a particular order, we destroy
        # resources in the reverse order in which resources are added to
        # the scenario test class object
        while cls.os_resources:
            thing = cls.os_resources.pop()
            cls.cleanup_resource(thing, cls.__name__)
        cls.isolated_creds.clear_isolated_creds()
        super(OfficialClientTest, cls).tearDownClass()

    @classmethod
    def set_resource(cls, key, thing):
        LOG.debug("Adding %r to shared resources of %s" %
                  (thing, cls.__name__))
        cls.resource_keys[key] = thing
        cls.os_resources.append(thing)

    @classmethod
    def get_resource(cls, key):
        return cls.resource_keys[key]

    @classmethod
    def remove_resource(cls, key):
        thing = cls.resource_keys[key]
        cls.os_resources.remove(thing)
        del cls.resource_keys[key]

    def status_timeout(self, things, thing_id, expected_status,
                       error_status='ERROR',
                       not_found_exception=nova_exceptions.NotFound):
        """
        Given a thing and an expected status, do a loop, sleeping
        for a configurable amount of time, checking for the
        expected status to show. At any time, if the returned
        status of the thing is ERROR, fail out.
        """
        self._status_timeout(things, thing_id,
                             expected_status=expected_status,
                             error_status=error_status,
                             not_found_exception=not_found_exception)

    def delete_timeout(self, things, thing_id,
                       error_status='ERROR',
                       not_found_exception=nova_exceptions.NotFound):
        """
        Given a thing, do a loop, sleeping
        for a configurable amount of time, checking for the
        deleted status to show. At any time, if the returned
        status of the thing is ERROR, fail out.
        """
        self._status_timeout(things,
                             thing_id,
                             allow_notfound=True,
                             error_status=error_status,
                             not_found_exception=not_found_exception)

    def _status_timeout(self,
                        things,
                        thing_id,
                        expected_status=None,
                        allow_notfound=False,
                        error_status='ERROR',
                        not_found_exception=nova_exceptions.NotFound):
        log_status = expected_status if expected_status else ''
        if allow_notfound:
            log_status += ' or NotFound' if log_status != '' else 'NotFound'

        def check_status():
            # python-novaclient has resources available to its client
            # that all implement a get() method taking an identifier
            # for the singular resource to retrieve.
            try:
                thing = things.get(thing_id)
            except not_found_exception:
                if allow_notfound:
                    return True
                else:
                    raise

            new_status = thing.status

            # Some components are reporting error status in lower case
            # so case sensitive comparisons can really mess things
            # up.
            if new_status.lower() == error_status.lower():
                message = ("%s failed to get to expected status (%s). "
                           "In %s state.") % (thing, expected_status,
                                              new_status)
                raise exceptions.BuildErrorException(message,
                                                     server_id=thing_id)
            elif new_status == expected_status and expected_status is not None:
                return True  # All good.
            LOG.debug("Waiting for %s to get to %s status. "
                      "Currently in %s status",
                      thing, log_status, new_status)
        if not tempest.test.call_until_true(
                check_status,
                CONF.compute.build_timeout,
                CONF.compute.build_interval):
            message = ("Timed out waiting for thing %s "
                       "to become %s") % (thing_id, log_status)
            raise exceptions.TimeoutException(message)

    def _create_loginable_secgroup_rule_nova(self, client=None,
                                             secgroup_id=None):
        # Add ssh + icmp ingress rules; defaults to the 'default' secgroup.
        if client is None:
            client = self.compute_client
        if secgroup_id is None:
            sgs = client.security_groups.list()
            for sg in sgs:
                if sg.name == 'default':
                    secgroup_id = sg.id

        # These rules are intended to permit inbound ssh and icmp
        # traffic from all sources, so no group_id is provided.
        # Setting a group_id would only permit traffic from ports
        # belonging to the same security group.
        rulesets = [
            {
                # ssh
                'ip_protocol': 'tcp',
                'from_port': 22,
                'to_port': 22,
                'cidr': '0.0.0.0/0',
            },
            {
                # ping
                'ip_protocol': 'icmp',
                'from_port': -1,
                'to_port': -1,
                'cidr': '0.0.0.0/0',
            }
        ]
        rules = list()
        for ruleset in rulesets:
            sg_rule = client.security_group_rules.create(secgroup_id,
                                                         **ruleset)
            self.set_resource(sg_rule.id, sg_rule)
            rules.append(sg_rule)
        return rules

    def create_server(self, client=None, name=None, image=None, flavor=None,
                      wait=True, create_kwargs=None):
        """Boot a server, track it for cleanup, and (optionally) wait for
        it to become ACTIVE.

        NOTE: create_kwargs previously defaulted to a mutable {} which this
        method mutates below ('nics'); that single shared dict leaked the
        chosen network into every later call that relied on the default.
        Use a None sentinel instead.
        """
        if create_kwargs is None:
            create_kwargs = {}
        if client is None:
            client = self.compute_client
        if name is None:
            name = data_utils.rand_name('scenario-server-')
        if image is None:
            image = CONF.compute.image_ref
        if flavor is None:
            flavor = CONF.compute.flavor_ref

        fixed_network_name = CONF.compute.fixed_network_name
        if 'nics' not in create_kwargs and fixed_network_name:
            networks = client.networks.list()
            # If several networks found, set the NetID on which to connect the
            # server to avoid the following error "Multiple possible networks
            # found, use a Network ID to be more specific."
            # See Tempest #1250866
            if len(networks) > 1:
                for network in networks:
                    if network.label == fixed_network_name:
                        create_kwargs['nics'] = [{'net-id': network.id}]
                        break
                # If we didn't find the network we were looking for :
                else:
                    msg = ("The network on which the NIC of the server must "
                           "be connected can not be found : "
                           "fixed_network_name=%s. Starting instance without "
                           "specifying a network.") % fixed_network_name
                    LOG.info(msg)
        LOG.debug("Creating a server (name: %s, image: %s, flavor: %s)",
                  name, image, flavor)
        server = client.servers.create(name, image, flavor, **create_kwargs)
        self.assertEqual(server.name, name)
        self.set_resource(name, server)
        if wait:
            self.status_timeout(client.servers, server.id, 'ACTIVE')
            # The instance retrieved on creation is missing network
            # details, necessitating retrieval after it becomes active to
            # ensure correct details.
            server = client.servers.get(server.id)
            self.set_resource(name, server)
        LOG.debug("Created server: %s", server)
        return server

    def create_volume(self, client=None, size=1, name=None,
                      snapshot_id=None, imageRef=None):
        """Create a cinder volume, track it, and wait for 'available'."""
        if client is None:
            client = self.volume_client
        if name is None:
            name = data_utils.rand_name('scenario-volume-')
        LOG.debug("Creating a volume (size: %s, name: %s)", size, name)
        volume = client.volumes.create(size=size, display_name=name,
                                       snapshot_id=snapshot_id,
                                       imageRef=imageRef)
        self.set_resource(name, volume)
        self.assertEqual(name, volume.display_name)
        self.status_timeout(client.volumes, volume.id, 'available')
        LOG.debug("Created volume: %s", volume)
        return volume

    def create_server_snapshot(self, server, compute_client=None,
                               image_client=None, name=None):
        """Snapshot `server` into a glance image and wait for 'active'."""
        if compute_client is None:
            compute_client = self.compute_client
        if image_client is None:
            image_client = self.image_client
        if name is None:
            name = data_utils.rand_name('scenario-snapshot-')
        LOG.debug("Creating a snapshot image for server: %s", server.name)
        image_id = compute_client.servers.create_image(server, name)
        self.addCleanup(image_client.images.delete, image_id)
        self.status_timeout(image_client.images, image_id, 'active')
        snapshot_image = image_client.images.get(image_id)
        self.assertEqual(name, snapshot_image.name)
        LOG.debug("Created snapshot image %s for server %s",
                  snapshot_image.name, server.name)
        return snapshot_image

    def create_keypair(self, client=None, name=None):
        """Create a nova keypair and register it for cleanup."""
        if client is None:
            client = self.compute_client
        if name is None:
            name = data_utils.rand_name('scenario-keypair-')
        keypair = client.keypairs.create(name)
        self.assertEqual(keypair.name, name)
        self.set_resource(name, keypair)
        return keypair

    def get_remote_client(self, server_or_ip, username=None, private_key=None):
        """Return an ssh RemoteClient for a server object or raw IP."""
        if isinstance(server_or_ip, six.string_types):
            ip = server_or_ip
        else:
            network_name_for_ssh = CONF.compute.network_for_ssh
            ip = server_or_ip.networks[network_name_for_ssh][0]
        if username is None:
            username = CONF.scenario.ssh_user
        if private_key is None:
            private_key = self.keypair.private_key
        return remote_client.RemoteClient(ip, username, pkey=private_key)

    def _log_console_output(self, servers=None):
        # Dump console logs (of the given or all servers) for debugging.
        if not servers:
            servers = self.compute_client.servers.list()
        for server in servers:
            LOG.debug('Console output for %s', server.id)
            LOG.debug(server.get_console_output())

    def wait_for_volume_status(self, status):
        volume_id = self.volume.id
        self.status_timeout(
            self.volume_client.volumes, volume_id, status)

    def _image_create(self, name, fmt, path, properties=None):
        """Upload the file at `path` as a glance image; return its id.

        NOTE: properties previously defaulted to a shared mutable {};
        use a None sentinel to avoid cross-call aliasing.
        """
        if properties is None:
            properties = {}
        name = data_utils.rand_name('%s-' % name)
        image_file = open(path, 'rb')
        self.addCleanup(image_file.close)
        params = {
            'name': name,
            'container_format': fmt,
            'disk_format': fmt,
            'is_public': 'True',
        }
        params.update(properties)
        image = self.image_client.images.create(**params)
        self.addCleanup(self.image_client.images.delete, image)
        self.assertEqual("queued", image.status)
        image.update(data=image_file)
        return image.id

    def glance_image_create(self):
        """Register the configured scenario image (qcow2, falling back to
        a uec-style aki/ari/ami triple) and store its id in self.image."""
        qcow2_img_path = (CONF.scenario.img_dir + "/" +
                          CONF.scenario.qcow2_img_file)
        aki_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.aki_img_file
        ari_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ari_img_file
        ami_img_path = CONF.scenario.img_dir + "/" + CONF.scenario.ami_img_file
        LOG.debug("paths: img: %s, ami: %s, ari: %s, aki: %s"
                  % (qcow2_img_path, ami_img_path, ari_img_path,
                     aki_img_path))
        try:
            self.image = self._image_create('scenario-img',
                                            'bare',
                                            qcow2_img_path,
                                            properties={'disk_format':
                                                        'qcow2'})
        except IOError:
            LOG.debug("A qcow2 image was not found. Try to get a uec image.")
            kernel = self._image_create('scenario-aki', 'aki', aki_img_path)
            ramdisk = self._image_create('scenario-ari', 'ari', ari_img_path)
            properties = {
                'properties': {'kernel_id': kernel, 'ramdisk_id': ramdisk}
            }
            self.image = self._image_create('scenario-ami', 'ami',
                                            path=ami_img_path,
                                            properties=properties)
        LOG.debug("image:%s" % self.image)
class BaremetalScenarioTest(OfficialClientTest):
    """Base class for scenario tests that drive Ironic baremetal nodes."""

    @classmethod
    def setUpClass(cls):
        super(BaremetalScenarioTest, cls).setUpClass()
        # Skip unless both the ironic service and the nova baremetal
        # compute driver are enabled in the configuration.
        if (not CONF.service_available.ironic or
                not CONF.baremetal.driver_enabled):
            msg = 'Ironic not available or Ironic compute driver not enabled'
            raise cls.skipException(msg)

        # use an admin client manager for baremetal client
        username, password, tenant = cls.admin_credentials()
        manager = clients.OfficialClientManager(username, password, tenant)
        cls.baremetal_client = manager.baremetal_client

        # allow any issues obtaining the node list to raise early
        cls.baremetal_client.node.list()

    def _node_state_timeout(self, node_id, state_attr,
                            target_states, timeout=10, interval=1):
        # Poll `state_attr` on the node every `interval` seconds until it
        # lands in one of `target_states`, or raise TimeoutException after
        # `timeout` seconds.
        if not isinstance(target_states, list):
            target_states = [target_states]

        def check_state():
            node = self.get_node(node_id=node_id)
            if getattr(node, state_attr) in target_states:
                return True
            return False

        if not tempest.test.call_until_true(
                check_state, timeout, interval):
            msg = ("Timed out waiting for node %s to reach %s state(s) %s" %
                   (node_id, state_attr, target_states))
            raise exceptions.TimeoutException(msg)

    def wait_provisioning_state(self, node_id, state, timeout):
        # Wait for the node's Ironic provision_state.
        self._node_state_timeout(
            node_id=node_id, state_attr='provision_state',
            target_states=state, timeout=timeout)

    def wait_power_state(self, node_id, state):
        # Wait for the node's power_state, bounded by the configured timeout.
        self._node_state_timeout(
            node_id=node_id, state_attr='power_state',
            target_states=state, timeout=CONF.baremetal.power_timeout)

    def wait_node(self, instance_id):
        """Waits for a node to be associated with instance_id."""
        # Imported locally so the module loads even without ironicclient.
        from ironicclient import exc as ironic_exceptions

        def _get_node():
            node = None
            try:
                node = self.get_node(instance_id=instance_id)
            except ironic_exceptions.HTTPNotFound:
                # Not associated yet; keep polling.
                pass
            return node is not None

        if not tempest.test.call_until_true(
                _get_node, CONF.baremetal.association_timeout, 1):
            msg = ('Timed out waiting to get Ironic node by instance id %s'
                   % instance_id)
            raise exceptions.TimeoutException(msg)

    def get_node(self, node_id=None, instance_id=None):
        # Look up a node by its id, or by the nova instance uuid bound to
        # it.  NOTE(review): returns None when neither argument is given.
        if node_id:
            return self.baremetal_client.node.get(node_id)
        elif instance_id:
            return self.baremetal_client.node.get_by_instance_uuid(instance_id)

    def get_ports(self, node_id):
        # Return full port objects (not just summaries) for every port on
        # the node.
        ports = []
        for port in self.baremetal_client.node.list_ports(node_id):
            ports.append(self.baremetal_client.port.get(port.uuid))
        return ports
class NetworkScenarioTest(OfficialClientTest):
    """
    Base class for network scenario tests
    """

    @classmethod
    def check_preconditions(cls):
        # Mark the class enabled only when neutron is configured AND its
        # endpoint actually answers a list_networks call.
        if (CONF.service_available.neutron):
            cls.enabled = True
            # verify that neutron_available is telling the truth
            try:
                cls.network_client.list_networks()
            except exc.EndpointNotFound:
                cls.enabled = False
                raise
        else:
            cls.enabled = False
            msg = 'Neutron not available'
            raise cls.skipException(msg)

    @classmethod
    def setUpClass(cls):
        super(NetworkScenarioTest, cls).setUpClass()
        # Cache the primary credentials' tenant id for the helpers below.
        cls.tenant_id = cls.manager.identity_client.tenant_id
def _create_network(self, tenant_id, namestart='network-smoke-'):
name = data_utils.rand_name(namestart)
body = dict(
network=dict(
name=name,
tenant_id=tenant_id,
),
)
result = self.network_client.create_network(body=body)
network = net_common.DeletableNetwork(client=self.network_client,
**result['network'])
self.assertEqual(network.name, name)
self.set_resource(name, network)
return network
def _list_networks(self, **kwargs):
nets = self.network_client.list_networks(**kwargs)
return nets['networks']
def _list_subnets(self, **kwargs):
subnets = self.network_client.list_subnets(**kwargs)
return subnets['subnets']
def _list_routers(self, **kwargs):
routers = self.network_client.list_routers(**kwargs)
return routers['routers']
def _list_ports(self, **kwargs):
ports = self.network_client.list_ports(**kwargs)
return ports['ports']
def _get_tenant_own_network_num(self, tenant_id):
nets = self._list_networks(tenant_id=tenant_id)
return len(nets)
def _get_tenant_own_subnet_num(self, tenant_id):
subnets = self._list_subnets(tenant_id=tenant_id)
return len(subnets)
def _get_tenant_own_port_num(self, tenant_id):
ports = self._list_ports(tenant_id=tenant_id)
return len(ports)
    def _create_subnet(self, network, namestart='subnet-smoke-', **kwargs):
        """
        Create a subnet for the given network within the cidr block
        configured for tenant networks.
        """
        def cidr_in_use(cidr, tenant_id):
            """
            :return True if subnet with cidr already exist in tenant
                False else
            """
            cidr_in_use = self._list_subnets(tenant_id=tenant_id, cidr=cidr)
            return len(cidr_in_use) != 0

        tenant_cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
        result = None
        # Repeatedly attempt subnet creation with sequential cidr
        # blocks until an unallocated block is found.
        for subnet_cidr in tenant_cidr.subnet(
                CONF.network.tenant_network_mask_bits):
            str_cidr = str(subnet_cidr)
            # Cheap pre-check; creation below may still race another test.
            if cidr_in_use(str_cidr, tenant_id=network.tenant_id):
                continue

            body = dict(
                subnet=dict(
                    name=data_utils.rand_name(namestart),
                    ip_version=4,
                    network_id=network.id,
                    tenant_id=network.tenant_id,
                    cidr=str_cidr,
                ),
            )
            # Caller-supplied attributes override the defaults above.
            body['subnet'].update(kwargs)
            try:
                result = self.network_client.create_subnet(body=body)
                break
            except exc.NeutronClientException as e:
                # Only an overlapping-cidr failure is retryable (another
                # tenant grabbed the block first); anything else is fatal.
                is_overlapping_cidr = 'overlaps with another subnet' in str(e)
                if not is_overlapping_cidr:
                    raise
        self.assertIsNotNone(result, 'Unable to allocate tenant network')
        subnet = net_common.DeletableSubnet(client=self.network_client,
                                            **result['subnet'])
        self.assertEqual(subnet.cidr, str_cidr)
        self.set_resource(data_utils.rand_name(namestart), subnet)
        return subnet
def _create_port(self, network, namestart='port-quotatest-'):
name = data_utils.rand_name(namestart)
body = dict(
port=dict(name=name,
network_id=network.id,
tenant_id=network.tenant_id))
result = self.network_client.create_port(body=body)
self.assertIsNotNone(result, 'Unable to allocate port')
port = net_common.DeletablePort(client=self.network_client,
**result['port'])
self.set_resource(name, port)
return port
def _get_server_port_id(self, server, ip_addr=None):
ports = self._list_ports(device_id=server.id, fixed_ip=ip_addr)
self.assertEqual(len(ports), 1,
"Unable to determine which port to target.")
return ports[0]['id']
def _create_floating_ip(self, thing, external_network_id, port_id=None):
if not port_id:
port_id = self._get_server_port_id(thing)
body = dict(
floatingip=dict(
floating_network_id=external_network_id,
port_id=port_id,
tenant_id=thing.tenant_id,
)
)
result = self.network_client.create_floatingip(body=body)
floating_ip = net_common.DeletableFloatingIp(
client=self.network_client,
**result['floatingip'])
self.set_resource(data_utils.rand_name('floatingip-'), floating_ip)
return floating_ip
def _associate_floating_ip(self, floating_ip, server):
port_id = self._get_server_port_id(server)
floating_ip.update(port_id=port_id)
self.assertEqual(port_id, floating_ip.port_id)
return floating_ip
def _disassociate_floating_ip(self, floating_ip):
"""
:param floating_ip: type DeletableFloatingIp
"""
floating_ip.update(port_id=None)
self.assertIsNone(floating_ip.port_id)
return floating_ip
def _ping_ip_address(self, ip_address, should_succeed=True):
cmd = ['ping', '-c1', '-w1', ip_address]
def ping():
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
proc.wait()
return (proc.returncode == 0) == should_succeed
return tempest.test.call_until_true(
ping, CONF.compute.ping_timeout, 1)
def _create_pool(self, lb_method, protocol, subnet_id):
"""Wrapper utility that returns a test pool."""
name = data_utils.rand_name('pool-')
body = {
"pool": {
"protocol": protocol,
"name": name,
"subnet_id": subnet_id,
"lb_method": lb_method
}
}
resp = self.network_client.create_pool(body=body)
pool = net_common.DeletablePool(client=self.network_client,
**resp['pool'])
self.assertEqual(pool['name'], name)
self.set_resource(name, pool)
return pool
def _create_member(self, address, protocol_port, pool_id):
"""Wrapper utility that returns a test member."""
body = {
"member": {
"protocol_port": protocol_port,
"pool_id": pool_id,
"address": address
}
}
resp = self.network_client.create_member(body)
member = net_common.DeletableMember(client=self.network_client,
**resp['member'])
self.set_resource(data_utils.rand_name('member-'), member)
return member
def _create_vip(self, protocol, protocol_port, subnet_id, pool_id):
"""Wrapper utility that returns a test vip."""
name = data_utils.rand_name('vip-')
body = {
"vip": {
"protocol": protocol,
"name": name,
"subnet_id": subnet_id,
"pool_id": pool_id,
"protocol_port": protocol_port
}
}
resp = self.network_client.create_vip(body)
vip = net_common.DeletableVip(client=self.network_client,
**resp['vip'])
self.assertEqual(vip['name'], name)
self.set_resource(name, vip)
return vip
def _check_vm_connectivity(self, ip_address,
username=None,
private_key=None,
should_connect=True):
"""
:param ip_address: server to test against
:param username: server's ssh username
:param private_key: server's ssh private key to be used
:param should_connect: True/False indicates positive/negative test
positive - attempt ping and ssh
negative - attempt ping and fail if succeed
:raises: AssertError if the result of the connectivity check does
not match the value of the should_connect param
"""
if should_connect:
msg = "Timed out waiting for %s to become reachable" % ip_address
else:
msg = "ip address %s is reachable" % ip_address
self.assertTrue(self._ping_ip_address(ip_address,
should_succeed=should_connect),
msg=msg)
if should_connect:
# no need to check ssh for negative connectivity
linux_client = self.get_remote_client(ip_address, username,
private_key)
linux_client.validate_authentication()
    def _check_remote_connectivity(self, source, dest, should_succeed=True):
        """
        check ping server via source ssh connection

        :param source: RemoteClient: an ssh connection from which to ping
        :param dest: and IP to ping against
        :param should_succeed: boolean should ping succeed or not

        :returns: boolean -- should_succeed == ping
        :returns: ping is false if ping failed
        """
        def ping_remote():
            try:
                source.ping_host(dest)
            except exceptions.SSHExecCommandFailed:
                LOG.exception('Failed to ping host via ssh connection')
                # Ping failed: that only counts as success when a failure
                # was expected.
                return not should_succeed
            return should_succeed

        # Retry once per second until the expectation holds or the compute
        # ping timeout expires.
        return tempest.test.call_until_true(ping_remote,
                                            CONF.compute.ping_timeout,
                                            1)
def _create_security_group_nova(self, client=None,
namestart='secgroup-smoke-',
tenant_id=None):
if client is None:
client = self.compute_client
# Create security group
sg_name = data_utils.rand_name(namestart)
sg_desc = sg_name + " description"
secgroup = client.security_groups.create(sg_name, sg_desc)
self.assertEqual(secgroup.name, sg_name)
self.assertEqual(secgroup.description, sg_desc)
self.set_resource(sg_name, secgroup)
# Add rules to the security group
self._create_loginable_secgroup_rule_nova(client, secgroup.id)
return secgroup
def _create_security_group_neutron(self, tenant_id, client=None,
namestart='secgroup-smoke-'):
if client is None:
client = self.network_client
secgroup = self._create_empty_security_group(namestart=namestart,
client=client,
tenant_id=tenant_id)
# Add rules to the security group
rules = self._create_loginable_secgroup_rule_neutron(secgroup=secgroup)
for rule in rules:
self.assertEqual(tenant_id, rule.tenant_id)
self.assertEqual(secgroup.id, rule.security_group_id)
return secgroup
def _create_empty_security_group(self, tenant_id, client=None,
namestart='secgroup-smoke-'):
"""Create a security group without rules.
Default rules will be created:
- IPv4 egress to any
- IPv6 egress to any
:param tenant_id: secgroup will be created in this tenant
:returns: DeletableSecurityGroup -- containing the secgroup created
"""
if client is None:
client = self.network_client
sg_name = data_utils.rand_name(namestart)
sg_desc = sg_name + " description"
sg_dict = dict(name=sg_name,
description=sg_desc)
sg_dict['tenant_id'] = tenant_id
body = dict(security_group=sg_dict)
result = client.create_security_group(body=body)
secgroup = net_common.DeletableSecurityGroup(
client=client,
**result['security_group']
)
self.assertEqual(secgroup.name, sg_name)
self.assertEqual(tenant_id, secgroup.tenant_id)
self.assertEqual(secgroup.description, sg_desc)
self.set_resource(sg_name, secgroup)
return secgroup
def _default_security_group(self, tenant_id, client=None):
"""Get default secgroup for given tenant_id.
:returns: DeletableSecurityGroup -- default secgroup for given tenant
"""
if client is None:
client = self.network_client
sgs = [
sg for sg in client.list_security_groups().values()[0]
if sg['tenant_id'] == tenant_id and sg['name'] == 'default'
]
msg = "No default security group for tenant %s." % (tenant_id)
self.assertTrue(len(sgs) > 0, msg)
if len(sgs) > 1:
msg = "Found %d default security groups" % len(sgs)
raise exc.NeutronClientNoUniqueMatch(msg=msg)
return net_common.DeletableSecurityGroup(client=client,
**sgs[0])
def _create_security_group_rule(self, client=None, secgroup=None,
tenant_id=None, **kwargs):
"""Create a rule from a dictionary of rule parameters.
Create a rule in a secgroup. if secgroup not defined will search for
default secgroup in tenant_id.
:param secgroup: type DeletableSecurityGroup.
:param secgroup_id: search for secgroup by id
default -- choose default secgroup for given tenant_id
:param tenant_id: if secgroup not passed -- the tenant in which to
search for default secgroup
:param kwargs: a dictionary containing rule parameters:
for example, to allow incoming ssh:
rule = {
direction: 'ingress'
protocol:'tcp',
port_range_min: 22,
port_range_max: 22
}
"""
if client is None:
client = self.network_client
if secgroup is None:
secgroup = self._default_security_group(tenant_id)
ruleset = dict(security_group_id=secgroup.id,
tenant_id=secgroup.tenant_id,
)
ruleset.update(kwargs)
body = dict(security_group_rule=dict(ruleset))
sg_rule = client.create_security_group_rule(body=body)
sg_rule = net_common.DeletableSecurityGroupRule(
client=client,
**sg_rule['security_group_rule']
)
self.set_resource(sg_rule.id, sg_rule)
self.assertEqual(secgroup.tenant_id, sg_rule.tenant_id)
self.assertEqual(secgroup.id, sg_rule.security_group_id)
return sg_rule
def _create_loginable_secgroup_rule_neutron(self, client=None,
secgroup=None):
"""These rules are intended to permit inbound ssh and icmp
traffic from all sources, so no group_id is provided.
Setting a group_id would only permit traffic from ports
belonging to the same security group.
"""
if client is None:
client = self.network_client
rules = []
rulesets = [
dict(
# ssh
protocol='tcp',
port_range_min=22,
port_range_max=22,
),
dict(
# ping
protocol='icmp',
)
]
for ruleset in rulesets:
for r_direction in ['ingress', 'egress']:
ruleset['direction'] = r_direction
try:
sg_rule = self._create_security_group_rule(
client=client, secgroup=secgroup, **ruleset)
except exc.NeutronClientException as ex:
# if rule already exist - skip rule and continue
if not (ex.status_code is 409 and 'Security group rule'
' already exists' in ex.message):
raise ex
else:
self.assertEqual(r_direction, sg_rule.direction)
rules.append(sg_rule)
return rules
def _ssh_to_server(self, server, private_key):
ssh_login = CONF.compute.image_ssh_user
return self.get_remote_client(server,
username=ssh_login,
private_key=private_key)
def _show_quota_network(self, tenant_id):
quota = self.network_client.show_quota(tenant_id)
return quota['quota']['network']
def _show_quota_subnet(self, tenant_id):
quota = self.network_client.show_quota(tenant_id)
return quota['quota']['subnet']
def _show_quota_port(self, tenant_id):
quota = self.network_client.show_quota(tenant_id)
return quota['quota']['port']
def _get_router(self, tenant_id):
"""Retrieve a router for the given tenant id.
If a public router has been configured, it will be returned.
If a public router has not been configured, but a public
network has, a tenant router will be created and returned that
routes traffic to the public network.
"""
router_id = CONF.network.public_router_id
network_id = CONF.network.public_network_id
if router_id:
result = self.network_client.show_router(router_id)
return net_common.AttributeDict(**result['router'])
elif network_id:
router = self._create_router(tenant_id)
router.add_gateway(network_id)
return router
else:
raise Exception("Neither of 'public_router_id' or "
"'public_network_id' has been defined.")
def _create_router(self, tenant_id, namestart='router-smoke-'):
name = data_utils.rand_name(namestart)
body = dict(
router=dict(
name=name,
admin_state_up=True,
tenant_id=tenant_id,
),
)
result = self.network_client.create_router(body=body)
router = net_common.DeletableRouter(client=self.network_client,
**result['router'])
self.assertEqual(router.name, name)
self.set_resource(name, router)
return router
def _create_networks(self, tenant_id=None):
"""Create a network with a subnet connected to a router.
:returns: network, subnet, router
"""
if tenant_id is None:
tenant_id = self.tenant_id
network = self._create_network(tenant_id)
router = self._get_router(tenant_id)
subnet = self._create_subnet(network)
subnet.add_to_router(router.id)
return network, subnet, router
class OrchestrationScenarioTest(OfficialClientTest):
    """
    Base class for orchestration scenario tests
    """

    @classmethod
    def setUpClass(cls):
        super(OrchestrationScenarioTest, cls).setUpClass()
        if not CONF.service_available.heat:
            raise cls.skipException("Heat support is required")

    @classmethod
    def credentials(cls):
        """Return admin credentials rebased onto the primary user's tenant."""
        admin_creds = auth.get_default_credentials('identity_admin')
        user_creds = auth.get_default_credentials('user')
        admin_creds.tenant_name = user_creds.tenant_name
        return admin_creds

    def _load_template(self, base_file, file_name):
        """Read a template file located next to `base_file`."""
        base_dir = os.path.dirname(os.path.realpath(base_file))
        with open(os.path.join(base_dir, file_name)) as template:
            return template.read()

    @classmethod
    def _stack_rand_name(cls):
        """Generate a random stack name prefixed with the class name."""
        return data_utils.rand_name(cls.__name__ + '-')

    @classmethod
    def _get_default_network(cls):
        """Return the configured fixed network, or None when absent."""
        target_name = CONF.compute.fixed_network_name
        for net in cls.network_client.list_networks()['networks']:
            if net['name'] == target_name:
                return net
tempest-2014.1.dev4108.gf22b6cc/tempest/scenario/test_aggregates_basic_ops.py 0000664 0001750 0001750 00000012666 12332757070 027140 0 ustar chuck chuck 0000000 0000000 # Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.common import tempest_fixtures as fixtures
from tempest.common.utils import data_utils
from tempest.openstack.common import log as logging
from tempest.scenario import manager
from tempest import test
LOG = logging.getLogger(__name__)
class TestAggregatesBasicOps(manager.OfficialClientTest):
    """
    Creates an aggregate within an availability zone
    Adds a host to the aggregate
    Checks aggregate details
    Updates aggregate's name
    Removes host from aggregate
    Deletes aggregate
    """
    @classmethod
    def credentials(cls):
        # Aggregate operations require admin privileges.
        return cls.admin_credentials()

    def _create_aggregate(self, **kwargs):
        """Create an aggregate, verify its fields, track it for cleanup."""
        aggregate = self.compute_client.aggregates.create(**kwargs)
        aggregate_name = kwargs['name']
        availability_zone = kwargs['availability_zone']
        self.assertEqual(aggregate.name, aggregate_name)
        self.assertEqual(aggregate.availability_zone, availability_zone)
        self.set_resource(aggregate.id, aggregate)
        # Interpolation deferred to the logging framework throughout.
        LOG.debug("Aggregate %s created.", aggregate.name)
        return aggregate

    def _delete_aggregate(self, aggregate):
        """Delete the aggregate and stop tracking it."""
        self.compute_client.aggregates.delete(aggregate.id)
        self.remove_resource(aggregate.id)
        LOG.debug("Aggregate %s deleted. ", aggregate.name)

    def _get_host_name(self):
        """Return the name of the first available compute host."""
        hosts = self.compute_client.hosts.list()
        self.assertTrue(len(hosts) >= 1)
        return hosts[0].host_name

    def _add_host(self, aggregate_name, host):
        """Add `host` to the aggregate and verify membership."""
        aggregate = self.compute_client.aggregates.add_host(aggregate_name,
                                                            host)
        self.assertIn(host, aggregate.hosts)
        LOG.debug("Host %s added to Aggregate %s.", host, aggregate.name)

    def _remove_host(self, aggregate_name, host):
        """Remove `host` from the aggregate and verify removal."""
        aggregate = self.compute_client.aggregates.remove_host(aggregate_name,
                                                               host)
        self.assertNotIn(host, aggregate.hosts)
        # NOTE: message previously read "removed to"; fixed to "from".
        LOG.debug("Host %s removed from Aggregate %s.", host, aggregate.name)

    def _check_aggregate_details(self, aggregate, aggregate_name, azone,
                                 hosts, metadata):
        """Re-fetch the aggregate and assert all its fields match."""
        aggregate = self.compute_client.aggregates.get(aggregate.id)
        self.assertEqual(aggregate_name, aggregate.name)
        self.assertEqual(azone, aggregate.availability_zone)
        self.assertEqual(aggregate.hosts, hosts)
        for meta_key in metadata.keys():
            self.assertIn(meta_key, aggregate.metadata)
            self.assertEqual(metadata[meta_key], aggregate.metadata[meta_key])
        LOG.debug("Aggregate %s details match.", aggregate.name)

    def _set_aggregate_metadata(self, aggregate, meta):
        """Set metadata on the aggregate and verify each key round-trips."""
        aggregate = self.compute_client.aggregates.set_metadata(aggregate.id,
                                                                meta)
        for key in meta:
            self.assertEqual(meta[key], aggregate.metadata[key])
        LOG.debug("Aggregate %s metadata updated successfully.",
                  aggregate.name)

    def _update_aggregate(self, aggregate, aggregate_name,
                          availability_zone):
        """Update name and/or availability zone; verify applied values."""
        values = {}
        if aggregate_name:
            values['name'] = aggregate_name
        if availability_zone:
            values['availability_zone'] = availability_zone
        if values:
            aggregate = self.compute_client.aggregates.update(aggregate.id,
                                                              values)
            # NOTE: the original loop rebound the name ``values`` while
            # iterating ``values.items()``, shadowing the dict; the loop
            # variable is renamed to avoid that.
            for key, expected in values.items():
                self.assertEqual(getattr(aggregate, key), expected)
        return aggregate

    @test.services('compute')
    def test_aggregate_basic_ops(self):
        self.useFixture(fixtures.LockFixture('availability_zone'))
        az = 'foo_zone'
        aggregate_name = data_utils.rand_name('aggregate-scenario')
        aggregate = self._create_aggregate(name=aggregate_name,
                                           availability_zone=az)

        metadata = {'meta_key': 'meta_value'}
        self._set_aggregate_metadata(aggregate, metadata)

        host = self._get_host_name()
        self._add_host(aggregate, host)
        self._check_aggregate_details(aggregate, aggregate_name, az, [host],
                                      metadata)

        aggregate_name = data_utils.rand_name('renamed-aggregate-scenario')
        aggregate = self._update_aggregate(aggregate, aggregate_name, None)

        additional_metadata = {'foo': 'bar'}
        self._set_aggregate_metadata(aggregate, additional_metadata)

        metadata.update(additional_metadata)
        self._check_aggregate_details(aggregate, aggregate.name, az, [host],
                                      metadata)

        self._remove_host(aggregate, host)
        self._delete_aggregate(aggregate)
tempest-2014.1.dev4108.gf22b6cc/tempest/scenario/utils.py 0000664 0001750 0001750 00000012505 12332757070 023076 0 ustar chuck chuck 0000000 0000000 # Copyright 2013 Hewlett-Packard, Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import re
import string
import unicodedata
import testscenarios
import testtools
from tempest import auth
from tempest import clients
from tempest.common.utils import misc
from tempest import config
CONF = config.CONF
@misc.singleton
class ImageUtils(object):

    # Fallback login when no regex matches the image name.
    default_ssh_user = 'root'

    def __init__(self):
        # Load configuration items
        self.ssh_users = json.loads(CONF.input_scenario.ssh_user_regex)
        self.non_ssh_image_pattern = \
            CONF.input_scenario.non_ssh_image_regex
        # Setup clients
        ocm = clients.OfficialClientManager(
            auth.get_default_credentials('user'))
        self.client = ocm.compute_client

    def ssh_user(self, image_id):
        """Return the ssh login for the image; first matching regex wins."""
        image = self.client.images.get(image_id)
        for regex, user in self.ssh_users:
            if re.match(regex, image.name) is not None:
                return user
        return self.default_ssh_user

    def _is_sshable_image(self, image):
        """True unless the image name matches the non-ssh pattern."""
        match = re.search(pattern=self.non_ssh_image_pattern,
                          string=str(image.name))
        return match is None

    def is_sshable_image(self, image_id):
        """Look up the image by id and check whether it accepts ssh."""
        return self._is_sshable_image(self.client.images.get(image_id))

    def _is_flavor_enough(self, flavor, image):
        """True when the flavor's disk can hold the image's minimum disk."""
        return image.minDisk <= flavor.disk

    def is_flavor_enough(self, flavor_id, image_id):
        """Fetch flavor and image by id and compare disk requirements."""
        image = self.client.images.get(image_id)
        flavor = self.client.flavors.get(flavor_id)
        return self._is_flavor_enough(flavor, image)
@misc.singleton
class InputScenarioUtils(object):

    """
    Example usage:

    import testscenarios
    (...)
    load_tests = testscenarios.load_tests_apply_scenarios


    class TestInputScenario(manager.OfficialClientTest):

        scenario_utils = utils.InputScenarioUtils()
        scenario_flavor = scenario_utils.scenario_flavors
        scenario_image = scenario_utils.scenario_images
        scenarios = testscenarios.multiply_scenarios(scenario_image,
                                                     scenario_flavor)

        def test_create_server_metadata(self):
            name = rand_name('instance')
            _ = self.compute_client.servers.create(name=name,
                                                   flavor=self.flavor_ref,
                                                   image=self.image_ref)
    """

    # Characters allowed to survive name normalization.
    validchars = "-_." + string.ascii_letters + string.digits

    def __init__(self):
        ocm = clients.OfficialClientManager(
            auth.get_default_credentials('user', fill_in=False))
        self.client = ocm.compute_client
        self.image_pattern = CONF.input_scenario.image_regex
        self.flavor_pattern = CONF.input_scenario.flavor_regex

    def _normalize_name(self, name):
        """ASCII-fold `name` and strip characters not in validchars."""
        ascii_name = unicodedata.normalize('NFKD', name).encode('ASCII',
                                                                'ignore')
        return ''.join(ch for ch in ascii_name if ch in self.validchars)

    @property
    def scenario_images(self):
        """
        :return: a scenario with name and uuid of images
        """
        if not CONF.service_available.glance:
            return []
        if not hasattr(self, '_scenario_images'):
            # Compute once and memoize on the instance.
            images = self.client.images.list(detailed=False)
            self._scenario_images = [
                (self._normalize_name(img.name), dict(image_ref=img.id))
                for img in images
                if re.search(self.image_pattern, str(img.name))
            ]
        return self._scenario_images

    @property
    def scenario_flavors(self):
        """
        :return: a scenario with name and uuid of flavors
        """
        if not hasattr(self, '_scenario_flavors'):
            # Compute once and memoize on the instance.
            flavors = self.client.flavors.list(detailed=False)
            self._scenario_flavors = [
                (self._normalize_name(flv.name), dict(flavor_ref=flv.id))
                for flv in flavors
                if re.search(self.flavor_pattern, str(flv.name))
            ]
        return self._scenario_flavors
def load_tests_input_scenario_utils(*args):
    """
    Wrapper for testscenarios to set the scenarios to avoid running a getattr
    on the CONF object at import.
    """
    # Support both load_tests protocols:
    # (loader, standard_tests, pattern) and (standard_tests, module, loader).
    if getattr(args[0], 'suiteClass', None) is not None:
        loader, standard_tests, pattern = args
    else:
        standard_tests, module, loader = args
    scenario_utils = InputScenarioUtils()
    flavor_scenarios = scenario_utils.scenario_flavors
    image_scenarios = scenario_utils.scenario_images
    for test in testtools.iterate_tests(standard_tests):
        setattr(test, 'scenarios', testscenarios.multiply_scenarios(
            image_scenarios,
            flavor_scenarios))
    return testscenarios.load_tests_apply_scenarios(*args)
tempest-2014.1.dev4108.gf22b6cc/tempest/scenario/test_server_advanced_ops.py 0000664 0001750 0001750 00000006733 12332757070 027017 0 ustar chuck chuck 0000000 0000000 # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
from tempest import config
from tempest.openstack.common import log as logging
from tempest.scenario import manager
from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
class TestServerAdvancedOps(manager.OfficialClientTest):
    """
    This test case stresses some advanced server instance operations:

     * Resizing an instance
     * Sequence suspend resume
    """

    @classmethod
    def setUpClass(cls):
        cls.set_network_resources()
        super(TestServerAdvancedOps, cls).setUpClass()

        if CONF.compute.flavor_ref_alt == CONF.compute.flavor_ref:
            msg = "Skipping test - flavor_ref and flavor_ref_alt are identical"
            raise cls.skipException(msg)

    @testtools.skipUnless(CONF.compute_feature_enabled.resize,
                          'Resize is not available.')
    @test.services('compute')
    def test_resize_server_confirm(self):
        # We create an instance for use in this test
        instance = self.create_server()
        instance_id = instance.id
        resize_flavor = CONF.compute.flavor_ref_alt
        LOG.debug("Resizing instance %s from flavor %s to flavor %s",
                  instance.id, instance.flavor, resize_flavor)
        instance.resize(resize_flavor)
        self.status_timeout(self.compute_client.servers, instance_id,
                            'VERIFY_RESIZE')

        LOG.debug("Confirming resize of instance %s", instance_id)
        instance.confirm_resize()
        self.status_timeout(
            self.compute_client.servers, instance_id, 'ACTIVE')

    def _suspend_then_resume(self, instance):
        """Suspend the instance, wait for SUSPENDED, resume, wait ACTIVE."""
        instance_id = instance.id
        LOG.debug("Suspending instance %s. Current status: %s",
                  instance_id, instance.status)
        instance.suspend()
        self.status_timeout(self.compute_client.servers, instance_id,
                            'SUSPENDED')
        LOG.debug("Resuming instance %s. Current status: %s",
                  instance_id, instance.status)
        instance.resume()
        self.status_timeout(self.compute_client.servers, instance_id,
                            'ACTIVE')

    @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
                          'Suspend is not available.')
    @test.services('compute')
    def test_server_sequence_suspend_resume(self):
        # We create an instance for use in this test
        instance = self.create_server()
        # NOTE: the suspend/resume cycle was previously duplicated inline
        # verbatim; run the identical cycle twice via a helper instead.
        for _ in range(2):
            self._suspend_then_resume(instance)
tempest-2014.1.dev4108.gf22b6cc/tempest/scenario/test_volume_boot_pattern.py 0000664 0001750 0001750 00000016702 12332757070 027067 0 ustar chuck chuck 0000000 0000000 # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.common.utils import data_utils
from tempest import config
from tempest.openstack.common import log
from tempest.scenario import manager
from tempest import test
CONF = config.CONF
LOG = log.getLogger(__name__)
class TestVolumeBootPattern(manager.OfficialClientTest):

    """
    This test case attempts to reproduce the following steps:

    * Create in Cinder some bootable volume importing a Glance image
    * Boot an instance from the bootable volume
    * Write content to the volume
    * Delete an instance and Boot a new instance from the volume
    * Check written content in the instance
    * Create a volume snapshot while the instance is running
    * Boot an additional instance from the new snapshot based volume
    * Check written content in the instance booted from snapshot
    """
    @classmethod
    def setUpClass(cls):
        super(TestVolumeBootPattern, cls).setUpClass()

        # The snapshot-based steps of the scenario need Cinder snapshots.
        if not CONF.volume_feature_enabled.snapshot:
            raise cls.skipException("Cinder volume snapshots are disabled")

    def _create_volume_from_image(self):
        """Create a bootable volume from the configured Glance image."""
        img_uuid = CONF.compute.image_ref
        vol_name = data_utils.rand_name('volume-origin')
        return self.create_volume(name=vol_name, imageRef=img_uuid)

    def _boot_instance_from_volume(self, vol_id, keypair):
        """Boot a server whose root disk is the given volume."""
        # NOTE(gfidente): the syntax for block_device_mapping is
        # dev_name=id:type:size:delete_on_terminate
        # where type needs to be "snap" if the server is booted
        # from a snapshot, size instead can be safely left empty
        bd_map = {
            'vda': vol_id + ':::0'
        }
        create_kwargs = {
            'block_device_mapping': bd_map,
            'key_name': keypair.name
        }
        # image='' because the instance boots from the mapped volume.
        return self.create_server(image='', create_kwargs=create_kwargs)

    def _create_snapshot_from_volume(self, vol_id):
        """Snapshot the volume and wait until the snapshot is available."""
        volume_snapshots = self.volume_client.volume_snapshots
        snap_name = data_utils.rand_name('snapshot')
        # force=True allows snapshotting while the volume is in use.
        snap = volume_snapshots.create(volume_id=vol_id,
                                       force=True,
                                       display_name=snap_name)
        self.set_resource(snap.id, snap)
        self.status_timeout(volume_snapshots,
                            snap.id,
                            'available')
        return snap

    def _create_volume_from_snapshot(self, snap_id):
        """Create a new volume backed by the given snapshot."""
        vol_name = data_utils.rand_name('volume')
        return self.create_volume(name=vol_name, snapshot_id=snap_id)

    def _stop_instances(self, instances):
        # NOTE(gfidente): two loops so we do not wait for the status twice
        for i in instances:
            self.compute_client.servers.stop(i)
        for i in instances:
            self.status_timeout(self.compute_client.servers,
                                i.id,
                                'SHUTOFF')

    def _detach_volumes(self, volumes):
        # NOTE(gfidente): two loops so we do not wait for the status twice
        for v in volumes:
            self.volume_client.volumes.detach(v)
        for v in volumes:
            self.status_timeout(self.volume_client.volumes,
                                v.id,
                                'available')

    def _ssh_to_server(self, server, keypair):
        """Return an ssh client to the server via floating or fixed IP."""
        if CONF.compute.use_floatingip_for_ssh:
            floating_ip = self.compute_client.floating_ips.create()
            fip_name = data_utils.rand_name('scenario-fip')
            self.set_resource(fip_name, floating_ip)
            server.add_floating_ip(floating_ip)
            ip = floating_ip.ip
        else:
            network_name_for_ssh = CONF.compute.network_for_ssh
            ip = server.networks[network_name_for_ssh][0]

        try:
            return self.get_remote_client(
                ip,
                private_key=keypair.private_key)
        except Exception:
            # Dump the console log for debugging before propagating.
            LOG.exception('ssh to server failed')
            self._log_console_output()
            raise

    def _get_content(self, ssh_client):
        return ssh_client.exec_command('cat /tmp/text')

    def _write_text(self, ssh_client):
        """Write a random marker to /tmp/text; return what was read back."""
        text = data_utils.rand_name('text-')
        ssh_client.exec_command('echo "%s" > /tmp/text; sync' % (text))

        return self._get_content(ssh_client)

    def _delete_server(self, server):
        self.compute_client.servers.delete(server)
        self.delete_timeout(self.compute_client.servers, server.id)

    def _check_content_of_written_file(self, ssh_client, expected):
        actual = self._get_content(ssh_client)
        self.assertEqual(expected, actual)

    @test.services('compute', 'volume', 'image')
    def test_volume_boot_pattern(self):
        keypair = self.create_keypair()
        self._create_loginable_secgroup_rule_nova()

        # create an instance from volume
        volume_origin = self._create_volume_from_image()
        instance_1st = self._boot_instance_from_volume(volume_origin.id,
                                                       keypair)

        # write content to volume on instance
        ssh_client_for_instance_1st = self._ssh_to_server(instance_1st,
                                                          keypair)
        text = self._write_text(ssh_client_for_instance_1st)

        # delete instance
        self._delete_server(instance_1st)

        # create a 2nd instance from volume
        instance_2nd = self._boot_instance_from_volume(volume_origin.id,
                                                       keypair)

        # check the content of written file
        ssh_client_for_instance_2nd = self._ssh_to_server(instance_2nd,
                                                          keypair)
        self._check_content_of_written_file(ssh_client_for_instance_2nd, text)

        # snapshot a volume
        snapshot = self._create_snapshot_from_volume(volume_origin.id)

        # create a 3rd instance from snapshot
        volume = self._create_volume_from_snapshot(snapshot.id)
        instance_from_snapshot = self._boot_instance_from_volume(volume.id,
                                                                 keypair)

        # check the content of written file
        ssh_client = self._ssh_to_server(instance_from_snapshot, keypair)
        self._check_content_of_written_file(ssh_client, text)

        # NOTE(gfidente): ensure resources are in clean state for
        # deletion operations to succeed
        self._stop_instances([instance_2nd, instance_from_snapshot])
        self._detach_volumes([volume_origin, volume])
class TestVolumeBootPatternV2(TestVolumeBootPattern):
    """Variant of TestVolumeBootPattern using v2 block device mappings."""

    def _boot_instance_from_volume(self, vol_id, keypair):
        # A single v2 entry describing the boot volume; the root disk is
        # boot_index 0 and is kept when the server is terminated.
        boot_device = {
            'uuid': vol_id,
            'source_type': 'volume',
            'destination_type': 'volume',
            'boot_index': 0,
            'delete_on_termination': False,
        }
        server_kwargs = {
            'block_device_mapping_v2': [boot_device],
            'key_name': keypair.name
        }
        # image='' because the instance boots from the mapped volume.
        return self.create_server(image='', create_kwargs=server_kwargs)
tempest-2014.1.dev4108.gf22b6cc/tempest/scenario/__init__.py 0000664 0001750 0001750 00000000000 12332757070 023460 0 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/scenario/test_baremetal_basic_ops.py 0000664 0001750 0001750 00000012251 12332757070 026751 0 ustar chuck chuck 0000000 0000000 #
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import config
from tempest.openstack.common import log as logging
from tempest.scenario import manager
from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
# power/provision states as of icehouse
class PowerStates(object):
    """Possible power states of an Ironic node.

    String values mirror the Ironic power-state names as of icehouse
    (see the module-level note above the class).
    """
    POWER_ON = 'power on'
    POWER_OFF = 'power off'
    REBOOT = 'rebooting'
    SUSPEND = 'suspended'
class ProvisionStates(object):
    """Possible provision states of an Ironic node.

    String values mirror the Ironic provision-state names as of icehouse;
    NOSTATE (None) denotes an unprovisioned node.
    """
    NOSTATE = None
    INIT = 'initializing'
    ACTIVE = 'active'
    BUILDING = 'building'
    DEPLOYWAIT = 'wait call-back'
    DEPLOYING = 'deploying'
    DEPLOYFAIL = 'deploy failed'
    DEPLOYDONE = 'deploy complete'
    DELETING = 'deleting'
    DELETED = 'deleted'
    ERROR = 'error'
class BaremetalBasicOptsPXESSH(manager.BaremetalScenarioTest):
    """
    This smoke test tests the pxe_ssh Ironic driver. It follows this basic
    set of operations:
        * Creates a keypair
        * Boots an instance using the keypair
        * Monitors the associated Ironic node for power and
          expected state transitions
        * Validates Ironic node's driver_info has been properly
          updated
        * Validates Ironic node's port data has been properly updated
        * Verifies SSH connectivity using created keypair via fixed IP
        * Associates a floating ip
        * Verifies SSH connectivity using created keypair via floating IP
        * Deletes instance
        * Monitors the associated Ironic node for power and
          expected state transitions
    """
    def add_keypair(self):
        self.keypair = self.create_keypair()

    def add_floating_ip(self):
        """Create a floating IP, attach it to the instance, return the IP."""
        floating_ip = self.compute_client.floating_ips.create()
        self.instance.add_floating_ip(floating_ip)
        return floating_ip.ip

    def verify_connectivity(self, ip=None):
        """Assert ssh authentication works, via the given IP if provided."""
        if ip:
            dest = self.get_remote_client(ip)
        else:
            dest = self.get_remote_client(self.instance)
        dest.validate_authentication()

    def validate_driver_info(self):
        """Check the node's driver_info matches flavor extras and image."""
        f_id = self.instance.flavor['id']
        flavor_extra = self.compute_client.flavors.get(f_id).get_keys()
        driver_info = self.node.driver_info
        self.assertEqual(driver_info['pxe_deploy_kernel'],
                         flavor_extra['baremetal:deploy_kernel_id'])
        self.assertEqual(driver_info['pxe_deploy_ramdisk'],
                         flavor_extra['baremetal:deploy_ramdisk_id'])
        self.assertEqual(driver_info['pxe_image_source'],
                         self.instance.image['id'])

    def validate_ports(self):
        """Check each Ironic port maps to the instance's Neutron port."""
        for port in self.get_ports(self.node.uuid):
            n_port_id = port.extra['vif_port_id']
            n_port = self.network_client.show_port(n_port_id)['port']
            self.assertEqual(n_port['device_id'], self.instance.id)
            self.assertEqual(n_port['mac_address'], port.address)

    def boot_instance(self):
        """Boot a server on the node and wait for it to become ACTIVE."""
        create_kwargs = {
            'key_name': self.keypair.id
        }
        self.instance = self.create_server(
            wait=False, create_kwargs=create_kwargs)

        self.set_resource('instance', self.instance)

        self.wait_node(self.instance.id)
        self.node = self.get_node(instance_id=self.instance.id)

        self.wait_power_state(self.node.uuid, PowerStates.POWER_ON)

        # The node may pass through DEPLOYWAIT before reaching ACTIVE.
        self.wait_provisioning_state(
            self.node.uuid,
            [ProvisionStates.DEPLOYWAIT, ProvisionStates.ACTIVE],
            timeout=15)

        self.wait_provisioning_state(self.node.uuid, ProvisionStates.ACTIVE,
                                     timeout=CONF.baremetal.active_timeout)

        self.status_timeout(
            self.compute_client.servers, self.instance.id, 'ACTIVE')

        # Refresh node and instance to pick up post-deploy attributes.
        self.node = self.get_node(instance_id=self.instance.id)
        self.instance = self.compute_client.servers.get(self.instance.id)

    def terminate_instance(self):
        """Delete the server and wait for the node to unprovision."""
        self.instance.delete()
        self.remove_resource('instance')
        self.wait_power_state(self.node.uuid, PowerStates.POWER_OFF)
        self.wait_provisioning_state(
            self.node.uuid,
            ProvisionStates.NOSTATE,
            timeout=CONF.baremetal.unprovision_timeout)

    @test.services('baremetal', 'compute', 'image', 'network')
    def test_baremetal_server_ops(self):
        self.add_keypair()
        self.boot_instance()
        self.validate_driver_info()
        self.validate_ports()
        self.verify_connectivity()
        floating_ip = self.add_floating_ip()
        self.verify_connectivity(ip=floating_ip)
        self.terminate_instance()
tempest-2014.1.dev4108.gf22b6cc/tempest/scenario/README.rst 0000664 0001750 0001750 00000003031 12332757070 023045 0 ustar chuck chuck 0000000 0000000 Tempest Field Guide to Scenario tests
=====================================
What are these tests?
---------------------
Scenario tests are "through path" tests of OpenStack
function. Complicated setups where one part might depend on completion
of a previous part. They ideally involve the integration between
multiple OpenStack services to exercise the touch points between them.
Any scenario test should have a real-life use case. An example would be:
- "As an operator I want to start with a blank environment":
1. upload a glance image
2. deploy a vm from it
3. ssh to the guest
4. create a snapshot of the vm
Why are these tests in tempest?
-------------------------------
This is one of tempest's core purposes: testing the integration between
projects.
Scope of these tests
--------------------
Scenario tests should use the official python client libraries for
OpenStack, as they provide a more realistic approach in how people
will interact with the services.
Tests should be tagged with which services they exercise, as
determined by which client libraries are used directly by the test.
Example of a good test
----------------------
While we are looking for interaction of 2 or more services, be
specific in your interactions. A giant "this is my data center" smoke
test is hard to debug when it goes wrong.
A flow of interactions between glance and nova, like in the
introduction, is a good example. Especially if it involves a repeated
interaction when a resource is setup, modified, detached, and then
reused later again.
tempest-2014.1.dev4108.gf22b6cc/tempest/scenario/test_swift_basic_ops.py 0000664 0001750 0001750 00000010163 12332757070 026151 0 ustar chuck chuck 0000000 0000000 # Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.common.utils import data_utils
from tempest import config
from tempest.openstack.common import log as logging
from tempest.scenario import manager
from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
class TestSwiftBasicOps(manager.OfficialClientTest):
    """
    Test swift with the follow operations:
     * get swift stat.
     * create container.
     * upload a file to the created container.
     * list container's objects and assure that the uploaded file is present.
     * delete object from container.
     * list container's objects and assure that the deleted file is gone.
     * delete a container.
     * list containers and assure that the deleted container is gone.
    """

    @classmethod
    def setUpClass(cls):
        cls.set_network_resources()
        super(TestSwiftBasicOps, cls).setUpClass()
        if not CONF.service_available.swift:
            skip_msg = ("%s skipped as swift is not available" %
                        cls.__name__)
            raise cls.skipException(skip_msg)

    def _get_swift_stat(self):
        """get swift status for our user account."""
        self.object_storage_client.get_account()
        LOG.debug('Swift status information obtained successfully')

    def _create_container(self, container_name=None):
        """Create a container (random name unless given); return its name."""
        name = container_name or data_utils.rand_name(
            'swift-scenario-container')
        self.object_storage_client.put_container(name)
        # look for the container to assure it is created
        self._list_and_check_container_objects(name)
        # Lazy %-style args instead of eager string interpolation.
        LOG.debug('Container %s created', name)
        return name

    def _delete_container(self, container_name):
        self.object_storage_client.delete_container(container_name)
        LOG.debug('Container %s deleted', container_name)

    def _upload_object_to_container(self, container_name, obj_name=None):
        """Upload a small random text object; return the object name."""
        obj_name = obj_name or data_utils.rand_name('swift-scenario-object')
        self.object_storage_client.put_object(container_name, obj_name,
                                              data_utils.rand_name('obj_data'),
                                              content_type='text/plain')
        return obj_name

    def _delete_object(self, container_name, filename):
        self.object_storage_client.delete_object(container_name, filename)
        self._list_and_check_container_objects(container_name,
                                               not_present_obj=[filename])

    def _list_and_check_container_objects(self, container_name,
                                          present_obj=None,
                                          not_present_obj=None):
        """
        List objects for a given container and assert which are present and
        which are not.

        Defaults are None sentinels rather than mutable list literals to
        avoid the shared-default-argument pitfall.
        """
        meta, response = self.object_storage_client.get_container(
            container_name)
        # create a list with file name only
        object_list = [obj['name'] for obj in response]
        if present_obj:
            for obj in present_obj:
                self.assertIn(obj, object_list)
        if not_present_obj:
            for obj in not_present_obj:
                self.assertNotIn(obj, object_list)

    @test.services('object_storage')
    def test_swift_basic_ops(self):
        self._get_swift_stat()
        container_name = self._create_container()
        obj_name = self._upload_object_to_container(container_name)
        self._list_and_check_container_objects(container_name, [obj_name])
        self._delete_object(container_name, obj_name)
        self._delete_container(container_name)
tempest-2014.1.dev4108.gf22b6cc/tempest/scenario/test_snapshot_pattern.py 0000664 0001750 0001750 00000007145 12332757070 026375 0 ustar chuck chuck 0000000 0000000 # Copyright 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import config
from tempest.openstack.common import log
from tempest.scenario import manager
from tempest import test
CONF = config.CONF
LOG = log.getLogger(__name__)
class TestSnapshotPattern(manager.OfficialClientTest):
    """
    This test is for snapshotting an instance and booting with it.
    The following is the scenario outline:
     * boot a instance and create a timestamp file in it
     * snapshot the instance
     * boot a second instance from the snapshot
     * check the existence of the timestamp file in the second instance
    """

    def _boot_image(self, image_id):
        """Boot a server from the given image using the test keypair."""
        create_kwargs = {
            'key_name': self.keypair.name
        }
        return self.create_server(image=image_id, create_kwargs=create_kwargs)

    def _add_keypair(self):
        self.keypair = self.create_keypair()

    def _ssh_to_server(self, server_or_ip):
        """Return an ssh client to the server, dumping logs on failure."""
        try:
            return self.get_remote_client(server_or_ip)
        except Exception:
            # Bug fix: LOG.exception() without a message raises TypeError,
            # masking the real ssh failure.  Also re-raise so callers fail
            # here instead of proceeding with a None ssh client.
            LOG.exception('ssh to server failed')
            self._log_console_output()
            raise

    def _write_timestamp(self, server_or_ip):
        ssh_client = self._ssh_to_server(server_or_ip)
        ssh_client.exec_command('date > /tmp/timestamp; sync')
        self.timestamp = ssh_client.exec_command('cat /tmp/timestamp')

    def _check_timestamp(self, server_or_ip):
        ssh_client = self._ssh_to_server(server_or_ip)
        got_timestamp = ssh_client.exec_command('cat /tmp/timestamp')
        self.assertEqual(self.timestamp, got_timestamp)

    def _create_floating_ip(self):
        floating_ip = self.compute_client.floating_ips.create()
        self.addCleanup(floating_ip.delete)
        return floating_ip

    def _set_floating_ip_to_server(self, server, floating_ip):
        server.add_floating_ip(floating_ip)

    @test.services('compute', 'network', 'image')
    def test_snapshot_pattern(self):
        # prepare for booting a instance
        self._add_keypair()
        self._create_loginable_secgroup_rule_nova()

        # boot a instance and create a timestamp file in it
        server = self._boot_image(CONF.compute.image_ref)
        if CONF.compute.use_floatingip_for_ssh:
            fip_for_server = self._create_floating_ip()
            self._set_floating_ip_to_server(server, fip_for_server)
            self._write_timestamp(fip_for_server.ip)
        else:
            self._write_timestamp(server)

        # snapshot the instance
        snapshot_image = self.create_server_snapshot(server=server)

        # boot a second instance from the snapshot
        server_from_snapshot = self._boot_image(snapshot_image.id)

        # check the existence of the timestamp file in the second instance
        if CONF.compute.use_floatingip_for_ssh:
            fip_for_snapshot = self._create_floating_ip()
            self._set_floating_ip_to_server(server_from_snapshot,
                                            fip_for_snapshot)
            self._check_timestamp(fip_for_snapshot.ip)
        else:
            self._check_timestamp(server_from_snapshot)
tempest-2014.1.dev4108.gf22b6cc/tempest/scenario/test_minimum_basic.py 0000664 0001750 0001750 00000010657 12332757070 025617 0 ustar chuck chuck 0000000 0000000 # Copyright 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.common import debug
from tempest import config
from tempest.openstack.common import log as logging
from tempest.scenario import manager
from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
class TestMinimumBasicScenario(manager.OfficialClientTest):

    """
    This is a basic minimum scenario test.

    This test below:
    * across the multiple components
    * as a regular user
    * with and without optional parameters
    * check command outputs
    """

    def _wait_for_server_status(self, status):
        server_id = self.server.id
        self.status_timeout(
            self.compute_client.servers, server_id, status)

    def nova_keypair_add(self):
        self.keypair = self.create_keypair()

    def nova_boot(self):
        # Boot from self.image (created by glance_image_create() in the
        # scenario) with the keypair from nova_keypair_add().
        create_kwargs = {'key_name': self.keypair.name}
        self.server = self.create_server(image=self.image,
                                         create_kwargs=create_kwargs)

    def nova_list(self):
        """Assert the booted server shows up in the server listing."""
        servers = self.compute_client.servers.list()
        LOG.debug("server_list:%s" % servers)
        self.assertIn(self.server, servers)

    def nova_show(self):
        """Assert GET of the server matches what we booted."""
        got_server = self.compute_client.servers.get(self.server)
        LOG.debug("got server:%s" % got_server)
        self.assertEqual(self.server, got_server)

    def cinder_create(self):
        self.volume = self.create_volume()

    def cinder_list(self):
        volumes = self.volume_client.volumes.list()
        self.assertIn(self.volume, volumes)

    def cinder_show(self):
        volume = self.volume_client.volumes.get(self.volume.id)
        self.assertEqual(self.volume, volume)

    def nova_volume_attach(self):
        # Attach the volume from cinder_create() as /dev/vdb and wait
        # until Cinder reports it in-use.
        attach_volume_client = self.compute_client.volumes.create_server_volume
        volume = attach_volume_client(self.server.id,
                                      self.volume.id,
                                      '/dev/vdb')
        self.assertEqual(self.volume.id, volume.id)
        self.wait_for_volume_status('in-use')

    def nova_reboot(self):
        self.server.reboot()
        self._wait_for_server_status('ACTIVE')

    def nova_floating_ip_create(self):
        self.floating_ip = self.compute_client.floating_ips.create()
        self.addCleanup(self.floating_ip.delete)

    def nova_floating_ip_add(self):
        self.server.add_floating_ip(self.floating_ip)

    def ssh_to_server(self):
        """Open an ssh session via the floating IP; log and re-raise on
        failure after collecting console and network diagnostics."""
        try:
            self.linux_client = self.get_remote_client(self.floating_ip.ip)
            self.linux_client.validate_authentication()
        except Exception:
            LOG.exception('ssh to server failed')
            self._log_console_output()
            debug.log_net_debug()
            raise

    def check_partitions(self):
        # The attached volume should appear in the guest exactly once
        # as 'vdb'.
        partitions = self.linux_client.get_partitions()
        self.assertEqual(1, partitions.count('vdb'))

    def nova_volume_detach(self):
        detach_volume_client = self.compute_client.volumes.delete_server_volume
        detach_volume_client(self.server.id, self.volume.id)
        self.wait_for_volume_status('available')

        volume = self.volume_client.volumes.get(self.volume.id)
        self.assertEqual('available', volume.status)

    @test.services('compute', 'volume', 'image', 'network')
    def test_minimum_basic_scenario(self):
        self.glance_image_create()
        self.nova_keypair_add()
        self.nova_boot()
        self.nova_list()
        self.nova_show()
        self.cinder_create()
        self.cinder_list()
        self.cinder_show()
        self.nova_volume_attach()
        self.addCleanup(self.nova_volume_detach)
        self.cinder_show()

        self.nova_floating_ip_create()
        self.nova_floating_ip_add()
        self._create_loginable_secgroup_rule_nova()
        self.ssh_to_server()
        self.nova_reboot()
        self.ssh_to_server()
        self.check_partitions()
tempest-2014.1.dev4108.gf22b6cc/tempest/scenario/test_stamp_pattern.py 0000664 0001750 0001750 00000017442 12332757070 025663 0 ustar chuck chuck 0000000 0000000 # Copyright 2013 NEC Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from cinderclient import exceptions as cinder_exceptions
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log as logging
from tempest.scenario import manager
import tempest.test
CONF = config.CONF
LOG = logging.getLogger(__name__)
class TestStampPattern(manager.OfficialClientTest):
    """
    This test is for snapshotting an instance/volume and attaching the volume
    created from snapshot to the instance booted from snapshot.
    The following is the scenario outline:
    1. Boot an instance "instance1"
    2. Create a volume "volume1"
    3. Attach volume1 to instance1
    4. Create a filesystem on volume1
    5. Mount volume1
    6. Create a file which timestamp is written in volume1
    7. Unmount volume1
    8. Detach volume1 from instance1
    9. Get a snapshot "snapshot_from_volume" of volume1
    10. Get a snapshot "snapshot_from_instance" of instance1
    11. Boot an instance "instance2" from snapshot_from_instance
    12. Create a volume "volume2"  from snapshot_from_volume
    13. Attach volume2 to instance2
    14. Check the existence of a file which created at 6. in volume2
    """

    @classmethod
    def setUpClass(cls):
        super(TestStampPattern, cls).setUpClass()

        # Steps 9 and 12 of the scenario need Cinder snapshots.
        if not CONF.volume_feature_enabled.snapshot:
            raise cls.skipException("Cinder volume snapshots are disabled")

    def _wait_for_volume_snapshot_status(self, volume_snapshot, status):
        self.status_timeout(self.volume_client.volume_snapshots,
                            volume_snapshot.id, status)

    def _boot_image(self, image_id):
        """Boot a server from the given image using the test keypair."""
        create_kwargs = {
            'key_name': self.keypair.name
        }
        return self.create_server(image=image_id, create_kwargs=create_kwargs)

    def _add_keypair(self):
        self.keypair = self.create_keypair()

    def _create_floating_ip(self):
        floating_ip = self.compute_client.floating_ips.create()
        self.addCleanup(floating_ip.delete)
        return floating_ip

    def _add_floating_ip(self, server, floating_ip):
        server.add_floating_ip(floating_ip)

    def _ssh_to_server(self, server_or_ip):
        return self.get_remote_client(server_or_ip)

    def _create_volume_snapshot(self, volume):
        """Snapshot the volume; registers a cleanup that waits for the
        snapshot to be fully deleted."""
        snapshot_name = data_utils.rand_name('scenario-snapshot-')
        volume_snapshots = self.volume_client.volume_snapshots
        snapshot = volume_snapshots.create(
            volume.id, display_name=snapshot_name)

        def cleaner():
            # Delete the snapshot, then poll until Cinder reports NotFound.
            volume_snapshots.delete(snapshot)
            try:
                while volume_snapshots.get(snapshot.id):
                    time.sleep(1)
            except cinder_exceptions.NotFound:
                pass
        self.addCleanup(cleaner)
        self._wait_for_volume_status(volume, 'available')
        self._wait_for_volume_snapshot_status(snapshot, 'available')
        self.assertEqual(snapshot_name, snapshot.display_name)
        return snapshot

    def _wait_for_volume_status(self, volume, status):
        self.status_timeout(
            self.volume_client.volumes, volume.id, status)

    def _create_volume(self, snapshot_id=None):
        return self.create_volume(snapshot_id=snapshot_id)

    def _attach_volume(self, server, volume):
        # Attach as /dev/vdb and wait for Cinder to report in-use.
        attach_volume_client = self.compute_client.volumes.create_server_volume
        attached_volume = attach_volume_client(server.id,
                                               volume.id,
                                               '/dev/vdb')
        self.assertEqual(volume.id, attached_volume.id)
        self._wait_for_volume_status(attached_volume, 'in-use')

    def _detach_volume(self, server, volume):
        detach_volume_client = self.compute_client.volumes.delete_server_volume
        detach_volume_client(server.id, volume.id)
        self._wait_for_volume_status(volume, 'available')

    def _wait_for_volume_available_on_the_system(self, server_or_ip):
        """Poll over ssh until the guest exposes the vdb partition."""
        ssh = self.get_remote_client(server_or_ip)

        def _func():
            part = ssh.get_partitions()
            LOG.debug("Partitions:%s" % part)
            return 'vdb' in part

        if not tempest.test.call_until_true(_func,
                                            CONF.compute.build_timeout,
                                            CONF.compute.build_interval):
            raise exceptions.TimeoutException

    def _create_timestamp(self, server_or_ip):
        # Format vdb, mount it, write the timestamp, then unmount so the
        # volume can be detached cleanly afterwards.
        ssh_client = self._ssh_to_server(server_or_ip)
        ssh_client.exec_command('sudo /usr/sbin/mkfs.ext4 /dev/vdb')
        ssh_client.exec_command('sudo mount /dev/vdb /mnt')
        ssh_client.exec_command('sudo sh -c "date > /mnt/timestamp;sync"')
        self.timestamp = ssh_client.exec_command('sudo cat /mnt/timestamp')
        ssh_client.exec_command('sudo umount /mnt')

    def _check_timestamp(self, server_or_ip):
        ssh_client = self._ssh_to_server(server_or_ip)
        ssh_client.exec_command('sudo mount /dev/vdb /mnt')
        got_timestamp = ssh_client.exec_command('sudo cat /mnt/timestamp')
        self.assertEqual(self.timestamp, got_timestamp)

    @tempest.test.skip_because(bug="1205344")
    @tempest.test.services('compute', 'network', 'volume', 'image')
    def test_stamp_pattern(self):
        # prepare for booting a instance
        self._add_keypair()
        self._create_loginable_secgroup_rule_nova()

        # boot an instance and create a timestamp file in it
        volume = self._create_volume()
        server = self._boot_image(CONF.compute.image_ref)

        # create and add floating IP to server1
        if CONF.compute.use_floatingip_for_ssh:
            floating_ip_for_server = self._create_floating_ip()
            self._add_floating_ip(server, floating_ip_for_server)
            ip_for_server = floating_ip_for_server.ip
        else:
            ip_for_server = server

        self._attach_volume(server, volume)
        self._wait_for_volume_available_on_the_system(ip_for_server)
        self._create_timestamp(ip_for_server)
        self._detach_volume(server, volume)

        # snapshot the volume
        volume_snapshot = self._create_volume_snapshot(volume)

        # snapshot the instance
        snapshot_image = self.create_server_snapshot(server=server)

        # create second volume from the snapshot(volume2)
        volume_from_snapshot = self._create_volume(
            snapshot_id=volume_snapshot.id)

        # boot second instance from the snapshot(instance2)
        server_from_snapshot = self._boot_image(snapshot_image.id)

        # create and add floating IP to server_from_snapshot
        if CONF.compute.use_floatingip_for_ssh:
            floating_ip_for_snapshot = self._create_floating_ip()
            self._add_floating_ip(server_from_snapshot,
                                  floating_ip_for_snapshot)
            ip_for_snapshot = floating_ip_for_snapshot.ip
        else:
            ip_for_snapshot = server_from_snapshot

        # attach volume2 to instance2
        self._attach_volume(server_from_snapshot, volume_from_snapshot)
        self._wait_for_volume_available_on_the_system(ip_for_snapshot)

        # check the existence of the timestamp file in the volume2
        self._check_timestamp(ip_for_snapshot)
tempest-2014.1.dev4108.gf22b6cc/tempest/scenario/test_network_basic_ops.py 0000664 0001750 0001750 00000041363 12332757070 026514 0 ustar chuck chuck 0000000 0000000 # Copyright 2012 OpenStack Foundation
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import re
from tempest.api.network import common as net_common
from tempest.common import debug
from tempest.common.utils import data_utils
from tempest import config
from tempest.openstack.common import log as logging
from tempest.scenario import manager
from tempest import test
CONF = config.CONF
LOG = logging.getLogger(__name__)
# Pairs a floating IP with the server it is associated to.
Floating_IP_tuple = collections.namedtuple('Floating_IP_tuple',
                                           ['floating_ip', 'server'])
class TestNetworkBasicOps(manager.NetworkScenarioTest):

    """
    This smoke test suite assumes that Nova has been configured to
    boot VM's with Neutron-managed networking, and attempts to
    verify network connectivity as follows:

    There are presumed to be two types of networks: tenant and
    public. A tenant network may or may not be reachable from the
    Tempest host. A public network is assumed to be reachable from
    the Tempest host, and it should be possible to associate a public
    ('floating') IP address with a tenant ('fixed') IP address to
    facilitate external connectivity to a potentially unroutable
    tenant IP address.

    This test suite can be configured to test network connectivity to
    a VM via a tenant network, a public network, or both. If both
    networking types are to be evaluated, tests that need to be
    executed remotely on the VM (via ssh) will only be run against
    one of the networks (to minimize test execution time).

    Determine which types of networks to test as follows:

    * Configure tenant network checks (via the
      'tenant_networks_reachable' key) if the Tempest host should
      have direct connectivity to tenant networks. This is likely to
      be the case if Tempest is running on the same host as a
      single-node devstack installation with IP namespaces disabled.

    * Configure checks for a public network if a public network has
      been configured prior to the test suite being run and if the
      Tempest host should have connectivity to that public network.
      Checking connectivity for a public network requires that a
      value be provided for 'public_network_id'. A value can
      optionally be provided for 'public_router_id' if tenants will
      use a shared router to access a public network (as is likely to
      be the case when IP namespaces are not enabled). If a value is
      not provided for 'public_router_id', a router will be created
      for each tenant and use the network identified by
      'public_network_id' as its gateway.
    """

    @classmethod
    def check_preconditions(cls):
        """Skip the suite unless at least one network type is testable."""
        super(TestNetworkBasicOps, cls).check_preconditions()
        if not (CONF.network.tenant_networks_reachable
                or CONF.network.public_network_id):
            msg = ('Either tenant_networks_reachable must be "true", or '
                   'public_network_id must be defined.')
            cls.enabled = False
            raise cls.skipException(msg)

    @classmethod
    def setUpClass(cls):
        super(TestNetworkBasicOps, cls).setUpClass()
        # Router creation and neutron security groups are both used
        # below, so bail out early if either extension is disabled.
        for ext in ['router', 'security-group']:
            if not test.is_extension_enabled(ext, 'network'):
                msg = "%s extension not enabled." % ext
                raise cls.skipException(msg)
        cls.check_preconditions()

    def cleanup_wrapper(self, resource):
        # Delegate to the scenario manager's cleanup, tagging the
        # resource with this test class' name.
        self.cleanup_resource(resource, self.__class__.__name__)

    def setUp(self):
        """Build the per-test topology: security group, network/subnet/
        router, one booted server, and a floating IP attached to it.
        """
        super(TestNetworkBasicOps, self).setUp()
        self.security_group = \
            self._create_security_group_neutron(tenant_id=self.tenant_id)
        self.addCleanup(self.cleanup_wrapper, self.security_group)
        self.network, self.subnet, self.router = self._create_networks()
        for r in [self.network, self.router, self.subnet]:
            self.addCleanup(self.cleanup_wrapper, r)
        self.check_networks()
        # Maps server -> keypair used to ssh into it.
        self.servers = {}
        name = data_utils.rand_name('server-smoke')
        serv_dict = self._create_server(name, self.network)
        self.servers[serv_dict['server']] = serv_dict['keypair']
        self._check_tenant_network_connectivity()
        self._create_and_associate_floating_ips()

    def check_networks(self):
        """
        Checks that we see the newly created network/subnet/router via
        checking the result of list_[networks,routers,subnets]
        """
        seen_nets = self._list_networks()
        seen_names = [n['name'] for n in seen_nets]
        seen_ids = [n['id'] for n in seen_nets]
        self.assertIn(self.network.name, seen_names)
        self.assertIn(self.network.id, seen_ids)
        seen_subnets = self._list_subnets()
        seen_net_ids = [n['network_id'] for n in seen_subnets]
        seen_subnet_ids = [n['id'] for n in seen_subnets]
        self.assertIn(self.network.id, seen_net_ids)
        self.assertIn(self.subnet.id, seen_subnet_ids)
        seen_routers = self._list_routers()
        seen_router_ids = [n['id'] for n in seen_routers]
        seen_router_names = [n['name'] for n in seen_routers]
        self.assertIn(self.router.name,
                      seen_router_names)
        self.assertIn(self.router.id,
                      seen_router_ids)

    def _create_server(self, name, network):
        """Boot a server on `network` with a fresh keypair and the shared
        security group; returns dict(server=..., keypair=...).
        """
        keypair = self.create_keypair(name='keypair-%s' % name)
        self.addCleanup(self.cleanup_wrapper, keypair)
        security_groups = [self.security_group.name]
        create_kwargs = {
            'nics': [
                {'net-id': network.id},
            ],
            'key_name': keypair.name,
            'security_groups': security_groups,
        }
        server = self.create_server(name=name, create_kwargs=create_kwargs)
        self.addCleanup(self.cleanup_wrapper, server)
        return dict(server=server, keypair=keypair)

    def _check_tenant_network_connectivity(self):
        """Ping/ssh every fixed IP of every server, when the Tempest host
        is configured to reach tenant networks directly.
        """
        if not CONF.network.tenant_networks_reachable:
            msg = 'Tenant networks not configured to be reachable.'
            LOG.info(msg)
            return
        # The target login is assumed to have been configured for
        # key-based authentication by cloud-init.
        ssh_login = CONF.compute.image_ssh_user
        try:
            for server, key in self.servers.iteritems():
                for net_name, ip_addresses in server.networks.iteritems():
                    for ip_address in ip_addresses:
                        self._check_vm_connectivity(ip_address, ssh_login,
                                                    key.private_key)
        except Exception:
            # Dump console logs and host networking state before
            # re-raising, to make post-mortem debugging possible.
            LOG.exception('Tenant connectivity check failed')
            self._log_console_output(servers=self.servers.keys())
            debug.log_net_debug()
            raise

    def _create_and_associate_floating_ips(self):
        """Give each server a floating IP; the tuple tracking which server
        holds "the" floating IP ends up pointing at the last server
        created (only one exists at setUp time).
        """
        public_network_id = CONF.network.public_network_id
        for server in self.servers.keys():
            floating_ip = self._create_floating_ip(server, public_network_id)
            self.floating_ip_tuple = Floating_IP_tuple(floating_ip, server)
            self.addCleanup(self.cleanup_wrapper, floating_ip)

    def _check_public_network_connectivity(self, should_connect=True,
                                           msg=None):
        """Verify the floating IP is (or is not) reachable from the
        Tempest host; `msg` is appended to the failure log message.
        """
        # The target login is assumed to have been configured for
        # key-based authentication by cloud-init.
        ssh_login = CONF.compute.image_ssh_user
        LOG.debug('checking network connections')
        floating_ip, server = self.floating_ip_tuple
        ip_address = floating_ip.floating_ip_address
        private_key = None
        if should_connect:
            private_key = self.servers[server].private_key
        try:
            self._check_vm_connectivity(ip_address,
                                        ssh_login,
                                        private_key,
                                        should_connect=should_connect)
        except Exception:
            ex_msg = 'Public network connectivity check failed'
            if msg:
                ex_msg += ": " + msg
            LOG.exception(ex_msg)
            self._log_console_output(servers=self.servers.keys())
            debug.log_net_debug()
            raise

    def _disassociate_floating_ips(self):
        # Detach the floating IP; record that no server holds it now.
        floating_ip, server = self.floating_ip_tuple
        self._disassociate_floating_ip(floating_ip)
        self.floating_ip_tuple = Floating_IP_tuple(
            floating_ip, None)

    def _reassociate_floating_ips(self):
        """Attach the (currently detached) floating IP to a brand-new
        server, so a later connectivity check proves re-association.
        """
        floating_ip, server = self.floating_ip_tuple
        name = data_utils.rand_name('new_server-smoke-')
        # create a new server for the floating ip
        serv_dict = self._create_server(name, self.network)
        self.servers[serv_dict['server']] = serv_dict['keypair']
        self._associate_floating_ip(floating_ip, serv_dict['server'])
        self.floating_ip_tuple = Floating_IP_tuple(
            floating_ip, serv_dict['server'])

    def _create_new_network(self):
        # gateway_ip=None: the new subnet must not install a default
        # route on the VM (it would clobber the existing one).
        self.new_net = self._create_network(self.tenant_id)
        self.addCleanup(self.cleanup_wrapper, self.new_net)
        self.new_subnet = self._create_subnet(
            network=self.new_net,
            gateway_ip=None)
        self.addCleanup(self.cleanup_wrapper, self.new_subnet)

    def _hotplug_server(self):
        """Attach a NIC on self.new_net to the floating-IP server, then
        configure the new interface over ssh (static IP + link up).
        """
        old_floating_ip, server = self.floating_ip_tuple
        ip_address = old_floating_ip.floating_ip_address
        private_key = self.servers[server].private_key
        ssh_client = self.get_remote_client(ip_address,
                                            private_key=private_key)
        old_nic_list = self._get_server_nics(ssh_client)
        # get a port from a list of one item
        port_list = self._list_ports(device_id=server.id)
        self.assertEqual(1, len(port_list))
        old_port = port_list[0]
        self.compute_client.servers.interface_attach(server=server,
                                                     net_id=self.new_net.id,
                                                     port_id=None,
                                                     fixed_ip=None)
        # move server to the head of the cleanup list
        self.addCleanup(self.cleanup_wrapper, server)

        def check_ports():
            # True once exactly one new port (beyond old_port) shows up.
            port_list = [port for port in
                         self._list_ports(device_id=server.id)
                         if port != old_port]
            return len(port_list) == 1

        test.call_until_true(check_ports, 60, 1)
        new_port_list = [p for p in
                         self._list_ports(device_id=server.id)
                         if p != old_port]
        self.assertEqual(1, len(new_port_list))
        new_port = new_port_list[0]
        new_port = net_common.DeletablePort(client=self.network_client,
                                            **new_port)
        new_nic_list = self._get_server_nics(ssh_client)
        diff_list = [n for n in new_nic_list if n not in old_nic_list]
        self.assertEqual(1, len(diff_list))
        num, new_nic = diff_list[0]
        # No DHCP route/lease on the new net: assign the port's fixed IP
        # statically and bring the interface up by hand.
        ssh_client.assign_static_ip(nic=new_nic,
                                    addr=new_port.fixed_ips[0]['ip_address'])
        ssh_client.turn_nic_on(nic=new_nic)

    def _get_server_nics(self, ssh_client):
        """Return (interface number, interface name) tuples parsed from
        the VM's `ip address` output.
        """
        # BUG FIX: the group names had been stripped from this pattern
        # ("(?P\d+)" is invalid regex syntax and re.compile raises);
        # restore the named groups.
        reg = re.compile(r'(?P<num>\d+): (?P<nic_name>\w+):')
        ipatxt = ssh_client.get_ip_list()
        return reg.findall(ipatxt)

    def _check_network_internal_connectivity(self, network):
        """
        via ssh check VM internal connectivity:
        - ping internal gateway and DHCP port, implying in-tenant connectivity
        pinging both, because L3 and DHCP agents might be on different nodes
        """
        floating_ip, server = self.floating_ip_tuple
        # get internal ports' ips:
        # get all network ports in the new network
        internal_ips = (p['fixed_ips'][0]['ip_address'] for p in
                        self._list_ports(tenant_id=server.tenant_id,
                                         network_id=network.id)
                        if p['device_owner'].startswith('network'))
        self._check_server_connectivity(floating_ip, internal_ips)

    def _check_network_external_connectivity(self):
        """
        ping public network default gateway to imply external connectivity
        """
        if not CONF.network.public_network_id:
            msg = 'public network not defined.'
            LOG.info(msg)
            return
        subnet = self.network_client.list_subnets(
            network_id=CONF.network.public_network_id)['subnets']
        self.assertEqual(1, len(subnet), "Found %d subnets" % len(subnet))
        external_ips = [subnet[0]['gateway_ip']]
        self._check_server_connectivity(self.floating_ip_tuple.floating_ip,
                                        external_ips)

    def _check_server_connectivity(self, floating_ip, address_list):
        """ssh into the VM behind `floating_ip` and ping every address in
        `address_list` from inside the VM.
        """
        ip_address = floating_ip.floating_ip_address
        private_key = self.servers[self.floating_ip_tuple.server].private_key
        ssh_source = self._ssh_to_server(ip_address, private_key)
        for remote_ip in address_list:
            try:
                self.assertTrue(self._check_remote_connectivity(ssh_source,
                                                                remote_ip),
                                "Timed out waiting for %s to become "
                                "reachable" % remote_ip)
            except Exception:
                LOG.exception("Unable to access {dest} via ssh to "
                              "floating-ip {src}".format(dest=remote_ip,
                                                         src=floating_ip))
                debug.log_ip_ns()
                raise

    @test.attr(type='smoke')
    @test.services('compute', 'network')
    def test_network_basic_ops(self):
        """
        For a freshly-booted VM with an IP address ("port") on a given
        network:

        - the Tempest host can ping the IP address. This implies, but
          does not guarantee (see the ssh check that follows), that the
          VM has been assigned the correct IP address and has
          connectivity to the Tempest host.

        - the Tempest host can perform key-based authentication to an
          ssh server hosted at the IP address. This check guarantees
          that the IP address is associated with the target VM.

        - the Tempest host can ssh into the VM via the IP address and
          successfully execute the following:

          - ping an external IP address, implying external connectivity.

          - ping an external hostname, implying that dns is correctly
            configured.

          - ping an internal IP address, implying connectivity to another
            VM on the same network.

        - detach the floating-ip from the VM and verify that it becomes
          unreachable

        - associate detached floating ip to a new VM and verify connectivity.
          VMs are created with unique keypair so connectivity also asserts
          that floating IP is associated with the new VM instead of the old
          one
        """
        self._check_public_network_connectivity(should_connect=True)
        self._check_network_internal_connectivity(network=self.network)
        self._check_network_external_connectivity()
        self._disassociate_floating_ips()
        self._check_public_network_connectivity(should_connect=False,
                                                msg="after disassociate "
                                                    "floating ip")
        self._reassociate_floating_ips()
        self._check_public_network_connectivity(should_connect=True,
                                                msg="after re-associate "
                                                    "floating ip")

    @test.attr(type='smoke')
    @test.services('compute', 'network')
    def test_hotplug_nic(self):
        """
        1. create a new network, with no gateway (to prevent overwriting VM's
           gateway)
        2. connect VM to new network
        3. set static ip and bring new nic up
        4. check VM can ping new network dhcp port
        """
        self._check_public_network_connectivity(should_connect=True)
        self._create_new_network()
        self._hotplug_server()
        self._check_network_internal_connectivity(network=self.new_net)
tempest-2014.1.dev4108.gf22b6cc/tempest/tests/ 0000775 0001750 0001750 00000000000 12332757136 020723 5 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/tests/test_wrappers.py 0000664 0001750 0001750 00000007540 12332757070 024202 0 ustar chuck chuck 0000000 0000000 # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import shutil
import StringIO
import subprocess
import tempfile
from tempest.tests import base
# Shared sink for subprocess output we do not care about (`git init` noise).
DEVNULL = open(os.devnull, 'wb')
class TestWrappers(base.TestCase):
    """Run the pretty_tox* wrapper scripts inside a scratch project tree
    and assert on their exit codes.
    """

    def setUp(self):
        super(TestWrappers, self).setUp()
        # Setup test dirs
        self.directory = tempfile.mkdtemp(prefix='tempest-unit')
        self.addCleanup(shutil.rmtree, self.directory)
        self.test_dir = os.path.join(self.directory, 'tests')
        os.mkdir(self.test_dir)
        # Setup Test files
        self.testr_conf_file = os.path.join(self.directory, '.testr.conf')
        self.setup_cfg_file = os.path.join(self.directory, 'setup.cfg')
        self.subunit_trace = os.path.join(self.directory, 'subunit-trace.py')
        self.passing_file = os.path.join(self.test_dir, 'test_passing.py')
        self.failing_file = os.path.join(self.test_dir, 'test_failing.py')
        self.init_file = os.path.join(self.test_dir, '__init__.py')
        self.setup_py = os.path.join(self.directory, 'setup.py')
        shutil.copy('tempest/tests/files/testr-conf', self.testr_conf_file)
        shutil.copy('tempest/tests/files/passing-tests', self.passing_file)
        shutil.copy('tempest/tests/files/failing-tests', self.failing_file)
        shutil.copy('setup.py', self.setup_py)
        shutil.copy('tempest/tests/files/setup.cfg', self.setup_cfg_file)
        shutil.copy('tempest/tests/files/__init__.py', self.init_file)
        shutil.copy('tools/subunit-trace.py', self.subunit_trace)
        # copy over the pretty_tox scripts
        shutil.copy('tools/pretty_tox.sh',
                    os.path.join(self.directory, 'pretty_tox.sh'))
        shutil.copy('tools/pretty_tox_serial.sh',
                    os.path.join(self.directory, 'pretty_tox_serial.sh'))
        self.stdout = StringIO.StringIO()
        self.stderr = StringIO.StringIO()
        # Change directory, run wrapper and check result.  abspath() is
        # evaluated now (before the chdir below), so cleanup restores
        # the original working directory.
        self.addCleanup(os.chdir, os.path.abspath(os.curdir))
        os.chdir(self.directory)

    def assertRunExit(self, cmd, expected):
        """Run `cmd` under bash and assert its exit status == expected."""
        p = subprocess.Popen(
            "bash %s" % cmd, shell=True,
            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # BUG FIX: the previous code called p.wait() (which can deadlock
        # once a PIPE buffer fills) and then interpolated the pipe
        # *objects* into the failure message, printing useless
        # "<open file ...>" reprs.  communicate() both drains the pipes
        # safely and returns their actual contents.
        out, err = p.communicate()
        self.assertEqual(
            p.returncode, expected,
            "Stdout: %s; Stderr: %s" % (out, err))

    def test_pretty_tox(self):
        # Git init is required for the pbr testr command. pbr requires a git
        # version or an sdist to work. so make the test directory a git repo
        # too.
        subprocess.call(['git', 'init'], stderr=DEVNULL)
        self.assertRunExit('pretty_tox.sh tests.passing', 0)

    def test_pretty_tox_fails(self):
        # Git init is required for the pbr testr command. pbr requires a git
        # version or an sdist to work. so make the test directory a git repo
        # too.
        subprocess.call(['git', 'init'], stderr=DEVNULL)
        self.assertRunExit('pretty_tox.sh', 1)

    def test_pretty_tox_serial(self):
        self.assertRunExit('pretty_tox_serial.sh tests.passing', 0)

    def test_pretty_tox_serial_fails(self):
        self.assertRunExit('pretty_tox_serial.sh', 1)
tempest-2014.1.dev4108.gf22b6cc/tempest/tests/common/ 0000775 0001750 0001750 00000000000 12332757136 022213 5 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/tests/common/test_debug.py 0000664 0001750 0001750 00000012505 12332757070 024712 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from tempest.common import debug
from tempest import config
from tempest.openstack.common.fixture import mockpatch
from tempest import test
from tempest.tests import base
from tempest.tests import fake_config
class TestDebug(base.TestCase):
    """Unit tests for the tempest.common.debug logging helpers.

    Every shell-command wrapper in tempest.common.commands is patched in
    setUp, so the tests only assert on call counts/arguments -- no real
    commands are executed.
    """
    def setUp(self):
        super(TestDebug, self).setUp()
        self.useFixture(fake_config.ConfigFixture())
        self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
        common_pre = 'tempest.common.commands'
        self.ip_addr_raw_mock = self.patch(common_pre + '.ip_addr_raw')
        self.ip_route_raw_mock = self.patch(common_pre + '.ip_route_raw')
        self.iptables_raw_mock = self.patch(common_pre + '.iptables_raw')
        self.ip_ns_list_mock = self.patch(common_pre + '.ip_ns_list')
        self.ip_ns_addr_mock = self.patch(common_pre + '.ip_ns_addr')
        self.ip_ns_route_mock = self.patch(common_pre + '.ip_ns_route')
        self.iptables_ns_mock = self.patch(common_pre + '.iptables_ns')
        self.ovs_db_dump_mock = self.patch(common_pre + '.ovs_db_dump')
        # LOG is patched too so .info calls can be asserted on.
        self.log_mock = self.patch('tempest.common.debug.LOG')
    def test_log_ip_ns_debug_disabled(self):
        # With CONF.debug.enable False, log_ip_ns must be a no-op.
        self.useFixture(mockpatch.PatchObject(test.CONF.debug,
                                              'enable', False))
        debug.log_ip_ns()
        self.assertFalse(self.ip_addr_raw_mock.called)
        self.assertFalse(self.log_mock.info.called)
    def test_log_ip_ns_debug_enabled(self):
        # With debug enabled, every command wrapper must be invoked:
        # once per iptables table on the host, and once per (namespace,
        # table) combination for the two fake namespaces [1, 2].
        self.useFixture(mockpatch.PatchObject(test.CONF.debug,
                                              'enable', True))
        self.ip_ns_list_mock.return_value = [1, 2]
        debug.log_ip_ns()
        self.ip_addr_raw_mock.assert_called_with()
        self.assertTrue(self.log_mock.info.called)
        self.ip_route_raw_mock.assert_called_with()
        self.assertEqual(len(debug.TABLES), self.iptables_raw_mock.call_count)
        for table in debug.TABLES:
            self.assertIn(mock.call(table),
                          self.iptables_raw_mock.call_args_list)
        self.ip_ns_list_mock.assert_called_with()
        self.assertEqual(len(self.ip_ns_list_mock.return_value),
                         self.ip_ns_addr_mock.call_count)
        self.assertEqual(len(self.ip_ns_list_mock.return_value),
                         self.ip_ns_route_mock.call_count)
        for ns in self.ip_ns_list_mock.return_value:
            self.assertIn(mock.call(ns),
                          self.ip_ns_addr_mock.call_args_list)
            self.assertIn(mock.call(ns),
                          self.ip_ns_route_mock.call_args_list)
        self.assertEqual(len(debug.TABLES) *
                         len(self.ip_ns_list_mock.return_value),
                         self.iptables_ns_mock.call_count)
        for ns in self.ip_ns_list_mock.return_value:
            for table in debug.TABLES:
                self.assertIn(mock.call(ns, table),
                              self.iptables_ns_mock.call_args_list)
    def test_log_ovs_db_debug_disabled(self):
        # log_ovs_db must stay silent unless BOTH debug is enabled and
        # neutron is available; all three "off" combinations are checked.
        self.useFixture(mockpatch.PatchObject(test.CONF.debug,
                                              'enable', False))
        self.useFixture(mockpatch.PatchObject(test.CONF.service_available,
                                              'neutron', False))
        debug.log_ovs_db()
        self.assertFalse(self.ovs_db_dump_mock.called)
        self.useFixture(mockpatch.PatchObject(test.CONF.debug,
                                              'enable', True))
        self.useFixture(mockpatch.PatchObject(test.CONF.service_available,
                                              'neutron', False))
        debug.log_ovs_db()
        self.assertFalse(self.ovs_db_dump_mock.called)
        self.useFixture(mockpatch.PatchObject(test.CONF.debug,
                                              'enable', False))
        self.useFixture(mockpatch.PatchObject(test.CONF.service_available,
                                              'neutron', True))
        debug.log_ovs_db()
        self.assertFalse(self.ovs_db_dump_mock.called)
    def test_log_ovs_db_debug_enabled(self):
        # debug enabled + neutron available -> the OVS DB is dumped.
        self.useFixture(mockpatch.PatchObject(test.CONF.debug,
                                              'enable', True))
        self.useFixture(mockpatch.PatchObject(test.CONF.service_available,
                                              'neutron', True))
        debug.log_ovs_db()
        self.ovs_db_dump_mock.assert_called_with()
    def test_log_net_debug(self):
        # log_net_debug simply delegates to the two helpers above.
        self.log_ip_ns_mock = self.patch('tempest.common.debug.log_ip_ns')
        self.log_ovs_db_mock = self.patch('tempest.common.debug.log_ovs_db')
        debug.log_net_debug()
        self.log_ip_ns_mock.assert_called_with()
        self.log_ovs_db_mock.assert_called_with()
tempest-2014.1.dev4108.gf22b6cc/tempest/tests/common/__init__.py 0000664 0001750 0001750 00000000000 12332757070 024307 0 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/tests/common/utils/ 0000775 0001750 0001750 00000000000 12332757136 023353 5 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/tests/common/utils/test_file_utils.py 0000664 0001750 0001750 00000002165 12332757070 027124 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from mock import patch
from tempest.common.utils import file_utils
from tempest.tests import base
class TestFileUtils(base.TestCase):
    """Unit tests for file_utils.have_effective_read_access."""

    def test_have_effective_read_path(self):
        # Fake out the builtin open() so any path appears readable.
        mocked_open = mock.mock_open()
        with patch('__builtin__.open', mocked_open, create=True):
            readable = file_utils.have_effective_read_access('fake_path')
        self.assertTrue(readable)

    def test_not_effective_read_path(self):
        # Without the mock, the bogus path cannot actually be opened.
        self.assertFalse(
            file_utils.have_effective_read_access('fake_path'))
tempest-2014.1.dev4108.gf22b6cc/tempest/tests/common/utils/test_misc.py 0000664 0001750 0001750 00000005241 12332757070 025716 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.common.utils import misc
from tempest.tests import base
@misc.singleton
class TestFoo(object):
    """Counter fixture used by TestMisc.test_singleton; @singleton makes
    every construction return the same shared instance."""
    count = 0

    def increment(self):
        # State lives on the (single) instance, so the count persists
        # across what look like separate instantiations.
        self.count = self.count + 1
        return self.count
@misc.singleton
class TestBar(object):
    """Second singleton fixture: proves distinct @singleton classes do
    not share an instance."""
    count = 0

    def increment(self):
        # Same contract as TestFoo.increment, on an independent singleton.
        self.count = self.count + 1
        return self.count
class TestMisc(base.TestCase):
    """Unit tests for tempest.common.utils.misc helpers.

    NOTE: find_test_caller() reports names discovered by inspecting the
    call stack, so the inner functions below must keep their exact names
    (setUp, setUpClass, tearDown, tearDownClass) and their self/cls
    parameter spellings.
    """
    def test_singleton(self):
        # Two constructions of a @singleton class share one instance;
        # a different singleton class stays distinct.
        test = TestFoo()
        self.assertEqual(0, test.count)
        self.assertEqual(1, test.increment())
        test2 = TestFoo()
        self.assertEqual(1, test.count)
        self.assertEqual(1, test2.count)
        self.assertEqual(test, test2)
        test3 = TestBar()
        self.assertNotEqual(test, test3)
    def test_find_test_caller_test_case(self):
        # Calling it from here should give us the method we're in.
        self.assertEqual('TestMisc:test_find_test_caller_test_case',
                         misc.find_test_caller())
    def test_find_test_caller_setup_self(self):
        def setUp(self):
            return misc.find_test_caller()
        self.assertEqual('TestMisc:setUp', setUp(self))
    def test_find_test_caller_setup_no_self(self):
        # Without a self argument there is no class to report.
        def setUp():
            return misc.find_test_caller()
        self.assertEqual(':setUp', setUp())
    def test_find_test_caller_setupclass_cls(self):
        def setUpClass(cls):  # noqa
            return misc.find_test_caller()
        self.assertEqual('TestMisc:setUpClass', setUpClass(self.__class__))
    def test_find_test_caller_teardown_self(self):
        def tearDown(self):
            return misc.find_test_caller()
        self.assertEqual('TestMisc:tearDown', tearDown(self))
    def test_find_test_caller_teardown_no_self(self):
        def tearDown():
            return misc.find_test_caller()
        self.assertEqual(':tearDown', tearDown())
    def test_find_test_caller_teardown_class(self):
        def tearDownClass(cls):
            return misc.find_test_caller()
        self.assertEqual('TestMisc:tearDownClass',
                         tearDownClass(self.__class__))
tempest-2014.1.dev4108.gf22b6cc/tempest/tests/common/utils/test_data_utils.py 0000664 0001750 0001750 00000005500 12332757070 027112 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.common.utils import data_utils
from tempest.tests import base
class TestDataUtils(base.TestCase):
    """Unit tests for the tempest.common.utils.data_utils helpers."""

    def test_rand_uuid(self):
        first = data_utils.rand_uuid()
        self.assertIsInstance(first, str)
        # Canonical hyphenated UUID form.
        self.assertRegexpMatches(first, "^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]"
                                        "{4}-[0-9a-f]{4}-[0-9a-f]{12}$")
        second = data_utils.rand_uuid()
        self.assertNotEqual(first, second)

    def test_rand_uuid_hex(self):
        first = data_utils.rand_uuid_hex()
        self.assertIsInstance(first, str)
        # 32 hex digits, no hyphens.
        self.assertRegexpMatches(first, "^[0-9a-f]{32}$")
        second = data_utils.rand_uuid_hex()
        self.assertNotEqual(first, second)

    def test_rand_name(self):
        plain = data_utils.rand_name()
        self.assertIsInstance(plain, str)
        self.assertNotEqual(plain, data_utils.rand_name())
        prefixed = data_utils.rand_name('foo')
        self.assertTrue(prefixed.startswith('foo'))
        prefixed_again = data_utils.rand_name('foo')
        self.assertTrue(prefixed.startswith('foo'))
        self.assertNotEqual(prefixed, prefixed_again)

    def test_rand_int(self):
        first = data_utils.rand_int_id()
        self.assertIsInstance(first, int)
        second = data_utils.rand_int_id()
        self.assertNotEqual(first, second)

    def test_rand_mac_address(self):
        first = data_utils.rand_mac_address()
        self.assertIsInstance(first, str)
        # Six lowercase-hex octets separated by colons.
        self.assertRegexpMatches(first, "^([0-9a-f][0-9a-f]:){5}"
                                        "[0-9a-f][0-9a-f]$")
        second = data_utils.rand_mac_address()
        self.assertNotEqual(first, second)

    def test_parse_image_id(self):
        # Only the last path component is the image id.
        self.assertEqual("deadbeaf",
                         data_utils.parse_image_id("/foo/bar/deadbeaf"))

    def test_arbitrary_string(self):
        self.assertEqual("test", data_utils.arbitrary_string())
        # base_text repeated to fill the requested size exactly.
        self.assertEqual("abc" * (30 / len("abc")),
                         data_utils.arbitrary_string(size=30,
                                                     base_text="abc"))
        # base_text truncated when longer than the requested size.
        self.assertEqual("deadb",
                         data_utils.arbitrary_string(size=5,
                                                     base_text="deadbeaf"))
tempest-2014.1.dev4108.gf22b6cc/tempest/tests/common/utils/__init__.py 0000664 0001750 0001750 00000000000 12332757070 025447 0 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/tests/fake_identity.py 0000664 0001750 0001750 00000010603 12332757070 024111 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib2
import json
# Canned tokens returned by the fake auth responses below.
TOKEN = "fake_token"
ALT_TOKEN = "alt_fake_token"
# Fake Identity v2 constants
# One catalog entry with two endpoints; the distinct region names let
# unit tests exercise region-based endpoint selection.
COMPUTE_ENDPOINTS_V2 = {
    "endpoints": [
        {
            "adminURL": "http://fake_url/v2/first_endpoint/admin",
            "region": "NoMatchRegion",
            "internalURL": "http://fake_url/v2/first_endpoint/internal",
            "publicURL": "http://fake_url/v2/first_endpoint/public"
        },
        {
            "adminURL": "http://fake_url/v2/second_endpoint/admin",
            "region": "FakeRegion",
            "internalURL": "http://fake_url/v2/second_endpoint/internal",
            "publicURL": "http://fake_url/v2/second_endpoint/public"
        },
    ],
    "type": "compute",
    "name": "nova"
}
CATALOG_V2 = [COMPUTE_ENDPOINTS_V2, ]
# Same shape as IDENTITY_V2_RESPONSE below, but carrying ALT_TOKEN.
ALT_IDENTITY_V2_RESPONSE = {
    "access": {
        "token": {
            "expires": "2020-01-01T00:00:10Z",
            "id": ALT_TOKEN,
            "tenant": {
                "id": "fake_tenant_id"
            },
        },
        "user": {
            "id": "fake_user_id",
        },
        "serviceCatalog": CATALOG_V2,
    },
}
# Canned keystone v2 token-create response body.
IDENTITY_V2_RESPONSE = {
    "access": {
        "token": {
            "expires": "2020-01-01T00:00:10Z",
            "id": TOKEN,
            "tenant": {
                "id": "fake_tenant_id"
            },
        },
        "user": {
            "id": "fake_user_id",
        },
        "serviceCatalog": CATALOG_V2,
    },
}
# Fake Identity V3 constants
# v3 catalog entries are flat endpoint records distinguished by
# 'interface' and 'region' rather than per-interface URL keys.
COMPUTE_ENDPOINTS_V3 = {
    "endpoints": [
        {
            "id": "first_compute_fake_service",
            "interface": "public",
            "region": "NoMatchRegion",
            "url": "http://fake_url/v3/first_endpoint/api"
        },
        {
            "id": "second_fake_service",
            "interface": "public",
            "region": "FakeRegion",
            "url": "http://fake_url/v3/second_endpoint/api"
        },
        {
            "id": "third_fake_service",
            "interface": "admin",
            "region": "MiddleEarthRegion",
            "url": "http://fake_url/v3/third_endpoint/api"
        }
    ],
    "type": "compute",
    "id": "fake_compute_endpoint"
}
CATALOG_V3 = [COMPUTE_ENDPOINTS_V3, ]
# Canned keystone v3 token-create response body.
IDENTITY_V3_RESPONSE = {
    "token": {
        "methods": [
            "token",
            "password"
        ],
        "expires_at": "2020-01-01T00:00:10.000123Z",
        "project": {
            "domain": {
                "id": "fake_domain_id",
                "name": "fake"
            },
            "id": "project_id",
            "name": "project_name"
        },
        "user": {
            "domain": {
                "id": "fake_domain_id",
                "name": "domain_name"
            },
            "id": "fake_user_id",
            "name": "username"
        },
        "issued_at": "2013-05-29T16:55:21.468960Z",
        "catalog": CATALOG_V3
    }
}
# NOTE(review): unlike the v2 pair above, the v3 "alternate" response is
# the very same object (not a copy carrying ALT_TOKEN) -- presumably
# sufficient for the current unit tests; confirm before relying on it.
ALT_IDENTITY_V3 = IDENTITY_V3_RESPONSE
def _fake_v3_response(self, uri, method="GET", body=None, headers=None,
                      redirections=5, connection_type=None):
    """Stand-in for httplib2.Http.request returning a canned v3 token.

    Mirrors keystone v3 behaviour: status 201 with the token delivered
    in the X-Subject-Token header, body is the serialized catalog.
    """
    response_headers = {
        "status": "201",
        "x-subject-token": TOKEN,
    }
    return (httplib2.Response(response_headers),
            json.dumps(IDENTITY_V3_RESPONSE))
def _fake_v2_response(self, uri, method="GET", body=None, headers=None,
                      redirections=5, connection_type=None):
    """Stand-in for httplib2.Http.request returning a canned v2 token.

    keystone v2 returns 200 with the token embedded in the JSON body.
    """
    ok_response = httplib2.Response({"status": "200"})
    return ok_response, json.dumps(IDENTITY_V2_RESPONSE)
def _fake_auth_failure_response():
    """Return a canned 401 response plus a JSON error body."""
    # the response body isn't really used in this case, but lets send it
    # anyway to have a safe check in some future change on the rest client.
    error_body = {
        "unauthorized": {
            "message": "Unauthorized",
            "code": "401"
        }
    }
    failure = httplib2.Response({"status": "401"})
    return failure, json.dumps(error_body)
tempest-2014.1.dev4108.gf22b6cc/tempest/tests/test_decorators.py 0000664 0001750 0001750 00000023774 12332757070 024513 0 ustar chuck chuck 0000000 0000000 # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import testtools
from oslo.config import cfg
from tempest import config
from tempest import exceptions
from tempest.openstack.common.fixture import mockpatch
from tempest import test
from tempest.tests import base
from tempest.tests import fake_config
class BaseDecoratorsTest(base.TestCase):
    """Common setUp for the decorator tests: install the fake tempest
    configuration so decorators can consult CONF safely."""

    def setUp(self):
        super(BaseDecoratorsTest, self).setUp()
        self.config_fixture = self.useFixture(fake_config.ConfigFixture())
        self.stubs.Set(config, 'TempestConfigPrivate',
                       fake_config.FakePrivate)
class TestAttrDecorator(BaseDecoratorsTest):
    """Tests for the test.attr decorator's attribute tagging."""

    def _test_attr_helper(self, expected_attrs, **decorator_args):
        @test.attr(**decorator_args)
        def foo():
            pass

        # test.attr only stores __testtools_attrs (via testtools) when a
        # 'type' argument is supplied, so that is the only case that can
        # be asserted on here.
        if 'type' in decorator_args:
            self.assertEqual(set(expected_attrs),
                             getattr(foo, '__testtools_attrs'))

    def test_attr_without_type(self):
        self._test_attr_helper(expected_attrs='baz', bar='baz')

    def test_attr_decorator_with_smoke_type(self):
        # A 'smoke' type implies the 'gate' tag as well.
        self._test_attr_helper(expected_attrs=['smoke', 'gate'], type='smoke')

    def test_attr_decorator_with_list_type(self):
        # A list containing 'smoke' gets 'gate' added alongside the
        # original entries.
        self._test_attr_helper(expected_attrs=['smoke', 'foo', 'gate'],
                               type=['smoke', 'foo'])

    def test_attr_decorator_with_unknown_type(self):
        self._test_attr_helper(expected_attrs=['foo'], type='foo')

    def test_attr_decorator_with_duplicated_type(self):
        # Duplicates collapse because the attrs are stored as a set.
        self._test_attr_helper(expected_attrs=['foo'], type=['foo', 'foo'])
class TestServicesDecorator(BaseDecoratorsTest):
    """Tests for the test.services decorator's tagging and validation."""

    def _test_services_helper(self, *decorator_args):
        class TestFoo(test.BaseTestCase):
            @test.services(*decorator_args)
            def test_bar(self):
                return 0

        instance = TestFoo('test_bar')
        # The service names end up as testtools attrs on the method.
        self.assertEqual(set(decorator_args),
                         getattr(instance.test_bar, '__testtools_attrs'))
        # The wrapped method still runs and returns its value.
        self.assertEqual(0, instance.test_bar())

    def test_services_decorator_with_single_service(self):
        self._test_services_helper('compute')

    def test_services_decorator_with_multiple_services(self):
        self._test_services_helper('compute', 'network')

    def test_services_decorator_with_duplicated_service(self):
        self._test_services_helper('compute', 'compute')

    def test_services_decorator_with_invalid_service(self):
        # Unknown service names are rejected outright.
        self.assertRaises(exceptions.InvalidServiceTag,
                          self._test_services_helper, 'compute',
                          'bad_service')

    def test_services_decorator_with_service_valid_and_unavailable(self):
        # A known but unavailable service skips the test at run time.
        self.useFixture(mockpatch.PatchObject(test.CONF.service_available,
                                              'cinder', False))
        self.assertRaises(testtools.TestCase.skipException,
                          self._test_services_helper, 'compute',
                          'volume')
class TestStressDecorator(BaseDecoratorsTest):
    """Tests for the test.stresstest decorator."""
    def _test_stresstest_helper(self, expected_frequency='process',
                                expected_inheritance=False,
                                **decorator_args):
        # Decorate a dummy function and verify the stress metadata it gains.
        @test.stresstest(**decorator_args)
        def foo():
            pass
        self.assertEqual(getattr(foo, 'st_class_setup_per'),
                         expected_frequency)
        self.assertEqual(getattr(foo, 'st_allow_inheritance'),
                         expected_inheritance)
        # stresstest always tags the function with the 'stress' attr.
        self.assertEqual(set(['stress']), getattr(foo, '__testtools_attrs'))
    def test_stresstest_decorator_default(self):
        self._test_stresstest_helper()
    def test_stresstest_decorator_class_setup_frequency(self):
        self._test_stresstest_helper('process', class_setup_per='process')
    def test_stresstest_decorator_class_setup_frequency_non_default(self):
        self._test_stresstest_helper(expected_frequency='application',
                                     class_setup_per='application')
    def test_stresstest_decorator_set_frequency_and_inheritance(self):
        self._test_stresstest_helper(expected_frequency='application',
                                     expected_inheritance=True,
                                     class_setup_per='application',
                                     allow_inheritance=True)
class TestSkipBecauseDecorator(BaseDecoratorsTest):
    """Tests for the test.skip_because decorator."""

    def _test_skip_because_helper(self, expected_to_skip=True,
                                  **decorator_args):
        """Build a throwaway test case and check whether it skips."""
        class TestFoo(test.BaseTestCase):
            _interface = 'json'

            @test.skip_because(**decorator_args)
            def test_bar(self):
                return 0

        t = TestFoo('test_bar')
        if expected_to_skip:
            self.assertRaises(testtools.TestCase.skipException, t.test_bar)
        else:
            # assert that test_bar returned 0; reuse the instance already
            # built above instead of constructing a second TestFoo (matches
            # the style of _test_requires_ext_helper).
            self.assertEqual(t.test_bar(), 0)

    def test_skip_because_bug(self):
        self._test_skip_because_helper(bug='12345')

    def test_skip_because_bug_and_interface_match(self):
        self._test_skip_because_helper(bug='12346', interface='json')

    def test_skip_because_bug_interface_not_match(self):
        self._test_skip_because_helper(expected_to_skip=False,
                                       bug='12347', interface='xml')

    def test_skip_because_bug_and_condition_true(self):
        self._test_skip_because_helper(bug='12348', condition=True)

    def test_skip_because_bug_and_condition_false(self):
        self._test_skip_because_helper(expected_to_skip=False,
                                       bug='12349', condition=False)

    def test_skip_because_bug_condition_false_and_interface_match(self):
        """
        Assure that only condition will be evaluated if both parameters are
        passed.
        """
        self._test_skip_because_helper(expected_to_skip=False,
                                       bug='12350', condition=False,
                                       interface='json')

    def test_skip_because_bug_condition_true_and_interface_not_match(self):
        """
        Assure that only condition will be evaluated if both parameters are
        passed.
        """
        self._test_skip_because_helper(bug='12351', condition=True,
                                       interface='xml')

    def test_skip_because_bug_without_bug_never_skips(self):
        """Never skip without a bug parameter."""
        self._test_skip_because_helper(expected_to_skip=False,
                                       condition=True)
        self._test_skip_because_helper(expected_to_skip=False,
                                       interface='json')

    def test_skip_because_invalid_bug_number(self):
        """Raise ValueError if with an invalid bug number"""
        self.assertRaises(ValueError, self._test_skip_because_helper,
                          bug='critical_bug')
class TestRequiresExtDecorator(BaseDecoratorsTest):
    """Tests for the test.requires_ext decorator."""
    def setUp(self):
        super(TestRequiresExtDecorator, self).setUp()
        # Pretend exactly these two compute extensions are enabled.
        cfg.CONF.set_default('api_extensions', ['enabled_ext', 'another_ext'],
                             'compute-feature-enabled')
    def _test_requires_ext_helper(self, expected_to_skip=True,
                                  **decorator_args):
        # Fresh throwaway class per call so the decorator is re-evaluated.
        class TestFoo(test.BaseTestCase):
            @test.requires_ext(**decorator_args)
            def test_bar(self):
                return 0
        t = TestFoo('test_bar')
        if expected_to_skip:
            self.assertRaises(testtools.TestCase.skipException, t.test_bar)
        else:
            self.assertEqual(t.test_bar(), 0)
    def test_requires_ext_decorator(self):
        self._test_requires_ext_helper(expected_to_skip=False,
                                       extension='enabled_ext',
                                       service='compute')
    def test_requires_ext_decorator_disabled_ext(self):
        self._test_requires_ext_helper(extension='disabled_ext',
                                       service='compute')
    def test_requires_ext_decorator_with_all_ext_enabled(self):
        # disable fixture so the default (all) is used.
        self.config_fixture.cleanUp()
        self._test_requires_ext_helper(expected_to_skip=False,
                                       extension='random_ext',
                                       service='compute')
    def test_requires_ext_decorator_bad_service(self):
        self.assertRaises(KeyError,
                          self._test_requires_ext_helper,
                          extension='enabled_ext',
                          service='bad_service')
class TestSimpleNegativeDecorator(BaseDecoratorsTest):
    """Tests for the SimpleNegativeAutoTest class decorator."""

    @test.SimpleNegativeAutoTest
    class FakeNegativeJSONTest(test.NegativeAutoTest):
        _schema_file = 'fake/schemas/file.json'

    def test_testfunc_exist(self):
        # The decorator must have generated a test_fake_negative method.
        self.assertIn("test_fake_negative", dir(self.FakeNegativeJSONTest))

    @mock.patch('tempest.test.NegativeAutoTest.execute')
    def test_testfunc_calls_execute(self, mocked_execute):
        # Parameter renamed from 'mock' so it no longer shadows the mock
        # module inside this method (mock.patch passes it positionally).
        obj = self.FakeNegativeJSONTest("test_fake_negative")
        self.assertIn("test_fake_negative", dir(obj))
        obj.test_fake_negative()
        mocked_execute.assert_called_once_with(
            self.FakeNegativeJSONTest._schema_file)
tempest-2014.1.dev4108.gf22b6cc/tempest/tests/stress/ 0000775 0001750 0001750 00000000000 12332757136 022246 5 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/tests/stress/test_stressaction.py 0000664 0001750 0001750 00000004356 12332757070 026405 0 ustar chuck chuck 0000000 0000000 # Copyright 2013 Deutsche Telekom AG
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tempest.stress.stressaction as stressaction
import tempest.test
class FakeStressAction(stressaction.StressAction):
    """Stress action stub that records whether run() was invoked."""

    def __init__(self, manager, max_runs=None, stop_on_error=False):
        # Name the class explicitly: super(self.__class__, self) recurses
        # infinitely as soon as this class is subclassed.
        super(FakeStressAction, self).__init__(manager, max_runs,
                                               stop_on_error)
        self._run_called = False

    def run(self):
        self._run_called = True

    @property
    def run_called(self):
        # True once run() has executed at least once.
        return self._run_called
class FakeStressActionFailing(stressaction.StressAction):
    """Stress action stub whose run() always raises."""
    def run(self):
        raise Exception('FakeStressActionFailing raise exception')
class TestStressAction(tempest.test.BaseTestCase):
    """Unit tests for StressAction.execute run/failure bookkeeping."""

    def _build_stats_dict(self, runs=0, fails=0):
        # Renamed from '_bulid_stats_dict' (typo); private helper, all call
        # sites are in this class.
        return {'runs': runs, 'fails': fails}

    def testStressTestRun(self):
        stressAction = FakeStressAction(manager=None, max_runs=1)
        stats = self._build_stats_dict()
        stressAction.execute(stats)
        self.assertTrue(stressAction.run_called)
        self.assertEqual(stats['runs'], 1)
        self.assertEqual(stats['fails'], 0)

    def testStressMaxTestRuns(self):
        # Starting one short of max_runs, a single execute() reaches it.
        stressAction = FakeStressAction(manager=None, max_runs=500)
        stats = self._build_stats_dict(runs=499)
        stressAction.execute(stats)
        self.assertTrue(stressAction.run_called)
        self.assertEqual(stats['runs'], 500)
        self.assertEqual(stats['fails'], 0)

    def testStressTestRunWithException(self):
        # A raising action still counts the run and records the failure.
        stressAction = FakeStressActionFailing(manager=None, max_runs=1)
        stats = self._build_stats_dict()
        stressAction.execute(stats)
        self.assertEqual(stats['runs'], 1)
        self.assertEqual(stats['fails'], 1)
tempest-2014.1.dev4108.gf22b6cc/tempest/tests/stress/test_stress.py 0000664 0001750 0001750 00000003554 12332757070 025206 0 ustar chuck chuck 0000000 0000000 # Copyright 2013 Deutsche Telekom AG
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import shlex
import subprocess
import tempest.cli as cli
from tempest.openstack.common import log as logging
import tempest.test
# Module-level logger for the stress framework smoke test.
LOG = logging.getLogger(__name__)
class StressFrameworkTest(tempest.test.BaseTestCase):
    """Basic test for the stress test framework.
    """

    def _cmd(self, cmd, param):
        """Execute *cmd* with *param* appended and return its exit code.

        Raises cli.CommandFailed on a non-zero exit status; the captured
        stdout is always logged, stderr only on failure.
        """
        cmd = ' '.join([cmd, param])
        # Pass lazy %-style args to the logger instead of pre-formatting,
        # so formatting only happens if the record is actually emitted.
        LOG.info("running: '%s'", cmd)
        cmd_str = cmd
        cmd = shlex.split(cmd)
        result = ''
        result_err = ''
        try:
            stdout = subprocess.PIPE
            stderr = subprocess.PIPE
            proc = subprocess.Popen(
                cmd, stdout=stdout, stderr=stderr)
            result, result_err = proc.communicate()
            if proc.returncode != 0:
                LOG.debug('error of %s:\n%s', cmd_str, result_err)
                raise cli.CommandFailed(proc.returncode,
                                        cmd,
                                        result)
        finally:
            LOG.debug('output of %s:\n%s', cmd_str, result)
        return proc.returncode

    def test_help_function(self):
        result = self._cmd("python", "-m tempest.stress.run_stress -h")
        self.assertEqual(0, result)
tempest-2014.1.dev4108.gf22b6cc/tempest/tests/stress/__init__.py 0000664 0001750 0001750 00000000000 12332757070 024342 0 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/tests/test_auth.py 0000664 0001750 0001750 00000037262 12332757070 023304 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime
from tempest import auth
from tempest.common import http
from tempest import config
from tempest import exceptions
from tempest.openstack.common.fixture import mockpatch
from tempest.tests import base
from tempest.tests import fake_auth_provider
from tempest.tests import fake_config
from tempest.tests import fake_credentials
from tempest.tests import fake_http
from tempest.tests import fake_identity
class BaseAuthTestsSetUp(base.TestCase):
    """Shared setUp for auth provider tests."""
    # Subclasses set the provider class under test and matching credentials.
    _auth_provider_class = None
    credentials = fake_credentials.FakeCredentials()
    def _auth(self, credentials, **params):
        """
        returns auth method according to keystone
        """
        return self._auth_provider_class(credentials, **params)
    def setUp(self):
        super(BaseAuthTestsSetUp, self).setUp()
        self.useFixture(fake_config.ConfigFixture())
        self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
        # Stub HTTP and credential factories so no real Keystone is needed.
        self.fake_http = fake_http.fake_httplib2(return_type=200)
        self.stubs.Set(http.ClosingHttp, 'request', self.fake_http.request)
        self.stubs.Set(auth, 'get_credentials',
                       fake_auth_provider.get_credentials)
        self.stubs.Set(auth, 'get_default_credentials',
                       fake_auth_provider.get_default_credentials)
        self.auth_provider = self._auth(self.credentials)
class TestBaseAuthProvider(BaseAuthTestsSetUp):
    """
    This tests auth.AuthProvider class which is base for the other so we
    obviously don't test not implemented method or the ones which strongly
    depends on them.
    """
    _auth_provider_class = auth.AuthProvider
    def test_check_credentials_class(self):
        # The base class leaves credential checking to subclasses.
        self.assertRaises(NotImplementedError,
                          self.auth_provider.check_credentials,
                          auth.Credentials())
    def test_check_credentials_bad_type(self):
        self.assertFalse(self.auth_provider.check_credentials([]))
    def test_instantiate_with_dict(self):
        # Dict credentials are only supported for backward compatibility
        auth_provider = self._auth(credentials={})
        self.assertIsInstance(auth_provider.credentials, auth.Credentials)
    def test_instantiate_with_bad_credentials_type(self):
        """
        Assure that credentials with bad type fail with TypeError
        """
        self.assertRaises(TypeError, self._auth, [])
    def test_auth_data_property(self):
        self.assertRaises(NotImplementedError, getattr, self.auth_provider,
                          'auth_data')
    def test_auth_data_property_when_cache_exists(self):
        # With a warm cache and is_expired stubbed False, auth_data is
        # served from the cache without hitting the abstract auth path.
        self.auth_provider.cache = 'foo'
        self.useFixture(mockpatch.PatchObject(self.auth_provider,
                                              'is_expired',
                                              return_value=False))
        self.assertEqual('foo', getattr(self.auth_provider, 'auth_data'))
    def test_delete_auth_data_property_through_deleter(self):
        self.auth_provider.cache = 'foo'
        del self.auth_provider.auth_data
        self.assertIsNone(self.auth_provider.cache)
    def test_delete_auth_data_property_through_clear_auth(self):
        self.auth_provider.cache = 'foo'
        self.auth_provider.clear_auth()
        self.assertIsNone(self.auth_provider.cache)
    def test_set_and_reset_alt_auth_data(self):
        self.auth_provider.set_alt_auth_data('foo', 'bar')
        self.assertEqual(self.auth_provider.alt_part, 'foo')
        self.assertEqual(self.auth_provider.alt_auth_data, 'bar')
        self.auth_provider.reset_alt_auth_data()
        self.assertIsNone(self.auth_provider.alt_part)
        self.assertIsNone(self.auth_provider.alt_auth_data)
    def test_fill_credentials(self):
        self.assertRaises(NotImplementedError,
                          self.auth_provider.fill_credentials)
class TestKeystoneV2AuthProvider(BaseAuthTestsSetUp):
    """Tests for the Keystone v2 auth provider against fake identity data."""

    _endpoints = fake_identity.IDENTITY_V2_RESPONSE['access']['serviceCatalog']
    _auth_provider_class = auth.KeystoneV2AuthProvider
    credentials = fake_credentials.FakeKeystoneV2Credentials()

    def setUp(self):
        super(TestKeystoneV2AuthProvider, self).setUp()
        # All identity traffic is answered by the canned v2 response.
        self.stubs.Set(http.ClosingHttp, 'request',
                       fake_identity._fake_v2_response)
        self.target_url = 'test_api'

    def _get_fake_alt_identity(self):
        return fake_identity.ALT_IDENTITY_V2_RESPONSE['access']

    def _get_result_url_from_endpoint(self, ep, endpoint_type='publicURL',
                                      replacement=None):
        # Optionally swap the API version segment (e.g. 'v2' -> 'v12').
        if replacement:
            return ep[endpoint_type].replace('v2', replacement)
        return ep[endpoint_type]

    def _get_token_from_fake_identity(self):
        return fake_identity.TOKEN

    def _get_from_fake_identity(self, attr):
        access = fake_identity.IDENTITY_V2_RESPONSE['access']
        if attr == 'user_id':
            return access['user']['id']
        elif attr == 'tenant_id':
            return access['token']['tenant']['id']

    def _test_request_helper(self, filters, expected):
        """Issue an auth_request and compare url/token/body to *expected*."""
        url, headers, body = self.auth_provider.auth_request('GET',
                                                             self.target_url,
                                                             filters=filters)
        self.assertEqual(expected['url'], url)
        self.assertEqual(expected['token'], headers['X-Auth-Token'])
        self.assertEqual(expected['body'], body)

    def _auth_data_with_expiry(self, date_as_string):
        token, access = self.auth_provider.auth_data
        access['token']['expires'] = date_as_string
        return token, access

    def test_request(self):
        filters = {
            'service': 'compute',
            'endpoint_type': 'publicURL',
            'region': 'FakeRegion'
        }
        url = self._get_result_url_from_endpoint(
            self._endpoints[0]['endpoints'][1]) + '/' + self.target_url
        expected = {
            'body': None,
            'url': url,
            'token': self._get_token_from_fake_identity(),
        }
        self._test_request_helper(filters, expected)

    def test_request_with_alt_auth_cleans_alt(self):
        self.auth_provider.set_alt_auth_data(
            'body',
            (fake_identity.ALT_TOKEN, self._get_fake_alt_identity()))
        self.test_request()
        # Assert alt auth data is clear after it
        self.assertIsNone(self.auth_provider.alt_part)
        self.assertIsNone(self.auth_provider.alt_auth_data)

    def test_request_with_alt_part_without_alt_data(self):
        """
        Assert that when alt_part is defined, the corresponding original
        request element is kept the same.
        """
        filters = {
            'service': 'compute',
            'endpoint_type': 'publicURL',
            'region': 'fakeRegion'
        }
        self.auth_provider.set_alt_auth_data('url', None)
        url, headers, body = self.auth_provider.auth_request('GET',
                                                             self.target_url,
                                                             filters=filters)
        self.assertEqual(url, self.target_url)
        self.assertEqual(self._get_token_from_fake_identity(),
                         headers['X-Auth-Token'])
        # assertIsNone is the idiomatic form of assertEqual(body, None).
        self.assertIsNone(body)

    def test_request_with_bad_service(self):
        filters = {
            'service': 'BAD_SERVICE',
            'endpoint_type': 'publicURL',
            'region': 'fakeRegion'
        }
        self.assertRaises(exceptions.EndpointNotFound,
                          self.auth_provider.auth_request, 'GET',
                          self.target_url, filters=filters)

    def test_request_without_service(self):
        filters = {
            'service': None,
            'endpoint_type': 'publicURL',
            'region': 'fakeRegion'
        }
        self.assertRaises(exceptions.EndpointNotFound,
                          self.auth_provider.auth_request, 'GET',
                          self.target_url, filters=filters)

    def test_check_credentials_missing_attribute(self):
        for attr in ['username', 'password']:
            cred = copy.copy(self.credentials)
            del cred[attr]
            self.assertFalse(self.auth_provider.check_credentials(cred))

    def test_fill_credentials(self):
        self.auth_provider.fill_credentials()
        creds = self.auth_provider.credentials
        for attr in ['user_id', 'tenant_id']:
            self.assertEqual(self._get_from_fake_identity(attr),
                             getattr(creds, attr))

    def _test_base_url_helper(self, expected_url, filters,
                              auth_data=None):
        url = self.auth_provider.base_url(filters, auth_data)
        self.assertEqual(url, expected_url)

    def test_base_url(self):
        self.filters = {
            'service': 'compute',
            'endpoint_type': 'publicURL',
            'region': 'FakeRegion'
        }
        expected = self._get_result_url_from_endpoint(
            self._endpoints[0]['endpoints'][1])
        self._test_base_url_helper(expected, self.filters)

    def test_base_url_to_get_admin_endpoint(self):
        self.filters = {
            'service': 'compute',
            'endpoint_type': 'adminURL',
            'region': 'FakeRegion'
        }
        expected = self._get_result_url_from_endpoint(
            self._endpoints[0]['endpoints'][1], endpoint_type='adminURL')
        self._test_base_url_helper(expected, self.filters)

    def test_base_url_unknown_region(self):
        """
        Assure that if the region is unknown the first endpoint is returned.
        """
        self.filters = {
            'service': 'compute',
            'endpoint_type': 'publicURL',
            'region': 'AintNoBodyKnowThisRegion'
        }
        expected = self._get_result_url_from_endpoint(
            self._endpoints[0]['endpoints'][0])
        self._test_base_url_helper(expected, self.filters)

    def test_base_url_with_non_existent_service(self):
        self.filters = {
            'service': 'BAD_SERVICE',
            'endpoint_type': 'publicURL',
            'region': 'FakeRegion'
        }
        self.assertRaises(exceptions.EndpointNotFound,
                          self._test_base_url_helper, None, self.filters)

    def test_base_url_without_service(self):
        self.filters = {
            'endpoint_type': 'publicURL',
            'region': 'FakeRegion'
        }
        self.assertRaises(exceptions.EndpointNotFound,
                          self._test_base_url_helper, None, self.filters)

    def test_base_url_with_api_version_filter(self):
        self.filters = {
            'service': 'compute',
            'endpoint_type': 'publicURL',
            'region': 'FakeRegion',
            'api_version': 'v12'
        }
        expected = self._get_result_url_from_endpoint(
            self._endpoints[0]['endpoints'][1], replacement='v12')
        self._test_base_url_helper(expected, self.filters)

    def test_base_url_with_skip_path_filter(self):
        self.filters = {
            'service': 'compute',
            'endpoint_type': 'publicURL',
            'region': 'FakeRegion',
            'skip_path': True
        }
        expected = 'http://fake_url/'
        self._test_base_url_helper(expected, self.filters)

    def test_token_not_expired(self):
        expiry_data = datetime.datetime.utcnow() + datetime.timedelta(days=1)
        auth_data = self._auth_data_with_expiry(
            expiry_data.strftime(self.auth_provider.EXPIRY_DATE_FORMAT))
        self.assertFalse(self.auth_provider.is_expired(auth_data))

    def test_token_expired(self):
        expiry_data = datetime.datetime.utcnow() - datetime.timedelta(hours=1)
        auth_data = self._auth_data_with_expiry(
            expiry_data.strftime(self.auth_provider.EXPIRY_DATE_FORMAT))
        self.assertTrue(self.auth_provider.is_expired(auth_data))

    def test_token_not_expired_to_be_renewed(self):
        # Inside the renewal threshold a token counts as expired so it is
        # refreshed proactively.
        expiry_data = (datetime.datetime.utcnow() +
                       self.auth_provider.token_expiry_threshold / 2)
        auth_data = self._auth_data_with_expiry(
            expiry_data.strftime(self.auth_provider.EXPIRY_DATE_FORMAT))
        self.assertTrue(self.auth_provider.is_expired(auth_data))
class TestKeystoneV3AuthProvider(TestKeystoneV2AuthProvider):
    """Runs the whole v2 suite against the v3 provider; only the fake
    catalog data and token-format helpers differ."""
    _endpoints = fake_identity.IDENTITY_V3_RESPONSE['token']['catalog']
    _auth_provider_class = auth.KeystoneV3AuthProvider
    credentials = fake_credentials.FakeKeystoneV3Credentials()
    def setUp(self):
        super(TestKeystoneV3AuthProvider, self).setUp()
        self.stubs.Set(http.ClosingHttp, 'request',
                       fake_identity._fake_v3_response)
    def _get_fake_alt_identity(self):
        return fake_identity.ALT_IDENTITY_V3['token']
    def _get_result_url_from_endpoint(self, ep, replacement=None):
        # v3 catalog entries carry a single 'url' per endpoint, hence no
        # endpoint_type parameter here (unlike the v2 version it overrides).
        if replacement:
            return ep['url'].replace('v3', replacement)
        return ep['url']
    def _auth_data_with_expiry(self, date_as_string):
        # v3 keeps expiry at the top level ('expires_at'), not under 'token'.
        token, access = self.auth_provider.auth_data
        access['expires_at'] = date_as_string
        return token, access
    def _get_from_fake_identity(self, attr):
        token = fake_identity.IDENTITY_V3_RESPONSE['token']
        if attr == 'user_id':
            return token['user']['id']
        elif attr == 'project_id':
            return token['project']['id']
        elif attr == 'user_domain_id':
            return token['user']['domain']['id']
        elif attr == 'project_domain_id':
            return token['project']['domain']['id']
    def test_check_credentials_missing_attribute(self):
        # reset credentials to fresh ones
        self.credentials.reset()
        for attr in ['username', 'password', 'user_domain_name',
                     'project_domain_name']:
            cred = copy.copy(self.credentials)
            del cred[attr]
            self.assertFalse(self.auth_provider.check_credentials(cred),
                             "Credentials should be invalid without %s" % attr)
    def test_check_domain_credentials_missing_attribute(self):
        # reset credentials to fresh ones
        self.credentials.reset()
        domain_creds = fake_credentials.FakeKeystoneV3DomainCredentials()
        for attr in ['username', 'password', 'user_domain_name']:
            cred = copy.copy(domain_creds)
            del cred[attr]
            self.assertFalse(self.auth_provider.check_credentials(cred),
                             "Credentials should be invalid without %s" % attr)
    def test_fill_credentials(self):
        self.auth_provider.fill_credentials()
        creds = self.auth_provider.credentials
        for attr in ['user_id', 'project_id', 'user_domain_id',
                     'project_domain_id']:
            self.assertEqual(self._get_from_fake_identity(attr),
                             getattr(creds, attr))
    # Overwrites v2 test
    def test_base_url_to_get_admin_endpoint(self):
        self.filters = {
            'service': 'compute',
            'endpoint_type': 'admin',
            'region': 'MiddleEarthRegion'
        }
        expected = self._get_result_url_from_endpoint(
            self._endpoints[0]['endpoints'][2])
        self._test_base_url_helper(expected, self.filters)
tempest-2014.1.dev4108.gf22b6cc/tempest/tests/test_tenant_isolation.py 0000664 0001750 0001750 00000055654 12332757070 025722 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import keystoneclient.v2_0.client as keystoneclient
from mock import patch
import neutronclient.v2_0.client as neutronclient
from oslo.config import cfg
from tempest import clients
from tempest.common import http
from tempest.common import isolated_creds
from tempest import config
from tempest import exceptions
from tempest.openstack.common.fixture import mockpatch
from tempest.services.identity.json import identity_client as json_iden_client
from tempest.services.identity.xml import identity_client as xml_iden_client
from tempest.services.network.json import network_client as json_network_client
from tempest.services.network.xml import network_client as xml_network_client
from tempest.tests import base
from tempest.tests import fake_config
from tempest.tests import fake_http
from tempest.tests import fake_identity
class TestTenantIsolation(base.TestCase):
    def setUp(self):
        super(TestTenantIsolation, self).setUp()
        self.useFixture(fake_config.ConfigFixture())
        self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
        # All identity traffic is answered by the canned v2 fake responses.
        self.fake_http = fake_http.fake_httplib2(return_type=200)
        self.stubs.Set(http.ClosingHttp, 'request',
                       fake_identity._fake_v2_response)
    def test_tempest_client(self):
        # Default (tempest_client=True) wires up the JSON tempest clients.
        iso_creds = isolated_creds.IsolatedCreds('test class')
        self.assertTrue(isinstance(iso_creds.identity_admin_client,
                                   json_iden_client.IdentityClientJSON))
        self.assertTrue(isinstance(iso_creds.network_admin_client,
                                   json_network_client.NetworkClientJSON))
    def test_official_client(self):
        # Patch out every client that would hit the network so that only
        # the isolated-creds wiring is exercised.
        self.useFixture(mockpatch.PatchObject(keystoneclient.Client,
                                              'authenticate'))
        self.useFixture(mockpatch.PatchObject(clients.OfficialClientManager,
                                              '_get_image_client'))
        self.useFixture(mockpatch.PatchObject(clients.OfficialClientManager,
                                              '_get_object_storage_client'))
        self.useFixture(mockpatch.PatchObject(clients.OfficialClientManager,
                                              '_get_orchestration_client'))
        iso_creds = isolated_creds.IsolatedCreds('test class',
                                                 tempest_client=False)
        self.assertTrue(isinstance(iso_creds.identity_admin_client,
                                   keystoneclient.Client))
        self.assertTrue(isinstance(iso_creds.network_admin_client,
                                   neutronclient.Client))
    def test_tempest_client_xml(self):
        # interface='xml' selects the XML client implementations.
        iso_creds = isolated_creds.IsolatedCreds('test class', interface='xml')
        self.assertEqual(iso_creds.interface, 'xml')
        self.assertTrue(isinstance(iso_creds.identity_admin_client,
                                   xml_iden_client.IdentityClientXML))
        self.assertTrue(isinstance(iso_creds.network_admin_client,
                                   xml_network_client.NetworkClientXML))
    def _mock_user_create(self, id, name):
        # Returns the fixture so callers can cleanUp() early to re-mock.
        # NOTE(review): 'id' shadows the builtin; kept to preserve the
        # helper's keyword interface.
        user_fix = self.useFixture(mockpatch.PatchObject(
            json_iden_client.IdentityClientJSON,
            'create_user',
            return_value=({'status': 200},
                          {'id': id, 'name': name})))
        return user_fix
    def _mock_tenant_create(self, id, name):
        # Returns the fixture so callers can cleanUp() early to re-mock.
        tenant_fix = self.useFixture(mockpatch.PatchObject(
            json_iden_client.IdentityClientJSON,
            'create_tenant',
            return_value=({'status': 200},
                          {'id': id, 'name': name})))
        return tenant_fix
    def _mock_network_create(self, iso_creds, id, name):
        # Patch on the instance's client (not the class) so the mock
        # follows whichever network client iso_creds instantiated.
        net_fix = self.useFixture(mockpatch.PatchObject(
            iso_creds.network_admin_client,
            'create_network',
            return_value=({'status': 200},
                          {'network': {'id': id, 'name': name}})))
        return net_fix
    def _mock_subnet_create(self, iso_creds, id, name):
        # Patch on the instance's client, mirroring _mock_network_create.
        subnet_fix = self.useFixture(mockpatch.PatchObject(
            iso_creds.network_admin_client,
            'create_subnet',
            return_value=({'status': 200},
                          {'subnet': {'id': id, 'name': name}})))
        return subnet_fix
    def _mock_router_create(self, id, name):
        # Router creation is patched at the class level (JSON client).
        router_fix = self.useFixture(mockpatch.PatchObject(
            json_network_client.NetworkClientJSON,
            'create_router',
            return_value=({'status': 200},
                          {'router': {'id': id, 'name': name}})))
        return router_fix
    @patch('tempest.common.rest_client.RestClient')
    def test_primary_creds(self, MockRestClient):
        # Disable neutron so no network resources are provisioned.
        cfg.CONF.set_default('neutron', False, 'service_available')
        iso_creds = isolated_creds.IsolatedCreds('test class',
                                                 password='fake_password')
        self._mock_tenant_create('1234', 'fake_prim_tenant')
        self._mock_user_create('1234', 'fake_prim_user')
        primary_creds = iso_creds.get_primary_creds(old_style=False)
        self.assertEqual(primary_creds.username, 'fake_prim_user')
        self.assertEqual(primary_creds.tenant_name, 'fake_prim_tenant')
        # Verify helper methods
        tenant = iso_creds.get_primary_tenant()
        user = iso_creds.get_primary_user()
        self.assertEqual(tenant['id'], '1234')
        self.assertEqual(user['id'], '1234')
@patch('tempest.common.rest_client.RestClient')
def test_admin_creds(self, MockRestClient):
cfg.CONF.set_default('neutron', False, 'service_available')
iso_creds = isolated_creds.IsolatedCreds('test class',
password='fake_password')
self._mock_user_create('1234', 'fake_admin_user')
self._mock_tenant_create('1234', 'fake_admin_tenant')
self.useFixture(mockpatch.PatchObject(
json_iden_client.IdentityClientJSON,
'list_roles',
return_value=({'status': 200},
[{'id': '1234', 'name': 'admin'}])))
user_mock = patch.object(json_iden_client.IdentityClientJSON,
'assign_user_role')
user_mock.start()
self.addCleanup(user_mock.stop)
with patch.object(json_iden_client.IdentityClientJSON,
'assign_user_role') as user_mock:
admin_creds = iso_creds.get_admin_creds()
user_mock.assert_called_once_with('1234', '1234', '1234')
self.assertEqual(admin_creds.username, 'fake_admin_user')
self.assertEqual(admin_creds.tenant_name, 'fake_admin_tenant')
# Verify helper methods
tenant = iso_creds.get_admin_tenant()
user = iso_creds.get_admin_user()
self.assertEqual(tenant['id'], '1234')
self.assertEqual(user['id'], '1234')
@patch('tempest.common.rest_client.RestClient')
def test_all_cred_cleanup(self, MockRestClient):
cfg.CONF.set_default('neutron', False, 'service_available')
iso_creds = isolated_creds.IsolatedCreds('test class',
password='fake_password')
tenant_fix = self._mock_tenant_create('1234', 'fake_prim_tenant')
user_fix = self._mock_user_create('1234', 'fake_prim_user')
iso_creds.get_primary_creds(old_style=False)
tenant_fix.cleanUp()
user_fix.cleanUp()
tenant_fix = self._mock_tenant_create('12345', 'fake_alt_tenant')
user_fix = self._mock_user_create('12345', 'fake_alt_user')
iso_creds.get_alt_creds(old_style=False)
tenant_fix.cleanUp()
user_fix.cleanUp()
tenant_fix = self._mock_tenant_create('123456', 'fake_admin_tenant')
user_fix = self._mock_user_create('123456', 'fake_admin_user')
self.useFixture(mockpatch.PatchObject(
json_iden_client.IdentityClientJSON,
'list_roles',
return_value=({'status': 200},
[{'id': '123456', 'name': 'admin'}])))
with patch.object(json_iden_client.IdentityClientJSON,
'assign_user_role'):
iso_creds.get_admin_creds()
user_mock = self.patch(
'tempest.services.identity.json.identity_client.'
'IdentityClientJSON.delete_user')
tenant_mock = self.patch(
'tempest.services.identity.json.identity_client.'
'IdentityClientJSON.delete_tenant')
iso_creds.clear_isolated_creds()
# Verify user delete calls
calls = user_mock.mock_calls
self.assertEqual(len(calls), 3)
args = map(lambda x: x[1][0], calls)
self.assertIn('1234', args)
self.assertIn('12345', args)
self.assertIn('123456', args)
# Verify tenant delete calls
calls = tenant_mock.mock_calls
self.assertEqual(len(calls), 3)
args = map(lambda x: x[1][0], calls)
self.assertIn('1234', args)
self.assertIn('12345', args)
self.assertIn('123456', args)
    @patch('tempest.common.rest_client.RestClient')
    def test_alt_creds(self, MockRestClient):
        # Disable neutron so no network resources are provisioned.
        cfg.CONF.set_default('neutron', False, 'service_available')
        iso_creds = isolated_creds.IsolatedCreds('test class',
                                                 password='fake_password')
        self._mock_user_create('1234', 'fake_alt_user')
        self._mock_tenant_create('1234', 'fake_alt_tenant')
        alt_creds = iso_creds.get_alt_creds(old_style=False)
        self.assertEqual(alt_creds.username, 'fake_alt_user')
        self.assertEqual(alt_creds.tenant_name, 'fake_alt_tenant')
        # Verify helper methods
        tenant = iso_creds.get_alt_tenant()
        user = iso_creds.get_alt_user()
        self.assertEqual(tenant['id'], '1234')
        self.assertEqual(user['id'], '1234')
@patch('tempest.common.rest_client.RestClient')
def test_network_creation(self, MockRestClient):
iso_creds = isolated_creds.IsolatedCreds('test class',
password='fake_password')
self._mock_user_create('1234', 'fake_prim_user')
self._mock_tenant_create('1234', 'fake_prim_tenant')
self._mock_network_create(iso_creds, '1234', 'fake_net')
self._mock_subnet_create(iso_creds, '1234', 'fake_subnet')
self._mock_router_create('1234', 'fake_router')
router_interface_mock = self.patch(
'tempest.services.network.json.network_client.NetworkClientJSON.'
'add_router_interface_with_subnet_id')
iso_creds.get_primary_creds(old_style=False)
router_interface_mock.called_once_with('1234', '1234')
network = iso_creds.get_primary_network()
subnet = iso_creds.get_primary_subnet()
router = iso_creds.get_primary_router()
self.assertEqual(network['id'], '1234')
self.assertEqual(network['name'], 'fake_net')
self.assertEqual(subnet['id'], '1234')
self.assertEqual(subnet['name'], 'fake_subnet')
self.assertEqual(router['id'], '1234')
self.assertEqual(router['name'], 'fake_router')
@patch('tempest.common.rest_client.RestClient')
def test_network_cleanup(self, MockRestClient):
iso_creds = isolated_creds.IsolatedCreds('test class',
password='fake_password')
# Create primary tenant and network
user_fix = self._mock_user_create('1234', 'fake_prim_user')
tenant_fix = self._mock_tenant_create('1234', 'fake_prim_tenant')
net_fix = self._mock_network_create(iso_creds, '1234', 'fake_net')
subnet_fix = self._mock_subnet_create(iso_creds, '1234', 'fake_subnet')
router_fix = self._mock_router_create('1234', 'fake_router')
router_interface_mock = self.patch(
'tempest.services.network.json.network_client.NetworkClientJSON.'
'add_router_interface_with_subnet_id')
iso_creds.get_primary_creds(old_style=False)
router_interface_mock.called_once_with('1234', '1234')
router_interface_mock.reset_mock()
tenant_fix.cleanUp()
user_fix.cleanUp()
net_fix.cleanUp()
subnet_fix.cleanUp()
router_fix.cleanUp()
# Create alternate tenant and network
user_fix = self._mock_user_create('12345', 'fake_alt_user')
tenant_fix = self._mock_tenant_create('12345', 'fake_alt_tenant')
net_fix = self._mock_network_create(iso_creds, '12345', 'fake_alt_net')
subnet_fix = self._mock_subnet_create(iso_creds, '12345',
'fake_alt_subnet')
router_fix = self._mock_router_create('12345', 'fake_alt_router')
iso_creds.get_alt_creds(old_style=False)
router_interface_mock.called_once_with('12345', '12345')
router_interface_mock.reset_mock()
tenant_fix.cleanUp()
user_fix.cleanUp()
net_fix.cleanUp()
subnet_fix.cleanUp()
router_fix.cleanUp()
# Create admin tenant and networks
user_fix = self._mock_user_create('123456', 'fake_admin_user')
tenant_fix = self._mock_tenant_create('123456', 'fake_admin_tenant')
net_fix = self._mock_network_create(iso_creds, '123456',
'fake_admin_net')
subnet_fix = self._mock_subnet_create(iso_creds, '123456',
'fake_admin_subnet')
router_fix = self._mock_router_create('123456', 'fake_admin_router')
self.useFixture(mockpatch.PatchObject(
json_iden_client.IdentityClientJSON,
'list_roles',
return_value=({'status': 200},
[{'id': '123456', 'name': 'admin'}])))
with patch.object(json_iden_client.IdentityClientJSON,
'assign_user_role'):
iso_creds.get_admin_creds()
self.patch('tempest.services.identity.json.identity_client.'
'IdentityClientJSON.delete_user')
self.patch('tempest.services.identity.json.identity_client.'
'IdentityClientJSON.delete_tenant')
net = patch.object(iso_creds.network_admin_client,
'delete_network')
net_mock = net.start()
subnet = patch.object(iso_creds.network_admin_client,
'delete_subnet')
subnet_mock = subnet.start()
router = patch.object(iso_creds.network_admin_client,
'delete_router')
router_mock = router.start()
remove_router_interface_mock = self.patch(
'tempest.services.network.json.network_client.NetworkClientJSON.'
'remove_router_interface_with_subnet_id')
port_list_mock = patch.object(iso_creds.network_admin_client,
'list_ports', return_value=(
{'status': 200}, {'ports': []}))
port_list_mock.start()
iso_creds.clear_isolated_creds()
# Verify remove router interface calls
calls = remove_router_interface_mock.mock_calls
self.assertEqual(len(calls), 3)
args = map(lambda x: x[1], calls)
self.assertIn(('1234', '1234'), args)
self.assertIn(('12345', '12345'), args)
self.assertIn(('123456', '123456'), args)
# Verify network delete calls
calls = net_mock.mock_calls
self.assertEqual(len(calls), 3)
args = map(lambda x: x[1][0], calls)
self.assertIn('1234', args)
self.assertIn('12345', args)
self.assertIn('123456', args)
# Verify subnet delete calls
calls = subnet_mock.mock_calls
self.assertEqual(len(calls), 3)
args = map(lambda x: x[1][0], calls)
self.assertIn('1234', args)
self.assertIn('12345', args)
self.assertIn('123456', args)
# Verify router delete calls
calls = router_mock.mock_calls
self.assertEqual(len(calls), 3)
args = map(lambda x: x[1][0], calls)
self.assertIn('1234', args)
self.assertIn('12345', args)
self.assertIn('123456', args)
@patch('tempest.common.rest_client.RestClient')
def test_network_alt_creation(self, MockRestClient):
iso_creds = isolated_creds.IsolatedCreds('test class',
password='fake_password')
self._mock_user_create('1234', 'fake_alt_user')
self._mock_tenant_create('1234', 'fake_alt_tenant')
self._mock_network_create(iso_creds, '1234', 'fake_alt_net')
self._mock_subnet_create(iso_creds, '1234', 'fake_alt_subnet')
self._mock_router_create('1234', 'fake_alt_router')
router_interface_mock = self.patch(
'tempest.services.network.json.network_client.NetworkClientJSON.'
'add_router_interface_with_subnet_id')
iso_creds.get_alt_creds()
router_interface_mock.called_once_with('1234', '1234')
network = iso_creds.get_alt_network()
subnet = iso_creds.get_alt_subnet()
router = iso_creds.get_alt_router()
self.assertEqual(network['id'], '1234')
self.assertEqual(network['name'], 'fake_alt_net')
self.assertEqual(subnet['id'], '1234')
self.assertEqual(subnet['name'], 'fake_alt_subnet')
self.assertEqual(router['id'], '1234')
self.assertEqual(router['name'], 'fake_alt_router')
@patch('tempest.common.rest_client.RestClient')
def test_network_admin_creation(self, MockRestClient):
iso_creds = isolated_creds.IsolatedCreds('test class',
password='fake_password')
self._mock_user_create('1234', 'fake_admin_user')
self._mock_tenant_create('1234', 'fake_admin_tenant')
self._mock_network_create(iso_creds, '1234', 'fake_admin_net')
self._mock_subnet_create(iso_creds, '1234', 'fake_admin_subnet')
self._mock_router_create('1234', 'fake_admin_router')
router_interface_mock = self.patch(
'tempest.services.network.json.network_client.NetworkClientJSON.'
'add_router_interface_with_subnet_id')
self.useFixture(mockpatch.PatchObject(
json_iden_client.IdentityClientJSON,
'list_roles',
return_value=({'status': 200},
[{'id': '123456', 'name': 'admin'}])))
with patch.object(json_iden_client.IdentityClientJSON,
'assign_user_role'):
iso_creds.get_admin_creds()
router_interface_mock.called_once_with('1234', '1234')
network = iso_creds.get_admin_network()
subnet = iso_creds.get_admin_subnet()
router = iso_creds.get_admin_router()
self.assertEqual(network['id'], '1234')
self.assertEqual(network['name'], 'fake_admin_net')
self.assertEqual(subnet['id'], '1234')
self.assertEqual(subnet['name'], 'fake_admin_subnet')
self.assertEqual(router['id'], '1234')
self.assertEqual(router['name'], 'fake_admin_router')
@patch('tempest.common.rest_client.RestClient')
def test_no_network_resources(self, MockRestClient):
net_dict = {
'network': False,
'router': False,
'subnet': False,
'dhcp': False,
}
iso_creds = isolated_creds.IsolatedCreds('test class',
password='fake_password',
network_resources=net_dict)
self._mock_user_create('1234', 'fake_prim_user')
self._mock_tenant_create('1234', 'fake_prim_tenant')
net = patch.object(iso_creds.network_admin_client,
'delete_network')
net_mock = net.start()
subnet = patch.object(iso_creds.network_admin_client,
'delete_subnet')
subnet_mock = subnet.start()
router = patch.object(iso_creds.network_admin_client,
'delete_router')
router_mock = router.start()
iso_creds.get_primary_creds()
self.assertEqual(net_mock.mock_calls, [])
self.assertEqual(subnet_mock.mock_calls, [])
self.assertEqual(router_mock.mock_calls, [])
network = iso_creds.get_primary_network()
subnet = iso_creds.get_primary_subnet()
router = iso_creds.get_primary_router()
self.assertIsNone(network)
self.assertIsNone(subnet)
self.assertIsNone(router)
@patch('tempest.common.rest_client.RestClient')
def test_router_without_network(self, MockRestClient):
net_dict = {
'network': False,
'router': True,
'subnet': False,
'dhcp': False,
}
iso_creds = isolated_creds.IsolatedCreds('test class',
password='fake_password',
network_resources=net_dict)
self._mock_user_create('1234', 'fake_prim_user')
self._mock_tenant_create('1234', 'fake_prim_tenant')
self.assertRaises(exceptions.InvalidConfiguration,
iso_creds.get_primary_creds)
@patch('tempest.common.rest_client.RestClient')
def test_subnet_without_network(self, MockRestClient):
net_dict = {
'network': False,
'router': False,
'subnet': True,
'dhcp': False,
}
iso_creds = isolated_creds.IsolatedCreds('test class',
password='fake_password',
network_resources=net_dict)
self._mock_user_create('1234', 'fake_prim_user')
self._mock_tenant_create('1234', 'fake_prim_tenant')
self.assertRaises(exceptions.InvalidConfiguration,
iso_creds.get_primary_creds)
@patch('tempest.common.rest_client.RestClient')
def test_dhcp_without_subnet(self, MockRestClient):
net_dict = {
'network': False,
'router': False,
'subnet': False,
'dhcp': True,
}
iso_creds = isolated_creds.IsolatedCreds('test class',
password='fake_password',
network_resources=net_dict)
self._mock_user_create('1234', 'fake_prim_user')
self._mock_tenant_create('1234', 'fake_prim_tenant')
self.assertRaises(exceptions.InvalidConfiguration,
iso_creds.get_primary_creds)
tempest-2014.1.dev4108.gf22b6cc/tempest/tests/test_ssh.py 0000664 0001750 0001750 00000016216 12332757070 023134 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import socket
import mock
import testtools
from tempest.common import ssh
from tempest import exceptions
from tempest.tests import base
class TestSshClient(base.TestCase):
    """Unit tests for tempest.common.ssh.Client with paramiko mocked out."""

    def test_pkey_calls_paramiko_RSAKey(self):
        """A string pkey is wrapped in StringIO and loaded via RSAKey;
        a non-string pkey is passed through untouched.
        """
        with contextlib.nested(
            mock.patch('paramiko.RSAKey.from_private_key'),
            mock.patch('cStringIO.StringIO')) as (rsa_mock, cs_mock):
            cs_mock.return_value = mock.sentinel.csio
            pkey = 'mykey'
            ssh.Client('localhost', 'root', pkey=pkey)
            rsa_mock.assert_called_once_with(mock.sentinel.csio)
            cs_mock.assert_called_once_with('mykey')
            rsa_mock.reset_mock()
            # BUG FIX: this read cs_mock.rest_mock() — a typo that just
            # created a child mock instead of resetting the call record.
            cs_mock.reset_mock()
            pkey = mock.sentinel.pkey
            # Shouldn't call out to load a file from RSAKey, since
            # a sentinel isn't a basestring...
            ssh.Client('localhost', 'root', pkey=pkey)
            # BUG FIX: assert_not_called() is not a real assertion on
            # this mock version (it silently passes whatever happened);
            # verify via call_count instead.
            self.assertEqual(0, rsa_mock.call_count)
            self.assertEqual(0, cs_mock.call_count)

    def test_get_ssh_connection(self):
        """_get_ssh_connection retries on socket errors with growing
        sleeps and raises SSHTimeout once the budget is exhausted.
        """
        c_mock = self.patch('paramiko.SSHClient')
        aa_mock = self.patch('paramiko.AutoAddPolicy')
        s_mock = self.patch('time.sleep')
        t_mock = self.patch('time.time')
        aa_mock.return_value = mock.sentinel.aa

        def reset_mocks():
            aa_mock.reset_mock()
            c_mock.reset_mock()
            s_mock.reset_mock()
            t_mock.reset_mock()
        # Test normal case for successful connection on first try
        client_mock = mock.MagicMock()
        c_mock.return_value = client_mock
        client_mock.connect.return_value = True
        client = ssh.Client('localhost', 'root', timeout=2)
        client._get_ssh_connection(sleep=1)
        aa_mock.assert_called_once_with()
        client_mock.set_missing_host_key_policy.assert_called_once_with(
            mock.sentinel.aa)
        expected_connect = [mock.call(
            'localhost',
            username='root',
            pkey=None,
            key_filename=None,
            look_for_keys=False,
            timeout=10.0,
            password=None
        )]
        self.assertEqual(expected_connect, client_mock.connect.mock_calls)
        # BUG FIX: s_mock.assert_not_called() silently passed on this
        # mock version; check call_count explicitly.
        self.assertEqual(0, s_mock.call_count)
        t_mock.assert_called_once_with()
        reset_mocks()
        # Test case when connection fails on first two tries and
        # succeeds on third try (this validates retry logic)
        client_mock.connect.side_effect = [socket.error, socket.error, True]
        t_mock.side_effect = [
            1000,  # Start time
            1000,  # LOG.warning() calls time.time() loop 1
            1001,  # Sleep loop 1
            1001,  # LOG.warning() calls time.time() loop 2
            1002   # Sleep loop 2
        ]
        client._get_ssh_connection(sleep=1)
        expected_sleeps = [
            mock.call(2),
            mock.call(3)
        ]
        self.assertEqual(expected_sleeps, s_mock.mock_calls)
        reset_mocks()
        # Test case when connection fails on first three tries and
        # exceeds the timeout, so expect to raise a Timeout exception
        client_mock.connect.side_effect = [
            socket.error,
            socket.error,
            socket.error
        ]
        t_mock.side_effect = [
            1000,  # Start time
            1000,  # LOG.warning() calls time.time() loop 1
            1001,  # Sleep loop 1
            1001,  # LOG.warning() calls time.time() loop 2
            1002,  # Sleep loop 2
            1003,  # Sleep loop 3
            1004   # LOG.error() calls time.time()
        ]
        with testtools.ExpectedException(exceptions.SSHTimeout):
            client._get_ssh_connection()

    def test_exec_command(self):
        """exec_command raises TimeoutException when the channel never
        becomes readable, and reads stdout/stderr to completion when
        it does.
        """
        gsc_mock = self.patch('tempest.common.ssh.Client._get_ssh_connection')
        ito_mock = self.patch('tempest.common.ssh.Client._is_timed_out')
        select_mock = self.patch('select.poll')
        client_mock = mock.MagicMock()
        tran_mock = mock.MagicMock()
        chan_mock = mock.MagicMock()
        poll_mock = mock.MagicMock()

        def reset_mocks():
            gsc_mock.reset_mock()
            ito_mock.reset_mock()
            select_mock.reset_mock()
            poll_mock.reset_mock()
            client_mock.reset_mock()
            tran_mock.reset_mock()
            chan_mock.reset_mock()
        select_mock.return_value = poll_mock
        gsc_mock.return_value = client_mock
        ito_mock.return_value = True
        client_mock.get_transport.return_value = tran_mock
        tran_mock.open_session.return_value = chan_mock
        # poll() reports nothing readable; _is_timed_out is True.
        poll_mock.poll.side_effect = [
            [0, 0, 0]
        ]
        # Test for a timeout condition immediately raised
        client = ssh.Client('localhost', 'root', timeout=2)
        with testtools.ExpectedException(exceptions.TimeoutException):
            client.exec_command("test")
        chan_mock.fileno.assert_called_once_with()
        chan_mock.exec_command.assert_called_once_with("test")
        chan_mock.shutdown_write.assert_called_once_with()
        SELECT_POLLIN = 1
        poll_mock.register.assert_called_once_with(chan_mock, SELECT_POLLIN)
        poll_mock.poll.assert_called_once_with(10)
        # Test for proper reading of STDOUT and STDERROR and closing
        # of all file descriptors.
        reset_mocks()
        select_mock.return_value = poll_mock
        gsc_mock.return_value = client_mock
        ito_mock.return_value = False
        client_mock.get_transport.return_value = tran_mock
        tran_mock.open_session.return_value = chan_mock
        poll_mock.poll.side_effect = [
            [1, 0, 0]
        ]
        closed_prop = mock.PropertyMock(return_value=True)
        type(chan_mock).closed = closed_prop
        chan_mock.recv_exit_status.return_value = 0
        chan_mock.recv.return_value = ''
        chan_mock.recv_stderr.return_value = ''
        client = ssh.Client('localhost', 'root', timeout=2)
        client.exec_command("test")
        chan_mock.fileno.assert_called_once_with()
        chan_mock.exec_command.assert_called_once_with("test")
        chan_mock.shutdown_write.assert_called_once_with()
        SELECT_POLLIN = 1
        poll_mock.register.assert_called_once_with(chan_mock, SELECT_POLLIN)
        poll_mock.poll.assert_called_once_with(10)
        chan_mock.recv_ready.assert_called_once_with()
        chan_mock.recv.assert_called_once_with(1024)
        chan_mock.recv_stderr_ready.assert_called_once_with()
        chan_mock.recv_stderr.assert_called_once_with(1024)
        chan_mock.recv_exit_status.assert_called_once_with()
        closed_prop.assert_called_once_with()
tempest-2014.1.dev4108.gf22b6cc/tempest/tests/fake_auth_provider.py 0000664 0001750 0001750 00000002060 12332757070 025131 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.tests import fake_credentials
def get_default_credentials(credential_type, fill_in=True):
    """Return a FakeCredentials object; both arguments are ignored."""
    creds = fake_credentials.FakeCredentials()
    return creds
def get_credentials(credential_type=None, fill_in=True, **kwargs):
    """Return a FakeCredentials object; all arguments are ignored."""
    creds = fake_credentials.FakeCredentials()
    return creds
class FakeAuthProvider(object):
    """Auth provider stub that performs no authentication and echoes
    the request parts back unchanged.
    """

    def auth_request(self, method, url, headers=None, body=None, filters=None):
        # No credentials or filters are applied to the request.
        return (url, headers, body)
tempest-2014.1.dev4108.gf22b6cc/tempest/tests/fake_config.py 0000664 0001750 0001750 00000005232 12332757070 023527 0 ustar chuck chuck 0000000 0000000 # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from oslo.config import cfg
from tempest import config
from tempest.openstack.common.fixture import config as conf_fixture
from tempest.openstack.common import importutils
class ConfigFixture(conf_fixture.Config):
    """Fixture that registers tempest's config options and overrides
    selected defaults with fake values suitable for unit tests.
    """
    def __init__(self):
        config.register_opts()
        # Register locking options
        importutils.import_module('tempest.openstack.common.lockutils')
        super(ConfigFixture, self).__init__()
    def setUp(self):
        """Install fake defaults so no real services/files are needed."""
        super(ConfigFixture, self).setUp()
        self.conf.set_default('build_interval', 10, group='compute')
        self.conf.set_default('build_timeout', 10, group='compute')
        self.conf.set_default('disable_ssl_certificate_validation', True,
                              group='identity')
        self.conf.set_default('uri', 'http://fake_uri.com/auth',
                              group='identity')
        self.conf.set_default('uri_v3', 'http://fake_uri_v3.com/auth',
                              group='identity')
        self.conf.set_default('neutron', True, group='service_available')
        self.conf.set_default('heat', True, group='service_available')
        # Ensure the lock directory exists.  NOTE(review): if
        # OS_TEST_LOCK_PATH is unset, str(None) makes this create and
        # use a literal './None' directory — presumably the env var is
        # always set by the test runner; confirm.
        if not os.path.exists(str(os.environ.get('OS_TEST_LOCK_PATH'))):
            os.mkdir(str(os.environ.get('OS_TEST_LOCK_PATH')))
        self.conf.set_default('lock_path',
                              str(os.environ.get('OS_TEST_LOCK_PATH')))
        self.conf.set_default('auth_version', 'v2', group='identity')
        for config_option in ['username', 'password', 'tenant_name']:
            # Identity group items
            for prefix in ['', 'alt_', 'admin_']:
                self.conf.set_default(prefix + config_option,
                                      'fake_' + config_option,
                                      group='identity')
            # Compute Admin group items
            self.conf.set_default(config_option, 'fake_' + config_option,
                                  group='compute-admin')
class FakePrivate(config.TempestConfigPrivate):
    """Private config object that skips reading any tempest.conf file."""
    def __init__(self):
        # Parse an empty argument list with no config files, so only
        # registered defaults (and fixture overrides) take effect.
        cfg.CONF([], default_config_files=[])
        self._set_attrs()
tempest-2014.1.dev4108.gf22b6cc/tempest/tests/files/ 0000775 0001750 0001750 00000000000 12332757136 022025 5 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/tests/files/testr-conf 0000664 0001750 0001750 00000000265 12332757070 024034 0 ustar chuck chuck 0000000 0000000 [DEFAULT]
test_command=${PYTHON:-python} -m subunit.run discover -t ./ ./tests $LISTOPT $IDOPTION
test_id_option=--load-list $IDFILE
test_list_option=--list
group_regex=([^\.]*\.)*
tempest-2014.1.dev4108.gf22b6cc/tempest/tests/files/setup.cfg 0000664 0001750 0001750 00000001142 12332757070 023641 0 ustar chuck chuck 0000000 0000000 [metadata]
name = tempest_unit_tests
version = 1
summary = Fake Project for testing wrapper scripts
author = OpenStack
author-email = openstack-dev@lists.openstack.org
home-page = http://www.openstack.org/
classifier =
Intended Audience :: Information Technology
Intended Audience :: System Administrators
Intended Audience :: Developers
License :: OSI Approved :: Apache Software License
Operating System :: POSIX :: Linux
Programming Language :: Python
Programming Language :: Python :: 2
Programming Language :: Python :: 2.7
[global]
setup-hooks =
pbr.hooks.setup_hook
tempest-2014.1.dev4108.gf22b6cc/tempest/tests/files/passing-tests 0000664 0001750 0001750 00000001473 12332757070 024556 0 ustar chuck chuck 0000000 0000000 # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
class FakeTestClass(testtools.TestCase):
    """Trivial always-passing tests, used as fixture input by the
    wrapper-script tests.
    """

    def test_pass(self):
        # Unconditionally succeeds.
        self.assertTrue(True)

    def test_pass_list(self):
        items = ['test', 'a', 'b']
        self.assertIn('test', items)
tempest-2014.1.dev4108.gf22b6cc/tempest/tests/files/__init__.py 0000664 0001750 0001750 00000000000 12332757070 024121 0 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/tests/files/failing-tests 0000664 0001750 0001750 00000001474 12332757070 024524 0 ustar chuck chuck 0000000 0000000 # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testtools
class FakeTestClass(testtools.TestCase):
    """Deliberately failing tests, used as fixture input by the
    wrapper-script tests — both methods MUST fail.
    """

    def test_pass(self):
        # Unconditionally fails (on purpose).
        self.assertTrue(False)

    def test_pass_list(self):
        items = ['test', 'a', 'b']
        self.assertIn('fail', items)
tempest-2014.1.dev4108.gf22b6cc/tempest/tests/test_rest_client.py 0000664 0001750 0001750 00000052215 12332757070 024651 0 ustar chuck chuck 0000000 0000000 # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib2
import json
from tempest.common import rest_client
from tempest.common import xml_utils as xml
from tempest import config
from tempest import exceptions
from tempest.openstack.common.fixture import mockpatch
from tempest.tests import base
from tempest.tests import fake_auth_provider
from tempest.tests import fake_config
from tempest.tests import fake_http
class BaseRestClientTestClass(base.TestCase):
    """Shared setUp for RestClient tests: fake config, a fake httplib2
    transport and a RestClient wired to a fake auth provider.

    Subclasses must assign self.fake_http *before* calling this setUp.
    """
    url = 'fake_endpoint'
    def _get_region(self):
        return 'fake region'
    def setUp(self):
        super(BaseRestClientTestClass, self).setUp()
        self.useFixture(fake_config.ConfigFixture())
        self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
        self.rest_client = rest_client.RestClient(
            fake_auth_provider.FakeAuthProvider())
        self.stubs.Set(httplib2.Http, 'request', self.fake_http.request)
        # NOTE(review): side_effect receives the *string* returned by
        # _get_region(); mock treats a string side_effect as an iterable
        # and yields one character per call.  return_value (or
        # side_effect=self._get_region) was probably intended — confirm
        # before changing, as tests currently pass with this behavior.
        self.useFixture(mockpatch.PatchObject(self.rest_client, '_get_region',
                                              side_effect=self._get_region()))
        self.useFixture(mockpatch.PatchObject(self.rest_client,
                                              '_log_request'))
class TestRestClientHTTPMethods(BaseRestClientTestClass):
    """Each HTTP verb helper must issue a request with that verb."""

    def setUp(self):
        # The base setUp stubs httplib2 with self.fake_http, so the
        # fake transport has to exist before calling it.
        self.fake_http = fake_http.fake_httplib2()
        super(TestRestClientHTTPMethods, self).setUp()
        self.useFixture(mockpatch.PatchObject(self.rest_client,
                                              '_error_checker'))

    def test_post(self):
        _, body = self.rest_client.post(self.url, {}, {})
        self.assertEqual('POST', body['method'])

    def test_get(self):
        _, body = self.rest_client.get(self.url)
        self.assertEqual('GET', body['method'])

    def test_delete(self):
        _, body = self.rest_client.delete(self.url)
        self.assertEqual('DELETE', body['method'])

    def test_patch(self):
        _, body = self.rest_client.patch(self.url, {}, {})
        self.assertEqual('PATCH', body['method'])

    def test_put(self):
        _, body = self.rest_client.put(self.url, {}, {})
        self.assertEqual('PUT', body['method'])

    def test_head(self):
        # HEAD responses go through response_checker, which must be
        # silenced for the fake transport.
        self.useFixture(mockpatch.PatchObject(self.rest_client,
                                              'response_checker'))
        _, body = self.rest_client.head(self.url)
        self.assertEqual('HEAD', body['method'])

    def test_copy(self):
        _, body = self.rest_client.copy(self.url)
        self.assertEqual('COPY', body['method'])
class TestRestClientNotFoundHandling(BaseRestClientTestClass):
    """A 404 from the transport must surface as exceptions.NotFound."""
    def setUp(self):
        # Fake transport that answers every request with HTTP 404.
        self.fake_http = fake_http.fake_httplib2(404)
        super(TestRestClientNotFoundHandling, self).setUp()
    def test_post(self):
        self.assertRaises(exceptions.NotFound, self.rest_client.post,
                          self.url, {}, {})
class TestRestClientHeadersJSON(TestRestClientHTTPMethods):
    """Check the default Accept/Content-Type headers for the JSON
    content type on every verb (subclassed for XML via TYPE).
    """
    TYPE = "json"
    def _verify_headers(self, resp):
        """Assert both negotiation headers are application/<TYPE>."""
        self.assertEqual(self.rest_client._get_type(), self.TYPE)
        # Header name lookups must be case-insensitive.
        resp = dict((k.lower(), v) for k, v in resp.iteritems())
        self.assertEqual(self.header_value, resp['accept'])
        self.assertEqual(self.header_value, resp['content-type'])
    def setUp(self):
        super(TestRestClientHeadersJSON, self).setUp()
        self.rest_client.TYPE = self.TYPE
        self.header_value = 'application/%s' % self.rest_client._get_type()
    def test_post(self):
        resp, __ = self.rest_client.post(self.url, {})
        self._verify_headers(resp)
    def test_get(self):
        resp, __ = self.rest_client.get(self.url)
        self._verify_headers(resp)
    def test_delete(self):
        resp, __ = self.rest_client.delete(self.url)
        self._verify_headers(resp)
    def test_patch(self):
        resp, __ = self.rest_client.patch(self.url, {})
        self._verify_headers(resp)
    def test_put(self):
        resp, __ = self.rest_client.put(self.url, {})
        self._verify_headers(resp)
    def test_head(self):
        # HEAD responses go through response_checker; silence it.
        self.useFixture(mockpatch.PatchObject(self.rest_client,
                                              'response_checker'))
        resp, __ = self.rest_client.head(self.url)
        self._verify_headers(resp)
    def test_copy(self):
        resp, __ = self.rest_client.copy(self.url)
        self._verify_headers(resp)
class TestRestClientUpdateHeaders(BaseRestClientTestClass):
    """User headers passed with extra_headers=True must be merged with
    the client's default JSON negotiation headers on every verb.
    """

    # Subset of headers expected on every outgoing request: the
    # caller's session header plus the client defaults.
    EXPECTED_HEADERS = {'X-Configuration-Session': 'session_id',
                        'Content-Type': 'application/json',
                        'Accept': 'application/json'}

    def setUp(self):
        self.fake_http = fake_http.fake_httplib2()
        super(TestRestClientUpdateHeaders, self).setUp()
        self.useFixture(mockpatch.PatchObject(self.rest_client,
                                              '_error_checker'))
        self.headers = {'X-Configuration-Session': 'session_id'}

    def _check_headers(self, return_dict):
        # Common assertion shared by every verb test below.
        self.assertDictContainsSubset(self.EXPECTED_HEADERS,
                                      return_dict['headers'])

    def test_post_update_headers(self):
        _, return_dict = self.rest_client.post(self.url, {},
                                               extra_headers=True,
                                               headers=self.headers)
        self._check_headers(return_dict)

    def test_get_update_headers(self):
        _, return_dict = self.rest_client.get(self.url,
                                              extra_headers=True,
                                              headers=self.headers)
        self._check_headers(return_dict)

    def test_delete_update_headers(self):
        _, return_dict = self.rest_client.delete(self.url,
                                                 extra_headers=True,
                                                 headers=self.headers)
        self._check_headers(return_dict)

    def test_patch_update_headers(self):
        _, return_dict = self.rest_client.patch(self.url, {},
                                                extra_headers=True,
                                                headers=self.headers)
        self._check_headers(return_dict)

    def test_put_update_headers(self):
        _, return_dict = self.rest_client.put(self.url, {},
                                              extra_headers=True,
                                              headers=self.headers)
        self._check_headers(return_dict)

    def test_head_update_headers(self):
        # HEAD responses go through response_checker; silence it.
        self.useFixture(mockpatch.PatchObject(self.rest_client,
                                              'response_checker'))
        _, return_dict = self.rest_client.head(self.url,
                                               extra_headers=True,
                                               headers=self.headers)
        self._check_headers(return_dict)

    def test_copy_update_headers(self):
        _, return_dict = self.rest_client.copy(self.url,
                                               extra_headers=True,
                                               headers=self.headers)
        self._check_headers(return_dict)
class TestRestClientHeadersXML(TestRestClientHeadersJSON):
    """Re-run the header tests with the XML content type, plus mixed
    send/accept combinations.
    """
    TYPE = "xml"
    # These two tests are needed in one exemplar
    def test_send_json_accept_xml(self):
        """Sending JSON while accepting XML sets both headers correctly."""
        resp, __ = self.rest_client.get(self.url,
                                        self.rest_client.get_headers("xml",
                                                                     "json"))
        # Header lookups must be case-insensitive.
        resp = dict((k.lower(), v) for k, v in resp.iteritems())
        self.assertEqual("application/json", resp["content-type"])
        self.assertEqual("application/xml", resp["accept"])
    def test_send_xml_accept_json(self):
        """Sending XML while accepting JSON sets both headers correctly."""
        resp, __ = self.rest_client.get(self.url,
                                        self.rest_client.get_headers("json",
                                                                     "xml"))
        resp = dict((k.lower(), v) for k, v in resp.iteritems())
        self.assertEqual("application/json", resp["accept"])
        self.assertEqual("application/xml", resp["content-type"])
class TestRestClientParseRespXML(BaseRestClientTestClass):
    """Check RestClient._parse_resp against XML bodies built from the
    shared key/value fixtures below (subclassed for JSON via TYPE).
    """
    TYPE = "xml"
    # Shared fixture data; the expected structures are derived from
    # these key/value pairs.
    keys = ["fake_key1", "fake_key2"]
    values = ["fake_value1", "fake_value2"]
    item_expected = dict((key, value) for (key, value) in zip(keys, values))
    list_expected = {"body_list": [
        {keys[0]: values[0]},
        {keys[1]: values[1]},
    ]}
    dict_expected = {"body_dict": {
        keys[0]: values[0],
        keys[1]: values[1],
    }}
    def setUp(self):
        self.fake_http = fake_http.fake_httplib2()
        super(TestRestClientParseRespXML, self).setUp()
        self.rest_client.TYPE = self.TYPE
    def test_parse_resp_body_item(self):
        """A single element parses into a flat attribute dict."""
        body_item = xml.Element("item", **self.item_expected)
        body = self.rest_client._parse_resp(str(xml.Document(body_item)))
        self.assertEqual(self.item_expected, body)
    def test_parse_resp_body_list(self):
        """Children under a registered list tag parse into a list."""
        self.rest_client.list_tags = ["fake_list", ]
        body_list = xml.Element(self.rest_client.list_tags[0])
        for i in range(2):
            body_list.append(xml.Element("fake_item",
                                         **self.list_expected["body_list"][i]))
        body = self.rest_client._parse_resp(str(xml.Document(body_list)))
        self.assertEqual(self.list_expected["body_list"], body)
    def test_parse_resp_body_dict(self):
        """Keyed children under a registered dict tag parse into a dict."""
        self.rest_client.dict_tags = ["fake_dict", ]
        body_dict = xml.Element(self.rest_client.dict_tags[0])
        for i in range(2):
            body_dict.append(xml.Element("fake_item", xml.Text(self.values[i]),
                                         key=self.keys[i]))
        body = self.rest_client._parse_resp(str(xml.Document(body_dict)))
        self.assertEqual(self.dict_expected["body_dict"], body)
class TestRestClientParseRespJSON(TestRestClientParseRespXML):
    """Re-run the _parse_resp checks with JSON bodies, plus JSON-only
    corner cases (two top-level keys, non-container value).
    """
    TYPE = "json"
    def test_parse_resp_body_item(self):
        body = self.rest_client._parse_resp(json.dumps(self.item_expected))
        self.assertEqual(self.item_expected, body)
    def test_parse_resp_body_list(self):
        # A single top-level key holding a list is unwrapped to the list.
        body = self.rest_client._parse_resp(json.dumps(self.list_expected))
        self.assertEqual(self.list_expected["body_list"], body)
    def test_parse_resp_body_dict(self):
        # A single top-level key holding a dict is unwrapped to the dict.
        body = self.rest_client._parse_resp(json.dumps(self.dict_expected))
        self.assertEqual(self.dict_expected["body_dict"], body)
    def test_parse_resp_two_top_keys(self):
        # More than one top-level key: the body is returned unchanged.
        dict_two_keys = self.dict_expected.copy()
        dict_two_keys.update({"second_key": ""})
        body = self.rest_client._parse_resp(json.dumps(dict_two_keys))
        self.assertEqual(dict_two_keys, body)
    def test_parse_resp_one_top_key_without_list_or_dict(self):
        # A single key holding a scalar is NOT unwrapped.
        data = {"one_top_key": "not_list_or_dict_value"}
        body = self.rest_client._parse_resp(json.dumps(data))
        self.assertEqual(data, body)
class TestRestClientErrorCheckerJSON(base.TestCase):
    """Map every HTTP error status through RestClient._error_checker to
    the tempest exception callers are expected to catch.
    """
    c_type = "application/json"
    def set_data(self, r_code, enc=None, r_body=None):
        """Build the kwargs _error_checker expects for a fake response.

        :param r_code: HTTP status code, as a string
        :param enc: content-type to report (defaults to self.c_type)
        :param r_body: optional response body override
        """
        if enc is None:
            enc = self.c_type
        resp_dict = {'status': r_code, 'content-type': enc}
        resp = httplib2.Response(resp_dict)
        data = {
            "method": "fake_method",
            "url": "fake_url",
            "headers": "fake_headers",
            "body": "fake_body",
            "resp": resp,
            "resp_body": '{"resp_body": "fake_resp_body"}',
        }
        if r_body is not None:
            data.update({"resp_body": r_body})
        return data
    def setUp(self):
        super(TestRestClientErrorCheckerJSON, self).setUp()
        self.useFixture(fake_config.ConfigFixture())
        self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
        self.rest_client = rest_client.RestClient(
            fake_auth_provider.FakeAuthProvider())
    def test_response_less_than_400(self):
        # Sub-400 statuses must not raise.
        self.rest_client._error_checker(**self.set_data("399"))
    def test_response_400(self):
        self.assertRaises(exceptions.BadRequest,
                          self.rest_client._error_checker,
                          **self.set_data("400"))
    def test_response_401(self):
        self.assertRaises(exceptions.Unauthorized,
                          self.rest_client._error_checker,
                          **self.set_data("401"))
    def test_response_403(self):
        # 403 is also mapped to Unauthorized, not a separate exception.
        self.assertRaises(exceptions.Unauthorized,
                          self.rest_client._error_checker,
                          **self.set_data("403"))
    def test_response_404(self):
        self.assertRaises(exceptions.NotFound,
                          self.rest_client._error_checker,
                          **self.set_data("404"))
    def test_response_409(self):
        self.assertRaises(exceptions.Conflict,
                          self.rest_client._error_checker,
                          **self.set_data("409"))
    def test_response_413(self):
        self.assertRaises(exceptions.OverLimit,
                          self.rest_client._error_checker,
                          **self.set_data("413"))
    def test_response_422(self):
        self.assertRaises(exceptions.UnprocessableEntity,
                          self.rest_client._error_checker,
                          **self.set_data("422"))
    def test_response_500_with_text(self):
        # _parse_resp is expected to return 'str'
        self.assertRaises(exceptions.ServerFault,
                          self.rest_client._error_checker,
                          **self.set_data("500"))
    def test_response_501_with_text(self):
        self.assertRaises(exceptions.ServerFault,
                          self.rest_client._error_checker,
                          **self.set_data("501"))
    def test_response_500_with_dict(self):
        r_body = '{"resp_body": {"err": "fake_resp_body"}}'
        self.assertRaises(exceptions.ServerFault,
                          self.rest_client._error_checker,
                          **self.set_data("500", r_body=r_body))
    def test_response_501_with_dict(self):
        r_body = '{"resp_body": {"err": "fake_resp_body"}}'
        self.assertRaises(exceptions.ServerFault,
                          self.rest_client._error_checker,
                          **self.set_data("501", r_body=r_body))
    def test_response_bigger_than_400(self):
        # Any response code, that bigger than 400, and not in
        # (401, 403, 404, 409, 413, 422, 500, 501)
        self.assertRaises(exceptions.UnexpectedResponseCode,
                          self.rest_client._error_checker,
                          **self.set_data("402"))
class TestRestClientErrorCheckerXML(TestRestClientErrorCheckerJSON):
    # Re-run the inherited status-code matrix with an XML content type.
    c_type = "application/xml"
class TestRestClientErrorCheckerTEXT(TestRestClientErrorCheckerJSON):
    """Re-run the inherited status-code matrix for plain-text responses."""
    c_type = "text/plain"

    def test_fake_content_type(self):
        # This test is required only in one exemplar
        # Any response code, that bigger than 400, and not in
        # (401, 403, 404, 409, 413, 422, 500, 501)
        data = self.set_data("405", enc="fake_enc")
        self.assertRaises(exceptions.InvalidContentType,
                          self.rest_client._error_checker,
                          **data)
class TestRestClientUtils(BaseRestClientTestClass):
    """Tests for wait_for_resource_deletion retry and timeout behaviour."""

    def _is_resource_deleted(self, resource_id):
        # Fake deletion probe: report "deleted" only after retry_pass
        # polls have happened; a non-int retry_pass means "never deleted".
        if not isinstance(self.retry_pass, int):
            return False
        deleted = self.retry_count >= self.retry_pass
        if not deleted:
            self.retry_count += 1
        return deleted

    def setUp(self):
        self.fake_http = fake_http.fake_httplib2()
        super(TestRestClientUtils, self).setUp()
        self.retry_count = 0
        self.retry_pass = None
        # Keep the stock implementation around so one test can restore it.
        self.original_deleted_method = self.rest_client.is_resource_deleted
        self.rest_client.is_resource_deleted = self._is_resource_deleted

    def test_wait_for_resource_deletion(self):
        self.retry_pass = 2
        # Timeout far larger than the expected number of polls so the
        # loop terminates via the deletion probe, not the clock.
        self.rest_client.build_timeout = 500
        sleep_mock = self.patch('time.sleep')
        self.rest_client.wait_for_resource_deletion('1234')
        self.assertEqual(2, len(sleep_mock.mock_calls))

    def test_wait_for_resource_deletion_not_deleted(self):
        self.patch('time.sleep')
        # A tiny timeout forces the TimeoutException quickly.
        self.rest_client.build_timeout = 1
        self.assertRaises(exceptions.TimeoutException,
                          self.rest_client.wait_for_resource_deletion,
                          '1234')

    def test_wait_for_deletion_with_unimplemented_deleted_method(self):
        # With the stock (unimplemented) probe restored, waiting must
        # surface NotImplementedError.
        self.rest_client.is_resource_deleted = self.original_deleted_method
        self.assertRaises(NotImplementedError,
                          self.rest_client.wait_for_resource_deletion,
                          '1234')
class TestNegativeRestClient(BaseRestClientTestClass):
    """Check NegativeRestClient.send_request dispatches every HTTP verb."""

    def setUp(self):
        self.fake_http = fake_http.fake_httplib2()
        super(TestNegativeRestClient, self).setUp()
        self.negative_rest_client = rest_client.NegativeRestClient(
            fake_auth_provider.FakeAuthProvider())
        self.useFixture(mockpatch.PatchObject(self.negative_rest_client,
                                              '_log_request'))

    def _send_and_check(self, method, with_body=False):
        """Send ``method`` to self.url and assert it was dispatched.

        :param method: HTTP verb to exercise
        :param with_body: also pass an (empty) body dict, as the verbs
            that carry a request body (POST/PUT/PATCH) require
        """
        args = [method, self.url, []]
        if with_body:
            args.append({})
        __, return_dict = self.negative_rest_client.send_request(*args)
        self.assertEqual(method, return_dict['method'])

    def test_post(self):
        self._send_and_check('POST', with_body=True)

    def test_get(self):
        self._send_and_check('GET')

    def test_delete(self):
        self._send_and_check('DELETE')

    def test_patch(self):
        self._send_and_check('PATCH', with_body=True)

    def test_put(self):
        self._send_and_check('PUT', with_body=True)

    def test_head(self):
        # HEAD responses carry no body, so the response checker must be
        # stubbed out for the fake transport.
        self.useFixture(mockpatch.PatchObject(self.negative_rest_client,
                                              'response_checker'))
        self._send_and_check('HEAD')

    def test_copy(self):
        self._send_and_check('COPY')

    def test_other(self):
        # Unknown verbs are rejected with an assertion.
        self.assertRaises(AssertionError,
                          self.negative_rest_client.send_request,
                          'OTHER', self.url, [])
tempest-2014.1.dev4108.gf22b6cc/tempest/tests/cmd/ 0000775 0001750 0001750 00000000000 12332757136 021466 5 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/tests/cmd/__init__.py 0000664 0001750 0001750 00000000000 12332757070 023562 0 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/tests/cmd/test_verify_tempest_config.py 0000664 0001750 0001750 00000046074 12332757070 027501 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import mock
from tempest.cmd import verify_tempest_config
from tempest import config
from tempest.openstack.common.fixture import mockpatch
from tempest.tests import base
from tempest.tests import fake_config
class TestGetAPIVersions(base.TestCase):
    """Tests for stripping the version suffix off a service endpoint URL."""

    def test_url_grab_versioned_nova_nossl(self):
        result = verify_tempest_config._get_unversioned_endpoint(
            'http://127.0.0.1:8774/v2/')
        self.assertEqual('http://127.0.0.1:8774', result)

    def test_url_grab_versioned_nova_ssl(self):
        result = verify_tempest_config._get_unversioned_endpoint(
            'https://127.0.0.1:8774/v3/')
        self.assertEqual('https://127.0.0.1:8774', result)
class TestDiscovery(base.TestCase):
    """Tests for verify_tempest_config's API and extension discovery.

    The original tests repeated the same fixture stanzas ~20 times; the
    shared machinery is factored into ``_stub_service_response``,
    ``_assert_api_versions``, ``_assert_config_update``,
    ``_assert_glance_config_update``, ``_make_fake_os`` and the two
    ``_test_verify_extensions*`` bodies.  All public test names and
    their observable behaviour are unchanged.
    """

    def setUp(self):
        super(TestDiscovery, self).setUp()
        self.useFixture(fake_config.ConfigFixture())
        self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)

    def _stub_service_response(self, body):
        # Point discovery at a fake unversioned endpoint and can the raw
        # HTTP response body it will receive.
        self.useFixture(mockpatch.PatchObject(
            verify_tempest_config, '_get_unversioned_endpoint',
            return_value='http://fake_endpoint:5000'))
        self.useFixture(mockpatch.PatchObject(
            verify_tempest_config.RAW_HTTP, 'request',
            return_value=(None, json.dumps(body))))

    def _assert_api_versions(self, service, body, expected_versions):
        # Run _get_api_versions against a canned response and check the
        # parsed version ids are all reported.
        self._stub_service_response(body)
        fake_os = mock.MagicMock()
        versions = verify_tempest_config._get_api_versions(fake_os, service)
        for version in expected_versions:
            self.assertIn(version, versions)

    def test_get_keystone_api_versions(self):
        # Keystone wraps its version list in a 'values' sub-key.
        body = {'versions': {'values': [{'id': 'v2.0'}, {'id': 'v3.0'}]}}
        self._assert_api_versions('keystone', body, ['v2.0', 'v3.0'])

    def test_get_cinder_api_versions(self):
        body = {'versions': [{'id': 'v1.0'}, {'id': 'v2.0'}]}
        self._assert_api_versions('cinder', body, ['v1.0', 'v2.0'])

    def test_get_nova_versions(self):
        body = {'versions': [{'id': 'v2.0'}, {'id': 'v3.0'}]}
        self._assert_api_versions('nova', body, ['v2.0', 'v3.0'])

    def _assert_config_update(self, verify_func, body, expected_args):
        # Run a verify_*_api_versions helper and check the config update
        # it reports through print_and_or_update.
        self._stub_service_response(body)
        fake_os = mock.MagicMock()
        with mock.patch.object(verify_tempest_config,
                               'print_and_or_update') as print_mock:
            verify_func(fake_os, True)
        print_mock.assert_called_once_with(*expected_args)

    def test_verify_keystone_api_versions_no_v3(self):
        self._assert_config_update(
            verify_tempest_config.verify_keystone_api_versions,
            {'versions': {'values': [{'id': 'v2.0'}]}},
            ('api_v3', 'identity_feature_enabled', False, True))

    def test_verify_keystone_api_versions_no_v2(self):
        self._assert_config_update(
            verify_tempest_config.verify_keystone_api_versions,
            {'versions': {'values': [{'id': 'v3.0'}]}},
            ('api_v2', 'identity_feature_enabled', False, True))

    def test_verify_cinder_api_versions_no_v2(self):
        self._assert_config_update(
            verify_tempest_config.verify_cinder_api_versions,
            {'versions': [{'id': 'v1.0'}]},
            ('api_v2', 'volume_feature_enabled', False, True))

    def test_verify_cinder_api_versions_no_v1(self):
        self._assert_config_update(
            verify_tempest_config.verify_cinder_api_versions,
            {'versions': [{'id': 'v2.0'}]},
            ('api_v1', 'volume_feature_enabled', False, True))

    def test_verify_nova_versions(self):
        self._assert_config_update(
            verify_tempest_config.verify_nova_api_versions,
            {'versions': [{'id': 'v2.0'}]},
            ('api_v3', 'compute_feature_enabled', False, True))

    def _assert_glance_config_update(self, found_versions, expected_flag):
        # Glance discovery goes through image_client.get_versions rather
        # than the raw HTTP path used by the other services.
        fake_os = mock.MagicMock()
        fake_os.image_client.get_versions = lambda: (None, found_versions)
        with mock.patch.object(verify_tempest_config,
                               'print_and_or_update') as print_mock:
            verify_tempest_config.verify_glance_api_versions(fake_os, True)
        print_mock.assert_called_once_with(expected_flag,
                                           'image_feature_enabled',
                                           False, True)

    def test_verify_glance_version_no_v2_with_v1_1(self):
        self._assert_glance_config_update(['v1.1'], 'api_v2')

    def test_verify_glance_version_no_v2_with_v1_0(self):
        self._assert_glance_config_update(['v1.0'], 'api_v2')

    def test_verify_glance_version_no_v1(self):
        self._assert_glance_config_update(['v2.0'], 'api_v1')

    def _make_fake_os(self, service):
        # Build a mock openstack manager whose service client returns a
        # canned extension listing for ``service``.
        fake_os = mock.MagicMock()
        if service == 'swift':
            # Swift capabilities come back as a flat metadata dict that
            # includes the 'swift' meta key.
            def fake_list_extensions():
                return (None, {'fake1': 'metadata',
                               'fake2': 'metadata',
                               'not_fake': 'metadata',
                               'swift': 'metadata'})
            fake_os.account_client.list_extensions = fake_list_extensions
            return fake_os
        # Neutron names its extensions by 'alias'; the others by 'name'.
        key = 'alias' if service == 'neutron' else 'name'

        def fake_list_extensions():
            return (None, {'extensions': [{key: 'fake1'},
                                          {key: 'fake2'},
                                          {key: 'not_fake'}]})
        client = {'neutron': fake_os.network_client,
                  'cinder': fake_os.volumes_extension_client,
                  'nova': fake_os.extensions_client,
                  'nova_v3': fake_os.extensions_v3_client}[service]
        client.list_extensions = fake_list_extensions
        return fake_os

    def _enable_extensions(self, extensions):
        self.useFixture(mockpatch.PatchObject(
            verify_tempest_config, 'get_enabled_extensions',
            return_value=extensions))

    def _test_verify_extensions(self, service):
        # Common body for the per-extension-flag tests: fake1/fake2 are
        # enabled and discovered, fake3 is enabled but not discovered,
        # not_fake is discovered but not enabled.
        fake_os = self._make_fake_os(service)
        self._enable_extensions(['fake1', 'fake2', 'fake3'])
        results = verify_tempest_config.verify_extensions(fake_os,
                                                          service, {})
        self.assertIn(service, results)
        for ext in ('fake1', 'fake2'):
            self.assertIn(ext, results[service])
            self.assertTrue(results[service][ext])
        for ext in ('fake3', 'not_fake'):
            self.assertIn(ext, results[service])
            self.assertFalse(results[service][ext])

    def _test_verify_extensions_all(self, service, expected):
        # Common body for the 'all' tests: with every extension enabled,
        # the discovered names are reported verbatim.
        fake_os = self._make_fake_os(service)
        self._enable_extensions(['all'])
        results = verify_tempest_config.verify_extensions(fake_os,
                                                          service, {})
        self.assertIn(service, results)
        self.assertIn('extensions', results[service])
        self.assertEqual(expected, results[service]['extensions'])

    def test_verify_extensions_neutron(self):
        self._test_verify_extensions('neutron')

    def test_verify_extensions_neutron_all(self):
        self._test_verify_extensions_all('neutron',
                                         ['fake1', 'fake2', 'not_fake'])

    def test_verify_extensions_cinder(self):
        self._test_verify_extensions('cinder')

    def test_verify_extensions_cinder_all(self):
        self._test_verify_extensions_all('cinder',
                                         ['fake1', 'fake2', 'not_fake'])

    def test_verify_extensions_nova(self):
        self._test_verify_extensions('nova')

    def test_verify_extensions_nova_all(self):
        self._test_verify_extensions_all('nova',
                                         ['fake1', 'fake2', 'not_fake'])

    def test_verify_extensions_nova_v3(self):
        self._test_verify_extensions('nova_v3')

    def test_verify_extensions_nova_v3_all(self):
        self._test_verify_extensions_all('nova_v3',
                                         ['fake1', 'fake2', 'not_fake'])

    def test_verify_extensions_swift(self):
        self._test_verify_extensions('swift')

    def test_verify_extensions_swift_all(self):
        # NOTE: the expected ordering reflects the dict iteration order
        # of the fake capabilities payload (minus the 'swift' meta key),
        # exactly as the original test asserted.
        self._test_verify_extensions_all('swift',
                                         ['not_fake', 'fake1', 'fake2'])
tempest-2014.1.dev4108.gf22b6cc/tempest/tests/__init__.py 0000664 0001750 0001750 00000000000 12332757070 023017 0 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/tests/test_credentials.py 0000664 0001750 0001750 00000022471 12332757070 024634 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo.config import cfg
from tempest import auth
from tempest.common import http
from tempest.common import tempest_fixtures as fixtures
from tempest import config
from tempest import exceptions
from tempest.tests import base
from tempest.tests import fake_config
from tempest.tests import fake_http
from tempest.tests import fake_identity
class CredentialsTests(base.TestCase):
    """Base tests for the abstract auth.Credentials class."""

    attributes = {}
    credentials_class = auth.Credentials

    def _get_credentials(self, attributes=None):
        # Build a credentials object from explicit attributes, or from
        # the class-level defaults when none are given.
        attrs = self.attributes if attributes is None else attributes
        return self.credentials_class(**attrs)

    def setUp(self):
        super(CredentialsTests, self).setUp()
        self.fake_http = fake_http.fake_httplib2(return_type=200)
        self.stubs.Set(http.ClosingHttp, 'request', self.fake_http.request)
        self.useFixture(fake_config.ConfigFixture())
        self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)

    def test_create(self):
        creds = self._get_credentials()
        self.assertEqual(self.attributes, creds._initial)

    def test_create_invalid_attr(self):
        # Unknown keyword arguments must be rejected.
        self.assertRaises(exceptions.InvalidCredentials,
                          self._get_credentials,
                          attributes=dict(invalid='fake'))

    def test_default(self):
        self.useFixture(fixtures.LockFixture('auth_version'))
        # The abstract base class cannot build default credentials for
        # any credential type.
        for ctype in self.credentials_class.TYPES:
            self.assertRaises(NotImplementedError,
                              self.credentials_class.get_default,
                              credentials_type=ctype)

    def test_invalid_default(self):
        self.assertRaises(exceptions.InvalidCredentials,
                          auth.Credentials.get_default,
                          credentials_type='invalid_type')

    def test_is_valid(self):
        # is_valid is abstract on the base class.
        creds = self._get_credentials()
        self.assertRaises(NotImplementedError, creds.is_valid)
class KeystoneV2CredentialsTests(CredentialsTests):
    """Tests for KeystoneV2Credentials against a fake v2 identity API."""

    attributes = {
        'username': 'fake_username',
        'password': 'fake_password',
        'tenant_name': 'fake_tenant_name'
    }

    identity_response = fake_identity._fake_v2_response
    credentials_class = auth.KeystoneV2Credentials

    def setUp(self):
        super(KeystoneV2CredentialsTests, self).setUp()
        self.stubs.Set(http.ClosingHttp, 'request', self.identity_response)
        self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)

    def _verify_credentials(self, credentials_class, filled=True,
                            creds_dict=None):
        def _check(credentials):
            # Check the right version of credentials has been returned
            self.assertIsInstance(credentials, credentials_class)
            # The *_id attributes (except domain_id) are populated only
            # when fill_in was requested.
            id_attrs = [a for a in credentials.ATTRIBUTES
                        if '_id' in a and a != 'domain_id']
            for attr in id_attrs:
                value = getattr(credentials, attr)
                if filled:
                    self.assertIsNotNone(value)
                else:
                    self.assertIsNone(value)

        if creds_dict is None:
            for ctype in auth.Credentials.TYPES:
                _check(auth.get_default_credentials(credential_type=ctype,
                                                    fill_in=filled))
        else:
            _check(auth.get_credentials(fill_in=filled, **creds_dict))

    def test_get_default_credentials(self):
        self.useFixture(fixtures.LockFixture('auth_version'))
        self._verify_credentials(credentials_class=self.credentials_class)

    def test_get_credentials(self):
        self.useFixture(fixtures.LockFixture('auth_version'))
        self._verify_credentials(credentials_class=self.credentials_class,
                                 creds_dict=self.attributes)

    def test_get_credentials_not_filled(self):
        self.useFixture(fixtures.LockFixture('auth_version'))
        self._verify_credentials(credentials_class=self.credentials_class,
                                 filled=False,
                                 creds_dict=self.attributes)

    def test_is_valid(self):
        self.assertTrue(self._get_credentials().is_valid())

    def test_is_not_valid(self):
        creds = self._get_credentials()
        # Removing any required attribute must invalidate the credentials.
        for attr in self.attributes.keys():
            delattr(creds, attr)
            self.assertFalse(creds.is_valid(),
                             "Credentials should be invalid without %s" % attr)

    def test_default(self):
        self.useFixture(fixtures.LockFixture('auth_version'))
        for ctype in self.credentials_class.TYPES:
            creds = self.credentials_class.get_default(credentials_type=ctype)
            # Default configuration values related to credentials
            # are defined as fake_* in fake_config.py
            for attr in self.attributes.keys():
                self.assertEqual('fake_' + attr, getattr(creds, attr))

    def _unset_attributes(self, creds):
        # Attributes declared on the class but not yet set on the object.
        return set(creds.ATTRIBUTES).difference(creds.__dict__.keys())

    def test_reset_all_attributes(self):
        creds = self._get_credentials()
        initial_creds = copy.deepcopy(creds)
        # Set every unset attribute, then reset once.
        for attr in self._unset_attributes(creds):
            setattr(creds, attr, 'fake' + attr)
        creds.reset()
        # Check reset credentials are same as initial ones
        self.assertEqual(initial_creds, creds)

    def test_reset_single_attribute(self):
        creds = self._get_credentials()
        initial_creds = copy.deepcopy(creds)
        # Set one unset attribute at a time and reset after each.
        for attr in self._unset_attributes(creds):
            setattr(creds, attr, 'fake' + attr)
            creds.reset()
            # Check reset credentials are same as initial ones
            self.assertEqual(initial_creds, creds)
class KeystoneV3CredentialsTests(KeystoneV2CredentialsTests):
    """Tests for KeystoneV3Credentials against a fake v3 identity API."""

    attributes = {
        'username': 'fake_username',
        'password': 'fake_password',
        'project_name': 'fake_project_name',
        'user_domain_name': 'fake_domain_name'
    }

    credentials_class = auth.KeystoneV3Credentials
    identity_response = fake_identity._fake_v3_response

    def setUp(self):
        super(KeystoneV3CredentialsTests, self).setUp()
        # Additional config items reset by cfg fixture after each test
        cfg.CONF.set_default('auth_version', 'v3', group='identity')
        # Identity group items
        for prefix in ['', 'alt_', 'admin_']:
            cfg.CONF.set_default(prefix + 'domain_name', 'fake_domain_name',
                                 group='identity')
        # Compute Admin group items
        cfg.CONF.set_default('domain_name', 'fake_domain_name',
                             group='compute-admin')

    def test_default(self):
        self.useFixture(fixtures.LockFixture('auth_version'))
        for ctype in self.credentials_class.TYPES:
            creds = self.credentials_class.get_default(credentials_type=ctype)
            for attr in self.attributes.keys():
                if attr == 'project_name':
                    # v3 project_name is fed from the v2 tenant option.
                    config_value = 'fake_tenant_name'
                elif attr == 'user_domain_name':
                    config_value = 'fake_domain_name'
                else:
                    config_value = 'fake_' + attr
                self.assertEqual(getattr(creds, attr), config_value)

    def test_synced_attributes(self):
        # BUG FIX: work on a copy so the class-level ``attributes`` dict
        # is not mutated, which would leak the extra keys into every
        # other test of this class.
        attributes = copy.deepcopy(self.attributes)
        # Create V3 credentials with tenant instead of project, and user_domain
        for attr in ['project_id', 'user_domain_id']:
            attributes[attr] = 'fake_' + attr
        creds = self._get_credentials(attributes)
        self.assertEqual(creds.project_name, creds.tenant_name)
        self.assertEqual(creds.project_id, creds.tenant_id)
        self.assertEqual(creds.user_domain_name, creds.project_domain_name)
        self.assertEqual(creds.user_domain_id, creds.project_domain_id)
        # Replace user_domain with project_domain
        del attributes['user_domain_name']
        del attributes['user_domain_id']
        del attributes['project_name']
        del attributes['project_id']
        for attr in ['project_domain_name', 'project_domain_id',
                     'tenant_name', 'tenant_id']:
            attributes[attr] = 'fake_' + attr
        # BUG FIX: rebuild the credentials so the reverse sync direction
        # (tenant/project_domain -> project/user_domain) is actually
        # exercised; the original asserted against the stale object.
        creds = self._get_credentials(attributes)
        self.assertEqual(creds.tenant_name, creds.project_name)
        self.assertEqual(creds.tenant_id, creds.project_id)
        self.assertEqual(creds.project_domain_name, creds.user_domain_name)
        self.assertEqual(creds.project_domain_id, creds.user_domain_id)
tempest-2014.1.dev4108.gf22b6cc/tempest/tests/test_compute_xml_common.py 0000664 0001750 0001750 00000005546 12332757070 026247 0 ustar chuck chuck 0000000 0000000 # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from lxml import etree
from tempest.common import xml_utils as common
from tempest.tests import base
class TestXMLParser(base.TestCase):
    """Tests for xml_utils.xml_to_json value coercion.

    NOTE(review): the XML string literals in this file were mangled
    (element tags stripped, leaving only the text nodes), so
    ``etree.fromstring`` could never have parsed them.  They are
    reconstructed here from the surviving text values and assertions —
    confirm the exact tag names against the upstream file.
    """

    def test_xml_to_json_parser_bool_value(self):
        # 'False'/'True' text must coerce to real booleans.
        node = etree.fromstring('''<body>
                <admin_state_up>False</admin_state_up>
                <fake_state_up>True</fake_state_up>
                </body>''')
        body = common.xml_to_json(node)
        self.assertEqual(body['admin_state_up'], False)
        self.assertEqual(body['fake_state_up'], True)

    def test_xml_to_json_parser_int_value(self):
        # Numeric text must coerce to numbers.
        node = etree.fromstring('''<body>
                <delay>4</delay>
                <max_retries>3</max_retries>
                </body>''')
        body = common.xml_to_json(node)
        self.assertEqual(body['delay'], 4)
        self.assertEqual(body['max_retries'], 3)

    def test_xml_to_json_parser_text_value(self):
        # Non-numeric, non-boolean text stays a plain string.
        node = etree.fromstring('''<body>
                <status>ACTIVE</status>
                </body>''')
        body = common.xml_to_json(node)
        self.assertEqual(body['status'], 'ACTIVE')

    def test_xml_to_json_parser_list_as_value(self):
        # With a plurality hint the repeated children are collected
        # into a list under the plural key.
        node = etree.fromstring('''<body>
                <elements>
                <element>first_element</element>
                <element>second_element</element>
                </elements>
                </body>''')
        body = common.xml_to_json(node, 'elements')
        self.assertEqual(body['elements'], ['first_element', 'second_element'])
tempest-2014.1.dev4108.gf22b6cc/tempest/tests/test_hacking.py 0000664 0001750 0001750 00000011420 12332757070 023733 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 Matthew Treinish
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.hacking import checks
from tempest.tests import base
class HackingTestCase(base.TestCase):
    """Unit tests for tempest's local flake8 checks (tempest.hacking.checks).

    Each test calls a check function directly with hand-crafted arguments,
    emulating how the pep8/flake8 parser hands each line of a file to every
    registered check.  A check returns None/False for clean input and a
    (position, message) tuple on a violation, so a violation is asserted
    with assertTrue and a clean line with assertFalse or assertIsNone.
    """

    def test_no_setupclass_for_unit_tests(self):
        unit_path = './tempest/tests/fake_test.py'
        api_path = './tempest/api/fake_test.py'
        # setUpClass inside the unit-test tree is a violation...
        self.assertTrue(checks.no_setupclass_for_unit_tests(
            " def setUpClass(cls):", unit_path))
        # ...unless explicitly waived with a noqa marker...
        self.assertIsNone(checks.no_setupclass_for_unit_tests(
            " def setUpClass(cls): # noqa", unit_path))
        # ...and is always fine outside tempest/tests.
        self.assertFalse(checks.no_setupclass_for_unit_tests(
            " def setUpClass(cls):", api_path))

    def test_import_no_clients_in_api(self):
        # Importing a python-*client is banned in tempest/api but allowed
        # in the scenario tests.
        for client in checks.PYTHON_CLIENTS:
            import_line = "import " + client + "client"
            self.assertTrue(checks.import_no_clients_in_api(
                import_line, './tempest/api/fake_test.py'))
            self.assertFalse(checks.import_no_clients_in_api(
                import_line, './tempest/scenario/fake_test.py'))

    def test_scenario_tests_need_service_tags(self):
        scenario_path = './tempest/scenario/test_fake.py'
        # A scenario test preceded by a @test.services decorator is clean.
        self.assertFalse(checks.scenario_tests_need_service_tags(
            'def test_fake:', scenario_path,
            "@test.services('compute')"))
        # Non-scenario tests never need the tag.
        self.assertFalse(checks.scenario_tests_need_service_tags(
            'def test_fake_test:', './tempest/api/compute/test_fake.py',
            "@test.services('image')"))
        # A scenario test with no decorator line at all is a violation.
        self.assertTrue(checks.scenario_tests_need_service_tags(
            'def test_fake_test:', scenario_path,
            '\n'))

    def test_no_vi_headers(self):
        # NOTE(mtreinish) The lines parameter is used only for finding the
        # line location in the file. So these tests just pass a list of an
        # arbitrary length to use for verifying the check function.
        vi_line = '# vim: tabstop=4 shiftwidth=4 softtabstop=4'
        # vi modelines are flagged near the top and bottom of a file...
        self.assertTrue(checks.no_vi_headers(vi_line, 1, range(250)))
        self.assertTrue(checks.no_vi_headers(vi_line, 249, range(250)))
        # ...but tolerated in the middle.
        self.assertFalse(checks.no_vi_headers(vi_line, 149, range(250)))

    def test_service_tags_not_in_module_path(self):
        compute_tag = "@test.services('compute')"
        # Tagging a test with the service its module path already names is
        # redundant and therefore a violation...
        self.assertTrue(checks.service_tags_not_in_module_path(
            compute_tag, './tempest/api/compute/fake_test.py'))
        # ...but the scenario tree is exempt, and a tag naming a different
        # service than the path is fine.
        self.assertFalse(checks.service_tags_not_in_module_path(
            compute_tag,
            './tempest/scenario/compute/fake_test.py'))
        self.assertFalse(checks.service_tags_not_in_module_path(
            compute_tag, './tempest/api/image/fake_test.py'))
tempest-2014.1.dev4108.gf22b6cc/tempest/tests/README.rst 0000664 0001750 0001750 00000002005 12332757070 022404 0 ustar chuck chuck 0000000 0000000 Tempest Field Guide to Unit tests
=================================
What are these tests?
---------------------
Unit tests are the self checks for Tempest. They provide functional
verification and regression checking for the internal components of tempest.
They should be used to just verify that the individual pieces of tempest are
working as expected. They should not require an external service to be running
and should be able to run solely from the tempest tree.
Why are these tests in tempest?
-------------------------------
These tests exist to make sure that the mechanisms that we use inside of
tempest are valid and remain functional. They are only here for self
validation of tempest.
Scope of these tests
--------------------
Unit tests should not require an external service to be running or any extra
configuration to run. Any state that is required for a test should either be
mocked out or created in a temporary test directory. (see test_wrappers.py for
an example of using a temporary test directory)
tempest-2014.1.dev4108.gf22b6cc/tempest/tests/fake_credentials.py 0000664 0001750 0001750 00000003602 12332757070 024556 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import auth
class FakeCredentials(auth.Credentials):
    """Bare credentials stub that always reports itself as valid."""

    def is_valid(self):
        # Unconditionally valid: lets tests bypass real credential checks.
        return True
class FakeKeystoneV2Credentials(auth.KeystoneV2Credentials):
    """Canned credentials suitable for the Keystone Identity V2 API."""

    def __init__(self):
        # Fixed fake values; no dict indirection needed.
        super(FakeKeystoneV2Credentials, self).__init__(
            username='fake_username',
            password='fake_password',
            tenant_name='fake_tenant_name')
class FakeKeystoneV3Credentials(auth.KeystoneV3Credentials):
    """Fake project-scoped credentials for the Keystone Identity V3 API."""

    def __init__(self):
        # Fixed fake values; no dict indirection needed.
        super(FakeKeystoneV3Credentials, self).__init__(
            username='fake_username',
            password='fake_password',
            user_domain_name='fake_domain_name',
            project_name='fake_tenant_name')
class FakeKeystoneV3DomainCredentials(auth.KeystoneV3Credentials):
    """Fake Keystone Identity V3 credentials carrying no scope at all."""

    def __init__(self):
        # Same fake user as the scoped variant, but without a project.
        super(FakeKeystoneV3DomainCredentials, self).__init__(
            username='fake_username',
            password='fake_password',
            user_domain_name='fake_domain_name')
tempest-2014.1.dev4108.gf22b6cc/tempest/tests/test_commands.py 0000664 0001750 0001750 00000006433 12332757070 024140 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import subprocess
from tempest.common import commands
from tempest.tests import base
class TestCommands(base.TestCase):
    """Tests for tempest.common.commands.

    Every command helper shells out via subprocess.Popen; here Popen is
    patched, so each test only verifies that the helper builds the
    expected sudo/ip/iptables argv and passes the standard pipe arguments,
    without actually executing anything.
    """

    def setUp(self):
        super(TestCommands, self).setUp()
        # All helpers are expected to capture stdout and fold stderr into it.
        self.subprocess_args = {'stdout': subprocess.PIPE,
                                'stderr': subprocess.STDOUT}

    # NOTE: each patched parameter is named mock_popen (not "mock") so it
    # does not shadow the imported mock module.
    @mock.patch('subprocess.Popen')
    def test_ip_addr_raw(self, mock_popen):
        expected = ['/usr/bin/sudo', '-n', 'ip', 'a']
        commands.ip_addr_raw()
        mock_popen.assert_called_once_with(expected, **self.subprocess_args)

    @mock.patch('subprocess.Popen')
    def test_ip_route_raw(self, mock_popen):
        expected = ['/usr/bin/sudo', '-n', 'ip', 'r']
        commands.ip_route_raw()
        mock_popen.assert_called_once_with(expected, **self.subprocess_args)

    @mock.patch('subprocess.Popen')
    def test_ip_ns_raw(self, mock_popen):
        expected = ['/usr/bin/sudo', '-n', 'ip', 'netns', 'list']
        commands.ip_ns_raw()
        mock_popen.assert_called_once_with(expected, **self.subprocess_args)

    @mock.patch('subprocess.Popen')
    def test_iptables_raw(self, mock_popen):
        table = 'filter'
        expected = ['/usr/bin/sudo', '-n', 'iptables', '-v', '-S', '-t',
                    '%s' % table]
        commands.iptables_raw(table)
        mock_popen.assert_called_once_with(expected, **self.subprocess_args)

    @mock.patch('subprocess.Popen')
    def test_ip_ns_list(self, mock_popen):
        expected = ['/usr/bin/sudo', '-n', 'ip', 'netns', 'list']
        commands.ip_ns_list()
        mock_popen.assert_called_once_with(expected, **self.subprocess_args)

    @mock.patch('subprocess.Popen')
    def test_ip_ns_addr(self, mock_popen):
        # NOTE(review): ip_ns_list() runs under the same Popen mock, so
        # ns_list is presumably a mock value and the loop may never
        # execute -- TODO confirm and feed a canned namespace list instead.
        ns_list = commands.ip_ns_list()
        for ns in ns_list:
            expected = ['/usr/bin/sudo', '-n', 'ip', 'netns', 'exec', ns,
                        'ip', 'a']
            commands.ip_ns_addr(ns)
            mock_popen.assert_called_once_with(expected,
                                               **self.subprocess_args)

    @mock.patch('subprocess.Popen')
    def test_ip_ns_route(self, mock_popen):
        # NOTE(review): same caveat as test_ip_ns_addr about the mocked
        # ip_ns_list() return value.
        ns_list = commands.ip_ns_list()
        for ns in ns_list:
            expected = ['/usr/bin/sudo', '-n', 'ip', 'netns', 'exec', ns,
                        'ip', 'r']
            commands.ip_ns_route(ns)
            mock_popen.assert_called_once_with(expected,
                                               **self.subprocess_args)

    @mock.patch('subprocess.Popen')
    def test_iptables_ns(self, mock_popen):
        table = 'filter'
        # NOTE(review): same caveat as test_ip_ns_addr about the mocked
        # ip_ns_list() return value.
        ns_list = commands.ip_ns_list()
        for ns in ns_list:
            expected = ['/usr/bin/sudo', '-n', 'ip', 'netns', 'exec', ns,
                        'iptables', '-v', '-S', '-t', table]
            commands.iptables_ns(ns, table)
            mock_popen.assert_called_once_with(expected,
                                               **self.subprocess_args)
tempest-2014.1.dev4108.gf22b6cc/tempest/tests/negative/ 0000775 0001750 0001750 00000000000 12332757136 022525 5 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/tests/negative/test_negative_auto_test.py 0000664 0001750 0001750 00000005412 12332757070 030026 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 Deutsche Telekom AG
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from tempest import config
import tempest.test as test
from tempest.tests import base
from tempest.tests import fake_config
class TestNegativeAutoTest(base.TestCase):
    """Tests for negative-scenario generation in tempest.test.

    A canned API description (fake_input_desc) is fed to
    NegativeAutoTest.generate_scenario via a mocked load_schema, and the
    produced scenario list is checked for shape and expected entries.
    """

    # Fake entries
    _interface = 'json'
    _service = 'compute'

    fake_input_desc = {"name": "list-flavors-with-detail",
                       "http-method": "GET",
                       "url": "flavors/detail",
                       "json-schema": {"type": "object",
                                       "properties":
                                       {"minRam": {"type": "integer"},
                                        "minDisk": {"type": "integer"}}
                                       },
                       "resources": ["flavor", "volume", "image"]
                       }

    def setUp(self):
        super(TestNegativeAutoTest, self).setUp()
        self.useFixture(fake_config.ConfigFixture())
        self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)

    def _check_entries(self, result, entry, expected_count, detail_key):
        """Assert `entry` appears `expected_count` times in the scenario
        names and that each match carries a non-None `detail_key` value.

        Shared helper for the property/resource checks below; the loop
        variable is distinct from the `entry` parameter so the search key
        is not clobbered while iterating.
        """
        matches = [a for a in result if entry in a[0]]
        self.assertIsNotNone(matches)
        self.assertIs(len(matches), expected_count)
        for match in matches:
            self.assertIsNotNone(match[1][detail_key])

    def _check_prop_entries(self, result, entry):
        # Each schema property yields two scenarios, both carrying a schema.
        self._check_entries(result, entry, 2, 'schema')

    def _check_resource_entries(self, result, entry):
        # One invalid-resource scenario per declared resource (3 here).
        self._check_entries(result, entry, 3, 'resource')

    @mock.patch('tempest.test.NegativeAutoTest.load_schema')
    def test_generate_scenario(self, open_mock):
        open_mock.return_value = self.fake_input_desc
        scenarios = test.NegativeAutoTest.\
            generate_scenario(None)
        # Scenarios are (name, attributes) pairs.
        self.assertIsInstance(scenarios, list)
        for scenario in scenarios:
            self.assertIsInstance(scenario, tuple)
            self.assertIsInstance(scenario[0], str)
            self.assertIsInstance(scenario[1], dict)
        self._check_prop_entries(scenarios, "prop_minRam")
        self._check_prop_entries(scenarios, "prop_minDisk")
        self._check_resource_entries(scenarios, "inv_res")
tempest-2014.1.dev4108.gf22b6cc/tempest/tests/negative/__init__.py 0000664 0001750 0001750 00000000000 12332757070 024621 0 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/tests/negative/test_negative_generators.py 0000664 0001750 0001750 00000012525 12332757070 030173 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 Deutsche Telekom AG
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import jsonschema
import mock
from tempest.common.generator import base_generator
from tempest.common.generator import negative_generator
from tempest.common.generator import valid_generator
from tempest.tests import base
class TestNegativeBasicGenerator(base.TestCase):
    """Schema-validation tests for BasicGeneratorSet.validate_schema."""

    # A fully populated, well-formed API description.
    valid_desc = {
        "name": "list-flavors-with-detail",
        "http-method": "GET",
        "url": "flavors/detail",
        "json-schema": {
            "type": "object",
            "properties": {"minRam": {"type": "integer"},
                           "minDisk": {"type": "integer"}},
        },
        "resources": ["flavor", "volume", "image"],
    }

    # Only the mandatory fields.
    minimal_desc = {"name": "list-flavors-with-detail",
                    "http-method": "GET",
                    "url": "flavors/detail"}

    # Carries a field the meta-schema does not allow.
    add_prop_desc = {"name": "list-flavors-with-detail",
                     "http-method": "GET",
                     "url": "flavors/detail",
                     "unknown_field": [12]}

    # Embeds a json-schema with a bogus type.
    invalid_json_schema_desc = {"name": "list-flavors-with-detail",
                                "http-method": "GET",
                                "url": "flavors/detail",
                                "json-schema": {"type": "NotExistingType"}}

    def setUp(self):
        super(TestNegativeBasicGenerator, self).setUp()
        self.generator = base_generator.BasicGeneratorSet()

    def _assert_valid_jsonschema_call(self, jsonschema_mock, desc):
        # validate_schema must delegate exactly once to jsonschema.validate,
        # passing the description and the generator's meta-schema.
        self.assertEqual(jsonschema_mock.call_count, 1)
        jsonschema_mock.assert_called_with(desc, self.generator.schema)

    @mock.patch('jsonschema.validate', wraps=jsonschema.validate)
    def test_validate_schema_with_valid_input(self, jsonschema_mock):
        self.generator.validate_schema(self.valid_desc)
        self._assert_valid_jsonschema_call(jsonschema_mock, self.valid_desc)

    @mock.patch('jsonschema.validate', wraps=jsonschema.validate)
    def test_validate_schema_with_minimal_input(self, jsonschema_mock):
        self.generator.validate_schema(self.minimal_desc)
        self._assert_valid_jsonschema_call(jsonschema_mock, self.minimal_desc)

    def test_validate_schema_with_invalid_input(self):
        # An unexpected extra field is a validation error, while a
        # malformed embedded json-schema is a schema error.
        self.assertRaises(jsonschema.ValidationError,
                          self.generator.validate_schema, self.add_prop_desc)
        self.assertRaises(jsonschema.SchemaError,
                          self.generator.validate_schema,
                          self.invalid_json_schema_desc)
class BaseNegativeGenerator(object):
    """Mixin with shared checks for the test-data generators.

    Concrete subclasses must also inherit a TestCase and assign
    self.generator in setUp() before these tests run.
    """

    # Every generator implementation must cover these schema types.
    types = ['string', 'integer', 'object']

    fake_input_str = {"type": "string",
                      "minLength": 2,
                      "maxLength": 8,
                      'results': {'gen_int': 404}}

    fake_input_int = {"type": "integer",
                      "maximum": 255,
                      "minimum": 1}

    fake_input_obj = {"type": "object",
                      "properties": {"minRam": {"type": "integer"},
                                     "diskName": {"type": "string"},
                                     "maxRam": {"type": "integer", }
                                     }
                      }

    # NOTE: historical misspelling ("unkown") kept intentionally; external
    # consumers may reference this attribute by name.
    unkown_type_schema = {
        "type": "not_defined"
    }

    def _validate_result(self, data):
        """Every generated entry must be a 3-tuple starting with a name."""
        # assertIsInstance gives a clearer failure message than
        # assertTrue(isinstance(...)).
        self.assertIsInstance(data, list)
        for t in data:
            self.assertIsInstance(t, tuple)
            self.assertEqual(3, len(t))
            self.assertIsInstance(t[0], str)

    def test_generate_string(self):
        result = self.generator.generate(self.fake_input_str)
        self._validate_result(result)

    def test_generate_integer(self):
        result = self.generator.generate(self.fake_input_int)
        self._validate_result(result)

    def test_generate_obj(self):
        result = self.generator.generate(self.fake_input_obj)
        self._validate_result(result)

    def test_generator_mandatory_functions(self):
        # The generator must register a handler for every supported type.
        for data_type in self.types:
            self.assertIn(data_type, self.generator.types_dict)

    def test_generate_with_unknown_type(self):
        # Unsupported schema types must be rejected loudly.
        self.assertRaises(TypeError, self.generator.generate,
                          self.unkown_type_schema)
class TestNegativeValidGenerator(base.TestCase, BaseNegativeGenerator):
    """Run the shared generator checks against ValidTestGenerator."""

    def setUp(self):
        super(TestNegativeValidGenerator, self).setUp()
        self.generator = valid_generator.ValidTestGenerator()

    def test_generate_valid(self):
        # generate_valid must fill every schema property with a value of
        # the declared type.
        generated = self.generator.generate_valid(self.fake_input_obj)
        self.assertIn("minRam", generated)
        self.assertIsInstance(generated["minRam"], int)
        self.assertIn("diskName", generated)
        self.assertIsInstance(generated["diskName"], str)
class TestNegativeNegativeGenerator(base.TestCase, BaseNegativeGenerator):
    """Run the shared BaseNegativeGenerator checks against
    NegativeTestGenerator.
    """

    def setUp(self):
        super(TestNegativeNegativeGenerator, self).setUp()
        self.generator = negative_generator.NegativeTestGenerator()
tempest-2014.1.dev4108.gf22b6cc/tempest/tests/test_waiters.py 0000664 0001750 0001750 00000003631 12332757070 024012 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import mock
from tempest.common import waiters
from tempest import exceptions
from tempest.tests import base
class TestImageWaiters(base.TestCase):
    """Tests for waiters.wait_for_image_status against a mocked client."""

    def setUp(self):
        super(TestImageWaiters, self).setUp()
        # Stub image client with 1s timeout/interval so failure paths
        # resolve quickly.
        self.client = mock.MagicMock()
        self.client.build_timeout = 1
        self.client.build_interval = 1

    def test_wait_for_image_status(self):
        # An image already in the requested state must return promptly.
        self.client.get_image.return_value = (None, {'status': 'active'})
        started = int(time.time())
        waiters.wait_for_image_status(self.client, 'fake_image_id', 'active')
        elapsed = int(time.time()) - started
        # Ensure waiter returns before build_timeout
        self.assertTrue(elapsed < 10)

    def test_wait_for_image_status_timeout(self):
        # An image stuck in 'saving' must eventually raise a timeout.
        self.client.get_image.return_value = (None, {'status': 'saving'})
        self.assertRaises(exceptions.TimeoutException,
                          waiters.wait_for_image_status,
                          self.client, 'fake_image_id', 'active')

    def test_wait_for_image_status_error_on_image_create(self):
        # An ERROR status aborts the wait with AddImageException.
        self.client.get_image.return_value = (None, {'status': 'ERROR'})
        self.assertRaises(exceptions.AddImageException,
                          waiters.wait_for_image_status,
                          self.client, 'fake_image_id', 'active')
tempest-2014.1.dev4108.gf22b6cc/tempest/tests/test_list_tests.py 0000664 0001750 0001750 00000002636 12332757070 024535 0 ustar chuck chuck 0000000 0000000 # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import subprocess
from tempest.tests import base
class TestTestList(base.TestCase):
    """Meta-test: every test module in the tree must import cleanly.

    `testr list-tests` reports modules that fail to import as
    unittest.loader.ModuleImportFailure entries; any such entry means the
    broken module's tests would be silently skipped by test filters.
    """

    def test_no_import_errors(self):
        import_failures = []
        p = subprocess.Popen(['testr', 'list-tests'], stdout=subprocess.PIPE)
        ids = p.stdout.read()
        ids = ids.split('\n')
        for test_id in ids:
            # Only consider lines that look like dotted test ids.  Raw
            # string so \w stays a regex class, not a string escape.
            if re.match(r'(\w+\.){3}\w+', test_id):
                if not test_id.startswith('tempest.'):
                    # Anything not rooted at "tempest." is an import
                    # failure wrapper; extract the offending module id.
                    fail_id = test_id.split('unittest.loader.ModuleImport'
                                            'Failure.')[1]
                    import_failures.append(fail_id)
        error_message = ("The following tests have import failures and aren't"
                         " being run with test filters %s" % import_failures)
        self.assertFalse(import_failures, error_message)
tempest-2014.1.dev4108.gf22b6cc/tempest/tests/cli/ 0000775 0001750 0001750 00000000000 12332757136 021472 5 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/tests/cli/test_output_parser.py 0000664 0001750 0001750 00000015076 12332757070 026025 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.cli import output_parser
from tempest import exceptions
from tempest.tests import base
class TestOutputParser(base.TestCase):
    """Tests for tempest.cli.output_parser.

    The fixtures below are canned CLI table output as emitted by the
    OpenStack command line tools; the parser turns them into
    headers/values dicts, listings, and property/value detail dicts.
    """

    # A single three-column table.
    OUTPUT_LINES = """
+----+------+---------+
| ID | Name | Status |
+----+------+---------+
| 11 | foo | BUILD |
| 21 | bar | ERROR |
| 31 | bee | None |
+----+------+---------+
"""
    # A second table with different headers, used for multi-table parsing.
    OUTPUT_LINES2 = """
+----+-------+---------+
| ID | Name2 | Status2 |
+----+-------+---------+
| 41 | aaa | SSSSS |
| 51 | bbb | TTTTT |
| 61 | ccc | AAAAA |
+----+-------+---------+
"""

    # Parsed forms of the two fixtures above.
    EXPECTED_TABLE = {'headers': ['ID', 'Name', 'Status'],
                      'values': [['11', 'foo', 'BUILD'],
                                 ['21', 'bar', 'ERROR'],
                                 ['31', 'bee', 'None']]}
    EXPECTED_TABLE2 = {'headers': ['ID', 'Name2', 'Status2'],
                       'values': [['41', 'aaa', 'SSSSS'],
                                  ['51', 'bbb', 'TTTTT'],
                                  ['61', 'ccc', 'AAAAA']]}

    def test_table_with_normal_values(self):
        actual = output_parser.table(self.OUTPUT_LINES)
        self.assertIsInstance(actual, dict)
        self.assertEqual(self.EXPECTED_TABLE, actual)

    def test_table_with_list(self):
        # table() also accepts a pre-split list of lines.
        output_lines = self.OUTPUT_LINES.split('\n')
        actual = output_parser.table(output_lines)
        self.assertIsInstance(actual, dict)
        self.assertEqual(self.EXPECTED_TABLE, actual)

    def test_table_with_invalid_line(self):
        # Trailing junk after the table must be ignored.
        output_lines = self.OUTPUT_LINES + "aaaa"
        actual = output_parser.table(output_lines)
        self.assertIsInstance(actual, dict)
        self.assertEqual(self.EXPECTED_TABLE, actual)

    def test_tables_with_normal_values(self):
        # Text before each table becomes that table's label.
        output_lines = 'test' + self.OUTPUT_LINES +\
            'test2' + self.OUTPUT_LINES2
        expected = [{'headers': self.EXPECTED_TABLE['headers'],
                     'label': 'test',
                     'values': self.EXPECTED_TABLE['values']},
                    {'headers': self.EXPECTED_TABLE2['headers'],
                     'label': 'test2',
                     'values': self.EXPECTED_TABLE2['values']}]
        actual = output_parser.tables(output_lines)
        self.assertIsInstance(actual, list)
        self.assertEqual(expected, actual)

    def test_tables_with_invalid_values(self):
        # A stray trailing newline must not change the parse.
        output_lines = 'test' + self.OUTPUT_LINES +\
            'test2' + self.OUTPUT_LINES2 + '\n'
        expected = [{'headers': self.EXPECTED_TABLE['headers'],
                     'label': 'test',
                     'values': self.EXPECTED_TABLE['values']},
                    {'headers': self.EXPECTED_TABLE2['headers'],
                     'label': 'test2',
                     'values': self.EXPECTED_TABLE2['values']}]
        actual = output_parser.tables(output_lines)
        self.assertIsInstance(actual, list)
        self.assertEqual(expected, actual)

    def test_tables_with_invalid_line(self):
        # A dangling separator line after the last table is ignored.
        output_lines = 'test' + self.OUTPUT_LINES +\
            'test2' + self.OUTPUT_LINES2 +\
            '+----+-------+---------+'
        expected = [{'headers': self.EXPECTED_TABLE['headers'],
                     'label': 'test',
                     'values': self.EXPECTED_TABLE['values']},
                    {'headers': self.EXPECTED_TABLE2['headers'],
                     'label': 'test2',
                     'values': self.EXPECTED_TABLE2['values']}]
        actual = output_parser.tables(output_lines)
        self.assertIsInstance(actual, list)
        self.assertEqual(expected, actual)

    # A single-column table used by the listing test.
    LISTING_OUTPUT = """
+----+
| ID |
+----+
| 11 |
| 21 |
| 31 |
+----+
"""

    def test_listing(self):
        # listing() yields one {header: value} dict per row.
        expected = [{'ID': '11'}, {'ID': '21'}, {'ID': '31'}]
        actual = output_parser.listing(self.LISTING_OUTPUT)
        self.assertIsInstance(actual, list)
        self.assertEqual(expected, actual)

    def test_details_multiple_with_invalid_line(self):
        # A table without Property/Value headers is rejected.
        self.assertRaises(exceptions.InvalidStructure,
                          output_parser.details_multiple,
                          self.OUTPUT_LINES)

    # Two labelled Property/Value tables for the details tests.
    DETAILS_LINES1 = """First Table
+----------+--------+
| Property | Value |
+----------+--------+
| foo | BUILD |
| bar | ERROR |
| bee | None |
+----------+--------+
"""
    DETAILS_LINES2 = """Second Table
+----------+--------+
| Property | Value |
+----------+--------+
| aaa | VVVVV |
| bbb | WWWWW |
| ccc | XXXXX |
+----------+--------+
"""

    def test_details_with_normal_line_label_false(self):
        expected = {'foo': 'BUILD', 'bar': 'ERROR', 'bee': 'None'}
        actual = output_parser.details(self.DETAILS_LINES1)
        self.assertEqual(expected, actual)

    def test_details_with_normal_line_label_true(self):
        # with_label=True folds the table's title in under '__label'.
        expected = {'__label': 'First Table',
                    'foo': 'BUILD', 'bar': 'ERROR', 'bee': 'None'}
        actual = output_parser.details(self.DETAILS_LINES1, with_label=True)
        self.assertEqual(expected, actual)

    def test_details_multiple_with_normal_line_label_false(self):
        expected = [{'foo': 'BUILD', 'bar': 'ERROR', 'bee': 'None'},
                    {'aaa': 'VVVVV', 'bbb': 'WWWWW', 'ccc': 'XXXXX'}]
        actual = output_parser.details_multiple(self.DETAILS_LINES1 +
                                                self.DETAILS_LINES2)
        self.assertIsInstance(actual, list)
        self.assertEqual(expected, actual)

    def test_details_multiple_with_normal_line_label_true(self):
        expected = [{'__label': 'First Table',
                     'foo': 'BUILD', 'bar': 'ERROR', 'bee': 'None'},
                    {'__label': 'Second Table',
                     'aaa': 'VVVVV', 'bbb': 'WWWWW', 'ccc': 'XXXXX'}]
        actual = output_parser.details_multiple(self.DETAILS_LINES1 +
                                                self.DETAILS_LINES2,
                                                with_label=True)
        self.assertIsInstance(actual, list)
        self.assertEqual(expected, actual)
tempest-2014.1.dev4108.gf22b6cc/tempest/tests/cli/__init__.py 0000664 0001750 0001750 00000000000 12332757070 023566 0 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/tests/test_glance_http.py 0000664 0001750 0001750 00000022176 12332757070 024631 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib
import json
import mock
import six
import socket
from tempest.common import glance_http
from tempest import exceptions
from tempest.openstack.common.fixture import mockpatch
from tempest.tests import base
from tempest.tests import fake_auth_provider
from tempest.tests import fake_http
class TestGlanceHTTPClient(base.TestCase):
    """Tests for the hand-rolled glance HTTP client
    (tempest.common.glance_http.HTTPClient).

    httplib.HTTPConnection.request and .getresponse are patched so no
    network traffic occurs; a fake auth provider supplies the endpoint.
    """

    def setUp(self):
        super(TestGlanceHTTPClient, self).setUp()
        self.fake_http = fake_http.fake_httplib2(return_type=200)
        # NOTE(maurosr): using http here implies that we will be using httplib
        # directly. With https glance_client would use an httpS version, but
        # the real backend would still be httplib anyway and since we mock it
        # that there is no reason to care.
        self.endpoint = 'http://fake_url.com'
        self.fake_auth = fake_auth_provider.FakeAuthProvider()
        self.fake_auth.base_url = mock.MagicMock(return_value=self.endpoint)
        self.useFixture(mockpatch.PatchObject(
            httplib.HTTPConnection,
            'request',
            side_effect=self.fake_http.request(self.endpoint)[1]))
        self.client = glance_http.HTTPClient(self.fake_auth, {})

    def _set_response_fixture(self, header, status, resp_body):
        # Patch getresponse to hand back a canned fake httplib response
        # with the given headers, status, and body.
        resp = fake_http.fake_httplib(header, status=status,
                                      body=six.StringIO(resp_body))
        self.useFixture(mockpatch.PatchObject(httplib.HTTPConnection,
                                              'getresponse',
                                              return_value=resp))
        return resp

    def test_json_request_without_content_type_header(self):
        # No content-type header means the body is not decoded at all.
        self._set_response_fixture({}, 200, 'fake_response_body')
        resp, body = self.client.json_request('GET', '/images')
        self.assertEqual(200, resp.status)
        self.assertIsNone(body)

    def test_json_request_with_xml_content_type_header(self):
        # Non-JSON content types are likewise left undecoded.
        self._set_response_fixture({'content-type': 'application/xml'},
                                   200, 'fake_response_body')
        resp, body = self.client.json_request('GET', '/images')
        self.assertEqual(200, resp.status)
        self.assertIsNone(body)

    def test_json_request_with_content_type_header(self):
        # application/json bodies are handed back to the caller.
        self._set_response_fixture({'content-type': 'application/json'},
                                   200, 'fake_response_body')
        resp, body = self.client.json_request('GET', '/images')
        self.assertEqual(200, resp.status)
        self.assertEqual('fake_response_body', body)

    def test_json_request_fails_to_json_loads(self):
        # If json.loads chokes, the raw body is returned instead of raising.
        self._set_response_fixture({'content-type': 'application/json'},
                                   200, 'fake_response_body')
        self.useFixture(mockpatch.PatchObject(json, 'loads',
                                              side_effect=ValueError()))
        resp, body = self.client.json_request('GET', '/images')
        self.assertEqual(200, resp.status)
        self.assertEqual(body, 'fake_response_body')

    def test_json_request_socket_timeout(self):
        # A socket timeout is translated to tempest's TimeoutException.
        self.useFixture(mockpatch.PatchObject(httplib.HTTPConnection,
                                              'request',
                                              side_effect=socket.timeout()))
        self.assertRaises(exceptions.TimeoutException,
                          self.client.json_request, 'GET', '/images')

    def test_json_request_endpoint_not_found(self):
        # DNS failure (gaierror) is translated to EndpointNotFound.
        self.useFixture(mockpatch.PatchObject(httplib.HTTPConnection,
                                              'request',
                                              side_effect=socket.gaierror()))
        self.assertRaises(exceptions.EndpointNotFound,
                          self.client.json_request, 'GET', '/images')

    def test_raw_request(self):
        # raw_request returns a readable body object, not a decoded string.
        self._set_response_fixture({}, 200, 'fake_response_body')
        resp, body = self.client.raw_request('GET', '/images')
        self.assertEqual(200, resp.status)
        self.assertEqual('fake_response_body', body.read())

    def test_raw_request_with_response_chunked(self):
        # Shrinking CHUNKSIZE to 1 forces the chunked read path; the
        # reassembled body must still match.
        self._set_response_fixture({}, 200, 'fake_response_body')
        self.useFixture(mockpatch.PatchObject(glance_http,
                                              'CHUNKSIZE', 1))
        resp, body = self.client.raw_request('GET', '/images')
        self.assertEqual(200, resp.status)
        self.assertEqual('fake_response_body', body.read())

    def test_raw_request_chunked(self):
        # Chunked *upload* path: endheaders/send are stubbed so only the
        # chunking logic is exercised.
        self.useFixture(mockpatch.PatchObject(glance_http,
                                              'CHUNKSIZE', 1))
        self.useFixture(mockpatch.PatchObject(httplib.HTTPConnection,
                                              'endheaders'))
        self.useFixture(mockpatch.PatchObject(httplib.HTTPConnection,
                                              'send'))
        self._set_response_fixture({}, 200, 'fake_response_body')
        req_body = six.StringIO('fake_request_body')
        resp, body = self.client.raw_request('PUT', '/images', body=req_body)
        self.assertEqual(200, resp.status)
        self.assertEqual('fake_response_body', body.read())
        # With CHUNKSIZE 1 every byte of the request should be sent
        # individually.
        httplib.HTTPConnection.send.assert_call_count(req_body.len)

    def test_get_connection_class_for_https(self):
        conn_class = self.client.get_connection_class('https')
        self.assertEqual(glance_http.VerifiedHTTPSConnection, conn_class)

    def test_get_connection_class_for_http(self):
        conn_class = (self.client.get_connection_class('http'))
        self.assertEqual(httplib.HTTPConnection, conn_class)

    def test_get_connection_http(self):
        self.assertTrue(isinstance(self.client.get_connection(),
                                   httplib.HTTPConnection))

    def test_get_connection_https(self):
        # An https endpoint must yield the certificate-verifying class.
        endpoint = 'https://fake_url.com'
        self.fake_auth.base_url = mock.MagicMock(return_value=endpoint)
        self.client = glance_http.HTTPClient(self.fake_auth, {})
        self.assertTrue(isinstance(self.client.get_connection(),
                                   glance_http.VerifiedHTTPSConnection))

    def test_get_connection_url_not_fount(self):
        # An InvalidURL from the connection class surfaces as
        # EndpointNotFound.
        self.useFixture(mockpatch.PatchObject(self.client, 'connection_class',
                                              side_effect=httplib.InvalidURL()
                                              ))
        self.assertRaises(exceptions.EndpointNotFound,
                          self.client.get_connection)

    def test_get_connection_kwargs_default_for_http(self):
        # http connections get only a (600s) timeout by default.
        kwargs = self.client.get_connection_kwargs('http')
        self.assertEqual(600, kwargs['timeout'])
        self.assertEqual(1, len(kwargs.keys()))

    def test_get_connection_kwargs_set_timeout_for_http(self):
        kwargs = self.client.get_connection_kwargs('http', timeout=10,
                                                   cacert='foo')
        self.assertEqual(10, kwargs['timeout'])
        # nothing more than timeout is evaluated for http connections
        self.assertEqual(1, len(kwargs.keys()))

    def test_get_connection_kwargs_default_for_https(self):
        # https connections additionally carry the TLS-related options.
        kwargs = self.client.get_connection_kwargs('https')
        self.assertEqual(600, kwargs['timeout'])
        self.assertEqual(None, kwargs['cacert'])
        self.assertEqual(None, kwargs['cert_file'])
        self.assertEqual(None, kwargs['key_file'])
        self.assertEqual(False, kwargs['insecure'])
        self.assertEqual(True, kwargs['ssl_compression'])
        self.assertEqual(6, len(kwargs.keys()))

    def test_get_connection_kwargs_set_params_for_https(self):
        # All six https options must pass through unchanged.
        kwargs = self.client.get_connection_kwargs('https', timeout=10,
                                                   cacert='foo',
                                                   cert_file='/foo/bar.cert',
                                                   key_file='/foo/key.pem',
                                                   insecure=True,
                                                   ssl_compression=False)
        self.assertEqual(10, kwargs['timeout'])
        self.assertEqual('foo', kwargs['cacert'])
        self.assertEqual('/foo/bar.cert', kwargs['cert_file'])
        self.assertEqual('/foo/key.pem', kwargs['key_file'])
        self.assertEqual(True, kwargs['insecure'])
        self.assertEqual(False, kwargs['ssl_compression'])
        self.assertEqual(6, len(kwargs.keys()))
class TestResponseBodyIterator(base.TestCase):
    """Tests for glance_http.ResponseBodyIterator chunking behavior."""

    def test_iter_default_chunk_size_64k(self):
        # A body one byte longer than CHUNKSIZE must yield exactly one
        # full chunk followed by the single leftover byte.
        resp = fake_http.fake_httplib({}, six.StringIO(
            'X' * (glance_http.CHUNKSIZE + 1)))
        iterator = glance_http.ResponseBodyIterator(resp)
        chunks = list(iterator)
        self.assertEqual(chunks, ['X' * glance_http.CHUNKSIZE, 'X'])
tempest-2014.1.dev4108.gf22b6cc/tempest/tests/base.py 0000664 0001750 0001750 00000004225 12332757070 022207 0 ustar chuck chuck 0000000 0000000 # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import fixtures
import mock
import testtools
from tempest.openstack.common.fixture import moxstubout
class TestCase(testtools.TestCase):
    """Base test case: optional stdout/stderr capture plus mox/stubs setup."""

    def setUp(self):
        super(TestCase, self).setUp()
        # Capture stdout/stderr when the corresponding env switch is set to
        # 'True' or '1' (membership test replaces the duplicated == checks).
        if os.environ.get('OS_STDOUT_CAPTURE') in ('True', '1'):
            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
        if os.environ.get('OS_STDERR_CAPTURE') in ('True', '1'):
            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))

        mox_fixture = self.useFixture(moxstubout.MoxStubout())
        self.mox = mox_fixture.mox
        self.stubs = mox_fixture.stubs

    def patch(self, target, **kwargs):
        """Patch *target* and return the started mock.

        The patcher is started immediately and a cleanup is registered to
        stop it, so the caller receives the mock object itself and never
        needs to call start() or stop().  (The previous docstring wrongly
        said the caller had to call the returned patcher.)

        :param target: String module.class or module.object expression to
            patch
        :param **kwargs: Passed as-is to `mock.patch`. See mock documentation
            for details.
        """
        p = mock.patch(target, **kwargs)
        m = p.start()
        self.addCleanup(p.stop)
        return m
tempest-2014.1.dev4108.gf22b6cc/tempest/tests/fake_http.py 0000664 0001750 0001750 00000004624 12332757070 023245 0 ustar chuck chuck 0000000 0000000 # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import httplib2
class fake_httplib2(object):
    """Minimal stand-in for httplib2.Http used by the unit tests."""

    def __init__(self, return_type=None, *args, **kwargs):
        # When return_type is an int it is used as the fake HTTP status.
        self.return_type = return_type

    def request(self, uri, method="GET", body=None, headers=None,
                redirections=5, connection_type=None):
        """Echo the request back, or fabricate a response for an int
        return_type; anything else raises TypeError."""
        if not self.return_type:
            fake_headers = httplib2.Response(headers)
            return_obj = {'uri': uri,
                          'method': method,
                          'body': body,
                          'headers': headers}
            return (fake_headers, return_obj)
        if isinstance(self.return_type, int):
            body = "fake_body"
            header_info = {'content-type': 'text/plain',
                           'status': str(self.return_type),
                           'content-length': len(body)}
            return (httplib2.Response(header_info), body)
        msg = "unsupported return type %s" % self.return_type
        raise TypeError(msg)
class fake_httplib(object):
    """Fake httplib response backed by an in-memory, file-like body."""

    def __init__(self, headers, body=None,
                 version=1.0, status=200, reason="Ok"):
        """
        :param headers: dict representing HTTP response headers
        :param body: file-like object
        :param version: HTTP Version
        :param status: Response status code
        :param reason: Status code related message.
        """
        self.headers = headers
        self.version = version
        self.status = status
        self.reason = reason
        self.body = body

    def getheaders(self):
        """Return all headers as (name, value) pairs from a deep copy."""
        headers_copy = copy.deepcopy(self.headers)
        return headers_copy.items()

    def getheader(self, key, default):
        """Return header *key*, or *default* when absent."""
        return self.headers.get(key, default)

    def read(self, amt):
        """Read up to *amt* characters from the body."""
        return self.body.read(amt)
tempest-2014.1.dev4108.gf22b6cc/tempest/common/ 0000775 0001750 0001750 00000000000 12332757136 021051 5 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/common/generator/ 0000775 0001750 0001750 00000000000 12332757136 023037 5 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/common/generator/base_generator.py 0000664 0001750 0001750 00000011243 12332757070 026367 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 Deutsche Telekom AG
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import jsonschema
from tempest.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def _check_for_expected_result(name, schema):
expected_result = None
if "results" in schema:
if name in schema["results"]:
expected_result = schema["results"][name]
return expected_result
def generator_type(*args):
    """Decorator tagging a generator method with the schema types it
    supports (stored on the function as ``.types``)."""
    def tag(target):
        target.types = args
        return target
    return tag
def simple_generator(fn):
    """
    Decorator for simple generators that return one value
    """
    def wrapped(self, schema):
        result = fn(self, schema)
        if result is None:
            # Generator declined (schema has no applicable constraint).
            return None
        expected = _check_for_expected_result(fn.__name__, schema)
        return (fn.__name__, result, expected)
    return wrapped
class BasicGeneratorSet(object):
    """Registry of schema-type-driven data generators.

    Methods decorated with ``generator_type`` are collected into
    ``types_dict`` keyed by schema type; ``generate`` dispatches on the
    "type" field of the supplied schema.
    """

    _instance = None

    # Meta-schema describing the test description documents accepted by
    # validate_schema().
    schema = {
        "type": "object",
        "properties": {
            "name": {"type": "string"},
            "http-method": {
                "enum": ["GET", "PUT", "HEAD",
                         "POST", "PATCH", "DELETE", 'COPY']
            },
            "admin_client": {"type": "boolean"},
            "url": {"type": "string"},
            "default_result_code": {"type": "integer"},
            "json-schema": {},
            "resources": {
                "type": "array",
                "items": {
                    "oneOf": [
                        {"type": "string"},
                        {
                            "type": "object",
                            "properties": {
                                "name": {"type": "string"},
                                "expected_result": {"type": "integer"}
                            }
                        }
                    ]
                }
            },
            "results": {
                "type": "object",
                "properties": {}
            }
        },
        "required": ["name", "http-method", "url"],
        "additionalProperties": False,
    }

    def __init__(self):
        # Build the schema-type -> [generator methods] dispatch table by
        # scanning for methods tagged by the generator_type decorator.
        # (Cleaned up: "not'__' in m" idiom and the loop variable shadowing
        # the builtin 'type'.)
        self.types_dict = {}
        for name in dir(self):
            if '__' in name or not callable(getattr(self, name)):
                continue
            method = getattr(self, name)
            if hasattr(method, "types"):
                for schema_type in method.types:
                    if schema_type not in self.types_dict:
                        self.types_dict[schema_type] = []
                    self.types_dict[schema_type].append(method)

    def validate_schema(self, schema):
        """Validate *schema* against the meta-schema; an embedded
        json-schema is additionally checked for well-formedness."""
        if "json-schema" in schema:
            jsonschema.Draft4Validator.check_schema(schema['json-schema'])
        jsonschema.validate(schema, self.schema)

    def generate(self, schema):
        """
        Generate a json dictionary based on a schema.

        Only one value is mis-generated for each dictionary created.
        Any generator must return a list of tuples or a single tuple.
        The values of this tuple are:
          result[0]: Name of the test
          result[1]: json schema for the test
          result[2]: expected result of the test (can be None)
        """
        LOG.debug("generate_invalid: %s" % schema)
        schema_type = schema["type"]
        if isinstance(schema_type, list):
            # List-typed schemas are only supported when integer is one of
            # the alternatives.
            if "integer" in schema_type:
                schema_type = "integer"
            else:
                raise Exception("non-integer list types not supported")
        result = []
        if schema_type not in self.types_dict:
            raise TypeError("generator (%s) doesn't support type: %s"
                            % (self.__class__.__name__, schema_type))
        for generator in self.types_dict[schema_type]:
            ret = generator(schema)
            if ret is not None:
                if isinstance(ret, list):
                    result.extend(ret)
                elif isinstance(ret, tuple):
                    result.append(ret)
                else:
                    raise Exception("generator (%s) returns invalid result: %s"
                                    % (generator, ret))
        LOG.debug("result: %s" % result)
        return result
tempest-2014.1.dev4108.gf22b6cc/tempest/common/generator/valid_generator.py 0000664 0001750 0001750 00000003650 12332757070 026557 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 Deutsche Telekom AG
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tempest.common.generator.base_generator as base
from tempest.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class ValidTestGenerator(base.BasicGeneratorSet):
    """Generates values that satisfy a given json-schema."""

    @base.generator_type("string")
    @base.simple_generator
    def generate_valid_string(self, schema):
        """Return the shortest string the schema accepts."""
        # TODO(dkr mko): handle format and pattern
        return "x" * schema.get("minLength", 0)

    @base.generator_type("integer")
    @base.simple_generator
    def generate_valid_integer(self, schema):
        """Return an integer inside the schema's minimum/maximum bounds."""
        # TODO(dkr mko): handle multipleOf
        if "minimum" in schema:
            minimum = schema["minimum"]
            return minimum + 1 if "exclusiveMinimum" in schema else minimum
        if "maximum" in schema:
            maximum = schema["maximum"]
            return maximum - 1 if "exclusiveMaximum" in schema else maximum
        return 0

    @base.generator_type("object")
    @base.simple_generator
    def generate_valid_object(self, schema):
        """Return a dict holding a valid value for every declared property."""
        obj = {}
        for prop, prop_schema in schema["properties"].iteritems():
            obj[prop] = self.generate_valid(prop_schema)
        return obj

    def generate_valid(self, schema):
        """Return a single valid value for *schema*."""
        return self.generate(schema)[0][1]
tempest-2014.1.dev4108.gf22b6cc/tempest/common/generator/__init__.py 0000664 0001750 0001750 00000000000 12332757070 025133 0 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/common/generator/negative_generator.py 0000664 0001750 0001750 00000007473 12332757070 027271 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 Deutsche Telekom AG
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import tempest.common.generator.base_generator as base
import tempest.common.generator.valid_generator as valid
from tempest.openstack.common import log as logging
LOG = logging.getLogger(__name__)
class NegativeTestGenerator(base.BasicGeneratorSet):
    """Produces values that violate a given json-schema."""

    @base.generator_type("string")
    @base.simple_generator
    def gen_int(self, _):
        """Wrong type: an int where a string is expected."""
        return 4

    @base.generator_type("integer")
    @base.simple_generator
    def gen_string(self, _):
        """Wrong type: a string where an integer is expected."""
        return "XXXXXX"

    @base.generator_type("integer", "string")
    def gen_none(self, schema):
        """None instead of any scalar value."""
        # Note(mkoderer): it's not using the decorator otherwise it'd be
        # filtered
        expected_result = base._check_for_expected_result('gen_none', schema)
        return ('gen_none', None, expected_result)

    @base.generator_type("string")
    @base.simple_generator
    def gen_str_min_length(self, schema):
        """A string one character shorter than minLength allows."""
        min_length = schema.get("minLength", 0)
        if min_length <= 0:
            return None
        return "x" * (min_length - 1)

    @base.generator_type("string")
    @base.simple_generator
    def gen_str_max_length(self, schema):
        """A string one character longer than maxLength allows."""
        max_length = schema.get("maxLength", -1)
        if max_length <= -1:
            return None
        return "x" * (max_length + 1)

    @base.generator_type("integer")
    @base.simple_generator
    def gen_int_min(self, schema):
        """An integer just below the allowed minimum."""
        if "minimum" not in schema:
            return None
        minimum = schema["minimum"]
        return minimum if "exclusiveMinimum" in schema else minimum - 1

    @base.generator_type("integer")
    @base.simple_generator
    def gen_int_max(self, schema):
        """An integer just above the allowed maximum."""
        if "maximum" not in schema:
            return None
        maximum = schema["maximum"]
        return maximum if "exclusiveMaximum" in schema else maximum + 1

    @base.generator_type("object")
    def gen_obj_remove_attr(self, schema):
        """Valid objects, each missing one required attribute."""
        template = valid.ValidTestGenerator().generate_valid(schema)
        invalids = []
        for attr in schema.get("required", []):
            mutated = copy.deepcopy(template)
            del mutated[attr]
            invalids.append(("gen_obj_remove_attr", mutated, None))
        return invalids

    @base.generator_type("object")
    @base.simple_generator
    def gen_obj_add_attr(self, schema):
        """A valid object plus an extra attribute, when
        additionalProperties is forbidden."""
        template = valid.ValidTestGenerator().generate_valid(schema)
        if schema.get("additionalProperties", True):
            return None
        mutated = copy.deepcopy(template)
        mutated["$$$$$$$$$$"] = "xxx"
        return mutated

    @base.generator_type("object")
    def gen_inv_prop_obj(self, schema):
        """Valid objects where one property at a time is replaced by each
        invalid value for that property's schema."""
        LOG.debug("generate_invalid_object: %s" % schema)
        template = valid.ValidTestGenerator().generate_valid(schema)
        invalids = []
        for prop, prop_schema in schema["properties"].iteritems():
            for invalid in self.generate(prop_schema):
                LOG.debug(prop_schema)
                mutated = copy.deepcopy(template)
                mutated[prop] = invalid[1]
                name = "prop_%s_%s" % (prop, invalid[0])
                invalids.append((name, mutated, invalid[2]))
        LOG.debug("generate_invalid_object return: %s" % invalids)
        return invalids
tempest-2014.1.dev4108.gf22b6cc/tempest/common/http.py 0000664 0001750 0001750 00000001760 12332757070 022403 0 ustar chuck chuck 0000000 0000000 # Copyright 2013 OpenStack Foundation
# Copyright 2013 Citrix Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import httplib2
class ClosingHttp(httplib2.Http):
    """httplib2.Http variant that sends 'connection: close' on every
    request so server-side sockets are not kept open."""

    def request(self, *args, **kwargs):
        # Merge the closing header over whatever the caller supplied.
        headers = dict(kwargs.get('headers', {}), connection='close')
        patched_kwargs = dict(kwargs, headers=headers)
        return super(ClosingHttp, self).request(*args, **patched_kwargs)
tempest-2014.1.dev4108.gf22b6cc/tempest/common/xml_utils.py 0000664 0001750 0001750 00000012521 12332757070 023441 0 ustar chuck chuck 0000000 0000000 # Copyright 2012 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
XMLNS_11 = "http://docs.openstack.org/compute/api/v1.1"
XMLNS_V3 = "http://docs.openstack.org/compute/api/v1.1"
NEUTRON_NAMESPACES = {
'binding': "http://docs.openstack.org/ext/binding/api/v1.0",
'router': "http://docs.openstack.org/ext/neutron/router/api/v1.0",
'provider': 'http://docs.openstack.org/ext/provider/api/v1.0',
}
# NOTE(danms): This is just a silly implementation to help make generating
# XML faster for prototyping. Could be replaced with proper etree gorp
# if desired
class Element(object):
    """Lightweight XML element that renders itself via str()."""

    def __init__(self, element_name, *args, **kwargs):
        """
        :param element_name: tag name
        :param args: child Element instances
        :param kwargs: XML attributes (None values render as empty strings)
        """
        self.element_name = element_name
        self._attrs = kwargs
        self._elements = list(args)

    def add_attr(self, name, value):
        """Set XML attribute *name* to *value*."""
        self._attrs[name] = value

    def append(self, element):
        """Append a child element."""
        self._elements.append(element)

    def __str__(self):
        args = " ".join(['%s="%s"' %
                         (k, v if v is not None else "")
                         for k, v in self._attrs.items()])
        string = '<%s %s' % (self.element_name, args)
        if not self._elements:
            string += '/>'
            return string
        string += '>'
        for element in self._elements:
            string += str(element)
        # BUG fix: the closing tag was emitted as '%s>' (e.g. 'name>')
        # instead of '</%s>', producing malformed XML for any element
        # with children.
        string += '</%s>' % self.element_name
        return string

    def __getitem__(self, name):
        """Return the first child element whose tag is *name*."""
        for element in self._elements:
            if element.element_name == name:
                return element
        raise KeyError("No such element `%s'" % name)

    def __getattr__(self, name):
        # Unknown attribute access falls back to the XML attributes.
        if name in self._attrs:
            return self._attrs[name]
        return object.__getattr__(self, name)

    def attributes(self):
        """Return the (name, value) pairs of XML attributes."""
        return self._attrs.items()

    def children(self):
        """Return the list of child elements."""
        return self._elements
class Document(Element):
    """Root XML document; renders an <?xml ...?> declaration followed by
    its child elements."""

    def __init__(self, *args, **kwargs):
        # Supply standard declaration defaults when absent.
        if 'version' not in kwargs:
            kwargs['version'] = '1.0'
        if 'encoding' not in kwargs:
            kwargs['encoding'] = 'UTF-8'
        Element.__init__(self, '?xml', *args, **kwargs)

    def __str__(self):
        args = " ".join(['%s="%s"' %
                         (k, v if v is not None else "")
                         for k, v in self._attrs.items()])
        # BUG fix: the format string had degenerated to "'\n' % args",
        # which raises TypeError the moment a document is rendered;
        # restore the XML declaration.
        string = '<?xml %s?>\n' % args
        for element in self._elements:
            string += str(element)
        return string
class Text(Element):
    # Leaf node that renders as bare character data (no tags, no attrs).

    def __init__(self, content=""):
        Element.__init__(self, None)
        self.__content = content

    def __str__(self):
        # Render only the stored text content.
        return self.__content
def parse_array(node, plurals=None):
    """Convert every child of *node* to its json form and return the list."""
    return [xml_to_json(child, plurals) for child in node.getchildren()]
def xml_to_json(node, plurals=None):
    """This does a really braindead conversion of an XML tree to
    something that looks like a json dump. In cases where the XML
    and json structures are the same, then this "just works". In
    others, it requires a little hand-editing of the result.
    """
    json = {}
    bool_flag = False
    int_flag = False
    long_flag = False
    # Non-xmlns attributes become top-level keys; an attribute whose value
    # is 'bool', 'int' or 'long' flags how the node text must be coerced.
    for attr in node.keys():
        if not attr.startswith("xmlns"):
            json[attr] = node.get(attr)
            if json[attr] == 'bool':
                bool_flag = True
            elif json[attr] == 'int':
                int_flag = True
            elif json[attr] == 'long':
                long_flag = True
    if not node.getchildren():
        # Leaf node: return the (coerced) text, or the attribute dict when
        # there is no text at all.
        if bool_flag:
            return node.text == 'True'
        elif int_flag:
            return int(node.text)
        elif long_flag:
            return long(node.text)  # Python 2 builtin
        else:
            return node.text or json
    for child in node.getchildren():
        tag = child.tag
        if tag.startswith("{"):
            # Strip the namespace URI, re-prefixing known neutron namespaces
            # as 'alias:tag'.
            ns, tag = tag.split("}", 1)
            for key, uri in NEUTRON_NAMESPACES.iteritems():
                if uri == ns[1:]:
                    tag = key + ":" + tag
        if plurals is not None and tag in plurals:
            json[tag] = parse_array(child, plurals)
        else:
            json[tag] = xml_to_json(child, plurals)
    return json
def deep_dict_to_xml(dest, source):
    """Populate the ``dest`` xml element from the ``source`` ``Mapping``;
    values that are themselves Mappings become recursively-built child
    elements.

    :param source: A python ``Mapping`` (dict)
    :param dest: XML child element will be added to the ``dest``
    """
    for tag, content in source.iteritems():
        if not isinstance(content, collections.Mapping):
            dest.append(Element(tag, content))
        else:
            child = Element(tag)
            deep_dict_to_xml(child, content)
            dest.append(child)
tempest-2014.1.dev4108.gf22b6cc/tempest/common/commands.py 0000664 0001750 0001750 00000004012 12332757070 023216 0 ustar chuck chuck 0000000 0000000 # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import shlex
import subprocess
from tempest.openstack.common import log as logging
LOG = logging.getLogger(__name__)
# NOTE(afazekas):
# These commands assumes the tempest node is the same as
# the only one service node. all-in-one installation.
def sudo_cmd_call(cmd):
    """Run *cmd* under ``sudo -n`` and return its combined stdout/stderr.

    Logs an error with the exit status when the command fails; returns
    None when the subprocess cannot be spawned.
    """
    args = shlex.split(cmd)
    subprocess_args = {'stdout': subprocess.PIPE,
                       'stderr': subprocess.STDOUT}
    try:
        proc = subprocess.Popen(['/usr/bin/sudo', '-n'] + args,
                                **subprocess_args)
        output = proc.communicate()[0]
        # BUG fix: this check used to sit *after* the return statement
        # (dead code) and concatenated the int returncode into the message,
        # which would have raised TypeError.
        if proc.returncode != 0:
            LOG.error(cmd + " returned with: " +
                      str(proc.returncode) + " exit status")
        return output
    except subprocess.CalledProcessError as e:
        LOG.error("command output:\n%s" % e.output)
def ip_addr_raw():
    """Return raw ``ip a`` output for the local node."""
    return sudo_cmd_call("ip a")
def ip_route_raw():
    """Return raw ``ip r`` (routing table) output."""
    return sudo_cmd_call("ip r")
def ip_ns_raw():
    """Return raw ``ip netns list`` output."""
    return sudo_cmd_call("ip netns list")
def iptables_raw(table):
    """Return ``iptables -v -S`` output for *table*."""
    return sudo_cmd_call("iptables -v -S -t " + table)
def ip_ns_list():
    """Return network namespace names as a list of strings."""
    return ip_ns_raw().split()
def ip_ns_exec(ns, cmd):
    """Run *cmd* inside network namespace *ns*."""
    return sudo_cmd_call(" ".join(("ip netns exec", ns, cmd)))
def ip_ns_addr(ns):
    """Return ``ip a`` output from inside namespace *ns*."""
    return ip_ns_exec(ns, "ip a")
def ip_ns_route(ns):
    """Return ``ip r`` output from inside namespace *ns*."""
    return ip_ns_exec(ns, "ip r")
def iptables_ns(ns, table):
    """Return ``iptables -v -S`` output for *table* inside namespace *ns*."""
    return ip_ns_exec(ns, "iptables -v -S -t " + table)
def ovs_db_dump():
    """Return a full dump of the Open vSwitch database."""
    return sudo_cmd_call("ovsdb-client dump")
tempest-2014.1.dev4108.gf22b6cc/tempest/common/isolated_creds.py 0000664 0001750 0001750 00000047153 12332757070 024416 0 ustar chuck chuck 0000000 0000000 # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import netaddr
from tempest import auth
from tempest import clients
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log as logging
CONF = config.CONF
LOG = logging.getLogger(__name__)
class IsolatedCreds(object):
    def __init__(self, name, tempest_client=True, interface='json',
                 password='pass', network_resources=None):
        """
        :param name: prefix used when generating tenant/user names
        :param tempest_client: use tempest REST clients (True) or the
            official client libraries (False)
        :param interface: serialization interface for tempest clients
        :param password: password assigned to the users this object creates
        :param network_resources: dict controlling which of network/subnet/
            router/dhcp get created, or None to create all of them
        """
        self.network_resources = network_resources
        # Caches keyed by credential type ('primary'/'admin'/'alt').
        self.isolated_creds = {}
        self.isolated_creds_old_style = {}
        self.isolated_net_resources = {}
        self.ports = []
        self.name = name
        self.tempest_client = tempest_client
        self.interface = interface
        self.password = password
        self.identity_admin_client, self.network_admin_client = (
            self._get_admin_clients())
def _get_admin_clients(self):
"""
Returns a tuple with instances of the following admin clients (in this
order):
identity
network
"""
if self.tempest_client:
os = clients.AdminManager(interface=self.interface)
else:
os = clients.OfficialClientManager(
auth.get_default_credentials('identity_admin')
)
return os.identity_client, os.network_client
def _create_tenant(self, name, description):
if self.tempest_client:
resp, tenant = self.identity_admin_client.create_tenant(
name=name, description=description)
else:
tenant = self.identity_admin_client.tenants.create(
name,
description=description)
return tenant
def _get_tenant_by_name(self, name):
if self.tempest_client:
resp, tenant = self.identity_admin_client.get_tenant_by_name(name)
else:
tenants = self.identity_admin_client.tenants.list()
for ten in tenants:
if ten['name'] == name:
tenant = ten
break
else:
raise exceptions.NotFound('No such tenant')
return tenant
def _create_user(self, username, password, tenant, email):
if self.tempest_client:
resp, user = self.identity_admin_client.create_user(username,
password,
tenant['id'],
email)
else:
user = self.identity_admin_client.users.create(username, password,
email,
tenant_id=tenant.id)
return user
def _get_user(self, tenant, username):
if self.tempest_client:
resp, user = self.identity_admin_client.get_user_by_username(
tenant['id'],
username)
else:
user = self.identity_admin_client.users.get(username)
return user
def _list_roles(self):
if self.tempest_client:
resp, roles = self.identity_admin_client.list_roles()
else:
roles = self.identity_admin_client.roles.list()
return roles
def _assign_user_role(self, tenant, user, role):
if self.tempest_client:
self.identity_admin_client.assign_user_role(tenant, user, role)
else:
self.identity_admin_client.roles.add_user_role(user,
role, tenant=tenant)
def _delete_user(self, user):
if self.tempest_client:
self.identity_admin_client.delete_user(user)
else:
self.identity_admin_client.users.delete(user)
def _delete_tenant(self, tenant):
if self.tempest_client:
self.identity_admin_client.delete_tenant(tenant)
else:
self.identity_admin_client.tenants.delete(tenant)
def _create_creds(self, suffix="", admin=False):
"""Create random credentials under the following schema.
If the name contains a '.' is the full class path of something, and
we don't really care. If it isn't, it's probably a meaningful name,
so use it.
For logging purposes, -user and -tenant are long and redundant,
don't use them. The user# will be sufficient to figure it out.
"""
if '.' in self.name:
root = ""
else:
root = self.name
tenant_name = data_utils.rand_name(root) + suffix
tenant_desc = tenant_name + "-desc"
tenant = self._create_tenant(name=tenant_name,
description=tenant_desc)
username = data_utils.rand_name(root) + suffix
email = data_utils.rand_name(root) + suffix + "@example.com"
user = self._create_user(username, self.password,
tenant, email)
if admin:
role = None
try:
roles = self._list_roles()
admin_role = CONF.identity.admin_role
if self.tempest_client:
role = next(r for r in roles if r['name'] == admin_role)
else:
role = next(r for r in roles if r.name == admin_role)
except StopIteration:
msg = "No admin role found"
raise exceptions.NotFound(msg)
if self.tempest_client:
self._assign_user_role(tenant['id'], user['id'], role['id'])
else:
self._assign_user_role(tenant.id, user.id, role.id)
return self._get_credentials(user, tenant), user, tenant
def _get_credentials(self, user, tenant):
if self.tempest_client:
user_get = user.get
tenant_get = tenant.get
else:
user_get = user.__dict__.get
tenant_get = tenant.__dict__.get
return auth.get_credentials(
username=user_get('name'), user_id=user_get('id'),
tenant_name=tenant_get('name'), tenant_id=tenant_get('id'),
password=self.password)
    def _create_network_resources(self, tenant_id):
        """Create network, subnet and router for *tenant_id* as dictated by
        self.network_resources (all three when it is None).

        On any failure, resources created so far are torn down before the
        exception is re-raised.

        :returns: (network, subnet, router) tuple; entries may be None
        :raises exceptions.InvalidConfiguration: for inconsistent settings
        """
        network = None
        subnet = None
        router = None
        # Make sure settings
        if self.network_resources:
            if self.network_resources['router']:
                if (not self.network_resources['subnet'] or
                    not self.network_resources['network']):
                    raise exceptions.InvalidConfiguration(
                        'A router requires a subnet and network')
            elif self.network_resources['subnet']:
                if not self.network_resources['network']:
                    raise exceptions.InvalidConfiguration(
                        'A subnet requires a network')
            elif self.network_resources['dhcp']:
                raise exceptions.InvalidConfiguration('DHCP requires a subnet')
        # NOTE(review): this stores the generated name on the data_utils
        # *module* rather than in a local variable; it works, but looks
        # unintended — confirm before changing.
        data_utils.rand_name_root = data_utils.rand_name(self.name)
        if not self.network_resources or self.network_resources['network']:
            network_name = data_utils.rand_name_root + "-network"
            network = self._create_network(network_name, tenant_id)
        try:
            if not self.network_resources or self.network_resources['subnet']:
                subnet_name = data_utils.rand_name_root + "-subnet"
                subnet = self._create_subnet(subnet_name, tenant_id,
                                             network['id'])
            if not self.network_resources or self.network_resources['router']:
                router_name = data_utils.rand_name_root + "-router"
                router = self._create_router(router_name, tenant_id)
                self._add_router_interface(router['id'], subnet['id'])
        except Exception:
            # Best-effort teardown of whatever was created before failing.
            if router:
                self._clear_isolated_router(router['id'], router['name'])
            if subnet:
                self._clear_isolated_subnet(subnet['id'], subnet['name'])
            if network:
                self._clear_isolated_network(network['id'], network['name'])
            raise
        return network, subnet, router
def _create_network(self, name, tenant_id):
if self.tempest_client:
resp, resp_body = self.network_admin_client.create_network(
name=name, tenant_id=tenant_id)
else:
body = {'network': {'tenant_id': tenant_id, 'name': name}}
resp_body = self.network_admin_client.create_network(body)
return resp_body['network']
def _create_subnet(self, subnet_name, tenant_id, network_id):
if not self.tempest_client:
body = {'subnet': {'name': subnet_name, 'tenant_id': tenant_id,
'network_id': network_id, 'ip_version': 4}}
if self.network_resources:
body['enable_dhcp'] = self.network_resources['dhcp']
base_cidr = netaddr.IPNetwork(CONF.network.tenant_network_cidr)
mask_bits = CONF.network.tenant_network_mask_bits
for subnet_cidr in base_cidr.subnet(mask_bits):
try:
if self.tempest_client:
if self.network_resources:
resp, resp_body = self.network_admin_client.\
create_subnet(
network_id=network_id, cidr=str(subnet_cidr),
name=subnet_name,
tenant_id=tenant_id,
enable_dhcp=self.network_resources['dhcp'],
ip_version=4)
else:
resp, resp_body = self.network_admin_client.\
create_subnet(network_id=network_id,
cidr=str(subnet_cidr),
name=subnet_name,
tenant_id=tenant_id,
ip_version=4)
else:
body['subnet']['cidr'] = str(subnet_cidr)
resp_body = self.network_admin_client.create_subnet(body)
break
except exceptions.BadRequest as e:
if 'overlaps with another subnet' not in str(e):
raise
else:
e = exceptions.BuildErrorException()
e.message = 'Available CIDR for subnet creation could not be found'
raise e
return resp_body['subnet']
def _create_router(self, router_name, tenant_id):
external_net_id = dict(
network_id=CONF.network.public_network_id)
if self.tempest_client:
resp, resp_body = self.network_admin_client.create_router(
router_name,
external_gateway_info=external_net_id,
tenant_id=tenant_id)
else:
body = {'router': {'name': router_name, 'tenant_id': tenant_id,
'external_gateway_info': external_net_id,
'admin_state_up': True}}
resp_body = self.network_admin_client.create_router(body)
return resp_body['router']
def _add_router_interface(self, router_id, subnet_id):
if self.tempest_client:
self.network_admin_client.add_router_interface_with_subnet_id(
router_id, subnet_id)
else:
body = {'subnet_id': subnet_id}
self.network_admin_client.add_interface_router(router_id, body)
    def get_primary_tenant(self):
        """Return the tenant record of the primary credentials."""
        # Deprecated. Maintained until all tests are ported
        return self.isolated_creds_old_style.get('primary')[1]
    def get_primary_user(self):
        """Return the user record of the primary credentials."""
        # Deprecated. Maintained until all tests are ported
        return self.isolated_creds_old_style.get('primary')[0]
    def get_alt_tenant(self):
        """Return the tenant record of the alt credentials."""
        # Deprecated. Maintained until all tests are ported
        return self.isolated_creds_old_style.get('alt')[1]
    def get_alt_user(self):
        """Return the user record of the alt credentials."""
        # Deprecated. Maintained until all tests are ported
        return self.isolated_creds_old_style.get('alt')[0]
    def get_admin_tenant(self):
        """Return the tenant record of the admin credentials."""
        # Deprecated. Maintained until all tests are ported
        return self.isolated_creds_old_style.get('admin')[1]
    def get_admin_user(self):
        """Return the user record of the admin credentials."""
        # Deprecated. Maintained until all tests are ported
        return self.isolated_creds_old_style.get('admin')[0]
    def get_primary_network(self):
        """Return the isolated network of the primary credentials."""
        return self.isolated_net_resources.get('primary')[0]
    def get_primary_subnet(self):
        """Return the isolated subnet of the primary credentials."""
        return self.isolated_net_resources.get('primary')[1]
    def get_primary_router(self):
        """Return the isolated router of the primary credentials."""
        return self.isolated_net_resources.get('primary')[2]
    def get_admin_network(self):
        """Return the isolated network of the admin credentials."""
        return self.isolated_net_resources.get('admin')[0]
    def get_admin_subnet(self):
        """Return the isolated subnet of the admin credentials."""
        return self.isolated_net_resources.get('admin')[1]
    def get_admin_router(self):
        """Return the isolated router of the admin credentials."""
        return self.isolated_net_resources.get('admin')[2]
    def get_alt_network(self):
        """Return the isolated network of the alt credentials."""
        return self.isolated_net_resources.get('alt')[0]
    def get_alt_subnet(self):
        """Return the isolated subnet of the alt credentials."""
        return self.isolated_net_resources.get('alt')[1]
    def get_alt_router(self):
        """Return the isolated router of the alt credentials."""
        return self.isolated_net_resources.get('alt')[2]
    def get_credentials(self, credential_type, old_style):
        """Return the isolated credentials for *credential_type*.

        Credentials ('primary', 'alt' or 'admin') are created on first use
        and cached; when Neutron is available, matching isolated network
        resources are created as well. With old_style=True, returns a
        (username, tenant_name, password) tuple instead of the credentials
        object.
        """
        if self.isolated_creds.get(credential_type):
            # Already provisioned earlier in this run; reuse the cache.
            credentials = self.isolated_creds[credential_type]
        else:
            is_admin = (credential_type == 'admin')
            credentials, user, tenant = self._create_creds(admin=is_admin)
            self.isolated_creds[credential_type] = credentials
            # Maintained until tests are ported
            self.isolated_creds_old_style[credential_type] = (user, tenant)
            LOG.info("Acquired isolated creds:\n credentials: %s"
                     % credentials)
            if CONF.service_available.neutron:
                # Give the new tenant its own network/subnet/router trio.
                network, subnet, router = self._create_network_resources(
                    credentials.tenant_id)
                self.isolated_net_resources[credential_type] = (
                    network, subnet, router,)
                LOG.info("Created isolated network resources for : \n"
                         + " credentials: %s" % credentials)
        if old_style:
            return (credentials.username, credentials.tenant_name,
                    credentials.password)
        else:
            return credentials
def get_primary_creds(self, old_style=False):
return self.get_credentials('primary', old_style)
def get_admin_creds(self, old_style=False):
return self.get_credentials('admin', old_style)
def get_alt_creds(self, old_style=False):
return self.get_credentials('alt', old_style)
def _clear_isolated_router(self, router_id, router_name):
net_client = self.network_admin_client
try:
net_client.delete_router(router_id)
except exceptions.NotFound:
LOG.warn('router with name: %s not found for delete' %
router_name)
def _clear_isolated_subnet(self, subnet_id, subnet_name):
net_client = self.network_admin_client
try:
net_client.delete_subnet(subnet_id)
except exceptions.NotFound:
LOG.warn('subnet with name: %s not found for delete' %
subnet_name)
def _clear_isolated_network(self, network_id, network_name):
net_client = self.network_admin_client
try:
net_client.delete_network(network_id)
except exceptions.NotFound:
LOG.warn('network with name: %s not found for delete' %
network_name)
def _cleanup_ports(self, network_id):
# TODO(mlavalle) This method will be removed once patch
# https://review.openstack.org/#/c/46563/ merges in Neutron
if not self.ports:
if self.tempest_client:
resp, resp_body = self.network_admin_client.list_ports()
else:
resp_body = self.network_admin_client.list_ports()
self.ports = resp_body['ports']
ports_to_delete = [
port
for port in self.ports
if (port['network_id'] == network_id and
port['device_owner'] != 'network:router_interface' and
port['device_owner'] != 'network:dhcp')
]
for port in ports_to_delete:
try:
LOG.info('Cleaning up port id %s, name %s' %
(port['id'], port['name']))
self.network_admin_client.delete_port(port['id'])
except exceptions.NotFound:
LOG.warn('Port id: %s, name %s not found for clean-up' %
(port['id'], port['name']))
    def _clear_isolated_net_resources(self):
        """Tear down each credential's network resources in dependency order.

        For every cached (network, subnet, router) tuple: detach the router
        interface, delete the router, clean up stray ports, delete the
        subnet, then the network. Each step is skipped when the
        corresponding resource was not requested via network_resources.
        """
        net_client = self.network_admin_client
        for cred in self.isolated_net_resources:
            network, subnet, router = self.isolated_net_resources.get(cred)
            LOG.debug("Clearing network: %(network)s, "
                      "subnet: %(subnet)s, router: %(router)s",
                      {'network': network, 'subnet': subnet, 'router': router})
            if (not self.network_resources or
                    self.network_resources.get('router')):
                try:
                    # The interface must be removed before the router can go.
                    if self.tempest_client:
                        net_client.remove_router_interface_with_subnet_id(
                            router['id'], subnet['id'])
                    else:
                        body = {'subnet_id': subnet['id']}
                        net_client.remove_interface_router(router['id'], body)
                except exceptions.NotFound:
                    LOG.warn('router with name: %s not found for delete' %
                             router['name'])
                self._clear_isolated_router(router['id'], router['name'])
            if (not self.network_resources or
                    self.network_resources.get('network')):
                # TODO(mlavalle) This method call will be removed once patch
                # https://review.openstack.org/#/c/46563/ merges in Neutron
                self._cleanup_ports(network['id'])
            if (not self.network_resources or
                    self.network_resources.get('subnet')):
                self._clear_isolated_subnet(subnet['id'], subnet['name'])
            if (not self.network_resources or
                    self.network_resources.get('network')):
                self._clear_isolated_network(network['id'], network['name'])
def clear_isolated_creds(self):
if not self.isolated_creds:
return
self._clear_isolated_net_resources()
for creds in self.isolated_creds.itervalues():
try:
self._delete_user(creds.user_id)
except exceptions.NotFound:
LOG.warn("user with name: %s not found for delete" %
creds.username)
try:
self._delete_tenant(creds.tenant_id)
except exceptions.NotFound:
LOG.warn("tenant with name: %s not found for delete" %
creds.tenant_name)
tempest-2014.1.dev4108.gf22b6cc/tempest/common/waiters.py 0000664 0001750 0001750 00000012606 12332757070 023103 0 ustar chuck chuck 0000000 0000000 # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
from tempest.common.utils import misc as misc_utils
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log as logging
CONF = config.CONF
LOG = logging.getLogger(__name__)
# NOTE(afazekas): This function needs to know a token and a subject.
def wait_for_server_status(client, server_id, status, ready_wait=True,
                           extra_timeout=0, raise_on_error=True):
    """Waits for a server to reach a given status.

    Polls client.get_server every client.build_interval seconds until the
    server reports *status* (and, with ready_wait, until no task is in
    progress). Raises BuildErrorException on ERROR (unless raise_on_error
    is False) and TimeoutException after build_timeout + extra_timeout
    seconds.
    """
    def _get_task_state(body):
        # v2 and v3 compute APIs expose the task state under different keys.
        if client.service == CONF.compute.catalog_v3_type:
            task_state = body.get("os-extended-status:task_state", None)
        else:
            task_state = body.get('OS-EXT-STS:task_state', None)
        return task_state
    # NOTE(afazekas): UNKNOWN status possible on ERROR
    # or in a very early stage.
    resp, body = client.get_server(server_id)
    old_status = server_status = body['status']
    old_task_state = task_state = _get_task_state(body)
    start_time = int(time.time())
    timeout = client.build_timeout + extra_timeout
    while True:
        # NOTE(afazekas): Now the BUILD status only reached
        # between the UNKNOWN->ACTIVE transition.
        # TODO(afazekas): enumerate and validate the stable status set
        if status == 'BUILD' and server_status != 'UNKNOWN':
            return
        if server_status == status:
            if ready_wait:
                if status == 'BUILD':
                    return
                # NOTE(afazekas): The instance is in "ready for action state"
                # when no task in progress
                # NOTE(afazekas): Converted to string because of the XML
                # responses
                if str(task_state) == "None":
                    # without state api extension 3 sec usually enough
                    time.sleep(CONF.compute.ready_wait)
                    return
            else:
                return
        time.sleep(client.build_interval)
        resp, body = client.get_server(server_id)
        server_status = body['status']
        task_state = _get_task_state(body)
        if (server_status != old_status) or (task_state != old_task_state):
            # Log only actual transitions to keep the output readable.
            LOG.info('State transition "%s" ==> "%s" after %d second wait',
                     '/'.join((old_status, str(old_task_state))),
                     '/'.join((server_status, str(task_state))),
                     time.time() - start_time)
        if (server_status == 'ERROR') and raise_on_error:
            raise exceptions.BuildErrorException(server_id=server_id)
        timed_out = int(time.time()) - start_time >= timeout
        if timed_out:
            expected_task_state = 'None' if ready_wait else 'n/a'
            message = ('Server %(server_id)s failed to reach %(status)s '
                       'status and task state "%(expected_task_state)s" '
                       'within the required time (%(timeout)s s).' %
                       {'server_id': server_id,
                        'status': status,
                        'expected_task_state': expected_task_state,
                        'timeout': timeout})
            message += ' Current status: %s.' % server_status
            message += ' Current task state: %s.' % task_state
            caller = misc_utils.find_test_caller()
            if caller:
                message = '(%s) %s' % (caller, message)
            raise exceptions.TimeoutException(message)
        old_status = server_status
        old_task_state = task_state
def wait_for_image_status(client, image_id, status):
    """Waits for an image to reach a given status.

    The client should have a get_image(image_id) method to get the image.
    The client should also have build_interval and build_timeout attributes.
    Raises AddImageException if the image goes to ERROR, TimeoutException
    if build_timeout elapses first.
    """
    _, image = client.get_image(image_id)
    start_time = int(time.time())
    while True:
        if image['status'] == status:
            return
        time.sleep(client.build_interval)
        _, image = client.get_image(image_id)
        if image['status'] == 'ERROR':
            raise exceptions.AddImageException(image_id=image_id)
        # Re-check before the timeout test to avoid a false negative when
        # the image reaches the expected status right at the deadline.
        if image['status'] == status:
            return
        if int(time.time()) - start_time >= client.build_timeout:
            message = ('Image %(image_id)s failed to reach %(status)s '
                       'status within the required time (%(timeout)s s).' %
                       {'image_id': image_id,
                        'status': status,
                        'timeout': client.build_timeout})
            message += ' Current status: %s.' % image['status']
            caller = misc_utils.find_test_caller()
            if caller:
                message = '(%s) %s' % (caller, message)
            raise exceptions.TimeoutException(message)
tempest-2014.1.dev4108.gf22b6cc/tempest/common/ssh.py 0000664 0001750 0001750 00000013462 12332757070 022223 0 ustar chuck chuck 0000000 0000000 # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import cStringIO
import select
import six
import socket
import time
import warnings
from tempest import exceptions
from tempest.openstack.common import log as logging
with warnings.catch_warnings():
warnings.simplefilter("ignore")
import paramiko
LOG = logging.getLogger(__name__)
class Client(object):
    """Minimal SSH client built on paramiko.

    Retries connection establishment until *timeout* seconds have elapsed
    and reads whole command outputs into memory.
    """
    def __init__(self, host, username, password=None, timeout=300, pkey=None,
                 channel_timeout=10, look_for_keys=False, key_filename=None):
        """Store connection parameters.

        pkey may be a paramiko key object or a private-key string, which
        is parsed into an RSAKey. timeout is the overall connect deadline
        in seconds; channel_timeout is the per-channel socket timeout.
        """
        self.host = host
        self.username = username
        self.password = password
        if isinstance(pkey, six.string_types):
            # Accept the raw key material as a string for convenience.
            pkey = paramiko.RSAKey.from_private_key(
                cStringIO.StringIO(str(pkey)))
        self.pkey = pkey
        self.look_for_keys = look_for_keys
        self.key_filename = key_filename
        self.timeout = int(timeout)
        self.channel_timeout = float(channel_timeout)
        # Bytes read from the channel per recv() call.
        self.buf_size = 1024
    def _get_ssh_connection(self, sleep=1.5, backoff=1):
        """Returns an ssh connection to the specified host."""
        bsleep = sleep
        ssh = paramiko.SSHClient()
        # Unknown host keys are accepted automatically (test environment).
        ssh.set_missing_host_key_policy(
            paramiko.AutoAddPolicy())
        _start_time = time.time()
        if self.pkey is not None:
            LOG.info("Creating ssh connection to '%s' as '%s'"
                     " with public key authentication",
                     self.host, self.username)
        else:
            LOG.info("Creating ssh connection to '%s' as '%s'"
                     " with password %s",
                     self.host, self.username, str(self.password))
        attempts = 0
        while True:
            try:
                ssh.connect(self.host, username=self.username,
                            password=self.password,
                            look_for_keys=self.look_for_keys,
                            key_filename=self.key_filename,
                            timeout=self.channel_timeout, pkey=self.pkey)
                LOG.info("ssh connection to %s@%s successfuly created",
                         self.username, self.host)
                return ssh
            except (socket.error,
                    paramiko.SSHException) as e:
                if self._is_timed_out(_start_time):
                    LOG.exception("Failed to establish authenticated ssh"
                                  " connection to %s@%s after %d attempts",
                                  self.username, self.host, attempts)
                    raise exceptions.SSHTimeout(host=self.host,
                                                user=self.username,
                                                password=self.password)
                # Linear backoff: each retry waits `backoff` seconds longer.
                bsleep += backoff
                attempts += 1
                LOG.warning("Failed to establish authenticated ssh"
                            " connection to %s@%s (%s). Number attempts: %s."
                            " Retry after %d seconds.",
                            self.username, self.host, e, attempts, bsleep)
                time.sleep(bsleep)
    def _is_timed_out(self, start_time):
        # Equivalent to (time.time() - start_time) > self.timeout.
        return (time.time() - self.timeout) > start_time
    def exec_command(self, cmd):
        """
        Execute the specified command on the server.

        Note that this method is reading whole command outputs to memory,
        thus shouldn't be used for large outputs.

        :returns: data read from standard output of the command.
        :raises: SSHExecCommandFailed if command returns nonzero
                 status. The exception contains command status stderr content.
        """
        ssh = self._get_ssh_connection()
        transport = ssh.get_transport()
        channel = transport.open_session()
        channel.fileno()  # Register event pipe
        channel.exec_command(cmd)
        # No stdin is sent to the remote command.
        channel.shutdown_write()
        out_data = []
        err_data = []
        poll = select.poll()
        poll.register(channel, select.POLLIN)
        start_time = time.time()
        while True:
            # NOTE(review): poll() takes its timeout in milliseconds while
            # channel_timeout is documented in seconds — the loop still
            # terminates via _is_timed_out, but confirm the units are
            # intentional.
            ready = poll.poll(self.channel_timeout)
            if not any(ready):
                if not self._is_timed_out(start_time):
                    continue
                raise exceptions.TimeoutException(
                    "Command: '{0}' executed on host '{1}'.".format(
                        cmd, self.host))
            if not ready[0]:        # If there is nothing to read.
                continue
            out_chunk = err_chunk = None
            if channel.recv_ready():
                out_chunk = channel.recv(self.buf_size)
                out_data += out_chunk,
            if channel.recv_stderr_ready():
                err_chunk = channel.recv_stderr(self.buf_size)
                err_data += err_chunk,
            # Stop once the channel is closed and both streams are drained.
            if channel.closed and not err_chunk and not out_chunk:
                break
        exit_status = channel.recv_exit_status()
        if 0 != exit_status:
            raise exceptions.SSHExecCommandFailed(
                command=cmd, exit_status=exit_status,
                strerror=''.join(err_data))
        return ''.join(out_data)
    def test_connection_auth(self):
        """Raises an exception when we can not connect to server via ssh."""
        connection = self._get_ssh_connection()
        connection.close()
tempest-2014.1.dev4108.gf22b6cc/tempest/common/__init__.py 0000664 0001750 0001750 00000000000 12332757070 023145 0 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/common/custom_matchers.py 0000664 0001750 0001750 00000013512 12332757070 024622 0 ustar chuck chuck 0000000 0000000 # Copyright 2013 NTT Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
class ExistsAllResponseHeaders(object):
    """
    Specific matcher to check the existence of Swift's response headers

    This matcher checks the existence of common headers for each HTTP method
    or the target, which means account, container or object.
    When checking the existence of 'specific' headers such as
    X-Account-Meta-* or X-Object-Manifest for example, those headers must be
    checked in each test code.
    """
    # Headers that every Swift response must carry.
    _COMMON_HEADERS = ('content-length', 'content-type', 'x-trans-id', 'date')

    def __init__(self, target, method):
        """
        param: target Account/Container/Object
        param: method PUT/GET/HEAD/DELETE/COPY/POST
        """
        self.target = target
        self.method = method

    def match(self, actual):
        """
        param: actual HTTP response headers
        """
        # Build the expected header list in checking order, then report the
        # first one missing from the response (None when all are present).
        expected = list(self._COMMON_HEADERS)
        if self.method in ('GET', 'HEAD'):
            expected.extend(('x-timestamp', 'accept-ranges'))
            if self.target == 'Account':
                expected.extend(('x-account-bytes-used',
                                 'x-account-container-count',
                                 'x-account-object-count'))
            elif self.target == 'Container':
                expected.extend(('x-container-bytes-used',
                                 'x-container-object-count'))
            elif self.target == 'Object':
                expected.append('etag')
        elif self.method in ('PUT', 'COPY'):
            if self.target == 'Object':
                expected.append('etag')
        for header in expected:
            if header not in actual:
                return NonExistentHeader(header)
        return None
class NonExistentHeader(object):
    """
    Informs an error message for end users in the case of missing a
    certain header in Swift's responses
    """

    def __init__(self, header):
        # Name of the header that was absent from the response.
        self.header = header

    def describe(self):
        """Return a human-readable description of the mismatch."""
        return "{0} header does not exist".format(self.header)

    def get_details(self):
        """No extra detail accompanies this mismatch."""
        return {}
class AreAllWellFormatted(object):
    """
    Specific matcher to check the correctness of formats of values of Swift's
    response headers

    This matcher checks the format of values of response headers.
    When checking the format of values of 'specific' headers such as
    X-Account-Meta-* or X-Object-Manifest for example, those values must be
    checked in each test code.
    """
    def match(self, actual):
        """Return an InvalidFormat for the first malformed header, else None.

        param: actual HTTP response headers (dict of lower-cased names)
        """
        # items() instead of the Python-2-only iteritems() so the matcher
        # also runs under Python 3; raw strings keep the regex escapes
        # valid on modern interpreters without changing the patterns.
        for key, value in actual.items():
            if key == 'content-length' and not value.isdigit():
                return InvalidFormat(key, value)
            elif key == 'x-timestamp' and not re.match(r"^\d+\.?\d*\Z", value):
                return InvalidFormat(key, value)
            elif key == 'x-account-bytes-used' and not value.isdigit():
                return InvalidFormat(key, value)
            elif key == 'x-account-container-count' and not value.isdigit():
                return InvalidFormat(key, value)
            elif key == 'x-account-object-count' and not value.isdigit():
                return InvalidFormat(key, value)
            elif key == 'x-container-bytes-used' and not value.isdigit():
                return InvalidFormat(key, value)
            elif key == 'x-container-object-count' and not value.isdigit():
                return InvalidFormat(key, value)
            elif key == 'content-type' and not value:
                return InvalidFormat(key, value)
            elif key == 'x-trans-id' and \
                    not re.match(r"^tx[0-9a-f]{21}-[0-9a-f]{10}.*", value):
                return InvalidFormat(key, value)
            elif key == 'date' and not value:
                return InvalidFormat(key, value)
            elif key == 'accept-ranges' and not value == 'bytes':
                return InvalidFormat(key, value)
            elif key == 'etag' and not value.isalnum():
                return InvalidFormat(key, value)
            elif key == 'transfer-encoding' and not value == 'chunked':
                return InvalidFormat(key, value)
        return None
class InvalidFormat(object):
    """
    Informs an error message for end users if a format of a certain header
    is invalid
    """

    def __init__(self, key, value):
        # Offending header name and its malformed value.
        self.key = key
        self.value = value

    def describe(self):
        """Return a human-readable description of the mismatch."""
        return "InvalidFormat ({0}, {1})".format(self.key, self.value)

    def get_details(self):
        """No extra detail accompanies this mismatch."""
        return {}
tempest-2014.1.dev4108.gf22b6cc/tempest/common/debug.py 0000664 0001750 0001750 00000003232 12332757070 022506 0 ustar chuck chuck 0000000 0000000 # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.common import commands
from tempest import config
from tempest.openstack.common import log as logging
CONF = config.CONF
LOG = logging.getLogger(__name__)
TABLES = ['filter', 'nat', 'mangle']
def log_ip_ns():
    """Log host and per-namespace addressing, routes and iptables tables.

    No-op unless CONF.debug.enable is set.
    """
    if not CONF.debug.enable:
        return
    LOG.info("Host Addr:\n" + commands.ip_addr_raw())
    LOG.info("Host Route:\n" + commands.ip_route_raw())
    for table in TABLES:
        LOG.info('Host %s table:\n%s', table, commands.iptables_raw(table))
    ns_list = commands.ip_ns_list()
    LOG.info("Host ns list" + str(ns_list))
    # Repeat the same dump inside every network namespace.
    for ns in ns_list:
        LOG.info("ns(%s) Addr:\n%s", ns, commands.ip_ns_addr(ns))
        LOG.info("ns(%s) Route:\n%s", ns, commands.ip_ns_route(ns))
        for table in TABLES:
            LOG.info('ns(%s) table(%s):\n%s', ns, table,
                     commands.iptables_ns(ns, table))
def log_ovs_db():
    """Log a dump of the Open vSwitch database.

    No-op unless debugging is enabled and Neutron is available.
    """
    if not CONF.debug.enable or not CONF.service_available.neutron:
        return
    db_dump = commands.ovs_db_dump()
    LOG.info("OVS DB:\n" + db_dump)
def log_net_debug():
    """Log all collected network debug information (IP/ns plus OVS DB)."""
    log_ip_ns()
    log_ovs_db()
tempest-2014.1.dev4108.gf22b6cc/tempest/common/utils/ 0000775 0001750 0001750 00000000000 12332757136 022211 5 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/common/utils/linux/ 0000775 0001750 0001750 00000000000 12332757136 023350 5 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/common/utils/linux/__init__.py 0000664 0001750 0001750 00000000000 12332757070 025444 0 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/common/utils/linux/remote_client.py 0000664 0001750 0001750 00000010252 12332757070 026550 0 ustar chuck chuck 0000000 0000000 # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import six
import time
from tempest.common import ssh
from tempest import config
from tempest import exceptions
CONF = config.CONF
class RemoteClient():
    """Run shell commands on a guest server over SSH.

    Accepts either an IP address string or a server dict (from the compute
    API) from which the address is resolved via the configured network.
    """
    # NOTE(afazekas): It should always get an address instead of server
    def __init__(self, server, username, password=None, pkey=None):
        """Resolve the server address and build the underlying ssh.Client.

        :raises: ServerUnreachable when no address with the configured IP
                 version exists on the configured network.
        """
        ssh_timeout = CONF.compute.ssh_timeout
        network = CONF.compute.network_for_ssh
        ip_version = CONF.compute.ip_version_for_ssh
        ssh_channel_timeout = CONF.compute.ssh_channel_timeout
        if isinstance(server, six.string_types):
            # Caller passed a bare address already.
            ip_address = server
        else:
            addresses = server['addresses'][network]
            for address in addresses:
                if address['version'] == ip_version:
                    ip_address = address['addr']
                    break
            else:
                raise exceptions.ServerUnreachable()
        self.ssh_client = ssh.Client(ip_address, username, password,
                                     ssh_timeout, pkey=pkey,
                                     channel_timeout=ssh_channel_timeout)
    def exec_command(self, cmd):
        """Run *cmd* on the guest and return its stdout as a string."""
        return self.ssh_client.exec_command(cmd)
    def validate_authentication(self):
        """Validate ssh connection and authentication

        This method raises an Exception when the validation fails.
        """
        self.ssh_client.test_connection_auth()
    def hostname_equals_servername(self, expected_hostname):
        # Get host name using command "hostname"
        actual_hostname = self.exec_command("hostname").rstrip()
        return expected_hostname == actual_hostname
    def get_files(self, path):
        # Return a list of comma separated files
        command = "ls -m " + path
        return self.exec_command(command).rstrip('\n').split(', ')
    def get_ram_size_in_mb(self):
        """Return total RAM in MB as a string, or None if not parseable."""
        output = self.exec_command('free -m | grep Mem')
        if output:
            return output.split()[1]
    def get_number_of_vcpus(self):
        """Return the guest's CPU count from /proc/cpuinfo."""
        command = 'cat /proc/cpuinfo | grep processor | wc -l'
        output = self.exec_command(command)
        return int(output)
    def get_partitions(self):
        # Return the contents of /proc/partitions
        command = 'cat /proc/partitions'
        output = self.exec_command(command)
        return output
    def get_boot_time(self):
        """Return the guest's boot time as a local struct_time."""
        cmd = 'cut -f1 -d. /proc/uptime'
        boot_secs = self.exec_command(cmd)
        boot_time = time.time() - int(boot_secs)
        return time.localtime(boot_time)
    def write_to_console(self, message):
        """Echo *message* to the guest console (shell metachars escaped)."""
        message = re.sub("([$\\`])", "\\\\\\\\\\1", message)
        # usually to /dev/ttyS0
        cmd = 'sudo sh -c "echo \\"%s\\" >/dev/console"' % message
        return self.exec_command(cmd)
    def ping_host(self, host):
        """Send a single ping to *host* and return the command output."""
        cmd = 'ping -c1 -w1 %s' % host
        return self.exec_command(cmd)
    def get_mac_address(self):
        """Return the first interface MAC address reported by ifconfig."""
        cmd = "/sbin/ifconfig | awk '/HWaddr/ {print $5}'"
        return self.exec_command(cmd)
    def get_ip_list(self):
        """Return the output of `ip address` on the guest."""
        cmd = "/bin/ip address"
        return self.exec_command(cmd)
    def assign_static_ip(self, nic, addr):
        """Add *addr* (with the configured mask) to interface *nic*."""
        cmd = "sudo /bin/ip addr add {ip}/{mask} dev {nic}".format(
            ip=addr, mask=CONF.network.tenant_network_mask_bits,
            nic=nic
        )
        return self.exec_command(cmd)
    def turn_nic_on(self, nic):
        """Bring interface *nic* up."""
        cmd = "sudo /bin/ip link set {nic} up".format(nic=nic)
        return self.exec_command(cmd)
    def get_pids(self, pr_name):
        # Get pid(s) of a process/program
        cmd = "ps -ef | grep %s | grep -v 'grep' | awk {'print $1'}" % pr_name
        return self.exec_command(cmd).split('\n')
tempest-2014.1.dev4108.gf22b6cc/tempest/common/utils/data_utils.py 0000664 0001750 0001750 00000004042 12332757070 024711 0 ustar chuck chuck 0000000 0000000 # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
import random
import uuid
def rand_uuid():
    """Return a random UUID4 in its canonical hyphenated string form."""
    return '{0}'.format(uuid.uuid4())
def rand_uuid_hex():
    """Return a random UUID4 as a bare 32-character hex string."""
    value = uuid.uuid4()
    return value.hex
def rand_name(name=''):
    """Return *name* with a random numeric suffix, or just the number."""
    suffix = str(random.randint(1, 0x7fffffff))
    return '-'.join((name, suffix)) if name else suffix
def rand_int_id(start=0, end=0x7fffffff):
    """Return a random integer in the inclusive range [start, end]."""
    # randrange with end + 1 is the randint(start, end) equivalent.
    return random.randrange(start, end + 1)
def rand_mac_address():
    """Generate an Ethernet MAC address."""
    # NOTE(vish): We would prefer to use 0xfe here to ensure that linux
    #             bridge mac addresses don't change, but it appears to
    #             conflict with libvirt, so we use the next highest octet
    #             that has the unicast and locally administered bits set
    #             properly: 0xfa.
    #             Discussion: https://bugs.launchpad.net/nova/+bug/921838
    octets = [0xfa, 0x16, 0x3e]
    octets.extend(random.randint(0x00, 0xff) for _ in range(3))
    return ':'.join('%02x' % octet for octet in octets)
def parse_image_id(image_ref):
    """Return the image id from a given image ref."""
    # The id is whatever follows the final '/' (the whole ref if none).
    segments = image_ref.rsplit('/')
    return segments[-1]
def arbitrary_string(size=4, base_text=None):
    """
    Return size characters from base_text, repeating the base_text infinitely
    if needed.
    """
    source = base_text or 'test'
    repeated = itertools.cycle(source)
    return ''.join(itertools.islice(repeated, size))
tempest-2014.1.dev4108.gf22b6cc/tempest/common/utils/__init__.py 0000664 0001750 0001750 00000000174 12332757070 024321 0 ustar chuck chuck 0000000 0000000 PING_IPV4_COMMAND = 'ping -c 3 '
# IPv6 ping command prefix (3 probes); the target host gets appended.
PING_IPV6_COMMAND = 'ping6 -c 3 '
# Extracts the packet-loss percentage from ping's summary line.
PING_PACKET_LOSS_REGEX = '(\d{1,3})\.?\d*\% packet loss'
tempest-2014.1.dev4108.gf22b6cc/tempest/common/utils/misc.py 0000664 0001750 0001750 00000005654 12332757070 023525 0 ustar chuck chuck 0000000 0000000 # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import inspect
import re
from tempest.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def singleton(cls):
    """Simple wrapper for classes that should only have a single instance."""
    instances = {}

    def getinstance():
        # Build the sole instance lazily on first call, then reuse it.
        try:
            return instances[cls]
        except KeyError:
            instances[cls] = cls()
            return instances[cls]
    return getinstance
def find_test_caller():
    """Find the caller class and test name.

    Because we know that the interesting things that call us are
    test_* methods, and various kinds of setUp / tearDown, we
    can look through the call stack to find appropriate methods,
    and the class we were in when those were called.

    Returns "ClassName:method_name" (or "ClassName:_run_cleanups"),
    or None when no test-like frame is found.
    """
    caller_name = None
    names = []
    frame = inspect.currentframe()
    is_cleanup = False
    # Start climbing the ladder until we hit a good method
    while True:
        try:
            frame = frame.f_back
            name = frame.f_code.co_name
            names.append(name)
            if re.search("^(test_|setUp|tearDown)", name):
                # Direct hit: a test method or fixture frame.
                cname = ""
                if 'self' in frame.f_locals:
                    cname = frame.f_locals['self'].__class__.__name__
                if 'cls' in frame.f_locals:
                    cname = frame.f_locals['cls'].__name__
                caller_name = cname + ":" + name
                break
            elif re.search("^_run_cleanup", name):
                is_cleanup = True
            else:
                cname = ""
                if 'self' in frame.f_locals:
                    cname = frame.f_locals['self'].__class__.__name__
                if 'cls' in frame.f_locals:
                    cname = frame.f_locals['cls'].__name__
                # the fact that we are running cleanups is indicated pretty
                # deep in the stack, so if we see that we want to just
                # start looking for a real class name, and declare victory
                # once we do.
                if is_cleanup and cname:
                    if not re.search("^RunTest", cname):
                        caller_name = cname + ":_run_cleanups"
                        break
        except Exception:
            # Ran off the top of the stack (frame became None) or frame
            # internals were inaccessible; give up gracefully.
            break
    # prevents frame leaks
    del frame
    if caller_name is None:
        LOG.debug("Sane call name not found in %s" % names)
    return caller_name
tempest-2014.1.dev4108.gf22b6cc/tempest/common/utils/file_utils.py 0000664 0001750 0001750 00000001422 12332757070 024716 0 ustar chuck chuck 0000000 0000000 # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
def have_effective_read_access(path):
    """Return True if *path* can actually be opened for reading.

    Opening (rather than os.access) reflects the *effective* uid/gid.
    Returns False when the open fails with IOError (missing file,
    permission denied, ...).
    """
    try:
        # The context manager guarantees the handle is closed even if
        # something after the open were to fail.
        with open(path, "rb"):
            return True
    except IOError:
        return False
tempest-2014.1.dev4108.gf22b6cc/tempest/common/rest_client.py 0000664 0001750 0001750 00000056316 12332757070 023746 0 ustar chuck chuck 0000000 0000000 # Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import json
from lxml import etree
import re
import time
import jsonschema
from tempest.common import http
from tempest.common.utils import misc as misc_utils
from tempest.common import xml_utils as common
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log as logging
CONF = config.CONF
# redrive rate limited calls at most twice
MAX_RECURSION_DEPTH = 2
TOKEN_CHARS_RE = re.compile('^[-A-Za-z0-9+/=]*$')
# All the successful HTTP status codes from RFC 2616
HTTP_SUCCESS = (200, 201, 202, 203, 204, 205, 206)
class RestClient(object):
    """Base class for tempest REST service clients.

    Wraps an ``auth_provider`` (which supplies tokens and endpoint URLs),
    issues HTTP requests through ``http.ClosingHttp``, logs every
    request/response pair, transparently retries rate-limited (413) calls,
    and maps error status codes onto tempest exceptions.  Subclasses set
    ``TYPE`` to "json" or "xml" and, for XML clients, redefine
    ``list_tags``/``dict_tags`` to steer response parsing.
    """

    TYPE = "json"

    # This is used by _parse_resp method
    # Redefine it for purposes of your xml service client
    # List should contain top-xml_tag-names of data, which is like list/array
    # For example, in keystone it is users, roles, tenants and services
    # All of it has children with same tag-names
    list_tags = []

    # This is used by _parse_resp method too
    # Used for selection of dict-like xmls,
    # like metadata for Vms in nova, and volumes in cinder
    dict_tags = ["metadata", ]

    LOG = logging.getLogger(__name__)

    def __init__(self, auth_provider):
        self.auth_provider = auth_provider
        self.endpoint_url = None
        self.service = None
        # The version of the API this client implements
        self.api_version = None
        self._skip_path = False
        self.build_interval = CONF.compute.build_interval
        self.build_timeout = CONF.compute.build_timeout
        # Lower-cased RFC 2616 general-header and response-header names;
        # response_checker uses these to detect entity headers in a 205.
        self.general_header_lc = set(('cache-control', 'connection',
                                      'date', 'pragma', 'trailer',
                                      'transfer-encoding', 'via',
                                      'warning'))
        self.response_header_lc = set(('accept-ranges', 'age', 'etag',
                                       'location', 'proxy-authenticate',
                                       'retry-after', 'server',
                                       'vary', 'www-authenticate'))
        dscv = CONF.identity.disable_ssl_certificate_validation
        self.http_obj = http.ClosingHttp(
            disable_ssl_certificate_validation=dscv)

    def _get_type(self):
        """Return the serialization type ("json" or "xml") of this client."""
        return self.TYPE

    def get_headers(self, accept_type=None, send_type=None):
        """Build Content-Type/Accept headers, defaulting both to TYPE."""
        if accept_type is None:
            accept_type = self._get_type()
        if send_type is None:
            send_type = self._get_type()
        return {'Content-Type': 'application/%s' % send_type,
                'Accept': 'application/%s' % accept_type}

    def __str__(self):
        STRING_LIMIT = 80
        str_format = ("config:%s, service:%s, base_url:%s, "
                      "filters: %s, build_interval:%s, build_timeout:%s"
                      "\ntoken:%s..., \nheaders:%s...")
        return str_format % (CONF, self.service, self.base_url,
                             self.filters, self.build_interval,
                             self.build_timeout,
                             str(self.token)[0:STRING_LIMIT],
                             str(self.get_headers())[0:STRING_LIMIT])

    def _get_region(self, service):
        """
        Returns the region for a specific service
        """
        service_region = None
        for cfgname in dir(CONF._config):
            # Find all config.FOO.catalog_type and assume FOO is a service.
            cfg = getattr(CONF, cfgname)
            catalog_type = getattr(cfg, 'catalog_type', None)
            if catalog_type == service:
                service_region = getattr(cfg, 'region', None)
        # Fall back to the identity region when the service group defines
        # no region of its own.
        if not service_region:
            service_region = CONF.identity.region
        return service_region

    def _get_endpoint_type(self, service):
        """
        Returns the endpoint type for a specific service
        """
        # If the client requests a specific endpoint type, then be it
        if self.endpoint_url:
            return self.endpoint_url
        endpoint_type = None
        for cfgname in dir(CONF._config):
            # Find all config.FOO.catalog_type and assume FOO is a service.
            cfg = getattr(CONF, cfgname)
            catalog_type = getattr(cfg, 'catalog_type', None)
            if catalog_type == service:
                endpoint_type = getattr(cfg, 'endpoint_type', 'publicURL')
                break
        # Special case for compute v3 service which hasn't its own
        # configuration group
        else:
            if service == CONF.compute.catalog_v3_type:
                endpoint_type = CONF.compute.endpoint_type
        return endpoint_type

    @property
    def user(self):
        return self.auth_provider.credentials.username

    @property
    def tenant_name(self):
        return self.auth_provider.credentials.tenant_name

    @property
    def tenant_id(self):
        return self.auth_provider.credentials.tenant_id

    @property
    def password(self):
        return self.auth_provider.credentials.password

    @property
    def base_url(self):
        return self.auth_provider.base_url(filters=self.filters)

    @property
    def token(self):
        return self.auth_provider.get_token()

    @property
    def filters(self):
        """Endpoint-selection filters passed to the auth provider."""
        _filters = dict(
            service=self.service,
            endpoint_type=self._get_endpoint_type(self.service),
            region=self._get_region(self.service)
        )
        if self.api_version is not None:
            _filters['api_version'] = self.api_version
        if self._skip_path:
            _filters['skip_path'] = self._skip_path
        return _filters

    def skip_path(self):
        """
        When set, ignore the path part of the base URL from the catalog
        """
        self._skip_path = True

    def reset_path(self):
        """
        When reset, use the base URL from the catalog as-is
        """
        self._skip_path = False

    def expected_success(self, expected_code, read_code):
        """Assert that read_code matches the expected RFC 2616 success code.

        Raises InvalidHttpSuccessCode when a *different* success code came
        back; codes >= 400 are left for _error_checker to handle.
        """
        assert_msg = ("This function only allowed to use for HTTP status "
                      "codes which explicitly defined in the RFC 2616. {0}"
                      " is not a defined Success Code!").format(expected_code)
        assert expected_code in HTTP_SUCCESS, assert_msg

        # NOTE(afazekas): the http status code above 400 is processed by
        # the _error_checker method
        if read_code < 400 and read_code != expected_code:
            pattern = """Unexpected http success status code {0},
                         The expected status code is {1}"""
            details = pattern.format(read_code, expected_code)
            raise exceptions.InvalidHttpSuccessCode(details)

    def post(self, url, body, headers=None, extra_headers=False):
        return self.request('POST', url, extra_headers, headers, body)

    def get(self, url, headers=None, extra_headers=False):
        return self.request('GET', url, extra_headers, headers)

    def delete(self, url, headers=None, body=None, extra_headers=False):
        return self.request('DELETE', url, extra_headers, headers, body)

    def patch(self, url, body, headers=None, extra_headers=False):
        return self.request('PATCH', url, extra_headers, headers, body)

    def put(self, url, body, headers=None, extra_headers=False):
        return self.request('PUT', url, extra_headers, headers, body)

    def head(self, url, headers=None, extra_headers=False):
        return self.request('HEAD', url, extra_headers, headers)

    def copy(self, url, headers=None, extra_headers=False):
        return self.request('COPY', url, extra_headers, headers)

    def get_versions(self):
        """Return (resp, list of version id strings) from the version doc."""
        resp, body = self.get('')
        body = self._parse_resp(body)
        # A list comprehension keeps the Python 2 behavior (a list) on
        # Python 3, where map() would return a lazy iterator.
        versions = [version['id'] for version in body]
        return resp, versions

    def _get_request_id(self, resp):
        """Extract the request id header, if any, from a response."""
        for i in ('x-openstack-request-id', 'x-compute-request-id'):
            if i in resp:
                return resp[i]
        return ""

    def _log_request(self, method, req_url, resp,
                     secs="", req_headers=None,
                     req_body=None, resp_body=None):
        """Log one request/response pair at INFO (and DEBUG when traced)."""
        # NOTE: the old mutable-default ``req_headers={}`` was replaced;
        # a shared default dict could leak state between calls.
        if req_headers is None:
            req_headers = {}
        # if we have the request id, put it in the right part of the log
        extra = dict(request_id=self._get_request_id(resp))
        # NOTE(sdague): while we still have 6 callers to this function
        # we're going to just provide work around on who is actually
        # providing timings by gracefully adding no content if they don't.
        # Once we're down to 1 caller, clean this up.
        caller_name = misc_utils.find_test_caller()
        if secs:
            secs = " %.3fs" % secs
        self.LOG.info(
            'Request (%s): %s %s %s%s' % (
                caller_name,
                resp['status'],
                method,
                req_url,
                secs),
            extra=extra)

        # We intentionally duplicate the info content because in a parallel
        # world this is important to match
        trace_regex = CONF.debug.trace_requests
        if trace_regex and re.search(trace_regex, caller_name):
            if 'X-Auth-Token' in req_headers:
                # Work on a copy so the caller's dict is not mutated, and
                # never write the real token into the logs.
                # NOTE(review): the archived copy had the placeholder string
                # stripped to ''; '<omitted>' is the upstream value -- confirm.
                req_headers = req_headers.copy()
                req_headers['X-Auth-Token'] = '<omitted>'
            log_fmt = """Request (%s): %s %s %s%s
            Request - Headers: %s
                Body: %s
            Response - Headers: %s
                Body: %s"""

            self.LOG.debug(
                log_fmt % (
                    caller_name,
                    resp['status'],
                    method,
                    req_url,
                    secs,
                    str(req_headers),
                    str(req_body)[:2048],
                    str(resp),
                    str(resp_body)[:2048]),
                extra=extra)

    def _parse_resp(self, body):
        """Deserialize a response body according to the client TYPE.

        For JSON, a single-key wrapper object ({"users": [...]}) is
        unwrapped to its payload when that payload is a dict or list.
        For XML, dict-like and list-like documents are converted with
        the tag hints declared on the class.
        """
        # NOTE: string comparison must use ==; the original ``is "json"``
        # relied on CPython string interning, which is not guaranteed.
        if self._get_type() == "json":
            body = json.loads(body)

            # We assume, that if the first value of the deserialized body's
            # item set is a dict or a list, that we just return the first value
            # of deserialized body.
            # Essentially "cutting out" the first placeholder element in a body
            # that looks like this:
            #
            #  {
            #    "users": [
            #      ...
            #    ]
            #  }
            try:
                # Ensure there are not more than one top-level keys
                if len(body.keys()) > 1:
                    return body
                # Just return the "wrapped" element.  list() keeps this
                # working on Python 3, where dict.items() is a view that
                # does not support indexing.
                first_key, first_item = list(body.items())[0]
                if isinstance(first_item, (dict, list)):
                    return first_item
            except (ValueError, IndexError):
                pass
            return body
        elif self._get_type() == "xml":
            element = etree.fromstring(body)
            if any(s in element.tag for s in self.dict_tags):
                # Parse dictionary-like xmls (metadata, etc)
                dictionary = {}
                for el in element.getchildren():
                    dictionary[u"%s" % el.get("key")] = u"%s" % el.text
                return dictionary
            if any(s in element.tag for s in self.list_tags):
                # Parse list-like xmls (users, roles, etc)
                array = []
                for child in element.getchildren():
                    array.append(common.xml_to_json(child))
                return array

            # Parse one-item-like xmls (user, role, etc)
            return common.xml_to_json(element)

    def response_checker(self, method, resp, resp_body):
        """Sanity-check a response against RFC 2616 body/entity rules."""
        if (resp.status in set((204, 205, 304)) or resp.status < 200 or
                method.upper() == 'HEAD') and resp_body:
            raise exceptions.ResponseWithNonEmptyBody(status=resp.status)
        # NOTE(afazekas):
        # If the HTTP Status Code is 205
        #   'The response MUST NOT include an entity.'
        # A HTTP entity has an entity-body and an 'entity-header'.
        # In the HTTP response specification (Section 6) the 'entity-header'
        # 'generic-header' and 'response-header' are in OR relation.
        # All headers not in the above two group are considered as entity
        # header in every interpretation.
        if (resp.status == 205 and
            0 != len(set(resp.keys()) - set(('status',)) -
                     self.response_header_lc - self.general_header_lc)):
            raise exceptions.ResponseWithEntity()
        # NOTE(afazekas)
        # Now the swift sometimes (delete not empty container)
        # returns with non json error response, we can create new rest class
        # for swift.
        # Usually RFC2616 says error responses SHOULD contain an explanation.
        # The warning is normal for SHOULD/SHOULD NOT case
        # Likely it will cause an error
        if not resp_body and resp.status >= 400:
            self.LOG.warning("status >= 400 response with empty body")

    def _request(self, method, url, headers=None, body=None):
        """A simple HTTP request interface."""
        # Authenticate the request with the auth provider
        req_url, req_headers, req_body = self.auth_provider.auth_request(
            method, url, headers, body, self.filters)
        # Do the actual request, and time it
        start = time.time()
        resp, resp_body = self.http_obj.request(
            req_url, method, headers=req_headers, body=req_body)
        end = time.time()
        self._log_request(method, req_url, resp, secs=(end - start),
                          req_headers=req_headers, req_body=req_body,
                          resp_body=resp_body)
        # Verify HTTP response codes
        self.response_checker(method, resp, resp_body)
        return resp, resp_body

    def request(self, method, url, extra_headers=False, headers=None,
                body=None):
        """Issue a request, retrying rate-limited (413) responses.

        Retries at most MAX_RECURSION_DEPTH times, honoring the
        Retry-After header, then delegates status handling to
        _error_checker.
        """
        # if extra_headers is True
        # default headers would be added to headers
        retry = 0

        if headers is None:
            # NOTE(vponomaryov): if some client do not need headers,
            # it should explicitly pass empty dict
            headers = self.get_headers()
        elif extra_headers:
            try:
                headers = headers.copy()
                headers.update(self.get_headers())
            except (ValueError, TypeError):
                headers = self.get_headers()

        resp, resp_body = self._request(method, url,
                                        headers=headers, body=body)

        while (resp.status == 413 and
               'retry-after' in resp and
                not self.is_absolute_limit(
                    resp, self._parse_resp(resp_body)) and
                retry < MAX_RECURSION_DEPTH):
            retry += 1
            delay = int(resp['retry-after'])
            time.sleep(delay)
            resp, resp_body = self._request(method, url,
                                            headers=headers, body=body)
        self._error_checker(method, url, headers, body,
                            resp, resp_body)
        return resp, resp_body

    def _error_checker(self, method, url,
                       headers, body, resp, resp_body):
        """Map error status codes (>= 400) onto tempest exceptions."""

        # NOTE(mtreinish): Check for httplib response from glance_http. The
        # object can't be used here because importing httplib breaks httplib2.
        # If another object from a class not imported were passed here as
        # resp this could possibly fail
        # NOTE(review): the archived copy compared against '' (always False);
        # "<type 'instance'>" is the old-style-instance repr used upstream
        # -- confirm against the original source.
        if str(type(resp)) == "<type 'instance'>":
            ctype = resp.getheader('content-type')
        else:
            try:
                ctype = resp['content-type']
            # NOTE(mtreinish): Keystone delete user responses doesn't have a
            # content-type header. (They don't have a body) So just pretend it
            # is set.
            except KeyError:
                ctype = 'application/json'

        # It is not an error response
        if resp.status < 400:
            return

        JSON_ENC = ['application/json', 'application/json; charset=utf-8']
        # NOTE(mtreinish): This is for compatibility with Glance and swift
        # APIs. These are the return content types that Glance api v1
        # (and occasionally swift) are using.
        TXT_ENC = ['text/plain', 'text/html', 'text/html; charset=utf-8',
                   'text/plain; charset=utf-8']
        XML_ENC = ['application/xml', 'application/xml; charset=utf-8']

        if ctype.lower() in JSON_ENC or ctype.lower() in XML_ENC:
            parse_resp = True
        elif ctype.lower() in TXT_ENC:
            parse_resp = False
        else:
            raise exceptions.InvalidContentType(str(resp.status))

        if resp.status == 401 or resp.status == 403:
            raise exceptions.Unauthorized(resp_body)

        if resp.status == 404:
            raise exceptions.NotFound(resp_body)

        if resp.status == 400:
            if parse_resp:
                resp_body = self._parse_resp(resp_body)
            raise exceptions.BadRequest(resp_body)

        if resp.status == 409:
            if parse_resp:
                resp_body = self._parse_resp(resp_body)
            raise exceptions.Conflict(resp_body)

        if resp.status == 413:
            if parse_resp:
                resp_body = self._parse_resp(resp_body)
            if self.is_absolute_limit(resp, resp_body):
                raise exceptions.OverLimit(resp_body)
            else:
                raise exceptions.RateLimitExceeded(resp_body)

        if resp.status == 422:
            if parse_resp:
                resp_body = self._parse_resp(resp_body)
            raise exceptions.UnprocessableEntity(resp_body)

        if resp.status in (500, 501):
            message = resp_body
            if parse_resp:
                try:
                    resp_body = self._parse_resp(resp_body)
                except ValueError:
                    # If response body is a non-json string message.
                    # Use resp_body as is and raise InvalidResponseBody
                    # exception.
                    raise exceptions.InvalidHTTPResponseBody(message)
                else:
                    if isinstance(resp_body, dict):
                        # I'm seeing both computeFault
                        # and cloudServersFault come back.
                        # Will file a bug to fix, but leave as is for now.
                        if 'cloudServersFault' in resp_body:
                            message = resp_body['cloudServersFault']['message']
                        elif 'computeFault' in resp_body:
                            message = resp_body['computeFault']['message']
                        elif 'error' in resp_body:  # Keystone errors
                            message = resp_body['error']['message']
                            raise exceptions.IdentityError(message)
                        elif 'message' in resp_body:
                            message = resp_body['message']
                    else:
                        message = resp_body

            raise exceptions.ServerFault(message)

        if resp.status >= 400:
            raise exceptions.UnexpectedResponseCode(str(resp.status))

    def is_absolute_limit(self, resp, resp_body):
        """Return True if a 413 reflects a quota (absolute) limit rather
        than a rate limit that is worth retrying."""
        # NOTE: collections.Mapping is the Python 2 location; it moved to
        # collections.abc and was removed from collections in Python 3.10.
        if (not isinstance(resp_body, collections.Mapping) or
                'retry-after' not in resp):
            return True
        if self._get_type() == "json":
            over_limit = resp_body.get('overLimit', None)
            if not over_limit:
                return True
            return 'exceed' in over_limit.get('message', 'blabla')
        elif self._get_type() == "xml":
            return 'exceed' in resp_body.get('message', 'blabla')

    def wait_for_resource_deletion(self, id):
        """Waits for a resource to be deleted."""
        start_time = int(time.time())
        while True:
            if self.is_resource_deleted(id):
                return
            if int(time.time()) - start_time >= self.build_timeout:
                raise exceptions.TimeoutException
            time.sleep(self.build_interval)

    def is_resource_deleted(self, id):
        """
        Subclasses override with specific deletion detection.
        """
        message = ('"%s" does not implement is_resource_deleted'
                   % self.__class__.__name__)
        raise NotImplementedError(message)

    @classmethod
    def validate_response(cls, schema, resp, body):
        """Validate a successful response against a JSON schema."""
        # Only check the response if the status code is a success code
        # TODO(cyeoh): Eventually we should be able to verify that a failure
        # code if it exists is something that we expect. This is explicitly
        # declared in the V3 API and so we should be able to export this in
        # the response schema. For now we'll ignore it.
        if resp.status in HTTP_SUCCESS:
            response_code = schema['status_code']
            if resp.status not in response_code:
                msg = ("The status code(%s) is different than the expected "
                       "one(%s)") % (resp.status, response_code)
                raise exceptions.InvalidHttpSuccessCode(msg)
            # Check the body of a response
            body_schema = schema.get('response_body')
            if body_schema:
                try:
                    jsonschema.validate(body, body_schema)
                except jsonschema.ValidationError as ex:
                    msg = ("HTTP response body is invalid (%s)") % ex
                    raise exceptions.InvalidHTTPResponseBody(msg)
            else:
                if body:
                    msg = ("HTTP response body should not exist (%s)") % body
                    raise exceptions.InvalidHTTPResponseBody(msg)
            # Check the header of a response
            header_schema = schema.get('response_header')
            if header_schema:
                try:
                    jsonschema.validate(resp, header_schema)
                except jsonschema.ValidationError as ex:
                    msg = ("HTTP response header is invalid (%s)") % ex
                    raise exceptions.InvalidHTTPResponseHeader(msg)
class NegativeRestClient(RestClient):
    """RestClient variant used by negative tests.

    Error responses are the whole point of a negative test, so the
    status-code-to-exception translation of the parent class is disabled
    and callers inspect the raw response themselves.
    """

    def _error_checker(self, method, url,
                       headers, body, resp, resp_body):
        # Deliberately swallow nothing-to-do: >= 400 statuses must reach
        # the caller instead of being raised as exceptions.
        pass

    def send_request(self, method, url_template, resources, body=None):
        """Fill *url_template* with *resources* and dispatch *method*."""
        url = url_template % tuple(resources)
        # Verbs that take no request body vs. verbs that do.
        bodyless = {
            "GET": self.get,
            "HEAD": self.head,
            "DELETE": self.delete,
            "COPY": self.copy,
        }
        with_body = {
            "POST": self.post,
            "PUT": self.put,
            "PATCH": self.patch,
        }
        if method in bodyless:
            resp, body = bodyless[method](url)
        elif method in with_body:
            resp, body = with_body[method](url, body)
        else:
            assert False
        return resp, body
tempest-2014.1.dev4108.gf22b6cc/tempest/common/tempest_fixtures.py 0000664 0001750 0001750 00000001456 12332757070 025040 0 ustar chuck chuck 0000000 0000000 # Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.openstack.common.fixture import lockutils
class LockFixture(lockutils.LockFixture):
    """Fixture serializing tests through an external inter-process lock.

    All lock names are prefixed with 'tempest-' so tempest's locks cannot
    collide with locks created by other consumers of lockutils.
    """
    def __init__(self, name):
        super(LockFixture, self).__init__(name, 'tempest-')
tempest-2014.1.dev4108.gf22b6cc/tempest/common/glance_http.py 0000664 0001750 0001750 00000033503 12332757070 023714 0 ustar chuck chuck 0000000 0000000 # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Originally copied from python-glanceclient
import copy
import hashlib
import httplib
import json
import OpenSSL
import posixpath
import re
from six import moves
import socket
import StringIO
import struct
import urlparse
from tempest import exceptions as exc
from tempest.openstack.common import log as logging
LOG = logging.getLogger(__name__)
USER_AGENT = 'tempest'
CHUNKSIZE = 1024 * 64  # 64kB
# Matches the base64-style alphabet of keystone tokens; used to decide
# whether a header value looks like a real token to scrub from logs.
TOKEN_CHARS_RE = re.compile('^[-A-Za-z0-9+/=]*$')
class HTTPClient(object):
    """Low-level HTTP(S) client used by the Glance service clients.

    Unlike RestClient this speaks httplib directly so that image payloads
    can be streamed with 'Transfer-Encoding: chunked'.
    """

    def __init__(self, auth_provider, filters, **kwargs):
        self.auth_provider = auth_provider
        self.filters = filters
        self.endpoint = auth_provider.base_url(filters)
        endpoint_parts = urlparse.urlparse(self.endpoint)
        self.endpoint_scheme = endpoint_parts.scheme
        self.endpoint_hostname = endpoint_parts.hostname
        self.endpoint_port = endpoint_parts.port
        self.endpoint_path = endpoint_parts.path

        self.connection_class = self.get_connection_class(self.endpoint_scheme)
        self.connection_kwargs = self.get_connection_kwargs(
            self.endpoint_scheme, **kwargs)

    @staticmethod
    def get_connection_class(scheme):
        """Pick the connection class appropriate for the URL scheme."""
        if scheme == 'https':
            return VerifiedHTTPSConnection
        else:
            return httplib.HTTPConnection

    @staticmethod
    def get_connection_kwargs(scheme, **kwargs):
        """Normalize connection keyword arguments for the chosen scheme.

        Always sets a float 'timeout' (default 600s); for https also
        carries through the SSL-related options.
        """
        _kwargs = {'timeout': float(kwargs.get('timeout', 600))}

        if scheme == 'https':
            _kwargs['cacert'] = kwargs.get('cacert', None)
            _kwargs['cert_file'] = kwargs.get('cert_file', None)
            _kwargs['key_file'] = kwargs.get('key_file', None)
            _kwargs['insecure'] = kwargs.get('insecure', False)
            _kwargs['ssl_compression'] = kwargs.get('ssl_compression', True)

        return _kwargs

    def get_connection(self):
        """Open a new connection to the endpoint or raise EndpointNotFound."""
        _class = self.connection_class
        try:
            return _class(self.endpoint_hostname, self.endpoint_port,
                          **self.connection_kwargs)
        except httplib.InvalidURL:
            raise exc.EndpointNotFound

    def _http_request(self, url, method, **kwargs):
        """Send an http request with the specified characteristics.
        Wrapper around httplib.HTTP(S)Connection.request to handle tasks such
        as setting headers and error handling.
        """
        # Copy the kwargs so we can reuse the original in case of redirects
        kwargs['headers'] = copy.deepcopy(kwargs.get('headers', {}))
        kwargs['headers'].setdefault('User-Agent', USER_AGENT)

        self._log_request(method, url, kwargs['headers'])

        conn = self.get_connection()

        try:
            url_parts = urlparse.urlparse(url)
            conn_url = posixpath.normpath(url_parts.path)
            LOG.debug('Actual Path: {path}'.format(path=conn_url))
            if kwargs['headers'].get('Transfer-Encoding') == 'chunked':
                conn.putrequest(method, conn_url)
                for header, value in kwargs['headers'].items():
                    conn.putheader(header, value)
                conn.endheaders()
                chunk = kwargs['body'].read(CHUNKSIZE)
                # Chunk it, baby...
                while chunk:
                    conn.send('%x\r\n%s\r\n' % (len(chunk), chunk))
                    chunk = kwargs['body'].read(CHUNKSIZE)
                conn.send('0\r\n\r\n')
            else:
                conn.request(method, conn_url, **kwargs)
            resp = conn.getresponse()
        except socket.gaierror as e:
            message = ("Error finding address for %(url)s: %(e)s" %
                       {'url': url, 'e': e})
            raise exc.EndpointNotFound(message)
        except (socket.error, socket.timeout) as e:
            message = ("Error communicating with %(endpoint)s %(e)s" %
                       {'endpoint': self.endpoint, 'e': e})
            raise exc.TimeoutException(message)

        body_iter = ResponseBodyIterator(resp)
        # Read body into string if it isn't obviously image data
        if resp.getheader('content-type', None) != 'application/octet-stream':
            body_str = ''.join([body_chunk for body_chunk in body_iter])
            body_iter = StringIO.StringIO(body_str)
            self._log_response(resp, None)
        else:
            self._log_response(resp, body_iter)

        return resp, body_iter

    def _log_request(self, method, url, headers):
        """Log the outgoing request, scrubbing any auth token."""
        LOG.info('Request: ' + method + ' ' + url)
        if headers:
            headers_out = headers
            if 'X-Auth-Token' in headers and headers['X-Auth-Token']:
                token = headers['X-Auth-Token']
                if len(token) > 64 and TOKEN_CHARS_RE.match(token):
                    headers_out = headers.copy()
                    # NOTE(review): the archived copy had this placeholder
                    # stripped to ""; "<Token omitted>" is the upstream
                    # value -- confirm against the original source.
                    headers_out['X-Auth-Token'] = "<Token omitted>"
            LOG.info('Request Headers: ' + str(headers_out))

    def _log_response(self, resp, body):
        """Log status, headers and (truncated) body of a response."""
        status = str(resp.status)
        LOG.info("Response Status: " + status)
        if resp.getheaders():
            LOG.info('Response Headers: ' + str(resp.getheaders()))
        if body:
            str_body = str(body)
            # Measure the stringified form: ``body`` may be a
            # ResponseBodyIterator, which has no __len__ and would
            # raise TypeError under len(body).
            length = len(str_body)
            LOG.info('Response Body: ' + str_body[:2048])
            if length >= 2048:
                # Fixed: this used ``self.LOG`` although LOG is a module
                # attribute, which raised AttributeError for large bodies.
                LOG.debug("Large body (%d) md5 summary: %s", length,
                          hashlib.md5(str_body).hexdigest())

    def json_request(self, method, url, **kwargs):
        """Issue a request with a JSON-encoded body and decode the reply."""
        kwargs.setdefault('headers', {})
        kwargs['headers'].setdefault('Content-Type', 'application/json')
        if 'body' in kwargs:
            kwargs['body'] = json.dumps(kwargs['body'])

        resp, body_iter = self._http_request(url, method, **kwargs)

        if 'application/json' in resp.getheader('content-type', ''):
            body = ''.join([chunk for chunk in body_iter])
            try:
                body = json.loads(body)
            except ValueError:
                LOG.error('Could not decode response body as JSON')
        else:
            body = None
        return resp, body

    def raw_request(self, method, url, **kwargs):
        """Issue a request with a raw (possibly streamed) body."""
        kwargs.setdefault('headers', {})
        kwargs['headers'].setdefault('Content-Type',
                                     'application/octet-stream')
        if 'body' in kwargs:
            if (hasattr(kwargs['body'], 'read')
                    and method.lower() in ('post', 'put')):
                # We use 'Transfer-Encoding: chunked' because
                # body size may not always be known in advance.
                kwargs['headers']['Transfer-Encoding'] = 'chunked'

        # Decorate the request with auth
        req_url, kwargs['headers'], kwargs['body'] = \
            self.auth_provider.auth_request(
                method=method, url=url, headers=kwargs['headers'],
                body=kwargs.get('body', None), filters=self.filters)
        return self._http_request(req_url, method, **kwargs)
class OpenSSLConnectionDelegator(object):
    """Wrap an OpenSSL.SSL.Connection behind attribute delegation.

    httplib expects its socket-like object to provide a 'makefile'
    method, which OpenSSL.SSL.Connection lacks; since that class cannot
    be inherited from, a delegator is used and every other attribute is
    forwarded to the wrapped connection.
    """

    def __init__(self, *args, **kwargs):
        self.connection = OpenSSL.SSL.Connection(*args, **kwargs)

    def __getattr__(self, attr_name):
        # Fall through to the underlying connection for anything this
        # wrapper does not define itself.
        return getattr(self.connection, attr_name)

    def makefile(self, *args, **kwargs):
        # Force close=True so that closing the returned file object also
        # closes the underlying socket.
        kwargs['close'] = True
        return socket._fileobject(self.connection, *args, **kwargs)
class VerifiedHTTPSConnection(httplib.HTTPSConnection):
    """
    Extended HTTPSConnection which uses the OpenSSL library
    for enhanced SSL support.
    Note: Much of this functionality can eventually be replaced
    with native Python 3.3 code.
    """
    def __init__(self, host, port=None, key_file=None, cert_file=None,
                 cacert=None, timeout=None, insecure=False,
                 ssl_compression=True):
        httplib.HTTPSConnection.__init__(self, host, port,
                                         key_file=key_file,
                                         cert_file=cert_file)
        self.key_file = key_file
        self.cert_file = cert_file
        self.timeout = timeout
        self.insecure = insecure
        self.ssl_compression = ssl_compression
        self.cacert = cacert
        self.setcontext()

    @staticmethod
    def host_matches_cert(host, x509):
        """
        Verify that the x509 certificate we have received
        from 'host' correctly identifies the server we are
        connecting to, ie that the certificate's Common Name
        or a Subject Alternative Name matches 'host'.

        Raises SSLCertificateError when neither matches.
        """
        # First see if we can match the CN
        if x509.get_subject().commonName == host:
            return True

        # Also try Subject Alternative Names for a match
        san_list = None
        for i in moves.xrange(x509.get_extension_count()):
            ext = x509.get_extension(i)
            if ext.get_short_name() == 'subjectAltName':
                san_list = str(ext)
                for san in ''.join(san_list.split()).split(','):
                    if san == "DNS:%s" % host:
                        return True

        # Server certificate does not match host
        msg = ('Host "%s" does not match x509 certificate contents: '
               'CommonName "%s"' % (host, x509.get_subject().commonName))
        if san_list is not None:
            msg = msg + ', subjectAltName "%s"' % san_list
        raise exc.SSLCertificateError(msg)

    def verify_callback(self, connection, x509, errnum,
                        depth, preverify_ok):
        """OpenSSL verify hook: reject expired certs and check hostname."""
        if x509.has_expired():
            msg = "SSL Certificate expired on '%s'" % x509.get_notAfter()
            raise exc.SSLCertificateError(msg)

        if depth == 0 and preverify_ok is True:
            # We verify that the host matches against the last
            # certificate in the chain
            return self.host_matches_cert(self.host, x509)
        else:
            # Pass through OpenSSL's default result
            return preverify_ok

    def setcontext(self):
        """
        Set up the OpenSSL context.
        """
        self.context = OpenSSL.SSL.Context(OpenSSL.SSL.SSLv23_METHOD)

        if self.ssl_compression is False:
            self.context.set_options(0x20000)  # SSL_OP_NO_COMPRESSION

        if self.insecure is not True:
            self.context.set_verify(OpenSSL.SSL.VERIFY_PEER,
                                    self.verify_callback)
        else:
            self.context.set_verify(OpenSSL.SSL.VERIFY_NONE,
                                    self.verify_callback)

        if self.cert_file:
            try:
                self.context.use_certificate_file(self.cert_file)
            except Exception as e:
                msg = 'Unable to load cert from "%s" %s' % (self.cert_file, e)
                raise exc.SSLConfigurationError(msg)
            if self.key_file is None:
                # We support having key and cert in same file
                try:
                    self.context.use_privatekey_file(self.cert_file)
                except Exception as e:
                    msg = ('No key file specified and unable to load key '
                           'from "%s" %s' % (self.cert_file, e))
                    raise exc.SSLConfigurationError(msg)

        if self.key_file:
            try:
                self.context.use_privatekey_file(self.key_file)
            except Exception as e:
                msg = 'Unable to load key from "%s" %s' % (self.key_file, e)
                raise exc.SSLConfigurationError(msg)

        if self.cacert:
            try:
                self.context.load_verify_locations(self.cacert)
            except Exception as e:
                # Fixed: the format string had a single %s but a two-item
                # tuple, which itself raised TypeError on this error path.
                msg = 'Unable to load CA from "%s" %s' % (self.cacert, e)
                raise exc.SSLConfigurationError(msg)
        else:
            self.context.set_default_verify_paths()

    def connect(self):
        """
        Connect to an SSL port using the OpenSSL library and apply
        per-connection parameters.
        """
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if self.timeout is not None:
            # '0' microseconds -- SO_RCVTIMEO takes a struct timeval of
            # (seconds, microseconds).
            sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVTIMEO,
                            struct.pack('LL', self.timeout, 0))
        self.sock = OpenSSLConnectionDelegator(self.context, sock)
        self.sock.connect((self.host, self.port))

    def close(self):
        if self.sock:
            # Remove the reference to the socket but don't close it yet.
            # Response close will close both socket and associated
            # file. Closing socket too soon will cause response
            # reads to fail with socket IO error 'Bad file descriptor'.
            self.sock = None
        httplib.HTTPSConnection.close(self)
class ResponseBodyIterator(object):
    """A class that acts as an iterator over an HTTP response.

    Yields the body of *resp* in CHUNKSIZE pieces until the stream is
    exhausted.
    """

    def __init__(self, resp):
        self.resp = resp

    def __iter__(self):
        # Returning self (instead of the original generator that called
        # self.next() and re-raised its StopIteration) keeps iteration
        # working on Python >= 3.7, where PEP 479 turns a StopIteration
        # escaping a generator into RuntimeError.  The observable chunk
        # sequence is unchanged.
        return self

    def next(self):
        chunk = self.resp.read(CHUNKSIZE)
        if chunk:
            return chunk
        else:
            raise StopIteration()

    # Python 3 protocol name for the same method.
    __next__ = next
tempest-2014.1.dev4108.gf22b6cc/tempest/common/generate_sample_tempest.py 0000664 0001750 0001750 00000002742 12332757070 026321 0 ustar chuck chuck 0000000 0000000 # Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import sys
import tempest.config
from tempest.openstack.common.config import generator
# NOTE(mtreinish): This hack is needed because of how oslo config is used in
# tempest. Tempest is run from inside a test runner and so we can't rely on the
# global CONF object being fully populated when we run a test. (test runners
# don't init every file for running a test) So to get around that we manually
# load the config file in tempest for each test class to ensure that every
# config option is set. However, the tool expects the CONF object to be fully
# populated when it inits all the files in the project. This just works around
# the issue by manually loading the config file (which may or may not exist)
# which will populate all the options before running the generator.
if __name__ == "__main__":
    # Register every tempest option group first (see the NOTE above) so the
    # generator sees a fully populated option set, then emit the sample
    # config for the modules named on the command line.
    tempest.config.register_opts()
    generator.generate(sys.argv[1:])
tempest-2014.1.dev4108.gf22b6cc/tempest/openstack/ 0000775 0001750 0001750 00000000000 12332757136 021550 5 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/openstack/common/ 0000775 0001750 0001750 00000000000 12332757136 023040 5 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/openstack/common/lockutils.py 0000664 0001750 0001750 00000023325 12332757070 025425 0 ustar chuck chuck 0000000 0000000 # Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import errno
import functools
import os
import shutil
import subprocess
import sys
import tempfile
import threading
import time
import weakref
from oslo.config import cfg
from tempest.openstack.common import fileutils
from tempest.openstack.common.gettextutils import _
from tempest.openstack.common import local
from tempest.openstack.common import log as logging
LOG = logging.getLogger(__name__)
# Options controlling external (inter-process) locking behaviour; these
# are registered globally on the shared CONF object below.
util_opts = [
    cfg.BoolOpt('disable_process_locking', default=False,
                help='Whether to disable inter-process locks'),
    cfg.StrOpt('lock_path',
               default=os.environ.get("TEMPEST_LOCK_PATH"),
               help=('Directory to use for lock files.'))
]
CONF = cfg.CONF
CONF.register_opts(util_opts)
def set_defaults(lock_path):
    """Override the default value of the 'lock_path' config option."""
    cfg.set_defaults(util_opts, lock_path=lock_path)
class _InterProcessLock(object):
    """Lock implementation which allows multiple locks, working around
    issues like bugs.debian.org/cgi-bin/bugreport.cgi?bug=632857 and does
    not require any cleanup. Since the lock is always held on a file
    descriptor rather than outside of the process, the lock gets dropped
    automatically if the process crashes, even if __exit__ is not executed.

    There are no guarantees regarding usage by multiple green threads in a
    single process here. This lock works only between processes. Exclusive
    access between local threads should be achieved using the semaphores
    in the @synchronized decorator.

    Note these locks are released when the descriptor is closed, so it's not
    safe to close the file descriptor while another green thread holds the
    lock. Just opening and closing the lock file can break synchronisation,
    so lock files must be accessed only using this abstraction.
    """

    def __init__(self, name):
        # The open file object is created lazily in __enter__; name is the
        # path of the lock file on disk.
        self.lockfile = None
        self.fname = name

    def __enter__(self):
        self.lockfile = open(self.fname, 'w')

        while True:
            try:
                # Using non-blocking locks since green threads are not
                # patched to deal with blocking locking calls.
                # Also upon reading the MSDN docs for locking(), it seems
                # to have a laughable 10 attempts "blocking" mechanism.
                self.trylock()
                return self
            except IOError as e:
                if e.errno in (errno.EACCES, errno.EAGAIN):
                    # external locks synchronise things like iptables
                    # updates - give it some time to prevent busy spinning
                    time.sleep(0.01)
                else:
                    raise

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Best-effort release: a failure to unlock/close is logged rather
        # than raised, so it never masks an exception from the with-body.
        try:
            self.unlock()
            self.lockfile.close()
        except IOError:
            LOG.exception(_("Could not release the acquired lock `%s`"),
                          self.fname)

    def trylock(self):
        # Non-blocking acquire; implemented by the platform subclasses.
        raise NotImplementedError()

    def unlock(self):
        # Release; implemented by the platform subclasses.
        raise NotImplementedError()
class _WindowsLock(_InterProcessLock):
    # Windows variant: lock one byte of the file with msvcrt (imported
    # below only when os.name == 'nt').
    def trylock(self):
        msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_NBLCK, 1)

    def unlock(self):
        msvcrt.locking(self.lockfile.fileno(), msvcrt.LK_UNLCK, 1)
class _PosixLock(_InterProcessLock):
    # POSIX variant: advisory whole-file lock via fcntl (imported below
    # only on non-Windows platforms). LOCK_NB keeps the call non-blocking.
    def trylock(self):
        fcntl.lockf(self.lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)

    def unlock(self):
        fcntl.lockf(self.lockfile, fcntl.LOCK_UN)
# Select the platform-appropriate lock implementation. The backing module
# (msvcrt or fcntl) is imported here so that importing this file never
# fails on the other platform.
if os.name == 'nt':
    import msvcrt
    InterProcessLock = _WindowsLock
else:
    import fcntl
    InterProcessLock = _PosixLock

# name -> threading.Semaphore. Weak values mean a semaphore disappears
# once no lock() caller holds a reference to it any more.
_semaphores = weakref.WeakValueDictionary()
_semaphores_lock = threading.Lock()
@contextlib.contextmanager
def lock(name, lock_file_prefix=None, external=False, lock_path=None):
    """Context based lock

    This function yields a `threading.Semaphore` instance (if we don't use
    eventlet.monkey_patch(), else `semaphore.Semaphore`) unless external is
    True, in which case, it'll yield an InterProcessLock instance.

    :param lock_file_prefix: The lock_file_prefix argument is used to provide
    lock files on disk with a meaningful prefix.

    :param external: The external keyword argument denotes whether this lock
    should work across multiple processes. This means that if two different
    workers both run a method decorated with @synchronized('mylock',
    external=True), only one of them will execute at a time.

    :param lock_path: The lock_path keyword argument is used to specify a
    special location for external lock files to live. If nothing is set, then
    CONF.lock_path is used as a default.
    """
    # Look up (or lazily create) the per-name semaphore under a global
    # mutex so concurrent first-callers cannot race on creation.
    with _semaphores_lock:
        try:
            sem = _semaphores[name]
        except KeyError:
            sem = threading.Semaphore()
            _semaphores[name] = sem

    with sem:
        LOG.debug(_('Got semaphore "%(lock)s"'), {'lock': name})

        # NOTE(mikal): I know this looks odd
        if not hasattr(local.strong_store, 'locks_held'):
            local.strong_store.locks_held = []
        local.strong_store.locks_held.append(name)

        try:
            if external and not CONF.disable_process_locking:
                LOG.debug(_('Attempting to grab file lock "%(lock)s"'),
                          {'lock': name})

                # We need a copy of lock_path because it is non-local
                local_lock_path = lock_path or CONF.lock_path
                if not local_lock_path:
                    raise cfg.RequiredOptError('lock_path')

                if not os.path.exists(local_lock_path):
                    fileutils.ensure_tree(local_lock_path)
                    LOG.info(_('Created lock path: %s'), local_lock_path)

                def add_prefix(name, prefix):
                    # Join prefix and name, inserting '-' unless the prefix
                    # already ends with one.
                    if not prefix:
                        return name
                    sep = '' if prefix.endswith('-') else '-'
                    return '%s%s%s' % (prefix, sep, name)

                # NOTE(mikal): the lock name cannot contain directory
                # separators
                lock_file_name = add_prefix(name.replace(os.sep, '_'),
                                            lock_file_prefix)

                lock_file_path = os.path.join(local_lock_path, lock_file_name)

                try:
                    # Note: this inner name deliberately shadows the outer
                    # lock() function for the duration of the with-block.
                    lock = InterProcessLock(lock_file_path)
                    with lock as lock:
                        LOG.debug(_('Got file lock "%(lock)s" at %(path)s'),
                                  {'lock': name, 'path': lock_file_path})
                        yield lock
                finally:
                    LOG.debug(_('Released file lock "%(lock)s" at %(path)s'),
                              {'lock': name, 'path': lock_file_path})
            else:
                yield sem
        finally:
            local.strong_store.locks_held.remove(name)
def synchronized(name, lock_file_prefix=None, external=False, lock_path=None):
    """Synchronization decorator.

    Decorating a method like so::

        @synchronized('mylock')
        def foo(self, *args):
            ...

    ensures that only one thread will execute the foo method at a time.

    Different methods can share the same lock::

        @synchronized('mylock')
        def foo(self, *args):
            ...

        @synchronized('mylock')
        def bar(self, *args):
            ...

    This way only one of either foo or bar can be executing at a time.
    """
    def decorator(f):
        @functools.wraps(f)
        def wrapper(*args, **kwargs):
            try:
                with lock(name, lock_file_prefix, external, lock_path):
                    LOG.debug(_('Got semaphore / lock "%(function)s"'),
                              {'function': f.__name__})
                    return f(*args, **kwargs)
            finally:
                LOG.debug(_('Semaphore / lock released "%(function)s"'),
                          {'function': f.__name__})
        return wrapper
    return decorator
def synchronized_with_prefix(lock_file_prefix):
    """Partial object generator for the synchronization decorator.

    Redefine @synchronized in each project like so::

        (in nova/utils.py)
        from nova.openstack.common import lockutils
        synchronized = lockutils.synchronized_with_prefix('nova-')

        (in nova/foo.py)
        from nova import utils

        @utils.synchronized('mylock')
        def bar(self, *args):
            ...

    The lock_file_prefix argument is used to provide lock files on disk
    with a meaningful prefix.
    """
    preconfigured = functools.partial(synchronized,
                                      lock_file_prefix=lock_file_prefix)
    return preconfigured
def main(argv):
    """Create a dir for locks and pass it to command from arguments

    If you run this:
        python -m openstack.common.lockutils python setup.py testr

    a temporary directory will be created for all your locks and passed to
    all your tests in an environment variable. The temporary dir will be
    deleted afterwards and the return value will be preserved.
    """
    scratch_dir = tempfile.mkdtemp()
    os.environ["TEMPEST_LOCK_PATH"] = scratch_dir
    try:
        # The exit status of the wrapped command is what we report back.
        return subprocess.call(argv[1:])
    finally:
        shutil.rmtree(scratch_dir, ignore_errors=True)


if __name__ == '__main__':
    sys.exit(main(sys.argv))
tempest-2014.1.dev4108.gf22b6cc/tempest/openstack/common/excutils.py 0000664 0001750 0001750 00000007172 12332757070 025256 0 ustar chuck chuck 0000000 0000000 # Copyright 2011 OpenStack Foundation.
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Exception related utilities.
"""
import logging
import sys
import time
import traceback
import six
from tempest.openstack.common.gettextutils import _
class save_and_reraise_exception(object):
    """Save current exception, run some code and then re-raise.

    The exception context can be cleared while a handler runs (for example
    when eventlet switches greenthreads, or when the handler itself raises
    and catches an exception), which would leave ``None`` to be re-raised.
    To work around that, the exception state is captured on entry, the
    handler body runs, and the original exception is re-raised on exit.
    Should the handler body itself raise, the saved exception is logged and
    the new one propagates instead.

    Callers that decide not to re-raise can clear the ``reraise`` flag::

        except Exception:
            with save_and_reraise_exception() as ctxt:
                decide_if_need_reraise()
                if not should_be_reraised:
                    ctxt.reraise = False
    """
    def __init__(self):
        # Whether __exit__ should re-raise the saved exception.
        self.reraise = True

    def __enter__(self):
        # Capture the active exception triple for later re-raise.
        self.type_, self.value, self.tb = sys.exc_info()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        if exc_type is not None:
            # The body raised a new exception: log the one we saved and let
            # the new one propagate (returning False does not suppress it).
            formatted = traceback.format_exception(self.type_,
                                                   self.value,
                                                   self.tb)
            logging.error(_('Original exception being dropped: %s'),
                          formatted)
            return False
        if self.reraise:
            six.reraise(self.type_, self.value, self.tb)
def forever_retry_uncaught_exceptions(infunc):
    """Decorator: keep calling *infunc* until it returns without raising.

    Any uncaught exception is logged and the call is retried after a
    one-second pause. To avoid flooding the logs, an exception with the
    same message as the previous one is only logged once per minute; a
    changed message is logged immediately, together with how many times
    the previous one occurred.
    """
    def inner_func(*args, **kwargs):
        last_logged_at = 0
        previous_message = None
        occurrences = 0
        while True:
            try:
                return infunc(*args, **kwargs)
            except Exception as exc:
                current_message = six.u(str(exc))
                if current_message == previous_message:
                    occurrences += 1
                else:
                    occurrences = 1
                # Do not log any more frequently than once a minute unless
                # the exception message changes
                now = int(time.time())
                if (now - last_logged_at > 60 or
                        current_message != previous_message):
                    logging.exception(
                        _('Unexpected exception occurred %d time(s)... '
                          'retrying.') % occurrences)
                    last_logged_at = now
                    previous_message = current_message
                    occurrences = 0
                # This should be a very rare event. In case it isn't, do
                # a sleep.
                time.sleep(1)
    return inner_func
tempest-2014.1.dev4108.gf22b6cc/tempest/openstack/common/importutils.py 0000664 0001750 0001750 00000004514 12332757070 026006 0 ustar chuck chuck 0000000 0000000 # Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Import related utilities and helper functions.
"""
import sys
import traceback
def import_class(import_str):
    """Returns a class from a string including module and class."""
    module_path, _unused, attr_name = import_str.rpartition('.')
    try:
        __import__(module_path)
        module = sys.modules[module_path]
        return getattr(module, attr_name)
    except (ValueError, AttributeError):
        raise ImportError('Class %s cannot be found (%s)' %
                          (attr_name,
                           traceback.format_exception(*sys.exc_info())))
def import_object(import_str, *args, **kwargs):
    """Import a class and return an instance of it."""
    cls = import_class(import_str)
    return cls(*args, **kwargs)
def import_object_ns(name_space, import_str, *args, **kwargs):
    """Tries to import object from default namespace.

    Imports a class and return an instance of it, first by trying
    to find the class in a default namespace, then failing back to
    a full path if not found in the default namespace.
    """
    namespaced = "%s.%s" % (name_space, import_str)
    try:
        return import_class(namespaced)(*args, **kwargs)
    except ImportError:
        # Fall back to treating import_str as a fully-qualified path.
        return import_class(import_str)(*args, **kwargs)
def import_module(import_str):
    """Import a module by dotted name and return the module object."""
    __import__(import_str)
    return sys.modules[import_str]
def import_versioned_module(version, submodule=None):
    """Import tempest.v<version>[.<submodule>] and return it."""
    target = 'tempest.v%s' % version
    if submodule:
        target = '.'.join((target, submodule))
    return import_module(target)
def try_import(import_str, default=None):
    """Try to import a module and if it fails return default."""
    try:
        return import_module(import_str)
    except ImportError:
        # Missing module is an expected condition here, not an error.
        return default
tempest-2014.1.dev4108.gf22b6cc/tempest/openstack/common/timeutils.py 0000664 0001750 0001750 00000014240 12332757070 025427 0 ustar chuck chuck 0000000 0000000 # Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Time related utilities and helper functions.
"""
import calendar
import datetime
import time
import iso8601
import six
# ISO 8601 extended time format with microseconds
_ISO8601_TIME_FORMAT_SUBSECOND = '%Y-%m-%dT%H:%M:%S.%f'
_ISO8601_TIME_FORMAT = '%Y-%m-%dT%H:%M:%S'
PERFECT_TIME_FORMAT = _ISO8601_TIME_FORMAT_SUBSECOND


def isotime(at=None, subsecond=False):
    """Stringify time in ISO 8601 format.

    :param at: datetime to format; defaults to the current UTC time.
    :param subsecond: include microseconds when True.
    """
    if not at:
        at = utcnow()
    fmt = _ISO8601_TIME_FORMAT_SUBSECOND if subsecond else _ISO8601_TIME_FORMAT
    result = at.strftime(fmt)
    if at.tzinfo:
        zone = at.tzinfo.tzname(None)
    else:
        zone = 'UTC'
    # UTC is rendered as the compact 'Z' suffix, anything else verbatim.
    result += 'Z' if zone == 'UTC' else zone
    return result
def parse_isotime(timestr):
    """Parse time from ISO 8601 format."""
    try:
        return iso8601.parse_date(timestr)
    except (iso8601.ParseError, TypeError) as e:
        # Normalize both parser failures and bad argument types to the
        # ValueError callers expect.
        raise ValueError(six.text_type(e))
def strtime(at=None, fmt=PERFECT_TIME_FORMAT):
    """Returns formatted utcnow."""
    moment = at or utcnow()
    return moment.strftime(fmt)
def parse_strtime(timestr, fmt=PERFECT_TIME_FORMAT):
    """Parse a formatted time string back into a naive datetime."""
    return datetime.datetime.strptime(timestr, fmt)
def normalize_time(timestamp):
    """Normalize time in arbitrary timezone to UTC naive object."""
    delta = timestamp.utcoffset()
    if delta is None:
        # Already naive: nothing to shift.
        return timestamp
    return timestamp.replace(tzinfo=None) - delta
def is_older_than(before, seconds):
    """Return True if before is older than seconds."""
    if isinstance(before, six.string_types):
        before = parse_strtime(before)
    # Compare as naive UTC datetimes.
    before = before.replace(tzinfo=None)
    return utcnow() - before > datetime.timedelta(seconds=seconds)
def is_newer_than(after, seconds):
    """Return True if after is newer than seconds."""
    if isinstance(after, six.string_types):
        after = parse_strtime(after)
    # Compare as naive UTC datetimes.
    after = after.replace(tzinfo=None)
    return after - utcnow() > datetime.timedelta(seconds=seconds)
def utcnow_ts():
    """Timestamp version of our utcnow function."""
    if utcnow.override_time is None:
        # NOTE(kgriffs): This is several times faster
        # than going through calendar.timegm(...)
        return int(time.time())

    # A time override is active (see set_time_override()): derive the
    # timestamp from the overridden utcnow() value instead.
    return calendar.timegm(utcnow().timetuple())
def utcnow():
    """Overridable version of utils.utcnow."""
    if utcnow.override_time:
        try:
            # A list override yields successive values, one per call.
            return utcnow.override_time.pop(0)
        except AttributeError:
            # A single datetime override is returned unchanged.
            return utcnow.override_time
    return datetime.datetime.utcnow()
def iso8601_from_timestamp(timestamp):
    """Return an ISO 8601 string for a POSIX timestamp."""
    moment = datetime.datetime.utcfromtimestamp(timestamp)
    return isotime(moment)
# Module-level override hook consulted by utcnow()/utcnow_ts(); set via
# set_time_override() and cleared via clear_time_override().
utcnow.override_time = None
def set_time_override(override_time=None):
    """Overrides utils.utcnow.

    Make it return a constant time or a list thereof, one at a time.

    :param override_time: datetime instance or list thereof. If not
                          given, defaults to the current UTC time.
    """
    utcnow.override_time = override_time or datetime.datetime.utcnow()
def advance_time_delta(timedelta):
    """Advance overridden time using a datetime.timedelta.

    Works for both override forms accepted by set_time_override(): a
    single datetime or a list of datetimes.
    """
    assert utcnow.override_time is not None
    try:
        # BUG FIX: the previous code did `for dt in ...: dt += timedelta`,
        # which only rebinds the loop variable — datetime objects are
        # immutable, so the list override was never advanced. Update each
        # list slot in place instead.
        for i, dt in enumerate(utcnow.override_time):
            utcnow.override_time[i] = dt + timedelta
    except TypeError:
        # A single (non-iterable) datetime override.
        utcnow.override_time += timedelta
def advance_time_seconds(seconds):
    """Advance overridden time by seconds."""
    advance_time_delta(datetime.timedelta(seconds=seconds))
def clear_time_override():
    """Remove the overridden time."""
    # utcnow() falls back to the real clock once this is None.
    utcnow.override_time = None
def marshall_now(now=None):
    """Make an rpc-safe datetime with microseconds.

    Note: tzinfo is stripped, but not required for relative times.
    """
    if not now:
        now = utcnow()
    return {'day': now.day, 'month': now.month, 'year': now.year,
            'hour': now.hour, 'minute': now.minute, 'second': now.second,
            'microsecond': now.microsecond}
def unmarshall_time(tyme):
    """Rebuild a datetime from a dict produced by marshall_now()."""
    return datetime.datetime(tyme['year'], tyme['month'], tyme['day'],
                             tyme['hour'], tyme['minute'], tyme['second'],
                             tyme['microsecond'])
def delta_seconds(before, after):
    """Return the difference between two timing objects.

    Compute the difference in seconds between two date, time, or
    datetime objects (as a float, to microsecond resolution).
    """
    return total_seconds(after - before)
def total_seconds(delta):
    """Return the total seconds of datetime.timedelta object.

    datetime.timedelta lacks total_seconds() on Python 2.6, so fall back
    to computing it by hand there.
    """
    try:
        return delta.total_seconds()
    except AttributeError:
        manual = (delta.days * 24 * 3600) + delta.seconds
        return manual + float(delta.microseconds) / (10 ** 6)
def is_soon(dt, window):
    """Determines if time is going to happen in the next window seconds.

    :param dt: the time
    :param window: minimum seconds to remain to consider the time not soon
    :return: True if expiration is within the given duration
    """
    deadline = utcnow() + datetime.timedelta(seconds=window)
    return normalize_time(dt) <= deadline
tempest-2014.1.dev4108.gf22b6cc/tempest/openstack/common/fixture/ 0000775 0001750 0001750 00000000000 12332757136 024526 5 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/openstack/common/fixture/lockutils.py 0000664 0001750 0001750 00000003542 12332757070 027112 0 ustar chuck chuck 0000000 0000000 # Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
from tempest.openstack.common import lockutils
class LockFixture(fixtures.Fixture):
    """External locking fixture.

    This fixture is basically an alternative to the synchronized decorator with
    the external flag so that tearDowns and addCleanups will be included in
    the lock context for locking between tests. The fixture is recommended to
    be the first line in a test method, like so::

        def test_method(self):
            self.useFixture(LockFixture)
            ...

    or the first line in setUp if all the test methods in the class are
    required to be serialized. Something like::

        class TestCase(testtools.testcase):
            def setUp(self):
                self.useFixture(LockFixture)
                super(TestCase, self).setUp()
                ...

    This is because addCleanups are put on a LIFO queue that gets run after the
    test method exits. (either by completing or raising an exception)
    """
    def __init__(self, name, lock_file_prefix=None):
        # external=True: serialize across processes via a file lock.
        self.mgr = lockutils.lock(name, lock_file_prefix, True)

    def setUp(self):
        super(LockFixture, self).setUp()
        # The release is registered *before* the lock is acquired: cleanups
        # run LIFO after the test and its other cleanups, so the lock stays
        # held for the whole test and is released last. NOTE(review): this
        # also means __exit__ runs even if __enter__ raises — appears
        # intentional; confirm before reordering.
        self.addCleanup(self.mgr.__exit__, None, None, None)
        self.mgr.__enter__()
tempest-2014.1.dev4108.gf22b6cc/tempest/openstack/common/fixture/config.py 0000664 0001750 0001750 00000002710 12332757070 026342 0 ustar chuck chuck 0000000 0000000 #
# Copyright 2013 Mirantis, Inc.
# Copyright 2013 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
from oslo.config import cfg
import six
class Config(fixtures.Fixture):
    """Override some configuration values.

    Keyword arguments to :meth:`config` name the configuration options to
    override and give their values. An optional ``group`` keyword selects
    the option group the overrides apply to.

    All overrides are automatically cleared at the end of the current
    test by the reset() method, which is registered by addCleanup().
    """

    def __init__(self, conf=cfg.CONF):
        self.conf = conf

    def setUp(self):
        super(Config, self).setUp()
        self.addCleanup(self.conf.reset)

    def config(self, **kw):
        group = kw.pop('group', None)
        for option, value in six.iteritems(kw):
            self.conf.set_override(option, value, group)
tempest-2014.1.dev4108.gf22b6cc/tempest/openstack/common/fixture/moxstubout.py 0000664 0001750 0001750 00000002307 12332757070 027330 0 ustar chuck chuck 0000000 0000000 # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mox
class MoxStubout(fixtures.Fixture):
    """Deal with code around mox and stubout as a fixture."""

    def setUp(self):
        super(MoxStubout, self).setUp()
        # emulate some of the mox stuff, we can't use the metaclass
        # because it screws with our generators
        self.mox = mox.Mox()
        self.stubs = self.mox.stubs
        # Cleanups run LIFO: VerifyAll (registered last) runs first, while
        # the stubs are still in place, and UnsetStubs restores afterwards.
        self.addCleanup(self.mox.UnsetStubs)
        self.addCleanup(self.mox.VerifyAll)
tempest-2014.1.dev4108.gf22b6cc/tempest/openstack/common/fixture/__init__.py 0000664 0001750 0001750 00000000000 12332757070 026622 0 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/openstack/common/fixture/mockpatch.py 0000664 0001750 0001750 00000003051 12332757070 027045 0 ustar chuck chuck 0000000 0000000 # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mock
class PatchObject(fixtures.Fixture):
    """Fixture wrapping mock.patch.object with automatic cleanup.

    The started mock is exposed as ``self.mock`` and the patch is stopped
    when the fixture is cleaned up.
    """

    def __init__(self, obj, attr, new=mock.DEFAULT, **kwargs):
        self.obj = obj
        self.attr = attr
        self.new = new
        self.kwargs = kwargs

    def setUp(self):
        super(PatchObject, self).setUp()
        patcher = mock.patch.object(self.obj, self.attr, self.new,
                                    **self.kwargs)
        self.mock = patcher.start()
        self.addCleanup(patcher.stop)
class Patch(fixtures.Fixture):
    """Fixture wrapping mock.patch with automatic cleanup.

    The started mock is exposed as ``self.mock`` and the patch is stopped
    when the fixture is cleaned up.
    """

    def __init__(self, obj, **kwargs):
        self.obj = obj
        self.kwargs = kwargs

    def setUp(self):
        super(Patch, self).setUp()
        patcher = mock.patch(self.obj, **self.kwargs)
        self.mock = patcher.start()
        self.addCleanup(patcher.stop)
tempest-2014.1.dev4108.gf22b6cc/tempest/openstack/common/config/ 0000775 0001750 0001750 00000000000 12332757136 024305 5 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/openstack/common/config/generator.py 0000664 0001750 0001750 00000024453 12332757070 026652 0 ustar chuck chuck 0000000 0000000 # Copyright 2012 SINA Corporation
# Copyright 2014 Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Extracts OpenStack config option info from module(s)."""
from __future__ import print_function
import argparse
import imp
import os
import re
import socket
import sys
import textwrap
from oslo.config import cfg
import six
import stevedore.named
from tempest.openstack.common import gettextutils
from tempest.openstack.common import importutils
# Install the _() translation helper into builtins for this process.
gettextutils.install('tempest')

# Opt subclass names recognized inside str(type(opt)); used to decide how
# each option's default is rendered in the sample file.
STROPT = "StrOpt"
BOOLOPT = "BoolOpt"
INTOPT = "IntOpt"
FLOATOPT = "FloatOpt"
LISTOPT = "ListOpt"
DICTOPT = "DictOpt"
MULTISTROPT = "MultiStrOpt"

# Human-readable type labels appended to each option's help text.
OPT_TYPES = {
    STROPT: 'string value',
    BOOLOPT: 'boolean value',
    INTOPT: 'integer value',
    FLOATOPT: 'floating point value',
    LISTOPT: 'list value',
    DICTOPT: 'dict value',
    MULTISTROPT: 'multi valued',
}

# Matches any known Opt class name within str(type(opt)).
OPTION_REGEX = re.compile(r"(%s)" % "|".join([STROPT, BOOLOPT, INTOPT,
                                              FLOATOPT, LISTOPT, DICTOPT,
                                              MULTISTROPT]))

PY_EXT = ".py"
# Repository root (four levels up from this file); used by
# _sanitize_default() to scrub absolute paths out of sample values.
BASEDIR = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                       "../../../../"))
WORDWRAP_WIDTH = 60
def raise_extension_exception(extmanager, ep, err):
    # stevedore on_load_failure_callback: re-raise the original load
    # failure (the bare `raise` re-raises the active exception) instead of
    # letting stevedore swallow it silently.
    raise
def generate(argv):
    """Collect options from modules/libraries/source files and print a
    sample configuration file to stdout, one section per option group.
    """
    parser = argparse.ArgumentParser(
        description='generate sample configuration file',
    )
    parser.add_argument('-m', dest='modules', action='append')
    parser.add_argument('-l', dest='libraries', action='append')
    parser.add_argument('srcfiles', nargs='*')
    parsed_args = parser.parse_args(argv)

    # Group the given source files by their top-level package name and
    # convert each path to a dotted module string.
    mods_by_pkg = dict()
    for filepath in parsed_args.srcfiles:
        pkg_name = filepath.split(os.sep)[1]
        mod_str = '.'.join(['.'.join(filepath.split(os.sep)[:-1]),
                            os.path.basename(filepath).split('.')[0]])
        mods_by_pkg.setdefault(pkg_name, list()).append(mod_str)
    # NOTE(lzyeval): place top level modules before packages
    pkg_names = sorted(pkg for pkg in mods_by_pkg if pkg.endswith(PY_EXT))
    ext_names = sorted(pkg for pkg in mods_by_pkg if pkg not in pkg_names)
    pkg_names.extend(ext_names)

    # opts_by_group is a mapping of group name to an options list
    # The options list is a list of (module, options) tuples
    opts_by_group = {'DEFAULT': []}

    # Options from explicitly listed modules (-m).
    if parsed_args.modules:
        for module_name in parsed_args.modules:
            module = _import_module(module_name)
            if module:
                for group, opts in _list_opts(module):
                    opts_by_group.setdefault(group, []).append((module_name,
                                                                opts))

    # Look for entry points defined in libraries (or applications) for
    # option discovery, and include their return values in the output.
    #
    # Each entry point should be a function returning an iterable
    # of pairs with the group name (or None for the default group)
    # and the list of Opt instances for that group.
    if parsed_args.libraries:
        loader = stevedore.named.NamedExtensionManager(
            'oslo.config.opts',
            names=list(set(parsed_args.libraries)),
            invoke_on_load=False,
            on_load_failure_callback=raise_extension_exception
        )
        for ext in loader:
            for group, opts in ext.plugin():
                opt_list = opts_by_group.setdefault(group or 'DEFAULT', [])
                opt_list.append((ext.name, opts))

    # Options discovered from the source files themselves.
    for pkg_name in pkg_names:
        mods = mods_by_pkg.get(pkg_name)
        mods.sort()
        for mod_str in mods:
            if mod_str.endswith('.__init__'):
                mod_str = mod_str[:mod_str.rfind(".")]
            mod_obj = _import_module(mod_str)
            if not mod_obj:
                raise RuntimeError("Unable to import module %s" % mod_str)
            for group, opts in _list_opts(mod_obj):
                opts_by_group.setdefault(group, []).append((mod_str, opts))

    # DEFAULT always comes first, then the remaining groups alphabetically.
    print_group_opts('DEFAULT', opts_by_group.pop('DEFAULT', []))
    for group in sorted(opts_by_group.keys()):
        print_group_opts(group, opts_by_group[group])
def _import_module(mod_str):
    """Import *mod_str* and return the module, or None if it fails.

    Names prefixed with ``bin.`` are loaded from the ``bin`` directory as
    source files rather than as installed modules.
    """
    try:
        if mod_str.startswith('bin.'):
            script = mod_str[4:]
            imp.load_source(script, os.path.join('bin', script))
            return sys.modules[script]
        return importutils.import_module(mod_str)
    except Exception as e:
        # Report to stderr but keep the generator running.
        sys.stderr.write("Error importing module %s: %s\n" % (mod_str, str(e)))
        return None
def _is_in_group(opt, group):
"Check if opt is in group."
for value in group._opts.values():
# NOTE(llu): Temporary workaround for bug #1262148, wait until
# newly released oslo.config support '==' operator.
if not(value['opt'] != opt):
return True
return False
def _guess_groups(opt, mod_obj):
    """Return the name of the option group *opt* is registered under.

    Raises RuntimeError if the option cannot be located in any group,
    which typically means it was registered twice in the same group.
    """
    # is it in the DEFAULT group?
    if _is_in_group(opt, cfg.CONF):
        return 'DEFAULT'

    # what other groups is it in?
    for value in cfg.CONF.values():
        if (isinstance(value, cfg.CONF.GroupAttr) and
                _is_in_group(opt, value._group)):
            return value._group.name

    raise RuntimeError(
        "Unable to find group for option %s, "
        "maybe it's defined twice in the same group?"
        % opt.name
    )
def _list_opts(obj):
    """Return (group_name, [opts]) pairs for every Opt found on *obj*."""
    def is_opt(o):
        # SubCommandOpt instances have no sample-file representation.
        return (isinstance(o, cfg.Opt) and
                not isinstance(o, cfg.SubCommandOpt))

    collected = list()
    for attr_str in dir(obj):
        attr_obj = getattr(obj, attr_str)
        if is_opt(attr_obj):
            collected.append(attr_obj)
        elif (isinstance(attr_obj, list) and
              all(is_opt(x) for x in attr_obj)):
            collected.extend(attr_obj)

    grouped = {}
    for opt in collected:
        grouped.setdefault(_guess_groups(opt, obj), []).append(opt)
    return grouped.items()
def print_group_opts(group, opts_by_module):
    """Print one config-file section header plus all its options."""
    print("[%s]" % group)
    print('')
    for module_name, option_list in opts_by_module:
        print('#')
        print('# Options defined in %s' % module_name)
        print('#')
        print('')
        for opt in option_list:
            _print_opt(opt)
        print('')
def _get_my_ip():
    """Best-effort discovery of this host's outbound IP address.

    Opens a UDP socket toward a public address (no packets are sent) and
    reads the local address the kernel selected. Returns None on failure.
    """
    try:
        probe = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        probe.connect(('8.8.8.8', 80))
        addr = probe.getsockname()[0]
        probe.close()
        return addr
    except socket.error:
        return None
def _sanitize_default(name, value):
    """Set up a reasonably sensible default for pybasedir, my_ip and host.

    Environment-specific values (install prefix, checkout path, local IP,
    hostname) are replaced by stable placeholders so the generated sample
    file does not leak the generating machine's details.
    """
    if value.startswith(sys.prefix):
        # NOTE(jd) Don't use os.path.join, because it is likely to think the
        # second part is an absolute pathname and therefore drop the first
        # part.
        value = os.path.normpath("/usr/" + value[len(sys.prefix):])
        return value
    if value.startswith(BASEDIR):
        return value.replace(BASEDIR, '/usr/lib/python/site-packages')
    if BASEDIR in value:
        return value.replace(BASEDIR, '')
    if value == _get_my_ip():
        return '10.0.0.1'
    if value in (socket.gethostname(), socket.getfqdn()) and 'host' in name:
        return 'tempest'
    if value.strip() != value:
        # Quote values with leading/trailing whitespace so it survives.
        return '"%s"' % value
    return value
def _print_opt(opt):
    """Print one option as a commented sample entry: wrapped help text,
    any deprecated aliases, then '#name=default' formatted per type.
    Exits the process on an unrecognized type or a default that fails
    its type assertion.
    """
    opt_name, opt_default, opt_help = opt.dest, opt.default, opt.help
    if not opt_help:
        sys.stderr.write('WARNING: "%s" is missing help string.\n' % opt_name)
        opt_help = ""
    opt_type = None
    try:
        # The Opt subclass name (StrOpt, BoolOpt, ...) is recovered from
        # the repr of the option's type.
        opt_type = OPTION_REGEX.search(str(type(opt))).group(0)
    except (ValueError, AttributeError) as err:
        sys.stderr.write("%s\n" % str(err))
        sys.exit(1)
    opt_help = u'%s (%s)' % (opt_help,
                             OPT_TYPES[opt_type])
    print('#', "\n# ".join(textwrap.wrap(opt_help, WORDWRAP_WIDTH)))
    if opt.deprecated_opts:
        for deprecated_opt in opt.deprecated_opts:
            if deprecated_opt.name:
                deprecated_group = (deprecated_opt.group if
                                    deprecated_opt.group else "DEFAULT")
                print('# Deprecated group/name - [%s]/%s' %
                      (deprecated_group,
                       deprecated_opt.name))
    try:
        # Each branch asserts the default matches the declared option type
        # before rendering it; a mismatch is reported and aborts.
        if opt_default is None:
            print('#%s=' % opt_name)
        elif opt_type == STROPT:
            assert(isinstance(opt_default, six.string_types))
            print('#%s=%s' % (opt_name, _sanitize_default(opt_name,
                                                          opt_default)))
        elif opt_type == BOOLOPT:
            assert(isinstance(opt_default, bool))
            print('#%s=%s' % (opt_name, str(opt_default).lower()))
        elif opt_type == INTOPT:
            # bool is a subclass of int, so exclude it explicitly.
            assert(isinstance(opt_default, int) and
                   not isinstance(opt_default, bool))
            print('#%s=%s' % (opt_name, opt_default))
        elif opt_type == FLOATOPT:
            assert(isinstance(opt_default, float))
            print('#%s=%s' % (opt_name, opt_default))
        elif opt_type == LISTOPT:
            assert(isinstance(opt_default, list))
            print('#%s=%s' % (opt_name, ','.join(opt_default)))
        elif opt_type == DICTOPT:
            assert(isinstance(opt_default, dict))
            opt_default_strlist = [str(key) + ':' + str(value)
                                   for (key, value) in opt_default.items()]
            print('#%s=%s' % (opt_name, ','.join(opt_default_strlist)))
        elif opt_type == MULTISTROPT:
            assert(isinstance(opt_default, list))
            if not opt_default:
                opt_default = ['']
            for default in opt_default:
                print('#%s=%s' % (opt_name, default))
        print('')
    except Exception:
        sys.stderr.write('Error in option "%s"\n' % opt_name)
        sys.exit(1)
def main():
    """Command-line entry point: generate a sample config from argv."""
    generate(sys.argv[1:])


if __name__ == '__main__':
    main()
tempest-2014.1.dev4108.gf22b6cc/tempest/openstack/common/config/__init__.py 0000664 0001750 0001750 00000000000 12332757070 026401 0 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/openstack/common/log.py 0000664 0001750 0001750 00000053545 12332757070 024204 0 ustar chuck chuck 0000000 0000000 # Copyright 2011 OpenStack Foundation.
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Openstack logging handler.
This module adds to logging functionality by adding the option to specify
a context object when calling the various log methods. If the context object
is not specified, default formatting is used. Additionally, an instance uuid
may be passed as part of the log message, which is intended to make it easier
for admins to find messages related to a specific instance.
It also allows setting of formatting information through conf.
"""
import inspect
import itertools
import logging
import logging.config
import logging.handlers
import os
import re
import sys
import traceback
from oslo.config import cfg
import six
from six import moves
from tempest.openstack.common.gettextutils import _
from tempest.openstack.common import importutils
from tempest.openstack.common import jsonutils
from tempest.openstack.common import local
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"

# Keys whose values must never appear in log output (see mask_password).
_SANITIZE_KEYS = ['adminPass', 'admin_pass', 'password', 'admin_password']

# NOTE(ldbragst): Let's build a list of regex objects using the list of
# _SANITIZE_KEYS we already have. This way, we only have to add the new key
# to the list of _SANITIZE_KEYS and we can generate regular expressions
# for XML and JSON automatically.
_SANITIZE_PATTERNS = []
# One template per serialization style: key=value, XML, JSON, and
# repr-style dict output.  Group 1/2 capture the text around the secret.
# NOTE(review): the XML closing-tag group looks like it lost a '</' in
# transit; upstream oslo uses r'(</%(key)s>)' — verify against upstream.
_FORMAT_PATTERNS = [r'(%(key)s\s*[=]\s*[\"\']).*?([\"\'])',
                    r'(<%(key)s>).*?(%(key)s>)',
                    r'([\"\']%(key)s[\"\']\s*:\s*[\"\']).*?([\"\'])',
                    r'([\'"].*?%(key)s[\'"]\s*:\s*u?[\'"]).*?([\'"])']
for key in _SANITIZE_KEYS:
    for pattern in _FORMAT_PATTERNS:
        reg_ex = re.compile(pattern % {'key': key}, re.DOTALL)
        _SANITIZE_PATTERNS.append(reg_ex)

# CLI flags shared by every consumer of this module.
common_cli_opts = [
    cfg.BoolOpt('debug',
                short='d',
                default=False,
                help='Print debugging output (set logging level to '
                     'DEBUG instead of default WARNING level).'),
    cfg.BoolOpt('verbose',
                short='v',
                default=False,
                help='Print more verbose output (set logging level to '
                     'INFO instead of default WARNING level).'),
]

# CLI flags controlling log destinations and low-level formatting.
logging_cli_opts = [
    cfg.StrOpt('log-config-append',
               metavar='PATH',
               deprecated_name='log-config',
               help='The name of logging configuration file. It does not '
                    'disable existing loggers, but just appends specified '
                    'logging configuration to any other existing logging '
                    'options. Please see the Python logging module '
                    'documentation for details on logging configuration '
                    'files.'),
    cfg.StrOpt('log-format',
               default=None,
               metavar='FORMAT',
               help='DEPRECATED. '
                    'A logging.Formatter log message format string which may '
                    'use any of the available logging.LogRecord attributes. '
                    'This option is deprecated.  Please use '
                    'logging_context_format_string and '
                    'logging_default_format_string instead.'),
    cfg.StrOpt('log-date-format',
               default=_DEFAULT_LOG_DATE_FORMAT,
               metavar='DATE_FORMAT',
               help='Format string for %%(asctime)s in log records. '
                    'Default: %(default)s'),
    cfg.StrOpt('log-file',
               metavar='PATH',
               deprecated_name='logfile',
               help='(Optional) Name of log file to output to. '
                    'If no default is set, logging will go to stdout.'),
    cfg.StrOpt('log-dir',
               deprecated_name='logdir',
               help='(Optional) The base directory used for relative '
                    '--log-file paths'),
    cfg.BoolOpt('use-syslog',
                default=False,
                help='Use syslog for logging.'),
    cfg.StrOpt('syslog-log-facility',
               default='LOG_USER',
               help='syslog facility to receive log lines')
]

generic_log_opts = [
    cfg.BoolOpt('use_stderr',
                default=True,
                help='Log output to standard error')
]

# Config-file options for context-aware record formatting and levels.
log_opts = [
    cfg.StrOpt('logging_context_format_string',
               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
                       '%(name)s [%(request_id)s %(user_identity)s] '
                       '%(instance)s%(message)s',
               help='format string to use for log messages with context'),
    cfg.StrOpt('logging_default_format_string',
               default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
                       '%(name)s [-] %(instance)s%(message)s',
               help='format string to use for log messages without context'),
    cfg.StrOpt('logging_debug_format_suffix',
               default='%(funcName)s %(pathname)s:%(lineno)d',
               help='data to append to log format when level is DEBUG'),
    cfg.StrOpt('logging_exception_prefix',
               default='%(asctime)s.%(msecs)03d %(process)d TRACE %(name)s '
                       '%(instance)s',
               help='prefix each line of exception output with this format'),
    cfg.ListOpt('default_log_levels',
                default=[
                    'amqp=WARN',
                    'amqplib=WARN',
                    'boto=WARN',
                    'qpid=WARN',
                    'sqlalchemy=WARN',
                    'suds=INFO',
                    'iso8601=WARN',
                ],
                help='list of logger=LEVEL pairs'),
    cfg.BoolOpt('publish_errors',
                default=False,
                help='publish error events'),
    cfg.BoolOpt('fatal_deprecations',
                default=False,
                help='make deprecations fatal'),
    # NOTE(mikal): there are two options here because sometimes we are handed
    # a full instance (and could include more information), and other times we
    # are just handed a UUID for the instance.
    cfg.StrOpt('instance_format',
               default='[instance: %(uuid)s] ',
               help='If an instance is passed with the log message, format '
                    'it like this'),
    cfg.StrOpt('instance_uuid_format',
               default='[instance: %(uuid)s] ',
               help='If an instance UUID is passed with the log message, '
                    'format it like this'),
]

CONF = cfg.CONF
CONF.register_cli_opts(common_cli_opts)
CONF.register_cli_opts(logging_cli_opts)
CONF.register_opts(generic_log_opts)
CONF.register_opts(log_opts)

# our new audit level
# NOTE(jkoelker) Since we synthesized an audit level, make the logging
# module aware of it so it acts like other levels.
logging.AUDIT = logging.INFO + 1
logging.addLevelName(logging.AUDIT, 'AUDIT')
try:
    NullHandler = logging.NullHandler
except AttributeError:  # NOTE(jkoelker) NullHandler added in Python 2.7
    # Fallback for Python 2.6: a handler that discards every record and
    # never acquires a lock.
    class NullHandler(logging.Handler):
        def handle(self, record):
            pass

        def emit(self, record):
            pass

        def createLock(self):
            self.lock = None
def _dictify_context(context):
if context is None:
return None
if not isinstance(context, dict) and getattr(context, 'to_dict', None):
context = context.to_dict()
return context
def _get_binary_name():
return os.path.basename(inspect.stack()[-1][1])
def _get_log_file_path(binary=None):
    """Resolve the destination log file from CONF.log_file / CONF.log_dir.

    :param binary: program name used for '<binary>.log' when only a log
                   directory is configured; defaults to the running binary.
    :returns: absolute/relative path string, or None when file logging is
              not configured.
    """
    logfile = CONF.log_file
    logdir = CONF.log_dir
    if logfile:
        # An explicit file wins; prefix it with the directory if one is set.
        return os.path.join(logdir, logfile) if logdir else logfile
    if logdir:
        # Only a directory was given: synthesize '<binary>.log' inside it.
        program = binary or _get_binary_name()
        return '%s.log' % (os.path.join(logdir, program),)
    return None
def mask_password(message, secret="***"):
    """Replace password with 'secret' in message.

    :param message: The string which includes security information.
    :param secret: value with which to replace passwords.
    :returns: The unicode value of message with the password fields masked.

    For example:

    >>> mask_password("'adminPass' : 'aaaaa'")
    "'adminPass' : '***'"
    >>> mask_password("'admin_pass' : 'aaaaa'")
    "'admin_pass' : '***'"
    >>> mask_password('"password" : "aaaaa"')
    '"password" : "***"'
    >>> mask_password("'original_password' : 'aaaaa'")
    "'original_password' : '***'"
    >>> mask_password("u'original_password' : u'aaaaa'")
    "u'original_password' : u'***'"
    """
    message = six.text_type(message)
    # Fast path: nothing to mask unless one of the sensitive key names
    # actually occurs in the message.
    if not any(sensitive in message for sensitive in _SANITIZE_KEYS):
        return message
    # Keep the captured text around the secret, replace only the value.
    replacement = r'\g<1>' + secret + r'\g<2>'
    for regex in _SANITIZE_PATTERNS:
        message = regex.sub(replacement, message)
    return message
class BaseLoggerAdapter(logging.LoggerAdapter):
    """LoggerAdapter extended with the synthesized AUDIT level."""

    def audit(self, msg, *args, **kwargs):
        # logging.AUDIT is registered at module import (INFO + 1).
        self.log(logging.AUDIT, msg, *args, **kwargs)
class LazyAdapter(BaseLoggerAdapter):
    """Adapter that defers creating the real logger until first use."""

    def __init__(self, name='unknown', version='unknown'):
        self._logger = None   # created lazily by the `logger` property
        self.extra = {}
        self.name = name
        self.version = version

    @property
    def logger(self):
        # Build and cache the real ContextAdapter on first access.
        if not self._logger:
            self._logger = getLogger(self.name, self.version)
        return self._logger
class ContextAdapter(BaseLoggerAdapter):
    """Adapter that injects request-context data into every log record."""

    warn = logging.LoggerAdapter.warning

    def __init__(self, logger, project_name, version_string):
        self.logger = logger
        self.project = project_name
        self.version = version_string

    @property
    def handlers(self):
        return self.logger.handlers

    def deprecated(self, msg, *args, **kwargs):
        """Log a deprecation warning; fatal when CONF.fatal_deprecations."""
        stdmsg = _("Deprecated: %s") % msg
        if CONF.fatal_deprecations:
            self.critical(stdmsg, *args, **kwargs)
            raise DeprecatedConfig(msg=stdmsg)
        else:
            self.warn(stdmsg, *args, **kwargs)

    def process(self, msg, kwargs):
        """Populate kwargs['extra'] with context/instance/project fields."""
        # NOTE(mrodden): catch any Message/other object and
        # coerce to unicode before they can get
        # to the python logging and possibly
        # cause string encoding trouble
        if not isinstance(msg, six.string_types):
            msg = six.text_type(msg)
        if 'extra' not in kwargs:
            kwargs['extra'] = {}
        extra = kwargs['extra']
        # Prefer an explicit context= kwarg, else the thread-local one.
        context = kwargs.pop('context', None)
        if not context:
            context = getattr(local.store, 'context', None)
        if context:
            extra.update(_dictify_context(context))
        # Render the instance (or bare instance UUID) into the 'instance'
        # record attribute used by the format strings.
        instance = kwargs.pop('instance', None)
        instance_uuid = (extra.get('instance_uuid', None) or
                         kwargs.pop('instance_uuid', None))
        instance_extra = ''
        if instance:
            instance_extra = CONF.instance_format % instance
        elif instance_uuid:
            instance_extra = (CONF.instance_uuid_format
                              % {'uuid': instance_uuid})
        extra['instance'] = instance_extra
        extra.setdefault('user_identity', kwargs.pop('user_identity', None))
        extra['project'] = self.project
        extra['version'] = self.version
        # Expose the whole dict as record.extra too (used by JSONFormatter).
        extra['extra'] = extra.copy()
        return msg, kwargs
class JSONFormatter(logging.Formatter):
    """Formatter that serializes each LogRecord as a JSON object."""

    def __init__(self, fmt=None, datefmt=None):
        # NOTE(jkoelker) we ignore the fmt argument, but its still there
        # since logging.config.fileConfig passes it.
        self.datefmt = datefmt

    def formatException(self, ei, strip_newlines=True):
        """Return the traceback as a list of strings.

        With strip_newlines (the default) each traceback entry is split
        into individual, newline-free lines; empty lines are dropped.
        """
        lines = traceback.format_exception(*ei)
        if strip_newlines:
            lines = [moves.filter(
                lambda x: x,
                line.rstrip().splitlines()) for line in lines]
            lines = list(itertools.chain(*lines))
        return lines

    def format(self, record):
        """Serialize the record's attributes to a JSON string."""
        message = {'message': record.getMessage(),
                   'asctime': self.formatTime(record, self.datefmt),
                   'name': record.name,
                   'msg': record.msg,
                   'args': record.args,
                   'levelname': record.levelname,
                   'levelno': record.levelno,
                   'pathname': record.pathname,
                   'filename': record.filename,
                   'module': record.module,
                   'lineno': record.lineno,
                   'funcname': record.funcName,
                   'created': record.created,
                   'msecs': record.msecs,
                   'relative_created': record.relativeCreated,
                   'thread': record.thread,
                   'thread_name': record.threadName,
                   'process_name': record.processName,
                   'process': record.process,
                   'traceback': None}
        # 'extra' is attached by ContextAdapter.process when present.
        if hasattr(record, 'extra'):
            message['extra'] = record.extra
        if record.exc_info:
            message['traceback'] = self.formatException(record.exc_info)
        return jsonutils.dumps(message)
def _create_logging_excepthook(product_name):
    """Build a sys.excepthook that logs uncaught exceptions as CRITICAL."""
    def logging_excepthook(exc_type, value, tb):
        extra = {}
        if CONF.verbose:
            # Attach the full traceback only when verbose logging is on.
            extra['exc_info'] = (exc_type, value, tb)
        getLogger(product_name).critical(str(value), **extra)
    return logging_excepthook
class LogConfigError(Exception):
    """Raised when a logging configuration file cannot be loaded."""

    message = _('Error loading logging config %(log_config)s: %(err_msg)s')

    def __init__(self, log_config, err_msg):
        self.log_config = log_config   # path of the failing config file
        self.err_msg = err_msg         # underlying parser error text

    def __str__(self):
        return self.message % dict(log_config=self.log_config,
                                   err_msg=self.err_msg)
def _load_log_config(log_config_append):
    """Load a fileConfig-style logging config, keeping existing loggers.

    :raises LogConfigError: when the file cannot be parsed.
    """
    try:
        logging.config.fileConfig(log_config_append,
                                  disable_existing_loggers=False)
    except moves.configparser.Error as exc:
        raise LogConfigError(log_config_append, str(exc))
def setup(product_name):
    """Setup logging."""
    if CONF.log_config_append:
        # An explicit logging config file overrides all individual options.
        _load_log_config(CONF.log_config_append)
    else:
        _setup_logging_from_conf()
    # Route uncaught exceptions through the product's logger.
    sys.excepthook = _create_logging_excepthook(product_name)
def set_defaults(logging_context_format_string):
    """Override the default value of logging_context_format_string."""
    cfg.set_defaults(log_opts,
                     logging_context_format_string=
                     logging_context_format_string)
def _find_facility_from_conf():
    """Map CONF.syslog_log_facility to a SysLogHandler facility constant.

    Accepts either a SysLogHandler attribute name (e.g. 'LOG_USER') or a
    key of SysLogHandler.facility_names (e.g. 'user').

    :returns: the numeric syslog facility.
    :raises TypeError: if the configured facility is not recognized.
    """
    facility_names = logging.handlers.SysLogHandler.facility_names
    facility = getattr(logging.handlers.SysLogHandler,
                       CONF.syslog_log_facility,
                       None)
    if facility is None and CONF.syslog_log_facility in facility_names:
        facility = facility_names.get(CONF.syslog_log_facility)
    if facility is None:
        # Build a helpful error listing every accepted value.  dict.keys()
        # returns a view object on Python 3 which has no extend(), so
        # materialize it as a list first (works on both Python 2 and 3).
        valid_facilities = list(facility_names.keys())
        consts = ['LOG_AUTH', 'LOG_AUTHPRIV', 'LOG_CRON', 'LOG_DAEMON',
                  'LOG_FTP', 'LOG_KERN', 'LOG_LPR', 'LOG_MAIL', 'LOG_NEWS',
                  'LOG_AUTH', 'LOG_SYSLOG', 'LOG_USER', 'LOG_UUCP',
                  'LOG_LOCAL0', 'LOG_LOCAL1', 'LOG_LOCAL2', 'LOG_LOCAL3',
                  'LOG_LOCAL4', 'LOG_LOCAL5', 'LOG_LOCAL6', 'LOG_LOCAL7']
        valid_facilities.extend(consts)
        raise TypeError(_('syslog facility must be one of: %s') %
                        ', '.join("'%s'" % fac
                                  for fac in valid_facilities))
    return facility
def _setup_logging_from_conf():
    """Configure root-logger handlers, formatters and levels from CONF."""
    log_root = getLogger(None).logger
    # Start from a clean slate: drop any previously attached handlers.
    for handler in log_root.handlers:
        log_root.removeHandler(handler)
    if CONF.use_syslog:
        facility = _find_facility_from_conf()
        syslog = logging.handlers.SysLogHandler(address='/dev/log',
                                                facility=facility)
        log_root.addHandler(syslog)
    logpath = _get_log_file_path()
    if logpath:
        filelog = logging.handlers.WatchedFileHandler(logpath)
        log_root.addHandler(filelog)
    if CONF.use_stderr:
        streamlog = ColorHandler()
        log_root.addHandler(streamlog)
    elif not logpath:
        # pass sys.stdout as a positional argument
        # python2.6 calls the argument strm, in 2.7 it's stream
        streamlog = logging.StreamHandler(sys.stdout)
        log_root.addHandler(streamlog)
    if CONF.publish_errors:
        handler = importutils.import_object(
            "tempest.openstack.common.log_handler.PublishErrorsHandler",
            logging.ERROR)
        log_root.addHandler(handler)
    datefmt = CONF.log_date_format
    for handler in log_root.handlers:
        # NOTE(alaski): CONF.log_format overrides everything currently. This
        # should be deprecated in favor of context aware formatting.
        if CONF.log_format:
            handler.setFormatter(logging.Formatter(fmt=CONF.log_format,
                                                   datefmt=datefmt))
            log_root.info('Deprecated: log_format is now deprecated and will '
                          'be removed in the next release')
        else:
            handler.setFormatter(ContextFormatter(datefmt=datefmt))
    # Root level precedence: debug > verbose > default WARNING.
    if CONF.debug:
        log_root.setLevel(logging.DEBUG)
    elif CONF.verbose:
        log_root.setLevel(logging.INFO)
    else:
        log_root.setLevel(logging.WARNING)
    # Apply per-module overrides such as 'amqp=WARN'.
    for pair in CONF.default_log_levels:
        mod, _sep, level_name = pair.partition('=')
        level = logging.getLevelName(level_name)
        logger = logging.getLogger(mod)
        logger.setLevel(level)
# Cache of ContextAdapter instances, keyed by logger name.
_loggers = {}


def getLogger(name='unknown', version='unknown'):
    """Return the cached ContextAdapter for *name*, creating it on demand."""
    if name not in _loggers:
        _loggers[name] = ContextAdapter(logging.getLogger(name),
                                        name,
                                        version)
    return _loggers[name]
def getLazyLogger(name='unknown', version='unknown'):
    """Returns lazy logger.

    Creates a pass-through logger that does not create the real logger
    until it is really needed and delegates all calls to the real logger
    once it is created.
    """
    return LazyAdapter(name, version)
class WritableLogger(object):
    """A thin wrapper that responds to `write` and logs."""

    def __init__(self, logger, level=logging.INFO):
        self.logger = logger   # target logger
        self.level = level     # level every write() is logged at

    def write(self, msg):
        # Forward the written text verbatim as a single log record
        # (any trailing newline from the caller is kept).
        self.logger.log(self.level, msg)
class ContextFormatter(logging.Formatter):
    """A context.RequestContext aware formatter configured through flags.

    The flags used to set format strings are: logging_context_format_string
    and logging_default_format_string.  You can also specify
    logging_debug_format_suffix to append extra formatting if the log level
    is debug.

    For information about what variables are available for the formatter see:
    http://docs.python.org/library/logging.html#formatter
    """

    def format(self, record):
        """Uses contextstring if request_id is set, otherwise default."""
        # NOTE(sdague): default the fancier formating params
        # to an empty string so we don't throw an exception if
        # they get used
        for key in ('instance', 'color'):
            if key not in record.__dict__:
                record.__dict__[key] = ''
        # Pick the context-aware format string when a request_id is present.
        if record.__dict__.get('request_id', None):
            self._fmt = CONF.logging_context_format_string
        else:
            self._fmt = CONF.logging_default_format_string
        if (record.levelno == logging.DEBUG and
                CONF.logging_debug_format_suffix):
            self._fmt += " " + CONF.logging_debug_format_suffix
        # Cache this on the record, Logger will respect our formated copy
        if record.exc_info:
            record.exc_text = self.formatException(record.exc_info, record)
        return logging.Formatter.format(self, record)

    def formatException(self, exc_info, record=None):
        """Format exception output with CONF.logging_exception_prefix."""
        if not record:
            return logging.Formatter.formatException(self, exc_info)
        stringbuffer = moves.StringIO()
        traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
                                  None, stringbuffer)
        lines = stringbuffer.getvalue().split('\n')
        stringbuffer.close()
        # The prefix may reference %(asctime)s; make sure it is populated.
        if CONF.logging_exception_prefix.find('%(asctime)') != -1:
            record.asctime = self.formatTime(record, self.datefmt)
        # Prefix every traceback line with the configured TRACE prefix.
        formatted_lines = []
        for line in lines:
            pl = CONF.logging_exception_prefix % record.__dict__
            fl = '%s%s' % (pl, line)
            formatted_lines.append(fl)
        return '\n'.join(formatted_lines)
class ColorHandler(logging.StreamHandler):
    """Stream handler that attaches an ANSI color code as record.color.

    ContextFormatter defaults record.color to '' when it is unused, so
    only format strings that reference %(color)s render the escapes.
    """

    LEVEL_COLORS = {
        logging.DEBUG: '\033[00;32m',  # GREEN
        logging.INFO: '\033[00;36m',  # CYAN
        logging.AUDIT: '\033[01;36m',  # BOLD CYAN
        logging.WARN: '\033[01;33m',  # BOLD YELLOW
        logging.ERROR: '\033[01;31m',  # BOLD RED
        logging.CRITICAL: '\033[01;31m',  # BOLD RED
    }

    def format(self, record):
        record.color = self.LEVEL_COLORS[record.levelno]
        return logging.StreamHandler.format(self, record)
class DeprecatedConfig(Exception):
    """Raised by ContextAdapter.deprecated when fatal_deprecations is set."""

    message = _("Fatal call to deprecated config: %(msg)s")

    def __init__(self, msg):
        # NOTE(review): super(Exception, self) starts the MRO walk *after*
        # Exception; super(DeprecatedConfig, self) is the conventional
        # spelling.  Behavior appears equivalent here — verify upstream.
        super(Exception, self).__init__(self.message % dict(msg=msg))
tempest-2014.1.dev4108.gf22b6cc/tempest/openstack/common/__init__.py 0000664 0001750 0001750 00000001204 12332757070 025143 0 ustar chuck chuck 0000000 0000000 #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six

# Register 'mox' in six.moves so that `from six.moves import mox` resolves
# to the 'mox' package on Python 2 and to 'mox3.mox' on Python 3.
six.add_move(six.MovedModule('mox', 'mox', 'mox3.mox'))
tempest-2014.1.dev4108.gf22b6cc/tempest/openstack/common/local.py 0000664 0001750 0001750 00000003215 12332757070 024502 0 ustar chuck chuck 0000000 0000000 # Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Local storage of variables using weak references"""
import threading
import weakref
class WeakLocal(threading.local):
    """Thread-local storage that holds only weak references to its values.

    Reading an attribute dereferences the stored weak reference, so a
    value whose referent has been garbage collected comes back as None.
    """

    def __getattribute__(self, attr):
        stored = super(WeakLocal, self).__getattribute__(attr)
        if not stored:
            return stored
        # What is stored is a weakref, not the value itself; call it to
        # obtain the real object (or None once it has been collected).
        return stored()

    def __setattr__(self, attr, value):
        # Wrap every assigned value in a weak reference before storing.
        return super(WeakLocal, self).__setattr__(attr, weakref.ref(value))
# NOTE(mikal): the name "store" should be deprecated in the future
store = WeakLocal()

# A "weak" store uses weak references and allows an object to fall out of scope
# when it falls out of scope in the code that uses the thread local storage. A
# "strong" store will hold a reference to the object so that it never falls out
# of scope.
weak_store = WeakLocal()
strong_store = threading.local()
tempest-2014.1.dev4108.gf22b6cc/tempest/openstack/common/gettextutils.py 0000664 0001750 0001750 00000042243 12332757070 026161 0 ustar chuck chuck 0000000 0000000 # Copyright 2012 Red Hat, Inc.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
gettext for openstack-common modules.
Usual usage in an openstack.common module:
from tempest.openstack.common.gettextutils import _
"""
import copy
import functools
import gettext
import locale
from logging import handlers
import os
from babel import localedata
import six
# Message-catalog directory may be overridden via the TEMPEST_LOCALEDIR
# environment variable; gettext falls back to its default search path.
_localedir = os.environ.get('tempest'.upper() + '_LOCALEDIR')
_t = gettext.translation('tempest', localedir=_localedir, fallback=True)

# We use separate translation catalogs for each log level, so set up a
# mapping between the log level name and the translator. The domain
# for the log level is project_name + "-log-" + log_level so messages
# for each level end up in their own catalog.
_t_log_levels = dict(
    (level, gettext.translation('tempest' + '-log-' + level,
                                localedir=_localedir,
                                fallback=True))
    for level in ['info', 'warning', 'error', 'critical']
)

# Cache for get_available_languages(), keyed by translation domain.
_AVAILABLE_LANGUAGES = {}
# Toggled by enable_lazy(); selects deferred Message-based translation.
USE_LAZY = False
def enable_lazy():
    """Convenience function for configuring _() to use lazy gettext

    Call this at the start of execution to enable the gettextutils._
    function to use lazy gettext functionality. This is useful if
    your project is importing _ directly instead of using the
    gettextutils.install() way of importing the _ function.
    """
    global USE_LAZY
    USE_LAZY = True
def _(msg):
    """Translate *msg*; returns a lazy Message when USE_LAZY is enabled."""
    if USE_LAZY:
        return Message(msg, domain='tempest')
    else:
        if six.PY3:
            # Python 3 gettext returns text directly; ugettext is py2-only.
            return _t.gettext(msg)
        return _t.ugettext(msg)
def _log_translation(msg, level):
    """Build a single translation of a log message

    Uses the per-level catalog ('tempest-log-<level>'); returns a lazy
    Message in that domain when USE_LAZY is enabled.
    """
    if USE_LAZY:
        return Message(msg, domain='tempest' + '-log-' + level)
    else:
        translator = _t_log_levels[level]
        if six.PY3:
            return translator.gettext(msg)
        return translator.ugettext(msg)
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.  Each one routes through _log_translation with its level's
# dedicated catalog.
_LI = functools.partial(_log_translation, level='info')
_LW = functools.partial(_log_translation, level='warning')
_LE = functools.partial(_log_translation, level='error')
_LC = functools.partial(_log_translation, level='critical')
def install(domain, lazy=False):
    """Install a _() function using the given translation domain.

    Given a translation domain, install a _() function using gettext's
    install() function.

    The main difference from gettext.install() is that we allow
    overriding the default localedir (e.g. /usr/share/locale) using
    a translation-domain-specific environment variable (e.g.
    NOVA_LOCALEDIR).

    :param domain: the translation domain
    :param lazy: indicates whether or not to install the lazy _() function.
                 The lazy _() introduces a way to do deferred translation
                 of messages by installing a _ that builds Message objects,
                 instead of strings, which can then be lazily translated into
                 any available locale.
    """
    if lazy:
        # NOTE(mrodden): Lazy gettext functionality.
        #
        # The following introduces a deferred way to do translations on
        # messages in OpenStack. We override the standard _() function
        # and % (format string) operation to build Message objects that can
        # later be translated when we have more information.
        def _lazy_gettext(msg):
            """Create and return a Message object.

            Lazy gettext function for a given domain, it is a factory method
            for a project/module to get a lazy gettext function for its own
            translation domain (i.e. nova, glance, cinder, etc.)

            Message encapsulates a string so that we can translate
            it later when needed.
            """
            return Message(msg, domain=domain)

        from six import moves
        # Install as the builtin '_' so all modules see the lazy version.
        moves.builtins.__dict__['_'] = _lazy_gettext
    else:
        # Eager mode: defer to gettext.install, honoring the per-domain
        # localedir override (e.g. NOVA_LOCALEDIR).
        localedir = '%s_LOCALEDIR' % domain.upper()
        if six.PY3:
            gettext.install(domain,
                            localedir=os.environ.get(localedir))
        else:
            gettext.install(domain,
                            localedir=os.environ.get(localedir),
                            unicode=True)
class Message(six.text_type):
    """A Message object is a unicode object that can be translated.

    Translation of Message is done explicitly using the translate() method.
    For all non-translation intents and purposes, a Message is simply unicode,
    and can be treated as such.
    """

    def __new__(cls, msgid, msgtext=None, params=None,
                domain='tempest', *args):
        """Create a new Message object.

        In order for translation to work gettext requires a message ID, this
        msgid will be used as the base unicode text. It is also possible
        for the msgid and the base unicode text to be different by passing
        the msgtext parameter.
        """
        # If the base msgtext is not given, we use the default translation
        # of the msgid (which is in English) just in case the system locale is
        # not English, so that the base text will be in that locale by default.
        if not msgtext:
            msgtext = Message._translate_msgid(msgid, domain)
        # We want to initialize the parent unicode with the actual object that
        # would have been plain unicode if 'Message' was not enabled.
        msg = super(Message, cls).__new__(cls, msgtext)
        msg.msgid = msgid     # original untranslated message id
        msg.domain = domain   # gettext translation domain
        msg.params = params   # snapshot of % substitution args, if any
        return msg

    def translate(self, desired_locale=None):
        """Translate this message to the desired locale.

        :param desired_locale: The desired locale to translate the message to,
                               if no locale is provided the message will be
                               translated to the system's default locale.

        :returns: the translated message in unicode
        """
        translated_message = Message._translate_msgid(self.msgid,
                                                      self.domain,
                                                      desired_locale)
        if self.params is None:
            # No need for more translation
            return translated_message

        # This Message object may have been formatted with one or more
        # Message objects as substitution arguments, given either as a single
        # argument, part of a tuple, or as one or more values in a dictionary.
        # When translating this Message we need to translate those Messages too
        translated_params = _translate_args(self.params, desired_locale)
        translated_message = translated_message % translated_params
        return translated_message

    @staticmethod
    def _translate_msgid(msgid, domain, desired_locale=None):
        """Look up *msgid* in *domain*'s catalog for the given locale."""
        if not desired_locale:
            system_locale = locale.getdefaultlocale()
            # If the system locale is not available to the runtime use English
            if not system_locale[0]:
                desired_locale = 'en_US'
            else:
                desired_locale = system_locale[0]
        locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR')
        lang = gettext.translation(domain,
                                   localedir=locale_dir,
                                   languages=[desired_locale],
                                   fallback=True)
        if six.PY3:
            translator = lang.gettext
        else:
            translator = lang.ugettext
        translated_message = translator(msgid)
        return translated_message

    def __mod__(self, other):
        # When we mod a Message we want the actual operation to be performed
        # by the parent class (i.e. unicode()), the only thing we do here is
        # save the original msgid and the parameters in case of a translation
        params = self._sanitize_mod_params(other)
        unicode_mod = super(Message, self).__mod__(params)
        modded = Message(self.msgid,
                         msgtext=unicode_mod,
                         params=params,
                         domain=self.domain)
        return modded

    def _sanitize_mod_params(self, other):
        """Sanitize the object being modded with this Message.

        - Add support for modding 'None' so translation supports it
        - Trim the modded object, which can be a large dictionary, to only
          those keys that would actually be used in a translation
        - Snapshot the object being modded, in case the message is
          translated, it will be used as it was when the Message was created
        """
        if other is None:
            params = (other,)
        elif isinstance(other, dict):
            # Merge the dictionaries
            # Copy each item in case one does not support deep copy.
            params = {}
            if isinstance(self.params, dict):
                for key, val in self.params.items():
                    params[key] = self._copy_param(val)
            for key, val in other.items():
                params[key] = self._copy_param(val)
        else:
            params = self._copy_param(other)
        return params

    def _copy_param(self, param):
        """Deep-copy a substitution arg, degrading to unicode on failure."""
        try:
            return copy.deepcopy(param)
        except Exception:
            # Fallback to casting to unicode this will handle the
            # python code-like objects that can't be deep-copied
            return six.text_type(param)

    def __add__(self, other):
        msg = _('Message objects do not support addition.')
        raise TypeError(msg)

    def __radd__(self, other):
        return self.__add__(other)

    def __str__(self):
        # NOTE(luisg): Logging in python 2.6 tries to str() log records,
        # and it expects specifically a UnicodeError in order to proceed.
        msg = _('Message objects do not support str() because they may '
                'contain non-ascii characters. '
                'Please use unicode() or translate() instead.')
        raise UnicodeError(msg)
def get_available_languages(domain):
    """Lists the available languages for the given translation domain.

    :param domain: the domain to get languages for
    """
    if domain in _AVAILABLE_LANGUAGES:
        # Serve (a copy of) the cached list on repeat calls.
        return copy.copy(_AVAILABLE_LANGUAGES[domain])

    localedir = '%s_LOCALEDIR' % domain.upper()
    find = lambda x: gettext.find(domain,
                                  localedir=os.environ.get(localedir),
                                  languages=[x])

    # NOTE(mrodden): en_US should always be available (and first in case
    # order matters) since our in-line message strings are en_US
    language_list = ['en_US']
    # NOTE(luisg): Babel <1.0 used a function called list(), which was
    # renamed to locale_identifiers() in >=1.0, the requirements master list
    # requires >=0.9.6, uncapped, so defensively work with both. We can remove
    # this check when the master list updates to >=1.0, and update all projects
    list_identifiers = (getattr(localedata, 'list', None) or
                        getattr(localedata, 'locale_identifiers'))
    locale_identifiers = list_identifiers()
    for i in locale_identifiers:
        if find(i) is not None:
            language_list.append(i)
    # NOTE(luisg): Babel>=1.0,<1.3 has a bug where some OpenStack supported
    # locales (e.g. 'zh_CN', and 'zh_TW') aren't supported even though they
    # are perfectly legitimate locales:
    #     https://github.com/mitsuhiko/babel/issues/37
    # In Babel 1.3 they fixed the bug and they support these locales, but
    # they are still not explicitly "listed" by locale_identifiers().
    # That is why we add the locales here explicitly if necessary so that
    # they are listed as supported.
    aliases = {'zh': 'zh_CN',
               'zh_Hant_HK': 'zh_HK',
               'zh_Hant': 'zh_TW',
               'fil': 'tl_PH'}
    for (locale, alias) in six.iteritems(aliases):
        if locale in language_list and alias not in language_list:
            language_list.append(alias)
    _AVAILABLE_LANGUAGES[domain] = language_list
    return copy.copy(language_list)
def translate(obj, desired_locale=None):
    """Gets the translated unicode representation of the given object.

    If the object is not translatable it is returned as-is.
    If the locale is None the object is translated to the system locale.

    :param obj: the object to translate
    :param desired_locale: the locale to translate the message to, if None the
                           default system locale will be used
    :returns: the translated object in unicode, or the original object if
              it could not be translated
    """
    message = obj
    if not isinstance(message, Message):
        # If the object to translate is not already translatable,
        # let's first get its unicode representation
        message = six.text_type(obj)
    if isinstance(message, Message):
        # Even after unicoding() we still need to check if we are
        # running with translatable unicode before translating
        return message.translate(desired_locale)
    return obj
def _translate_args(args, desired_locale=None):
    """Translate all the translatable elements of the given arguments object.

    This method is used for translating the translatable values in method
    arguments which include values of tuples or dictionaries.
    If the object is not a tuple or a dictionary the object itself is
    translated if it is translatable.

    If the locale is None the object is translated to the system locale.

    :param args: the args to translate
    :param desired_locale: the locale to translate the args to, if None the
                           default system locale will be used
    :returns: a new args object with the translated contents of the original
    """
    if isinstance(args, tuple):
        return tuple(translate(item, desired_locale) for item in args)
    if isinstance(args, dict):
        return dict((key, translate(val, desired_locale))
                    for key, val in six.iteritems(args))
    return translate(args, desired_locale)
class TranslationHandler(handlers.MemoryHandler):
    """Handler that translates records before logging them.

    The TranslationHandler takes a locale and a target logging.Handler
    object to forward LogRecord objects to after translating them. This
    handler depends on Message objects being logged, instead of regular
    strings.

    The handler can be configured declaratively in the logging.conf as
    follows:

        [handlers]
        keys = translatedlog, translator

        [handler_translatedlog]
        class = handlers.WatchedFileHandler
        args = ('/var/log/api-localized.log',)
        formatter = context

        [handler_translator]
        class = openstack.common.log.TranslationHandler
        target = translatedlog
        args = ('zh_CN',)

    If the specified locale is not available in the system, the handler will
    log in the default locale.
    """

    def __init__(self, locale=None, target=None):
        """Initialize a TranslationHandler

        :param locale: locale to use for translating messages
        :param target: logging.Handler object to forward
                       LogRecord objects to after translation
        """
        # NOTE(luisg): In order to allow this handler to be a wrapper for
        # other handlers, such as a FileHandler, and still be able to
        # configure it using logging.conf, this handler has to extend
        # MemoryHandler because only the MemoryHandlers' logging.conf
        # parsing is implemented such that it accepts a target handler.
        handlers.MemoryHandler.__init__(self, capacity=0, target=target)
        self.locale = locale

    def setFormatter(self, fmt):
        # The formatter applies to the wrapped handler, not to this one.
        self.target.setFormatter(fmt)

    def emit(self, record):
        # Preserve the untranslated message and args so other handlers
        # that see the same record are not affected by the translation.
        saved_msg, saved_args = record.msg, record.args
        try:
            self._translate_and_log_record(record)
        finally:
            record.msg, record.args = saved_msg, saved_args

    def _translate_and_log_record(self, record):
        record.msg = translate(record.msg, self.locale)
        # In addition to translating the message, we also need to translate
        # arguments that were passed to the log method that were not part
        # of the main message e.g., log.info(_('Some message %s'), this_one))
        record.args = _translate_args(record.args, self.locale)
        self.target.emit(record)
tempest-2014.1.dev4108.gf22b6cc/tempest/openstack/common/fileutils.py 0000664 0001750 0001750 00000007507 12332757070 025420 0 ustar chuck chuck 0000000 0000000 # Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import errno
import os
import tempfile
from tempest.openstack.common import excutils
from tempest.openstack.common.gettextutils import _
from tempest.openstack.common import log as logging
LOG = logging.getLogger(__name__)

# Module-level cache used by read_cached_file(): maps a filename to a
# dict holding the file's 'data' and the 'mtime' at which it was read.
_FILE_CACHE = {}
def ensure_tree(path):
    """Create a directory (and any ancestor directories required)

    :param path: Directory to create
    """
    try:
        os.makedirs(path)
    except OSError as exc:
        # A pre-existing directory is fine; anything else (including a
        # non-directory already occupying the path) is re-raised.
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise
def read_cached_file(filename, force_reload=False):
    """Read from a file, re-reading only when it has been modified.

    :param filename: path of the file to read
    :param force_reload: Whether to reload the file.
    :returns: A tuple (reloaded, data); the boolean specifies if the data
              is fresh (re-read from disk) or served from the cache.
    """
    # No 'global' statement needed: _FILE_CACHE is only mutated, never
    # rebound.
    if force_reload and filename in _FILE_CACHE:
        del _FILE_CACHE[filename]

    reloaded = False
    mtime = os.path.getmtime(filename)
    cache_info = _FILE_CACHE.setdefault(filename, {})

    if not cache_info or mtime > cache_info.get('mtime', 0):
        # Lazy %-style args defer formatting until the record is actually
        # emitted; debug-level messages are not translated per the
        # OpenStack i18n guidelines.
        LOG.debug("Reloading cached file %s", filename)
        with open(filename) as fap:
            cache_info['data'] = fap.read()
        cache_info['mtime'] = mtime
        reloaded = True
    return (reloaded, cache_info['data'])
def delete_if_exists(path, remove=os.unlink):
    """Delete a file, but ignore file not found error.

    :param path: File to delete
    :param remove: Optional function to remove passed path
    """
    try:
        remove(path)
    except OSError as err:
        # A missing file counts as success; anything else propagates.
        if err.errno == errno.ENOENT:
            return
        raise
@contextlib.contextmanager
def remove_path_on_error(path, remove=delete_if_exists):
    """Protect code that wants to operate on PATH atomically.
    Any exception will cause PATH to be removed.
    :param path: File to work with
    :param remove: Optional function to remove passed path
    """
    try:
        yield
    except Exception:
        # save_and_reraise_exception re-raises the original exception
        # (with its traceback) once the cleanup below has run.
        with excutils.save_and_reraise_exception():
            remove(path)
def file_open(*args, **kwargs):
    """Open file

    see built-in open() documentation for more details

    Note: The reason this is kept in a separate module is to easily
    be able to provide a stub module that doesn't alter system
    state at all (for unit tests)
    """
    # NOTE: open() is an alias of file() on Python 2 and the only
    # spelling that exists on Python 3; the py2-only file() builtin
    # would break under py3.
    return open(*args, **kwargs)
def write_to_tempfile(content, path=None, suffix='', prefix='tmp'):
    """Create temporary file or use existing file.

    This util is needed for creating temporary file with
    specified content, suffix and prefix. If path is not None,
    it will be used for writing content. If the path doesn't
    exist it'll be created.

    :param content: content for temporary file.
    :param path: same as parameter 'dir' for mkstemp
    :param suffix: same as parameter 'suffix' for mkstemp
    :param prefix: same as parameter 'prefix' for mkstemp

    For example: it can be used in database tests for creating
    configuration files.
    """
    if path:
        # Make sure the requested directory exists before mkstemp uses it.
        ensure_tree(path)

    handle, tmp_name = tempfile.mkstemp(suffix=suffix, dir=path,
                                        prefix=prefix)
    try:
        os.write(handle, content)
    finally:
        # Always release the OS-level descriptor, even on write failure.
        os.close(handle)
    return tmp_name
tempest-2014.1.dev4108.gf22b6cc/tempest/openstack/common/jsonutils.py 0000664 0001750 0001750 00000015072 12332757070 025446 0 ustar chuck chuck 0000000 0000000 # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
JSON related utilities.
This module provides a few things:
1) A handy function for getting an object down to something that can be
JSON serialized. See to_primitive().
2) Wrappers around loads() and dumps(). The dumps() wrapper will
automatically use to_primitive() for you if needed.
3) This sets up anyjson to use the loads() and dumps() wrappers if anyjson
is available.
'''
import datetime
import functools
import inspect
import itertools
import json
try:
import xmlrpclib
except ImportError:
# NOTE(jaypipes): xmlrpclib was renamed to xmlrpc.client in Python3
# however the function and object call signatures
# remained the same. This whole try/except block should
# be removed and replaced with a call to six.moves once
# six 1.4.2 is released. See http://bit.ly/1bqrVzu
import xmlrpc.client as xmlrpclib
import six
from tempest.openstack.common import gettextutils
from tempest.openstack.common import importutils
from tempest.openstack.common import timeutils
# netaddr is an optional dependency; None when it is not installed.
netaddr = importutils.try_import("netaddr")

# Predicates identifying objects (modules, classes, functions, frames,
# tracebacks, ...) that must be stringified rather than iterated.
_nasty_type_tests = [inspect.ismodule, inspect.isclass, inspect.ismethod,
                     inspect.isfunction, inspect.isgeneratorfunction,
                     inspect.isgenerator, inspect.istraceback, inspect.isframe,
                     inspect.iscode, inspect.isbuiltin, inspect.isroutine,
                     inspect.isabstract]

# Types that are JSON-representable as-is, without any conversion.
_simple_types = (six.string_types + six.integer_types
                 + (type(None), bool, float))
def to_primitive(value, convert_instances=False, convert_datetime=True,
                 level=0, max_depth=3):
    """Convert a complex object into primitives.

    Handy for JSON serialization. We can optionally handle instances,
    but since this is a recursive function, we could have cyclical
    data structures.

    To handle cyclical data structures we could track the actual objects
    visited in a set, but not all objects are hashable. Instead we just
    track the depth of the object inspections and don't go too deep.

    Therefore, convert_instances=True is lossy ... be aware.

    :param value: the object to convert
    :param convert_instances: when True, recurse into instance __dict__s
    :param convert_datetime: when True, render datetimes via
                             timeutils.strtime()
    :param level: current recursion depth (internal bookkeeping)
    :param max_depth: depth beyond which values are replaced with '?'
    :returns: a structure composed only of JSON-serializable primitives
    """
    # handle obvious types first - order of basic types determined by running
    # full tests on nova project, resulting in the following counts:
    # (per-type hit counts, most common first; the type labels that
    # originally accompanied these numbers were lost in formatting)
    # 572754
    # 460353
    # 379632
    # 274610
    # 199918
    # 114200
    # 51817
    # 26164
    # 6491
    # 283
    # 19
    if isinstance(value, _simple_types):
        return value

    if isinstance(value, datetime.datetime):
        if convert_datetime:
            return timeutils.strtime(value)
        else:
            return value

    # value of itertools.count doesn't get caught by nasty_type_tests
    # and results in infinite loop when list(value) is called.
    if type(value) == itertools.count:
        return six.text_type(value)

    # FIXME(vish): Workaround for LP bug 852095. Without this workaround,
    #              tests that raise an exception in a mocked method that
    #              has a @wrap_exception with a notifier will fail. If
    #              we up the dependency to 0.5.4 (when it is released) we
    #              can remove this workaround.
    if getattr(value, '__module__', None) == 'mox':
        return 'mock'

    # Depth guard: cyclical structures bottom out as '?' instead of
    # recursing forever.
    if level > max_depth:
        return '?'

    # The try block may not be necessary after the class check above,
    # but just in case ...
    try:
        recursive = functools.partial(to_primitive,
                                      convert_instances=convert_instances,
                                      convert_datetime=convert_datetime,
                                      level=level,
                                      max_depth=max_depth)
        if isinstance(value, dict):
            return dict((k, recursive(v)) for k, v in six.iteritems(value))
        elif isinstance(value, (list, tuple)):
            return [recursive(lv) for lv in value]

        # It's not clear why xmlrpclib created their own DateTime type, but
        # for our purposes, make it a datetime type which is explicitly
        # handled
        if isinstance(value, xmlrpclib.DateTime):
            value = datetime.datetime(*tuple(value.timetuple())[:6])

        if convert_datetime and isinstance(value, datetime.datetime):
            return timeutils.strtime(value)
        elif isinstance(value, gettextutils.Message):
            # Translatable messages serialize as their underlying data.
            return value.data
        elif hasattr(value, 'iteritems'):
            # Dict-like object: convert to a plain dict, one level deeper.
            return recursive(dict(value.iteritems()), level=level + 1)
        elif hasattr(value, '__iter__'):
            return recursive(list(value))
        elif convert_instances and hasattr(value, '__dict__'):
            # Likely an instance of something. Watch for cycles.
            # Ignore class member vars.
            return recursive(value.__dict__, level=level + 1)
        elif netaddr and isinstance(value, netaddr.IPAddress):
            return six.text_type(value)
        else:
            if any(test(value) for test in _nasty_type_tests):
                return six.text_type(value)
            return value
    except TypeError:
        # Class objects are tricky since they may define something like
        # __iter__ defined but it isn't callable as list().
        return six.text_type(value)
def dumps(value, default=to_primitive, **kwargs):
    """Serialize *value* to JSON, reducing complex objects via default."""
    kwargs['default'] = default
    return json.dumps(value, **kwargs)
def loads(s):
    """Deserialize a JSON document from the string *s*."""
    result = json.loads(s)
    return result
def load(s):
    """Deserialize a JSON document read from the file-like object *s*."""
    result = json.load(s)
    return result
# If anyjson is installed, register this module as an anyjson
# implementation so that libraries using anyjson transparently gain
# the to_primitive() fallback used by dumps() above.
try:
    import anyjson
except ImportError:
    pass
else:
    anyjson._modules.append((__name__, 'dumps', TypeError,
                             'loads', ValueError, 'load'))
    anyjson.force_implementation(__name__)
tempest-2014.1.dev4108.gf22b6cc/tempest/openstack/__init__.py 0000664 0001750 0001750 00000000000 12332757070 023644 0 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/ 0000775 0001750 0001750 00000000000 12332757136 021652 5 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/__init__.py 0000664 0001750 0001750 00000000000 12332757070 023746 0 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/ 0000775 0001750 0001750 00000000000 12332757136 023326 5 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/agents.py 0000664 0001750 0001750 00000003036 12332757070 025160 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# JSON schema validating the "list agents" (os-agents) compute API
# response: expects HTTP 200 and a body with a list of agent builds.
list_agents = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'agents': {
                'type': 'array',
                'items': {
                    'type': 'object',
                    'properties': {
                        # agent_id may be returned as integer or string.
                        'agent_id': {'type': ['integer', 'string']},
                        'hypervisor': {'type': 'string'},
                        'os': {'type': 'string'},
                        'architecture': {'type': 'string'},
                        'version': {'type': 'string'},
                        'url': {'type': 'string', 'format': 'uri'},
                        'md5hash': {'type': 'string'}
                    },
                    'required': ['agent_id', 'hypervisor', 'os',
                                 'architecture', 'version', 'url', 'md5hash']
                }
            }
        },
        'required': ['agents']
    }
}
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/quotas.py 0000664 0001750 0001750 00000003216 12332757070 025213 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# JSON schema shared by V2/V3 quota-set API responses; V3 modules extend
# a deepcopy of this schema with extra properties.
common_quota_set = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'quota_set': {
                'type': 'object',
                'properties': {
                    'instances': {'type': 'integer'},
                    'cores': {'type': 'integer'},
                    'ram': {'type': 'integer'},
                    'floating_ips': {'type': 'integer'},
                    'fixed_ips': {'type': 'integer'},
                    'metadata_items': {'type': 'integer'},
                    'key_pairs': {'type': 'integer'},
                    'security_groups': {'type': 'integer'},
                    'security_group_rules': {'type': 'integer'}
                },
                'required': ['instances', 'cores', 'ram',
                             'floating_ips', 'fixed_ips',
                             'metadata_items', 'key_pairs',
                             'security_groups', 'security_group_rules']
            }
        },
        'required': ['quota_set']
    }
}
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/flavors.py 0000664 0001750 0001750 00000004251 12332757070 025353 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api_schema.compute import parameter_types
# Schema for the brief "list flavors" response (name/links/id only).
list_flavors = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'flavors': {
                'type': 'array',
                'items': {
                    'type': 'object',
                    'properties': {
                        'name': {'type': 'string'},
                        'links': parameter_types.links,
                        'id': {'type': 'string'}
                    },
                    'required': ['name', 'links', 'id']
                }
            }
        },
        'required': ['flavors']
    }
}

# Full flavor representation shared by the detail-list and show-flavor
# schemas below; V3 modules extend deepcopies of those schemas.
common_flavor_info = {
    'type': 'object',
    'properties': {
        'name': {'type': 'string'},
        'links': parameter_types.links,
        'ram': {'type': 'integer'},
        'vcpus': {'type': 'integer'},
        'swap': {'type': 'integer'},
        'disk': {'type': 'integer'},
        'id': {'type': 'string'}
    },
    'required': ['name', 'links', 'ram', 'vcpus',
                 'swap', 'disk', 'id']
}

# Schema for "list flavors with details" (array of full flavors).
common_flavor_list_details = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'flavors': {
                'type': 'array',
                'items': common_flavor_info
            }
        },
        'required': ['flavors']
    }
}

# Schema for "show flavor" (a single full flavor).
common_flavor_details = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'flavor': common_flavor_info
        },
        'required': ['flavor']
    }
}
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/parameter_types.py 0000664 0001750 0001750 00000003506 12332757070 027105 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Reusable JSON-schema fragments shared by the compute response schemas.

# A list of {href, rel} hyperlink objects, as returned for most resources.
links = {
    'type': 'array',
    'items': {
        'type': 'object',
        'properties': {
            'href': {
                'type': 'string',
                'format': 'uri'
            },
            'rel': {'type': 'string'}
        },
        'required': ['href', 'rel']
    }
}

# NOTE(review): this pattern is unanchored and only matches lowercase hex;
# jsonschema 'pattern' uses a search, so it accepts any string containing
# a lowercase MAC-like substring — presumably intentional leniency, but
# worth confirming.
mac_address = {
    'type': 'string',
    'pattern': '(?:[a-f0-9]{2}:){5}[a-f0-9]{2}'
}

# Access IPs may legitimately be the empty string when unset.
access_ip_v4 = {
    'type': 'string',
    'anyOf': [{'format': 'ipv4'}, {'enum': ['']}]
}
access_ip_v6 = {
    'type': 'string',
    'anyOf': [{'format': 'ipv6'}, {'enum': ['']}]
}

# Maps a network label to the list of addresses attached on that network.
addresses = {
    'type': 'object',
    'patternProperties': {
        # NOTE: Here is for 'private' or something.
        '^[a-zA-Z0-9-_.]+$': {
            'type': 'array',
            'items': {
                'type': 'object',
                'properties': {
                    'version': {'type': 'integer'},
                    'addr': {
                        'type': 'string',
                        'anyOf': [
                            {'format': 'ipv4'},
                            {'format': 'ipv6'}
                        ]
                    }
                },
                'required': ['version', 'addr']
            }
        }
    }
}
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/v3/ 0000775 0001750 0001750 00000000000 12332757136 023656 5 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/v3/agents.py 0000664 0001750 0001750 00000001243 12332757070 025506 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Deleting an agent returns no response body; only the 204 (No Content)
# status code is validated.
delete_agent = dict(status_code=[204])
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/v3/quotas.py 0000664 0001750 0001750 00000004552 12332757070 025547 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.api_schema.compute import quotas
# V3 quota-set schema: the common V2/V3 schema plus a required 'id'.
quota_set = copy.deepcopy(quotas.common_quota_set)
quota_set['response_body']['properties']['quota_set']['properties'][
    'id'] = {'type': 'string'}
quota_set['response_body']['properties']['quota_set'][
    'required'].extend(['id'])

# In the V3 "detail" response each quota value is an object with
# reserved/limit/in_use counters instead of a bare integer.
quota_common_info = {
    'type': 'object',
    'properties': {
        'reserved': {'type': 'integer'},
        'limit': {'type': 'integer'},
        'in_use': {'type': 'integer'}
    },
    'required': ['reserved', 'limit', 'in_use']
}

# V3 detailed quota-set schema: add 'id' and replace every integer quota
# property with the reserved/limit/in_use object above.
quota_set_detail = copy.deepcopy(quotas.common_quota_set)
quota_set_detail['response_body']['properties']['quota_set']['properties'][
    'id'] = {'type': 'string'}
quota_set_detail['response_body']['properties']['quota_set']['properties'][
    'instances'] = quota_common_info
quota_set_detail['response_body']['properties']['quota_set']['properties'][
    'cores'] = quota_common_info
quota_set_detail['response_body']['properties']['quota_set']['properties'][
    'ram'] = quota_common_info
quota_set_detail['response_body']['properties']['quota_set']['properties'][
    'floating_ips'] = quota_common_info
quota_set_detail['response_body']['properties']['quota_set']['properties'][
    'fixed_ips'] = quota_common_info
quota_set_detail['response_body']['properties']['quota_set']['properties'][
    'metadata_items'] = quota_common_info
quota_set_detail['response_body']['properties']['quota_set']['properties'][
    'key_pairs'] = quota_common_info
quota_set_detail['response_body']['properties']['quota_set']['properties'][
    'security_groups'] = quota_common_info
quota_set_detail['response_body']['properties']['quota_set']['properties'][
    'security_group_rules'] = quota_common_info

# Quota deletion returns no body; validate the 204 status only.
delete_quota = {
    'status_code': [204]
}
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/v3/flavors.py 0000664 0001750 0001750 00000005276 12332757070 025713 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.api_schema.compute import flavors
from tempest.api_schema.compute import flavors_extra_specs
list_flavors_details = copy.deepcopy(flavors.common_flavor_list_details)
# NOTE: In v3 API, 'swap' comes as '0' not empty string '""'
# (In V2 API, it comes as empty string) So leaving 'swap' as integer type only.
# Defining extra attributes for V3 flavor schema
list_flavors_details['response_body']['properties']['flavors']['items'][
    'properties'].update({'disabled': {'type': 'boolean'},
                          'ephemeral': {'type': 'integer'},
                          'flavor-access:is_public': {'type': 'boolean'},
                          'os-flavor-rxtx:rxtx_factor': {'type': 'number'}})
# 'flavor-access' and 'os-flavor-rxtx' are API extensions.
# So they are not 'required'.
list_flavors_details['response_body']['properties']['flavors']['items'][
    'required'].extend(['disabled', 'ephemeral'])

# Setting extra specs on a V3 flavor responds with 201 Created.
set_flavor_extra_specs = copy.deepcopy(flavors_extra_specs.flavor_extra_specs)
set_flavor_extra_specs['status_code'] = [201]

# Unsetting an extra spec returns no body.
unset_flavor_extra_specs = {
    'status_code': [204]
}

get_flavor_details = copy.deepcopy(flavors.common_flavor_details)
# NOTE: In v3 API, 'swap' comes as '0' not empty string '""'
# (In V2 API, it comes as empty string) So leaving 'swap' as integer type only.
# Defining extra attributes for V3 flavor schema
get_flavor_details['response_body']['properties']['flavor'][
    'properties'].update({'disabled': {'type': 'boolean'},
                          'ephemeral': {'type': 'integer'},
                          'flavor-access:is_public': {'type': 'boolean'},
                          'os-flavor-rxtx:rxtx_factor': {'type': 'number'}})
# 'flavor-access' and 'os-flavor-rxtx' are API extensions.
# So they are not 'required'.
get_flavor_details['response_body']['properties']['flavor'][
    'required'].extend(['disabled', 'ephemeral'])

create_flavor_details = copy.deepcopy(get_flavor_details)
# Overriding the status code for create flavor V3 API.
create_flavor_details['status_code'] = [201]

# Flavor deletion returns no body.
delete_flavor = {
    'status_code': [204]
}
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/v3/certificates.py 0000664 0001750 0001750 00000001443 12332757070 026674 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.api_schema.compute import certificates
# V3 certificate creation reuses the common certificate schema but
# responds with 201 Created instead of 200.
create_certificate = copy.deepcopy(certificates._common_schema)
create_certificate['status_code'] = [201]
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/v3/hypervisors.py 0000664 0001750 0001750 00000004137 12332757070 026627 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.api_schema.compute import hypervisors
list_hypervisors_detail = copy.deepcopy(
    hypervisors.common_list_hypervisors_detail)
# Defining extra attributes for V3 list hypervisors detail schema
list_hypervisors_detail['response_body']['properties']['hypervisors'][
    'items']['properties']['os-pci:pci_stats'] = {'type': 'array'}

show_hypervisor = copy.deepcopy(hypervisors.common_show_hypervisor)
# Defining extra attributes for V3 show hypervisor schema
show_hypervisor['response_body']['properties']['hypervisor']['properties'][
    'os-pci:pci_stats'] = {'type': 'array'}

hypervisors_servers = copy.deepcopy(hypervisors.common_hypervisors_info)
# Defining extra attributes for V3 hypervisor servers schema
hypervisors_servers['response_body']['properties']['hypervisor']['properties'][
    'servers'] = {
        'type': 'array',
        'items': {
            'type': 'object',
            'properties': {
                # NOTE: Now the type of 'id' is integer,
                # but here allows 'string' also because we
                # will be able to change it to 'uuid' in
                # the future.
                'id': {'type': ['integer', 'string']},
                'name': {'type': 'string'}
            }
        }
    }
# The V3 API response body always contains the 'servers' attribute, even
# when no servers (VMs) are present on the hypervisor host.
hypervisors_servers['response_body']['properties']['hypervisor'][
    'required'] = ['id', 'hypervisor_hostname', 'servers']
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/v3/keypairs.py 0000664 0001750 0001750 00000002465 12332757070 026063 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api_schema.compute import keypairs
# Schema for the V3 "show keypair" response.
get_keypair = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'keypair': {
                'type': 'object',
                'properties': {
                    'public_key': {'type': 'string'},
                    'name': {'type': 'string'},
                    'fingerprint': {'type': 'string'}
                },
                'required': ['public_key', 'name', 'fingerprint']
            }
        },
        'required': ['keypair']
    }
}

# V3 keypair creation: same body as the common schema, 201 status.
create_keypair = {
    'status_code': [201],
    'response_body': keypairs.create_keypair
}

# Keypair deletion returns no body.
delete_keypair = {
    'status_code': [204],
}
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/v3/aggregates.py 0000664 0001750 0001750 00000001574 12332757070 026345 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.api_schema.compute import aggregates
# Aggregate deletion returns no body; validate the 204 status only.
delete_aggregate = {
    'status_code': [204]
}

create_aggregate = copy.deepcopy(aggregates.common_create_aggregate)
# V3 API's response status_code is 201
create_aggregate['status_code'] = [201]
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/v3/servers.py 0000664 0001750 0001750 00000005732 12332757070 025725 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.api_schema.compute import parameter_types
from tempest.api_schema.compute import servers
# Fields validated on a v3 create-server response body.
_create_server_props = {
    # NOTE: 'id' is a uuid on current OpenStack, but 'integer' is also
    # accepted because old OpenStack used integer server ids.
    'id': {'type': ['integer', 'string']},
    'os-security-groups:security_groups': {'type': 'array'},
    'links': parameter_types.links,
    'admin_password': {'type': 'string'},
    'os-access-ips:access_ip_v4': parameter_types.access_ip_v4,
    'os-access-ips:access_ip_v6': parameter_types.access_ip_v6
}
# POST /v3/servers: 202 Accepted with the new server's summary.
create_server = {
    'status_code': [202],
    'response_body': {
        'type': 'object',
        'properties': {
            'server': {
                'type': 'object',
                'properties': _create_server_props,
                # NOTE: os-access-ips:access_ip_v4/v6 come from an API
                # extension and some environments omit them, so they are
                # deliberately not required.
                'required': ['id', 'os-security-groups:security_groups',
                             'links', 'admin_password']
            }
        },
        'required': ['server']
    }
}
# V3 address entries carry 'type' and 'mac_addr' on top of the common
# per-network address schema; extend a private deep copy accordingly.
addresses_v3 = copy.deepcopy(parameter_types.addresses)
_address_item = addresses_v3['patternProperties']['^[a-zA-Z0-9-_.]+$']['items']
_address_item['properties'].update({
    'type': {'type': 'string'},
    'mac_addr': {'type': 'string'}
})
_address_item['required'].extend(['type', 'mac_addr'])
# V3 update-server: the common base schema plus v3-only attributes.
update_server = copy.deepcopy(servers.base_update_server)
_v3_server = update_server['response_body']['properties']['server']
_v3_server['properties'].update({
    'addresses': addresses_v3,
    'host_id': {'type': 'string'},
    'os-access-ips:access_ip_v4': parameter_types.access_ip_v4,
    'os-access-ips:access_ip_v6': parameter_types.access_ip_v6
})
# NOTE: os-access-ips:access_ip_v4/v6 are extension-provided and may be
# absent from a response, so only 'host_id' joins the required list.
_v3_server['required'].append('host_id')
# Attaching or detaching a volume is asynchronous: 202 with an empty body.
attach_detach_volume = {'status_code': [202]}
# Setting or getting a single metadata item returns the same body shape
# as the common set-server-metadata response.
set_get_server_metadata_item = copy.deepcopy(servers.set_server_metadata)
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/v3/extensions.py 0000664 0001750 0001750 00000002444 12332757070 026430 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Schema for one entry of the list-extensions response.
_extension = {
    'type': 'object',
    'properties': {
        'name': {'type': 'string'},
        'alias': {'type': 'string'},
        'description': {'type': 'string'},
        'version': {'type': 'integer'}
    },
    'required': ['name', 'alias', 'description', 'version']
}
# GET extensions: 200 with an array of extension descriptors.
list_extensions = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'extensions': {
                'type': 'array',
                'items': _extension
            }
        },
        'required': ['extensions']
    }
}
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/v3/__init__.py 0000664 0001750 0001750 00000000000 12332757070 025752 0 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/v3/availability_zone.py 0000664 0001750 0001750 00000003650 12332757070 027736 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.api_schema.compute import availability_zone as common
# Schema for one availability zone entry.
_az_zone = {
    'type': 'object',
    'properties': {
        'zone_name': {'type': 'string'},
        'zone_state': {
            'type': 'object',
            'properties': {
                'available': {'type': 'boolean'}
            },
            'required': ['available']
        },
        # NOTE: non-detail responses always report 'hosts' as null; this
        # is the only difference from the detail variant.
        'hosts': {'type': 'null'}
    },
    'required': ['zone_name', 'zone_state', 'hosts']
}
# Shared base for both the plain and the detail AZ-list schemas.
base = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'availability_zone_info': {
                'type': 'array',
                'items': _az_zone
            }
        },
        'required': ['availability_zone_info']
    }
}
get_availability_zone_list = copy.deepcopy(base)
# The detail variant replaces the null 'hosts' entry with the per-host
# service map shared between the v2 and v3 schemas.
get_availability_zone_list_detail = copy.deepcopy(base)
_az_detail_items = get_availability_zone_list_detail['response_body'][
    'properties']['availability_zone_info']['items']
_az_detail_items['properties']['hosts'] = common.detail
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/v3/hosts.py 0000664 0001750 0001750 00000002750 12332757070 025371 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.api_schema.compute import hosts
# Start-up host: 200 with the common power-action body wrapped in 'host'.
startup_host = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {'host': hosts.common_start_up_body},
        'required': ['host']
    }
}
def _host_power_schema(action):
    """Build a host power-action schema allowing only *action*.

    The shutdown/reboot APIs return the same body shape as startup but
    with 'power_action' set to 'shutdown'/'reboot'. The override must
    target the nested 'host' schema, where common_start_up_body defines
    'power_action'; the previous code patched the top-level
    response_body properties instead, leaving the nested (required)
    enum at ['startup'], so every valid shutdown/reboot response would
    have failed validation.
    """
    schema = copy.deepcopy(startup_host)
    schema['response_body']['properties']['host']['properties'][
        'power_action'] = {'enum': [action]}
    return schema

# The 'power_action' attribute of 'shutdown_host' API is 'shutdown'
shutdown_host = _host_power_schema('shutdown')
# The 'power_action' attribute of 'reboot_host' API is 'reboot'
reboot_host = _host_power_schema('reboot')
# Update host: 200 with the common update body wrapped in 'host'.
update_host = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {'host': hosts.update_host_common},
        'required': ['host']
    }
}
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/flavors_extra_specs.py 0000664 0001750 0001750 00000002276 12332757070 027760 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Flavor extra-specs response: a free-form mapping of spec keys to
# string values.
flavor_extra_specs = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'extra_specs': {
                'type': 'object',
                'patternProperties': {
                    # Keys: alphanumerics plus '_', '-', '.', ' ' and ':'.
                    # NOTE: raw string — '\-' and '\.' are not valid
                    # string escape sequences outside a raw literal.
                    r'^[a-zA-Z0-9_\-\. :]+$': {'type': 'string'}
                }
            }
        },
        'required': ['extra_specs']
    }
}
# Single extra-spec key response: the bare key/value mapping, without
# an 'extra_specs' wrapper.
flavor_extra_specs_key = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'patternProperties': {
            # NOTE: raw string — '\-' and '\.' are not valid string
            # escape sequences outside a raw literal.
            r'^[a-zA-Z0-9_\-\. :]+$': {'type': 'string'}
        }
    }
}
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/certificates.py 0000664 0001750 0001750 00000002370 12332757070 026344 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
# Certificate responses carry a 'certificate' object with the cert data
# and its private key.
_common_schema = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'certificate': {
                'type': 'object',
                'properties': {
                    'data': {'type': 'string'},
                    'private_key': {'type': 'string'}
                },
                'required': ['data', 'private_key']
            }
        },
        'required': ['certificate']
    }
}
# GET certificate never returns the private key, so that field is null.
get_certificate = copy.deepcopy(_common_schema)
get_certificate['response_body']['properties']['certificate'][
    'properties']['private_key'] = {'type': 'null'}
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/hypervisors.py 0000664 0001750 0001750 00000017516 12332757070 026304 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
# Every statistic nova reports for the hypervisor pool is an integer
# counter; build the per-field schemas from one list of names.
_STATISTIC_FIELDS = [
    'count', 'current_workload', 'disk_available_least', 'free_disk_gb',
    'free_ram_mb', 'local_gb', 'local_gb_used', 'memory_mb',
    'memory_mb_used', 'running_vms', 'vcpus', 'vcpus_used',
]
hypervisor_statistics = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'hypervisor_statistics': {
                'type': 'object',
                'properties': dict((name, {'type': 'integer'})
                                   for name in _STATISTIC_FIELDS),
                'required': list(_STATISTIC_FIELDS)
            }
        },
        'required': ['hypervisor_statistics']
    }
}
# Detail schema for one hypervisor in a list response.
# NOTE: disk_available_least is nullable here — some responses report it
# as null in list output.
_hypervisor_detail = {
    'type': 'object',
    'properties': {
        'cpu_info': {'type': 'string'},
        'current_workload': {'type': 'integer'},
        'disk_available_least': {'type': ['integer', 'null']},
        'host_ip': {'type': 'string', 'format': 'ip-address'},
        'free_disk_gb': {'type': 'integer'},
        'free_ram_mb': {'type': 'integer'},
        'hypervisor_hostname': {'type': 'string'},
        'hypervisor_type': {'type': 'string'},
        'hypervisor_version': {'type': 'integer'},
        'id': {'type': ['integer', 'string']},
        'local_gb': {'type': 'integer'},
        'local_gb_used': {'type': 'integer'},
        'memory_mb': {'type': 'integer'},
        'memory_mb_used': {'type': 'integer'},
        'running_vms': {'type': 'integer'},
        'service': {
            'type': 'object',
            'properties': {
                'host': {'type': 'string'},
                'id': {'type': ['integer', 'string']}
            },
            'required': ['host', 'id']
        },
        'vcpus': {'type': 'integer'},
        'vcpus_used': {'type': 'integer'}
    },
    'required': ['cpu_info', 'current_workload',
                 'disk_available_least', 'host_ip',
                 'free_disk_gb', 'free_ram_mb',
                 'hypervisor_hostname', 'hypervisor_type',
                 'hypervisor_version', 'id', 'local_gb',
                 'local_gb_used', 'memory_mb',
                 'memory_mb_used', 'running_vms', 'service',
                 'vcpus', 'vcpus_used']
}
# List-hypervisors-detail: 200 with an array of full descriptors.
common_list_hypervisors_detail = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'hypervisors': {
                'type': 'array',
                'items': _hypervisor_detail
            }
        },
        'required': ['hypervisors']
    }
}
# Detail schema for a single show-hypervisor response.
# NOTE(review): unlike the list schema, disk_available_least is not
# nullable here — confirm the difference is intentional.
_hypervisor_show = {
    'type': 'object',
    'properties': {
        'cpu_info': {'type': 'string'},
        'current_workload': {'type': 'integer'},
        'disk_available_least': {'type': 'integer'},
        'host_ip': {'type': 'string', 'format': 'ip-address'},
        'free_disk_gb': {'type': 'integer'},
        'free_ram_mb': {'type': 'integer'},
        'hypervisor_hostname': {'type': 'string'},
        'hypervisor_type': {'type': 'string'},
        'hypervisor_version': {'type': 'integer'},
        'id': {'type': ['integer', 'string']},
        'local_gb': {'type': 'integer'},
        'local_gb_used': {'type': 'integer'},
        'memory_mb': {'type': 'integer'},
        'memory_mb_used': {'type': 'integer'},
        'running_vms': {'type': 'integer'},
        'service': {
            'type': 'object',
            'properties': {
                'host': {'type': 'string'},
                'id': {'type': ['integer', 'string']}
            },
            'required': ['host', 'id']
        },
        'vcpus': {'type': 'integer'},
        'vcpus_used': {'type': 'integer'}
    },
    'required': ['cpu_info', 'current_workload',
                 'disk_available_least', 'host_ip',
                 'free_disk_gb', 'free_ram_mb',
                 'hypervisor_hostname', 'hypervisor_type',
                 'hypervisor_version', 'id', 'local_gb',
                 'local_gb_used', 'memory_mb', 'memory_mb_used',
                 'running_vms', 'service', 'vcpus', 'vcpus_used']
}
common_show_hypervisor = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {'hypervisor': _hypervisor_show},
        'required': ['hypervisor']
    }
}
# Minimal per-hypervisor entry: id and hostname only.
_hv_id_item = {
    'type': 'object',
    'properties': {
        'id': {'type': ['integer', 'string']},
        'hypervisor_hostname': {'type': 'string'}
    },
    'required': ['id', 'hypervisor_hostname']
}
common_hypervisors_detail = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'hypervisors': {
                'type': 'array',
                'items': _hv_id_item
            }
        },
        'required': ['hypervisors']
    }
}
# Single-hypervisor summary: id and hostname.
common_hypervisors_info = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'hypervisor': {
                'type': 'object',
                'properties': {
                    'id': {'type': ['integer', 'string']},
                    'hypervisor_hostname': {'type': 'string'},
                },
                'required': ['id', 'hypervisor_hostname']
            }
        },
        'required': ['hypervisor']
    }
}
# The uptime call additionally returns a required 'uptime' string.
hypervisor_uptime = copy.deepcopy(common_hypervisors_info)
_hv_uptime = hypervisor_uptime['response_body']['properties']['hypervisor']
_hv_uptime['properties']['uptime'] = {'type': 'string'}
_hv_uptime['required'] = ['id', 'hypervisor_hostname', 'uptime']
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/keypairs.py 0000664 0001750 0001750 00000004610 12332757070 025525 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# One list entry: a wrapper object holding a keypair summary. Only
# public_key, name and fingerprint are validated per entry.
_keypair_item = {
    'type': 'object',
    'properties': {
        'keypair': {
            'type': 'object',
            'properties': {
                'public_key': {'type': 'string'},
                'name': {'type': 'string'},
                'fingerprint': {'type': 'string'}
            },
            'required': ['public_key', 'name', 'fingerprint']
        }
    },
    'required': ['keypair']
}
# List keypairs: 200 with an array of wrapped keypair summaries.
list_keypairs = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'keypairs': {
                'type': 'array',
                'items': _keypair_item
            }
        },
        'required': ['keypairs']
    }
}
# Body schema for a create/import keypair response (status code is
# supplied by the per-version schemas that wrap this).
create_keypair = {
    'type': 'object',
    'properties': {
        'keypair': {
            'type': 'object',
            'properties': {
                'fingerprint': {'type': 'string'},
                'name': {'type': 'string'},
                'public_key': {'type': 'string'},
                # NOTE: 'user_id' is an integer today, but 'string' is
                # also allowed so a future change to uuid keeps passing.
                'user_id': {'type': ['integer', 'string']},
                'private_key': {'type': 'string'}
            },
            # NOTE: when the request supplies a public key (import), the
            # response carries no 'private_key', so it is not required.
            'required': ['fingerprint', 'name', 'public_key', 'user_id']
        }
    },
    'required': ['keypair']
}
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/aggregates.py 0000664 0001750 0001750 00000005204 12332757070 026007 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
# Schema fragment describing one aggregate resource; shared (read-only)
# by the list and get schemas below.
aggregate = {
    'type': 'object',
    'properties': {
        'availability_zone': {'type': ['string', 'null']},
        'created_at': {'type': 'string'},
        'deleted': {'type': 'boolean'},
        'deleted_at': {'type': ['string', 'null']},
        'hosts': {'type': 'array'},
        'id': {'type': 'integer'},
        'metadata': {'type': 'object'},
        'name': {'type': 'string'},
        'updated_at': {'type': ['string', 'null']}
    },
    'required': ['availability_zone', 'created_at', 'deleted',
                 'deleted_at', 'hosts', 'id', 'metadata',
                 'name', 'updated_at']
}
list_aggregates = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'aggregates': {
                'type': 'array',
                'items': aggregate
            }
        },
        'required': ['aggregates']
    }
}
get_aggregate = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'aggregate': aggregate
        },
        'required': ['aggregate']
    }
}
# Deep copy so later tweaks to either schema cannot leak into the other
# through a shared reference.
aggregate_set_metadata = copy.deepcopy(get_aggregate)
# The 'updated_at' attribute of 'update_aggregate' can't be null.
update_aggregate = copy.deepcopy(get_aggregate)
update_aggregate['response_body']['properties']['aggregate']['properties'][
    'updated_at'] = {
    'type': 'string'
}
# create-aggregate api doesn't have 'hosts' and 'metadata' attributes.
# NOTE: work on a deep copy of the shared 'aggregate' fragment. The
# previous code embedded 'aggregate' by reference and then deleted
# 'hosts'/'metadata' and reassigned 'required' on it, silently stripping
# those fields from list_aggregates/get_aggregate/aggregate_set_metadata
# as well.
_create_aggregate = copy.deepcopy(aggregate)
del _create_aggregate['properties']['hosts']
del _create_aggregate['properties']['metadata']
_create_aggregate['required'] = ['availability_zone', 'created_at', 'deleted',
                                 'deleted_at', 'id', 'name', 'updated_at']
common_create_aggregate = {
    'response_body': {
        'type': 'object',
        'properties': {
            'aggregate': _create_aggregate
        },
        'required': ['aggregate']
    }
}
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/servers.py 0000664 0001750 0001750 00000007164 12332757070 025376 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.api_schema.compute import parameter_types
# Get server password: 200 with a single 'password' string.
get_password = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {'password': {'type': 'string'}},
        'required': ['password']
    }
}
# Console descriptor: the console type and the URI to connect to.
_console = {
    'type': 'object',
    'properties': {
        'type': {'type': 'string'},
        'url': {'type': 'string', 'format': 'uri'}
    },
    'required': ['type', 'url']
}
get_vnc_console = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {'console': _console},
        'required': ['console']
    }
}
# Server attributes validated on an update-server response; shared by
# the v2 and v3 schemas, which extend it with version-specific fields.
_update_server_props = {
    'id': {'type': ['integer', 'string']},
    'name': {'type': 'string'},
    'status': {'type': 'string'},
    'image': {
        'type': 'object',
        'properties': {
            'id': {'type': ['integer', 'string']},
            'links': parameter_types.links
        },
        'required': ['id', 'links']
    },
    'flavor': {
        'type': 'object',
        'properties': {
            'id': {'type': ['integer', 'string']},
            'links': parameter_types.links
        },
        'required': ['id', 'links']
    },
    'user_id': {'type': 'string'},
    'tenant_id': {'type': 'string'},
    'created': {'type': 'string'},
    'updated': {'type': 'string'},
    'progress': {'type': 'integer'},
    'metadata': {'type': 'object'},
    'links': parameter_types.links,
    'addresses': parameter_types.addresses,
}
base_update_server = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'server': {
                'type': 'object',
                'properties': _update_server_props,
                'required': ['id', 'name', 'status', 'image', 'flavor',
                             'user_id', 'tenant_id', 'created', 'updated',
                             'progress', 'metadata', 'links', 'addresses']
            }
        }
    }
}
# Deleting a server returns 204 No Content; there is no body to check.
delete_server = {'status_code': [204]}
# Set server metadata: 200 with a free-form string-to-string mapping.
set_server_metadata = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'metadata': {
                'type': 'object',
                'patternProperties': {'^.+$': {'type': 'string'}}
            }
        },
        'required': ['metadata']
    }
}
# Listing server metadata returns the same body shape as setting it.
list_server_metadata = copy.deepcopy(set_server_metadata)
# Deleting a single metadata item returns 204 with an empty body.
delete_server_metadata_item = {'status_code': [204]}
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/interfaces.py 0000664 0001750 0001750 00000001247 12332757070 026024 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Detaching an interface is asynchronous: 202 with an empty body.
delete_interface = {'status_code': [202]}
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/__init__.py 0000664 0001750 0001750 00000000000 12332757070 025422 0 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/availability_zone.py 0000664 0001750 0001750 00000002600 12332757070 027400 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# NOTE: This is the detail information for "get az detail" API.
# The information is the same between v2 and v3 APIs.
# Per-service state reported inside the "get az detail" response.
_az_service_state = {
    'type': 'object',
    'properties': {
        'available': {'type': 'boolean'},
        'active': {'type': 'boolean'},
        'updated_at': {'type': 'string'}
    },
    'required': ['available', 'active', 'updated_at']
}
# NOTE: This is the detail information for "get az detail" API.
# The information is the same between v2 and v3 APIs.
detail = {
    'type': 'object',
    'patternProperties': {
        # NOTE: keys at this level are hostnames ...
        '^[a-zA-Z0-9-_.]+$': {
            'type': 'object',
            # ... and keys here are service names.
            'patternProperties': {'^.*$': _az_service_state}
        }
    }
}
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/services.py 0000664 0001750 0001750 00000004432 12332757070 025523 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Schema for one entry of the list-services response.
_service_item = {
    'type': 'object',
    'properties': {
        # NOTE: 'id' is an integer today, but 'string' is also allowed
        # so a future change to uuid keeps passing.
        'id': {'type': ['integer', 'string']},
        'zone': {'type': 'string'},
        'host': {'type': 'string'},
        'state': {'type': 'string'},
        'binary': {'type': 'string'},
        'status': {'type': 'string'},
        'updated_at': {'type': 'string'},
        'disabled_reason': {'type': ['string', 'null']}
    },
    'required': ['id', 'zone', 'host', 'state', 'binary',
                 'status', 'updated_at', 'disabled_reason']
}
list_services = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'services': {
                'type': 'array',
                'items': _service_item
            }
        },
        'required': ['services']
    }
}
# Enabling a service echoes its status, binary and host.
_service_state = {
    'type': 'object',
    'properties': {
        'status': {'type': 'string'},
        'binary': {'type': 'string'},
        'host': {'type': 'string'}
    },
    'required': ['status', 'binary', 'host']
}
enable_service = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {'service': _service_state},
        'required': ['service']
    }
}
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/hosts.py 0000664 0001750 0001750 00000005336 12332757070 025044 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Body fragment shared by the host power-action schemas: the host name
# plus the performed action ('startup' in this base form).
common_start_up_body = {
    'type': 'object',
    'properties': {'host': {'type': 'string'},
                   'power_action': {'enum': ['startup']}},
    'required': ['host', 'power_action']
}
# One entry of the list-hosts response.
_host_entry = {
    'type': 'object',
    'properties': {
        'host_name': {'type': 'string'},
        'service': {'type': 'string'},
        'zone': {'type': 'string'}
    },
    'required': ['host_name', 'service', 'zone']
}
list_hosts = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'hosts': {
                'type': 'array',
                'items': _host_entry
            }
        },
        'required': ['hosts']
    }
}
# Show-host: 200 with an array of per-resource usage entries.
show_host_detail = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'host': {
                'type': 'array',
                # NOTE: 'items' is the JSON Schema keyword for array
                # elements; the previous 'item' spelling is not a schema
                # keyword and was silently ignored, so element bodies
                # were never validated.
                'items': {
                    'type': 'object',
                    'properties': {
                        'resource': {
                            'type': 'object',
                            'properties': {
                                'cpu': {'type': 'integer'},
                                'disk_gb': {'type': 'integer'},
                                'host': {'type': 'string'},
                                'memory_mb': {'type': 'integer'},
                                'project': {'type': 'string'}
                            },
                            'required': ['cpu', 'disk_gb', 'host',
                                         'memory_mb', 'project']
                        }
                    },
                    'required': ['resource']
                }
            }
        },
        'required': ['host']
    }
}
# Update-host body fragment: maintenance mode and status are limited to
# nova's literal values.
_MAINTENANCE_MODES = ['on_maintenance', 'off_maintenance']
_HOST_STATUSES = ['enabled', 'disabled']
update_host_common = {
    'type': 'object',
    'properties': {
        'host': {'type': 'string'},
        'maintenance_mode': {'enum': _MAINTENANCE_MODES},
        'status': {'enum': _HOST_STATUSES}
    },
    'required': ['host', 'maintenance_mode', 'status']
}
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/flavors_access.py 0000664 0001750 0001750 00000002277 12332757070 026702 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# One flavor-access entry: which tenant may use which flavor.
_flavor_access_item = {
    'type': 'object',
    'properties': {
        'flavor_id': {'type': 'string'},
        'tenant_id': {'type': 'string'},
    },
    'required': ['flavor_id', 'tenant_id'],
}
# The add, remove and list flavor-access calls share one response shape.
add_remove_list_flavor_access = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'flavor_access': {
                'type': 'array',
                'items': _flavor_access_item
            }
        },
        'required': ['flavor_access']
    }
}
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/v2/ 0000775 0001750 0001750 00000000000 12332757136 023655 5 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/v2/agents.py 0000664 0001750 0001750 00000001243 12332757070 025505 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Deleting an agent returns 200; the body is not validated.
delete_agent = {'status_code': [200]}
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/v2/quotas.py 0000664 0001750 0001750 00000004036 12332757070 025543 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.api_schema.compute import quotas
# GET quota-set response: extend the common schema with the attributes
# that only the v2 API returns.
quota_set = copy.deepcopy(quotas.common_quota_set)
_get_quota_set = quota_set['response_body']['properties']['quota_set']
_get_quota_set['properties']['id'] = {'type': 'string'}
for _attr in ('injected_files',
              'injected_file_content_bytes',
              'injected_file_path_bytes'):
    _get_quota_set['properties'][_attr] = {'type': 'integer'}
_get_quota_set['required'].extend(['id',
                                   'injected_files',
                                   'injected_file_content_bytes',
                                   'injected_file_path_bytes'])
# Quota update response: also reports the injected-file limits, but
# (unlike the GET schema above) carries no 'id' attribute.
quota_set_update = copy.deepcopy(quotas.common_quota_set)
_update_quota_set = quota_set_update['response_body']['properties']['quota_set']
for _attr in ('injected_files',
              'injected_file_content_bytes',
              'injected_file_path_bytes'):
    _update_quota_set['properties'][_attr] = {'type': 'integer'}
    _update_quota_set['required'].append(_attr)
# Deleting a quota set returns 202 with an empty body.
delete_quota = {'status_code': [202]}
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/v2/volumes.py 0000664 0001750 0001750 00000012015 12332757070 025715 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# One entry of a volume's 'attachments' list. When a volume is not
# attached to any server the API returns "[{}]" (an array holding one
# empty object), so none of these properties can be marked 'required'.
# If it returned an empty array "[]" instead, they could be.
_attachment = {
    'type': 'object',
    'properties': {
        'id': {'type': 'string'},
        'device': {'type': 'string'},
        'volumeId': {'type': 'string'},
        'serverId': {'type': ['integer', 'string']}
    }
}

# Full representation of a single volume.
_volume = {
    'type': 'object',
    'properties': {
        'id': {'type': 'string'},
        'status': {'type': 'string'},
        'displayName': {'type': ['string', 'null']},
        'availabilityZone': {'type': 'string'},
        'createdAt': {'type': 'string'},
        'displayDescription': {'type': ['string', 'null']},
        'volumeType': {'type': 'string'},
        'snapshotId': {'type': ['string', 'null']},
        'metadata': {'type': 'object'},
        'size': {'type': 'integer'},
        'attachments': {
            'type': 'array',
            'items': _attachment
        }
    },
    'required': ['id', 'status', 'displayName', 'availabilityZone',
                 'createdAt', 'displayDescription', 'volumeType',
                 'snapshotId', 'metadata', 'size', 'attachments']
}

# Both create-volume and get-volume answer 200 with a 'volume' wrapper.
create_get_volume = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {'volume': _volume},
        'required': ['volume']
    }
}
# One element of the 'volumes' list. As with the single-volume schema,
# an unattached volume reports attachments as "[{}]" (an array with one
# empty object), so the attachment properties cannot be 'required'.
_volume_item = {
    'type': 'object',
    'properties': {
        'id': {'type': 'string'},
        'status': {'type': 'string'},
        'displayName': {'type': ['string', 'null']},
        'availabilityZone': {'type': 'string'},
        'createdAt': {'type': 'string'},
        'displayDescription': {'type': ['string', 'null']},
        'volumeType': {'type': 'string'},
        'snapshotId': {'type': ['string', 'null']},
        'metadata': {'type': 'object'},
        'size': {'type': 'integer'},
        'attachments': {
            'type': 'array',
            'items': {
                'type': 'object',
                'properties': {
                    'id': {'type': 'string'},
                    'device': {'type': 'string'},
                    'volumeId': {'type': 'string'},
                    'serverId': {'type': ['integer', 'string']}
                }
            }
        }
    },
    'required': ['id', 'status', 'displayName',
                 'availabilityZone', 'createdAt',
                 'displayDescription', 'volumeType',
                 'snapshotId', 'metadata', 'size',
                 'attachments']
}

# GET /os-volumes answers 200 with an array of volume representations.
list_volumes = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'volumes': {
                'type': 'array',
                'items': _volume_item
            }
        },
        'required': ['volumes']
    }
}
# Deleting a volume returns 202 with an empty body.
delete_volume = {'status_code': [202]}
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/v2/flavors.py 0000664 0001750 0001750 00000004736 12332757070 025712 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.api_schema.compute import flavors
list_flavors_details = copy.deepcopy(flavors.common_flavor_list_details)
_flavor_list_props = list_flavors_details['response_body']['properties'][
    'flavors']['items']['properties']
# 'swap' is normally an integer value, but an empty swap is returned as
# "", so both string and integer must be accepted.
_flavor_list_props['swap'] = {'type': ['string', 'integer']}
# Extra V2 attributes. 'OS-FLV-DISABLED', 'os-flavor-access',
# 'rxtx_factor' and 'OS-FLV-EXT-DATA' are API extensions, so none of
# them is added to 'required'.
_flavor_list_props['OS-FLV-DISABLED:disabled'] = {'type': 'boolean'}
_flavor_list_props['os-flavor-access:is_public'] = {'type': 'boolean'}
_flavor_list_props['rxtx_factor'] = {'type': 'number'}
_flavor_list_props['OS-FLV-EXT-DATA:ephemeral'] = {'type': 'integer'}
# Unsetting a flavor extra spec returns 200 with an empty body.
unset_flavor_extra_specs = {'status_code': [200]}
create_get_flavor_details = copy.deepcopy(flavors.common_flavor_details)
_flavor_detail_props = create_get_flavor_details['response_body'][
    'properties']['flavor']['properties']
# 'swap' is normally an integer value, but an empty swap is returned as
# "", so both string and integer must be accepted.
_flavor_detail_props['swap'] = {'type': ['string', 'integer']}
# Extra V2 attributes. 'OS-FLV-DISABLED', 'os-flavor-access',
# 'rxtx_factor' and 'OS-FLV-EXT-DATA' are API extensions, so none of
# them is added to 'required'.
_flavor_detail_props['OS-FLV-DISABLED:disabled'] = {'type': 'boolean'}
_flavor_detail_props['os-flavor-access:is_public'] = {'type': 'boolean'}
_flavor_detail_props['rxtx_factor'] = {'type': 'number'}
_flavor_detail_props['OS-FLV-EXT-DATA:ephemeral'] = {'type': 'integer'}
# Deleting a flavor returns 202 with an empty body.
delete_flavor = {'status_code': [202]}
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/v2/limits.py 0000664 0001750 0001750 00000010675 12332757070 025536 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Names of the absolute-limit counters. Every one of them is an integer
# and all of them are required in the response.
_absolute_limits = [
    'maxImageMeta', 'maxPersonality', 'maxPersonalitySize',
    'maxSecurityGroupRules', 'maxSecurityGroups', 'maxServerMeta',
    'maxTotalCores', 'maxTotalFloatingIps', 'maxTotalInstances',
    'maxTotalKeypairs', 'maxTotalRAMSize', 'totalCoresUsed',
    'totalFloatingIpsUsed', 'totalInstancesUsed', 'totalRAMUsed',
    'totalSecurityGroupsUsed']

_absolute = {
    'type': 'object',
    'properties': dict((name, {'type': 'integer'})
                       for name in _absolute_limits),
    'required': _absolute_limits
}

# One entry of a rate limit's 'limit' list.
_rate_limit = {
    'type': 'object',
    'properties': {
        'next-available': {'type': 'string'},
        'remaining': {'type': 'integer'},
        'unit': {'type': 'string'},
        'value': {'type': 'integer'},
        'verb': {'type': 'string'}
    }
}

_rate = {
    'type': 'array',
    'items': {
        'type': 'object',
        'properties': {
            'limit': {
                'type': 'array',
                'items': _rate_limit
            },
            'regex': {'type': 'string'},
            'uri': {'type': 'string'}
        }
    }
}

# GET /limits: absolute quotas plus per-URI rate limits.
get_limit = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'limits': {
                'type': 'object',
                'properties': {
                    'absolute': _absolute,
                    'rate': _rate
                },
                'required': ['absolute', 'rate']
            }
        },
        'required': ['limits']
    }
}
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/v2/certificates.py 0000664 0001750 0001750 00000001371 12332757070 026673 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.api_schema.compute import certificates
create_certificate = copy.deepcopy(certificates._common_schema)
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/v2/hypervisors.py 0000664 0001750 0001750 00000002764 12332757070 026632 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.api_schema.compute import hypervisors
# Extend the common hypervisor-detail schema with the V2-specific
# 'servers' attribute.
hypervisors_servers = copy.deepcopy(hypervisors.common_hypervisors_detail)
_hypervisor_props = hypervisors_servers['response_body']['properties'][
    'hypervisors']['items']['properties']
_hypervisor_props['servers'] = {
    'type': 'array',
    'items': {
        'type': 'object',
        'properties': {
            # NOTE: 'id' is currently an integer, but 'string' is also
            # allowed so the API will be able to move to uuid ids in
            # the future.
            'id': {'type': ['integer', 'string']},
            'name': {'type': 'string'}
        }
    }
}
# If no server (VM) runs on the hypervisor host, the V2 API omits the
# 'servers' attribute from the response body entirely, so it is not
# added to 'required'.
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/v2/keypairs.py 0000664 0001750 0001750 00000004266 12332757070 026063 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api_schema.compute import keypairs
# Representation of a single keypair as returned by GET.
# The live API includes all of the attributes below, but the Nova API
# sample files only show 'public_key', 'name' and 'fingerprint', so only
# those three are marked 'required'.
_keypair = {
    'type': 'object',
    'properties': {
        'public_key': {'type': 'string'},
        'name': {'type': 'string'},
        'fingerprint': {'type': 'string'},
        # NOTE: 'user_id' is currently an integer, but 'string' is also
        # allowed so it can become a uuid in the future.
        'user_id': {'type': ['integer', 'string']},
        'deleted': {'type': 'boolean'},
        'created_at': {'type': 'string'},
        'updated_at': {'type': ['string', 'null']},
        'deleted_at': {'type': ['string', 'null']},
        'id': {'type': 'integer'}
    },
    'required': ['public_key', 'name', 'fingerprint']
}

get_keypair = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {'keypair': _keypair},
        'required': ['keypair']
    }
}
# POST /os-keypairs answers 200 with the common create-keypair body.
create_keypair = {
    'response_body': keypairs.create_keypair,
    'status_code': [200],
}
# Deleting a keypair returns 202 with an empty body.
delete_keypair = {'status_code': [202]}
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/v2/tenant_usages.py 0000664 0001750 0001750 00000005761 12332757070 027075 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
# One element of 'server_usages': per-instance accounting data.
_server_usage_props = {
    'ended_at': {
        'oneOf': [
            {'type': 'string'},
            {'type': 'null'}
        ]
    },
    'flavor': {'type': 'string'},
    'hours': {'type': 'number'},
    'instance_id': {'type': 'string'},
    'local_gb': {'type': 'integer'},
    'memory_mb': {'type': 'integer'},
    'name': {'type': 'string'},
    'started_at': {'type': 'string'},
    'state': {'type': 'string'},
    'tenant_id': {'type': 'string'},
    'uptime': {'type': 'integer'},
    'vcpus': {'type': 'integer'},
}

# Every per-instance attribute is mandatory; the alphabetical order of
# the property names matches the original 'required' list.
_server_usages = {
    'type': 'array',
    'items': {
        'type': 'object',
        'properties': _server_usage_props,
        'required': sorted(_server_usage_props)
    }
}
# Usage summary for one tenant as returned by the list call. The
# 'server_usages' detail is present but optional here; only the 'get'
# variant marks it as required.
_summary_fields = ['start', 'stop', 'tenant_id',
                   'total_hours', 'total_local_gb_usage',
                   'total_memory_mb_usage', 'total_vcpus_usage']
_tenant_usage_props = {'server_usages': _server_usages}
for _field in _summary_fields:
    # The 'total_*' aggregates are numbers; the rest are strings.
    _field_type = 'number' if _field.startswith('total_') else 'string'
    _tenant_usage_props[_field] = {'type': _field_type}
_tenant_usage_list = {
    'type': 'object',
    'properties': _tenant_usage_props,
    'required': list(_summary_fields)
}
# The 'get' call additionally requires 'server_usages'; otherwise it is
# identical to the list variant.
_tenant_usage_get = copy.deepcopy(_tenant_usage_list)
_tenant_usage_get['required'] = (['server_usages'] +
                                 _tenant_usage_list['required'])
# GET /os-simple-tenant-usage: an array of per-tenant usage summaries.
list_tenant = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'tenant_usages': {'type': 'array',
                              'items': _tenant_usage_list}
        },
        'required': ['tenant_usages']
    }
}
# GET /os-simple-tenant-usage/{tenant}: a single usage summary with the
# stricter 'required' list.
get_tenant = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {'tenant_usage': _tenant_usage_get},
        'required': ['tenant_usage']
    }
}
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/v2/aggregates.py 0000664 0001750 0001750 00000001574 12332757070 026344 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.api_schema.compute import aggregates
# Deleting an aggregate returns 200 with an empty body.
delete_aggregate = {'status_code': [200]}
# V2 create-aggregate matches the common schema except that it answers
# with HTTP 200.
create_aggregate = dict(copy.deepcopy(aggregates.common_create_aggregate),
                        status_code=[200])
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/v2/floating_ips.py 0000664 0001750 0001750 00000006606 12332757070 026712 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Properties of one floating-ip entry in a listing. 'id' and
# 'instance_id' currently come back as integers, but 'string' is also
# accepted so the API can move to uuids in the future.
_listed_floating_ip = {
    'type': 'object',
    'properties': {
        'id': {'type': ['integer', 'string']},
        'pool': {'type': ['string', 'null']},
        'instance_id': {'type': ['integer', 'string', 'null']},
        'ip': {
            'type': 'string',
            'format': 'ip-address'
        },
        'fixed_ip': {
            'type': ['string', 'null'],
            'format': 'ip-address'
        }
    },
    'required': ['id', 'pool', 'instance_id', 'ip', 'fixed_ip']
}

list_floating_ips = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'floating_ips': {
                'type': 'array',
                'items': _listed_floating_ip
            }
        },
        'required': ['floating_ips']
    }
}
# Single floating-ip representation. As above, 'id' and 'instance_id'
# also allow 'string' so the API can move to uuids in the future.
_single_floating_ip = {
    'type': 'object',
    'properties': {
        'id': {'type': ['integer', 'string']},
        'pool': {'type': ['string', 'null']},
        'instance_id': {'type': ['integer', 'string', 'null']},
        'ip': {
            'type': 'string',
            'format': 'ip-address'
        },
        'fixed_ip': {
            'type': ['string', 'null'],
            'format': 'ip-address'
        }
    },
    'required': ['id', 'pool', 'instance_id', 'ip', 'fixed_ip']
}

floating_ip = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {'floating_ip': _single_floating_ip},
        'required': ['floating_ip']
    }
}
# GET /os-floating-ip-pools: each pool is identified by its name only.
floating_ip_pools = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'floating_ip_pools': {
                'type': 'array',
                'items': {
                    'type': 'object',
                    'properties': {'name': {'type': 'string'}},
                    'required': ['name']
                }
            }
        },
        'required': ['floating_ip_pools']
    }
}
# Associating or disassociating a floating ip returns 202, no body.
add_remove_floating_ip = {'status_code': [202]}
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/v2/servers.py 0000664 0001750 0001750 00000010226 12332757070 025716 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.api_schema.compute import parameter_types
from tempest.api_schema.compute import servers
# Minimal server representation returned by a successful boot request.
_created_server = {
    'type': 'object',
    'properties': {
        # NOTE: 'id' is a uuid today, but 'integer' is still accepted
        # because old OpenStack releases used integer server ids.
        'id': {'type': ['integer', 'string']},
        'security_groups': {'type': 'array'},
        'links': parameter_types.links,
        'adminPass': {'type': 'string'},
        'OS-DCF:diskConfig': {'type': 'string'}
    },
    # 'OS-DCF:diskConfig' is an API extension and some environments omit
    # it. 'adminPass' disappears when nova runs with
    # enable_instance_password=False. Neither is 'required'.
    'required': ['id', 'security_groups', 'links']
}

create_server = {
    'status_code': [202],
    'response_body': {
        'type': 'object',
        'properties': {'server': _created_server},
        'required': ['server']
    }
}
update_server = copy.deepcopy(servers.base_update_server)
_updated_server = update_server['response_body']['properties']['server']
_updated_server['properties'].update({
    'hostId': {'type': 'string'},
    'OS-DCF:diskConfig': {'type': 'string'},
    'accessIPv4': parameter_types.access_ip_v4,
    'accessIPv6': parameter_types.access_ip_v6
})
# 'OS-DCF:diskConfig' and accessIPv4/v6 are API extensions and some
# environments return responses without them, so only 'hostId' joins
# the 'required' list.
_updated_server['required'].append('hostId')
# One virtual interface. 'OS-EXT-VIF-NET:net_id' is an API extension,
# so it is not part of 'required'.
_virtual_interface = {
    'type': 'object',
    'properties': {
        'id': {'type': 'string'},
        'mac_address': parameter_types.mac_address,
        'OS-EXT-VIF-NET:net_id': {'type': 'string'}
    },
    'required': ['id', 'mac_address']
}

list_virtual_interfaces = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'virtual_interfaces': {
                'type': 'array',
                'items': _virtual_interface
            }
        },
        'required': ['virtual_interfaces']
    }
}
# Attaching a volume returns 200 with the new attachment record.
_volume_attachment = {
    'type': 'object',
    'properties': {
        'id': {'type': 'string'},
        'device': {'type': 'string'},
        'volumeId': {'type': 'string'},
        'serverId': {'type': ['integer', 'string']}
    },
    'required': ['id', 'device', 'volumeId', 'serverId']
}

attach_volume = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {'volumeAttachment': _volume_attachment},
        'required': ['volumeAttachment']
    }
}
# Detaching a volume returns 202 with an empty body.
detach_volume = {'status_code': [202]}
# Setting or getting one server metadata item: the 'meta' object maps
# arbitrary non-empty keys to string values.
set_get_server_metadata_item = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'meta': {
                'type': 'object',
                'patternProperties': {'^.+$': {'type': 'string'}}
            }
        },
        'required': ['meta']
    }
}
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/v2/extensions.py 0000664 0001750 0001750 00000003223 12332757070 026423 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# GET /extensions: every loaded API extension with its metadata.
list_extensions = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'extensions': {
                'type': 'array',
                'items': {
                    'type': 'object',
                    'properties': {
                        'updated': {
                            'type': 'string',
                            # BUG FIX: this was 'data-time', which is not
                            # a JSON Schema format name; validators ignore
                            # unknown formats, so the check never ran.
                            # 'date-time' is the standard format keyword.
                            'format': 'date-time'
                        },
                        'name': {'type': 'string'},
                        'links': {'type': 'array'},
                        'namespace': {
                            'type': 'string',
                            'format': 'uri'
                        },
                        'alias': {'type': 'string'},
                        'description': {'type': 'string'}
                    },
                    'required': ['updated', 'name', 'links', 'namespace',
                                 'alias', 'description']
                }
            }
        },
        'required': ['extensions']
    }
}
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/v2/__init__.py 0000664 0001750 0001750 00000000000 12332757070 025751 0 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/v2/availability_zone.py 0000664 0001750 0001750 00000003640 12332757070 027734 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.api_schema.compute import availability_zone as common
# Availability-zone entry for the non-detail listing. NOTE: the only
# difference from the detail variant is 'hosts', which is always null
# here (the detail schema overrides it with a host map).
_zone = {
    'type': 'object',
    'properties': {
        'zoneName': {'type': 'string'},
        'zoneState': {
            'type': 'object',
            'properties': {'available': {'type': 'boolean'}},
            'required': ['available']
        },
        'hosts': {'type': 'null'}
    },
    'required': ['zoneName', 'zoneState', 'hosts']
}

base = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'properties': {
            'availabilityZoneInfo': {
                'type': 'array',
                'items': _zone
            }
        },
        'required': ['availabilityZoneInfo']
    }
}
get_availability_zone_list = copy.deepcopy(base)
# The detail variant replaces the null 'hosts' placeholder with the
# common per-host detail schema.
get_availability_zone_list_detail = copy.deepcopy(base)
_zone_item = get_availability_zone_list_detail['response_body'][
    'properties']['availabilityZoneInfo']['items']
_zone_item['properties']['hosts'] = common.detail
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/v2/hosts.py 0000664 0001750 0001750 00000002375 12332757070 025373 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from tempest.api_schema.compute import hosts
startup_host = {
    'status_code': [200],
    'response_body': hosts.common_start_up_body
}
update_host = {
    'status_code': [200],
    'response_body': hosts.update_host_common
}


def _host_power_action(action):
    # Copy the start-up schema, pinning 'power_action' to *action*.
    schema = copy.deepcopy(startup_host)
    schema['response_body']['properties']['power_action'] = {
        'enum': [action]
    }
    return schema

# The shutdown/reboot host APIs report the matching verb as their
# 'power_action' attribute.
shutdown_host = _host_power_action('shutdown')
reboot_host = _host_power_action('reboot')
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/v2/images.py 0000664 0001750 0001750 00000007320 12332757070 025473 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.api_schema.compute import parameter_types
# JSON schema shared by the "show image" and "list images (detail)"
# responses of the compute v2 image API.
common_image_schema = {
    'type': 'object',
    'properties': {
        'id': {'type': 'string'},
        'name': {'type': 'string'},
        'status': {'type': 'string'},
        'created': {'type': 'string'},
        'updated': {'type': 'string'},
        'links': parameter_types.links,
        'minDisk': {'type': 'integer'},
        'minRam': {'type': 'integer'},
        'progress': {'type': 'integer'},
        'metadata': {'type': 'object'},
        'server': {
            'type': 'object',
            'properties': {
                # NOTE: 'id' is an integer today, but 'string' is also
                # accepted so the API can move to uuid values later.
                'id': {'type': ['integer', 'string']},
                'links': parameter_types.links
            },
            'required': ['id', 'links']
        },
        'OS-EXT-IMG-SIZE:size': {'type': 'integer'}
    },
    # 'server' only appears when the image is associated with a server,
    # and 'OS-EXT-IMG-SIZE:size' comes from an API extension, so
    # neither is listed as required.
    'required': ['id', 'status', 'updated', 'links', 'name',
                 'created', 'minDisk', 'minRam', 'progress',
                 'metadata']
}
# GET /images/{id} -- a single image wrapped under the 'image' key.
get_image = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'required': ['image'],
        'properties': {'image': common_image_schema}
    }
}
# GET /images -- brief listing; each entry carries only id/links/name.
list_images = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'required': ['images'],
        'properties': {
            'images': {
                'type': 'array',
                'items': {
                    'type': 'object',
                    'required': ['id', 'links', 'name'],
                    'properties': {
                        'id': {'type': 'string'},
                        'name': {'type': 'string'},
                        'links': parameter_types.links
                    }
                }
            }
        }
    }
}
# Image creation returns 202 plus a Location header pointing at the
# new image; no response body is validated.
create_image = {
    'status_code': [202],
    'response_header': {
        'type': 'object',
        'required': ['location'],
        'properties': {
            'location': {'format': 'uri', 'type': 'string'}
        }
    }
}
# DELETE /images/{id} -- 204, empty body.
delete = {'status_code': [204]}
# Image metadata responses -- the whole metadata dict in the body.
image_metadata = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'required': ['metadata'],
        'properties': {'metadata': {'type': 'object'}}
    }
}
# Single metadata item responses -- one 'meta' object in the body.
image_meta_item = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'required': ['meta'],
        'properties': {'meta': {'type': 'object'}}
    }
}
# GET /images/detail -- full image representation for every entry.
list_images_details = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'required': ['images'],
        'properties': {
            'images': {'type': 'array', 'items': common_image_schema}
        }
    }
}
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/v2/fixed_ips.py 0000664 0001750 0001750 00000002532 12332757070 026200 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# GET /os-fixed-ips/{ip} -- details for one fixed IP address.
fixed_ips = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'required': ['fixed_ip'],
        'properties': {
            'fixed_ip': {
                'type': 'object',
                'required': ['address', 'cidr', 'host', 'hostname'],
                'properties': {
                    'address': {'type': 'string',
                                'format': 'ip-address'},
                    'cidr': {'type': 'string'},
                    'host': {'type': 'string'},
                    'hostname': {'type': 'string'}
                }
            }
        }
    }
}
# reserve/unreserve actions -- 202 with a plain string body.
fixed_ip_action = {'status_code': [202], 'response_body': {'type': 'string'}}
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/v2/instance_usage_audit_logs.py 0000664 0001750 0001750 00000004044 12332757070 031430 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Shape of a single instance-usage-audit-log entry, shared by the
# "show" and "list" schemas below.
common_instance_usage_audit_log = {
    'type': 'object',
    'required': ['hosts_not_run', 'log', 'num_hosts', 'num_hosts_done',
                 'num_hosts_not_run', 'num_hosts_running', 'overall_status',
                 'period_beginning', 'period_ending', 'total_errors',
                 'total_instances'],
    'properties': {
        'log': {'type': 'object'},
        'overall_status': {'type': 'string'},
        'period_beginning': {'type': 'string'},
        'period_ending': {'type': 'string'},
        'hosts_not_run': {'type': 'array',
                          'items': {'type': 'string'}},
        'num_hosts': {'type': 'integer'},
        'num_hosts_done': {'type': 'integer'},
        'num_hosts_not_run': {'type': 'integer'},
        'num_hosts_running': {'type': 'integer'},
        'total_errors': {'type': 'integer'},
        'total_instances': {'type': 'integer'}
    }
}
# Show one audit log, keyed by 'instance_usage_audit_log'.
get_instance_usage_audit_log = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'required': ['instance_usage_audit_log'],
        'properties': {
            'instance_usage_audit_log': common_instance_usage_audit_log
        }
    }
}
# List endpoint: one aggregated log object keyed by the plural name.
list_instance_usage_audit_log = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'required': ['instance_usage_audit_logs'],
        'properties': {
            'instance_usage_audit_logs': common_instance_usage_audit_log
        }
    }
}
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/v2/security_groups.py 0000664 0001750 0001750 00000006024 12332757070 027474 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Property map for one security group rule; reused both inside
# common_security_group and by the create-rule response below.
common_security_group_rule = {
    'id': {'type': ['string', 'integer']},
    # 'parent_group_id' can be UUID so defining it as 'string' also.
    'parent_group_id': {'type': ['string', 'integer', 'null']},
    'ip_protocol': {'type': ['string', 'null']},
    'from_port': {'type': ['integer', 'null']},
    'to_port': {'type': ['integer', 'null']},
    'group': {
        'type': 'object',
        'properties': {
            'tenant_id': {'type': 'string'},
            'name': {'type': 'string'}
        }
    },
    'ip_range': {
        'type': 'object',
        'properties': {
            'cidr': {'type': 'string'}
        }
        # When an optional argument such as 'group_id' is supplied in
        # the request body, 'cidr' is absent from the response, so it
        # is not 'required'.
    }
}
# Shape of one security group as returned by list/get/create/update.
common_security_group = {
    'type': 'object',
    'required': ['id', 'name', 'tenant_id', 'rules', 'description'],
    'properties': {
        'id': {'type': ['integer', 'string']},
        'name': {'type': 'string'},
        'description': {'type': 'string'},
        'tenant_id': {'type': 'string'},
        'rules': {
            'type': 'array',
            'items': {
                'type': ['object', 'null'],
                'properties': common_security_group_rule
            }
        }
    }
}
# GET /os-security-groups -- array of security groups.
list_security_groups = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'required': ['security_groups'],
        'properties': {
            'security_groups': {
                'type': 'array',
                'items': common_security_group
            }
        }
    }
}
# Show, create and update all return one group under 'security_group'.
get_security_group = create_security_group = update_security_group = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'required': ['security_group'],
        'properties': {'security_group': common_security_group}
    }
}
# POST /os-security-group-rules -- the created rule is echoed back.
create_security_group_rule = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'required': ['security_group_rule'],
        'properties': {
            'security_group_rule': {
                'type': 'object',
                'properties': common_security_group_rule,
                'required': ['from_port', 'to_port', 'group', 'ip_protocol',
                             'parent_group_id', 'id', 'ip_range']
            }
        }
    }
}
# DELETE rule -- 202, no body to validate.
delete_security_group_rule = {'status_code': [202]}
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/migrations.py 0000664 0001750 0001750 00000004601 12332757070 026052 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# One migration record as it appears in the list response.
_migration = {
    'type': 'object',
    'properties': {
        # NOTE: 'id' is an integer today, but 'string' is allowed too
        # so the API can move to uuid values later.
        'id': {'type': ['integer', 'string']},
        'status': {'type': 'string'},
        'instance_uuid': {'type': 'string'},
        'source_node': {'type': 'string'},
        'source_compute': {'type': 'string'},
        'dest_node': {'type': 'string'},
        'dest_compute': {'type': 'string'},
        'dest_host': {'type': 'string'},
        'old_instance_type_id': {
            'type': ['integer', 'string']
        },
        'new_instance_type_id': {
            'type': ['integer', 'string']
        },
        'created_at': {'type': 'string'},
        'updated_at': {'type': ['string', 'null']}
    },
    'required': [
        'id', 'status', 'instance_uuid', 'source_node',
        'source_compute', 'dest_node', 'dest_compute',
        'dest_host', 'old_instance_type_id',
        'new_instance_type_id', 'created_at', 'updated_at'
    ]
}

# GET /os-migrations -- array of migration records.
list_migrations = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'required': ['migrations'],
        'properties': {
            'migrations': {'type': 'array', 'items': _migration}
        }
    }
}
tempest-2014.1.dev4108.gf22b6cc/tempest/api_schema/compute/version.py 0000664 0001750 0001750 00000004151 12332757070 025363 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 NEC Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# A single entry of the version document's 'links' array.
_link = {
    'type': 'object',
    'properties': {
        'href': {'type': 'string', 'format': 'uri'},
        'rel': {'type': 'string'},
        'type': {'type': 'string'}
    },
    'required': ['href', 'rel']
}

# A single entry of the version document's 'media-types' array.
_media_type = {
    'type': 'object',
    'properties': {
        'base': {'type': 'string'},
        'type': {'type': 'string'}
    },
    'required': ['base', 'type']
}

# Version discovery response: one 'version' object.
version = {
    'status_code': [200],
    'response_body': {
        'type': 'object',
        'required': ['version'],
        'properties': {
            'version': {
                'type': 'object',
                'required': ['id', 'links', 'media-types', 'status',
                             'updated'],
                'properties': {
                    'id': {'type': 'string'},
                    'links': {'type': 'array', 'items': _link},
                    'media-types': {'type': 'array',
                                    'items': _media_type},
                    'status': {'type': 'string'},
                    'updated': {'type': 'string', 'format': 'date-time'}
                }
            }
        }
    }
}
tempest-2014.1.dev4108.gf22b6cc/tempest/clients.py 0000664 0001750 0001750 00000077143 12332757070 021605 0 ustar chuck chuck 0000000 0000000 # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import keystoneclient.exceptions
import keystoneclient.v2_0.client
from tempest import auth
from tempest.common.rest_client import NegativeRestClient
from tempest import config
from tempest import exceptions
from tempest import manager
from tempest.openstack.common import log as logging
from tempest.services.baremetal.v1.client_json import BaremetalClientJSON
from tempest.services import botoclients
from tempest.services.compute.json.agents_client import \
AgentsClientJSON
from tempest.services.compute.json.aggregates_client import \
AggregatesClientJSON
from tempest.services.compute.json.availability_zone_client import \
AvailabilityZoneClientJSON
from tempest.services.compute.json.certificates_client import \
CertificatesClientJSON
from tempest.services.compute.json.extensions_client import \
ExtensionsClientJSON
from tempest.services.compute.json.fixed_ips_client import FixedIPsClientJSON
from tempest.services.compute.json.flavors_client import FlavorsClientJSON
from tempest.services.compute.json.floating_ips_client import \
FloatingIPsClientJSON
from tempest.services.compute.json.hosts_client import HostsClientJSON
from tempest.services.compute.json.hypervisor_client import \
HypervisorClientJSON
from tempest.services.compute.json.images_client import ImagesClientJSON
from tempest.services.compute.json.instance_usage_audit_log_client import \
InstanceUsagesAuditLogClientJSON
from tempest.services.compute.json.interfaces_client import \
InterfacesClientJSON
from tempest.services.compute.json.keypairs_client import KeyPairsClientJSON
from tempest.services.compute.json.limits_client import LimitsClientJSON
from tempest.services.compute.json.migrations_client import \
MigrationsClientJSON
from tempest.services.compute.json.quotas_client import QuotasClientJSON
from tempest.services.compute.json.security_groups_client import \
SecurityGroupsClientJSON
from tempest.services.compute.json.servers_client import ServersClientJSON
from tempest.services.compute.json.services_client import ServicesClientJSON
from tempest.services.compute.json.tenant_usages_client import \
TenantUsagesClientJSON
from tempest.services.compute.json.volumes_extensions_client import \
VolumesExtensionsClientJSON
from tempest.services.compute.v3.json.agents_client import AgentsV3ClientJSON
from tempest.services.compute.v3.json.aggregates_client import \
AggregatesV3ClientJSON
from tempest.services.compute.v3.json.availability_zone_client import \
AvailabilityZoneV3ClientJSON
from tempest.services.compute.v3.json.certificates_client import \
CertificatesV3ClientJSON
from tempest.services.compute.v3.json.extensions_client import \
ExtensionsV3ClientJSON
from tempest.services.compute.v3.json.flavors_client import FlavorsV3ClientJSON
from tempest.services.compute.v3.json.hosts_client import HostsV3ClientJSON
from tempest.services.compute.v3.json.hypervisor_client import \
HypervisorV3ClientJSON
from tempest.services.compute.v3.json.interfaces_client import \
InterfacesV3ClientJSON
from tempest.services.compute.v3.json.keypairs_client import \
KeyPairsV3ClientJSON
from tempest.services.compute.v3.json.migration_client import \
MigrationsV3ClientJSON
from tempest.services.compute.v3.json.quotas_client import \
QuotasV3ClientJSON
from tempest.services.compute.v3.json.servers_client import \
ServersV3ClientJSON
from tempest.services.compute.v3.json.services_client import \
ServicesV3ClientJSON
from tempest.services.compute.v3.json.version_client import \
VersionV3ClientJSON
from tempest.services.compute.xml.aggregates_client import AggregatesClientXML
from tempest.services.compute.xml.availability_zone_client import \
AvailabilityZoneClientXML
from tempest.services.compute.xml.certificates_client import \
CertificatesClientXML
from tempest.services.compute.xml.extensions_client import ExtensionsClientXML
from tempest.services.compute.xml.fixed_ips_client import FixedIPsClientXML
from tempest.services.compute.xml.flavors_client import FlavorsClientXML
from tempest.services.compute.xml.floating_ips_client import \
FloatingIPsClientXML
from tempest.services.compute.xml.hosts_client import HostsClientXML
from tempest.services.compute.xml.hypervisor_client import HypervisorClientXML
from tempest.services.compute.xml.images_client import ImagesClientXML
from tempest.services.compute.xml.instance_usage_audit_log_client import \
InstanceUsagesAuditLogClientXML
from tempest.services.compute.xml.interfaces_client import \
InterfacesClientXML
from tempest.services.compute.xml.keypairs_client import KeyPairsClientXML
from tempest.services.compute.xml.limits_client import LimitsClientXML
from tempest.services.compute.xml.quotas_client import QuotasClientXML
from tempest.services.compute.xml.security_groups_client \
import SecurityGroupsClientXML
from tempest.services.compute.xml.servers_client import ServersClientXML
from tempest.services.compute.xml.services_client import ServicesClientXML
from tempest.services.compute.xml.tenant_usages_client import \
TenantUsagesClientXML
from tempest.services.compute.xml.volumes_extensions_client import \
VolumesExtensionsClientXML
from tempest.services.data_processing.v1_1.client import DataProcessingClient
from tempest.services.database.json.flavors_client import \
DatabaseFlavorsClientJSON
from tempest.services.identity.json.identity_client import IdentityClientJSON
from tempest.services.identity.json.identity_client import TokenClientJSON
from tempest.services.identity.v3.json.credentials_client import \
CredentialsClientJSON
from tempest.services.identity.v3.json.endpoints_client import \
EndPointClientJSON
from tempest.services.identity.v3.json.identity_client import \
IdentityV3ClientJSON
from tempest.services.identity.v3.json.identity_client import V3TokenClientJSON
from tempest.services.identity.v3.json.policy_client import PolicyClientJSON
from tempest.services.identity.v3.json.service_client import \
ServiceClientJSON
from tempest.services.identity.v3.xml.credentials_client import \
CredentialsClientXML
from tempest.services.identity.v3.xml.endpoints_client import EndPointClientXML
from tempest.services.identity.v3.xml.identity_client import \
IdentityV3ClientXML
from tempest.services.identity.v3.xml.identity_client import V3TokenClientXML
from tempest.services.identity.v3.xml.policy_client import PolicyClientXML
from tempest.services.identity.v3.xml.service_client import \
ServiceClientXML
from tempest.services.identity.xml.identity_client import IdentityClientXML
from tempest.services.identity.xml.identity_client import TokenClientXML
from tempest.services.image.v1.json.image_client import ImageClientJSON
from tempest.services.image.v2.json.image_client import ImageClientV2JSON
from tempest.services.network.json.network_client import NetworkClientJSON
from tempest.services.network.xml.network_client import NetworkClientXML
from tempest.services.object_storage.account_client import AccountClient
from tempest.services.object_storage.account_client import \
AccountClientCustomizedHeader
from tempest.services.object_storage.container_client import ContainerClient
from tempest.services.object_storage.object_client import ObjectClient
from tempest.services.object_storage.object_client import \
ObjectClientCustomizedHeader
from tempest.services.orchestration.json.orchestration_client import \
OrchestrationClient
from tempest.services.queuing.json.queuing_client import QueuingClientJSON
from tempest.services.telemetry.json.telemetry_client import \
TelemetryClientJSON
from tempest.services.telemetry.xml.telemetry_client import \
TelemetryClientXML
from tempest.services.volume.json.admin.volume_hosts_client import \
VolumeHostsClientJSON
from tempest.services.volume.json.admin.volume_quotas_client import \
VolumeQuotasClientJSON
from tempest.services.volume.json.admin.volume_services_client import \
VolumesServicesClientJSON
from tempest.services.volume.json.admin.volume_types_client import \
VolumeTypesClientJSON
from tempest.services.volume.json.backups_client import BackupsClientJSON
from tempest.services.volume.json.extensions_client import \
ExtensionsClientJSON as VolumeExtensionClientJSON
from tempest.services.volume.json.snapshots_client import SnapshotsClientJSON
from tempest.services.volume.json.volumes_client import VolumesClientJSON
from tempest.services.volume.v2.json.volumes_client import VolumesV2ClientJSON
from tempest.services.volume.v2.xml.volumes_client import VolumesV2ClientXML
from tempest.services.volume.xml.admin.volume_hosts_client import \
VolumeHostsClientXML
from tempest.services.volume.xml.admin.volume_quotas_client import \
VolumeQuotasClientXML
from tempest.services.volume.xml.admin.volume_services_client import \
VolumesServicesClientXML
from tempest.services.volume.xml.admin.volume_types_client import \
VolumeTypesClientXML
from tempest.services.volume.xml.backups_client import BackupsClientXML
from tempest.services.volume.xml.extensions_client import \
ExtensionsClientXML as VolumeExtensionClientXML
from tempest.services.volume.xml.snapshots_client import SnapshotsClientXML
from tempest.services.volume.xml.volumes_client import VolumesClientXML
# Tempest configuration singleton and module-level logger.
CONF = config.CONF
LOG = logging.getLogger(__name__)
class Manager(manager.Manager):
    """
    Top level manager for OpenStack tempest clients
    """

    def __init__(self, credentials=None, interface='json', service=None):
        """Instantiate every service client for the requested interface.

        :param credentials: credentials object; when None the base
            manager resolves the defaults.
        :param interface: wire format of the clients, 'json' or 'xml'.
        :param service: service name assigned to the negative REST
            client (json interface only).
        :raises exceptions.InvalidConfiguration: if *interface* is
            neither 'json' nor 'xml'.
        """
        # Set interface and client type first
        self.interface = interface
        self.client_type = 'tempest'
        # super cares for credentials validation
        super(Manager, self).__init__(credentials=credentials)
        if self.interface == 'xml':
            self.certificates_client = CertificatesClientXML(
                self.auth_provider)
            self.servers_client = ServersClientXML(self.auth_provider)
            self.limits_client = LimitsClientXML(self.auth_provider)
            self.images_client = ImagesClientXML(self.auth_provider)
            self.keypairs_client = KeyPairsClientXML(self.auth_provider)
            self.quotas_client = QuotasClientXML(self.auth_provider)
            self.flavors_client = FlavorsClientXML(self.auth_provider)
            self.extensions_client = ExtensionsClientXML(self.auth_provider)
            self.volumes_extensions_client = VolumesExtensionsClientXML(
                self.auth_provider)
            self.floating_ips_client = FloatingIPsClientXML(
                self.auth_provider)
            self.backups_client = BackupsClientXML(self.auth_provider)
            self.snapshots_client = SnapshotsClientXML(self.auth_provider)
            self.volumes_client = VolumesClientXML(self.auth_provider)
            self.volumes_v2_client = VolumesV2ClientXML(self.auth_provider)
            self.volume_types_client = VolumeTypesClientXML(
                self.auth_provider)
            self.identity_client = IdentityClientXML(self.auth_provider)
            self.identity_v3_client = IdentityV3ClientXML(
                self.auth_provider)
            self.security_groups_client = SecurityGroupsClientXML(
                self.auth_provider)
            self.interfaces_client = InterfacesClientXML(self.auth_provider)
            self.endpoints_client = EndPointClientXML(self.auth_provider)
            self.fixed_ips_client = FixedIPsClientXML(self.auth_provider)
            self.availability_zone_client = AvailabilityZoneClientXML(
                self.auth_provider)
            self.service_client = ServiceClientXML(self.auth_provider)
            self.volume_services_client = VolumesServicesClientXML(
                self.auth_provider)
            self.aggregates_client = AggregatesClientXML(self.auth_provider)
            self.services_client = ServicesClientXML(self.auth_provider)
            self.tenant_usages_client = TenantUsagesClientXML(
                self.auth_provider)
            self.policy_client = PolicyClientXML(self.auth_provider)
            self.hosts_client = HostsClientXML(self.auth_provider)
            self.hypervisor_client = HypervisorClientXML(self.auth_provider)
            self.network_client = NetworkClientXML(self.auth_provider)
            self.credentials_client = CredentialsClientXML(
                self.auth_provider)
            self.instance_usages_audit_log_client = \
                InstanceUsagesAuditLogClientXML(self.auth_provider)
            self.volume_hosts_client = VolumeHostsClientXML(
                self.auth_provider)
            self.volume_quotas_client = VolumeQuotasClientXML(
                self.auth_provider)
            self.volumes_extension_client = VolumeExtensionClientXML(
                self.auth_provider)
            if CONF.service_available.ceilometer:
                self.telemetry_client = TelemetryClientXML(
                    self.auth_provider)
            self.token_client = TokenClientXML()
            self.token_v3_client = V3TokenClientXML()
        elif self.interface == 'json':
            self.certificates_client = CertificatesClientJSON(
                self.auth_provider)
            self.certificates_v3_client = CertificatesV3ClientJSON(
                self.auth_provider)
            self.baremetal_client = BaremetalClientJSON(self.auth_provider)
            self.servers_client = ServersClientJSON(self.auth_provider)
            self.servers_v3_client = ServersV3ClientJSON(self.auth_provider)
            self.limits_client = LimitsClientJSON(self.auth_provider)
            self.images_client = ImagesClientJSON(self.auth_provider)
            self.keypairs_client = KeyPairsClientJSON(self.auth_provider)
            # NOTE: the v3 keypairs client used to be assigned twice in
            # a row here; the redundant duplicate was removed.
            self.keypairs_v3_client = KeyPairsV3ClientJSON(
                self.auth_provider)
            self.quotas_client = QuotasClientJSON(self.auth_provider)
            self.quotas_v3_client = QuotasV3ClientJSON(self.auth_provider)
            self.flavors_client = FlavorsClientJSON(self.auth_provider)
            self.flavors_v3_client = FlavorsV3ClientJSON(self.auth_provider)
            self.extensions_v3_client = ExtensionsV3ClientJSON(
                self.auth_provider)
            self.extensions_client = ExtensionsClientJSON(
                self.auth_provider)
            self.volumes_extensions_client = VolumesExtensionsClientJSON(
                self.auth_provider)
            self.floating_ips_client = FloatingIPsClientJSON(
                self.auth_provider)
            self.backups_client = BackupsClientJSON(self.auth_provider)
            self.snapshots_client = SnapshotsClientJSON(self.auth_provider)
            self.volumes_client = VolumesClientJSON(self.auth_provider)
            self.volumes_v2_client = VolumesV2ClientJSON(self.auth_provider)
            self.volume_types_client = VolumeTypesClientJSON(
                self.auth_provider)
            self.identity_client = IdentityClientJSON(self.auth_provider)
            self.identity_v3_client = IdentityV3ClientJSON(
                self.auth_provider)
            self.security_groups_client = SecurityGroupsClientJSON(
                self.auth_provider)
            self.interfaces_v3_client = InterfacesV3ClientJSON(
                self.auth_provider)
            self.interfaces_client = InterfacesClientJSON(
                self.auth_provider)
            self.endpoints_client = EndPointClientJSON(self.auth_provider)
            self.fixed_ips_client = FixedIPsClientJSON(self.auth_provider)
            self.availability_zone_v3_client = AvailabilityZoneV3ClientJSON(
                self.auth_provider)
            self.availability_zone_client = AvailabilityZoneClientJSON(
                self.auth_provider)
            self.services_v3_client = ServicesV3ClientJSON(
                self.auth_provider)
            self.service_client = ServiceClientJSON(self.auth_provider)
            self.volume_services_client = VolumesServicesClientJSON(
                self.auth_provider)
            self.agents_v3_client = AgentsV3ClientJSON(self.auth_provider)
            self.aggregates_v3_client = AggregatesV3ClientJSON(
                self.auth_provider)
            self.aggregates_client = AggregatesClientJSON(
                self.auth_provider)
            self.services_client = ServicesClientJSON(self.auth_provider)
            self.tenant_usages_client = TenantUsagesClientJSON(
                self.auth_provider)
            self.version_v3_client = VersionV3ClientJSON(self.auth_provider)
            self.migrations_v3_client = MigrationsV3ClientJSON(
                self.auth_provider)
            self.policy_client = PolicyClientJSON(self.auth_provider)
            self.hosts_client = HostsClientJSON(self.auth_provider)
            self.hypervisor_v3_client = HypervisorV3ClientJSON(
                self.auth_provider)
            self.hypervisor_client = HypervisorClientJSON(
                self.auth_provider)
            self.network_client = NetworkClientJSON(self.auth_provider)
            self.credentials_client = CredentialsClientJSON(
                self.auth_provider)
            self.instance_usages_audit_log_client = \
                InstanceUsagesAuditLogClientJSON(self.auth_provider)
            self.volume_hosts_client = VolumeHostsClientJSON(
                self.auth_provider)
            self.volume_quotas_client = VolumeQuotasClientJSON(
                self.auth_provider)
            self.volumes_extension_client = VolumeExtensionClientJSON(
                self.auth_provider)
            self.hosts_v3_client = HostsV3ClientJSON(self.auth_provider)
            self.database_flavors_client = DatabaseFlavorsClientJSON(
                self.auth_provider)
            self.queuing_client = QueuingClientJSON(self.auth_provider)
            if CONF.service_available.ceilometer:
                self.telemetry_client = TelemetryClientJSON(
                    self.auth_provider)
            self.token_client = TokenClientJSON()
            self.token_v3_client = V3TokenClientJSON()
            self.negative_client = NegativeRestClient(self.auth_provider)
            self.negative_client.service = service
        else:
            msg = "Unsupported interface type `%s'" % interface
            raise exceptions.InvalidConfiguration(msg)

        # TODO(andreaf) EC2 client still do their auth, v2 only
        ec2_client_args = (self.credentials.username,
                          self.credentials.password,
                          CONF.identity.uri,
                          self.credentials.tenant_name)

        # common clients
        self.account_client = AccountClient(self.auth_provider)
        self.agents_client = AgentsClientJSON(self.auth_provider)
        if CONF.service_available.glance:
            self.image_client = ImageClientJSON(self.auth_provider)
            self.image_client_v2 = ImageClientV2JSON(self.auth_provider)
        self.container_client = ContainerClient(self.auth_provider)
        self.object_client = ObjectClient(self.auth_provider)
        self.orchestration_client = OrchestrationClient(
            self.auth_provider)
        self.ec2api_client = botoclients.APIClientEC2(*ec2_client_args)
        self.s3_client = botoclients.ObjectClientS3(*ec2_client_args)
        self.custom_object_client = ObjectClientCustomizedHeader(
            self.auth_provider)
        self.custom_account_client = \
            AccountClientCustomizedHeader(self.auth_provider)
        self.data_processing_client = DataProcessingClient(
            self.auth_provider)
        self.migrations_client = MigrationsClientJSON(self.auth_provider)
class AltManager(Manager):
    """
    Manager whose clients are built from the alternate ('alt_user')
    credential set defined in configuration.
    """

    def __init__(self, interface='json', service=None):
        creds = auth.get_default_credentials('alt_user')
        super(AltManager, self).__init__(credentials=creds,
                                         interface=interface,
                                         service=service)
class AdminManager(Manager):
    """
    Manager whose clients are built from the identity admin
    credential set defined in configuration.
    """

    def __init__(self, interface='json', service=None):
        creds = auth.get_default_credentials('identity_admin')
        super(AdminManager, self).__init__(credentials=creds,
                                           interface=interface,
                                           service=service)
class ComputeAdminManager(Manager):
    """
    Manager object that uses the compute_admin credentials for its
    managed client objects
    """

    def __init__(self, interface='json', service=None):
        # Call super() directly, matching AltManager/AdminManager,
        # instead of binding the super proxy to a temporary variable.
        super(ComputeAdminManager, self).__init__(
            credentials=auth.get_default_credentials('compute_admin'),
            interface=interface,
            service=service)
class OfficialClientManager(manager.Manager):
    """
    Manager that provides access to the official python clients for
    calling various OpenStack APIs.
    """

    # API versions passed to each official python-*client constructor
    # in the _get_*_client helpers below.
    NOVACLIENT_VERSION = '2'
    CINDERCLIENT_VERSION = '1'
    HEATCLIENT_VERSION = '1'
    IRONICCLIENT_VERSION = '1'
    SAHARACLIENT_VERSION = '1.1'
    def __init__(self, credentials):
        """Build every official client with the given credentials.

        Each _get_*_client helper returns None when its service is
        disabled in configuration, so the corresponding attribute may
        be None.
        """
        # FIXME(andreaf) Auth provider for client_type 'official' is
        # not implemented yet, setting to 'tempest' for now.
        self.client_type = 'tempest'
        self.interface = None
        # super cares for credentials validation
        super(OfficialClientManager, self).__init__(credentials=credentials)
        self.baremetal_client = self._get_baremetal_client()
        self.compute_client = self._get_compute_client(credentials)
        self.identity_client = self._get_identity_client(credentials)
        self.image_client = self._get_image_client()
        self.network_client = self._get_network_client()
        self.volume_client = self._get_volume_client(credentials)
        self.object_storage_client = self._get_object_storage_client(
            credentials)
        self.orchestration_client = self._get_orchestration_client(
            credentials)
        self.data_processing_client = self._get_data_processing_client(
            credentials)
def _get_roles(self):
admin_credentials = auth.get_default_credentials('identity_admin')
keystone_admin = self._get_identity_client(admin_credentials)
username = self.credentials.username
tenant_name = self.credentials.tenant_name
user_id = keystone_admin.users.find(name=username).id
tenant_id = keystone_admin.tenants.find(name=tenant_name).id
roles = keystone_admin.roles.roles_for_user(
user=user_id, tenant=tenant_id)
return [r.name for r in roles]
def _get_compute_client(self, credentials):
# Novaclient will not execute operations for anyone but the
# identified user, so a new client needs to be created for
# each user that operations need to be performed for.
if not CONF.service_available.nova:
return None
import novaclient.client
auth_url = CONF.identity.uri
dscv = CONF.identity.disable_ssl_certificate_validation
region = CONF.identity.region
client_args = (credentials.username, credentials.password,
credentials.tenant_name, auth_url)
# Create our default Nova client to use in testing
service_type = CONF.compute.catalog_type
endpoint_type = CONF.compute.endpoint_type
return novaclient.client.Client(self.NOVACLIENT_VERSION,
*client_args,
service_type=service_type,
endpoint_type=endpoint_type,
region_name=region,
no_cache=True,
insecure=dscv,
http_log_debug=True)
def _get_image_client(self):
if not CONF.service_available.glance:
return None
import glanceclient
token = self.identity_client.auth_token
region = CONF.identity.region
endpoint_type = CONF.image.endpoint_type
endpoint = self.identity_client.service_catalog.url_for(
attr='region', filter_value=region,
service_type=CONF.image.catalog_type, endpoint_type=endpoint_type)
dscv = CONF.identity.disable_ssl_certificate_validation
return glanceclient.Client('1', endpoint=endpoint, token=token,
insecure=dscv)
def _get_volume_client(self, credentials):
if not CONF.service_available.cinder:
return None
import cinderclient.client
auth_url = CONF.identity.uri
region = CONF.identity.region
endpoint_type = CONF.volume.endpoint_type
dscv = CONF.identity.disable_ssl_certificate_validation
return cinderclient.client.Client(self.CINDERCLIENT_VERSION,
credentials.username,
credentials.password,
credentials.tenant_name,
auth_url,
region_name=region,
endpoint_type=endpoint_type,
insecure=dscv,
http_log_debug=True)
def _get_object_storage_client(self, credentials):
if not CONF.service_available.swift:
return None
import swiftclient
auth_url = CONF.identity.uri
# add current tenant to swift operator role group.
admin_credentials = auth.get_default_credentials('identity_admin')
keystone_admin = self._get_identity_client(admin_credentials)
# enable test user to operate swift by adding operator role to him.
roles = keystone_admin.roles.list()
operator_role = CONF.object_storage.operator_role
member_role = [role for role in roles if role.name == operator_role][0]
# NOTE(maurosr): This is surrounded in the try-except block cause
# neutron tests doesn't have tenant isolation.
try:
keystone_admin.roles.add_user_role(self.identity_client.user_id,
member_role.id,
self.identity_client.tenant_id)
except keystoneclient.exceptions.Conflict:
pass
endpoint_type = CONF.object_storage.endpoint_type
os_options = {'endpoint_type': endpoint_type}
return swiftclient.Connection(auth_url, credentials.username,
credentials.password,
tenant_name=credentials.tenant_name,
auth_version='2',
os_options=os_options)
def _get_orchestration_client(self, credentials):
if not CONF.service_available.heat:
return None
import heatclient.client
keystone = self._get_identity_client(credentials)
region = CONF.identity.region
endpoint_type = CONF.orchestration.endpoint_type
token = keystone.auth_token
service_type = CONF.orchestration.catalog_type
try:
endpoint = keystone.service_catalog.url_for(
attr='region',
filter_value=region,
service_type=service_type,
endpoint_type=endpoint_type)
except keystoneclient.exceptions.EndpointNotFound:
return None
else:
return heatclient.client.Client(self.HEATCLIENT_VERSION,
endpoint,
token=token,
username=credentials.username,
password=credentials.password)
def _get_identity_client(self, credentials):
# This identity client is not intended to check the security
# of the identity service, so use admin credentials by default.
auth_url = CONF.identity.uri
dscv = CONF.identity.disable_ssl_certificate_validation
return keystoneclient.v2_0.client.Client(
username=credentials.username,
password=credentials.password,
tenant_name=credentials.tenant_name,
auth_url=auth_url,
insecure=dscv)
def _get_baremetal_client(self):
# ironic client is currently intended to by used by admin users
if not CONF.service_available.ironic:
return None
import ironicclient.client
roles = self._get_roles()
if CONF.identity.admin_role not in roles:
return None
auth_url = CONF.identity.uri
api_version = self.IRONICCLIENT_VERSION
insecure = CONF.identity.disable_ssl_certificate_validation
service_type = CONF.baremetal.catalog_type
endpoint_type = CONF.baremetal.endpoint_type
creds = {
'os_username': self.credentials.username,
'os_password': self.credentials.password,
'os_tenant_name': self.credentials.tenant_name
}
try:
return ironicclient.client.get_client(
api_version=api_version,
os_auth_url=auth_url,
insecure=insecure,
os_service_type=service_type,
os_endpoint_type=endpoint_type,
**creds)
except keystoneclient.exceptions.EndpointNotFound:
return None
def _get_network_client(self):
# The intended configuration is for the network client to have
# admin privileges and indicate for whom resources are being
# created via a 'tenant_id' parameter. This will often be
# preferable to authenticating as a specific user because
# working with certain resources (public routers and networks)
# often requires admin privileges anyway.
if not CONF.service_available.neutron:
return None
import neutronclient.v2_0.client
credentials = auth.get_default_credentials('identity_admin')
auth_url = CONF.identity.uri
dscv = CONF.identity.disable_ssl_certificate_validation
endpoint_type = CONF.network.endpoint_type
return neutronclient.v2_0.client.Client(
username=credentials.username,
password=credentials.password,
tenant_name=credentials.tenant_name,
endpoint_type=endpoint_type,
auth_url=auth_url,
insecure=dscv)
def _get_data_processing_client(self, credentials):
if not CONF.service_available.sahara:
# Sahara isn't available
return None
import saharaclient.client
endpoint_type = CONF.data_processing.endpoint_type
catalog_type = CONF.data_processing.catalog_type
auth_url = CONF.identity.uri
client = saharaclient.client.Client(
self.SAHARACLIENT_VERSION,
credentials.username,
credentials.password,
project_name=credentials.tenant_name,
endpoint_type=endpoint_type,
service_type=catalog_type,
auth_url=auth_url)
return client
tempest-2014.1.dev4108.gf22b6cc/tempest/stress/ 0000775 0001750 0001750 00000000000 12332757136 021104 5 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/stress/stressaction.py 0000664 0001750 0001750 00000006156 12332757070 024204 0 ustar chuck chuck 0000000 0000000 # (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import signal
import sys
from tempest.openstack.common import log as logging
class StressAction(object):
    """Base class for a single stress-test action.

    Subclasses implement :meth:`run` with the actual workload; the stress
    driver invokes it repeatedly via :meth:`execute`, which tracks run and
    failure counts in a shared statistics mapping.
    """

    def __init__(self, manager, max_runs=None, stop_on_error=False):
        """Initialize the action.

        :param manager: client manager used by the workload.
        :param max_runs: maximum number of runs, or None for unlimited.
        :param stop_on_error: exit the worker once failures accumulate.
        """
        full_cname = self.__module__ + "." + self.__class__.__name__
        self.logger = logging.getLogger(full_cname)
        self.manager = manager
        self.max_runs = max_runs
        self.stop_on_error = stop_on_error

    def _shutdown_handler(self, signal, frame):
        """Signal handler: tear down gracefully, then exit the process."""
        try:
            self.tearDown()
        except Exception:
            self.logger.exception("Error while tearDown")
        sys.exit(0)

    @property
    def action(self):
        """This methods returns the action. Overload this if you
        create a stress test wrapper.
        """
        return self.__class__.__name__

    def setUp(self, **kwargs):
        """This method is called before the run method
        to help the test initialize any structures.
        kwargs contains arguments passed in from the
        configuration json file.

        setUp doesn't count against the time duration.
        """
        self.logger.debug("setUp")

    def tearDown(self):
        """This method is called to do any cleanup
        after the test is complete.
        """
        self.logger.debug("tearDown")

    def execute(self, shared_statistic):
        """This is the main execution entry point called
        by the driver.   We register a signal handler to
        allow us to tearDown gracefully, and then exit.
        We also keep track of how many runs we do.
        """
        signal.signal(signal.SIGHUP, self._shutdown_handler)
        signal.signal(signal.SIGTERM, self._shutdown_handler)

        while self.max_runs is None or (shared_statistic['runs'] <
                                        self.max_runs):
            self.logger.debug("Trigger new run (run %d)" %
                              shared_statistic['runs'])
            try:
                self.run()
            except Exception:
                shared_statistic['fails'] += 1
                self.logger.exception("Failure in run")
            finally:
                shared_statistic['runs'] += 1
                # NOTE(review): this aborts only once 'fails' exceeds 1,
                # i.e. after the *second* failure -- confirm whether
                # first-failure abort was intended before changing.
                if self.stop_on_error and (shared_statistic['fails'] > 1):
                    # Fixed message: the two adjacent literals previously
                    # joined without a space ("due to\"stop-on-error\"").
                    self.logger.warn("Stop process due to "
                                     "\"stop-on-error\" argument")
                    self.tearDown()
                    sys.exit(1)

    def run(self):
        """This method is where the stress test code runs."""
        # Fixed: was ``raise NotImplemented()``. NotImplemented is a
        # non-callable singleton, so that line raised TypeError rather
        # than the intended NotImplementedError.
        raise NotImplementedError()
tempest-2014.1.dev4108.gf22b6cc/tempest/stress/tools/ 0000775 0001750 0001750 00000000000 12332757136 022244 5 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/stress/tools/cleanup.py 0000775 0001750 0001750 00000001274 12332757070 024251 0 ustar chuck chuck 0000000 0000000 #!/usr/bin/env python
# Copyright 2013 Quanta Research Cambridge, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tempest.stress import cleanup

# Script entry point: purge any resources left behind by stress runs.
cleanup.cleanup()
tempest-2014.1.dev4108.gf22b6cc/tempest/stress/actions/ 0000775 0001750 0001750 00000000000 12332757136 022544 5 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/stress/actions/volume_attach_verify.py 0000664 0001750 0001750 00000024212 12332757070 027333 0 ustar chuck chuck 0000000 0000000 # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tempest.common.utils import data_utils
from tempest.common.utils.linux import remote_client
from tempest import config
import tempest.stress.stressaction as stressaction
import tempest.test
import re
CONF = config.CONF
class VolumeVerifyStress(stressaction.StressAction):
    """Stress action that attaches/detaches a volume to a server and can
    verify (via ssh partition listing) that the block device actually
    appeared in or disappeared from the guest.
    """

    def _create_keypair(self):
        # Create a throwaway keypair; stored in self.key for ssh access.
        keyname = data_utils.rand_name("key")
        resp, self.key = self.manager.keypairs_client.create_keypair(keyname)
        assert(resp.status == 200)

    def _delete_keypair(self):
        resp, _ = self.manager.keypairs_client.delete_keypair(self.key['name'])
        assert(resp.status == 202)

    def _create_vm(self):
        # Boot a server with the test security group and keypair, then
        # wait for it to become ACTIVE. Stores self.server_id.
        self.name = name = data_utils.rand_name("instance")
        servers_client = self.manager.servers_client
        self.logger.info("creating %s" % name)
        vm_args = self.vm_extra_args.copy()
        vm_args['security_groups'] = [self.sec_grp]
        vm_args['key_name'] = self.key['name']
        resp, server = servers_client.create_server(name, self.image,
                                                    self.flavor,
                                                    **vm_args)
        self.server_id = server['id']
        assert(resp.status == 202)
        self.manager.servers_client.wait_for_server_status(self.server_id,
                                                           'ACTIVE')

    def _destroy_vm(self):
        self.logger.info("deleting server: %s" % self.server_id)
        resp, _ = self.manager.servers_client.delete_server(self.server_id)
        assert(resp.status == 204)  # It cannot be 204 if I had to wait..
        self.manager.servers_client.wait_for_server_termination(self.server_id)
        self.logger.info("deleted server: %s" % self.server_id)

    def _create_sec_group(self):
        # Security group allowing ssh (tcp/22) and ping (icmp).
        sec_grp_cli = self.manager.security_groups_client
        s_name = data_utils.rand_name('sec_grp-')
        s_description = data_utils.rand_name('desc-')
        _, self.sec_grp = sec_grp_cli.create_security_group(s_name,
                                                            s_description)
        create_rule = sec_grp_cli.create_security_group_rule
        create_rule(self.sec_grp['id'], 'tcp', 22, 22)
        create_rule(self.sec_grp['id'], 'icmp', -1, -1)

    def _destroy_sec_grp(self):
        sec_grp_cli = self.manager.security_groups_client
        sec_grp_cli.delete_security_group(self.sec_grp['id'])

    def _create_floating_ip(self):
        floating_cli = self.manager.floating_ips_client
        _, self.floating = floating_cli.create_floating_ip(self.floating_pool)

    def _destroy_floating_ip(self):
        cli = self.manager.floating_ips_client
        cli.delete_floating_ip(self.floating['id'])
        cli.wait_for_resource_deletion(self.floating['id'])
        self.logger.info("Deleted Floating IP %s", str(self.floating['ip']))

    def _create_volume(self):
        # Create a 1 GB volume and wait until it is 'available'.
        name = data_utils.rand_name("volume")
        self.logger.info("creating volume: %s" % name)
        volumes_client = self.manager.volumes_client
        resp, self.volume = volumes_client.create_volume(size=1,
                                                         display_name=
                                                         name)
        assert(resp.status == 200)
        volumes_client.wait_for_volume_status(self.volume['id'],
                                              'available')
        self.logger.info("created volume: %s" % self.volume['id'])

    def _delete_volume(self):
        self.logger.info("deleting volume: %s" % self.volume['id'])
        volumes_client = self.manager.volumes_client
        resp, _ = volumes_client.delete_volume(self.volume['id'])
        assert(resp.status == 202)
        volumes_client.wait_for_resource_deletion(self.volume['id'])
        self.logger.info("deleted volume: %s" % self.volume['id'])

    def _wait_disassociate(self):
        # Poll until the floating IP is no longer bound to any instance.
        cli = self.manager.floating_ips_client

        def func():
            _, floating = cli.get_floating_ip_details(self.floating['id'])
            return floating['instance_id'] is None

        if not tempest.test.call_until_true(func, CONF.compute.build_timeout,
                                            CONF.compute.build_interval):
            raise RuntimeError("IP disassociate timeout!")

    def new_server_ops(self):
        # Boot a fresh server, associate the floating IP to it and
        # optionally verify the baseline partition count over ssh.
        self._create_vm()
        cli = self.manager.floating_ips_client
        cli.associate_floating_ip_to_server(self.floating['ip'],
                                            self.server_id)
        if self.ssh_test_before_attach and self.enable_ssh_verify:
            self.logger.info("Scanning for block devices via ssh on %s"
                             % self.server_id)
            self.part_wait(self.detach_match_count)

    def setUp(self, **kwargs):
        """Notable configuration combinations:
            Closest options to the test_stamp_pattern:
             new_server = True
             new_volume = True
             enable_ssh_verify = True
             ssh_test_before_attach = False
            Just attaching:
             new_server = False
             new_volume = False
             enable_ssh_verify = True
             ssh_test_before_attach = True
            Mostly API load by repeated attachment:
             new_server = False
             new_volume = False
             enable_ssh_verify = False
             ssh_test_before_attach = False
            Minimal Nova load, but cinder load not decreased:
             new_server = False
             new_volume = True
             enable_ssh_verify = True
             ssh_test_before_attach = True
        """
        self.image = CONF.compute.image_ref
        self.flavor = CONF.compute.flavor_ref
        self.vm_extra_args = kwargs.get('vm_extra_args', {})
        self.floating_pool = kwargs.get('floating_pool', None)
        self.new_volume = kwargs.get('new_volume', True)
        self.new_server = kwargs.get('new_server', False)
        self.enable_ssh_verify = kwargs.get('enable_ssh_verify', True)
        self.ssh_test_before_attach = kwargs.get('ssh_test_before_attach',
                                                 False)
        # Regex matching guest partition lines that belong to virtio disks.
        self.part_line_re = re.compile(kwargs.get('part_line_re', '.*vd.*'))
        self.detach_match_count = kwargs.get('detach_match_count', 1)
        self.attach_match_count = kwargs.get('attach_match_count', 2)
        self.part_name = kwargs.get('part_name', '/dev/vdc')

        self._create_floating_ip()
        self._create_sec_group()
        self._create_keypair()
        private_key = self.key['private_key']
        username = CONF.compute.image_ssh_user
        self.remote_client = remote_client.RemoteClient(self.floating['ip'],
                                                        username,
                                                        pkey=private_key)
        # Resources marked "new_*" are created per-run instead of here.
        if not self.new_volume:
            self._create_volume()
        if not self.new_server:
            self.new_server_ops()

    # Poll until the number of guest partition lines matching
    # part_line_re equals num_match, i.e. until the attach/detach
    # became visible inside the guest.
    def part_wait(self, num_match):
        def _part_state():
            self.partitions = self.remote_client.get_partitions().split('\n')
            matching = 0
            for part_line in self.partitions[1:]:
                if self.part_line_re.match(part_line):
                    matching += 1
            return matching == num_match
        if tempest.test.call_until_true(_part_state,
                                        CONF.compute.build_timeout,
                                        CONF.compute.build_interval):
            return
        else:
            # NOTE(review): extra arg is never %-formatted into the
            # RuntimeError message -- looks like logging-style usage.
            raise RuntimeError("Unexpected partitions: %s",
                               str(self.partitions))

    def run(self):
        # One attach/verify/detach cycle, creating fresh server/volume
        # first when so configured.
        if self.new_server:
            self.new_server_ops()
        if self.new_volume:
            self._create_volume()
        servers_client = self.manager.servers_client
        self.logger.info("attach volume (%s) to vm %s" %
                         (self.volume['id'], self.server_id))
        resp, body = servers_client.attach_volume(self.server_id,
                                                  self.volume['id'],
                                                  self.part_name)
        assert(resp.status == 200)
        self.manager.volumes_client.wait_for_volume_status(self.volume['id'],
                                                           'in-use')
        if self.enable_ssh_verify:
            self.logger.info("Scanning for new block device on %s"
                             % self.server_id)
            self.part_wait(self.attach_match_count)

        resp, body = servers_client.detach_volume(self.server_id,
                                                  self.volume['id'])
        assert(resp.status == 202)
        self.manager.volumes_client.wait_for_volume_status(self.volume['id'],
                                                           'available')
        if self.enable_ssh_verify:
            self.logger.info("Scanning for block device disapperance on %s"
                             % self.server_id)
            self.part_wait(self.detach_match_count)
        if self.new_volume:
            self._delete_volume()
        if self.new_server:
            self._destroy_vm()

    def tearDown(self):
        # Release everything created in setUp (per-run resources were
        # already cleaned by run()).
        cli = self.manager.floating_ips_client
        cli.disassociate_floating_ip_from_server(self.floating['ip'],
                                                 self.server_id)
        self._wait_disassociate()
        if not self.new_server:
            self._destroy_vm()
        self._delete_keypair()
        self._destroy_floating_ip()
        self._destroy_sec_grp()
        if not self.new_volume:
            self._delete_volume()
tempest-2014.1.dev4108.gf22b6cc/tempest/stress/actions/server_create_destroy.py 0000664 0001750 0001750 00000003254 12332757070 027521 0 ustar chuck chuck 0000000 0000000 # Copyright 2013 Quanta Research Cambridge, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tempest.common.utils import data_utils
from tempest import config
import tempest.stress.stressaction as stressaction
CONF = config.CONF
class ServerCreateDestroyTest(stressaction.StressAction):
    """Stress action that repeatedly boots a server and deletes it again."""

    def setUp(self, **kwargs):
        """Cache the image and flavor references used for every boot."""
        self.image = CONF.compute.image_ref
        self.flavor = CONF.compute.flavor_ref

    def run(self):
        """Boot one server, wait for ACTIVE, then delete it and wait for
        it to terminate.
        """
        client = self.manager.servers_client
        vm_name = data_utils.rand_name("instance")
        self.logger.info("creating %s" % vm_name)
        resp, server = client.create_server(vm_name, self.image, self.flavor)
        assert(resp.status == 202)
        vm_id = server['id']
        client.wait_for_server_status(vm_id, 'ACTIVE')
        self.logger.info("created %s" % vm_id)
        self.logger.info("deleting %s" % vm_name)
        resp, _ = client.delete_server(vm_id)
        assert(resp.status == 204)
        client.wait_for_server_termination(vm_id)
        self.logger.info("deleted %s" % vm_id)
tempest-2014.1.dev4108.gf22b6cc/tempest/stress/actions/volume_create_delete.py 0000664 0001750 0001750 00000002654 12332757070 027276 0 ustar chuck chuck 0000000 0000000 # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tempest.common.utils import data_utils
import tempest.stress.stressaction as stressaction
class VolumeCreateDeleteTest(stressaction.StressAction):
    """Stress action that creates a volume and immediately deletes it."""

    def run(self):
        """Create one 1 GB volume, wait for it to become available, then
        delete it and wait for the deletion to complete.
        """
        client = self.manager.volumes_client
        vol_name = data_utils.rand_name("volume")
        self.logger.info("creating %s" % vol_name)
        resp, volume = client.create_volume(size=1,
                                            display_name=vol_name)
        assert(resp.status == 200)
        vol_id = volume['id']
        client.wait_for_volume_status(vol_id, 'available')
        self.logger.info("created %s" % volume['id'])
        self.logger.info("deleting %s" % vol_name)
        resp, _ = client.delete_volume(vol_id)
        assert(resp.status == 202)
        client.wait_for_resource_deletion(vol_id)
        self.logger.info("deleted %s" % vol_id)
tempest-2014.1.dev4108.gf22b6cc/tempest/stress/actions/unit_test.py 0000664 0001750 0001750 00000006422 12332757070 025135 0 ustar chuck chuck 0000000 0000000 # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import config
from tempest.openstack.common import importutils
from tempest.openstack.common import log as logging
import tempest.stress.stressaction as stressaction
CONF = config.CONF
class SetUpClassRunTime(object):
    """Enumerates when setUpClass of a wrapped unit test may run."""

    process = 'process'
    action = 'action'
    application = 'application'
    # Every recognized scheduling option.
    allowed = set((process, action, application))

    @classmethod
    def validate(cls, name):
        """Raise KeyError when *name* is not an allowed option."""
        if name in cls.allowed:
            return
        raise KeyError("\'%s\' not a valid option" % name)
class UnitTest(stressaction.StressAction):
    """This is a special action for running existing unittests as stress test.

    You need to pass ``test_method`` and ``class_setup_per``
    using ``kwargs`` in the JSON descriptor;
    ``test_method`` should be the fully qualified name of a unittest,
    ``class_setup_per`` should be one from:
        ``application``: once in the stress job lifetime
        ``process``: once in the worker process lifetime
        ``action``: on each action
    Not all combination working in every case.
    """

    def setUp(self, **kwargs):
        """Import the target test class and decide when setUpClass runs."""
        method = kwargs['test_method'].split('.')
        self.test_method = method.pop()
        self.klass = importutils.import_class('.'.join(method))
        self.logger = logging.getLogger('.'.join(method))
        # valid options are 'process', 'application' , 'action'
        self.class_setup_per = kwargs.get('class_setup_per',
                                          SetUpClassRunTime.process)
        SetUpClassRunTime.validate(self.class_setup_per)

        if self.class_setup_per == SetUpClassRunTime.application:
            self.klass.setUpClass()
        self.setupclass_called = False

    @property
    def action(self):
        # Report the wrapped test method's name instead of the class name.
        if self.test_method:
            return self.test_method
        return super(UnitTest, self).action

    def run_core(self):
        """Run the wrapped test once; raise RuntimeError on any errors."""
        res = self.klass(self.test_method).run()
        if res.errors:
            raise RuntimeError(res.errors)

    def run(self):
        """Run the test, calling setUpClass/tearDownClass as configured."""
        if self.class_setup_per != SetUpClassRunTime.application:
            if (self.class_setup_per == SetUpClassRunTime.action
                or self.setupclass_called is False):
                self.klass.setUpClass()
                self.setupclass_called = True

            # Fixed: the previous ``except Exception as e: raise e``
            # re-raised with a fresh traceback on Python 2, hiding the
            # real failure location; try/finally alone preserves it.
            try:
                self.run_core()
            finally:
                if (CONF.stress.leave_dirty_stack is False
                    and self.class_setup_per == SetUpClassRunTime.action):
                    self.klass.tearDownClass()
        else:
            self.run_core()

    def tearDown(self):
        # 'action'-scoped classes were already torn down inside run().
        if self.class_setup_per != SetUpClassRunTime.action:
            self.klass.tearDownClass()
tempest-2014.1.dev4108.gf22b6cc/tempest/stress/actions/__init__.py 0000664 0001750 0001750 00000000000 12332757070 024640 0 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/stress/actions/volume_attach_delete.py 0000664 0001750 0001750 00000006375 12332757070 027303 0 ustar chuck chuck 0000000 0000000 # (c) 2013 Deutsche Telekom AG
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tempest.common.utils import data_utils
from tempest import config
import tempest.stress.stressaction as stressaction
CONF = config.CONF
class VolumeAttachDeleteTest(stressaction.StressAction):
    """Stress action: create a volume and a server, attach the volume,
    then delete the server (with the volume still attached) and finally
    the volume.
    """

    def setUp(self, **kwargs):
        # Cache the image and flavor references used for every boot.
        self.image = CONF.compute.image_ref
        self.flavor = CONF.compute.flavor_ref

    def run(self):
        # Step 1: create volume
        name = data_utils.rand_name("volume")
        self.logger.info("creating volume: %s" % name)
        resp, volume = self.manager.volumes_client.create_volume(size=1,
                                                                 display_name=
                                                                 name)
        assert(resp.status == 200)
        self.manager.volumes_client.wait_for_volume_status(volume['id'],
                                                           'available')
        self.logger.info("created volume: %s" % volume['id'])

        # Step 2: create vm instance
        vm_name = data_utils.rand_name("instance")
        self.logger.info("creating vm: %s" % vm_name)
        resp, server = self.manager.servers_client.create_server(
            vm_name, self.image, self.flavor)
        server_id = server['id']
        assert(resp.status == 202)
        self.manager.servers_client.wait_for_server_status(server_id, 'ACTIVE')
        self.logger.info("created vm %s" % server_id)

        # Step 3: attach volume to vm
        self.logger.info("attach volume (%s) to vm %s" %
                         (volume['id'], server_id))
        resp, body = self.manager.servers_client.attach_volume(server_id,
                                                               volume['id'],
                                                               '/dev/vdc')
        assert(resp.status == 200)
        self.manager.volumes_client.wait_for_volume_status(volume['id'],
                                                           'in-use')
        self.logger.info("volume (%s) attached to vm %s" %
                         (volume['id'], server_id))

        # Step 4: delete vm (volume is detached implicitly by deletion)
        self.logger.info("deleting vm: %s" % vm_name)
        resp, _ = self.manager.servers_client.delete_server(server_id)
        assert(resp.status == 204)
        self.manager.servers_client.wait_for_server_termination(server_id)
        self.logger.info("deleted vm: %s" % server_id)

        # Step 5: delete volume
        self.logger.info("deleting volume: %s" % volume['id'])
        resp, _ = self.manager.volumes_client.delete_volume(volume['id'])
        assert(resp.status == 202)
        self.manager.volumes_client.wait_for_resource_deletion(volume['id'])
        self.logger.info("deleted volume: %s" % volume['id'])
tempest-2014.1.dev4108.gf22b6cc/tempest/stress/actions/ssh_floating.py 0000664 0001750 0001750 00000017322 12332757070 025600 0 ustar chuck chuck 0000000 0000000 # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import socket
import subprocess
from tempest.common.utils import data_utils
from tempest import config
import tempest.stress.stressaction as stressaction
import tempest.test
CONF = config.CONF
class FloatingStress(stressaction.StressAction):
# from the scenario manager
def ping_ip_address(self, ip_address):
cmd = ['ping', '-c1', '-w1', ip_address]
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
proc.wait()
success = proc.returncode == 0
return success
def tcp_connect_scan(self, addr, port):
# like tcp
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((addr, port))
except socket.error as exc:
self.logger.info("%s(%s): %s", self.server_id, self.floating['ip'],
str(exc))
return False
self.logger.info("%s(%s): Connected :)", self.server_id,
self.floating['ip'])
s.close()
return True
def check_port_ssh(self):
def func():
return self.tcp_connect_scan(self.floating['ip'], 22)
if not tempest.test.call_until_true(func, self.check_timeout,
self.check_interval):
raise RuntimeError("Cannot connect to the ssh port.")
def check_icmp_echo(self):
self.logger.info("%s(%s): Pinging..",
self.server_id, self.floating['ip'])
def func():
return self.ping_ip_address(self.floating['ip'])
if not tempest.test.call_until_true(func, self.check_timeout,
self.check_interval):
raise RuntimeError("%s(%s): Cannot ping the machine.",
self.server_id, self.floating['ip'])
self.logger.info("%s(%s): pong :)",
self.server_id, self.floating['ip'])
def _create_vm(self):
self.name = name = data_utils.rand_name("instance")
servers_client = self.manager.servers_client
self.logger.info("creating %s" % name)
vm_args = self.vm_extra_args.copy()
vm_args['security_groups'] = [self.sec_grp]
resp, server = servers_client.create_server(name, self.image,
self.flavor,
**vm_args)
self.server_id = server['id']
assert(resp.status == 202)
if self.wait_after_vm_create:
self.manager.servers_client.wait_for_server_status(self.server_id,
'ACTIVE')
def _destroy_vm(self):
self.logger.info("deleting %s" % self.server_id)
resp, _ = self.manager.servers_client.delete_server(self.server_id)
assert(resp.status == 204) # It cannot be 204 if I had to wait..
self.manager.servers_client.wait_for_server_termination(self.server_id)
self.logger.info("deleted %s" % self.server_id)
def _create_sec_group(self):
sec_grp_cli = self.manager.security_groups_client
s_name = data_utils.rand_name('sec_grp-')
s_description = data_utils.rand_name('desc-')
_, self.sec_grp = sec_grp_cli.create_security_group(s_name,
s_description)
create_rule = sec_grp_cli.create_security_group_rule
create_rule(self.sec_grp['id'], 'tcp', 22, 22)
create_rule(self.sec_grp['id'], 'icmp', -1, -1)
def _destroy_sec_grp(self):
sec_grp_cli = self.manager.security_groups_client
sec_grp_cli.delete_security_group(self.sec_grp['id'])
def _create_floating_ip(self):
floating_cli = self.manager.floating_ips_client
_, self.floating = floating_cli.create_floating_ip(self.floating_pool)
def _destroy_floating_ip(self):
cli = self.manager.floating_ips_client
cli.delete_floating_ip(self.floating['id'])
cli.wait_for_resource_deletion(self.floating['id'])
self.logger.info("Deleted Floating IP %s", str(self.floating['ip']))
def setUp(self, **kwargs):
self.image = CONF.compute.image_ref
self.flavor = CONF.compute.flavor_ref
self.vm_extra_args = kwargs.get('vm_extra_args', {})
self.wait_after_vm_create = kwargs.get('wait_after_vm_create',
True)
self.new_vm = kwargs.get('new_vm', False)
self.new_sec_grp = kwargs.get('new_sec_group', False)
self.new_floating = kwargs.get('new_floating', False)
self.reboot = kwargs.get('reboot', False)
self.floating_pool = kwargs.get('floating_pool', None)
self.verify = kwargs.get('verify', ('check_port_ssh',
'check_icmp_echo'))
self.check_timeout = kwargs.get('check_timeout', 120)
self.check_interval = kwargs.get('check_interval', 1)
self.wait_for_disassociate = kwargs.get('wait_for_disassociate',
True)
# allocate floating
if not self.new_floating:
self._create_floating_ip()
# add security group
if not self.new_sec_grp:
self._create_sec_group()
# create vm
if not self.new_vm:
self._create_vm()
def wait_disassociate(self):
cli = self.manager.floating_ips_client
def func():
_, floating = cli.get_floating_ip_details(self.floating['id'])
return floating['instance_id'] is None
if not tempest.test.call_until_true(func, self.check_timeout,
self.check_interval):
raise RuntimeError("IP disassociate timeout!")
def run_core(self):
    """One verification pass: associate the floating IP, run every
    check named in self.verify, then disassociate again.
    """
    cli = self.manager.floating_ips_client
    cli.associate_floating_ip_to_server(self.floating['ip'],
                                        self.server_id)
    for check_name in self.verify:
        getattr(self, check_name)()
    cli.disassociate_floating_ip_from_server(self.floating['ip'],
                                             self.server_id)
    if self.wait_for_disassociate:
        self.wait_disassociate()
def run(self):
    """A single stress iteration.

    Builds any per-run ("new_*") resources, optionally hard-reboots
    the VM, runs the connectivity checks, then tears the per-run
    resources down again.
    """
    if self.new_sec_grp:
        self._create_sec_group()
    if self.new_floating:
        self._create_floating_ip()
    if self.new_vm:
        self._create_vm()

    if self.reboot:
        servers = self.manager.servers_client
        servers.reboot(self.server_id, 'HARD')
        servers.wait_for_server_status(self.server_id, 'ACTIVE')

    self.run_core()

    if self.new_vm:
        self._destroy_vm()
    if self.new_floating:
        self._destroy_floating_ip()
    if self.new_sec_grp:
        self._destroy_sec_grp()
def tearDown(self):
    """Destroy the long-lived resources created in setUp().

    Per-run ("new_*") resources were already cleaned up inside run(),
    so only the shared ones are removed here.
    """
    shared_resources = ((self.new_vm, self._destroy_vm),
                        (self.new_floating, self._destroy_floating_ip),
                        (self.new_sec_grp, self._destroy_sec_grp))
    for is_per_run, destroy in shared_resources:
        if not is_per_run:
            destroy()
tempest-2014.1.dev4108.gf22b6cc/tempest/stress/etc/ 0000775 0001750 0001750 00000000000 12332757136 021657 5 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/stress/etc/volume-attach-verify.json 0000664 0001750 0001750 00000000524 12332757070 026623 0 ustar chuck chuck 0000000 0000000 [{"action": "tempest.stress.actions.volume_attach_verify.VolumeVerifyStress",
"threads": 1,
"use_admin": false,
"use_isolated_tenants": false,
"kwargs": {"vm_extra_args": {},
"new_volume": true,
"new_server": false,
"ssh_test_before_attach": false,
"enable_ssh_verify": true}
}
]
tempest-2014.1.dev4108.gf22b6cc/tempest/stress/etc/server-create-destroy-test.json 0000664 0001750 0001750 00000000260 12332757070 027760 0 ustar chuck chuck 0000000 0000000 [{"action": "tempest.stress.actions.server_create_destroy.ServerCreateDestroyTest",
"threads": 8,
"use_admin": false,
"use_isolated_tenants": false,
"kwargs": {}
}
]
tempest-2014.1.dev4108.gf22b6cc/tempest/stress/etc/volume-attach-delete-test.json 0000664 0001750 0001750 00000000256 12332757070 027540 0 ustar chuck chuck 0000000 0000000 [{"action": "tempest.stress.actions.volume_attach_delete.VolumeAttachDeleteTest",
"threads": 4,
"use_admin": false,
"use_isolated_tenants": false,
"kwargs": {}
}
]
tempest-2014.1.dev4108.gf22b6cc/tempest/stress/etc/volume-create-delete-test.json 0000664 0001750 0001750 00000000256 12332757070 027537 0 ustar chuck chuck 0000000 0000000 [{"action": "tempest.stress.actions.volume_create_delete.VolumeCreateDeleteTest",
"threads": 4,
"use_admin": false,
"use_isolated_tenants": false,
"kwargs": {}
}
]
tempest-2014.1.dev4108.gf22b6cc/tempest/stress/etc/sample-unit-test.json 0000664 0001750 0001750 00000000460 12332757070 025762 0 ustar chuck chuck 0000000 0000000 [{"action": "tempest.stress.actions.unit_test.UnitTest",
"threads": 8,
"use_admin": false,
"use_isolated_tenants": false,
"kwargs": {"test_method": "tempest.cli.simple_read_only.test_glance.SimpleReadOnlyGlanceClientTest.test_glance_fake_action",
"class_setup_per": "process"}
}
]
tempest-2014.1.dev4108.gf22b6cc/tempest/stress/etc/stress-tox-job.json 0000664 0001750 0001750 00000001014 12332757070 025446 0 ustar chuck chuck 0000000 0000000 [{"action": "tempest.stress.actions.server_create_destroy.ServerCreateDestroyTest",
"threads": 8,
"use_admin": false,
"use_isolated_tenants": false,
"kwargs": {}
},
{"action": "tempest.stress.actions.volume_create_delete.VolumeCreateDeleteTest",
"threads": 4,
"use_admin": false,
"use_isolated_tenants": false,
"kwargs": {}
},
{"action": "tempest.stress.actions.volume_attach_delete.VolumeAttachDeleteTest",
"threads": 2,
"use_admin": false,
"use_isolated_tenants": false,
"kwargs": {}
}
]
tempest-2014.1.dev4108.gf22b6cc/tempest/stress/etc/ssh_floating.json 0000664 0001750 0001750 00000001013 12332757070 025222 0 ustar chuck chuck 0000000 0000000 [{"action": "tempest.stress.actions.ssh_floating.FloatingStress",
"threads": 8,
"use_admin": false,
"use_isolated_tenants": false,
"kwargs": {"vm_extra_args": {},
"new_vm": true,
"new_sec_group": true,
"new_floating": true,
"verify": ["check_icmp_echo", "check_port_ssh"],
"check_timeout": 120,
"check_interval": 1,
"wait_after_vm_create": true,
"wait_for_disassociate": true,
"reboot": false}
}
]
tempest-2014.1.dev4108.gf22b6cc/tempest/stress/cleanup.py 0000664 0001750 0001750 00000007564 12332757070 023116 0 ustar chuck chuck 0000000 0000000 #!/usr/bin/env python
# Copyright 2013 Quanta Research Cambridge, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tempest import clients
from tempest.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def cleanup():
    """Best-effort removal of every leftover stress-test resource.

    Runs with admin credentials so resources of all tenants are
    covered.  Individual deletions are wrapped in broad try/except
    blocks on purpose: a resource may already be gone or be in a
    non-deletable state, and cleanup must never abort half-way.
    """
    admin_manager = clients.AdminManager()
    _cleanup_servers(admin_manager)
    _cleanup_keypairs(admin_manager)
    _cleanup_security_groups(admin_manager)
    _cleanup_floating_ips(admin_manager)
    _cleanup_users_and_tenants(admin_manager)
    # We have to delete snapshots first or
    # volume deletion may block
    _cleanup_snapshots(admin_manager)
    _cleanup_volumes(admin_manager)


def _cleanup_servers(admin_manager):
    # Delete every server, then wait for each termination to finish.
    _, body = admin_manager.servers_client.list_servers({"all_tenants": True})
    LOG.info("Cleanup::remove %s servers" % len(body['servers']))
    for s in body['servers']:
        try:
            admin_manager.servers_client.delete_server(s['id'])
        except Exception:
            pass
    for s in body['servers']:
        try:
            admin_manager.servers_client.wait_for_server_termination(s['id'])
        except Exception:
            pass


def _cleanup_keypairs(admin_manager):
    # Remove every keypair visible to the admin account.
    _, keypairs = admin_manager.keypairs_client.list_keypairs()
    LOG.info("Cleanup::remove %s keypairs" % len(keypairs))
    for k in keypairs:
        try:
            admin_manager.keypairs_client.delete_keypair(k['name'])
        except Exception:
            pass


def _cleanup_security_groups(admin_manager):
    # Delete all security groups except the built-in 'default' one,
    # which cannot (and should not) be removed.
    secgrp_client = admin_manager.security_groups_client
    _, secgrp = secgrp_client.list_security_groups({"all_tenants": True})
    secgrp_del = [grp for grp in secgrp if grp['name'] != 'default']
    LOG.info("Cleanup::remove %s Security Group" % len(secgrp_del))
    for g in secgrp_del:
        try:
            secgrp_client.delete_security_group(g['id'])
        except Exception:
            pass


def _cleanup_floating_ips(admin_manager):
    _, floating_ips = admin_manager.floating_ips_client.list_floating_ips()
    LOG.info("Cleanup::remove %s floating ips" % len(floating_ips))
    for f in floating_ips:
        try:
            admin_manager.floating_ips_client.delete_floating_ip(f['id'])
        except Exception:
            pass


def _cleanup_users_and_tenants(admin_manager):
    # Only accounts created by the stress framework (recognized by
    # their name prefix) are removed.
    _, users = admin_manager.identity_client.get_users()
    LOG.info("Cleanup::remove %s users" % len(users))
    for user in users:
        if user['name'].startswith("stress_user"):
            admin_manager.identity_client.delete_user(user['id'])
    _, tenants = admin_manager.identity_client.list_tenants()
    LOG.info("Cleanup::remove %s tenants" % len(tenants))
    for tenant in tenants:
        if tenant['name'].startswith("stress_tenant"):
            admin_manager.identity_client.delete_tenant(tenant['id'])


def _cleanup_snapshots(admin_manager):
    # Wait for each snapshot to settle, delete it, then wait for the
    # deletion to complete.
    _, snaps = admin_manager.snapshots_client.\
        list_snapshots({"all_tenants": True})
    LOG.info("Cleanup::remove %s snapshots" % len(snaps))
    for v in snaps:
        try:
            admin_manager.snapshots_client.\
                wait_for_snapshot_status(v['id'], 'available')
            admin_manager.snapshots_client.delete_snapshot(v['id'])
        except Exception:
            pass
    for v in snaps:
        try:
            admin_manager.snapshots_client.wait_for_resource_deletion(v['id'])
        except Exception:
            pass


def _cleanup_volumes(admin_manager):
    # Same pattern as snapshots: wait until deletable, delete, then
    # wait for the resource to disappear.
    _, vols = admin_manager.volumes_client.list_volumes({"all_tenants": True})
    LOG.info("Cleanup::remove %s volumes" % len(vols))
    for v in vols:
        try:
            admin_manager.volumes_client.\
                wait_for_volume_status(v['id'], 'available')
            admin_manager.volumes_client.delete_volume(v['id'])
        except Exception:
            pass
    for v in vols:
        try:
            admin_manager.volumes_client.wait_for_resource_deletion(v['id'])
        except Exception:
            pass
tempest-2014.1.dev4108.gf22b6cc/tempest/stress/__init__.py 0000664 0001750 0001750 00000000000 12332757070 023200 0 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/stress/run_stress.py 0000775 0001750 0001750 00000011445 12332757070 023672 0 ustar chuck chuck 0000000 0000000 #!/usr/bin/env python
# Copyright 2013 Quanta Research Cambridge, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import inspect
import json
import sys
from testtools import testsuite
try:
from unittest import loader
except ImportError:
# unittest in python 2.6 does not contain loader, so uses unittest2
from unittest2 import loader
from tempest.openstack.common import log as logging
from tempest.stress import driver
LOG = logging.getLogger(__name__)
def discover_stress_tests(path="./", filter_attr=None, call_inherited=False):
    """Discover all tempest stress tests and create actions out of them.

    :param path: directory in which unittest discovery is run
    :param filter_attr: when set, only tests that carry this attribute
        (in addition to 'stress') are returned
    :param call_inherited: when False, inherited (non-overridden) tests
        are skipped unless st_allow_inheritance is True on the test
    :return: list of action dicts consumable by the stress driver
    """
    LOG.info("Start test discovery")
    tests = []
    testloader = loader.TestLoader()
    # Renamed from 'list' to avoid shadowing the builtin.
    suite = testloader.discover(path)
    for func in testsuite.iterate_tests(suite):
        attrs = []
        try:
            method_name = getattr(func, '_testMethodName')
            full_name = "%s.%s.%s" % (func.__module__,
                                      func.__class__.__name__,
                                      method_name)
            test_func = getattr(func, method_name)
            # NOTE(mkoderer): this contains a list of all type attributes
            attrs = getattr(test_func, "__testtools_attrs")
        except Exception:
            # The original code evaluated the bare name `next` here,
            # which is a no-op expression; `continue` makes the intent
            # (skip tests without testtools attributes) explicit.
            continue
        if 'stress' in attrs:
            if filter_attr is not None and filter_attr not in attrs:
                continue
            class_setup_per = getattr(test_func, "st_class_setup_per")
            action = {'action':
                      "tempest.stress.actions.unit_test.UnitTest",
                      'kwargs': {"test_method": full_name,
                                 "class_setup_per": class_setup_per
                                 }
                      }
            if (not call_inherited and
                    getattr(test_func, "st_allow_inheritance") is not True):
                # Keep the test only when it is defined (not merely
                # inherited) on the concrete test class.
                class_structure = inspect.getmro(test_func.im_class)
                if test_func.__name__ not in class_structure[0].__dict__:
                    continue
            tests.append(action)
    return tests
def main(ns):
    """Run the configured stress tests.

    :param ns: parsed argparse namespace
    :return: 0 on success, otherwise the last non-zero driver result
    """
    result = 0
    if not ns.all:
        # Use a context manager so the descriptor file handle is
        # closed instead of leaked (original: json.load(open(...))).
        with open(ns.tests, 'r') as tests_file:
            tests = json.load(tests_file)
    else:
        tests = discover_stress_tests(filter_attr=ns.type,
                                      call_inherited=ns.call_inherited)
    if ns.serial:
        # Run every test description in its own driver invocation.
        for test in tests:
            step_result = driver.stress_openstack([test],
                                                  ns.duration,
                                                  ns.number,
                                                  ns.stop)
            # NOTE(mkoderer): we just save the last result code
            if (step_result != 0):
                result = step_result
                if ns.stop:
                    return result
    else:
        result = driver.stress_openstack(tests,
                                         ns.duration,
                                         ns.number,
                                         ns.stop)
    return result
# Command-line interface of the stress runner.  Exactly one of
# -a/--all (discover every stress-decorated test) or -t/--tests FILE
# (JSON test description) is required.
parser = argparse.ArgumentParser(description='Run stress tests')
parser.add_argument('-d', '--duration', default=300, type=int,
                    help="Duration of test in secs")
parser.add_argument('-s', '--serial', action='store_true',
                    help="Trigger running tests serially")
parser.add_argument('-S', '--stop', action='store_true',
                    default=False, help="Stop on first error")
parser.add_argument('-n', '--number', type=int,
                    help="How often an action is executed for each process")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('-a', '--all', action='store_true',
                   help="Execute all stress tests")
parser.add_argument('-T', '--type',
                    help="Filters tests of a certain type (e.g. gate)")
parser.add_argument('-i', '--call-inherited', action='store_true',
                    default=False,
                    help="Call also inherited function with stress attribute")
group.add_argument('-t', "--tests", nargs='?',
                   help="Name of the file with test description")

if __name__ == "__main__":
    try:
        sys.exit(main(parser.parse_args()))
    except Exception:
        # Any unexpected failure of the framework itself is logged and
        # turned into a non-zero exit code.
        LOG.exception("Failure in the stress test framework")
        sys.exit(1)
tempest-2014.1.dev4108.gf22b6cc/tempest/stress/driver.py 0000664 0001750 0001750 00000020625 12332757070 022753 0 ustar chuck chuck 0000000 0000000 # Copyright 2013 Quanta Research Cambridge, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import multiprocessing
import os
import signal
import time
from six import moves
from tempest import auth
from tempest import clients
from tempest.common import ssh
from tempest.common.utils import data_utils
from tempest import config
from tempest import exceptions
from tempest.openstack.common import importutils
from tempest.openstack.common import log as logging
from tempest.stress import cleanup
CONF = config.CONF
LOG = logging.getLogger(__name__)
processes = []
def do_ssh(command, host, ssh_user, ssh_key=None):
    """Execute *command* on *host* over ssh and return its output.

    Returns None when the remote command fails.
    """
    client = ssh.Client(host, ssh_user, key_filename=ssh_key)
    try:
        return client.exec_command(command)
    except exceptions.SSHExecCommandFailed:
        LOG.error('do_ssh raise exception. command:%s, host:%s.'
                  % (command, host))
        return None
def _get_compute_nodes(controller, ssh_user, ssh_key=None):
    """
    Returns a list of active compute nodes. List is generated by running
    nova-manage on the controller.
    """
    nodes = []
    cmd = "nova-manage service list | grep ^nova-compute"
    output = do_ssh(cmd, controller, ssh_user, ssh_key)
    if not output:
        return nodes
    # For example: nova-compute xg11eth0 nova enabled :-) 2011-10-31 18:57:46
    # This is fragile but there is, at present, no other way to get this info.
    for line in output.split('\n'):
        words = line.split()
        # Guard the full field width: the original `len(words) > 0`
        # check raised IndexError on words[4] for any non-empty line
        # with fewer than five fields (e.g. truncated output).
        if len(words) > 4 and words[4] == ":-)":
            nodes.append(words[1])
    return nodes
def _has_error_in_logs(logfiles, nodes, ssh_user, ssh_key=None,
                       stop_on_error=False):
    """
    Detect errors in the nova log files on the controller and compute nodes.
    """
    grep = 'egrep "ERROR|TRACE" %s' % logfiles
    ret = False
    for node in nodes:
        errors = do_ssh(grep, node, ssh_user, ssh_key)
        # do_ssh returns None when the ssh command itself fails; the
        # original `len(errors) > 0` raised TypeError in that case.
        # A failed check is treated as "no errors found" on that node.
        if errors:
            LOG.error('%s: %s' % (node, errors))
            ret = True
            if stop_on_error:
                break
    return ret
def sigchld_handler(signalnum, frame):
    """
    Signal handler (only active if stop_on_error is True).
    """
    # If any child exited with a non-zero code, restore the default
    # handler (so termination does not re-trigger us) and stop all
    # remaining children.
    failed = [entry for entry in processes
              if not entry['process'].is_alive() and
              entry['process'].exitcode != 0]
    if failed:
        signal.signal(signalnum, signal.SIG_DFL)
        terminate_all_processes()
def terminate_all_processes(check_interval=20):
    """
    Goes through the process list and terminates all child processes.
    """
    LOG.info("Stopping all processes.")
    # First ask each live child to terminate gracefully.
    for entry in processes:
        proc = entry['process']
        if proc.is_alive():
            try:
                proc.terminate()
            except Exception:
                pass
    time.sleep(check_interval)
    # Then SIGKILL whatever is still running, and reap everything.
    for entry in processes:
        proc = entry['process']
        if proc.is_alive():
            try:
                pid = proc.pid
                LOG.warn("Process %d hangs. Send SIGKILL." % pid)
                os.kill(pid, signal.SIGKILL)
            except Exception:
                pass
        proc.join()
def stress_openstack(tests, duration, max_runs=None, stop_on_error=False):
    """
    Workload driver. Executes an action function against a nova-cluster.

    :param tests: list of test-description dicts (action, threads,
        use_admin, use_isolated_tenants, kwargs)
    :param duration: wall-clock run time in seconds (used when
        max_runs is None)
    :param max_runs: per-process iteration limit; when set, the driver
        waits for all child processes to finish instead of a deadline
    :param stop_on_error: abort all processes on the first failure
    :return: 1 if any action failed or errors were found in the target
        logs, otherwise 0
    """
    admin_manager = clients.AdminManager()
    ssh_user = CONF.stress.target_ssh_user
    ssh_key = CONF.stress.target_private_key_path
    logfiles = CONF.stress.target_logfiles
    log_check_interval = int(CONF.stress.log_check_interval)
    default_thread_num = int(CONF.stress.default_thread_number_per_action)
    if logfiles:
        # Truncate target logs so only errors produced by this run are
        # detected later.
        controller = CONF.stress.target_controller
        computes = _get_compute_nodes(controller, ssh_user, ssh_key)
        for node in computes:
            do_ssh("rm -f %s" % logfiles, node, ssh_user, ssh_key)
    for test in tests:
        if test.get('use_admin', False):
            manager = admin_manager
        else:
            manager = clients.Manager()
        # Spawn one child process per configured thread.
        for p_number in moves.xrange(test.get('threads', default_thread_num)):
            if test.get('use_isolated_tenants', False):
                # Create a throw-away user/tenant pair so each process
                # runs with isolated credentials.
                username = data_utils.rand_name("stress_user")
                tenant_name = data_utils.rand_name("stress_tenant")
                password = "pass"
                identity_client = admin_manager.identity_client
                _, tenant = identity_client.create_tenant(name=tenant_name)
                identity_client.create_user(username,
                                            password,
                                            tenant['id'],
                                            "email")
                creds = auth.get_credentials(username=username,
                                             password=password,
                                             tenant_name=tenant_name)
                manager = clients.Manager(credentials=creds)
            test_obj = importutils.import_class(test['action'])
            test_run = test_obj(manager, max_runs, stop_on_error)
            kwargs = test.get('kwargs', {})
            test_run.setUp(**dict(kwargs.iteritems()))
            LOG.debug("calling Target Object %s" %
                      test_run.__class__.__name__)
            # Shared dict lets the parent read run/fail counters that
            # the child updates.
            mp_manager = multiprocessing.Manager()
            shared_statistic = mp_manager.dict()
            shared_statistic['runs'] = 0
            shared_statistic['fails'] = 0
            p = multiprocessing.Process(target=test_run.execute,
                                        args=(shared_statistic,))
            process = {'process': p,
                       'p_number': p_number,
                       'action': test_run.action,
                       'statistic': shared_statistic}
            processes.append(process)
            p.start()
    if stop_on_error:
        # NOTE(mkoderer): only the parent should register the handler
        signal.signal(signal.SIGCHLD, sigchld_handler)
    end_time = time.time() + duration
    had_errors = False
    try:
        # Supervision loop: wake up periodically to check the deadline
        # (or child completion), failure counters and target logs.
        while True:
            if max_runs is None:
                remaining = end_time - time.time()
                if remaining <= 0:
                    break
            else:
                # Bounded-run mode: finish once every child has exited.
                remaining = log_check_interval
                all_proc_term = True
                for process in processes:
                    if process['process'].is_alive():
                        all_proc_term = False
                        break
                if all_proc_term:
                    break
            time.sleep(min(remaining, log_check_interval))
            if stop_on_error:
                if any([True for proc in processes
                        if proc['statistic']['fails'] > 0]):
                    break
            if not logfiles:
                continue
            if _has_error_in_logs(logfiles, computes, ssh_user, ssh_key,
                                  stop_on_error):
                had_errors = True
                break
    except KeyboardInterrupt:
        LOG.warning("Interrupted, going to print statistics and exit ...")
    if stop_on_error:
        # Deregister before killing children so their exits do not
        # re-enter the handler.
        signal.signal(signal.SIGCHLD, signal.SIG_DFL)
    terminate_all_processes()
    sum_fails = 0
    sum_runs = 0
    LOG.info("Statistics (per process):")
    for process in processes:
        if process['statistic']['fails'] > 0:
            had_errors = True
        sum_runs += process['statistic']['runs']
        sum_fails += process['statistic']['fails']
        LOG.info(" Process %d (%s): Run %d actions (%d failed)" %
                 (process['p_number'],
                  process['action'],
                  process['statistic']['runs'],
                  process['statistic']['fails']))
    LOG.info("Summary:")
    LOG.info("Run %d actions (%d failed)" %
             (sum_runs, sum_fails))
    if not had_errors and CONF.stress.full_clean_stack:
        LOG.info("cleaning up")
        cleanup.cleanup()
    if had_errors:
        return 1
    else:
        return 0
tempest-2014.1.dev4108.gf22b6cc/tempest/stress/README.rst 0000664 0001750 0001750 00000004316 12332757070 022574 0 ustar chuck chuck 0000000 0000000 Tempest Field Guide to Stress Tests
===================================
OpenStack is a distributed, asynchronous system that is prone to race condition
bugs. These bugs will not be easily found during
functional testing but will be encountered by users in large deployments in a
way that is hard to debug. The stress test tries to cause these bugs to happen
in a more controlled environment.
Environment
-----------
This particular framework assumes your working Nova cluster understands Nova
API 2.0. The stress tests can read the logs from the cluster. To enable this
you have to provide the hostname to call 'nova-manage' and
the private key and user name for ssh to the cluster in the
[stress] section of tempest.conf. You also need to provide the
location of the log files:
target_logfiles = "regexp to all log files to be checked for errors"
target_private_key_path = "private ssh key for controller and log file nodes"
target_ssh_user = "username for controller and log file nodes"
target_controller = "hostname or ip of controller node (for nova-manage)"
log_check_interval = "time between checking logs for errors (default 60s)"
To activate logging on your console please make sure that you activate `use_stderr`
in tempest.conf or use the default `logging.conf.sample` file.
Running default stress test set
-------------------------------
The stress test framework can automatically discover tests inside the tempest
test suite. All tests flagged with the `@stresstest` decorator will be executed.
In order to use this discovery you have to be in the tempest root directory
and execute the following:
tempest/stress/run_stress.py -a -d 30
Running the sample test
-----------------------
To test installation, do the following (from the tempest/stress directory):
./run_stress.py -t etc/server-create-destroy-test.json -d 30
This sample test tries to create a few VMs and kill a few VMs.
Additional Tools
----------------
Sometimes the tests don't finish, or there are failures. In these
cases, you may want to clean out the nova cluster. We have provided
some scripts to do this in the ``tools`` subdirectory.
You can use the following script to destroy any keypairs,
floating ips, and servers:
tempest/stress/tools/cleanup.py
tempest-2014.1.dev4108.gf22b6cc/tempest/manager.py 0000664 0001750 0001750 00000004702 12332757070 021545 0 ustar chuck chuck 0000000 0000000 # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import auth
from tempest import config
from tempest import exceptions
CONF = config.CONF
class Manager(object):
    """
    Base manager class

    A Manager supplies a test case with validated credentials and a
    matching auth provider to perform actions against the cloud.
    """

    def __init__(self, credentials=None):
        """
        Build a manager, optionally with explicit credentials.

        When *credentials* is None the standard configured
        username/password/tenant_name[/domain_name] is used instead.

        :param credentials: Override of the credentials
        :raises exceptions.InvalidCredentials: when the effective
            credentials are not valid
        """
        self.auth_version = CONF.identity.auth_version
        self.credentials = (auth.get_default_credentials('user')
                            if credentials is None else credentials)
        # Reject invalid (passed or default) credentials up front.
        if not self.credentials.is_valid():
            raise exceptions.InvalidCredentials()
        # Build the auth provider matching the configured auth version.
        self.auth_provider = self.get_auth_provider(self.credentials)
        # FIXME(andreaf) unused
        self.client_attr_names = []

    @classmethod
    def get_auth_provider_class(cls, auth_version):
        # 'v2' selects Keystone v2; anything else falls back to v3.
        if auth_version == 'v2':
            return auth.KeystoneV2AuthProvider
        return auth.KeystoneV3AuthProvider

    def get_auth_provider(self, credentials):
        if credentials is None:
            raise exceptions.InvalidCredentials(
                'Credentials must be specified')
        provider_cls = self.get_auth_provider_class(self.auth_version)
        return provider_cls(
            client_type=getattr(self, 'client_type', None),
            interface=getattr(self, 'interface', None),
            credentials=credentials)
tempest-2014.1.dev4108.gf22b6cc/tempest/thirdparty/ 0000775 0001750 0001750 00000000000 12332757136 021753 5 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/thirdparty/boto/ 0000775 0001750 0001750 00000000000 12332757136 022716 5 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/thirdparty/boto/test_s3_objects.py 0000664 0001750 0001750 00000003572 12332757070 026371 0 ustar chuck chuck 0000000 0000000 # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import boto.s3.key
from tempest.common.utils import data_utils
from tempest import test
from tempest.thirdparty.boto import test as boto_test
class S3BucketsTest(boto_test.BotoTestCase):
    """Smoke test for S3 object create/get/delete through boto."""

    @classmethod
    def setUpClass(cls):
        super(S3BucketsTest, cls).setUpClass()
        cls.client = cls.os.s3_client

    @test.attr(type='smoke')
    def test_create_get_delete_object(self):
        # S3 Create, get and delete object
        bucket_name = data_utils.rand_name("s3bucket-")
        object_name = data_utils.rand_name("s3object-")
        content = 'x' * 42
        bucket = self.client.create_bucket(bucket_name)
        self.addResourceCleanUp(self.destroy_bucket,
                                self.client.connection_data,
                                bucket_name)
        # assertEqual reports both values on failure, unlike the
        # original assertTrue(a == b).
        self.assertEqual(bucket.name, bucket_name)
        with contextlib.closing(boto.s3.key.Key(bucket)) as key:
            key.key = object_name
            key.set_contents_from_string(content)
            readback = key.get_contents_as_string()
            self.assertEqual(readback, content)
            bucket.delete_key(key)
            # After deletion, reading the object must raise NoSuchKey.
            self.assertBotoError(self.s3_error_code.client.NoSuchKey,
                                 key.get_contents_as_string)
tempest-2014.1.dev4108.gf22b6cc/tempest/thirdparty/boto/test_s3_buckets.py 0000664 0001750 0001750 00000003275 12332757070 026400 0 ustar chuck chuck 0000000 0000000 # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.common.utils import data_utils
from tempest import test
from tempest.thirdparty.boto import test as boto_test
class S3BucketsTest(boto_test.BotoTestCase):
    """Smoke test for S3 bucket create/get/delete through boto."""

    @classmethod
    def setUpClass(cls):
        super(S3BucketsTest, cls).setUpClass()
        cls.client = cls.os.s3_client

    @test.skip_because(bug="1076965")
    @test.attr(type='smoke')
    def test_create_and_get_delete_bucket(self):
        # S3 Create, get and delete bucket
        bucket_name = data_utils.rand_name("s3bucket-")
        cleanup_key = self.addResourceCleanUp(self.client.delete_bucket,
                                              bucket_name)
        bucket = self.client.create_bucket(bucket_name)
        # assertEqual reports both values on failure, unlike the
        # original assertTrue(a == b).
        self.assertEqual(bucket.name, bucket_name)
        bucket = self.client.get_bucket(bucket_name)
        self.assertEqual(bucket.name, bucket_name)
        self.client.delete_bucket(bucket_name)
        # Fetching the deleted bucket must raise NoSuchBucket.
        self.assertBotoError(self.s3_error_code.client.NoSuchBucket,
                             self.client.get_bucket, bucket_name)
        # Bucket was removed manually above; cancel registered cleanup.
        self.cancelResourceCleanUp(cleanup_key)
tempest-2014.1.dev4108.gf22b6cc/tempest/thirdparty/boto/test_ec2_keys.py 0000664 0001750 0001750 00000005371 12332757070 026036 0 ustar chuck chuck 0000000 0000000 # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.common.utils import data_utils
from tempest import test
from tempest.thirdparty.boto import test as boto_test
def compare_key_pairs(a, b):
    """Return True when both keypairs share name and fingerprint."""
    same_name = a.name == b.name
    same_fingerprint = a.fingerprint == b.fingerprint
    return same_name and same_fingerprint
class EC2KeysTest(boto_test.BotoTestCase):
    """CRUD smoke tests for EC2 keypairs through the boto client."""

    @classmethod
    def setUpClass(cls):
        super(EC2KeysTest, cls).setUpClass()
        cls.client = cls.os.ec2api_client
        # Shorthand for the EC2 error-code namespace used in asserts.
        cls.ec = cls.ec2_error_code

    # TODO(afazekas): merge create, delete, get test cases
    @test.attr(type='smoke')
    def test_create_ec2_keypair(self):
        # EC2 create KeyPair
        key_name = data_utils.rand_name("keypair-")
        # Register cleanup before creation so the keypair is removed
        # even if a later assertion fails.
        self.addResourceCleanUp(self.client.delete_key_pair, key_name)
        keypair = self.client.create_key_pair(key_name)
        self.assertTrue(compare_key_pairs(keypair,
                        self.client.get_key_pair(key_name)))

    @test.skip_because(bug="1072318")
    @test.attr(type='smoke')
    def test_delete_ec2_keypair(self):
        # EC2 delete KeyPair
        key_name = data_utils.rand_name("keypair-")
        self.client.create_key_pair(key_name)
        self.client.delete_key_pair(key_name)
        # After deletion a lookup should return nothing.
        self.assertIsNone(self.client.get_key_pair(key_name))

    @test.attr(type='smoke')
    def test_get_ec2_keypair(self):
        # EC2 get KeyPair
        key_name = data_utils.rand_name("keypair-")
        self.addResourceCleanUp(self.client.delete_key_pair, key_name)
        keypair = self.client.create_key_pair(key_name)
        self.assertTrue(compare_key_pairs(keypair,
                        self.client.get_key_pair(key_name)))

    @test.attr(type='smoke')
    def test_duplicate_ec2_keypair(self):
        # EC2 duplicate KeyPair
        key_name = data_utils.rand_name("keypair-")
        self.addResourceCleanUp(self.client.delete_key_pair, key_name)
        keypair = self.client.create_key_pair(key_name)
        # Creating a keypair with the same name must fail ...
        self.assertBotoError(self.ec.client.InvalidKeyPair.Duplicate,
                             self.client.create_key_pair,
                             key_name)
        # ... and the original keypair must remain intact.
        self.assertTrue(compare_key_pairs(keypair,
                        self.client.get_key_pair(key_name)))
tempest-2014.1.dev4108.gf22b6cc/tempest/thirdparty/boto/test_ec2_network.py 0000664 0001750 0001750 00000003422 12332757070 026547 0 ustar chuck chuck 0000000 0000000 # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import test
from tempest.thirdparty.boto import test as boto_test
class EC2NetworkTest(boto_test.BotoTestCase):
    """EC2 elastic-address (floating IP) API tests through boto."""

    @classmethod
    def setUpClass(cls):
        super(EC2NetworkTest, cls).setUpClass()
        cls.client = cls.os.ec2api_client

    # Note(afazekas): these tests for things duable without an instance
    @test.skip_because(bug="1080406")
    @test.attr(type='smoke')
    def test_disassociate_not_associated_floating_ip(self):
        # EC2 disassociate not associated floating ip
        ec2_codes = self.ec2_error_code
        address = self.client.allocate_address()
        public_ip = address.public_ip
        rcuk = self.addResourceCleanUp(self.client.release_address, public_ip)
        # The freshly allocated address should be listed exactly once.
        addresses_get = self.client.get_all_addresses(addresses=(public_ip,))
        self.assertEqual(len(addresses_get), 1)
        self.assertEqual(addresses_get[0].public_ip, public_ip)
        # Disassociating an address that was never associated must fail.
        self.assertBotoError(ec2_codes.client.InvalidAssociationID.NotFound,
                             address.disassociate)
        self.client.release_address(public_ip)
        # The address was released manually; cancel registered cleanup.
        self.cancelResourceCleanUp(rcuk)
        self.assertAddressReleasedWait(address)
tempest-2014.1.dev4108.gf22b6cc/tempest/thirdparty/boto/__init__.py 0000664 0001750 0001750 00000000000 12332757070 025012 0 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/thirdparty/boto/test_s3_ec2_images.py 0000664 0001750 0001750 00000013075 12332757070 026735 0 ustar chuck chuck 0000000 0000000 # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from tempest.common.utils import data_utils
from tempest import config
from tempest import test
from tempest.thirdparty.boto import test as boto_test
from tempest.thirdparty.boto.utils import s3
CONF = config.CONF
class S3ImagesTest(boto_test.BotoTestCase):
    """Register/deregister ami/aki/ari images from S3-hosted manifests."""

    @classmethod
    def setUpClass(cls):
        super(S3ImagesTest, cls).setUpClass()
        if not cls.conclusion['A_I_IMAGES_READY']:
            raise cls.skipException("".join(("EC2 ", cls.__name__,
                                    ": requires ami/aki/ari manifest")))
        cls.s3_client = cls.os.s3_client
        cls.images_client = cls.os.ec2api_client
        cls.materials_path = CONF.boto.s3_materials_path
        cls.ami_manifest = CONF.boto.ami_manifest
        cls.aki_manifest = CONF.boto.aki_manifest
        cls.ari_manifest = CONF.boto.ari_manifest
        cls.ami_path = cls.materials_path + os.sep + cls.ami_manifest
        cls.aki_path = cls.materials_path + os.sep + cls.aki_manifest
        cls.ari_path = cls.materials_path + os.sep + cls.ari_manifest
        cls.bucket_name = data_utils.rand_name("bucket-")
        bucket = cls.s3_client.create_bucket(cls.bucket_name)
        cls.addResourceCleanUp(cls.destroy_bucket,
                               cls.s3_client.connection_data,
                               cls.bucket_name)
        s3.s3_upload_dir(bucket, cls.materials_path)

    @test.attr(type='smoke')
    def test_register_get_deregister_ami_image(self):
        # Register and deregister ami image
        image = {"name": data_utils.rand_name("ami-name-"),
                 "location": self.bucket_name + "/" + self.ami_manifest,
                 "type": "ami"}
        image["image_id"] = self.images_client.register_image(
            name=image["name"],
            image_location=image["location"])
        # NOTE(afazekas): delete_snapshot=True might trigger boto lib? bug
        image["cleanUp"] = self.addResourceCleanUp(
            self.images_client.deregister_image,
            image["image_id"])
        # Image ids are prefixed by their type ("ami-...").
        self.assertEqual(image["image_id"][0:3], image["type"])
        retrieved_image = self.images_client.get_image(image["image_id"])
        # assertEqual instead of assertTrue(a == b): on failure it shows
        # both values instead of just "False is not true".
        self.assertEqual(retrieved_image.name, image["name"])
        self.assertEqual(retrieved_image.id, image["image_id"])
        if retrieved_image.state != "available":
            self.assertImageStateWait(retrieved_image, "available")
        self.images_client.deregister_image(image["image_id"])
        self.assertNotIn(image["image_id"], str(
            self.images_client.get_all_images()))
        self.cancelResourceCleanUp(image["cleanUp"])

    def test_register_get_deregister_aki_image(self):
        # Register and deregister aki image
        image = {"name": data_utils.rand_name("aki-name-"),
                 "location": self.bucket_name + "/" + self.aki_manifest,
                 "type": "aki"}
        image["image_id"] = self.images_client.register_image(
            name=image["name"],
            image_location=image["location"])
        image["cleanUp"] = self.addResourceCleanUp(
            self.images_client.deregister_image,
            image["image_id"])
        self.assertEqual(image["image_id"][0:3], image["type"])
        retrieved_image = self.images_client.get_image(image["image_id"])
        self.assertEqual(retrieved_image.name, image["name"])
        self.assertEqual(retrieved_image.id, image["image_id"])
        self.assertIn(retrieved_image.state, self.valid_image_state)
        if retrieved_image.state != "available":
            self.assertImageStateWait(retrieved_image, "available")
        self.images_client.deregister_image(image["image_id"])
        self.assertNotIn(image["image_id"], str(
            self.images_client.get_all_images()))
        self.cancelResourceCleanUp(image["cleanUp"])

    def test_register_get_deregister_ari_image(self):
        # Register and deregister ari image
        # NOTE(review): the ari location has a leading "/" unlike the
        # ami/aki cases above - presumably intentional to cover both
        # path forms; confirm before unifying.
        image = {"name": data_utils.rand_name("ari-name-"),
                 "location": "/" + self.bucket_name + "/" + self.ari_manifest,
                 "type": "ari"}
        image["image_id"] = self.images_client.register_image(
            name=image["name"],
            image_location=image["location"])
        image["cleanUp"] = self.addResourceCleanUp(
            self.images_client.deregister_image,
            image["image_id"])
        self.assertEqual(image["image_id"][0:3], image["type"])
        retrieved_image = self.images_client.get_image(image["image_id"])
        self.assertIn(retrieved_image.state, self.valid_image_state)
        if retrieved_image.state != "available":
            self.assertImageStateWait(retrieved_image, "available")
        self.assertIn(retrieved_image.state, self.valid_image_state)
        self.assertEqual(retrieved_image.name, image["name"])
        self.assertEqual(retrieved_image.id, image["image_id"])
        self.images_client.deregister_image(image["image_id"])
        self.cancelResourceCleanUp(image["cleanUp"])
# TODO(afazekas): less copy-paste style
tempest-2014.1.dev4108.gf22b6cc/tempest/thirdparty/boto/test.py 0000664 0001750 0001750 00000065454 12332757070 024262 0 ustar chuck chuck 0000000 0000000 # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import logging as orig_logging
import os
import re
import six
import urlparse
import boto
from boto import ec2
from boto import exception
from boto import s3
import keystoneclient.exceptions
import tempest.clients
from tempest.common.utils import file_utils
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log as logging
import tempest.test
from tempest.thirdparty.boto.utils import wait
CONF = config.CONF
LOG = logging.getLogger(__name__)
def decision_maker():
    """Probe the deployment once and report which boto backends are usable.

    :returns: dict with keys:
        * A_I_IMAGES_READY: True when the ami/aki/ari manifest files are
          readable on disk.
        * EC2_CAN_CONNECT_ERROR / S3_CAN_CONNECT_ERROR: None on success,
          otherwise a human readable reason (used in skip messages).
    """
    A_I_IMAGES_READY = True  # ari,ami,aki
    S3_CAN_CONNECT_ERROR = None
    EC2_CAN_CONNECT_ERROR = None
    secret_matcher = re.compile("[A-Za-z0-9+/]{32,}")  # 40 in other system
    id_matcher = re.compile("[A-Za-z0-9]{20,}")

    def all_read(*args):
        return all(map(file_utils.have_effective_read_access, args))

    materials_path = CONF.boto.s3_materials_path
    ami_path = materials_path + os.sep + CONF.boto.ami_manifest
    aki_path = materials_path + os.sep + CONF.boto.aki_manifest
    ari_path = materials_path + os.sep + CONF.boto.ari_manifest

    A_I_IMAGES_READY = all_read(ami_path, aki_path, ari_path)

    # boto is chatty while we deliberately trigger failures below;
    # silence it for the duration of the probing.
    boto_logger = logging.getLogger('boto')
    level = boto_logger.logger.level
    boto_logger.logger.setLevel(orig_logging.CRITICAL)

    def _cred_sub_check(connection_data):
        # Always raises; picks the most specific credential complaint.
        if not id_matcher.match(connection_data["aws_access_key_id"]):
            raise Exception("Invalid AWS access Key")
        if not secret_matcher.match(connection_data["aws_secret_access_key"]):
            raise Exception("Invalid AWS secret Key")
        raise Exception("Unknown (Authentication?) Error")

    openstack = tempest.clients.Manager()
    try:
        if urlparse.urlparse(CONF.boto.ec2_url).hostname is None:
            raise Exception("Failed to get hostname from the ec2_url")
        ec2client = openstack.ec2api_client
        try:
            ec2client.get_all_regions()
        except exception.BotoServerError as exc:
            if exc.error_code is None:
                raise Exception("EC2 target does not look like an "
                                "EC2 service")
            _cred_sub_check(ec2client.connection_data)
    except keystoneclient.exceptions.Unauthorized:
        EC2_CAN_CONNECT_ERROR = "AWS credentials not set," +\
                                " failed to get them even by keystoneclient"
    except Exception as exc:
        EC2_CAN_CONNECT_ERROR = str(exc)

    try:
        if urlparse.urlparse(CONF.boto.s3_url).hostname is None:
            raise Exception("Failed to get hostname from the s3_url")
        s3client = openstack.s3_client
        try:
            s3client.get_bucket("^INVALID*#()@INVALID.")
        except exception.BotoServerError as exc:
            if exc.status == 403:
                _cred_sub_check(s3client.connection_data)
    # NOTE: Unauthorized must be listed before the generic Exception
    # handler; the previous order made this clause unreachable
    # (Unauthorized is an Exception subclass).
    except keystoneclient.exceptions.Unauthorized:
        S3_CAN_CONNECT_ERROR = "AWS credentials not set," +\
                               " failed to get them even by keystoneclient"
    except Exception as exc:
        S3_CAN_CONNECT_ERROR = str(exc)

    boto_logger.logger.setLevel(level)  # restore the original verbosity
    return {'A_I_IMAGES_READY': A_I_IMAGES_READY,
            'S3_CAN_CONNECT_ERROR': S3_CAN_CONNECT_ERROR,
            'EC2_CAN_CONNECT_ERROR': EC2_CAN_CONNECT_ERROR}
class BotoExceptionMatcher(object):
    """Matches a BotoServerError against status / error-code regexps.

    Leaf subclasses narrowing STATUS_RE and CODE_RE are generated by
    _add_matcher_class().
    """
    STATUS_RE = r'[45]\d\d'
    CODE_RE = '.*'  # regexp makes sense in group match

    def match(self, exc):
        """:returns: Returns with an error string if it does not match,
                     returns with None when it matches.
        """
        if not isinstance(exc, exception.BotoServerError):
            return "%r not an BotoServerError instance" % exc
        LOG.info("Status: %s , error_code: %s", exc.status, exc.error_code)
        if re.match(self.STATUS_RE, str(exc.status)) is None:
            # BUG fix: the message parts previously joined without a
            # space ("does not matchthe expected").
            return ("Status code (%s) does not match "
                    "the expected re pattern \"%s\""
                    % (exc.status, self.STATUS_RE))
        if re.match(self.CODE_RE, str(exc.error_code)) is None:
            return ("Error code (%s) does not match " +
                    "the expected re pattern \"%s\"") %\
                   (exc.error_code, self.CODE_RE)
        return None
class ClientError(BotoExceptionMatcher):
    """Matcher accepting 4xx (client side) boto errors."""
    STATUS_RE = r'4\d\d'


class ServerError(BotoExceptionMatcher):
    """Matcher accepting 5xx (service side) boto errors."""
    STATUS_RE = r'5\d\d'
def _add_matcher_class(error_cls, error_data, base=BotoExceptionMatcher):
    """
    Usable for adding an ExceptionMatcher(s) into the exception tree.
    Non-leaf elements do a wildcard match.
    """
    # in error_code just literal and '.' characters expected
    if isinstance(error_data, six.string_types):
        error_code = error_data
        status_code = None
    else:
        (error_code, status_code) = map(str, error_data)
    parts = error_code.split('.')
    last_index = len(parts) - 1
    prefix = ""
    node = error_cls
    for idx, part in enumerate(parts):
        leaf = (idx == last_index)
        # Intermediate nodes match "Part.<anything>", leaves match exactly.
        if leaf:
            pattern = prefix + part
        else:
            pattern = prefix + part + "[.].*"
        prefix += part + "[.]"
        if not hasattr(node, part):
            attrs = {"CODE_RE": pattern}
            if leaf and status_code is not None:
                attrs["STATUS_RE"] = status_code
            new_cls = type(part, (base, ), attrs)
            setattr(node, part, new_cls())
            node = new_cls
        elif leaf:
            raise LookupError("Tries to redefine an error code \"%s\"" % part)
        else:
            node = getattr(node, part)
# TODO(afazekas): classmethod handling
def friendly_function_name_simple(call_able):
    """Return a short readable name: "Class.method" or just "function"."""
    if hasattr(call_able, "im_class"):
        # Python 2 (un)bound methods expose their class via im_class.
        return call_able.im_class.__name__ + "." + call_able.__name__
    return call_able.__name__
def friendly_function_call_str(call_able, *args, **kwargs):
    """Render a call as "name(arg1, arg2, kw=value)" for log messages."""
    if hasattr(call_able, "im_class"):
        # Python 2 method: prefix with the owning class name.
        name = call_able.im_class.__name__ + "." + call_able.__name__
    else:
        name = call_able.__name__
    rendered = [str(arg) for arg in args]
    rendered.extend("%s=%s" % (key, value)
                    for (key, value) in kwargs.items())
    return name + "(" + ", ".join(rendered) + ")"
class BotoTestCase(tempest.test.BaseTestCase):
"""Recommended to use as base class for boto related test."""
@classmethod
def setUpClass(cls):
super(BotoTestCase, cls).setUpClass()
cls.conclusion = decision_maker()
cls.os = cls.get_client_manager()
# The trash contains cleanup functions and paramaters in tuples
# (function, *args, **kwargs)
cls._resource_trash_bin = {}
cls._sequence = -1
if (hasattr(cls, "EC2") and
cls.conclusion['EC2_CAN_CONNECT_ERROR'] is not None):
raise cls.skipException("EC2 " + cls.__name__ + ": " +
cls.conclusion['EC2_CAN_CONNECT_ERROR'])
if (hasattr(cls, "S3") and
cls.conclusion['S3_CAN_CONNECT_ERROR'] is not None):
raise cls.skipException("S3 " + cls.__name__ + ": " +
cls.conclusion['S3_CAN_CONNECT_ERROR'])
@classmethod
def addResourceCleanUp(cls, function, *args, **kwargs):
"""Adds CleanUp callable, used by tearDownClass.
Recommended to a use (deep)copy on the mutable args.
"""
cls._sequence = cls._sequence + 1
cls._resource_trash_bin[cls._sequence] = (function, args, kwargs)
return cls._sequence
@classmethod
def cancelResourceCleanUp(cls, key):
"""Cancel Clean up request."""
del cls._resource_trash_bin[key]
    # TODO(afazekas): Add "with" context handling
    def assertBotoError(self, excMatcher, callableObj,
                        *args, **kwargs):
        """Assert callableObj(*args, **kwargs) raises a BotoServerError
        accepted by the given matcher.

        Example usage:
            self.assertBotoError(self.ec2_error_code.client.
                                 InvalidKeyPair.Duplicate,
                                 self.client.create_keypair,
                                 key_name)
        """
        try:
            callableObj(*args, **kwargs)
        except exception.BotoServerError as exc:
            # Matcher returns None on a match, an explanation string
            # otherwise (Python 2 raise statement syntax below).
            error_msg = excMatcher.match(exc)
            if error_msg is not None:
                raise self.failureException, error_msg
        else:
            raise self.failureException, "BotoServerError not raised"
@classmethod
def tearDownClass(cls):
"""Calls the callables added by addResourceCleanUp,
when you overwrite this function don't forget to call this too.
"""
fail_count = 0
trash_keys = sorted(cls._resource_trash_bin, reverse=True)
for key in trash_keys:
(function, pos_args, kw_args) = cls._resource_trash_bin[key]
try:
func_name = friendly_function_call_str(function, *pos_args,
**kw_args)
LOG.debug("Cleaning up: %s" % func_name)
function(*pos_args, **kw_args)
except BaseException:
fail_count += 1
LOG.exception("Cleanup failed %s" % func_name)
finally:
del cls._resource_trash_bin[key]
cls.clear_isolated_creds()
super(BotoTestCase, cls).tearDownClass()
# NOTE(afazekas): let the super called even on exceptions
# The real exceptions already logged, if the super throws another,
# does not causes hidden issues
if fail_count:
raise exceptions.TearDownException(num=fail_count)
ec2_error_code = BotoExceptionMatcher()
# InsufficientInstanceCapacity can be both server and client error
ec2_error_code.server = ServerError()
ec2_error_code.client = ClientError()
s3_error_code = BotoExceptionMatcher()
s3_error_code.server = ServerError()
s3_error_code.client = ClientError()
valid_image_state = set(('available', 'pending', 'failed'))
# NOTE(afazekas): 'paused' is not valid status in EC2, but it does not have
# a good mapping, because it uses memory, but not really a running machine
valid_instance_state = set(('pending', 'running', 'shutting-down',
'terminated', 'stopping', 'stopped', 'paused'))
valid_volume_status = set(('creating', 'available', 'in-use',
'deleting', 'deleted', 'error'))
valid_snapshot_status = set(('pending', 'completed', 'error'))
gone_set = set(('_GONE',))
@classmethod
def get_lfunction_gone(cls, obj):
"""If the object is instance of a well know type returns back with
with the correspoding function otherwise it assumes the obj itself
is the function.
"""
ec = cls.ec2_error_code
if isinstance(obj, ec2.instance.Instance):
colusure_matcher = ec.client.InvalidInstanceID.NotFound
status_attr = "state"
elif isinstance(obj, ec2.image.Image):
colusure_matcher = ec.client.InvalidAMIID.NotFound
status_attr = "state"
elif isinstance(obj, ec2.snapshot.Snapshot):
colusure_matcher = ec.client.InvalidSnapshot.NotFound
status_attr = "status"
elif isinstance(obj, ec2.volume.Volume):
colusure_matcher = ec.client.InvalidVolume.NotFound
status_attr = "status"
else:
return obj
def _status():
try:
obj.update(validate=True)
except ValueError:
return "_GONE"
except exception.EC2ResponseError as exc:
if colusure_matcher.match(exc) is None:
return "_GONE"
else:
raise
return getattr(obj, status_attr)
return _status
def state_wait_gone(self, lfunction, final_set, valid_set):
if not isinstance(final_set, set):
final_set = set((final_set,))
final_set |= self.gone_set
lfunction = self.get_lfunction_gone(lfunction)
state = wait.state_wait(lfunction, final_set, valid_set)
self.assertIn(state, valid_set | self.gone_set)
return state
    def waitImageState(self, lfunction, wait_for):
        """Wait until the image reaches wait_for (or is gone)."""
        return self.state_wait_gone(lfunction, wait_for,
                                    self.valid_image_state)

    def waitInstanceState(self, lfunction, wait_for):
        """Wait until the instance reaches wait_for (or is gone)."""
        return self.state_wait_gone(lfunction, wait_for,
                                    self.valid_instance_state)

    def waitSnapshotStatus(self, lfunction, wait_for):
        """Wait until the snapshot reaches wait_for (or is gone)."""
        return self.state_wait_gone(lfunction, wait_for,
                                    self.valid_snapshot_status)

    def waitVolumeStatus(self, lfunction, wait_for):
        """Wait until the volume reaches wait_for (or is gone)."""
        return self.state_wait_gone(lfunction, wait_for,
                                    self.valid_volume_status)
    def assertImageStateWait(self, lfunction, wait_for):
        """Wait for the image state and assert it actually reached it
        (i.e. the wait did not end on "_GONE" or an invalid state)."""
        state = self.waitImageState(lfunction, wait_for)
        self.assertIn(state, wait_for)

    def assertInstanceStateWait(self, lfunction, wait_for):
        """Wait for the instance state and assert it was reached."""
        state = self.waitInstanceState(lfunction, wait_for)
        self.assertIn(state, wait_for)

    def assertVolumeStatusWait(self, lfunction, wait_for):
        """Wait for the volume status and assert it was reached."""
        state = self.waitVolumeStatus(lfunction, wait_for)
        self.assertIn(state, wait_for)

    def assertSnapshotStatusWait(self, lfunction, wait_for):
        """Wait for the snapshot status and assert it was reached."""
        state = self.waitSnapshotStatus(lfunction, wait_for)
        self.assertIn(state, wait_for)
    def assertAddressDissasociatedWait(self, address):
        """Wait until *address* is no longer associated to any instance.

        NOTE: the method name keeps its historical misspelling
        ("Dissasociated") - renaming would break callers.
        """
        def _disassociate():
            # assumes the concrete test class sets self.ec2_client -
            # BotoTestCase itself does not define it.
            cli = self.ec2_client
            addresses = cli.get_all_addresses(addresses=(address.public_ip,))
            if len(addresses) != 1:
                # Unexpected lookup result; aborts the wait via valid_set.
                return "INVALID"
            if addresses[0].instance_id:
                LOG.info("%s associated to %s",
                         address.public_ip,
                         addresses[0].instance_id)
                return "ASSOCIATED"
            return "DISASSOCIATED"

        state = wait.state_wait(_disassociate, "DISASSOCIATED",
                                set(("ASSOCIATED", "DISASSOCIATED")))
        self.assertEqual(state, "DISASSOCIATED")
def assertAddressReleasedWait(self, address):
def _address_delete():
# NOTE(afazekas): the filter gives back IP
# even if it is not associated to my tenant
if (address.public_ip not in map(lambda a: a.public_ip,
self.ec2_client.get_all_addresses())):
return "DELETED"
return "NOTDELETED"
state = wait.state_wait(_address_delete, "DELETED")
self.assertEqual(state, "DELETED")
def assertReSearch(self, regexp, string):
if re.search(regexp, string) is None:
raise self.failureException("regexp: '%s' not found in '%s'" %
(regexp, string))
def assertNotReSearch(self, regexp, string):
if re.search(regexp, string) is not None:
raise self.failureException("regexp: '%s' found in '%s'" %
(regexp, string))
def assertReMatch(self, regexp, string):
if re.match(regexp, string) is None:
raise self.failureException("regexp: '%s' not matches on '%s'" %
(regexp, string))
def assertNotReMatch(self, regexp, string):
if re.match(regexp, string) is not None:
raise self.failureException("regexp: '%s' matches on '%s'" %
(regexp, string))
    @classmethod
    def destroy_bucket(cls, connection_data, bucket):
        """Destroys the bucket and its content, just for teardown.

        :param connection_data: kwargs for boto.connect_s3().
        :param bucket: boto Bucket object or (Python 2) bucket name string.
        :raises: exceptions.TearDownException counting the failures.
        """
        exc_num = 0
        try:
            with contextlib.closing(
                    boto.connect_s3(**connection_data)) as conn:
                if isinstance(bucket, basestring):
                    bucket = conn.lookup(bucket)
                    assert isinstance(bucket, s3.bucket.Bucket)
                # Best effort: delete every key first, then the bucket.
                for obj in bucket.list():
                    try:
                        bucket.delete_key(obj.key)
                        obj.close()
                    except BaseException:
                        LOG.exception("Failed to delete key %s " % obj.key)
                        exc_num += 1
                conn.delete_bucket(bucket)
        except BaseException:
            LOG.exception("Failed to destroy bucket %s " % bucket)
            exc_num += 1
        if exc_num:
            raise exceptions.TearDownException(num=exc_num)
    @classmethod
    def destroy_reservation(cls, reservation):
        """Terminate instances in a reservation, just for teardown.

        :raises: exceptions.TearDownException counting the instances
            that could not be terminated.
        """
        exc_num = 0

        def _instance_state():
            # Status callable for re_search_wait; reports "_GONE" once
            # the instance no longer exists. Captures the current
            # `instance` loop variable.
            try:
                instance.update(validate=True)
            except ValueError:
                return "_GONE"
            except exception.EC2ResponseError as exc:
                if cls.ec2_error_code.\
                        client.InvalidInstanceID.NotFound.match(exc) is None:
                    return "_GONE"
                # NOTE(afazekas): incorrect code,
                # but the resource must be destroyed
                if exc.error_code == "InstanceNotFound":
                    return "_GONE"
            return instance.state

        for instance in reservation.instances:
            try:
                instance.terminate()
                wait.re_search_wait(_instance_state, "_GONE")
            except BaseException:
                LOG.exception("Failed to terminate instance %s " % instance)
                exc_num += 1
        if exc_num:
            raise exceptions.TearDownException(num=exc_num)
# NOTE(afazekas): The incorrect ErrorCodes make it very, very difficult
# to write a better teardown
    @classmethod
    def destroy_security_group_wait(cls, group):
        """Delete group. Despite the name, no waiting is implemented yet.
        Use just for teardown!
        """
        # NOTE(afazekas): should wait/try until all related instance terminates
        group.delete()
@classmethod
def destroy_volume_wait(cls, volume):
"""Delete volume, tryies to detach first.
Use just for teardown!
"""
exc_num = 0
snaps = volume.snapshots()
if len(snaps):
LOG.critical("%s Volume has %s snapshot(s)", volume.id,
map(snaps.id, snaps))
# NOTE(afazekas): detaching/attching not valid EC2 status
def _volume_state():
volume.update(validate=True)
try:
if volume.status != "available":
volume.detach(force=True)
except BaseException:
LOG.exception("Failed to detach volume %s" % volume)
# exc_num += 1 "nonlocal" not in python2
return volume.status
try:
wait.re_search_wait(_volume_state, "available")
# not validates status
LOG.info(_volume_state())
volume.delete()
except BaseException:
LOG.exception("Failed to delete volume %s" % volume)
exc_num += 1
if exc_num:
raise exceptions.TearDownException(num=exc_num)
    @classmethod
    def destroy_snapshot_wait(cls, snapshot):
        """Delete snapshot, wait until it no longer exists."""
        snapshot.delete()

        def _update():
            snapshot.update(validate=True)
        # Wait until refreshing the snapshot itself raises, i.e. it is gone.
        wait.wait_exception(_update)
# you can specify tuples if you want to specify the status pattern
# EC2 client (4xx) error codes; each entry becomes a leaf matcher, e.g.
# BotoTestCase.ec2_error_code.client.InvalidKeyPair.Duplicate
# (dots in a code produce nested wildcard matchers).
for code in ('AddressLimitExceeded', 'AttachmentLimitExceeded', 'AuthFailure',
             'Blocked', 'CustomerGatewayLimitExceeded', 'DependencyViolation',
             'DiskImageSizeTooLarge', 'FilterLimitExceeded',
             'Gateway.NotAttached', 'IdempotentParameterMismatch',
             'IncorrectInstanceState', 'IncorrectState',
             'InstanceLimitExceeded', 'InsufficientInstanceCapacity',
             'InsufficientReservedInstancesCapacity',
             'InternetGatewayLimitExceeded', 'InvalidAMIAttributeItemValue',
             'InvalidAMIID.Malformed', 'InvalidAMIID.NotFound',
             'InvalidAMIID.Unavailable', 'InvalidAssociationID.NotFound',
             'InvalidAttachment.NotFound', 'InvalidConversionTaskId',
             'InvalidCustomerGateway.DuplicateIpAddress',
             'InvalidCustomerGatewayID.NotFound', 'InvalidDevice.InUse',
             'InvalidDhcpOptionsID.NotFound', 'InvalidFormat',
             'InvalidFilter', 'InvalidGatewayID.NotFound',
             'InvalidGroup.Duplicate', 'InvalidGroupId.Malformed',
             'InvalidGroup.InUse', 'InvalidGroup.NotFound',
             'InvalidGroup.Reserved', 'InvalidInstanceID.Malformed',
             'InvalidInstanceID.NotFound',
             'InvalidInternetGatewayID.NotFound', 'InvalidIPAddress.InUse',
             'InvalidKeyPair.Duplicate', 'InvalidKeyPair.Format',
             'InvalidKeyPair.NotFound', 'InvalidManifest',
             'InvalidNetworkAclEntry.NotFound',
             'InvalidNetworkAclID.NotFound', 'InvalidParameterCombination',
             'InvalidParameterValue', 'InvalidPermission.Duplicate',
             'InvalidPermission.Malformed', 'InvalidReservationID.Malformed',
             'InvalidReservationID.NotFound', 'InvalidRoute.NotFound',
             'InvalidRouteTableID.NotFound',
             'InvalidSecurity.RequestHasExpired',
             'InvalidSnapshotID.Malformed', 'InvalidSnapshot.NotFound',
             'InvalidUserID.Malformed', 'InvalidReservedInstancesId',
             'InvalidReservedInstancesOfferingId',
             'InvalidSubnetID.NotFound', 'InvalidVolumeID.Duplicate',
             'InvalidVolumeID.Malformed', 'InvalidVolumeID.ZoneMismatch',
             'InvalidVolume.NotFound', 'InvalidVpcID.NotFound',
             'InvalidVpnConnectionID.NotFound',
             'InvalidVpnGatewayID.NotFound',
             'InvalidZone.NotFound', 'LegacySecurityGroup',
             'MissingParameter', 'NetworkAclEntryAlreadyExists',
             'NetworkAclEntryLimitExceeded', 'NetworkAclLimitExceeded',
             'NonEBSInstance', 'PendingSnapshotLimitExceeded',
             'PendingVerification', 'OptInRequired', 'RequestLimitExceeded',
             'ReservedInstancesLimitExceeded', 'Resource.AlreadyAssociated',
             'ResourceLimitExceeded', 'RouteAlreadyExists',
             'RouteLimitExceeded', 'RouteTableLimitExceeded',
             'RulesPerSecurityGroupLimitExceeded',
             'SecurityGroupLimitExceeded',
             'SecurityGroupsPerInstanceLimitExceeded',
             'SnapshotLimitExceeded', 'SubnetLimitExceeded',
             'UnknownParameter', 'UnsupportedOperation',
             'VolumeLimitExceeded', 'VpcLimitExceeded',
             'VpnConnectionLimitExceeded',
             'VpnGatewayAttachmentLimitExceeded', 'VpnGatewayLimitExceeded'):
    _add_matcher_class(BotoTestCase.ec2_error_code.client,
                       code, base=ClientError)
# EC2 server (5xx) error codes.
for code in ('InsufficientAddressCapacity', 'InsufficientInstanceCapacity',
             'InsufficientReservedInstanceCapacity', 'InternalError',
             'Unavailable'):
    _add_matcher_class(BotoTestCase.ec2_error_code.server,
                       code, base=ServerError)
# S3 client error codes with their expected HTTP status.
# NOTE(review): 'InvalidAddressingHeader' and ('MissingAttachment') are
# plain strings (no status tuple) - those matchers keep the ClientError
# default 4xx status pattern.
for code in (('AccessDenied', 403),
             ('AccountProblem', 403),
             ('AmbiguousGrantByEmailAddress', 400),
             ('BadDigest', 400),
             ('BucketAlreadyExists', 409),
             ('BucketAlreadyOwnedByYou', 409),
             ('BucketNotEmpty', 409),
             ('CredentialsNotSupported', 400),
             ('CrossLocationLoggingProhibited', 403),
             ('EntityTooSmall', 400),
             ('EntityTooLarge', 400),
             ('ExpiredToken', 400),
             ('IllegalVersioningConfigurationException', 400),
             ('IncompleteBody', 400),
             ('IncorrectNumberOfFilesInPostRequest', 400),
             ('InlineDataTooLarge', 400),
             ('InvalidAccessKeyId', 403),
             'InvalidAddressingHeader',
             ('InvalidArgument', 400),
             ('InvalidBucketName', 400),
             ('InvalidBucketState', 409),
             ('InvalidDigest', 400),
             ('InvalidLocationConstraint', 400),
             ('InvalidPart', 400),
             ('InvalidPartOrder', 400),
             ('InvalidPayer', 403),
             ('InvalidPolicyDocument', 400),
             ('InvalidRange', 416),
             ('InvalidRequest', 400),
             ('InvalidSecurity', 403),
             ('InvalidSOAPRequest', 400),
             ('InvalidStorageClass', 400),
             ('InvalidTargetBucketForLogging', 400),
             ('InvalidToken', 400),
             ('InvalidURI', 400),
             ('KeyTooLong', 400),
             ('MalformedACLError', 400),
             ('MalformedPOSTRequest', 400),
             ('MalformedXML', 400),
             ('MaxMessageLengthExceeded', 400),
             ('MaxPostPreDataLengthExceededError', 400),
             ('MetadataTooLarge', 400),
             ('MethodNotAllowed', 405),
             ('MissingAttachment'),
             ('MissingContentLength', 411),
             ('MissingRequestBodyError', 400),
             ('MissingSecurityElement', 400),
             ('MissingSecurityHeader', 400),
             ('NoLoggingStatusForKey', 400),
             ('NoSuchBucket', 404),
             ('NoSuchKey', 404),
             ('NoSuchLifecycleConfiguration', 404),
             ('NoSuchUpload', 404),
             ('NoSuchVersion', 404),
             ('NotSignedUp', 403),
             ('NotSuchBucketPolicy', 404),
             ('OperationAborted', 409),
             ('PermanentRedirect', 301),
             ('PreconditionFailed', 412),
             ('Redirect', 307),
             ('RequestIsNotMultiPartContent', 400),
             ('RequestTimeout', 400),
             ('RequestTimeTooSkewed', 403),
             ('RequestTorrentOfBucketError', 400),
             ('SignatureDoesNotMatch', 403),
             ('TemporaryRedirect', 307),
             ('TokenRefreshRequired', 400),
             ('TooManyBuckets', 400),
             ('UnexpectedContent', 400),
             ('UnresolvableGrantByEmailAddress', 400),
             ('UserKeyMustBeSpecified', 400)):
    _add_matcher_class(BotoTestCase.s3_error_code.client,
                       code, base=ClientError)
# S3 server error codes with their expected HTTP status.
for code in (('InternalError', 500),
             ('NotImplemented', 501),
             ('ServiceUnavailable', 503),
             ('SlowDown', 503)):
    _add_matcher_class(BotoTestCase.s3_error_code.server,
                       code, base=ServerError)
tempest-2014.1.dev4108.gf22b6cc/tempest/thirdparty/boto/utils/ 0000775 0001750 0001750 00000000000 12332757136 024056 5 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/thirdparty/boto/utils/wait.py 0000664 0001750 0001750 00000010604 12332757070 025372 0 ustar chuck chuck 0000000 0000000 # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import time
import boto.exception
import testtools
from tempest import config
from tempest.openstack.common import log as logging
CONF = config.CONF
LOG = logging.getLogger(__name__)
def state_wait(lfunction, final_set=set(), valid_set=None):
    """Poll lfunction() until its value lands in final_set.

    :param lfunction: zero-argument callable returning the current state.
    :param final_set: state, or set of states, to stop on.
        NOTE: the mutable default is safe here - it is only read, never
        mutated (single values are rebound to a fresh set).
    :param valid_set: optional state or set of states considered legal;
        polling also stops (returning the offending state) as soon as
        the state leaves this set.
    :returns: the last observed state.
    :raises: testtools failureException when CONF.boto.build_timeout
        elapses first.
    """
    # TODO(afazekas): evaluate using ABC here
    if not isinstance(final_set, set):
        final_set = set((final_set,))
    if not isinstance(valid_set, set) and valid_set is not None:
        valid_set = set((valid_set,))
    start_time = time.time()
    old_status = status = lfunction()
    while True:
        if status != old_status:
            LOG.info('State transition "%s" ==> "%s" %d second', old_status,
                     status, time.time() - start_time)
        if status in final_set:
            return status
        if valid_set is not None and status not in valid_set:
            return status
        dtime = time.time() - start_time
        if dtime > CONF.boto.build_timeout:
            # BUG fix: fragments previously joined without spaces and
            # rendered as "exceeded!(%ds) ... waitingfor %s".
            raise testtools.TestCase\
                .failureException("State change timeout exceeded! "
                                  "(%ds) While waiting "
                                  "for %s at \"%s\"" %
                                  (dtime, final_set, status))
        time.sleep(CONF.boto.build_interval)
        old_status = status
        status = lfunction()
def re_search_wait(lfunction, regexp):
    """Poll lfunction() until regexp is found in the text it returns.

    Stops waiting on success.
    :returns: the re match object.
    :raises: testtools failureException when CONF.boto.build_timeout
        elapses first.
    """
    start_time = time.time()
    while True:
        text = lfunction()
        result = re.search(regexp, text)
        if result is not None:
            LOG.info('Pattern "%s" found in %d second in "%s"',
                     regexp,
                     time.time() - start_time,
                     text)
            return result
        dtime = time.time() - start_time
        if dtime > CONF.boto.build_timeout:
            # BUG fix: fragments previously joined without spaces and
            # rendered as "exceeded!(%ds) While waiting for'%s'".
            raise testtools.TestCase\
                .failureException('Pattern find timeout exceeded! '
                                  '(%ds) While waiting for '
                                  '"%s" pattern in "%s"' %
                                  (dtime, regexp, text))
        time.sleep(CONF.boto.build_interval)
def wait_no_exception(lfunction, exc_class=None, exc_matcher=None):
    """Poll lfunction() until it completes without raising.

    Stops waiting on success and returns lfunction()'s result.
    :param exc_class: exception type to tolerate and retry on
        (default BaseException, i.e. retry on anything).
    :param exc_matcher: optional BotoExceptionMatcher; when given,
        exc_class is forced to BotoServerError and only matching errors
        are retried - a non-matching error is re-raised immediately.
    :raises: testtools failureException on CONF.boto.build_timeout.
    """
    start_time = time.time()
    if exc_matcher is not None:
        exc_class = boto.exception.BotoServerError
    if exc_class is None:
        exc_class = BaseException
    while True:
        result = None
        try:
            result = lfunction()
            LOG.info('No Exception in %d second',
                     time.time() - start_time)
            return result
        except exc_class as exc:
            if exc_matcher is not None:
                # match() returns an explanation string when the error is
                # NOT the expected one -> propagate that error.
                res = exc_matcher.match(exc)
                if res is not None:
                    LOG.info(res)
                    raise exc
        # Let the other exceptions propagate
        dtime = time.time() - start_time
        if dtime > CONF.boto.build_timeout:
            raise testtools.TestCase\
                .failureException("Wait timeout exceeded! (%ds)" % dtime)
        time.sleep(CONF.boto.build_interval)
# NOTE(afazekas): EC2/boto normally raise exception instead of empty list
def wait_exception(lfunction):
    """Returns with the exception or raises one.

    Polls lfunction() until it raises; the raised exception is returned
    (used e.g. to wait until a deleted resource's refresh starts failing).
    :raises: testtools failureException when CONF.boto.build_timeout
        elapses without lfunction() raising.
    """
    start_time = time.time()
    while True:
        try:
            lfunction()
        except BaseException as exc:
            LOG.info('Exception in %d second',
                     time.time() - start_time)
            return exc
        dtime = time.time() - start_time
        if dtime > CONF.boto.build_timeout:
            raise testtools.TestCase\
                .failureException("Wait timeout exceeded! (%ds)" % dtime)
        time.sleep(CONF.boto.build_interval)
# TODO(afazekas): consider strategy design pattern..
tempest-2014.1.dev4108.gf22b6cc/tempest/thirdparty/boto/utils/s3.py 0000664 0001750 0001750 00000003016 12332757070 024752 0 ustar chuck chuck 0000000 0000000 # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import os
import re
import boto
import boto.s3.key
from tempest.openstack.common import log as logging
LOG = logging.getLogger(__name__)
def s3_upload_dir(bucket, path, prefix="", connection_data=None):
    """Recursively upload every file under *path* into *bucket*.

    :param bucket: boto Bucket object, or a bucket name (then
        connection_data must hold the boto.connect_s3() kwargs).
    :param path: local directory to walk.
    :param prefix: key prefix substituted for the local *path* component.
    """
    if isinstance(bucket, basestring):  # Python 2 string check
        with contextlib.closing(boto.connect_s3(**connection_data)) as conn:
            bucket = conn.lookup(bucket)
    for root, dirs, files in os.walk(path):
        for fil in files:
            with contextlib.closing(boto.s3.key.Key(bucket)) as key:
                source = root + os.sep + fil
                # Strip the leading local path to build the key name.
                # NOTE(review): the "?" only makes the LAST character of
                # the escaped path optional - appears intended to tolerate
                # a trailing separator in *path*; confirm for edge cases.
                target = re.sub("^" + re.escape(path) + "?/", prefix, source)
                if os.sep != '/':
                    # Normalize Windows-style separators in S3 key names.
                    target = re.sub(re.escape(os.sep), '/', target)
                key.key = target
                LOG.info("Uploading %s to %s/%s", source, bucket.name, target)
                key.set_contents_from_filename(source)
tempest-2014.1.dev4108.gf22b6cc/tempest/thirdparty/boto/utils/__init__.py 0000664 0001750 0001750 00000000000 12332757070 026152 0 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/thirdparty/boto/test_ec2_security_groups.py 0000664 0001750 0001750 00000006541 12332757070 030331 0 ustar chuck chuck 0000000 0000000 # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.common.utils import data_utils
from tempest import test
from tempest.thirdparty.boto import test as boto_test
class EC2SecurityGroupTest(boto_test.BotoTestCase):
    """EC2 API security group creation and rule authorize/revoke checks."""

    @classmethod
    def setUpClass(cls):
        super(EC2SecurityGroupTest, cls).setUpClass()
        cls.client = cls.os.ec2api_client

    @test.attr(type='smoke')
    def test_create_authorize_security_group(self):
        # EC2 Create, authorize/revoke security group
        group_name = data_utils.rand_name("securty_group-")
        group_description = group_name + " security group description "
        group = self.client.create_security_group(group_name,
                                                  group_description)
        self.addResourceCleanUp(self.client.delete_security_group, group_name)
        groups_get = self.client.get_all_security_groups(
            groupnames=(group_name,))
        self.assertEqual(len(groups_get), 1)
        group_get = groups_get[0]
        # BUG fix: this assertion was duplicated verbatim (copy-paste);
        # asserting it once is sufficient.
        self.assertEqual(group.name, group_get.name)
        # ping (icmp_echo) and other icmp allowed from everywhere
        # from_port and to_port act as icmp type
        success = self.client.authorize_security_group(group_name,
                                                       ip_protocol="icmp",
                                                       cidr_ip="0.0.0.0/0",
                                                       from_port=-1,
                                                       to_port=-1)
        self.assertTrue(success)
        # allow standard ssh port from anywhere
        success = self.client.authorize_security_group(group_name,
                                                       ip_protocol="tcp",
                                                       cidr_ip="0.0.0.0/0",
                                                       from_port=22,
                                                       to_port=22)
        self.assertTrue(success)
        # TODO(afazekas): Duplicate tests
        group_get = self.client.get_all_security_groups(
            groupnames=(group_name,))[0]
        # remove listed rules
        for ip_permission in group_get.rules:
            for cidr in ip_permission.grants:
                self.assertTrue(self.client.revoke_security_group(
                    group_name,
                    ip_protocol=ip_permission.ip_protocol,
                    cidr_ip=cidr,
                    from_port=ip_permission.from_port,
                    to_port=ip_permission.to_port))
        group_get = self.client.get_all_security_groups(
            groupnames=(group_name,))[0]
        # all rules should be removed now
        self.assertEqual(0, len(group_get.rules))
tempest-2014.1.dev4108.gf22b6cc/tempest/thirdparty/boto/test_ec2_instance_run.py 0000664 0001750 0001750 00000034413 12332757070 027552 0 ustar chuck chuck 0000000 0000000 # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from boto import exception
from tempest.common.utils import data_utils
from tempest.common.utils.linux import remote_client
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log as logging
from tempest import test
from tempest.thirdparty.boto import test as boto_test
from tempest.thirdparty.boto.utils import s3
from tempest.thirdparty.boto.utils import wait
CONF = config.CONF
LOG = logging.getLogger(__name__)
class InstanceRunTest(boto_test.BotoTestCase):
    """EC2 instance lifecycle smoke tests (run/stop/terminate, tags,
    volume attach) driven through the boto EC2/S3 clients.
    """
    @classmethod
    def setUpClass(cls):
        """Upload the ami/aki/ari materials to S3 and register the images.

        Skips the whole class when the image manifests are not configured.
        """
        super(InstanceRunTest, cls).setUpClass()
        if not cls.conclusion['A_I_IMAGES_READY']:
            raise cls.skipException("".join(("EC2 ", cls.__name__,
                                    ": requires ami/aki/ari manifest")))
        cls.s3_client = cls.os.s3_client
        cls.ec2_client = cls.os.ec2api_client
        cls.zone = CONF.boto.aws_zone
        cls.materials_path = CONF.boto.s3_materials_path
        ami_manifest = CONF.boto.ami_manifest
        aki_manifest = CONF.boto.aki_manifest
        ari_manifest = CONF.boto.ari_manifest
        cls.instance_type = CONF.boto.instance_type
        cls.bucket_name = data_utils.rand_name("s3bucket-")
        cls.keypair_name = data_utils.rand_name("keypair-")
        cls.keypair = cls.ec2_client.create_key_pair(cls.keypair_name)
        cls.addResourceCleanUp(cls.ec2_client.delete_key_pair,
                               cls.keypair_name)
        bucket = cls.s3_client.create_bucket(cls.bucket_name)
        cls.addResourceCleanUp(cls.destroy_bucket,
                               cls.s3_client.connection_data,
                               cls.bucket_name)
        # Push the local image materials into the fresh bucket so the
        # manifests below are resolvable.
        s3.s3_upload_dir(bucket, cls.materials_path)
        cls.images = {"ami":
                      {"name": data_utils.rand_name("ami-name-"),
                       "location": cls.bucket_name + "/" + ami_manifest},
                      "aki":
                      {"name": data_utils.rand_name("aki-name-"),
                       "location": cls.bucket_name + "/" + aki_manifest},
                      "ari":
                      {"name": data_utils.rand_name("ari-name-"),
                       "location": cls.bucket_name + "/" + ari_manifest}}
        for image in cls.images.itervalues():
            image["image_id"] = cls.ec2_client.register_image(
                name=image["name"],
                image_location=image["location"])
            cls.addResourceCleanUp(cls.ec2_client.deregister_image,
                                   image["image_id"])
        # Wait until every registered image is usable; on failure,
        # deregister everything before raising.
        for image in cls.images.itervalues():
            def _state():
                retr = cls.ec2_client.get_image(image["image_id"])
                return retr.state
            state = wait.state_wait(_state, "available")
            if state != "available":
                for _image in cls.images.itervalues():
                    cls.ec2_client.deregister_image(_image["image_id"])
                raise exceptions.EC2RegisterImageException(image_id=
                                                           image["image_id"])
    @test.attr(type='smoke')
    def test_run_idempotent_instances(self):
        """Same client_token must yield the same reservation (idempotency)."""
        # EC2 run instances idempotently
        def _run_instance(client_token):
            reservation = self.ec2_client.run_instances(
                image_id=self.images["ami"]["image_id"],
                kernel_id=self.images["aki"]["image_id"],
                ramdisk_id=self.images["ari"]["image_id"],
                instance_type=self.instance_type,
                client_token=client_token)
            rcuk = self.addResourceCleanUp(self.destroy_reservation,
                                           reservation)
            return (reservation, rcuk)
        def _terminate_reservation(reservation, rcuk):
            for instance in reservation.instances:
                instance.terminate()
            self.cancelResourceCleanUp(rcuk)
        reservation_1, rcuk_1 = _run_instance('token_1')
        reservation_2, rcuk_2 = _run_instance('token_2')
        reservation_1a, rcuk_1a = _run_instance('token_1')
        self.assertIsNotNone(reservation_1)
        self.assertIsNotNone(reservation_2)
        self.assertIsNotNone(reservation_1a)
        # same reservation for token_1
        self.assertEqual(reservation_1.id, reservation_1a.id)
        # Cancel cleanup -- since it's a duplicate, it's
        # handled by rcuk1
        self.cancelResourceCleanUp(rcuk_1a)
        _terminate_reservation(reservation_1, rcuk_1)
        _terminate_reservation(reservation_2, rcuk_2)
    @test.attr(type='smoke')
    def test_run_stop_terminate_instance(self):
        """Run an instance, stop it, then terminate it."""
        # EC2 run, stop and terminate instance
        image_ami = self.ec2_client.get_image(self.images["ami"]
                                              ["image_id"])
        reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
                                    ramdisk_id=self.images["ari"]["image_id"],
                                    instance_type=self.instance_type)
        rcuk = self.addResourceCleanUp(self.destroy_reservation, reservation)
        for instance in reservation.instances:
            LOG.info("state: %s", instance.state)
            if instance.state != "running":
                self.assertInstanceStateWait(instance, "running")
        for instance in reservation.instances:
            instance.stop()
            LOG.info("state: %s", instance.state)
            if instance.state != "stopped":
                self.assertInstanceStateWait(instance, "stopped")
        for instance in reservation.instances:
            instance.terminate()
        self.cancelResourceCleanUp(rcuk)
    @test.attr(type='smoke')
    def test_run_stop_terminate_instance_with_tags(self):
        """Exercise tag create/query/delete across the instance lifecycle."""
        # EC2 run, stop and terminate instance with tags
        image_ami = self.ec2_client.get_image(self.images["ami"]
                                              ["image_id"])
        reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
                                    ramdisk_id=self.images["ari"]["image_id"],
                                    instance_type=self.instance_type)
        rcuk = self.addResourceCleanUp(self.destroy_reservation, reservation)
        for instance in reservation.instances:
            LOG.info("state: %s", instance.state)
            if instance.state != "running":
                self.assertInstanceStateWait(instance, "running")
            instance.add_tag('key1', value='value1')
        # Tags must be visible unfiltered and via key/value filters.
        tags = self.ec2_client.get_all_tags()
        self.assertEqual(tags[0].name, 'key1')
        self.assertEqual(tags[0].value, 'value1')
        tags = self.ec2_client.get_all_tags(filters={'key': 'key1'})
        self.assertEqual(tags[0].name, 'key1')
        self.assertEqual(tags[0].value, 'value1')
        tags = self.ec2_client.get_all_tags(filters={'value': 'value1'})
        self.assertEqual(tags[0].name, 'key1')
        self.assertEqual(tags[0].value, 'value1')
        # Filtering on a non-existent key must return nothing.
        tags = self.ec2_client.get_all_tags(filters={'key': 'value2'})
        self.assertEqual(len(tags), 0, str(tags))
        for instance in reservation.instances:
            instance.remove_tag('key1', value='value1')
        tags = self.ec2_client.get_all_tags()
        self.assertEqual(len(tags), 0, str(tags))
        for instance in reservation.instances:
            instance.stop()
            LOG.info("state: %s", instance.state)
            if instance.state != "stopped":
                self.assertInstanceStateWait(instance, "stopped")
        for instance in reservation.instances:
            instance.terminate()
        self.cancelResourceCleanUp(rcuk)
    @test.skip_because(bug="1098891")
    @test.attr(type='smoke')
    def test_run_terminate_instance(self):
        """Run an instance and terminate it immediately afterwards."""
        # EC2 run, terminate immediately
        image_ami = self.ec2_client.get_image(self.images["ami"]
                                              ["image_id"])
        reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
                                    ramdisk_id=self.images["ari"]["image_id"],
                                    instance_type=self.instance_type)
        for instance in reservation.instances:
            instance.terminate()
        # A terminated instance either fails validation or is no longer
        # "running"; an InvalidInstanceID.NotFound error is also acceptable.
        try:
            instance.update(validate=True)
        except ValueError:
            pass
        except exception.EC2ResponseError as exc:
            if self.ec2_error_code.\
                    client.InvalidInstanceID.NotFound.match(exc):
                pass
            else:
                raise
        else:
            self.assertNotEqual(instance.state, "running")
    @test.attr(type='smoke')
    def test_compute_with_volumes(self):
        """End-to-end: boot, SSH in, attach/detach a volume, tear down."""
        # EC2 1. integration test (not strict)
        image_ami = self.ec2_client.get_image(self.images["ami"]["image_id"])
        sec_group_name = data_utils.rand_name("securitygroup-")
        group_desc = sec_group_name + " security group description "
        security_group = self.ec2_client.create_security_group(sec_group_name,
                                                               group_desc)
        self.addResourceCleanUp(self.destroy_security_group_wait,
                                security_group)
        # Open ICMP (any type) and SSH so the remote_client below can reach
        # the instance.
        self.assertTrue(
            self.ec2_client.authorize_security_group(
                sec_group_name,
                ip_protocol="icmp",
                cidr_ip="0.0.0.0/0",
                from_port=-1,
                to_port=-1))
        self.assertTrue(
            self.ec2_client.authorize_security_group(
                sec_group_name,
                ip_protocol="tcp",
                cidr_ip="0.0.0.0/0",
                from_port=22,
                to_port=22))
        reservation = image_ami.run(kernel_id=self.images["aki"]["image_id"],
                                    ramdisk_id=self.images["ari"]["image_id"],
                                    instance_type=self.instance_type,
                                    key_name=self.keypair_name,
                                    security_groups=(sec_group_name,))
        LOG.debug("Instance booted - state: %s",
                  reservation.instances[0].state)
        self.addResourceCleanUp(self.destroy_reservation,
                                reservation)
        volume = self.ec2_client.create_volume(1, self.zone)
        LOG.debug("Volume created - status: %s", volume.status)
        self.addResourceCleanUp(self.destroy_volume_wait, volume)
        instance = reservation.instances[0]
        if instance.state != "running":
            self.assertInstanceStateWait(instance, "running")
        LOG.debug("Instance now running - state: %s", instance.state)
        address = self.ec2_client.allocate_address()
        rcuk_a = self.addResourceCleanUp(address.delete)
        self.assertTrue(address.associate(instance.id))
        rcuk_da = self.addResourceCleanUp(address.disassociate)
        # TODO(afazekas): ping test. dependency/permission ?
        self.assertVolumeStatusWait(volume, "available")
        # NOTE(afazekas): it may be reports available before it is available
        ssh = remote_client.RemoteClient(address.public_ip,
                                         CONF.compute.ssh_user,
                                         pkey=self.keypair.material)
        text = data_utils.rand_name("Pattern text for console output -")
        resp = ssh.write_to_console(text)
        self.assertFalse(resp)
        def _output():
            output = instance.get_console_output()
            return output.output
        # The text written to the console must show up in the console log.
        wait.re_search_wait(_output, text)
        part_lines = ssh.get_partitions().split('\n')
        volume.attach(instance.id, "/dev/vdh")
        def _volume_state():
            """Return volume state realizing that 'in-use' is overloaded."""
            volume.update(validate=True)
            status = volume.status
            attached = volume.attach_data.status
            LOG.debug("Volume %s is in status: %s, attach_status: %s",
                      volume.id, status, attached)
            # Nova reports 'in-use' on 'attaching' volumes because we
            # have a single volume status, and EC2 has 2. Ensure that
            # if we aren't attached yet we return something other than
            # 'in-use'
            if status == 'in-use' and attached != 'attached':
                return 'attaching'
            else:
                return status
        wait.re_search_wait(_volume_state, "in-use")
        # NOTE(afazekas): Different Hypervisor backends names
        # differently the devices,
        # now we just test is the partition number increased/decrised
        def _part_state():
            current = ssh.get_partitions().split('\n')
            LOG.debug("Partition map for instance: %s", current)
            # NOTE(review): this is a Python 2 lexicographic list
            # comparison, not a line-count comparison — presumably the
            # intent was len(current) vs len(part_lines); confirm.
            if current > part_lines:
                return 'INCREASE'
            if current < part_lines:
                return 'DECREASE'
            return 'EQUAL'
        wait.state_wait(_part_state, 'INCREASE')
        part_lines = ssh.get_partitions().split('\n')
        # TODO(afazekas): Resource compare to the flavor settings
        volume.detach()
        # NOTE(review): a callable (_volume_state) is passed here while
        # other call sites pass the volume object — verify
        # assertVolumeStatusWait accepts both forms.
        self.assertVolumeStatusWait(_volume_state, "available")
        wait.re_search_wait(_volume_state, "available")
        wait.state_wait(_part_state, 'DECREASE')
        instance.stop()
        address.disassociate()
        self.assertAddressDissasociatedWait(address)
        self.cancelResourceCleanUp(rcuk_da)
        address.release()
        self.assertAddressReleasedWait(address)
        self.cancelResourceCleanUp(rcuk_a)
        LOG.debug("Instance %s state: %s", instance.id, instance.state)
        if instance.state != "stopped":
            self.assertInstanceStateWait(instance, "stopped")
        # TODO(afazekas): move steps from teardown to the test case
        # TODO(afazekas): Snapshot/volume read/write test case
tempest-2014.1.dev4108.gf22b6cc/tempest/thirdparty/boto/test_ec2_volumes.py 0000664 0001750 0001750 00000005151 12332757070 026551 0 ustar chuck chuck 0000000 0000000 # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import config
from tempest.openstack.common import log as logging
from tempest import test
from tempest.thirdparty.boto import test as boto_test
CONF = config.CONF
LOG = logging.getLogger(__name__)
def compare_volumes(a, b):
    """Return True when two volume records agree on both id and size."""
    if a.id != b.id:
        return False
    return a.size == b.size
class EC2VolumesTest(boto_test.BotoTestCase):
    """EC2 API smoke tests for volume and snapshot lifecycles."""
    @classmethod
    def setUpClass(cls):
        super(EC2VolumesTest, cls).setUpClass()
        # These tests are meaningless without the block storage service.
        if not CONF.service_available.cinder:
            skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
            raise cls.skipException(skip_msg)
        cls.client = cls.os.ec2api_client
        cls.zone = CONF.boto.aws_zone
    @test.attr(type='smoke')
    def test_create_get_delete(self):
        """Create a 1 GB volume, fetch it back, then delete it."""
        volume = self.client.create_volume(1, self.zone)
        cleanup_key = self.addResourceCleanUp(self.client.delete_volume,
                                              volume.id)
        self.assertIn(volume.status, self.valid_volume_status)
        fetched = self.client.get_all_volumes((volume.id,))
        self.assertEqual(1, len(fetched))
        self.assertTrue(compare_volumes(volume, fetched[0]))
        self.assertVolumeStatusWait(volume, "available")
        # Delete explicitly and drop the now-redundant cleanup handler.
        self.client.delete_volume(volume.id)
        self.cancelResourceCleanUp(cleanup_key)
    @test.attr(type='smoke')
    def test_create_volume_from_snapshot(self):
        """Snapshot a volume and restore a new volume from the snapshot."""
        volume = self.client.create_volume(1, self.zone)
        self.addResourceCleanUp(self.client.delete_volume, volume.id)
        self.assertVolumeStatusWait(volume, "available")
        snapshot = self.client.create_snapshot(volume.id)
        self.addResourceCleanUp(self.destroy_snapshot_wait, snapshot)
        self.assertSnapshotStatusWait(snapshot, "completed")
        restored = self.client.create_volume(1, self.zone, snapshot=snapshot)
        cleanup_key = self.addResourceCleanUp(restored.delete)
        self.assertVolumeStatusWait(restored, "available")
        restored.delete()
        self.cancelResourceCleanUp(cleanup_key)
tempest-2014.1.dev4108.gf22b6cc/tempest/thirdparty/__init__.py 0000664 0001750 0001750 00000000000 12332757070 024047 0 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/thirdparty/README.rst 0000664 0001750 0001750 00000002024 12332757070 023435 0 ustar chuck chuck 0000000 0000000 Tempest Field Guide to Third Party API tests
============================================
What are these tests?
---------------------
Third party tests are tests for non-native OpenStack APIs that are
shipped as part of OpenStack projects. If we ship an API, we are
required to ensure that it works.
For example, Nova Compute currently has EC2 API support in tree,
which should be tested as part of the normal process.
Why are these tests in tempest?
-------------------------------
If we ship an API in an OpenStack component, there should be tests in
tempest to exercise it in some way.
Scope of these tests
--------------------
Third party API testing should be limited to the functional testing of
third party API compliance. Complex scenarios should be avoided, and
instead exercised with the OpenStack API, unless the third party API
can't be tested without those scenarios.
Whenever possible third party API testing should use a client as close
to the third party API as possible. The point of these tests is API
validation.
tempest-2014.1.dev4108.gf22b6cc/tempest/hacking/ 0000775 0001750 0001750 00000000000 12332757136 021165 5 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/hacking/checks.py 0000664 0001750 0001750 00000007646 12332757070 023011 0 ustar chuck chuck 0000000 0000000 # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import re
import pep8
# Official OpenStack python client library names; importing any of these
# inside tempest/api tests is forbidden (check T102 below).
PYTHON_CLIENTS = ['cinder', 'glance', 'keystone', 'nova', 'swift', 'neutron',
                  'trove', 'ironic', 'savanna', 'heat', 'ceilometer',
                  'marconi', 'sahara']
# Matches e.g. "import novaclient" for any client listed above.
PYTHON_CLIENT_RE = re.compile('import (%s)client' % '|'.join(PYTHON_CLIENTS))
# Matches a test method definition ("def test...") at any indentation.
TEST_DEFINITION = re.compile(r'^\s*def test.*')
# Matches a setUpClass definition at any indentation.
SETUPCLASS_DEFINITION = re.compile(r'^\s*def setUpClass')
# Matches a "@...services(...)" decorator and captures its argument list.
SCENARIO_DECORATOR = re.compile(r'\s*@.*services\((.*)\)')
# Matches a vi/vim modeline comment, e.g. "# vim: ...".
VI_HEADER_RE = re.compile(r"^#\s+vim?:.+")
def import_no_clients_in_api(physical_line, filename):
    """Check for client imports from tempest/api tests

    T102: Cannot import OpenStack python clients
    """
    # Only api tests are restricted; everything else may import clients.
    if "tempest/api" not in filename:
        return
    match = PYTHON_CLIENT_RE.match(physical_line)
    if match:
        offset = physical_line.find(match.group(1))
        return (offset, ("T102: python clients import not allowed"
                         " in tempest/api/* tests"))
def scenario_tests_need_service_tags(physical_line, filename,
                                     previous_logical):
    """Check that scenario tests have service tags

    T104: Scenario tests require a services decorator
    """
    # Guard clauses: only test methods in scenario test modules matter.
    if 'tempest/scenario/test_' not in filename:
        return
    if not TEST_DEFINITION.match(physical_line):
        return
    if SCENARIO_DECORATOR.match(previous_logical):
        return
    return (physical_line.find('def'),
            "T104: Scenario tests require a service decorator")
def no_setupclass_for_unit_tests(physical_line, filename):
    """Forbid setUpClass in tempest unit tests (T105)."""
    # Honour inline "# noqa" suppressions.
    if pep8.noqa(physical_line):
        return
    if 'tempest/tests' not in filename:
        return
    if SETUPCLASS_DEFINITION.match(physical_line):
        return (physical_line.find('def'),
                "T105: setUpClass can not be used with unit tests")
def no_vi_headers(physical_line, line_number, lines):
    """Check for vi editor configuration in source files.

    By default vi modelines can only appear in the first or
    last 5 lines of a source file.

    T106
    """
    # NOTE(gilliard): line_number is 1-indexed
    in_head = line_number <= 5
    in_tail = line_number > len(lines) - 5
    if (in_head or in_tail) and VI_HEADER_RE.match(physical_line):
        return 0, "T106: Don't put vi configuration in source files"
def service_tags_not_in_module_path(physical_line, filename):
    """Check that a service tag isn't in the module path

    A service tag should only be added if the service name isn't already in
    the module path.

    T107
    """
    # NOTE(mtreinish) Scenario tests always need service tags, but subdirs are
    # created for services like heat which would cause false negatives for
    # those tests, so just exclude the scenario tests.
    if 'tempest/scenario' in filename:
        return
    decorator = SCENARIO_DECORATOR.match(physical_line)
    if not decorator:
        return
    # The module path is loop-invariant: compute it once up front.
    modulepath = os.path.split(filename)[0]
    for raw_service in decorator.group(1).split(','):
        service_name = raw_service.strip().strip("'")
        if service_name in modulepath:
            return (physical_line.find(service_name),
                    "T107: service tag should not be in path")
def factory(register):
    """Register every tempest hacking check with the flake8 framework."""
    all_checks = (
        import_no_clients_in_api,
        scenario_tests_need_service_tags,
        no_setupclass_for_unit_tests,
        no_vi_headers,
        service_tags_not_in_module_path,
    )
    for check in all_checks:
        register(check)
tempest-2014.1.dev4108.gf22b6cc/tempest/hacking/__init__.py 0000664 0001750 0001750 00000000000 12332757070 023261 0 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/auth.py 0000664 0001750 0001750 00000062536 12332757070 021105 0 ustar chuck chuck 0000000 0000000 # Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime
import exceptions
import re
import urlparse
from tempest import config
from tempest.services.identity.json import identity_client as json_id
from tempest.services.identity.v3.json import identity_client as json_v3id
from tempest.services.identity.v3.xml import identity_client as xml_v3id
from tempest.services.identity.xml import identity_client as xml_id
from tempest.openstack.common import log as logging
CONF = config.CONF
LOG = logging.getLogger(__name__)
class AuthProvider(object):
    """
    Provide authentication

    Abstract base class: subclasses supply _get_auth, _decorate_request,
    _fill_credentials, base_url and is_expired for a concrete identity
    backend. Auth data is cached in self.cache until it expires.
    """
    def __init__(self, credentials, client_type='tempest',
                 interface=None):
        """
        :param credentials: credentials for authentication
        :param client_type: 'tempest' or 'official'
        :param interface: 'json' or 'xml'. Applicable for tempest client only
        """
        credentials = self._convert_credentials(credentials)
        if self.check_credentials(credentials):
            self.credentials = credentials
        else:
            raise TypeError("Invalid credentials")
        self.client_type = client_type
        self.interface = interface
        # Tempest clients default to the JSON interface.
        if self.client_type == 'tempest' and self.interface is None:
            self.interface = 'json'
        # Cached (token, auth_data) tuple; None until first get_auth().
        self.cache = None
        # One-shot override used by set_alt_auth_data() for negative tests.
        self.alt_auth_data = None
        self.alt_part = None
    def _convert_credentials(self, credentials):
        # Support dict credentials for backwards compatibility
        if isinstance(credentials, dict):
            return get_credentials(**credentials)
        else:
            return credentials
    def __str__(self):
        return "Creds :{creds}, client type: {client_type}, interface: " \
               "{interface}, cached auth data: {cache}".format(
                   creds=self.credentials, client_type=self.client_type,
                   interface=self.interface, cache=self.cache
               )
    def _decorate_request(self, filters, method, url, headers=None, body=None,
                          auth_data=None):
        """
        Decorate request with authentication data
        """
        raise NotImplementedError
    def _get_auth(self):
        # Subclass hook: fetch fresh auth data, bypassing the cache.
        raise NotImplementedError
    def _fill_credentials(self, auth_data_body):
        # Subclass hook: backfill credential fields from the auth response.
        raise NotImplementedError
    def fill_credentials(self):
        """
        Fill credentials object with data from auth
        """
        auth_data = self.get_auth()
        self._fill_credentials(auth_data[1])
        return self.credentials
    @classmethod
    def check_credentials(cls, credentials):
        """
        Verify credentials are valid.
        """
        # NOTE: Credentials and get_credentials are defined elsewhere in
        # this module (outside this chunk).
        return isinstance(credentials, Credentials) and credentials.is_valid()
    @property
    def auth_data(self):
        return self.get_auth()
    @auth_data.deleter
    def auth_data(self):
        self.clear_auth()
    def get_auth(self):
        """
        Returns auth from cache if available, else auth first
        """
        if self.cache is None or self.is_expired(self.cache):
            self.set_auth()
        return self.cache
    def set_auth(self):
        """
        Forces setting auth, ignores cache if it exists.
        Refills credentials
        """
        self.cache = self._get_auth()
        self._fill_credentials(self.cache[1])
    def clear_auth(self):
        """
        Can be called to clear the access cache so that next request
        will fetch a new token and base_url.
        """
        self.cache = None
        self.credentials.reset()
    def is_expired(self, auth_data):
        # Subclass hook: True when the cached auth data must be refreshed.
        raise NotImplementedError
    def auth_request(self, method, url, headers=None, body=None, filters=None):
        """
        Obtains auth data and decorates a request with that.
        :param method: HTTP method of the request
        :param url: relative URL of the request (path)
        :param headers: HTTP headers of the request
        :param body: HTTP body in case of POST / PUT
        :param filters: select a base URL out of the catalog
        :returns a Tuple (url, headers, body)
        """
        orig_req = dict(url=url, headers=headers, body=body)
        auth_url, auth_headers, auth_body = self._decorate_request(
            filters, method, url, headers, body)
        auth_req = dict(url=auth_url, headers=auth_headers, body=auth_body)
        # Overwrite part of the request if it has been requested
        if self.alt_part is not None:
            if self.alt_auth_data is not None:
                alt_url, alt_headers, alt_body = self._decorate_request(
                    filters, method, url, headers, body,
                    auth_data=self.alt_auth_data)
                alt_auth_req = dict(url=alt_url, headers=alt_headers,
                                    body=alt_body)
                auth_req[self.alt_part] = alt_auth_req[self.alt_part]
            else:
                # If alt auth data is None, skip auth in the requested part
                auth_req[self.alt_part] = orig_req[self.alt_part]
            # Next auth request will be normal, unless otherwise requested
            self.reset_alt_auth_data()
        return auth_req['url'], auth_req['headers'], auth_req['body']
    def reset_alt_auth_data(self):
        """
        Configure auth provider to provide valid authentication data
        """
        self.alt_part = None
        self.alt_auth_data = None
    def set_alt_auth_data(self, request_part, auth_data):
        """
        Configure auth provider to provide alt authentication data
        on a part of the *next* auth_request. If credentials are None,
        set invalid data.
        :param request_part: request part to contain invalid auth: url,
                             headers, body
        :param auth_data: alternative auth_data from which to get the
                          invalid data to be injected
        """
        self.alt_part = request_part
        self.alt_auth_data = auth_data
    def base_url(self, filters, auth_data=None):
        """
        Extracts the base_url based on provided filters
        """
        raise NotImplementedError
class KeystoneAuthProvider(AuthProvider):
    """Shared Keystone machinery: token injection and URL normalisation."""
    # Cached tokens are refreshed this long before their actual expiry.
    token_expiry_threshold = datetime.timedelta(seconds=60)
    def __init__(self, credentials, client_type='tempest', interface=None):
        super(KeystoneAuthProvider, self).__init__(credentials, client_type,
                                                   interface)
        self.auth_client = self._auth_client()
    def _decorate_request(self, filters, method, url, headers=None, body=None,
                          auth_data=None):
        """Return (url, headers, body) with X-Auth-Token and base URL set."""
        if auth_data is None:
            auth_data = self.auth_data
        token, _ = auth_data
        base_url = self.base_url(filters=filters, auth_data=auth_data)
        # build authenticated request
        # returns new request, it does not touch the original values
        _headers = copy.deepcopy(headers) if headers is not None else {}
        _headers['X-Auth-Token'] = token
        if url is None or url == "":
            _url = base_url
        else:
            # Join base URL and url, and remove multiple contiguous slashes
            _url = "/".join([base_url, url])
            parts = [x for x in urlparse.urlparse(_url)]
            # parts[2] is the path component of the parsed URL.
            parts[2] = re.sub("/{2,}", "/", parts[2])
            _url = urlparse.urlunparse(parts)
        # no change to method or body
        return _url, _headers, body
    def _auth_client(self):
        # Subclass hook: return the token client for this API version.
        raise NotImplementedError
    def _auth_params(self):
        # Subclass hook: return kwargs for the token client's get_token().
        raise NotImplementedError
    def _get_auth(self):
        # Bypasses the cache
        if self.client_type == 'tempest':
            auth_func = getattr(self.auth_client, 'get_token')
            auth_params = self._auth_params()
            # returns token, auth_data
            token, auth_data = auth_func(**auth_params)
            return token, auth_data
        else:
            raise NotImplementedError
    def get_token(self):
        # auth_data is a (token, body) tuple; index 0 is the raw token.
        return self.auth_data[0]
class KeystoneV2AuthProvider(KeystoneAuthProvider):
    """Auth provider for the Keystone v2 token API."""
    # v2 token expiry timestamp format, e.g. "2014-05-12T20:00:00Z".
    EXPIRY_DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ'
    def _auth_client(self):
        """Return the v2 token client for the configured interface."""
        if self.client_type == 'tempest':
            if self.interface == 'json':
                return json_id.TokenClientJSON()
            else:
                return xml_id.TokenClientXML()
        else:
            raise NotImplementedError
    def _auth_params(self):
        """Return the kwargs passed to the v2 get_token call."""
        if self.client_type == 'tempest':
            return dict(
                user=self.credentials.username,
                password=self.credentials.password,
                tenant=self.credentials.tenant_name,
                auth_data=True)
        else:
            raise NotImplementedError
    def _fill_credentials(self, auth_data_body):
        """Backfill tenant/user fields from the v2 token response body."""
        tenant = auth_data_body['token']['tenant']
        user = auth_data_body['user']
        # Only fill fields the caller left unset; never overwrite.
        if self.credentials.tenant_name is None:
            self.credentials.tenant_name = tenant['name']
        if self.credentials.tenant_id is None:
            self.credentials.tenant_id = tenant['id']
        if self.credentials.username is None:
            self.credentials.username = user['name']
        if self.credentials.user_id is None:
            self.credentials.user_id = user['id']
    def base_url(self, filters, auth_data=None):
        """
        Filters can be:
        - service: compute, image, etc
        - region: the service region
        - endpoint_type: adminURL, publicURL, internalURL
        - api_version: replace catalog version with this
        - skip_path: take just the base URL
        """
        if auth_data is None:
            auth_data = self.auth_data
        token, _auth_data = auth_data
        service = filters.get('service')
        region = filters.get('region')
        endpoint_type = filters.get('endpoint_type', 'publicURL')
        if service is None:
            # NOTE(review): this module does "import exceptions" (the
            # Python 2 builtin module), which has no EndpointNotFound —
            # presumably "from tempest import exceptions" was intended;
            # as written this line would raise AttributeError. Confirm.
            raise exceptions.EndpointNotFound("No service provided")
        _base_url = None
        # Scan the v2 serviceCatalog for the requested service type.
        for ep in _auth_data['serviceCatalog']:
            if ep["type"] == service:
                for _ep in ep['endpoints']:
                    if region is not None and _ep['region'] == region:
                        _base_url = _ep.get(endpoint_type)
                if not _base_url:
                    # No region matching, use the first
                    _base_url = ep['endpoints'][0].get(endpoint_type)
                break
        if _base_url is None:
            raise exceptions.EndpointNotFound(service)
        parts = urlparse.urlparse(_base_url)
        if filters.get('api_version', None) is not None:
            # Swap the catalog's version segment for the requested one,
            # keeping whatever path followed it.
            path = "/" + filters['api_version']
            noversion_path = "/".join(parts.path.split("/")[2:])
            if noversion_path != "":
                path += "/" + noversion_path
            _base_url = _base_url.replace(parts.path, path)
        if filters.get('skip_path', None) is not None:
            _base_url = _base_url.replace(parts.path, "/")
        return _base_url
    def is_expired(self, auth_data):
        """True when the cached v2 token is (nearly) expired."""
        _, access = auth_data
        expiry = datetime.datetime.strptime(access['token']['expires'],
                                            self.EXPIRY_DATE_FORMAT)
        # Refresh slightly early (token_expiry_threshold) to avoid races.
        return expiry - self.token_expiry_threshold <= \
            datetime.datetime.utcnow()
class KeystoneV3AuthProvider(KeystoneAuthProvider):
EXPIRY_DATE_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
def _auth_client(self):
if self.client_type == 'tempest':
if self.interface == 'json':
return json_v3id.V3TokenClientJSON()
else:
return xml_v3id.V3TokenClientXML()
else:
raise NotImplementedError
def _auth_params(self):
if self.client_type == 'tempest':
return dict(
user=self.credentials.username,
password=self.credentials.password,
tenant=self.credentials.tenant_name,
domain=self.credentials.user_domain_name,
auth_data=True)
else:
raise NotImplementedError
    def _fill_credentials(self, auth_data_body):
        """Backfill project/domain/user fields from the v3 token body.

        Only fields the caller left as None are filled; existing values
        are never overwritten.
        """
        # project or domain, depending on the scope
        project = auth_data_body.get('project', None)
        domain = auth_data_body.get('domain', None)
        # user is always there
        user = auth_data_body['user']
        # Set project fields
        if project is not None:
            if self.credentials.project_name is None:
                self.credentials.project_name = project['name']
            if self.credentials.project_id is None:
                self.credentials.project_id = project['id']
            if self.credentials.project_domain_id is None:
                self.credentials.project_domain_id = project['domain']['id']
            if self.credentials.project_domain_name is None:
                self.credentials.project_domain_name = \
                    project['domain']['name']
        # Set domain fields
        if domain is not None:
            if self.credentials.domain_id is None:
                self.credentials.domain_id = domain['id']
            if self.credentials.domain_name is None:
                self.credentials.domain_name = domain['name']
        # Set user fields
        if self.credentials.username is None:
            self.credentials.username = user['name']
        if self.credentials.user_id is None:
            self.credentials.user_id = user['id']
        if self.credentials.user_domain_id is None:
            self.credentials.user_domain_id = user['domain']['id']
        if self.credentials.user_domain_name is None:
            self.credentials.user_domain_name = user['domain']['name']
def base_url(self, filters, auth_data=None):
"""
Filters can be:
- service: compute, image, etc
- region: the service region
- endpoint_type: adminURL, publicURL, internalURL
- api_version: replace catalog version with this
- skip_path: take just the base URL
"""
if auth_data is None:
auth_data = self.auth_data
token, _auth_data = auth_data
service = filters.get('service')
region = filters.get('region')
endpoint_type = filters.get('endpoint_type', 'public')
if service is None:
raise exceptions.EndpointNotFound("No service provided")
if 'URL' in endpoint_type:
endpoint_type = endpoint_type.replace('URL', '')
_base_url = None
catalog = _auth_data['catalog']
# Select entries with matching service type
service_catalog = [ep for ep in catalog if ep['type'] == service]
if len(service_catalog) > 0:
service_catalog = service_catalog[0]['endpoints']
else:
# No matching service
raise exceptions.EndpointNotFound(service)
# Filter by endpoint type (interface)
filtered_catalog = [ep for ep in service_catalog if
ep['interface'] == endpoint_type]
if len(filtered_catalog) == 0:
# No matching type, keep all and try matching by region at least
filtered_catalog = service_catalog
# Filter by region
filtered_catalog = [ep for ep in filtered_catalog if
ep['region'] == region]
if len(filtered_catalog) == 0:
# No matching region, take the first endpoint
filtered_catalog = [service_catalog[0]]
# There should be only one match. If not take the first.
_base_url = filtered_catalog[0].get('url', None)
if _base_url is None:
raise exceptions.EndpointNotFound(service)
parts = urlparse.urlparse(_base_url)
if filters.get('api_version', None) is not None:
path = "/" + filters['api_version']
noversion_path = "/".join(parts.path.split("/")[2:])
if noversion_path != "":
path += "/" + noversion_path
_base_url = _base_url.replace(parts.path, path)
if filters.get('skip_path', None) is not None:
_base_url = _base_url.replace(parts.path, "/")
return _base_url
def is_expired(self, auth_data):
_, access = auth_data
expiry = datetime.datetime.strptime(access['expires_at'],
self.EXPIRY_DATE_FORMAT)
return expiry - self.token_expiry_threshold <= \
datetime.datetime.utcnow()
def get_default_credentials(credential_type, fill_in=True):
    """Return credentials of the given type from the tempest configuration.

    Thin convenience wrapper around get_credentials() for the common case
    of reading a pre-defined credential set for the configured auth_version.
    """
    return get_credentials(credential_type=credential_type, fill_in=fill_in)
def get_credentials(credential_type=None, fill_in=True, **kwargs):
    """
    Builds a credentials object based on the configured auth_version

    :param credential_type (string): requests credentials from tempest
                                     configuration file. Valid values are
                                     defined in Credentials.TYPE.
    :param kwargs (dict): take into account only if credential_type is
                          not specified or None. Dict of credential key/value
                          pairs

    Examples:

        Returns credentials from the provided parameters:
        >>> get_credentials(username='foo', password='bar')

        Returns credentials from tempest configuration:
        >>> get_credentials(credential_type='user')
    """
    # Dispatch table: auth_version -> (credentials class, provider class)
    version_map = {
        'v2': (KeystoneV2Credentials, KeystoneV2AuthProvider),
        'v3': (KeystoneV3Credentials, KeystoneV3AuthProvider),
    }
    try:
        credential_class, auth_provider_class = \
            version_map[CONF.identity.auth_version]
    except KeyError:
        raise exceptions.InvalidConfiguration('Unsupported auth version')
    if credential_type is None:
        creds = credential_class(**kwargs)
    else:
        creds = credential_class.get_default(credential_type)
    # Fill in the credentials fields that were not specified
    if fill_in:
        creds = auth_provider_class(creds).fill_credentials()
    return creds
class Credentials(object):
    """
    Set of credentials for accessing OpenStack services

    ATTRIBUTES: list of valid class attributes representing credentials.

    TYPES: types of credentials available in the configuration file.
           For each key there's a tuple (section, prefix) to match the
           configuration options.
    """

    ATTRIBUTES = []
    TYPES = {
        'identity_admin': ('identity', 'admin'),
        'compute_admin': ('compute_admin', None),
        'user': ('identity', None),
        'alt_user': ('identity', 'alt')
    }

    def __init__(self, **kwargs):
        """
        Enforce the available attributes at init time (only).
        Additional attributes can still be set afterwards if tests need
        to do so.
        """
        self._initial = kwargs
        self._apply_credentials(kwargs)

    def _apply_credentials(self, attr):
        # Only keys declared in ATTRIBUTES may be set through this path;
        # anything else is a caller error.
        for key in attr:
            if key in self.ATTRIBUTES:
                setattr(self, key, attr[key])
            else:
                raise exceptions.InvalidCredentials

    def __str__(self):
        """
        Represent only attributes included in self.ATTRIBUTES
        """
        _repr = dict((k, getattr(self, k)) for k in self.ATTRIBUTES)
        return str(_repr)

    def __eq__(self, other):
        """
        Credentials are equal if attributes in self.ATTRIBUTES are equal
        """
        return str(self) == str(other)

    def __getattr__(self, key):
        # If an attribute is set, __getattr__ is not invoked
        # If an attribute is not set, and it is a known one, return None
        if key in self.ATTRIBUTES:
            return None
        else:
            raise AttributeError

    def __delitem__(self, key):
        # For backwards compatibility, support dict behaviour
        if key in self.ATTRIBUTES:
            delattr(self, key)
        else:
            raise AttributeError

    def get(self, item, default=None):
        """
        Dict-like accessor kept for backward compatibility.

        :param item: attribute name to look up
        :param default: value returned when the attribute is unknown.
            Now optional (defaults to None), matching dict.get().
        """
        try:
            return getattr(self, item)
        except AttributeError:
            return default

    @classmethod
    def get_default(cls, credentials_type):
        """Return validated credentials of ``credentials_type`` from config.

        :raises exceptions.InvalidCredentials: unknown credentials_type
        :raises exceptions.InvalidConfiguration: configured creds incomplete
        """
        if credentials_type not in cls.TYPES:
            raise exceptions.InvalidCredentials()
        creds = cls._get_default(credentials_type)
        if not creds.is_valid():
            raise exceptions.InvalidConfiguration()
        return creds

    @classmethod
    def _get_default(cls, credentials_type):
        # Subclasses build credentials from the relevant config section.
        raise NotImplementedError

    def is_valid(self):
        # Subclasses define the minimum viable attribute combination.
        raise NotImplementedError

    def reset(self):
        # First delete all known attributes
        for key in self.ATTRIBUTES:
            if getattr(self, key) is not None:
                delattr(self, key)
        # Then re-apply initial setup
        self._apply_credentials(self._initial)
class KeystoneV2Credentials(Credentials):
    """Credentials for the Keystone Identity v2 API."""

    CONF_ATTRIBUTES = ['username', 'password', 'tenant_name']
    ATTRIBUTES = ['user_id', 'tenant_id']
    ATTRIBUTES.extend(CONF_ATTRIBUTES)

    @classmethod
    def _get_default(cls, credentials_type='user'):
        # Each credential type maps to a config section plus an optional
        # option-name prefix (e.g. 'admin' -> 'admin_username').
        section, prefix = cls.TYPES[credentials_type]
        conf_section = getattr(CONF, section)
        if prefix is None:
            params = dict((attr, getattr(conf_section, attr))
                          for attr in cls.CONF_ATTRIBUTES)
        else:
            params = dict((attr, getattr(conf_section, prefix + "_" + attr))
                          for attr in cls.CONF_ATTRIBUTES)
        return cls(**params)

    def is_valid(self):
        """
        Minimum set of valid credentials, are username and password.
        Tenant is optional.
        """
        return self.username is not None and self.password is not None
class KeystoneV3Credentials(KeystoneV2Credentials):
    """
    Credentials suitable for the Keystone Identity V3 API
    """
    CONF_ATTRIBUTES = ['domain_name', 'password', 'tenant_name', 'username']
    ATTRIBUTES = ['project_domain_id', 'project_domain_name', 'project_id',
                  'project_name', 'tenant_id', 'tenant_name', 'user_domain_id',
                  'user_domain_name', 'user_id']
    ATTRIBUTES.extend(CONF_ATTRIBUTES)

    def __init__(self, **kwargs):
        """
        If domain is not specified, load the one configured for the
        identity manager.
        """
        domain_fields = set(x for x in self.ATTRIBUTES if 'domain' in x)
        if not domain_fields.intersection(kwargs.keys()):
            kwargs['user_domain_name'] = CONF.identity.admin_domain_name
        super(KeystoneV3Credentials, self).__init__(**kwargs)

    def __setattr__(self, key, value):
        # Keep tenant_*/project_* aliases and the user/project domain
        # fields in sync. Mirrored writes go through the parent
        # __setattr__, which sets directly and does not re-enter this
        # method, so no recursion occurs.
        parent = super(KeystoneV3Credentials, self)
        # for tenant_* set both project and tenant
        if key == 'tenant_id':
            parent.__setattr__('project_id', value)
        elif key == 'tenant_name':
            parent.__setattr__('project_name', value)
        # for project_* set both project and tenant
        if key == 'project_id':
            parent.__setattr__('tenant_id', value)
        elif key == 'project_name':
            parent.__setattr__('tenant_name', value)
        # for *_domain_* set both user and project if not set yet
        if key == 'user_domain_id':
            if self.project_domain_id is None:
                parent.__setattr__('project_domain_id', value)
        if key == 'project_domain_id':
            if self.user_domain_id is None:
                parent.__setattr__('user_domain_id', value)
        if key == 'user_domain_name':
            if self.project_domain_name is None:
                parent.__setattr__('project_domain_name', value)
        if key == 'project_domain_name':
            if self.user_domain_name is None:
                parent.__setattr__('user_domain_name', value)
        # support domain_name coming from config
        if key == 'domain_name':
            # a bare domain_name applies to both user and project domains
            parent.__setattr__('user_domain_name', value)
            parent.__setattr__('project_domain_name', value)
        # finally trigger default behaviour for all attributes
        parent.__setattr__(key, value)

    def is_valid(self):
        """
        Valid combinations of v3 credentials (excluding token, scope)
        - User id, password (optional domain)
        - User name, password and its domain id/name
        For the scope, valid combinations are:
        - None
        - Project id (optional domain)
        - Project name and its domain id/name
        """
        valid_user_domain = any(
            [self.user_domain_id is not None,
             self.user_domain_name is not None])
        valid_project_domain = any(
            [self.project_domain_id is not None,
             self.project_domain_name is not None])
        valid_user = any(
            [self.user_id is not None,
             self.username is not None and valid_user_domain])
        valid_project = any(
            [self.project_name is None and self.project_id is None,
             self.project_id is not None,
             self.project_name is not None and valid_project_domain])
        return all([self.password is not None, valid_user, valid_project])
tempest-2014.1.dev4108.gf22b6cc/tempest/cmd/ 0000775 0001750 0001750 00000000000 12332757136 020324 5 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/cmd/verify_tempest_config.py 0000775 0001750 0001750 00000032774 12332757070 025305 0 ustar chuck chuck 0000000 0000000 #!/usr/bin/env python
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import argparse
import json
import os
import sys
import urlparse
import httplib2
from six.moves import configparser
from tempest import clients
from tempest import config
CONF = config.CONF
# Plain httplib2 client used for unauthenticated version-discovery requests.
RAW_HTTP = httplib2.Http()
# Set in main() (only with --update): open handle on the tempest config file.
CONF_FILE = None
# Where the rewritten config is emitted; stdout unless -o/--output is given.
OUTFILE = sys.stdout
def _get_config_file():
default_config_dir = os.path.join(os.path.abspath(
os.path.dirname(os.path.dirname(__file__))), "etc")
default_config_file = "tempest.conf"
conf_dir = os.environ.get('TEMPEST_CONFIG_DIR', default_config_dir)
conf_file = os.environ.get('TEMPEST_CONFIG', default_config_file)
path = os.path.join(conf_dir, conf_file)
fd = open(path, 'rw')
return fd
def change_option(option, group, value):
    """Rewrite the config read from CONF_FILE with ``option`` set to
    ``value`` in section ``group``, emitting the result to OUTFILE.
    """
    parser = configparser.SafeConfigParser()
    # Preserve option-name case instead of lower-casing everything.
    parser.optionxform = str
    parser.readfp(CONF_FILE)
    if not parser.has_section(group):
        parser.add_section(group)
    parser.set(group, option, str(value))
    parser.write(OUTFILE)
def print_and_or_update(option, group, value, update):
    """Report a config option that should change; apply it when update is set."""
    msg = 'Config option %s in group %s should be changed to: %s'
    print(msg % (option, group, value))
    if update:
        change_option(option, group, value)
def verify_glance_api_versions(os, update):
    """Check the configured glance API feature flags against the endpoint."""
    __, versions = os.image_client.get_versions()
    # v1 counts as present when the endpoint reports either v1.0 or v1.1.
    v1_available = 'v1.1' in versions or 'v1.0' in versions
    if CONF.image_feature_enabled.api_v1 != v1_available:
        print_and_or_update('api_v1', 'image_feature_enabled',
                            not CONF.image_feature_enabled.api_v1, update)
    v2_available = 'v2.0' in versions
    if CONF.image_feature_enabled.api_v2 != v2_available:
        print_and_or_update('api_v2', 'image_feature_enabled',
                            not CONF.image_feature_enabled.api_v2, update)
def _get_unversioned_endpoint(base_url):
endpoint_parts = urlparse.urlparse(base_url)
endpoint = endpoint_parts.scheme + '://' + endpoint_parts.netloc
return endpoint
def _get_api_versions(os, service):
    """Query a service's unversioned endpoint for its advertised versions.

    :param os: client manager providing per-service REST clients
    :param service: 'nova', 'keystone' or 'cinder'
    :returns: list of version id strings (e.g. 'v2.0')
    """
    client_dict = {
        'nova': os.servers_client,
        'keystone': os.identity_client,
        'cinder': os.volumes_client,
    }
    client = client_dict[service]
    # Drop the version component from the client's endpoint, query the
    # bare host for the version document, then restore the client path.
    client.skip_path()
    endpoint = _get_unversioned_endpoint(client.base_url)
    __, body = RAW_HTTP.request(endpoint, 'GET')
    client.reset_path()
    body = json.loads(body)
    # Keystone nests its version list one level deeper than nova/cinder.
    if service == 'keystone':
        version_list = body['versions']['values']
    else:
        version_list = body['versions']
    return [v['id'] for v in version_list]
def verify_keystone_api_versions(os, update):
    """Check the configured identity API feature flags against the endpoint."""
    versions = _get_api_versions(os, 'keystone')
    for opt, version in (('api_v2', 'v2.0'), ('api_v3', 'v3.0')):
        configured = getattr(CONF.identity_feature_enabled, opt)
        if configured != (version in versions):
            print_and_or_update(opt, 'identity_feature_enabled',
                                not configured, update)
def verify_nova_api_versions(os, update):
    """Check the configured compute v3 feature flag against the endpoint."""
    versions = _get_api_versions(os, 'nova')
    v3_available = 'v3.0' in versions
    if CONF.compute_feature_enabled.api_v3 != v3_available:
        print_and_or_update('api_v3', 'compute_feature_enabled',
                            not CONF.compute_feature_enabled.api_v3, update)
def verify_cinder_api_versions(os, update):
    """Check the configured volume API feature flags against the endpoint."""
    versions = _get_api_versions(os, 'cinder')
    for opt, version in (('api_v1', 'v1.0'), ('api_v2', 'v2.0')):
        configured = getattr(CONF.volume_feature_enabled, opt)
        if configured != (version in versions):
            print_and_or_update(opt, 'volume_feature_enabled',
                                not configured, update)
def get_extension_client(os, service):
    """Return the tempest extensions client for ``service``.

    Exits the process when no extensions client exists for the service.
    """
    extensions_client = {
        'nova': os.extensions_client,
        'nova_v3': os.extensions_v3_client,
        'cinder': os.volumes_extension_client,
        'neutron': os.network_client,
        'swift': os.account_client,
    }
    try:
        return extensions_client[service]
    except KeyError:
        print('No tempest extensions client for %s' % service)
        exit(1)
def get_enabled_extensions(service):
    """Return the configured extension list for ``service``.

    Exits the process when the service has no extensions list option.
    """
    extensions_options = {
        'nova': CONF.compute_feature_enabled.api_extensions,
        'nova_v3': CONF.compute_feature_enabled.api_v3_extensions,
        'cinder': CONF.volume_feature_enabled.api_extensions,
        'neutron': CONF.network_feature_enabled.api_extensions,
        'swift': CONF.object_storage_feature_enabled.discoverable_apis,
    }
    try:
        return extensions_options[service]
    except KeyError:
        print('No supported extensions list option for %s' % service)
        exit(1)
def verify_extensions(os, service, results):
    """Record, per extension, whether the config agrees with the live API.

    :param os: client manager used to reach the service
    :param service: one of 'nova', 'nova_v3', 'cinder', 'neutron', 'swift'
    :param results: dict accumulated across services; updated in place
    :returns: results, with results[service][extension] True when the
        configured state matches the API and False when it disagrees
    """
    extensions_client = get_extension_client(os, service)
    __, resp = extensions_client.list_extensions()
    if isinstance(resp, dict):
        # Neutron's extension 'name' field is not a single word (it has
        # spaces in the string). Since that can't be used for a list option
        # the api_extension option in the network-feature-enabled group uses
        # alias instead of name.
        if service == 'neutron':
            extensions = map(lambda x: x['alias'], resp['extensions'])
        elif service == 'swift':
            # Remove Swift general information from extensions list
            resp.pop('swift')
            extensions = resp.keys()
        else:
            extensions = map(lambda x: x['name'], resp['extensions'])
    else:
        extensions = map(lambda x: x['name'], resp)
    if not results.get(service):
        results[service] = {}
    extensions_opt = get_enabled_extensions(service)
    if extensions_opt[0] == 'all':
        # 'all' cannot be checked item-by-item; just record the full list
        # the service reported, for display_results to use.
        results[service]['extensions'] = extensions
        return results
    # Verify that all configured extensions are actually enabled
    for extension in extensions_opt:
        results[service][extension] = extension in extensions
    # Verify that there aren't additional extensions enabled that aren't
    # specified in the config list
    for extension in extensions:
        if extension not in extensions_opt:
            results[service][extension] = False
    return results
def display_results(results, update, replace):
    """Print mismatches found by verify_extensions and optionally write
    corrected extension lists back to the config.

    :param results: dict produced by verify_extensions
    :param update: when True, rewrite the relevant config options
    :param replace: when True, replace a configured 'all' with the full
        list of extensions the API actually reported
    """
    # Maps each service key to the config group holding its extension
    # option (group names here use dashes, not underscores).
    update_dict = {
        'swift': 'object-storage-feature-enabled',
        'nova': 'compute-feature-enabled',
        'nova_v3': 'compute-feature-enabled',
        'cinder': 'volume-feature-enabled',
        'neutron': 'network-feature-enabled',
    }
    for service in results:
        # If all extensions are specified as being enabled there is no way to
        # verify this so we just assume this to be true
        if results[service].get('extensions'):
            if replace:
                output_list = results[service].get('extensions')
            else:
                output_list = ['all']
        else:
            extension_list = get_enabled_extensions(service)
            output_list = []
            for extension in results[service]:
                if not results[service][extension]:
                    if extension in extension_list:
                        # Configured, but the API did not report it.
                        print("%s extension: %s should not be included in the "
                              "list of enabled extensions" % (service,
                                                              extension))
                    else:
                        # Reported by the API, but missing from the config.
                        print("%s extension: %s should be included in the list"
                              " of enabled extensions" % (service, extension))
                        output_list.append(extension)
                else:
                    output_list.append(extension)
        if update:
            # Sort List
            output_list.sort()
            # Convert list to a string
            output_string = ', '.join(output_list)
            if service == 'swift':
                change_option('discoverable_apis', update_dict[service],
                              output_string)
            elif service == 'nova_v3':
                change_option('api_v3_extensions', update_dict[service],
                              output_string)
            else:
                change_option('api_extensions', update_dict[service],
                              output_string)
def check_service_availability(os, update):
    """Cross-check every service's catalog_type against the keystone catalog.

    Reports (and, with ``update``, rewrites) service_available options that
    disagree with the endpoints actually registered in keystone.

    :returns: list of code names of the services found to be available
    """
    services = []
    avail_services = []
    # Maps config group names to the code name used in [service_available].
    codename_match = {
        'volume': 'cinder',
        'network': 'neutron',
        'image': 'glance',
        'object_storage': 'swift',
        'compute': 'nova',
        'orchestration': 'heat',
        'metering': 'ceilometer',
        'telemetry': 'ceilometer',
        'data_processing': 'sahara',
        'baremetal': 'ironic',
        'identity': 'keystone',
        'queuing': 'marconi',
        'database': 'trove'
    }
    # Get catalog list for endpoints to use for validation
    __, endpoints = os.endpoints_client.list_endpoints()
    for endpoint in endpoints:
        __, service = os.service_client.get_service(endpoint['service_id'])
        services.append(service['type'])
    # Pull all catalog types from config file and compare against endpoint list
    # NOTE(review): dir(CONF._config) reaches into oslo.config internals to
    # enumerate config groups -- confirm before upgrading oslo.config.
    for cfgname in dir(CONF._config):
        cfg = getattr(CONF, cfgname)
        catalog_type = getattr(cfg, 'catalog_type', None)
        if not catalog_type:
            continue
        else:
            if cfgname == 'identity':
                # Keystone is a required service for tempest
                continue
            if catalog_type not in services:
                # Configured catalog type has no matching endpoint.
                if getattr(CONF.service_available, codename_match[cfgname]):
                    print('Endpoint type %s not found either disable service '
                          '%s or fix the catalog_type in the config file' % (
                              catalog_type, codename_match[cfgname]))
                    if update:
                        change_option(codename_match[cfgname],
                                      'service_available', False)
            else:
                # Endpoint exists; flag configs that mark it unavailable.
                if not getattr(CONF.service_available,
                               codename_match[cfgname]):
                    print('Endpoint type %s is available, service %s should be'
                          ' set as available in the config file.' % (
                              catalog_type, codename_match[cfgname]))
                    if update:
                        change_option(codename_match[cfgname],
                                      'service_available', True)
                else:
                    avail_services.append(codename_match[cfgname])
    return avail_services
def parse_args():
    """Build the command-line parser and evaluate sys.argv."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-u', '--update', action='store_true',
                            help='Update the config file with results from api '
                                 'queries. This assumes whatever is set in the '
                                 'config file is incorrect. In the case of '
                                 'endpoint checks where it could either be the '
                                 'incorrect catalog type or the service available '
                                 'option the service available option is assumed '
                                 'to be incorrect and is thus changed')
    arg_parser.add_argument('-o', '--output',
                            help="Output file to write an updated config file to. "
                                 "This has to be a separate file from the "
                                 "original config file. If one isn't specified "
                                 "with -u the new config file will be printed to "
                                 "STDOUT")
    arg_parser.add_argument('-r', '--replace-ext', action='store_true',
                            help="If specified the all option will be replaced "
                                 "with a full list of extensions")
    return arg_parser.parse_args()
def main():
    """Entry point: verify (and optionally update) the tempest config."""
    print('Running config verification...')
    opts = parse_args()
    update = opts.update
    replace = opts.replace_ext
    global CONF_FILE
    global OUTFILE
    if update:
        CONF_FILE = _get_config_file()
        if opts.output:
            OUTFILE = open(opts.output, 'w+')
    # Admin credentials are needed to list keystone endpoints/services.
    os = clients.ComputeAdminManager(interface='json')
    services = check_service_availability(os, update)
    results = {}
    for service in ['nova', 'nova_v3', 'cinder', 'neutron', 'swift']:
        # nova_v3 shares nova's availability; skip anything not deployed.
        if service == 'nova_v3' and 'nova' not in services:
            continue
        elif service not in services:
            continue
        results = verify_extensions(os, service, results)
    verify_keystone_api_versions(os, update)
    verify_glance_api_versions(os, update)
    verify_nova_api_versions(os, update)
    verify_cinder_api_versions(os, update)
    display_results(results, update, replace)
    if CONF_FILE:
        CONF_FILE.close()
    # NOTE(review): this closes OUTFILE even when it is still sys.stdout
    # (no -o/--output given) -- presumably tolerated; confirm.
    OUTFILE.close()
# Script entry point: run the verification when executed directly.
if __name__ == "__main__":
    main()
tempest-2014.1.dev4108.gf22b6cc/tempest/cmd/__init__.py 0000664 0001750 0001750 00000000000 12332757070 022420 0 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/__init__.py 0000664 0001750 0001750 00000000000 12332757070 021655 0 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/test_discover/ 0000775 0001750 0001750 00000000000 12332757136 022436 5 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/test_discover/test_discover.py 0000664 0001750 0001750 00000002412 12332757070 025661 0 ustar chuck chuck 0000000 0000000 # Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import sys
if sys.version_info >= (2, 7):
import unittest
else:
import unittest2 as unittest
def load_tests(loader, tests, pattern):
    """Discover tempest tests from the standard test directories.

    Implements the unittest ``load_tests`` protocol: every discovered test
    from the api/cli/scenario/thirdparty trees is collected into one suite.
    """
    suite = unittest.TestSuite()
    here = os.path.dirname(os.path.abspath(__file__))
    base_path = os.path.split(os.path.split(here)[0])[0]
    test_dirs = ['./tempest/api', './tempest/cli', './tempest/scenario',
                 './tempest/thirdparty']
    for test_dir in test_dirs:
        discover_kwargs = {'top_level_dir': base_path}
        # Only forward a pattern when one was actually supplied, so the
        # loader's default pattern applies otherwise.
        if pattern:
            discover_kwargs['pattern'] = pattern
        suite.addTests(loader.discover(test_dir, **discover_kwargs))
    return suite
tempest-2014.1.dev4108.gf22b6cc/tempest/test_discover/__init__.py 0000664 0001750 0001750 00000000000 12332757070 024532 0 ustar chuck chuck 0000000 0000000 tempest-2014.1.dev4108.gf22b6cc/tempest/test.py 0000664 0001750 0001750 00000056062 12332757070 021120 0 ustar chuck chuck 0000000 0000000 # Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import atexit
import functools
import json
import os
import re
import sys
import time
import urllib
import uuid
import fixtures
import testresources
import testscenarios
import testtools
from tempest import clients
import tempest.common.generator.valid_generator as valid
from tempest.common import isolated_creds
from tempest import config
from tempest import exceptions
from tempest.openstack.common import importutils
from tempest.openstack.common import log as logging
LOG = logging.getLogger(__name__)
CONF = config.CONF
# All the successful HTTP status codes from RFC 2616
HTTP_SUCCESS = (200, 201, 202, 203, 204, 205, 206)
def attr(*args, **kwargs):
    """A decorator which applies the testtools attr decorator

    This decorator applies the testtools.testcase.attr if it is in the list of
    attributes to testtools we want to apply.
    """
    def decorator(f):
        attr_type = kwargs.get('type')
        if isinstance(attr_type, str):
            f = testtools.testcase.attr(attr_type)(f)
            # 'smoke' tests are implicitly part of the gate as well.
            if attr_type == 'smoke':
                f = testtools.testcase.attr('gate')(f)
        elif isinstance(attr_type, list):
            for tag in attr_type:
                f = testtools.testcase.attr(tag)(f)
                if tag == 'smoke':
                    f = testtools.testcase.attr('gate')(f)
        return f
    return decorator
def safe_setup(f):
    """A decorator used to wrap the setUpClass for cleaning up resources
    when setUpClass failed.
    """
    def decorator(cls):
        try:
            f(cls)
        except Exception as se:
            etype, value, trace = sys.exc_info()
            LOG.exception("setUpClass failed: %s" % se)
            # Best-effort cleanup: teardown failures are logged, not raised,
            # so the original setUpClass error is what propagates.
            try:
                cls.tearDownClass()
            except Exception as te:
                LOG.exception("tearDownClass failed: %s" % te)
            try:
                # Re-raise the original exception with its traceback
                # (Python 2 three-expression raise syntax).
                raise etype(value), None, trace
            finally:
                del trace  # for avoiding circular refs
    return decorator
def services(*args, **kwargs):
    """A decorator used to set an attr for each service used in a test case

    This decorator applies a testtools attr for each service that gets
    exercised by a test case.
    """
    service_list = {
        'compute': CONF.service_available.nova,
        'image': CONF.service_available.glance,
        'baremetal': CONF.service_available.ironic,
        'volume': CONF.service_available.cinder,
        'orchestration': CONF.service_available.heat,
        # NOTE(mtreinish) nova-network will provide networking functionality
        # if neutron isn't available, so always set to True.
        'network': True,
        'identity': True,
        'object_storage': CONF.service_available.swift,
        'dashboard': CONF.service_available.horizon,
    }

    def decorator(f):
        # Reject unknown service tags up-front, at decoration time.
        unknown = [s for s in args if s not in service_list]
        if unknown:
            raise exceptions.InvalidServiceTag('%s is not a valid service'
                                               % unknown[0])
        attr(type=list(args))(f)

        @functools.wraps(f)
        def wrapper(self, *func_args, **func_kwargs):
            # Skip at run time when any tagged service is unavailable.
            for service in args:
                if not service_list[service]:
                    msg = 'Skipped because the %s service is not available' % (
                        service)
                    raise testtools.TestCase.skipException(msg)
            return f(self, *func_args, **func_kwargs)
        return wrapper
    return decorator
def stresstest(*args, **kwargs):
    """Add stress test decorator

    For all functions with this decorator a attr stress will be
    set automatically.

    @param class_setup_per: allowed values are application, process, action
           ``application``: once in the stress job lifetime
           ``process``: once in the worker process lifetime
           ``action``: on each action
    @param allow_inheritance: allows inheritance of this attribute
    """
    def decorator(f):
        f.st_class_setup_per = kwargs.get('class_setup_per', 'process')
        f.st_allow_inheritance = kwargs.get('allow_inheritance', False)
        attr(type='stress')(f)
        return f
    return decorator
def skip_because(*args, **kwargs):
    """A decorator useful to skip tests hitting known bugs

    @param bug: bug number causing the test to skip
    @param condition: optional condition to be True for the skip to have place
    @param interface: skip the test if it is the same as self._interface
    """
    def decorator(f):
        @functools.wraps(f)
        def wrapper(self, *func_args, **func_kwargs):
            # A bare @skip_because(bug=...) always skips; condition takes
            # precedence over interface when both are supplied.
            if "condition" in kwargs:
                skip = kwargs["condition"] is True
            elif "interface" in kwargs:
                skip = kwargs["interface"] == self._interface
            else:
                skip = True
            if "bug" in kwargs and skip is True:
                if not kwargs['bug'].isdigit():
                    raise ValueError('bug must be a valid bug number')
                msg = "Skipped until Bug: %s is resolved." % kwargs["bug"]
                raise testtools.TestCase.skipException(msg)
            return f(self, *func_args, **func_kwargs)
        return wrapper
    return decorator
def requires_ext(*args, **kwargs):
    """A decorator to skip tests if an extension is not enabled

    @param extension
    @param service
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(*func_args, **func_kwargs):
            service = kwargs['service']
            extension = kwargs['extension']
            if not is_extension_enabled(extension, service):
                msg = "Skipped because %s extension: %s is not enabled" % (
                    service, extension)
                raise testtools.TestCase.skipException(msg)
            return func(*func_args, **func_kwargs)
        return wrapper
    return decorator
def is_extension_enabled(extension_name, service):
    """A function that will check the list of enabled extensions from config

    Returns True when the service's configured extension list is the
    wildcard 'all' or explicitly contains ``extension_name``.
    """
    config_dict = {
        'compute': CONF.compute_feature_enabled.api_extensions,
        'compute_v3': CONF.compute_feature_enabled.api_v3_extensions,
        'volume': CONF.volume_feature_enabled.api_extensions,
        'network': CONF.network_feature_enabled.api_extensions,
        'object': CONF.object_storage_feature_enabled.discoverable_apis,
    }
    enabled = config_dict[service]
    return enabled[0] == 'all' or extension_name in enabled
# Classes registered in BaseTestCase.setUp and discarded again in
# BaseTestCase.tearDownClass; anything left here at interpreter exit
# skipped the super() teardown chain.
at_exit_set = set()


def validate_tearDownClass():
    # atexit hook: report test classes whose tearDownClass never ran
    # (they never discarded themselves from at_exit_set).
    if at_exit_set:
        LOG.error(
            "tearDownClass does not call the super's "
            "tearDownClass in these classes: \n"
            + str(at_exit_set))

atexit.register(validate_tearDownClass)
# Build a common test base class: on Python 2.7+ the stdlib unittest is
# sufficient; on 2.6, unittest2 supplies the missing assert methods.
if sys.version_info >= (2, 7):
    class BaseDeps(testtools.TestCase,
                   testtools.testcase.WithAttributes,
                   testresources.ResourcedTestCase):
        pass
else:
    # Define asserts for py26
    import unittest2

    class BaseDeps(testtools.TestCase,
                   testtools.testcase.WithAttributes,
                   testresources.ResourcedTestCase,
                   unittest2.TestCase):
        pass
class BaseTestCase(BaseDeps):
    """Common base class for all tempest test cases."""
    # Set by setUpClass; setUp() refuses to run when a subclass overrode
    # setUpClass without calling super().
    setUpClassCalled = False
    _service = None
    # Network resources requested via set_network_resources(); consumed by
    # the isolated-credentials provider.
    network_resources = {}

    @classmethod
    def setUpClass(cls):
        if hasattr(super(BaseTestCase, cls), 'setUpClass'):
            super(BaseTestCase, cls).setUpClass()
        cls.setUpClassCalled = True

    @classmethod
    def tearDownClass(cls):
        # Mark this class as having completed its teardown chain (see
        # at_exit_set / validate_tearDownClass).
        at_exit_set.discard(cls)
        if hasattr(super(BaseTestCase, cls), 'tearDownClass'):
            super(BaseTestCase, cls).tearDownClass()

    def setUp(self):
        super(BaseTestCase, self).setUp()
        if not self.setUpClassCalled:
            # NOTE(review): message grammar ("does not calls", missing
            # space before "setUpClass") is off; left as-is here.
            raise RuntimeError("setUpClass does not calls the super's"
                               "setUpClass in the "
                               + self.__class__.__name__)
        at_exit_set.add(self.__class__)
        # Optional per-test timeout from the environment; 0 or a
        # non-numeric value disables it.
        test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0)
        try:
            test_timeout = int(test_timeout)
        except ValueError:
            test_timeout = 0
        if test_timeout > 0:
            self.useFixture(fixtures.Timeout(test_timeout, gentle=True))
        # Capture stdout/stderr when the corresponding env vars opt in.
        if (os.environ.get('OS_STDOUT_CAPTURE') == 'True' or
                os.environ.get('OS_STDOUT_CAPTURE') == '1'):
            stdout = self.useFixture(fixtures.StringStream('stdout')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout))
        if (os.environ.get('OS_STDERR_CAPTURE') == 'True' or
                os.environ.get('OS_STDERR_CAPTURE') == '1'):
            stderr = self.useFixture(fixtures.StringStream('stderr')).stream
            self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr))
        # Log capture is on by default; only 'False'/'0' disables it.
        if (os.environ.get('OS_LOG_CAPTURE') != 'False' and
                os.environ.get('OS_LOG_CAPTURE') != '0'):
            log_format = '%(asctime)-15s %(message)s'
            self.useFixture(fixtures.LoggerFixture(nuke_handlers=False,
                                                   format=log_format,
                                                   level=None))

    @classmethod
    def get_client_manager(cls, interface=None):
        """
        Returns an OpenStack client manager

        Uses isolated credentials when tenant isolation is enabled (or
        forced via cls.force_tenant_isolation); the interface is taken
        from cls._interface, then the ``interface`` argument, then the
        manager default.
        """
        cls.isolated_creds = isolated_creds.IsolatedCreds(
            cls.__name__, network_resources=cls.network_resources)

        force_tenant_isolation = getattr(cls, 'force_tenant_isolation', None)
        if CONF.compute.allow_tenant_isolation or force_tenant_isolation:
            creds = cls.isolated_creds.get_primary_creds()
            if getattr(cls, '_interface', None):
                os = clients.Manager(credentials=creds,
                                     interface=cls._interface,
                                     service=cls._service)
            elif interface:
                os = clients.Manager(credentials=creds,
                                     interface=interface,
                                     service=cls._service)
            else:
                os = clients.Manager(credentials=creds,
                                     service=cls._service)
        else:
            if getattr(cls, '_interface', None):
                os = clients.Manager(interface=cls._interface,
                                     service=cls._service)
            elif interface:
                os = clients.Manager(interface=interface, service=cls._service)
            else:
                os = clients.Manager(service=cls._service)
        return os

    @classmethod
    def clear_isolated_creds(cls):
        """
        Clears isolated creds if set
        """
        # NOTE(review): two-arg getattr raises AttributeError when
        # isolated_creds was never set -- presumably callers always run
        # get_client_manager first; confirm.
        if getattr(cls, 'isolated_creds'):
            cls.isolated_creds.clear_isolated_creds()

    @classmethod
    def _get_identity_admin_client(cls):
        """
        Returns an instance of the Identity Admin API client
        """
        os = clients.AdminManager(interface=cls._interface,
                                  service=cls._service)
        admin_client = os.identity_client
        return admin_client

    @classmethod
    def set_network_resources(self, network=False, router=False, subnet=False,
                              dhcp=False):
        """Specify which network resources should be created

        @param network
        @param router
        @param subnet
        @param dhcp
        """
        # NOTE(review): a classmethod whose first parameter is named
        # 'self' (it actually receives the class); behavior is unchanged
        # but the naming is misleading.
        # network resources should be set only once from callers
        # in order to ensure that even if it's called multiple times in
        # a chain of overloaded methods, the attribute is set only
        # in the leaf class
        if not self.network_resources:
            self.network_resources = {
                'network': network,
                'router': router,
                'subnet': subnet,
                'dhcp': dhcp}

    def assertEmpty(self, list, msg=None):
        # NOTE(review): parameter name shadows the builtin 'list'.
        self.assertTrue(len(list) == 0, msg)

    def assertNotEmpty(self, list, msg=None):
        self.assertTrue(len(list) > 0, msg)
class NegativeAutoTest(BaseTestCase):
    """Base class for auto-generated negative API tests.

    Scenarios are generated from JSON description files (see
    generate_scenario) and executed against the negative REST client.
    """

    # Registry of valid resource ids, shared by set_resource/get_resource.
    _resources = {}

    @classmethod
    def setUpClass(cls):
        super(NegativeAutoTest, cls).setUpClass()
        # Renamed from "os", which shadowed the os module import.
        client_manager = cls.get_client_manager()
        cls.client = client_manager.negative_client
        os_admin = clients.AdminManager(interface=cls._interface,
                                        service=cls._service)
        cls.admin_client = os_admin.negative_client

    @staticmethod
    def load_schema(file):
        """
        Loads a schema from a file on a specified location.

        :param file: the file name
        :return: the parsed JSON description as a dictionary
        """
        # NOTE(mkoderer): must be extended for xml support
        fn = os.path.join(
            os.path.abspath(os.path.dirname(os.path.dirname(__file__))),
            "etc", "schemas", file)
        LOG.debug("Open schema file: %s", fn)
        # Close the file deterministically; json.load(open(fn)) leaked
        # the descriptor until garbage collection.
        with open(fn) as schema_file:
            return json.load(schema_file)

    @staticmethod
    def load_tests(*args):
        """
        Wrapper for testscenarios to set the mandatory scenarios variable
        only in case a real test loader is in place. Will be automatically
        called in case the variable "load_tests" is set.
        """
        # The load_tests protocol is invoked with two possible argument
        # orders: a real loader passes (loader, standard_tests, pattern),
        # other callers pass (standard_tests, module, loader).
        if getattr(args[0], 'suiteClass', None) is not None:
            loader, standard_tests, pattern = args
        else:
            standard_tests, module, loader = args
        for test in testtools.iterate_tests(standard_tests):
            schema_file = getattr(test, '_schema_file', None)
            if schema_file is not None:
                setattr(test, 'scenarios',
                        NegativeAutoTest.generate_scenario(schema_file))
        return testscenarios.load_tests_apply_scenarios(*args)

    @staticmethod
    def generate_scenario(description_file):
        """
        Generates the test scenario list for a given description.

        :param description_file: name of a JSON file containing a
            dictionary with the following entries:

            name (required) name for the api
            http-method (required) one of HEAD,GET,PUT,POST,PATCH,DELETE
            url (required) the url to be appended to the catalog url with
                '%s' for each resource mentioned
            resources: (optional) A list of resource names such as
                "server", "flavor", etc. with an element for each '%s' in
                the url. This method will call self.get_resource for each
                element when constructing the positive test case template
                so negative subclasses are expected to return valid
                resource ids when appropriate.
            json-schema (optional) A valid json schema that will be used
                to create invalid data for the api calls. For "GET" and
                "HEAD", the data is used to generate query strings
                appended to the url, otherwise for the body of the http
                call.
        :return: list of (scenario_name, attributes) tuples suitable for
            testscenarios
        """
        description = NegativeAutoTest.load_schema(description_file)
        LOG.debug(description)
        generator = importutils.import_class(
            CONF.negative.test_generator)()
        generator.validate_schema(description)
        schema = description.get("json-schema", None)
        resources = description.get("resources", [])
        scenario_list = []
        expected_result = None
        # One "invalid resource" scenario per declared resource: each gets
        # a random uuid that should not exist on the server.
        for resource in resources:
            if isinstance(resource, dict):
                # A dict entry carries its own expected result code.
                # NOTE(review): expected_result is not reset for
                # subsequent plain-string resources — confirm this
                # carry-over is intentional before changing it.
                expected_result = resource['expected_result']
                resource = resource['name']
            LOG.debug("Add resource to test %s", resource)
            scn_name = "inv_res_%s" % (resource)
            scenario_list.append((scn_name, {"resource": (resource,
                                             str(uuid.uuid4())),
                                             "expected_result": expected_result
                                             }))
        # One scenario per invalid-data variant produced by the generator.
        if schema is not None:
            for name, schema, expected_result in generator.generate(schema):
                if (expected_result is None and
                        "default_result_code" in description):
                    expected_result = description["default_result_code"]
                scenario_list.append((name,
                                      {"schema": schema,
                                       "expected_result": expected_result}))
        LOG.debug(scenario_list)
        return scenario_list

    def execute(self, description_file):
        """
        Execute a http call on an api that are expected to
        result in client errors. First it uses invalid resources that are
        part of the url, and then invalid data for queries and http request
        bodies.

        :param description_file: name of a JSON file with the same layout
            documented on generate_scenario
        """
        description = NegativeAutoTest.load_schema(description_file)
        LOG.info("Executing %s", description["name"])
        LOG.debug(description)
        method = description["http-method"]
        url = description["url"]

        resources = [self.get_resource(r) for
                     r in description.get("resources", [])]

        if hasattr(self, "resource"):
            # Note(mkoderer): The resources list already contains an invalid
            # entry (see get_resource).
            # We just send a valid json-schema with it
            valid_schema = None
            schema = description.get("json-schema", None)
            if schema:
                valid_schema = \
                    valid.ValidTestGenerator().generate_valid(schema)
            new_url, body = self._http_arguments(valid_schema, url, method)
        elif hasattr(self, "schema"):
            # Scenario injected an invalid schema variant.
            new_url, body = self._http_arguments(self.schema, url, method)
        else:
            raise Exception("testscenarios are not active. Please make sure "
                            "that your test runner supports the load_tests "
                            "mechanism")

        if "admin_client" in description and description["admin_client"]:
            client = self.admin_client
        else:
            client = self.client

        resp, resp_body = client.send_request(method, new_url,
                                              resources, body=body)
        self._check_negative_response(resp.status, resp_body)

    def _http_arguments(self, json_dict, url, method):
        """Return the (url, body) pair for the given schema data.

        Query-string methods get the data urlencoded into the url;
        other methods send it JSON-serialized as the request body.
        """
        LOG.debug("dict: %s url: %s method: %s", json_dict, url, method)
        if not json_dict:
            return url, None
        elif method in ["GET", "HEAD", "PUT", "DELETE"]:
            return "%s?%s" % (url, urllib.urlencode(json_dict)), None
        else:
            return url, json.dumps(json_dict)

    def _check_negative_response(self, result, body):
        """Assert the response is a client error (4xx, excluding 413)."""
        expected_result = getattr(self, "expected_result", None)
        self.assertTrue(result >= 400 and result < 500 and result != 413,
                        "Expected client error, got %s:%s" %
                        (result, body))
        self.assertTrue(expected_result is None or expected_result == result,
                        "Expected %s, got %s:%s" %
                        (expected_result, result, body))

    @classmethod
    def set_resource(cls, name, resource):
        """
        This function can be used in setUpClass context to register a
        resource for a test.

        :param name: The name of the kind of resource such as "flavor",
            "role", etc.
        :param resource: The id of the resource
        """
        cls._resources[name] = resource

    def get_resource(self, name):
        """
        Return a valid uuid for a type of resource. If a real resource is
        needed as part of a url then this method should return one.
        Otherwise it can return None.

        :param name: The name of the kind of resource such as "flavor",
            "role", etc.
        """
        if isinstance(name, dict):
            name = name['name']
        # Scenario-injected invalid resource takes precedence over the
        # registry of valid ids.
        if hasattr(self, "resource") and self.resource[0] == name:
            LOG.debug("Return invalid resource (%s) value: %s",
                      self.resource[0], self.resource[1])
            return self.resource[1]
        if name in self._resources:
            return self._resources[name]
        return None
def SimpleNegativeAutoTest(klass):
"""
This decorator registers a test function on basis of the class name.
"""
@attr(type=['negative', 'gate'])
def generic_test(self):
self.execute(self._schema_file)
cn = klass.__name__
cn = cn.replace('JSON', '')
cn = cn.replace('Test', '')
# NOTE(mkoderer): replaces uppercase chars inside the class name with '_'
lower_cn = re.sub('(?