networking-odl-16.0.0/0000775000175000017500000000000013656750617014603 5ustar zuulzuul00000000000000networking-odl-16.0.0/doc/0000775000175000017500000000000013656750617015350 5ustar zuulzuul00000000000000networking-odl-16.0.0/doc/requirements.txt0000664000175000017500000000055613656750541020636 0ustar zuulzuul00000000000000# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
sphinx!=1.6.6,!=1.6.7,!=2.1.0,>=1.6.2;python_version>='3.4' # BSD
openstackdocstheme>=1.20.0 # Apache-2.0
reno>=2.7.0 # Apache-2.0
doc8>=0.8.0 # Apache-2.0
networking-odl-16.0.0/doc/source/0000775000175000017500000000000013656750617016650 5ustar zuulzuul00000000000000networking-odl-16.0.0/doc/source/reference/0000775000175000017500000000000013656750617020606 5ustar zuulzuul00000000000000networking-odl-16.0.0/doc/source/reference/index.rst0000664000175000017500000000051413656750541022443 0ustar zuulzuul00000000000000====================
Reference Deployment
====================
This document is intended as a guide to the versions of OpenStack and
OpenDaylight components to use when OpenStack is deployed with
OpenDaylight.
OpenStack Version Reference
---------------------------
.. toctree::
:maxdepth: 2
pike.rst
ocata.rst
newton.rst
networking-odl-16.0.0/doc/source/reference/ocata.rst0000664000175000017500000000157013656750541022426 0ustar zuulzuul00000000000000Ocata ODL Reference
===================
.. contents::
OpenDaylight Components
-----------------------
With Ocata, legacy netvirt is recommended for use with the Boron snapshot.
However, legacy netvirt may not work properly with the Carbon snapshot
onwards.
+-------------------------------------------------------+
| OpenDaylight Components |
+===============================+=======================+
| Boron Snapshot | Yes |
+-------------------------------+-----------------------+
| Carbon Snapshot | Yes |
+-------------------------------+-----------------------+
| Nitrogen Snapshot | No |
+-------------------------------+-----------------------+
| Netvirt | odl-openstack-netvirt |
+-------------------------------+-----------------------+
networking-odl-16.0.0/doc/source/reference/newton.rst0000664000175000017500000000135113656750541022646 0ustar zuulzuul00000000000000Newton ODL Reference
====================
.. contents::
OpenDaylight Components
-----------------------
+-------------------------------------------------------+
| OpenDaylight Components |
+===============================+=======================+
| Boron Snapshot | Yes |
+-------------------------------+-----------------------+
| Carbon Snapshot | No |
+-------------------------------+-----------------------+
| Nitrogen Snapshot | No |
+-------------------------------+-----------------------+
| Netvirt | odl-ovsdb-openstack |
+-------------------------------+-----------------------+
networking-odl-16.0.0/doc/source/reference/pike.rst0000664000175000017500000000134513656750541022267 0ustar zuulzuul00000000000000Pike ODL Reference
==================
.. contents::
OpenDaylight Components
-----------------------
+-------------------------------------------------------+
| OpenDaylight Components |
+===============================+=======================+
| Boron Snapshot | No |
+-------------------------------+-----------------------+
| Carbon Snapshot | Yes |
+-------------------------------+-----------------------+
| Nitrogen Snapshot | Yes |
+-------------------------------+-----------------------+
| Netvirt | odl-openstack-netvirt |
+-------------------------------+-----------------------+
networking-odl-16.0.0/doc/source/conf.py0000664000175000017500000000533613656750541020152 0ustar zuulzuul00000000000000# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Sphinx configuration for building the networking-odl documentation.
import os
import sys
# Prepend the repository root to sys.path so sphinx.ext.autodoc can import
# the networking_odl package straight from the source tree.
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.autodoc',
    #'sphinx.ext.intersphinx',
    'openstackdocstheme',
    'oslo_config.sphinxext',
]
# openstackdocstheme options
repository_name = 'openstack/networking-odl'
bug_project = 'networking-odl'
bug_tag = 'doc'
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'networking-odl'
copyright = u'2013, OpenStack Foundation'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
html_theme = 'openstackdocs'
# html_static_path = ['static']
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# Timestamp format used by the theme for the "last updated" page footer.
html_last_updated_fmt = '%Y-%m-%d %H:%M'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('index',
     'doc-%s.tex' % project,
     u'%s Documentation' % project,
     u'OpenStack Foundation', 'manual'),
]
# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}
# LaTeX build tweaks: disable index generation and deepen the printed
# table of contents to three levels.
latex_elements = {
    'makeindex': '',
    'printindex': '',
    'preamble': r'\setcounter{tocdepth}{3}',
}
networking-odl-16.0.0/doc/source/install/0000775000175000017500000000000013656750617020316 5ustar zuulzuul00000000000000networking-odl-16.0.0/doc/source/install/installation.rst0000664000175000017500000001734713656750541023561 0ustar zuulzuul00000000000000.. _installation:
Installation
============
The ``networking-odl`` repository includes integration with DevStack that
enables creation of a simple OpenDaylight (ODL) development and test
environment. This document discusses what is required for manual installation
and integration into a production OpenStack deployment tool of conventional
architectures that include the following types of nodes:
* Controller - Runs OpenStack control plane services such as REST APIs
and databases.
* Network - Provides connectivity between provider (public) and project
(private) networks. Services provided include layer-3 (routing), DHCP, and
metadata agents. Layer-3 agent is optional. When using netvirt (vpnservice)
DHCP/metadata are optional.
* Compute - Runs the hypervisor and layer-2 agent for the Networking
service.
ODL Installation
----------------
http://docs.opendaylight.org provides manual and general documentation for ODL
Review the following documentation regardless of install scenario:
* `ODL installation `_.
* `OpenDaylight with OpenStack `_.
Choose and review one of the following installation scenarios:
* `GBP with OpenStack `_.
OpenDaylight Group Based Policy allows users to express network configuration
in a declarative rather than imperative way. Often described as asking for
"what you want", rather than "how you can do it", Group Based Policy achieves
this by implementing an Intent System. The Intent System is a process around
an intent driven data model and contains no domain specifics but is capable
of addressing multiple semantic definitions of intent.
* `OVSDB with OpenStack `_.
OpenDaylight OVSDB allows users to take advantage of Network Virtualization
using OpenDaylight SDN capabilities whilst utilizing OpenvSwitch. The stack
includes a Neutron Northbound, a Network Virtualization layer, an OVSDB
southbound plugin, and an OpenFlow southbound plugin.
* `VTN with OpenStack `_.
OpenDaylight Virtual Tenant Network (VTN) is an application that provides
multi-tenant virtual network on an SDN controller. VTN Manager is
implemented as one plugin to the OpenDaylight controller and provides a REST
interface to create/update/delete VTN components. It provides an
implementation of Openstack L2 Network Functions API.
Networking-odl Installation
---------------------------
.. code-block:: console
# sudo pip install networking-odl
.. note::
pip needs to be installed before running the above command.
Networking-odl Configuration
----------------------------
All related neutron services need to be restarted after configuration change.
#. Configure Openstack neutron server. The neutron server implements ODL as an
ML2 driver. Edit the ``/etc/neutron/neutron.conf`` file:
* Enable the ML2 core plug-in.
.. code-block:: ini
[DEFAULT]
...
core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
* (Optional) Enable ODL L3 router, if QoS feature is desired,
then qos should be appended to service_plugins
.. code-block:: ini
[DEFAULT]
...
service_plugins = odl-router_v2
#. Configure the ML2 plug-in. Edit the
``/etc/neutron/plugins/ml2/ml2_conf.ini`` file:
* Configure the ODL mechanism driver, network type drivers, self-service
(tenant) network types, and enable extension drivers(optional).
.. code-block:: ini
[ml2]
...
mechanism_drivers = opendaylight_v2
type_drivers = local,flat,vlan,vxlan
tenant_network_types = vxlan
extension_drivers = port_security, qos
.. note::
The enabling of extension_driver qos is optional, it should be
enabled if service_plugins for qos is also enabled.
* Configure the vxlan range.
.. code-block:: ini
[ml2_type_vxlan]
...
vni_ranges = 1:1000
* Optionally, enable support for VLAN provider and self-service
networks on one or more physical networks. If you specify only
the physical network, only administrative (privileged) users can
manage VLAN networks. Additionally specifying a VLAN ID range for
a physical network enables regular (non-privileged) users to
manage VLAN networks. The Networking service allocates the VLAN ID
for each self-service network using the VLAN ID range for the
physical network.
.. code-block:: ini
[ml2_type_vlan]
...
network_vlan_ranges = PHYSICAL_NETWORK:MIN_VLAN_ID:MAX_VLAN_ID
Replace ``PHYSICAL_NETWORK`` with the physical network name and
optionally define the minimum and maximum VLAN IDs. Use a comma
to separate each physical network.
For example, to enable support for administrative VLAN networks
on the ``physnet1`` network and self-service VLAN networks on
the ``physnet2`` network using VLAN IDs 1001 to 2000:
.. code-block:: ini
network_vlan_ranges = physnet1,physnet2:1001:2000
* Enable security groups.
.. code-block:: ini
[securitygroup]
...
enable_security_group = true
* Configure ML2 ODL
.. code-block:: ini
[ml2_odl]
...
username =
password =
url = http://:/controller/nb/v2/neutron
port_binding_controller = pseudo-agentdb-binding
* Optionally, To enable ODL DHCP service in an OpenDaylight enabled cloud,
set `enable_dhcp_service=True` under the `[ml2_odl]` section. It will load
the openstack-odl-v2-dhcp-driver which will create special DHCP ports in
neutron for use by the OpenDaylight Controller's DHCP Service. Please make
sure to set `controller-dhcp-enabled = True` within the OpenDaylight
Controller configuration file ``netvirt-dhcpservice-config.xml`` along
with the above configuration.
`OpenDaylight Spec Documentation Link: `_.
.. code-block:: ini
[ml2_odl]
...
enable_dhcp_service = True
Compute/network nodes
---------------------
Each compute/network node runs the OVS services. If compute/network nodes are
already configured to run with Neutron ML2 OVS driver, more steps are
necessary. `OVSDB with OpenStack `_ can be referred to.
#. Install the ``openvswitch`` packages.
#. Start the OVS service.
Using the *systemd* unit:
.. code-block:: console
# systemctl start openvswitch
Using the ``ovs-ctl`` script:
.. code-block:: console
# /usr/share/openvswitch/scripts/ovs-ctl start
#. Configure OVS to use ODL as a manager.
.. code-block:: console
# ovs-vsctl set-manager tcp:${ODL_IP_ADDRESS}:6640
Replace ``ODL_IP_ADDRESS`` with the IP address of ODL controller node
#. Set host OVS configurations if port_binding_controller is pseudo-agent
.. code-block:: console
# sudo neutron-odl-ovs-hostconfig
#. Verify the OVS service.
.. code-block:: console
# ovs-vsctl show
.. note::
After changing the config files, you have to restart the neutron server.
If you are using screen, it can be started directly from the neutron-api
window; alternatively you can use ``service neutron-server restart``,
though the latter may or may not work depending on the OS you are using.
networking-odl-16.0.0/doc/source/install/index.rst0000664000175000017500000000016313656750541022153 0ustar zuulzuul00000000000000Installation Guide
==================
.. toctree::
:maxdepth: 2
installation
DevStack plugin
networking-odl-16.0.0/doc/source/install/devstack.rst0000664000175000017500000000005213656750541022645 0ustar zuulzuul00000000000000.. include:: ../../../devstack/README.rst
networking-odl-16.0.0/doc/source/index.rst0000664000175000017500000000323013656750541020503 0ustar zuulzuul00000000000000==========================
Welcome to networking-odl!
==========================
.. Team and repository tags
.. only:: html
.. image:: http://governance.openstack.org/badges/networking-odl.svg
:target: http://governance.openstack.org/reference/tags/index.html
Summary
-------
OpenStack networking-odl is a library of drivers and plugins that integrates
OpenStack Neutron API with OpenDaylight Backend. For example it has ML2
driver and L3 plugin to enable communication of OpenStack Neutron L2
and L3 resources API to OpenDayLight Backend.
To report and discover bugs in networking-odl the following
link can be used:
https://bugs.launchpad.net/networking-odl
Any new code submission or proposal must follow the development
guidelines detailed in HACKING.rst and for further details this
link can be checked:
https://docs.openstack.org/networking-odl/latest/
The OpenDaylight homepage:
https://www.opendaylight.org/
Release notes for the project can be found at:
https://docs.openstack.org/releasenotes/networking-odl/
The project source code repository is located at:
https://opendev.org/openstack/networking-odl
Installation
------------
.. toctree::
:maxdepth: 2
install/index
Configuration options
---------------------
.. toctree::
:maxdepth: 2
configuration/index
Administration Guide
--------------------
.. toctree::
:maxdepth: 2
admin/index
Contributor Guide
-----------------
.. toctree::
:maxdepth: 2
contributor/index
Reference Deployment Guide
--------------------------
.. toctree::
:maxdepth: 2
reference/index
.. only:: html
Indices and tables
------------------
* :ref:`genindex`
* :ref:`search`
networking-odl-16.0.0/doc/source/contributor/0000775000175000017500000000000013656750617021222 5ustar zuulzuul00000000000000networking-odl-16.0.0/doc/source/contributor/usage.rst0000664000175000017500000000013113656750541023047 0ustar zuulzuul00000000000000========
Usage
========
To use networking-odl in a project::
import networking_odl
networking-odl-16.0.0/doc/source/contributor/specs/0000775000175000017500000000000013656750617022337 5ustar zuulzuul00000000000000networking-odl-16.0.0/doc/source/contributor/specs/pike/0000775000175000017500000000000013656750617023267 5ustar zuulzuul00000000000000networking-odl-16.0.0/doc/source/contributor/specs/pike/dep-validations-on-create.rst0000664000175000017500000001171713656750541030762 0ustar zuulzuul00000000000000..
This work is licensed under a Creative Commons Attribution 3.0 Unported
License.
http://creativecommons.org/licenses/by/3.0/legalcode
================================
Dependency Validations on Create
================================
https://blueprints.launchpad.net/networking-odl/+spec/dep-validations-on-create
Right now V2 driver entry dependency validations happen when a journal entry is
picked for processing. This spec proposes that this be moved to entry creation
time, in order to have a clear understanding of the entry dependencies and
conserve journal resources.
Problem Description
===================
Dependency validations are necessary in the V2 driver because each operation
gets recorded in a journal entry and sent to ODL asynchronously. Thus, a
consecutive operation might be sent to ODL before the first one finishes, while
relying on the first operation.
For example, when a subnet gets created it references a network, but if the
network was created right before the subnet was then the subnet create
shouldn't be sent over until the network create was sent.
Currently these checks are performed each time an entry is selected for
processing - if the entry passes the dependency checks then it gets processed
and if the dependency check fails (i.e. finds a previous unhandled entry that
needs to execute before this one) then the entry gets sent back to the queue.
Generally this is not optimal for several reasons:
* No clear indication of relations between the entries.
* The logic is hidden in the code and there's no good way to know why an
entry fails a dependency check.
* Difficult to debug in case of problems.
* Difficult to spot phenomenon such as a cyclic dependency.
* Wasted CPU effort.
* An entry can be checked multiple times for dependencies.
* Lots of redundant DB queries to determine dependencies each time.
Proposed Change
===============
The proposed solution is to move the dependency calculation to entry creation
time.
When a journal entry is created the dependency management system will calculate
the dependencies on other entries (Similarly to how it does now) and if there
are journal entries the new entry should depend on, their IDs will be inserted
into a link table.
Thus, when the journal looks for an entry to pick up it will only look for
entries that no other entry depends on by making sure there aren't any entries
in the dependency table.
When a journal entry is done processing (either successfully or reaches failed
state), the dependency links will be removed from the dependency table so that
dependent rows can be processed.
The proposed table::
+------------------------+
| odl_journal_dependency |
+------------------------+
| parent_id |
| dependent_id |
+------------------------+
The table columns will be foreign keys to the seqnum column in the journal
table. The constraints will be defined as "ON DELETE CASCADE" so that when a
journal entry is removed any possible rows will be removed as well.
The primary key will be made from both columns of the table as this is a link
table and not an actual entity.
If we face DB performance issues (highly unlikely, since this table should
normally have a very small amount of rows if any at all) then an index can be
constructed on the dependent_id column.
The dependency management mechanism will locate parent entries for the given
entry and will populate the table so that the parent entry's seqnum will be
set as the parent_id, and the dependent entry id will be set as dependent_id.
When the journal picks up an entry for processing it will condition it on not
having any rows with the parent_id in the dependency table. This will ensure
that dependent rows get handled after the parent rows have finished processing.
Performance Considerations
==========================
Generally the performance shouldn't be impacted as we're moving the part of
code that does dependency calculations from the entry selection time to entry
creation time. This will assure that dependency calculations happen only once
per journal entry.
However, some simple benchmarks should be performed before & after the change:
* Average Tempest run time.
* Average CPU consumption on Tempest.
* Full sync run time (Start to finish of all entries).
If performance suffers a severe degradation then we should consider
alternative solutions.
Questions
=========
Q: Should entries in "failed" state block other entries?
A: Currently "failed" rows are not considered as blocking for dependency
validations, but we might want to change this as it makes little sense to
process a dependent entry that failed processing.
Q: How will this help debug-ability?
A: It will be easy to query the table contents at any time to figure out which
entries depend on which other entries.
Q: How will we be able to spot cyclic dependencies?
A: Currently this isn't planned as part of the spec, but a DB query (or a
series of them) can help determine if this problem exists.
networking-odl-16.0.0/doc/source/contributor/specs/pike/neutron-port-dhcp.rst0000664000175000017500000002414613656750541027414 0ustar zuulzuul00000000000000..
This work is licensed under a Creative Commons Attribution 3.0 Unported
License.
http://creativecommons.org/licenses/by/3.0/legalcode
======================================================================
Neutron Port Allocation per Subnet for OpenDaylight DHCP Proxy Service
======================================================================
This spec describes the proposal to allocate a Neutron DHCP Port just for
use by OpenDaylight Controller on Subnets that are created or updated with
enable-dhcp to True.
When in OpenDaylight controller, the "controller-dhcp-enabled" configuration
flag is set to true, these Neutron DHCP Ports will be used by the OpenDaylight
Controller to provide DHCP Service instead of using the subnet-gateway-ip as
the DHCP Server IP as it stands today.
The networking-odl driver is not aware about the above OpenDaylight controller
parameter configuration. When controller-dhcp-enabled configuration flag is set
to false the DHCP port will be created and destroyed without causing any harm
to either OpenDaylight controller or networking-odl driver.
Problem Statement
=================
The DHCP service within OpenDaylight currently assumes availability of the
subnet gateway IP address. The subnet gateway ip is not a mandatory parameter
for an OpenStack subnet, and so it might not be available from OpenStack
orchestration. This renders the DHCP service in OpenDaylight to not be
able to serve DHCP offers to virtual endpoints requesting for IP addresses,
thereby resulting in service unavailability. Even if subnet-gateway-ip is
available in the subnet, it is not a good design in OpenDaylight to hijack
that ip address and use that as the DHCP Server IP Address.
Problem - 1: L2 Deployment with 3PP gateway
-------------------------------------------
There can be deployment scenario in which L2 network is created with no
distributed Router/VPN functionality. This deployment can have a separate
gateway for the network such as a 3PP LB VM, which acts as a TCP termination
point and this LB VM is configured with a default gateway IP. It means all
inter-subnet traffic is terminated on this VM which takes the responsibility
of forwarding the traffic.
But the current DHCP service in OpenDaylight controller hijacks gateway IP
address for serving DHCP discover/request messages. If the LB is up, this can
continue to work, DHCP broadcasts will get hijacked by the OpenDaylight, and
responses sent as PKT_OUTs with SIP = GW IP.
However, if the LB is down, and the VM ARPs for the same IP as part of a DHCP
renew workflow, the ARP resolution can fail, due to which renew request will
not be generated. This can cause the DHCP lease to lapse.
Problem - 2: Designated DHCP for SR-IOV VMs via HWVTEP
------------------------------------------------------
In this Deployment scenario, L2 network is created with no distributed Router/
VPN functionality, and HWVTEP for SR-IOV VMs. DHCP flood requests from SR-IOV
VMs(DHCP discover, request during bootup), are flooded by the HWVTEP on the
L2 Broadcast domain, and punted to the controller by designated vswitch. DHCP
offers are sent as unicast responses from Controller, which are forwarded by
the HWVTEP to the VM. DHCP renews can be unicast requests, which the HWVTEP
may forward to an external Gateway VM (3PPLB VM) as unicast packets. Designated
vswitch will never receive these pkts, and thus not be able to punt them to the
controller, so renews will fail.
Proposed Change
===============
In general as part of implementation of this spec, we are introducing a new
configuration parameter 'create_opendaylight_dhcp_port' whose truth value
determines whether the dhcp-proxy-service within the openstack-odl framework
need to be made functional. This service will be responsible for managing the
create/update/delete lifecycle for a new set of Neutron DHCP Ports which will
be provisioned specifically for use by the OpenDaylight Controller's existing
DHCP Service Module.
Detailed Design
===============
Introduce a driver config parameter(create_opendaylight_dhcp_port) to determine
if OpenDaylight based DHCP service is being used. Default setting for the
parameter is false.
When 'create_opendaylight_dhcp_port' is set to True, it triggers the networking
-odl ml2 driver to hook on to OpenStack subnet resource lifecycle and use that
to manage a special DHCP port per subnet for OpenDaylight Controller use. These
special DHCP ports will be shipped to OpenDaylight controller, so that DHCP
Service within the OpenDaylight controller can make use of these as DHCP
Server ports themselves. The port will be used to service DHCP requests for
virtual end points belonging to that subnet.
These special DHCP Ports (one per subnet), will carry unique device-id and
device-owner values.
* device-owner(network:dhcp)
* device-id(OpenDaylight-)
OpenDaylight DHCP service will also introduce a new config parameter controller
-dhcp-mode to indicate if the above DHCP port should be used for servicing DHCP
requests. When the parameter is set to use-odl-dhcp-neutron-port, it is
recommended to enable the create_opendaylight_dhcp_port flag for the networking
-odl driver.
Alternative 1
--------------
The creation of Neutron OpenDaylight DHCP port will be invoked within the
OpenDaylight mechanism Driver subnet-postcommit execution.
Any failures during the neutron dhcp port creation or allocation for the subnet
should trigger failure of the subnet create operation with an appropriate
failure message in logs. On success the subnet and port information will be
persisted to Journal DB and will subsequently synced with the OpenDaylight
controller.
The plugin should initiate the removal of allocated dhcp neutron port at the
time of subnet delete. The port removal will be handled in a subnet-delete-
post-commit execution and any failure during this process should rollback the
subnet delete operation. The subnet delete operation will be allowed only when
all other VMs launched on this subnet are already removed as per existing
Neutron behavior.
A subnet update operation configuring the DHCP state as enabled should allocate
such a port if not previously allocated for the subnet. Similarly a subnet
update operation configuring DHCP state to disabled should remove any
previously allocated OpenDaylight DHCP neutron ports.
Since the invocation of create/delete port will be synchronous within subnet
post-commit, a failure to create/delete port will result in an exception being
thrown which makes the ML2 Plugin to fail the subnet operation and not alter
Openstack DB.
Alternative 2
-------------
The OpenDaylight Neutron DHCP Port creation/deletion is invoked asynchronously
driven by a journal entry callback for any Subnet resource state changes as
part of create/update/delete. A generic journal callback mechanism to be
implemented. Initial consumer of this callback would be the OpenDaylight
DHCP proxy service but this could be used by other services in future.
The Neutron DHCP Port (for OpenDaylight use) creation is triggered when the
subnet journal-entry is moved from PENDING to PROCESSING. On a failure of
port-creation, the journal will be retained in PENDING state and the subnet
itself won't be synced to the OpenDaylight controller. The journal-entry state
is marked as COMPLETED only on successful port creation and successful
synchronization of that subnet resource to OpenDaylight controller. The same
behavior is applicable for subnet update and delete operations too.
The subnet create/update operation that allocates an OpenDaylight DHCP port
to always check if a port exists and allocate new port only if none exists
for the subnet.
Since the invocation of create/delete port will be within the journal callback
and asynchronous to subnet-postcommit, the failure to create/delete port
will result in the created (or updated) subnet to remain in PENDING state. Next
journal sync of this pending subnet will again retry creation/deletion of port
and this cycle will happen until either create/delete port succeeds or the
subnet is itself deleted by the orchestrating tenant. This could result in
piling up of journal PENDING entries for these subnets when there is an
unexpected failure in create/delete DHCP port operation. It is recommended to
not keep retrying the port operation and instead failures would be indicated
in OpenDaylight as DHCP offers/renews will not be honored by the dhcp service
within the OpenDaylight controller, for that subnet.
Recommended Alternative
-----------------------
All of the following cases will need to be addressed by the design.
* Neutron server can crash after submitting information to DB but before
invoking post-commit during a subnet create/update/delete operation. The
dhcp-proxy-service should handle the DHCP port creation/deletion during
such failures when the service is enabled.
* A subnet update operation to disable-dhcp can be immediately followed by
a subnet update operation to enable-dhcp, and such a situation should end up
in creating the neutron-dhcp-port for consumption by OpenDaylight.
* A subnet update operation to enable-dhcp can be immediately followed by a
subnet update operation to disable-dhcp, and such a situation should end up
in deleting the neutron-dhcp-port that was created for use by OpenDaylight.
* A subnet update operation to enable-dhcp can be immediately followed by a
subnet delete operation,and such a situation should end up deleting the
neutron-dhcp-port that was about to be provided for use by OpenDaylight.
* A subnet create operation (with dhcp enabled) can be immediately followed
by a subnet update operation to disable-dhcp, and such a situation should
end up in deleting the neutron-dhcp-port that was created for use by
OpenDaylight.
Design as per Alternative 2 meets the above cases better and is what we propose
to take as the approach that we will pursue for this spec.
Dependencies
============
Feature is dependent on enhancement in OpenDaylight DHCP Service as per the
Spec in [1]
Impact
======
None
Assignee(s)
===========
* Achuth Maniyedath (achuth.m@altencalsoftlabs.com)
* Karthik Prasad(karthik.p@altencalsoftlabs.com)
References
==========
* [1] OpenDaylight spec to cover this feature
https://git.opendaylight.org/gerrit/#/c/52298/
networking-odl-16.0.0/doc/source/contributor/specs/newton/0000775000175000017500000000000013656750617023651 5ustar zuulzuul00000000000000networking-odl-16.0.0/doc/source/contributor/specs/newton/qos-driver.rst0000664000175000017500000001137313656750541026477 0ustar zuulzuul00000000000000==========================================
Quality of Service Driver for OpenDaylight
==========================================
This spec describes the plan to implement quality of service driver for
OpenDaylight Controller.
Problem Statement
=================
The OpenStack networking project (neutron [1]) has an extension plugin
implemented which exposes APIs for quality of service that can also be
implemented by any backend networking service provider to support QoS. These
APIs provide a way to integrate OpenStack Neutron QoS with any of the backend
QoS providers.
OpenDaylight will provide backend for existing functionalities in neutron-QoS.
A notification driver is needed to integrate the existing QoS APIs in
OpenStack neutron with the OpenDaylight backend.
Proposed Change
===============
This change will introduce a new notification driver in networking-odl that
will take CRUD requests data for QoS policies from OpenStack neutron and notify
the OpenDaylight controller about the respective operation.
Detailed Design
===============
Enabling the formal end-to-end integration between OpenStack QoS and
OpenDaylight requires a networking-odl QoS notification driver. The QoS driver
will act as a shim layer between OpenStack and OpenDaylight that will carry
out the following tasks:
#. After getting QoS policy request data from neutron, it will log an operation
request in the opendaylightjournal table.
#. The operation will be picked from the opendaylightjournal table and a REST
call for notifying the OpenDaylight server will be prepared and sent.
#. This request will be processed by the neutron northbound component of
OpenDaylight. The OpenDaylight neutron northbound project will define QoS
models based on the existing neutron qos plugin APIs.
QoS providers in OpenDaylight can listen to these OpenDaylight Neutron
Northbound QoS models and translate it to their specific yang models for QoS.
The following diagram shows the high level integration between OpenStack and
the OpenDaylight QoS provider::
+---------------------------------------------+
| OpenStack Network Server (neutron qos) |
| |
| +---------------------+ |
| | networking-odl | |
| | | |
| | +---------------| |
| | | Notification | |
| | | driver QoS | |
+----------------------|----------------------+
|
| Rest Communication
|
OpenDaylight Controller |
+-----------------------|------------+
| +----------V----+ |
| ODL | QoS Yang Model| |
| Northbound | | |
| (neutron) +---------------+ |
| | |
| | |
| ODL +----V----+ |
| Southbound | QoS | |
| (neutron) +---------+ |
+-----------------|------------------+
|
|
+------------------------------------+
| Network/OVS |
| |
+------------------------------------+
In the above diagram, the OpenDaylight components are shown just to understand
the overall architecture, but it's out of scope of this spec's work items.
This spec will only track progress related to networking-odl notification QoS
driver work.
Dependencies
============
It has a dependency on OpenDaylight Neutron Northbound QoS yang models, but
that is out of scope of this spec.
Impact
======
None
Assignee(s)
===========
Following developers will be the initial contributor to the driver, but we
will be happy to have more contributor on board.
* Manjeet Singh Bhatia (manjeet.s.bhatia@intel.com, irc: manjeets)
References
==========
* [1] https://docs.openstack.org/neutron/latest/contributor/internals/quality_of_service.html
* [2] https://wiki.opendaylight.org/view/NeutronNorthbound:Main
networking-odl-16.0.0/doc/source/contributor/specs/newton/sfc-driver.rst0000664000175000017500000001516113656750541026447 0ustar zuulzuul00000000000000=================================================
Service Function Chaining Driver for OpenDaylight
=================================================
This spec describes the plan to implement OpenStack networking-sfc[1] driver
for OpenDaylight Controller.
Problem Statement
===================
OpenStack SFC project (networking-sfc [1]) exposes generic APIs[2] for Service
Function Chaining (SFC) that can be implemented by any backend networking
service provider to support SFC. These APIs provide a way to integrate
OpenStack SFC with any of the backend SFC providers. OpenDaylight SFC project
provides a very mature implementation of SFC [3], but currently there is no
formal integration mechanism present to consume OpenDaylight as an SFC provider
for networking-sfc.
Recently Tacker project [4] has been approved as an official project in
OpenStack, that opens many possibilities to realize the NFV use cases (e.g SFC)
using OpenStack as a platform. Providing a formal end to end integration
between OpenStack and OpenDaylight for SFC use case will help NFV users
leverage OpenStack, Tacker and OpenDaylight as a solution. A POC for this
integration work has already been implemented [5][6] by Tim Rozet, but in
this POC work, Tacker directly communicates to OpenDaylight SFC & classifier
providers and not through OpenStack SFC APIs (networking-sfc).
Proposed Change
===============
Implementation of this spec will introduce a networking-sfc[1] driver for
OpenDaylight Controller in networking-odl project that will pass through
the networking-sfc API's call to the OpenDaylight Controller.
Detailed Design
===============
Enabling the formal end-to-end integration between OpenStack SFC and
OpenDaylight requires an SFC driver for OpenDaylight. The ODL SFC driver will
act as a shim layer between OpenStack and OpenDaylight that will carry out
the following two main tasks:
* Translation of OpenStack SFC Classifier API to ODL SFC classifier yang
models**.
* Translation of OpenStack SFC API's to OpenDaylight Neutron Northbound
SFC models** [8].
** This work is not yet done, but the OpenDaylight neutron northbound project
needs to come up with yang models for SFC classification/chain. These models
will be based on the existing networking-sfc APIs. This work is out of scope
of networking-odl work and will be collaborated in the scope of OpenDaylight
Neutron Northbound project.
SFC providers (E.g Net-Virt, GBP, SFC ) in OpenDaylight can listen to these
OpenDaylight Neutron Northbound SFC models and translate it to their specific
yang models for classification/sfc. The following diagram shows the high level
integration between OpenStack and the OpenDaylight SFC provider::
+---------------------------------------------+
| OpenStack Network Server (networking-sfc) |
| +-------------------+ |
| | networking-odl | |
| | SFC Driver | |
| +-------------------+ |
+----------------------|----------------------+
| REST Communication
|
-----------------------
OpenDaylight Controller | |
+-----------------------|-----------------------|---------------+
| +----------v----+ +---v---+ |
| Neutron | SFC Classifier| |SFC | Neutron |
| Northbound | Models | |Models | Northbound|
| Project +---------------+ +-------+ Project |
| / \ | |
| / \ | |
| / \ | |
| +-----V--+ +---V----+ +---V---+ |
| |Net-Virt| ... | GBP | | SFC | ... |
| +---------+ +--------+ +-------+ |
+-----------|----------------|------------------|---------------+
| | |
| | |
+-----------V----------------V------------------V---------------+
| Network/OVS |
| |
+---------------------------------------------------------------+
In the above architecture, the opendaylight components are shown just to
understand the overall architecture, but it's out of scope of this spec's
work items. This spec will only track progress related to networking-odl
OpenStack sfc driver work.
Given that OpenStack SFC APIs are port-pair based APIs and OpenDaylight SFC
APIs are based on IETF SFC yang models[8], there might be situations where
translation requires API enhancements from OpenStack SFC. The networking SFC
team is open to these new enhancement requirements provided that they are
generic enough to be leveraged by other backend SFC providers[9]. This work
will leverage the POC work done by Tim [10] to come up with the first version
of the SFC driver.
Dependencies
============
It has a dependency on OpenDaylight Neutron Northbound SFC classifier and chain
yang models, but that is out of scope of this spec.
Impact
======
None
Assignee(s)
===========
Following developers will be the initial contributor to the driver, but we will
be happy to have more contributor on board.
* Anil Vishnoi (vishnoianil@gmail.com, irc: vishnoianil)
* Tim Rozet (trozet@redhat.com, irc: trozet)
References
==========
[1] https://docs.openstack.org/networking-sfc/latest/
[2] https://github.com/openstack/networking-sfc/blob/master/doc/source/contributor/api.rst
[3] https://wiki.opendaylight.org/view/Service_Function_Chaining:Main
[4] https://wiki.openstack.org/wiki/Tacker
[5] https://github.com/trozet/tacker/tree/SFC_brahmaputra/tacker/sfc
[6] https://github.com/trozet/tacker/tree/SFC_brahmaputra/tacker/sfc_classifier
[7] https://tools.ietf.org/html/draft-ietf-netmod-acl-model-05
[8] https://wiki.opendaylight.org/view/NeutronNorthbound:Main
[9] http://eavesdrop.openstack.org/meetings/service_chaining/2016/service_chaining.2016-03-31-17.00.log.html
[10] https://github.com/trozet/tacker/blob/SFC_brahmaputra/tacker/sfc/drivers/opendaylight.py
networking-odl-16.0.0/doc/source/contributor/specs/index.rst0000664000175000017500000000050213656750541024171 0ustar zuulzuul00000000000000.. networking-odl specs documentation index
==============
Specifications
==============
Pike specs
==========
.. toctree::
:glob:
:maxdepth: 1
pike/*
Ocata specs
===========
.. toctree::
:glob:
:maxdepth: 1
ocata/*
Newton specs
============
.. toctree::
:glob:
:maxdepth: 1
newton/*
networking-odl-16.0.0/doc/source/contributor/specs/ocata/0000775000175000017500000000000013656750617023426 5ustar zuulzuul00000000000000networking-odl-16.0.0/doc/source/contributor/specs/ocata/journal-recovery.rst0000664000175000017500000001237013656750541027465 0ustar zuulzuul00000000000000..
This work is licensed under a Creative Commons Attribution 3.0 Unported
License.
http://creativecommons.org/licenses/by/3.0/legalcode
================
Journal Recovery
================
https://blueprints.launchpad.net/networking-odl/+spec/journal-recovery
Journal entries in the failed state need to be handled somehow. This spec will
try to address the issue and propose a solution.
Problem Description
===================
Currently there is no handling for Journal entries that reach the failed state.
A journal entry can reach the failed state for several reasons, some of which
are:
* Reached maximum failed attempts for retrying the operation.
* Inconsistency between ODL and the Neutron DB.
* For example: An update fails because the resource doesn't exist in ODL.
* Bugs that can lead to failure to sync up.
These entries will be left in the journal table forever which is a bit wasteful
since they take up some space on the DB storage and also affect the performance
of the journal table.
Albeit each entry has a negligible effect on its own, the impact of a large
number of such entries can become quite significant.
Proposed Change
===============
A "journal recovery" routine will run as part of the current journal
maintenance process.
This routine will scan the journal table for rows in the "failed" state and
will try to sync the resource for that entry.
The procedure can be best described by the following flow chart:
asciiflow::
+-----------------+
| For each entry |
| in failed state |
+-------+---------+
|
+-------v--------+
| Query resource |
| on ODL (REST) |
+-----+-----+----+
| | +-----------+
Resource | | Determine |
exists +--Resource doesn't exist--> operation |
| | type |
+-----v-----+ +-----+-----+
| Determine | |
| operation | |
| type | |
+-----+-----+ |
| +------------+ |
+--Create------> Mark entry <--Delete--+
| | completed | |
| +----------^-+ Create/
| | Update
| | |
| +------------+ | +-----v-----+
+--Delete--> Mark entry | | | Determine |
| | pending | | | parent |
| +---------^--+ | | relation |
| | | +-----+-----+
+-----v------+ | | |
| Compare to +--Different--+ | |
| resource | | |
| in DB +--Same------------+ |
+------------+ |
|
+-------------------+ |
| Create entry for <-----Has no parent------+
| resource creation | |
+--------^----------+ Has a parent
| |
| +---------v-----+
+------Parent exists------+ Query parent |
| on ODL (REST) |
+---------+-----+
+------------------+ |
| Create entry for <---Parent doesn't exist--+
| parent creation |
+------------------+
For every error during the process the entry will remain in failed state but
the error shouldn't stop processing of further entries.
The implementation could be done in two phases where the parent handling is
done in a second phase.
For the first phase if we detect an entry that is in failed for a create/update
operation and the resource doesn't exist on ODL we create a new "create
resource" journal entry for the resource.
This proposal utilises the journal mechanism for its operation while the only
part that deviates from the standard mode of operation is when it queries ODL
directly. This direct query has to be done to get ODL's representation of the
resource.
Performance Impact
------------------
The maintenance thread will have another task to handle. This can lead to
longer processing time and even cause the thread to skip an iteration.
This is not an issue since the maintenance thread runs in parallel and doesn't
directly impact the responsiveness of the system.
Since most operations here involve I/O then CPU probably won't be impacted.
Network traffic would be impacted slightly since we will attempt to fetch the
resource each time from ODL and we might attempt to fetch its parent.
This is however negligible as we do this only for failed entries, which are
expected to appear rarely.
Alternatives
------------
The partial sync process could make this process obsolete (along with full
sync), but it's a far more complicated and problematic process.
It's better to start with this process which is more lightweight and doable
and consider partial sync in the future.
Assignee(s)
===========
Primary assignee:
mkolesni
Other contributors:
None
References
==========
https://goo.gl/IOMpzJ
networking-odl-16.0.0/doc/source/contributor/drivers_architecture.rst0000664000175000017500000001646513656750541026204 0ustar zuulzuul00000000000000ODL Drivers Architecture
========================
This document covers architectural concepts of the ODL drivers. Although
'driver' is an ML2 term, it's used widely in ODL to refer to any
implementation of APIs. Any mention of ML2 in this document is solely for
reference purposes.
V1 Driver Overview (Removed in Rocky)
-------------------------------------
Note: This architecture has been deprecated in Queens and removed in Rocky.
The documentation is kept as a reference to understand the necessity of
a different architecture.
The first driver version was a naive implementation which synchronously
mirrored all calls to the ODL controller. For example, a create network request
would first get written to the DB by Neutron's ML2 plugin, and then the ODL
driver would send the request to POST the network to the ODL controller.
Although this implementation is simple, it has a few problems:
* ODL is not really synchronous, so if the REST call succeeds it doesn't mean
the action really happened on ODL.
* The "synchronous" call can be a bottleneck under load.
* Upon failure the V1 driver would try to "full sync" the entire Neutron DB
over on the next call, so the next call could take a very long time.
* It doesn't really handle race conditions:
- For example, create subnet and then create port could be sent in parallel
by the driver in an HA Neutron environment, causing the port creation to
fail.
- Full-sync could possibly recreate deleted resources if the deletion happens
in parallel.
.. _v2_design:
V2 Driver Design
----------------
The V2 driver set upon to tackle problems encountered in the V1 driver while
maintaining feature parity.
The major design concept of the V2 driver is *journaling* - instead of passing
the calls directly to the ODL controller, they get registered
in the journal table which keeps a sort of queue of the various operations that
occurred on Neutron and should be mirrored to the controller.
The journal is processed mainly by a journaling thread which runs periodically
and checks if the journal table has any entries in need of processing.
Additionally the thread is triggered in the postcommit hook of the operation
(where applicable).
If we take the example of create network again, after it gets stored in the
Neutron DB by the ML2 plugin, the ODL driver stores a "journal entry"
representing that operation and triggers the journaling thread to take care of
the entry.
The journal entry is recorded in the pre-commit phase (whenever applicable) so
that in case of a commit failure the journal entry gets aborted along with the
original operation, and there's nothing extra needed.
The *get_resources_for_full_sync* method is defined in the ResourceBaseDriver
class, it fetches all the resources needed for full sync, based on resource
type. To override the default behaviour of *get_resources_for_full_sync*
define it in driver class, For example L2 gateway driver needs to provide
customized method for filtering of fetched gateway connection information
from the database. Neutron defines *l2_gateway_id* for an l2 gateway connection
but ODL expects *gateway_id*, these kind of pre or post processing can be
done in this method.
Journal Entry Lifecycle
-----------------------
The first state in which a journal entry is created is the 'pending' state. In
this state, the entry is awaiting a thread to pick it up and process it.
Multiple threads can try to grab the same journal entry, but only one will
succeed since the "selection" is done inside a 'select for update' clause.
Special care is taken for GaleraDB since it reports a deadlock if more than
one thread selects the same row simultaneously.
Once an entry has been selected it will be put into the 'processing' state
which acts as a lock. This is done in the same transaction so that in case
multiple threads try to "lock" the same entry only one of them will succeed.
When the winning thread succeeds it will continue with processing the entry.
The first thing the thread does is check for dependencies - if the entry
depends on another one to complete. If a dependency is found, the entry is put
back into the queue and the thread moves on to the next entry.
When there are no dependencies for the entry, the thread analyzes the operation
that occurred and performs the appropriate call to the ODL controller. The call
is made to the correct resource or collection and the type of call (PUT, POST,
DELETE) is determined by the operation type. At this point if the call was
successful (i.e. got a 200 class HTTP code) the entry is marked 'completed'.
In case of a failure the thread determines if this is an expected failure (e.g.
network connectivity issue) or an unexpected failure. For unexpected failures
a counter is incremented, so that a given entry won't be retried more than a
given number of times. Expected failures don't change the counter. If the counter
exceeds the configured amount of retries, the entry is marked as 'failed'.
Otherwise, the entry is marked back as 'pending' so that it can later be
retried.
Full Sync & Recovery
--------------------
.. code:: python
file: networking_odl/journal/base_driver.py
ALL_RESOURCES = {}
class ResourceBaseDriver(object):
# RESOURCES is dictionary of resource_type and resource_suffix to
# be defined by the drivers class.
RESOURCES = {}
def __init__(self, plugin_type, *args, **kwargs):
super(ResourceBaseDriver, self).__init__(*args, **kwargs)
self.plugin_type = plugin_type
# All the common methods to be used by full sync and recovery
# specific to driver.
# Only driver is enough for all the information. Driver has
# plugin_type for fetching the information from db and resource
# suffix is available through driver.RESOURCES.
for resource, resource_suffix in self.RESOURCES.items():
ALL_RESOURCES[resource] = self
def get_resource_for_recovery(self, resource_type, resource_id):
# default definition to be used, if get_resource method is not
# defined then this method gets called by recovery
def get_resources_for_full_sync(self, resource_type):
# default definition to be used, if get_resources method is not
# defined then this method gets called by full sync
@staticmethod
def get_method_name_by_resource_suffix(method_suffix):
# Returns method name given resource suffix
@staticmethod
def get_method(plugin, method_name):
# Returns method for a specific plugin
file: networking_odl//.py
class XXXXDriver(ResourceBaseDriver, XXXXDriverBase):
RESOURCES = {
odl_const.XXXX: odl_const.XXXY,
odl_const.XXXY: odl_const.XXYY
}
def __init__(self, *args, **kwargs):
super(XXXXDriver, self)(plugin_type, *args, **kwargs)
# driver specific things
# get_resources_for_full_sync and get_resource_for_recovery methods are
# optional and they have to be defined, if customized behaviour is
# required. If these methods are not defined in the driver then default
# methods defined in ResourceBaseDriver is used.
def get_resources_for_full_sync(self, resource_type):
# returns resource for full sync
def get_resource_for_recovery(self, resource_type, resource_id):
# returns resource for recovery
networking-odl-16.0.0/doc/source/contributor/maintenance.rst0000664000175000017500000000303113656750541024227 0ustar zuulzuul00000000000000Journal Maintenance
===================
Overview
--------
The V2 ODL driver is Journal based [#]_, which means that there's a journal of
entries detailing the various operations done on a Neutron resource.
The driver has a thread which is in charge of processing the journal of
operations which entails communicating the operation forward to the ODL
controller.
The journal entries can wind up in several states due to various reasons:
* PROCESSING - Stale lock left by a thread due to thread dying or other error
* COMPLETED - After the operation is processed successfully
* FAILED - If there was an unexpected error during the operation
These journal entries need to be dealt with appropriately, hence a maintenance
thread was introduced that takes care of journal maintenance and other related
tasks.
This thread runs in a configurable interval and is HA safe using a shared state
kept in the DB.
Currently the maintenance thread performs:
* Stale lock release
* Completed entries clean up
* Failed entries are handled by the recovery mechanism
* Full sync detect when ODL is "tabula rasa" and syncs all the resources to it
Creating New Maintenance Operations
-----------------------------------
Creating a new maintenance operation is as simple as writing a function
that receives the database session object and registering it using a call to::
MaintenanceThread.register_operation
The best place to do so would be at the _start_maintenance_thread method of
the V2 OpenDaylightMechanismDriver class.
.. [#] See :ref:`v2_design` for details.
networking-odl-16.0.0/doc/source/contributor/testing.rst0000664000175000017500000000004213656750541023421 0ustar zuulzuul00000000000000.. include:: ../../../TESTING.rst
networking-odl-16.0.0/doc/source/contributor/hostconfig.rst0000664000175000017500000001325013656750541024114 0ustar zuulzuul00000000000000Host Configuration
==================
Overview
--------
ODL uses an agentless configuration. In this scenario Host Configuration is
used to specify the physical host type and other configurations for the host
system. This information is populated by the Cloud Operator in OVSDB, in the
Open_vSwitch configuration data, in the external_ids field as key-value pairs.
This information is then read by ODL and made available to networking-odl
through REST API. Networking-odl populates this information in agent_db in
Neutron and is then used by Neutron scheduler. This information is required
for features like Port binding and Router scheduling.
Refer to this link for detailed design for this feature.
https://docs.google.com/presentation/d/1kq0elysCDEmIWs3omTi5RoXTSBbrewn11Je2d26cI4M/edit?pref=2&pli=1#slide=id.g108988d1e3_0_6
Related ODL changes:
https://git.opendaylight.org/gerrit/#/c/36767/
https://git.opendaylight.org/gerrit/#/c/40143/
Host Configuration fields
-------------------------
- **host-id**
This represents host identification string. This string will be stored in
external_ids field with the key as odl_os_hostconfig_hostid.
Refer to Neutron config definition for host field for details on this field.
https://docs.openstack.org/kilo/config-reference/content/section_neutron.conf.html
- **host-type**
This field is for the type of the node. This value corresponds to agent_type in
agent_db. Example values are "ODL L2" and "ODL L3" for Compute and Network
nodes respectively. The same host can be configured to have multiple
configurations and can therefore have both L2, L3 and other
configurations at the same time. This string will be populated by ODL based
on the configurations available on the host. See example in section below.
- **config**
This is the configuration data for the host type. Since same node can be
configured to store multiple configurations different external_ids key value
pair are used to store these configuration. The external_ids with keys as
odl_os_hostconfig_config_odl_XXXXXXXX store different configurations.
The characters after the prefix odl_os_hostconfig_config_odl are the host
type. ODL extracts these characters and stores them as the host-type field. For
example odl_os_hostconfig_config_odl_l2, odl_os_hostconfig_config_odl_l3 keys
are used to provide L2 and L3 configurations respectively. ODL will extract
"ODL L2" and "ODL L3" as host-type field from these keys and populate
host-type field.
Config is a Json string. Some examples of config:
OVS configuration example::
{"supported_vnic_types": [{
"vnic_type": "normal",
"vif_type": "ovs",
"vif_details": "{}"
}]
"allowed_network_types": ["local", "flat", "gre", "vlan", "vxlan"]",
"bridge_mappings": {"physnet1":"br-ex"}
}"
OVS SR-IOV Hardware Offload configuration example::
{"supported_vnic_types": [{
"vnic_type": "normal",
"vif_type": "ovs",
"vif_details": "{}"},
{"vnic_type": "direct",
"vif_type": "ovs",
"vif_details": "{}"}
}]
"allowed_network_types": ["local", "flat", "gre", "vlan", "vxlan"]",
"bridge_mappings": {"physnet1":"br-ex"}
}"
OVS_DPDK configuration example::
{"supported_vnic_types": [{
"vnic_type": "normal",
"vif_type": "vhostuser",
"vif_details": {
"uuid": "TEST_UUID",
"has_datapath_type_netdev": True,
"support_vhost_user": True,
"port_prefix": "vhu",
# Assumption: /var/run mounted as tmpfs
"vhostuser_socket_dir": "/var/run/openvswitch",
"vhostuser_ovs_plug": True,
"vhostuser_mode": "client",
"vhostuser_socket": "/var/run/openvswitch/vhu$PORT_ID"}
}]
"allowed_network_types": ["local", "flat", "gre", "vlan", "vxlan"]",
"bridge_mappings": {"physnet1":"br-ex"}
}"
VPP configuration example::
{ {"supported_vnic_types": [
{"vnic_type": "normal",
"vif_type": "vhostuser",
"vif_details": {
"uuid": "TEST_UUID",
"has_datapath_type_netdev": True,
"support_vhost_user": True,
"port_prefix": "socket_",
"vhostuser_socket_dir": "/tmp",
"vhostuser_ovs_plug": True,
"vhostuser_mode": "server",
"vhostuser_socket": "/tmp/socket_$PORT_ID"
}}],
"allowed_network_types": ["local", "flat", "vlan", "vxlan", "gre"],
"bridge_mappings": {"physnet1": "br-ex"}}}
**Host Config URL**
Url : https://ip:odlport/restconf/operational/neutron:neutron/hostconfigs/
**Commands to setup host config in OVSDB**
::
export OVSUUID=$(ovs-vsctl get Open_vSwitch . _uuid)
ovs-vsctl set Open_vSwitch $OVSUUID external_ids:odl_os_hostconfig_hostid=test_host
ovs-vsctl set Open_vSwitch $OVSUUID external_ids:odl_os_hostconfig_config_odl_l2 =
"{"supported_vnic_types": [{"vnic_type": "normal", "vif_type": "ovs", "vif_details": {} }], "allowed_network_types": ["local"], "bridge_mappings": {"physnet1":"br-ex"}}"
Example for host configuration
-------------------------------
::
{
"hostconfigs": {
"hostconfig": [
{
"host-id": "test_host1",
"host-type": "ODL L2",
"config":
"{"supported_vnic_types": [{
"vnic_type": "normal",
"vif_type": "ovs",
"vif_details": {}
}]
"allowed_network_types": ["local", "flat", "gre", "vlan", "vxlan"],
"bridge_mappings": {"physnet1":"br-ex"}}"
},
{
"host-id": "test_host2",
"host-type": "ODL L3",
"config": {}
}]
}
}
networking-odl-16.0.0/doc/source/contributor/contributing.rst0000664000175000017500000000011613656750541024455 0ustar zuulzuul00000000000000============
Contributing
============
.. include:: ../../../CONTRIBUTING.rst
networking-odl-16.0.0/doc/source/contributor/index.rst0000664000175000017500000000150713656750541023062 0ustar zuulzuul00000000000000Contributor Guide
=================
In the Developer/Contributor Guide, you will find information on
networking-odl's lower level design and implementation details.
We will cover only essential details related to just networking-odl
and we won't repeat neutron devref here, for details in neutron,
neutron's devref can be checked:
https://docs.openstack.org/neutron/latest/contributor/index.html
For details regarding OpenStack Neutron's Api:
https://docs.openstack.org/api-ref/network/
Contributor's Reference
-----------------------
.. toctree::
:maxdepth: 2
testing
drivers_architecture
maintenance
usage
contributing
specs/index
Tutorial
--------
.. toctree::
:maxdepth: 2
quickstart.rst
Networking OpenDayLight Internals
---------------------------------
.. toctree::
:maxdepth: 2
hostconfig
networking-odl-16.0.0/doc/source/contributor/quickstart.rst0000664000175000017500000002334513656750541024151 0ustar zuulzuul00000000000000.. _quickstart:
=====================
Developer Quick-Start
=====================
This is a quick walkthrough to get you started developing code for
networking-odl. This assumes you are already familiar with submitting code
reviews to an OpenStack project.
.. see also::
https://docs.openstack.org/infra/manual/developers.html
Setup Dev Environment
=====================
Install OS-specific prerequisites::
# Ubuntu/Debian 14.04:
sudo apt-get update
sudo apt-get install python-dev libssl-dev libxml2-dev curl \
libmysqlclient-dev libxslt1-dev libpq-dev git \
libffi-dev gettext build-essential
# CentOS/RHEL 7.2:
sudo yum install python-devel openssl-devel mysql-devel curl \
libxml2-devel libxslt-devel postgresql-devel git \
libffi-devel gettext gcc
# openSUSE/SLE 12:
sudo zypper --non-interactive install git libffi-devel curl \
libmysqlclient-devel libopenssl-devel libxml2-devel \
libxslt-devel postgresql-devel python-devel \
gettext-runtime
Install pip::
curl -s https://bootstrap.pypa.io/get-pip.py | sudo python
Install common prerequisites::
sudo pip install virtualenv flake8 tox testrepository git-review
You may need to explicitly upgrade virtualenv if you've installed the one
from your OS distribution and it is too old (tox will complain). You can
upgrade it individually, if you need to::
sudo pip install -U virtualenv
Networking-odl source code should be pulled directly from git::
# from your home or source directory
cd ~
git clone https://opendev.org/openstack/networking-odl
cd networking-odl
For installation of networking-odl refer to :doc:`/install/index`.
For testing refer to :doc:`Testing <testing>` guide.
Verifying Successful Installation
==================================
There are some checks you can run quickly to verify that networking-odl
has been installed successfully.
#. Neutron agents must be in running state, if you are using pseudo-agent
for port binding then output of **openstack network agent list** should
be something like::
ubuntu@ubuntu-14:~/devstack$ openstack network agent list
+----------------------------+----------------+-----------+-------------------+-------+-------+-----------------------------+
| ID | Agent Type | Host | Availability Zone | Alive | State | Binary |
+----------------------------+----------------+-----------+-------------------+-------+-------+-----------------------------+
| 00628905-6550-43a5-9cda- | ODL L2 | ubuntu-14 | None | True | UP | neutron-odlagent- |
| 175a309ea538 | | | | | | portbinding |
| 37491134-df2a- | DHCP agent | ubuntu-14 | nova | True | UP | neutron-dhcp-agent |
| 45ab-8373-e186154aebee | | | | | | |
| 8e0e5614-4d68-4a42-aacb- | Metadata agent | ubuntu-14 | None | True | UP | neutron-metadata-agent |
| d0a10df470fb | | | | | | |
+----------------------------+----------------+-----------+-------------------+-------+-------+-----------------------------+
Your output of this command may vary depending on your environment,
for example hostname etc.
#. You can check that opendaylight is running by executing following
command::
ubuntu@ubuntu-14:~/devstack$ ps -eaf | grep opendaylight
Launching Instance and floating IP
==================================
#. Gather parameters required for launching an instance. We need the flavor ID,
   image ID and network ID; the following command can be used for launching an
   instance::
      openstack server create --flavor <flavor> --image <image> \
      --nic net-id=<net-id> --security-group <security-group> \
      <server-name>
For details on creating instances refer to [#third]_ and
[#fourth]_.
#. Attaching floating IPs to created server can be done by following command::
      openstack server add floating ip <INSTANCE_NAME_OR_ID> <FLOATING_IP_ADDRESS>
For details on attaching floating IPs refer to [#fifth]_.
Useful Commands
================
#. For verifying status try following command::
ubuntu@ubuntu-14:/distribution-karaf-0.6.0-SNAPSHOT/bin$ ./karaf status
You should receive following output::
Running ...
#. You can login using available client::
ubuntu@ubuntu-14:/distribution-karaf-0.6.0-SNAPSHOT/bin$ ./client
You will receive output in following format::
Logging in as karaf
3877 [sshd-SshClient[6dbb137d]-nio2-thread-3] WARN org.apache.sshd.client.keyverifier.AcceptAllServerKeyVerifier - Server at [/0.0.0.0:8101, RSA, 56:41:48:1c:38:3b:73:a8:a5:96:8e:69:a5:4c:93:e0] presented unverified {} key: {}
________ ________ .__ .__ .__ __
\_____ \ ______ ____ ____ \______ \ _____ ___.__.| | |__| ____ | |___/ |_
/ | \\____ \_/ __ \ / \ | | \\__ \< | || | | |/ ___\| | \ __\
/ | \ |_> > ___/| | \| ` \/ __ \\___ || |_| / /_/ > Y \ |
\_______ / __/ \___ >___| /_______ (____ / ____||____/__\___ /|___| /__|
\/|__| \/ \/ \/ \/\/ /_____/ \/
Hit '<tab>' for a list of available commands
and '[cmd] --help' for help on a specific command.
Hit '<ctrl-d>' or type 'system:shutdown' or 'logout' to shutdown OpenDaylight.
Now you can run commands as per your requirement, for example::
opendaylight-user@root>subnet-show
No SubnetOpData configured.
Following subnetId is present in both subnetMap and subnetOpDataEntry
Following subnetId is present in subnetMap but not in subnetOpDataEntry
Uuid [_value=2131f292-732d-4ba4-b74e-d70c07eceeb4]
Uuid [_value=7a03e5d8-3adb-4b19-b1ec-a26691a08f26]
Uuid [_value=7cd269ea-e06a-4aa3-bc11-697d71be4cbd]
Uuid [_value=6da591bc-6bba-4c8a-a12b-671265898c4f]
Usage 1: To display subnetMaps for a given subnetId subnet-show --subnetmap []
Usage 2: To display subnetOpDataEntry for a given subnetId subnet-show --subnetopdata []
To get help on some command::
opendaylight-user@root>help feature
COMMANDS
info Shows information about selected feature.
install Installs a feature with the specified name and version.
list Lists all existing features available from the defined repositories.
repo-add Add a features repository.
repo-list Displays a list of all defined repositories.
repo-refresh Refresh a features repository.
repo-remove Removes the specified repository features service.
uninstall Uninstalls a feature with the specified name and version.
version-list Lists all versions of a feature available from the currently available repositories.
There are other helpful commands, for example, log:tail, log:set, shutdown
to get tail of logs, set log levels and shutdown.
For checking neutron bundle is installed::
opendaylight-user@root>feature:list -i | grep neutron
odl-neutron-service | 0.8.0-SNAPSHOT | x | odl-neutron-0.8.0-SNAPSHOT | OpenDaylight :: Neutron :: API
odl-neutron-northbound-api | 0.8.0-SNAPSHOT | x | odl-neutron-0.8.0-SNAPSHOT | OpenDaylight :: Neutron :: Northbound
odl-neutron-spi | 0.8.0-SNAPSHOT | x | odl-neutron-0.8.0-SNAPSHOT | OpenDaylight :: Neutron :: API
odl-neutron-transcriber | 0.8.0-SNAPSHOT | x | odl-neutron-0.8.0-SNAPSHOT | OpenDaylight :: Neutron :: Implementation
odl-neutron-logger | 0.8.0-SNAPSHOT | x | odl-neutron-0.8.0-SNAPSHOT | OpenDaylight :: Neutron :: Logger
For checking netvirt bundle is installed::
opendaylight-user@root>feature:list -i | grep netvirt
odl-netvirt-api | 0.4.0-SNAPSHOT | x | odl-netvirt-0.4.0-SNAPSHOT | OpenDaylight :: NetVirt :: api
odl-netvirt-impl | 0.4.0-SNAPSHOT | x | odl-netvirt-0.4.0-SNAPSHOT | OpenDaylight :: NetVirt :: impl
odl-netvirt-openstack | 0.4.0-SNAPSHOT | x | odl-netvirt-0.4.0-SNAPSHOT | OpenDaylight :: NetVirt :: OpenStack
#. For exploration of API's following links can be used::
API explorer:
http://localhost:8080/apidoc/explorer
Karaf:
http://localhost:8181/apidoc/explorer/index.html
Detailed information can be found [#sixth]_.
.. rubric:: References
.. [#third] https://docs.openstack.org/mitaka/install-guide-rdo/launch-instance-selfservice.html
.. [#fourth] https://docs.openstack.org/draft/install-guide-rdo/launch-instance.html
.. [#fifth] https://docs.openstack.org/user-guide/cli-manage-ip-addresses.html
.. [#sixth] https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL:Restconf_API_Explorer
networking-odl-16.0.0/doc/source/admin/0000775000175000017500000000000013656750617017740 5ustar zuulzuul00000000000000networking-odl-16.0.0/doc/source/admin/index.rst0000664000175000017500000000015513656750541021576 0ustar zuulzuul00000000000000====================
Administration Guide
====================
.. toctree::
:maxdepth: 2
:glob:
*
networking-odl-16.0.0/doc/source/admin/reference_architecture.rst0000664000175000017500000000672613656750541025201 0ustar zuulzuul00000000000000Reference Architecture
======================
This document lists the minimum reference architecture to get OpenStack
installed with OpenDayLight. Wherever possible, additional resources will be
stated.
Cloud Composition
-----------------
The basic cloud will have 3 types of nodes:
* Controller Node - Runs OpenStack services and the ODL controller.
* Network Node - Runs the DHCP agent, the metadata agent, and the L3 agent (for
SNAT).
* Compute Node - VMs live here.
Usually each of the first 2 types of nodes will have a cluster of 3 nodes to
support HA. It's also possible to run the ODL controller on separate hardware
than the OpenStack services, but this isn't mandatory.
The last type of nodes can have as many nodes as scale requirements dictate.
Networking Requirements
-----------------------
There are several types of networks on the cloud, the most important for the
reference architecture are:
* Management Network - This is the network used to communicate between the
different management components, i.e. Nova controller to Nova agent, Neutron
to ODL, ODL to OVS, etc.
* External Network - This network provides VMs with external connectivity (i.e.
internet) usually via virtual routers.
* Data Network - This is the network used to connect the VMs to each other and
to network resources such as virtual routers.
The Control Nodes usually are only connected to the Management Network, unless
they have an externally reachable IP on the External Network.
The other node types are connected to all the networks since ODL uses a
distributed routing model so that each Compute Node hosts a "virtual router"
responsible for connecting the VMs from that node to other networks (including
the External Network).
This diagram illustrates how these nodes might be connected::
Controller Node
+-----------------+
| |
+-----------+192.168.0.251 |
| | |
| +-----------------+
|
| Compute Node +----------------+
| +---------------+ | Legend |
| | | +----------------+
+-----------+192.168.0.1 | | |
| | | | --- Management |
| +~~~~~~~~~+10.0.0.1 | | |
| | | | | ~~~ Data |
| | +=======+br-int | | |
| | | | | | === External |
| | | +---------------+ | |
| | | +----------------+
| | | Network Node
| | | +-----------------+
| | | | |
+-----------+192.168.0.100 |
| | | |
+~~~~~~~~~+10.0.0.100 |
| | |
|=======+br-int |
| | |
| +-----------------+
+----+---+
| |
| Router |
| |
+--------+
Minimal Hardware Requirements
-----------------------------
The rule of thumb is the bigger the better, more RAM and more cores will
translate to a better environment. For a POC environment the following is
necessary:
Management Node
~~~~~~~~~~~~~~~
CPU: 2 cores
Memory: 8 GB
Storage: 100 GB
Network: 1 * 1 Gbps NIC
Network Node
~~~~~~~~~~~~
CPU: 2 cores
Memory: 2 GB
Storage: 50 GB
Network: 1 Gbps NIC (Management Network), 2 * 1+ Gbps NICs
Compute Node
~~~~~~~~~~~~
CPU: 2+ cores
Memory: 8+ GB
Storage: 100 GB
Network: 1 Gbps NIC (Management Network), 2 * 1+ Gbps NICs
networking-odl-16.0.0/doc/source/configuration/0000775000175000017500000000000013656750617021517 5ustar zuulzuul00000000000000networking-odl-16.0.0/doc/source/configuration/samples/0000775000175000017500000000000013656750617023163 5ustar zuulzuul00000000000000networking-odl-16.0.0/doc/source/configuration/samples/ml2_odl.rst0000664000175000017500000000027013656750541025240 0ustar zuulzuul00000000000000=======================
Sample ml2_conf_odl.ini
=======================
This is sample for ml2_conf_odl.ini.
.. literalinclude:: ../../../../etc/neutron/plugins/ml2/ml2_conf_odl.ini
networking-odl-16.0.0/doc/source/configuration/index.rst0000664000175000017500000000100313656750541023346 0ustar zuulzuul00000000000000.. _configuring:
=======================
Configuration Reference
=======================
This section provides configuration options for networking-odl,
that needs to be set in addition to neutron configuration, for all
other configuration examples like neutron.conf and ml2_conf.ini,
neutron repo can be referred.
.. show-options::
ml2_odl
Configuration Samples
---------------------
This section provides sample configuration file ml2_conf_odl.ini
.. toctree::
:maxdepth: 1
samples/ml2_odl.rst
networking-odl-16.0.0/requirements.txt0000664000175000017500000000170513656750541020066 0ustar zuulzuul00000000000000# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
pbr>=4.0.0 # Apache-2.0
Babel>=2.5.3 # BSD
stevedore>=1.28.0 # Apache-2.0
debtcollector>=1.19.0 # Apache-2.0
neutron-lib>=2.0.0 # Apache-2.0
websocket-client>=0.47.0 # LGPLv2+
# OpenStack CI will install the following projects from git
# if they are in the required-projects list for a job:
neutron>=16.0.0.0b1 # Apache-2.0
networking-l2gw>=12.0.0 # Apache-2.0
networking-sfc>=10.0.0.0b1 # Apache-2.0
networking-bgpvpn>=10.0.0b1 # Apache-2.0
# The comment below indicates this project repo is current with neutron-lib
# and should receive neutron-lib consumption patches as they are released
# in neutron-lib. It also implies the project will stay current with TC
# and infra initiatives ensuring consumption patches can land.
# neutron-lib-current
networking-odl-16.0.0/setup.cfg0000664000175000017500000000466713656750617016441 0ustar zuulzuul00000000000000[metadata]
name = networking-odl
summary = OpenStack Networking
description-file =
README.rst
author = OpenStack
author-email = openstack-discuss@lists.openstack.org
home-page = https://docs.openstack.org/networking-odl/latest/
python-requires = >=3.6
classifier =
Environment :: OpenStack
Intended Audience :: Information Technology
Intended Audience :: System Administrators
License :: OSI Approved :: Apache Software License
Operating System :: POSIX :: Linux
Programming Language :: Python
Programming Language :: Python :: 3
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
[files]
packages =
networking_odl
data_files =
etc/neutron =
etc/neutron/plugins/ml2/ml2_conf_odl.ini
[global]
setup-hooks =
pbr.hooks.setup_hook
[extras]
ceilometer =
ceilometer>=11.0.0
[entry_points]
console_scripts =
neutron-odl-ovs-hostconfig = networking_odl.cmd.set_ovs_hostconfigs:main
neutron-odl-analyze-journal-logs = networking_odl.cmd.analyze_journal:main
neutron.ml2.mechanism_drivers =
opendaylight_v2 = networking_odl.ml2.mech_driver_v2:OpenDaylightMechanismDriver
neutron.service_plugins =
odl-router_v2 = networking_odl.l3.l3_odl_v2:OpenDaylightL3RouterPlugin
neutron.db.alembic_migrations =
networking-odl = networking_odl.db.migration:alembic_migrations
networking_odl.ml2.port_binding_controllers =
legacy-port-binding = networking_odl.ml2.legacy_port_binding:LegacyPortBindingManager
pseudo-agentdb-binding = networking_odl.ml2.pseudo_agentdb_binding:PseudoAgentDBBindingController
oslo.config.opts =
ml2_odl = networking_odl.common.config:list_opts
networking_sfc.sfc.drivers =
odl_v2 = networking_odl.sfc.sfc_driver_v2:OpenDaylightSFCDriverV2
networking_sfc.flowclassifier.drivers =
odl_v2 = networking_odl.sfc.flowclassifier.sfc_flowclassifier_v2:OpenDaylightSFCFlowClassifierDriverV2
network.statistics.drivers =
opendaylight.v2 = networking_odl.ceilometer.network.statistics.opendaylight_v2.driver:OpenDaylightDriver
[build_releasenotes]
build-dir = releasenotes/build
source-dir = releasenotes/source
all_files = 1
[extract_messages]
keywords = _ gettext ngettext l_ lazy_gettext
mapping_file = babel.cfg
output_file = networking_odl/locale/networking-odl.pot
[compile_catalog]
directory = networking_odl/locale
domain = networking-odl
[update_catalog]
domain = networking-odl
output_dir = networking_odl/locale
input_file = networking_odl/locale/networking-odl.pot
[egg_info]
tag_build =
tag_date = 0
networking-odl-16.0.0/rally-jobs/0000775000175000017500000000000013656750617016661 5ustar zuulzuul00000000000000networking-odl-16.0.0/rally-jobs/odl.yaml0000664000175000017500000001407113656750541020322 0ustar zuulzuul00000000000000---
NeutronNetworks.create_and_list_networks:
-
runner:
type: "constant"
times: 40
concurrency: 20
context:
users:
tenants: 1
users_per_tenant: 1
quotas:
neutron:
network: -1
sla:
failure_rate:
max: 0
NeutronNetworks.create_and_list_subnets:
-
args:
subnets_per_network: 2
runner:
type: "constant"
times: 40
concurrency: 20
context:
users:
tenants: 1
users_per_tenant: 1
quotas:
neutron:
subnet: -1
network: -1
sla:
failure_rate:
max: 0
NeutronNetworks.create_and_list_routers:
-
args:
network_create_args:
subnet_create_args:
subnet_cidr_start: "1.1.0.0/30"
subnets_per_network: 2
router_create_args:
runner:
type: "constant"
times: 40
concurrency: 20
context:
users:
tenants: 1
users_per_tenant: 1
quotas:
neutron:
network: -1
subnet: -1
router: -1
sla:
failure_rate:
max: 0
NeutronNetworks.create_and_list_ports:
-
args:
network_create_args:
port_create_args:
ports_per_network: 2
runner:
type: "constant"
times: 40
concurrency: 20
context:
users:
tenants: 1
users_per_tenant: 1
quotas:
neutron:
network: -1
subnet: -1
router: -1
port: -1
sla:
failure_rate:
max: 0
NeutronNetworks.create_and_update_networks:
-
args:
network_create_args: {}
network_update_args:
admin_state_up: False
name: "_updated"
runner:
type: "constant"
times: 40
concurrency: 20
context:
users:
tenants: 1
users_per_tenant: 1
quotas:
neutron:
network: -1
sla:
failure_rate:
max: 0
NeutronNetworks.create_and_update_subnets:
-
args:
network_create_args: {}
subnet_create_args: {}
subnet_cidr_start: "1.4.0.0/16"
subnets_per_network: 2
subnet_update_args:
enable_dhcp: False
name: "_subnet_updated"
runner:
type: "constant"
times: 40
concurrency: 20
context:
users:
tenants: 5
users_per_tenant: 5
quotas:
neutron:
network: -1
subnet: -1
sla:
failure_rate:
max: 0
NeutronNetworks.create_and_update_routers:
-
args:
network_create_args: {}
subnet_create_args: {}
subnet_cidr_start: "1.1.0.0/30"
subnets_per_network: 2
router_create_args: {}
router_update_args:
admin_state_up: False
name: "_router_updated"
runner:
type: "constant"
times: 40
concurrency: 20
context:
users:
tenants: 1
users_per_tenant: 1
quotas:
neutron:
network: -1
subnet: -1
router: -1
sla:
failure_rate:
max: 0
NeutronNetworks.create_and_update_ports:
-
args:
network_create_args: {}
port_create_args: {}
ports_per_network: 5
port_update_args:
admin_state_up: False
device_id: "dummy_id"
device_owner: "dummy_owner"
name: "_port_updated"
runner:
type: "constant"
times: 40
concurrency: 20
context:
users:
tenants: 1
users_per_tenant: 1
quotas:
neutron:
network: -1
port: -1
sla:
failure_rate:
max: 0
NeutronNetworks.create_and_delete_networks:
-
args:
network_create_args: {}
runner:
type: "constant"
times: 40
concurrency: 20
context:
users:
tenants: 1
users_per_tenant: 1
quotas:
neutron:
network: -1
subnet: -1
sla:
failure_rate:
max: 0
NeutronNetworks.create_and_delete_subnets:
-
args:
network_create_args: {}
subnet_create_args: {}
subnet_cidr_start: "1.1.0.0/30"
subnets_per_network: 2
runner:
type: "constant"
times: 40
concurrency: 20
context:
users:
tenants: 1
users_per_tenant: 1
quotas:
neutron:
network: -1
subnet: -1
sla:
failure_rate:
max: 0
NeutronNetworks.create_and_delete_routers:
-
args:
network_create_args: {}
subnet_create_args: {}
subnet_cidr_start: "1.1.0.0/30"
subnets_per_network: 2
router_create_args: {}
runner:
type: "constant"
times: 40
concurrency: 20
context:
users:
tenants: 1
users_per_tenant: 1
quotas:
neutron:
network: -1
subnet: -1
router: -1
sla:
failure_rate:
max: 0
NeutronNetworks.create_and_delete_ports:
-
args:
network_create_args: {}
port_create_args: {}
ports_per_network: 5
runner:
type: "constant"
times: 40
concurrency: 20
context:
users:
tenants: 1
users_per_tenant: 1
quotas:
neutron:
network: -1
port: -1
sla:
failure_rate:
max: 0
Quotas.neutron_update:
-
args:
max_quota: 1024
runner:
type: "constant"
times: 40
concurrency: 20
context:
users:
tenants: 20
users_per_tenant: 1
sla:
failure_rate:
max: 0
networking-odl-16.0.0/rally-jobs/plugins/0000775000175000017500000000000013656750617020342 5ustar zuulzuul00000000000000networking-odl-16.0.0/rally-jobs/plugins/__init__.py0000664000175000017500000000000013656750541022435 0ustar zuulzuul00000000000000networking-odl-16.0.0/rally-jobs/plugins/README.rst0000664000175000017500000000060713656750541022030 0ustar zuulzuul00000000000000Rally plugins
=============
All \*.py modules from this directory will be auto-loaded by Rally and all
plugins will be discoverable. There is no need of any extra configuration
and there is no difference between writing them here and in rally code base.
Note that it is better to push all interesting and useful benchmarks to Rally
code base, this simplifies administration for Operators.
networking-odl-16.0.0/rally-jobs/extra/0000775000175000017500000000000013656750617020004 5ustar zuulzuul00000000000000networking-odl-16.0.0/rally-jobs/extra/README.rst0000664000175000017500000000025513656750541021471 0ustar zuulzuul00000000000000Extra files
===========
All files from this directory will be copy pasted to gates, so you are able to
use absolute path in rally tasks. Files will be in ~/.rally/extra/*
networking-odl-16.0.0/rally-jobs/README.rst0000664000175000017500000000177113656750541020352 0ustar zuulzuul00000000000000Rally job related files
=======================
This directory contains rally tasks and plugins that are run by OpenStack CI.
Structure
---------
* plugins - directory where you can add rally plugins. Almost everything in
Rally is a plugin. Benchmark context, Benchmark scenario, SLA checks, Generic
cleanup resources, ....
* extra - all files from this directory will be copy pasted to gates, so you
are able to use absolute paths in rally tasks.
Files will be located in ~/.rally/extra/*
* odl.yaml is a task that is run in gates against OpenStack with
Neutron service configured with ODL plugin
Useful links
------------
* More about Rally: https://rally.readthedocs.org/en/latest/
* Rally release notes: https://rally.readthedocs.org/en/latest/release_notes.html
* How to add rally-gates: https://rally.readthedocs.org/en/latest/gates.html
* About plugins: https://rally.readthedocs.org/en/latest/plugins.html
* Plugin samples: https://github.com/openstack/rally/tree/master/samples/plugins
networking-odl-16.0.0/tox.ini0000664000175000017500000001750413656750541016121 0ustar zuulzuul00000000000000[tox]
envlist = docs,py37,pep8
minversion = 3.1.1
skipsdist = True
[testenv]
setenv = VIRTUAL_ENV={envdir}
PYTHONWARNINGS=default::DeprecationWarning
OS_LOG_CAPTURE={env:OS_LOG_CAPTURE:1}
OS_STDOUT_CAPTURE={env:OS_STDOUT_CAPTURE:1}
OS_STDERR_CAPTURE={env:OS_STDERR_CAPTURE:1}
OS_TEST_TIMEOUT={env:OS_TEST_TIMEOUT:60}
passenv = http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY
OS_FAIL_ON_MISSING_DEPS OS_POST_MORTEM_DEBUGGER TRACE_FAILONLY
OS_TEST_DBAPI_ADMIN_CONNECTION OS_DEBUG
usedevelop = True
deps = -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
-r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
whitelist_externals = bash
commands = stestr run {posargs}
[testenv:dsvm]
# Fake job to define environment variables shared between dsvm jobs
setenv = OS_SUDO_TESTING=1
OS_FAIL_ON_MISSING_DEPS=1
OS_LOG_PATH={env:OS_LOG_PATH:/opt/stack/logs}
commands = false
[testenv:functional]
setenv = {[testenv]setenv}
OS_TEST_PATH=./networking_odl/tests/functional
OS_LOG_PATH={env:OS_LOG_PATH:/opt/stack/logs}
deps = -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
-r{toxinidir}/requirements.txt
-r{toxinidir}/networking_odl/tests/functional/requirements.txt
[testenv:dsvm-functional]
setenv = {[testenv:functional]setenv}
{[testenv:dsvm]setenv}
deps =
{[testenv:functional]deps}
[testenv:pep8]
deps = {[testenv]deps}
-r{toxinidir}/doc/requirements.txt
commands =
flake8
{toxinidir}/tools/coding-checks.sh --pylint '{posargs}'
doc8 doc/source devstack releasenotes/source rally-jobs
neutron-db-manage --subproject networking-odl check_migration
{[testenv:genconfig]commands}
{[testenv:bashate]commands}
{[testenv:capitald]commands}
{[testenv:bandit]commands}
whitelist_externals =
bash
mkdir
[testenv:i18n]
commands = python ./tools/check_i18n.py ./networking_odl ./tools/i18n_cfg.py
[testenv:venv]
deps = {[testenv]deps}
-r{toxinidir}/doc/requirements.txt
commands = {posargs}
[testenv:cover]
setenv =
PYTHON=coverage run --source networking_odl --parallel-mode
commands =
stestr run {posargs}
coverage combine
coverage report --fail-under=80 --skip-covered
coverage html -d cover
coverage xml -o cover/coverage.xml
[testenv:docs]
whitelist_externals = rm
deps =
-c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
-r{toxinidir}/doc/requirements.txt
commands =
rm -rf doc/build
sphinx-build -W -b html doc/source doc/build/html
[testenv:pdf-docs]
envdir = {toxworkdir}/docs
deps = {[testenv:docs]deps}
whitelist_externals =
make
commands =
sphinx-build -W -b latex doc/source doc/build/pdf
make -C doc/build/pdf
[testenv:debug]
# Uses default base python
setenv = {[testenv]setenv}
OS_LOG_CAPTURE={env:OS_LOG_CAPTURE:0}
OS_STDOUT_CAPTURE={env:OS_STDOUT_CAPTURE:0}
OS_STDERR_CAPTURE={env:OS_STDERR_CAPTURE:0}
OS_TEST_TIMEOUT={env:OS_TEST_TIMEOUT:0}
commands = oslo_debug_helper -t networking_odl/tests {posargs}
[hacking]
import_exceptions = networking_odl._i18n
local-check-factory = networking_odl.hacking.checks.factory
[testenv:bandit]
# B101: Use of assert detected
deps = -r{toxinidir}/test-requirements.txt
commands = bandit -r networking_odl -x tests -n5 -s B101
[doc8]
# File extensions to check
extensions = .rst
# TODO(yamahata): doc8 work around. remove this when doc8 is fixed.
# doc8(actually docutils) handles relative path inclusion differently from sphinx.
# doc8 wrongly alerts invalid inclusion path with recursive relative inclusion
# https://sourceforge.net/p/docutils/bugs/211/
ignore-path-errors=doc/source/devref/index.rst;D000
[flake8]
# TODO(dougwig) -- uncomment this to test for remaining linkages
# N530 direct neutron imports not allowed
show-source = True
# TODO(mkolesni): Fix I202 if you really care about import checks
ignore = N530,I202
# H106: Don't put vim configuration in source files
# H203: Use assertIs(Not)None to check for None
# H204: Use assert(Not)Equal to check for equality
# H205: Use assert(Greater|Less)(Equal) for comparison
# H904: Delay string interpolations at logging calls
enable-extensions=H106,H203,H204,H205,H904
exclude=./.*,dist,doc,releasenotes,*lib/python*,*egg,build,tools
import-order-style = pep8
[testenv:bashate]
commands = bash -c "find {toxinidir} \
-not \( -type d -name .\?\* -prune \) \
-type f \
\( \
-name \*.sh \
-or \
-path \*/devstack/\*settings\* \
-or \
-path \*/devstack/devstackgaterc \
-or \
-path \*/devstack/entry_points \
-or \
-path \*/devstack/functions \
-or \
-path \*/devstack/odl-releases/common \
-or \
-path \*/devstack/override-defaults \
\) \
# E005 file does not begin with #! or have a .sh prefix
# E006 check for lines longer than 79 columns
# E042 local declaration hides errors
# E043 Arithmetic compound has inconsistent return semantics
-print0 | xargs -0 bashate -v -iE006 -eE005,E042,E043"
whitelist_externals = bash
[testenv:capitald]
usedevelop = False
skip_install = True
deps =
# Check if "Opendaylight" word is in any file
# Only "OpenDaylight" (with uppercase 'D') should be used
commands = bash -c "! grep \
--exclude-dir='.*' \
--exclude-dir='cover' \
--exclude-dir='__pycache__' \
--exclude='tox.ini' \
--exclude='ChangeLog' \
--exclude='*.py' \
--exclude='*.pyc' \
--exclude='*~' \
--recursive \
--line-number \
Opendaylight \
{toxinidir}"
whitelist_externals = bash
[testenv:genconfig]
deps = -r{toxinidir}/requirements.txt
commands =
mkdir -p etc/neutron/plugins/ml2
oslo-config-generator --namespace ml2_odl --output-file etc/neutron/plugins/ml2/ml2_conf_odl.ini.sample
whitelist_externals = mkdir
[testenv:releasenotes]
deps =
-c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
-r{toxinidir}/doc/requirements.txt
commands = sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html
[testenv:lower-constraints]
deps =
-c{toxinidir}/lower-constraints.txt
-r{toxinidir}/test-requirements.txt
-r{toxinidir}/requirements.txt
[testenv:dev]
# run locally (not in the gate) using editable mode
# https://pip.pypa.io/en/stable/reference/pip_install/#editable-installs
# note that order is important to ensure dependencies don't override
commands =
pip install -q -e "git+https://opendev.org/openstack/networking-l2gw#egg=networking_l2gw"
pip install -q -e "git+https://opendev.org/openstack/networking-bgpvpn#egg=networking_bgpvpn"
pip install -q -e "git+https://opendev.org/openstack/networking-sfc#egg=networking_sfc"
pip install -q -e "git+https://opendev.org/openstack/neutron-fwaas#egg=neutron_fwaas"
pip install -q -e "git+https://opendev.org/openstack/ceilometer#egg=ceilometer"
pip install -q -e "git+https://opendev.org/openstack/neutron#egg=neutron"
{[testenv]commands}
[testenv:pep8-dev]
deps = {[testenv]deps}
-r{toxinidir}/doc/requirements.txt
commands =
{[testenv:dev]commands}
{[testenv:pep8]commands}
networking-odl-16.0.0/ChangeLog0000664000175000017500000021610613656750616016362 0ustar zuulzuul00000000000000CHANGES
=======
16.0.0
------
* Make tempest jobs use the latest ODL and zuulv3
* Fix enums in db model
* Fix gate failure
16.0.0.0b1
----------
* Remove references for unittest2
* Additional python2 removal cleanups
* Remove networking-odl python2 jobs
* Try deinit odl\_features in TestOdlFeaturesNoFixture setUpClass
* Change function.func\_doc to function.\_\_doc\_\_
* Switch to Ussuri jobs
* Remove the remaining neutron-lbaas related constants
* Update master for stable/train
15.0.0
------
* Bump pylint version to one that supports python3.7
15.0.0.0b1
----------
* Remove unneeded Zuul branch matcher
* use callback payloads for ROUTER\_CONTROLLER events
* PDF documentation build
* Fix double-digit ODL version number handling for devstack
* Make functional jobs use the latest ODL versions
* Make rally jobs use the latest ODL versions
* Update api-ref location
* Add Python 3 Train unit tests
* py37: fix regex unknown escapes
* Replace git.openstack.org URLs with opendev.org URLs
* Make functional jobs using py3
* Convert filter\_metadata\_diff result to list
* set\_ovs\_hostconfigs on py3 fails with TypeError
* Update master for stable/stein
* Blacklist bandit, bump neutron-lib and retire neutron-lbaas
* stop using common db mixin
* OpenDev Migration Patch
* use trunk constants from neutron-lib
* Fix HOST\_IP fetching method in local.conf.example
* Replace openstack.org git:// URLs with https://
14.0.0
------
* Fix the misspelling of "available"
* Use latest networking-sfc release
* Add bgpvpn-vni as a supported extension for ODL BGPVPN driver
* Use latest Oxygen release instead of snapshot
14.0.0.0b1
----------
* Change openstack-dev to openstack-discuss
* Remove unused ryu from lower constraints
* fix typo mistakes
* Add the project source code repository in README
* [Trivial fix] Correct spelling error
* [Trivial fix] Correct spelling error
* Dont disable services that are not enabled
* devstack: enable flow based tunnels for sfc
* use context manager from neutron-lib
* Deprecate the ceilometer driver
* Use extras for ceilometer dependency
* Remove test-requirements from functional tox env
* Reorder Zuul jobs for better organization
* Remove the duplicated doc8 executa
* Remove openstack-tox-py35-with-neutron-lib-master
* Increment versioning with pbr instruction
* use include\_tasks instead of include
* Remove extra publish-openstack-python-branch-tarball job
* fix tox python3 overrides
* Use constraints in tox venv
* add local tox targets for pep8, py27 and py3
* opt in for neutron-lib consumption patches
* Implement Baked Query
* use is\_retriable from neutron-lib
* Get ceilometer from pypi
* Update Zuul config
* Revert "pseudo port binding: teach agent aliveness"
* Fix PEP8 tox environment
* add python 3.6 unit test job
* switch documentation job to new PTI
* import zuul job settings from project-config
* Removing unused client methods
* Update reno for stable/rocky
* Removing the fullstack tests
13.0.0
------
* Remove dns-integration from NETWORK\_API\_EXTENSIONS
* Ceilometer meters for ODL should use project id of admin
* Narrow down tempest tests to relevant tests
* Assure that PeriodicTask passes context as args
* Consume DB retry decorator from neutron-lib
13.0.0.0b3
----------
* update requirements for neutron-lib 1.18.0
* ODL l3 service provider
* Enabled feature negotation for qos rules
* Add docs requirements to venv
* Remove outdated disclaimer from setup.cfg
* Remove fullstack from the gate
* Add Fluorine functional to the gate
* Skip tempest test test\_snat\_external\_ip
* Fixate pylint version, fullstack non-voting
* use retry\_db\_errors from neutron-lib
* Remove the v1 drivers
* Add release notes link in README
* Remove vpnservice conditional from pre test hook
* Adopt the new enginefacade in networking-odl
* Fixes for the CI
* ODL feature fetch: parse configs
* Remove untested ODL releases
* Remove mock for odl\_features.init()
* use CORE constant from neutron-lib plugins
* Update README reference to latest OpenDaylight version
* Remove old OpenDaylight build references
* Remove nitrogen references as it is EOL
* Clean over verbose import
* Removing unused variable
* Retry journal recording in L3
* Cleanup l3\_odl\_v2 code
* Remove attribute db\_session from ODLBaseDbTestCase
* Remove unnecessary cleanup
* Simplify retry testing functions to only accept context
* Remove session parameter from \_test\_reset\_processing\_rows
* Update DB functions to handle contexts
* Abstract how retriable methods are tested
13.0.0.0b2
----------
* Add flourine jobs for the CI
* Trivial: Update pypi url to new url
* Make oxygen-functional/fullstack voting
* Retire carbon jobs for rocky
* Remove vagrant scripts from the repo
* Make carbon jobs non-voting
* use neutron-lib plugin utils
* Add more information to the ovs flows dump
* Add zuul role show-odl-info to fullstack
* Change default loopingcall interval for tests
* Update references of neutron services
* Clean old output before new doc builds
13.0.0.0b1
----------
* Enabling ML2 for new full sync and recovery mechanism
* Follow the new PTI for document build
* Revert temporary patch that disables linter errors locally
* use rpc Connection rather than create\_connection
* Add lower-constraints job
* Add 'flat' network type to host config docs
* Fix pep8 errors
* Remove tox\_install.sh helper
* Use ALIAS instead of LABEL for BGPVPN API
* Remove usage of the reserved keyword id
* Updated from global requirements
* Better defaults for the debug context
* fix a typo
* Allow Tempest CI to add txt extensions to ini files
* Fix CI issues that block the gate
* use callback payloads for PRECOMMIT\_UPDATE events
* Fix tox installation of neutron
* Updated from global requirements
* use plugin common utils from neutron-lib
* devstack: update carbon definition for carbon SR-3
* Updated from global requirements
* Imported Translations from Zanata
* Remove incorrect DB retry decorators
* ODL DHCP Port to be created only for IPv4 subnets
* use common agent topics from neutron-lib
* Fix log format for oxygen logging
* Update mailmap
* Switch to a blacklist file for test excludes
* Log exceptions on security group callbacks
* devstack: add nitrogen SR2 definition
* Make fullstack and functional native Zuul v3 jobs
* reno: Remove remote names from branch specifiers
* Enable hacking-extensions H204, H205
* Imported Translations from Zanata
* Update reno for stable/queens
12.0.0
------
* Add version specific debug tox environments
* Move zuul\_copy\_output to be a job variable
* Move extensions\_to\_txt to the job definition
* Zuul: Remove project name
* Replace Chinese quotes to English quotes
* Fix OpenDaylight setup with oxygen
* Use Zuul v3 fetch-subunit-output
* Remove branches filter from jobs
* Wait for worker start before testing in JournalPeriodicProcessorTest
* devstack: update local.conf.example to use lib/neutron
* Remove unit tests for ML2 Mechanism driver
* Fix unit tests for ML2 Mechanism driver
* Add devstack base jobs specific to the project
* Add neutron's tempest plugin to the CI
* Updated from global requirements
* Fix missing parentid on rule delete journal record
* zuulv3: add jobs for ODL oxygen master branch
* Fixes SSL websocket disconnects with client
12.0.0.0b3
----------
* Updated from global requirements
* Fixes websocket to use TLS when ODL NB is TLS
* Make tempest native Zuul v3 jobs
* Improve IPC and forking reliability in tests
* Fix missing variables in devstack/override-defaults
* use multiprovidernet api definition from neutron-lib
* Remove redundant exception
* Base mechanism for recovery
* Correct link address
* modify spelling error of variable
* Updated from global requirements
* Pass binding:profile attribute as a string
* Updated from global requirements
* keystone spelling errors
* Fix broken if in devstack/functions
* Add show-odl-info role
* Don't set use\_stderr = False for tests
* devstack: remove nitrogen snapshot 0.7.1
* Fix dependency calculation when two fixed IPs under same subnet
* Switch to get\_writer\_session
* Remove "-y" option for package install command
* Base mechanism for full sync
* Don't truncate subnetpools from subnet filters
* use callback payloads for \_SPAWN events
* Add OS\_DEBUG to passenv of tox
* Add default timeout for tests triggered by tox
* Capture logging while running tests
* Modify JournalPeriodicProcessor settings on tests at setUp
* Add helper function for JournalPeriodicProcessor
* Force maintenance task when it is started
* Add pidfile to the JournalPeriodicProcessor worker
* Force maintenance task on HUP on the worker
* Add forced processing to the PeriodicTask
* Move maintenance task to a worker
* Add SIGHUP handling to journal periodic processing
* Stoppable sync thread on OpenDaylightJournalThread
* Use nitrogen snapshot for grenade job
* Updated from global requirements
* Disable cinder for grenade Job
* Imported Translations from Zanata
* Raise an exception for unsupported vif
* Follow raising-format-tuple check
* Fix 3rd party import order
* Fix Inconsistent return statements
* Fix unit tests py27, py35
* Add hacking to enforce the config fixture over direct overrides
* Add a NOOP function
* Removing JournalCleanup class
12.0.0.0b2
----------
* Design for full sync and recovery of resources
* Correct misleading example of l2gw in readme
* Fix tests that were changing settings but not reverting
* Make test\_periodic\_task.test\_back\_to\_back\_job more consistent
* Add command line tool to analyze logs
* Reduce timer for periodic task tests
* devstack: add nitrogen-0.7.2 snapshot definition
* devstack: add nitrogen-SR1 release definition
* Switch to lib/neutron
* Add pre\_test\_grenade\_hook
* Add hacking to enforce the config fixture
* Override settings with a config fixture
* Added raw flag to regexp strings missing it
* Enable networking-odl only once
* Use requests.codes.XX constants instead of hardcoded constants
* Update sample config to use v2 for l3
* Remove setting of version/release from releasenotes
* Fix on PeriodicTask locking mechanism
* Updated from global requirements
* use l3 ext gw mode api def from neutron-lib
* use l3 api def from neutron-lib
* Remove JVM memory limitations by default
* Cleanup of OpenDaylight on ./clean.sh
* Initializing logging for set ovs hostconfig command
* devstack: add error check to \_wget and \_xpath
* use qos api def from neutron-lib
* Do not use "-y" for package install
* Rename Zuul jobs according to naming conventions
* Remove boron job
* Ignore rally nitrogen job for ocata branch
* Revert skip of test l3 test case
* tox/pep8: add bandit check
* use command line arguments in the main method
* Updated from global requirements
* Log additional info about entries
* Fix exception handling in journal
* Zuul: add file extension to playbook path
* Delete completed rows immediately when retention=0
* Have create\_pending\_row return the entry
* Reusing context defined in base class
* UT for testing urls for all the objects
* Fixes error handling of DB calls
* use ml2 driver api from neutron-lib
* Ignore I202 in pep8 (flake8), skip Testodll3
* Fix to use . to source script files
* Deprecated the V1 drivers
12.0.0.0b1
----------
* use external net api def from lib
* Imported Translations from Zanata
* use addr pairs api def from lib
* Fixes URL path for SFC v2 Driver
* Correction in dependency calculation for port pair group
* Correction in dependency calculation for port pair chains
* Adding Zuul v3 migrated legacy jobs
* Remove SCREEN\_LOGDIR from devstack
* unblock fullstack/functional tests
* Fixed vhost user prefix in test code & doc
* devstack: add oxygen name
* devstack: add carbon SR2 definition
* devstack: update nitrogen snapshot 0.7.0 -> 0.7.1
* devstack: add nitrogen release definition
* Added ODL installation flag
* Updated from global requirements
* consume common constants from lib
* Trivial Fix: correct typo artifcat to artifact
* devstack: teach how to handle latest ODL release
* devstack: add oxygen-snapshot release definition
* use new payload objects for \*\_INIT callbacks
* Updated from global requirements
* Updated from global requirements
* Don't sleep on exception
* Delete FWaaS
* tests: fix ml2 plugin config path
* change testrepository to stestr
* devstack: set ODL\_GATE\_SERVICE\_PROVIDER to fullstack/functional tests
* devstack: save ODL configuration file for debug
* devstack: symlink odl logfile to $BASET/logs
* odl-releases/README.rst: add nitrogen RC3 example
* devstack: rename local NEXUSPATH to \_NEXUSPATH
* fullstack: remove fullstack+carbon workaround
* full/functest: remove screen usage
* Cleanup registered plugins
* fullstack: wait for network-topology/netvirt
* devstack: setting ovsdb manager last
* Removed unnecessary code
* Updated from global requirements
* Fix to use . to source script files
* Update stable networking-odl release to pike
* use synchronized decorator from neutron-lib
* fullstack: load nicira extension early for carbon
* devstack: show install ODL features
* Full Sync: Moved resource fetching into drivers
* devstack: add xpath into required packages
* Carbon tempest CI fix: let ODL create br-int
* bashate devstack shell scripts
* Remove WebsocketBadStatusException
* Fix: retry journal.record on dependency deleted
* fullstack: skip test\_VM\_connectivity temporarily
* Update rows one by one in journal cleanup
* Delete completed rows one by one in during cleanup
* fullstack: don't install mysql/postgres
* devstack: dump more odl restconf info for debug
* Fixes db.delete\_row deletion by row\_id
* devstack: dump group for debug
* fullstack: test arping in addition to ping
* pseudo port binding: teach agent aliveness
* devstack: fix URL for karaf distribution
* Update the documentation link for doc migration
* Add reference deployment guide
* Fix gate issue: br-int not getting controller
* hardware offload support for openvswitch
* Full Sync: Correction in bgpvpn assoc variable
* Fix: tests were no longer avoiding journal calls
* Updated from global requirements
* Fixes SFCv2 full sync errors
* devstack: try metadata in parent dir
* devstack: examples of odl release definitions
* hacking: check string for Opendaylight and noqa support
* devstack: stop odl server after test
* Fix in documentation on how to enable BGPVPN
* db migration: create\_at in opendaylightjournal removed
* Use maintenance interval for maintenance task
* Fixes to PEP8 checks when running test with tox
* fullstack: use v2driver
* fullstack/functional test: setup neutron log
* Update reno for stable/pike
* update sample ml2\_conf\_odl.ini
* devstack: remove boron snapshot release definition
* Add configuration reference
* tox.ini: ignore \*~
* devstack: remove useless cat
* devstack: revise ODL log level
* devstack: use karaf for Nitrogen snapshot
* [Gate] Reduce SSH timeout for gate jobs
* Remove WebTest from test requirements
11.0.0.0rc1
-----------
* pseudo agent: pre-populate agentdb if missing
* add function disassociate\_floatingips to refresh floatingip information when delete port
* [Gate] Reduce test load on tempest jobs
* use neutron-lib for callbacks
* Functional Test for OpenDaylight DHCP Service
* Update subport status for trunk ports
* Add Flag to support OpenDaylight DHCP Service in Devstack
* Addition of driver class for lbaas driver
* use common.utils.get\_odl\_url
* unit: commit session after precommit
* Enabling support for DHCP Service on OpenDaylight Controller
* unit: use self.db\_context
* test\_l3\_odl\_v2.py: use correct context/session
* test\_mechanism\_odl\_v2: use given session
* set-ovs-hostconfig: enable 'flat' by default
* Updated from global requirements
11.0.0.0b3
----------
* Move journal periodic processing to a worker
* unit: fix up merge botch
* Use neutron\_lib for qos driver\_base
* Enabling support for DHCP Service on OpenDaylight Controller
* full\_sync: use given session
* recovery: use given session
* pseudo agent port binding: use neutron worker
* journal: use context instead of session for dhcp port service
* Enable Placement-api for grenade job
* Allowing lock to be applied per task basis
* Rearranging the documentation layout
* pseudo agent port binding: log owner and device\_id
* Add test\_connectivity test case in fakemachines
* unit test: mock.patch before super.setUp
* Updated from global requirements
* Update URLs in documents according to document migration
* Enable members of lbaas to use custom url builder
* journal: partially implement sg/sgrule dependency
* Use v2 driver for mechanism and l3
* Load port status update worker in V1 driver
* Change dependency validation to calculation
* Create journal dependencies table
* unit: consolidate mocking start\_odl\_sync\_thread
* devstack: add carbon SR1 release definition
* Support for recovery of all resources
* enable warning-is-error for sphinx build
* Bug 1704057: port status update: missing add of provisioning component
* OpenDaylight Ceilometer Driver
* journal: sleep when error
* Use new Netvirt for dsvm-fullstack
* [Gate] Remove releases older than Boron
* Revert "Update Full stack config"
* Update full stack test
* Change journal entry selection to optimistic locking
* Enable full-sync for the bgpvpn, lbaas, qos, sfc, trunk driver
* Update Full stack config
* functional tests: don't run journal timer
* Allow user to specify own method to make url
* Select new entry when validation fails
* Fetching of session using get\_session is deprecated
* Substantially improve SFC support documentation
* tox\_install\_project.sh: Use git clone --depth 1
* pep8: use import-order-style
* hacking: enable H106, H203 and H904
* new testenv to check Opendaylight
* hacking: enforce OpenDaylight instead of Opendaylight
* Switch from oslosphinx to openstackdocstheme
* Capitalize D in OpenDaylight
* fix up of I4a526ee84784ca6ff8061692437a8c874bb33d6a
* unit test: precommit is called without commit
* devstackgaterc: Enable n-api-meta
* Initialize odl\_features in mech\_driver\_v1
* make odl\_features default to empty feature set
* tox: enable pylint
* Run OdlPortStatusUpdate only in one worker
* Retrieve and process port status updates from ODL
* Revert "devstack: bug workaround 1698129"
* tox: add bash to externals for pep8 and bashate
* Updated from global requirements
* eliminate portbinding by ODL networking topology
* Utility for determining ODL neutron features
* Correction in Resource URL Mapping
* test\_l3\_odl: use odl l3 plugin, not neutron's
* remove unused code
* Use UUID for SG test
* Use port bindings
* use service type constants from neutron\_lib plugins
* Send port[fixed\_ips] update to ODL Controller
* devstack: add Boron SR4 release definition
* mech\_driver\_v2: remove update\_security\_group work around
* enable test\_security\_group\_update
* Fix vhost string comparison
* Fix config for grenade
* V2.0 Driver for LBaaS V2.0
* Moving \_make\_odl\_url method to common utils
* try tempest tests with floatingip and others
* unbreak gate
* Spec for blueprint neutron-port-dhcp
* Using assertFalse(A) instead of assertEqual(False, A)
* Revert "Allowing lock to be applied per operation basis"
* tox: enable bashate
* tox: generate config with pep8
11.0.0.0b2
----------
* Updated from global requirements
* Rename argument object\_id to object\_ids for \_no\_older\_operations
* Add precommit calls to qos
* devstack: use neutron hostconfig-ovs from 0.6.0 carbon
* Added decorator for bgpvpn, ml2 and l2gateway for postcommit
* The local.conf.example file in the master branch of networking-odl repo does not install the DLUX UI Karaf features needed for the ODL GUI
* devstack: add release definition of 0.6.0 carbon
* Updated from global requirements
* Allowing lock to be applied per operation basis
* Added decorator to assign postcommit method
* Set Initial Status for FIP down
* Add date and organization to copyright of script.py.mako
* Updated from global requirements
* Replaced neutron command with OpenStack commands
* Updated from global requirements
* Adding Websocket client for ODL
* use MechanismDriver from neutron-lib
* Updated from global requirements
* Log exception when journal entry processing fails
* Don't call journal's run\_sync\_thread in unit tests
* don't use run\_process for odl
* logging method call in ml2 driver
* Logging method call for sfc driver
* Add direction to known bandwidth\_limit\_rules parameters
* Logging method call for trunk driver
* Send MAC updates to ODL for SRIOV PFs
* use requests.session to avoid http open/close
* Stop translating log messages
* consume neutron-lib callbacks
* Removed extra call to \_fake\_trunk\_payload
* Correct SUPPORTED\_RULES of QoS driver
* BGPVPN V2 Driver - Moves journal call to precommit
* Migrate neutron.plugins.common to neutron-lib
* Remove notification\_driver from docs
* Add functional tests for QoS
* Remove QoS V1 driver
* Adapt new driver base for QoS
* Disable new N537 hacking check from next neutron-lib
* enable new netvirt
* release note on version bump to 11
* devstack: add release definitions for nitrogen
* bgpvpn: update v2 driver to use precommit
* devstack: odl\_snapshot\_full\_version misargument
* Updated from global requirements
* Updated from global requirements
11.0.0.0b1
----------
* Added quickstart guide for networking-odl
* Update sample conf
* devstack: add boron SR3 release definition
* Add Initialize Parent when OpenDaylightL3RouterPlugin is initialized
* Fixing a typo in function and variable name
* Updated from global requirements
* remove workarounds in devstackgaterc
* Fix unit tests
* Remove subunit-trace fork
* Revert "test-requirement: avoid sqlalchemy 1.1.5+"
* Add unit test for sqltestcase
* Simplify the query by using filter\_by
* Send only data to dependency generators
* test-requirement: avoid sqlalchemy 1.1.5+
* tox.ini: pass OS\_TEST\_DBAPI\_ADMIN\_CONNECTION
* Restructure of qos driver
* full\_sync: sync router before port
* full sync: correct sync order of resource
* secgroup: convert icmpv6 variant name into icmpv6
* Propose spec for dependency validations move
* Correcting links in documentation
* OVS connects to ODL using IP instead of hostname
* Missing 's' in error message string
* Removed old or unnecessary configurations
* Fix to correct Opendaylight trunk driver registration
* Fix typos in set\_ovs\_hostconfigs.py
* Updated from global requirements
* Fix call to xpath which causes ODL download to fail on CentOS
* Remove unused logging import
* port binding: trim port\_prefix + PORT\_ID to 14 length
* Functional tests for L2Gateway V2 Driver
* Refactor journal main loop
* Remove references of V1 driver
* Use journal.record everywhere
* bug work around: disable several test cases
* spec: move completed spec to completed directory
* Fix N536 hacking check from neutron-lib
* Make ml2\_context optional in journal.record
* tox: remove sitepackage=True
* Functional tests for BGPVPN V2 Driver
* Adapt new api from db\_api
* Add deprecating warning for qos v1 driver
* Fix ODL URL creation logic
* Switch to neutron\_lib for context
* Revert "odl bug: skip test\_port\_security\_macspoofing\_port"
* TrivialFix: Move portbindings to neutron-lib
* Updated from global requirements
* odl bug: skip test\_port\_security\_macspoofing\_port
* devstack: route for floatingip/ipv6 public range
* Updated from global requirements
* devstack: use localrc\_set and use local.conf
* Fix neutron-odl-ovs-hostconfig failure on compute
* devstack: enable placement-client for subnode
* Enable placement-api for compute node to fix multinode tempest failure
* tox.ini: allows to pass TRACE\_FAILONLY to ostestr
* Drop MANIFEST.in - pbr doesn't need it
* pylint: update .pylint
* Revert "Add a method to query operations"
* Update reno for stable/ocata
* Adding a threshold for coverage
4.0.0
-----
* QoS V2 driver for ODL
* OpenDaylight BGPVPN Version 2 Driver
* put back TrunkDependencyValidationsTestCase
* refactor test\_dependency\_validations
* OpenStack Networking-SFC Ver.2 driver for ODL
* Fix typos (sunbet -> subnet)
* test\_dependency: sort retrieved journal rows
* l2gw/dependency validator: missing comma
* Add a method to query operations
* Add/Update hostconfig examples for OVS-DPDK and VPP
* devstack: show ODL neutron northbound data
* delete sg rule on sg deletion on ODL neutron northbound
* Updated from global requirements
* L2Gateway version 2 driver for OpenDaylight
* Fix typo in doc/source/installation.rst
* devstack: configure external net for new netvirt
* Remove support for py34
* tox: pass OS\_POST\_MORTEM\_DEBUGGER env
* pseudo agent: don't set start\_flag
* avoid ovsdb port conflict
* Typo fix: choses to chooses
* ODL Drivers for Vlan Aware VMs
* Update link reference in README.rst
* Fix typo in doc/source/specs/journal-recovery.rst
* devstack: skip several test cases with v2driver and old netvirt
* Typo fix: binded => bound
* Fix typo in maintenance.rst
* Update hacking version
* Enable smoke tests on grenade job
* Revert "devstack: disable metadata for rally"
* Simplify dependency validations
* Remove the register\_validator method
* devstack: enable placement-api
* Remove SG validations
* devstack: remove trailing - in ODL\_GATE\_SERVICE\_PROVIDER
* Remove link to modindex
* Disable some tempest tests temporarily
* devstack: show info even after tempest fails
* Replace six.iteritems/itervalues with dict.items()/values()
* Use neutron-lib portbindings api-def
* Use neutron-lib provider net api-def
* Use V2 driver by default in devstack
* Updated from global requirements
* devstack: make new netvirt default for ODL boron+
* devstack: check latest revision of ODL snapshot
* Split tempest tests for V2 driver
* run functional/fullstack tests with v2driver
* devstack: run rally with v2driver
* devstack: run rally with new netvirt
* Revert "use osc-lib git master branch"
* Fix failing stack on compute node
* use osc-lib git master branch
* devstack: make pseudo agent port binding as default
* using sys.exit(main()) instead of main()
* Update lbaas-driver-v2 releasenotes
* fix some issues in legacy netvirt with mulitnode in carbon
* java: update oracle java 8 jdk version
* devstack: update NETWORK\_API\_EXTENSIONS
* create Openstack with ODL by vagrant
* remove unused self.url from qos driver
* neutron-lib: use L3 constant from neutron-lib
* fullstack - use configure\_for\_func\_testing.sh to setup env
* functional test: documentation and relnotes
* gitignore: ignore vagrant generated dir
* Removes unnecessary utf-8 encoding
* odl-release: update boron definition
* Upgrade script for networking-odl
* H803 hacking have been deprecated
* fullstack: increase check\_flow\_existance retry times and intervals
* Refactor config code for v2 tests
* Show team and repository tags
* devstack: disable metadata for rally
* Remove q-dhcp from compute node q-dhcp service in compute node causes metadata proxy failure as: checking http://169.254.169.254/2009-04-04/instance-id failed 1/20: up 11.03. request failed failed 2/20: up 23.35. request failed
* doc: unbreak build\_sphinx
* functional: vagrantfile for functional test
* configure\_for\_func\_testing: don't install rabbitmq
* configure\_for\_func\_testing: don't use realpath
* devstack: refactor install\_opendaylight
* Enable networking-odl compute mode in subnode
* Consolidate qos v1 driver classes
* devstack multinode: disable some test cases
* odl client: remove unnecessary except and log
* increase odl http timeout(experiment)
* Add seqnum to dependency checks
* Fix typo in devstack/settings.odl
* Fix devstack for fedora 25
* functional test: configure opendaylight
* devstack: improve odl-release definition
* Fix typo
* devstack: tempest CI fails
* mech driver v2: build dict for sg on update
* Configure L3 for grenade job
* Fix the update of qos-policy
* Show team and repo badges on README
* devstack: reorder mech driver for debug
* Use system subunit command if python-subunit is installed globally
* db: use neutron\_lib.db.model\_base
* functional test: install acl package
* Disable live migration tests
* devstack: enable scenario test\_security\_groups\_basic\_ops
* Fix fullstack CI
* devstack: make tempest timeout longer
* Update reno for stable/newton
* Forgot to reference arch in docs index
3.1.0
-----
* Added basic L3 functional tests
* Run ovs appctl execution of flows more times
* devstack: show related info for debug
* Fixed None reference in SG code in V2
* Add security groups basic functional tests
* Add utils.neutronify
* devstack: enable scenario test\_network\_basic
* devstack: enable scenario test\_minium\_basic
* devstack: exclude scenario tests known to fail
* devstack: option for conntrack for old netvirt
* prevent initial networks create on subnode
* devstack: Stop setting route pointing back to tenant router
* devstack: enable odl-neutron-logger by default
* Switch to using plugins directory in lieu of neutron manager
* devstack: disable configuring neutron on compute nodes
* Added ML2 basic functional tests
* Added FLAT type network
* Complement the implementation of odl lbaas driver\_v2
* devstack: add odl-neutron-logger to ODL karaf feature
* devstack: update beryllium release definition
* Scripts to enable fullstack testing in gate
* devstack: enable stably passing scenario tests
* devstack: remove source devstackgaterc
* fullstack test for networking-odl
* devstack: enable c-api,c-bak,c-sch,c-vol,cinder
* Fixed link to drivers architecture
* Add installation guide for networking-odl
* devstack: run tests with tempest run command
* add dsvm-functional tests tox and gate\_hook
* Allow forwarding of OS\_FAIL\_ON\_MISSING\_DEPS to test envs
* Added initial reference architecture
* Updated from global requirements
3.0.0
-----
* update Boron release definitions since SR1 release
* devstack: create public network connectivity
* mech v2: bug/1546910 work around
* Excluding Tempest from Rally job to avoid failure
* Updated from global requirements
* Fix the implementation of ODLMemberManager in lbaas driver\_v2
* devstack: enable force config drive
* Fix set\_ovs\_hostconfigs exit issue
* Fix few typographical errors
* Add a reference to Neutron Devref and Apiref
* Remove last vestiges of oslo-incubator
* Add grenade plugin
* secgroup: pushdown default secgroup rules to ODL
* Fix up some documentation quirks
* sort values for OVERRIDE\_ENABLED\_SERVICES
* Updated from global requirements
* follow up for https://review.openstack.org/#/c/268820/
* Journal recovery for syncing with ODL
* Add developer docs for ODL drivers
* Added maintenance devref
* tools: catch up neutron-lbaas change
* releasenotes: deprecate lbaasv1 driver
* releasenotes: vlan-transparency
* Adding a line space for proper rendering of doc
* Remove call to configure\_neutron\_odl in case of odl-compute and non-pseudo-agent port binding
* Change import statement to not rename the module
* alembic: db migration fails
* OpenStack Networking-SFC driver for OpenDaylight
* Updated from global requirements
* Journal recovery release notes added
* Full sync release notes added
* Maintenance thread release notes added
* Modify the "create" and "update" method in lbaas driver\_v2
* Release notes for pseudo agent port binding
* Add instructions to enable qos
* Release notes for Host Config
* Imported Translations from Zanata
* add test\_migrations test
* tox: use ostestr
* stop doing any magic cloning of neutron during CI
* devstack: exclude tests that uses ssh
* bug/1614766 work around
* Wrapper method for client#sendjson
* Bug 1608659 - pseudo\_agentdb\_binding AttributeError
* transparent\_vlan support
* trivial fix
* devstack: call odl-ovs-hostconfig conditionally
* Remove dependency from neutron and move main() to the end
* Improve README
* journal: filter.py: AttributeError: 'NoneType'
* Fix typo in DB migration script
* journal: created\_at of second is too coarse
* Enable release notes translation
* Updated from global requirements
* devstack: setup hostconfig on compute node
* Cloud admin script auto-config hostconfig defaults
* Fix a typo in documentation
* Add E123,E125 check and Solve several problems
* Add Apache 2.0 license to source file
* Fix a typo in override-defaults
* Updated from global requirements
* remove add/remove router interface to ODL
* Updated from global requirements
* Include alembic migrations in module
* security group rule: convert unknown protocol name
* Update homepage with developer documentation page
* simplify odl release definition
* use v2 driver for v2 test
* rest client: move parameter check to rest client
* update odl release definitions for Boron 0.5.0
* Journal recovery for basic scenarios
* unbreak test\_l3\_odl failure
* sqlite: datetime should be second precision
* test: journal/maintenance db clean up properly
* test\_maintenance: cleanup looping call
* populate json with both 'project\_id', 'tenant\_id'
* models: use neutron\_lib.db.model\_base
* more bug work around of Moxy bug of 475475
* Sync Security Groups and SG Rules before other resources
* journal.record should use callers plugin context
* Revert "Temporary fix for gate"
* tests/unit: consolidate journal db setup logic
* Add qos extension to devstack override defaults
* Temporary fix for gate
* Treat ODL's 404 hostconfigs, as an empty list On pseudo agent db, when odl response with 404 not\_found for hostconfigs, treat it as an empty list rather then an error. Issue a debug log in this flow
* ml2 v1 driver: work around full\_sync
* Updated from global requirements
* nuke lbaasv1 driver
* devstack: make NEXUSPATH configurable
* ODL QoS driver of v1 Type
* Pass OptGroup variable for RequiredOptError
* Remove reference to neutron.i18n
* Fix the members name in OpenDaylightLbaasDriverV2
* Change the OpenDaylightManager url\_path
* wrap\_db\_retry: retry\_on\_request was deprecated
* add the synchronization between neutron and ODL in driver\_v1
* use carbon snapshot as default odl-release
* devstack: add carbon snapshot to odl-releases
* Enable DeprecationWarning in test environments
* test\_pseudo\_agentdb\_binding.py: adopt neutron\_lib
* journal: port::securitygroup needs only ids
* db: add Mitaka tag for alembic migration revisions
* devstack: refactor ODL\_L3 logic depending on used feature
* Revert "devstack: setup hostconfig in ovsdb"
* Updated from global requirements
* pseudo agent, devstack: hostconf\_uri is set to ''
* Run set\_ovs\_hostconfigs as root with neutron-utils
* devstack:ovsdbd doesn't understand localhost
* Add Python 3.5 classifier and venv
* pseudo agent fails to load with unit test
* devstack: remove the definition of lithium snapshot
* devstack: setup hostconfig in ovsdb
* legacy\_port\_binding: teach VNIC type
* pep8, unittest: unbreak gate failure
* tox: add doc8 check
* devstack: the definition of beryllium snapshot 0.4.4
* devstack: add definition of beryllium SR3
* devstack: add more gate jobs
* settings.odl: eliminate 0.4.2-snapshot which was deleted
* Add releasenotes support with reno
* various update for tox.ini
* devstack: configurable ODL repositories paths
* devstack: add specific ODL snapshot functionality
* devstack: allow to enable v2 driver
* devstack: remove beryllium snapshot 0.4.2
* devstack: load neutron-northbound-service first
* refactor odl-release definition
* Replace assertEqual(None, \*) with assertIsNone in tests
* Remove discover from test-requirements
* Fix bug in call to get\_network - missing network\_id param
* devstack: remove optional bridge configuration
* Fix the order of arguments in assertEqual
* Support for ovs-dpdk, vpp in port binding
* Updated from global requirements
* fix \_enrich\_port() to return the modified "data"
* Add name property to ENUM type
* Fix devstack README indentation
* Corrects pep8 failure in set\_ovs\_hostconfigs.py
* Add \_\_ne\_\_ built-in function
* Fix tox unit test issue
* Switch-agnostic ODL port binding controller
* Add Spec for QoS driver
* Adding Host Config doc
* devstack: support bridge configuration, for vpnservice-openstack
* make dependency validator dynamically registerable
* create dir, doc/specs, for spec
* Updated from global requirements
* Fix db error when running python34 Unit tests
* adopt neutron\_lib for constants and exceptions
* Remove unused Params
* Fixed test\_mechanism\_odl.py due to functions map deprecation in ML2
* Full Sync for L3 resources
* Add a hook for test debug
* more bug work around of Java MOXy bug of 475475
* Use already defined constants
* Revert "Workaround to fix gate py27 and py34 issue."
* Spec for journal recovery
* Updated from global requirements
* drop unnecessary exec permission
* Refactor SG callbacks
* beryllium SR2 definition
* Simplify filtering logic
* ODL v2: Full sync resources
* Complete port details by journal instead of mech driver
* [Trivial Fix] Correct log.debug() format
* Add cleanup operation to maintenance thread
* Journal entries can get stuck forever causing busy wait
* Add journal maintenance thread
* Updated from global requirements
2.0.0
-----
* Provide driver in ODL for L2Gateway
* Workaround to fix gate py27 and py34 issue
* Switch to using hacking checks from neutron-lib
* Fix race between event write and thread processing
* ODL v2: Fix delay in sync pending rows
* move beryllium snapshot to 4.2 from 4.1
* Clean up odl releases definition
* Remove useless argument
* Reduce update db row code duplication
* Service Function Chaining Driver for OpenDaylight
* correct config help message format
* Updated from global requirements
* Make port binding implementation configurable
* Fix journal row locking
* ODL v2: Fix multiple updates race
* Reduce dependency validations code duplication
* Replace operation magic strings by constants
* Move validations to seperate module
* Moved to package networking\_odl.journal
* tox.ini: show-source and ignore in hacking are unnessary
* Fix N231 error about '\_' using with \_i18n lib
* devstack: allow to override ML2\_L3\_PLUGIN
* Preserve existing environment when adding JAVA ppa
* Reduce test code duplication
* devstack: add odl beryllium 0.4.1 SR1 definition
* devstack: remove stale snapshot definition
* devstack: refactor release definition
* Remove unused method
* devstack: switch default odl version to beryllium snapshot
* ODL v2: Improve L3 validation
* Install OpenJDK using yum\_install
* No need to convert mac address to upper case anymore
* Install networking-odl in develop mode
* Cleanup unused oslo-incubator code
* devstack: release definition for lithium 0.3.4-SR4 and 0.3.5 snapshot
* Migrate to oslo.context from Oslo incubator
* Select current java by setting PATH variable
* Setup using the last Oracle JDK 8
* Prevent unit tests from accidentally connecting to OpenDaylight
* Introduce security group callback PRECOMMIT functionality
* Improve validation in the V2 mechanism driver
* ODL v2: Security Group support
* Revert "bug work around of bug #1545218"
* devstack: add odl release definition for lithium 0.3.4 snapshot
* Pass all tests with Python 3.4 and Tox
* ODL v2: Assign row back to pending after validation failure
* devstack: add definition of lithium SR[123]
* Fix the coverage issue
* devstack/settings.odl: document definitions of new release/snapshot
* Fix link address typo error in beryllium-0.4.0
* Lightweight testing to test neutron/networking-odl without ODL
* Q\_ML2\_PLUGIN\_MECHANISM\_DRIVERS should not always be overridden
* devstack: add odl release definition for Beryllium 0.4.1 snapshot
* Nit: Occurrences of Openstack
* bug work around of Java MOXy bug of 475475
* bug work around of bug #1545218
* devstack: add odl release definition for Beryllium release
* devstack: add odl release definition for boron snapshot
* Opendaylight L3 service plugin refactor to handle out of sync issues
* Timestamps out of sync in the V2 driver
* Move the IP address from physical interface to the OVS physical bridge
* Enable vhost-user ports on supported platforms
* Show text of response message when failed
* Improve Testing.rst
* add snapshots to the list of directories that need to be removed
* Updated from global requirements
* floatingip's status doesn't change on disassociation
* drop unnecessary executable permission
* mock shouldn't return global value
* Add rally-jobs directory
* Add fixed\_ips fields to update port operation
* mech\_driver: don't send post request to create none resource
* Instance creation fails with the new V2 driver
* drop unnecessary executable permission
* devstack: use odl lithium snapshot 0.3.3 instead of 0.3.1
* devstack: make odl logging friendly for gate job
* Allow skipping installation of Open vSwitch
* Fix the typo in message correctly
* Fix the format of README.rst for devstack
* Reinstate PUBLIC\_BRIDGE as a way of adding interface to PUBLIC\_BRIDGE
* Correct typo in comment
* ODL internal error with allowed\_address\_pairs
* Update Oracle's JDK url to 1.8.0\_66
* Pass environment variable of proxy to tox
* Fix the odl-router entry point
* Don't use install\_package when handling failures
* Use existing java env
* Add ODL\_OVS\_MANAGERS to support clustering
* Use ODL Provider Mappings to instruct ODL to add port to bridges
* Detect and setup required java version in devstack
* Use ODL stable/lithium (aka SR3) by default
* Updated from global requirements
* OpenDaylightTestCase replaces sendjson permanently instead of mock
* Fix up issues after decomposition
* Opendaylight driver refactor to handle out of sync issues
* Correct the developers guide link
* Decompose mechanism driver out of neutron completely
* Add ODL\_BOOT\_WAIT\_URL to odl-releases/beryllium-snapshot-0.4.0
* Update import oslo\_serialization/utils for config-ref generation
* remove unnecessary use\_stderr=True in \_\_init\_\_.py
* Update import of oslo.config
* db: prepare scripts for subproject db tables
* make tempest.api.network.test\_extensions.ExtensionsTestJSON pass
* tox.ini: Fix cover by giving the source directory explicitly
* .coveragerc: Fix paths
* Stale OF entries retained in br-int
* client: consolidate odl client creation
* Change ignore-errors to ignore\_errors
1.0.1
-----
* requirements: Move neutron requirement into tox.ini
1.0.0
-----
* Updated from global requirements
* update the args when init SubnetContext obj
* odl client: gracefully ignore 404 when deleting
* mark out-of-sync when failure in sync\_from\_callback
* Updated from global requirements
* l3\_odl: put request for remove\_interface, not delete
* Support delegation of bind\_port to networking-odl backend driver
* Tweak CI configuration a bit
* Make ODL\_NETVIRT\_DEBUG\_LOGS disabled by default, enabled in pre\_test\_hook.sh
* Updated from global requirements
* Switch to using Opendaylight L3 in the gate
* Major overhaul of plugin.sh and ODL settings
* Fix unit tests
* Remove quantum untracked files from .gitignore
* Corrected URL information
* py34: Add support for python34 jobs
* Updated from global requirements
* Updated from global requirements
* Misc fixes for networking ODL in devstack
* Wipe out the journal directory, in addition to data directory
* Use url to check if ODL is fully initialized
* mech\_driver: full\_sync uses unrelated context for resources
* Updated from global requirements
* IS\_GATE should be disabled by default, set to True in devstack/pre\_test\_hook.sh
* workaround: l3 plugin misses dvr\_deletens\_if\_no\_port method
* lbaas: Fix incorrect url path
* l3\_odl: delete\_router results in exception
* requirements: Move neutron-[fwaas,lbaas] requirement into tox.ini
* define ODL\_NAME unconditionally
* Add Beryllium and Lithium Stable support
* Adding unittest for l3 to create,remove,update a router, a floatingip and an interface
* devstack: Add devstackgaterc file
* Overhaul pre\_test\_hook
* Updated from global requirements
* Updated from global requirements
* Refactoring post infra changes
* Bug 1466917: Explicitly set link up PUBLIC\_INTERFACE after br-ex add
* Update version for Liberty
1.0.0a0
-------
* We should whitelist bash rather than sh
* Add pre\_test\_hook.sh script
* Replace stackforge with openstack
* Be kind, and tell us where it went wrong if you please
* Add capability to save more than 10 logfiles per CI run
* Updated from global requirements
* mech\_driver: don't pass empty string as tenant\_id to ODL
* Update .gitreview file for project rename
* Send mtu and vlan\_transparent for network operations
* Give the JVM more juice
* Add logic to pass sg and sg-rules to ODL
* Use latest Lithium daily build, move ODL\_NETVIRT\_KARAF\_FEATURE
* Get upstream CI job working
* devstack: unzip -u may wait for user input
* devstack/settings.odl: syntax error
* import error by l3\_odl
* devstack: OFFLINE=True is ignored
* install\_opendaylight: eliminate unused local variable, \_pwd
* Make karaf file available as log artifact
* Use latest Helium from daily build instead of unstable Lithium
* De-clutter plugin.sh from release specific logic
* Add create br-ex in odl\_compute nodes
* Use ODL\_RELEASE instead of version number to determine release
* Fix setting up of ODL package variables
* Update version to 2015.1.2 to make pbr happy
* Add logic to use the latest Lithium release
* Revert "Add logic to pass sg and sg-rules to ODL"
* Fix incorrect add/remove router\_interface calls
* Add logic to pass sg and sg-rules to ODL
* Add an example local.conf
* Expose Karaf feature used by Opendaylight's net-virt
* Send router\_interface add/remove calls to ODL
* Switch from neutron.common.log.log to oslo\_log.helpers.log\_method\_call
* Fix broken unit tests for networking-odl
2015.1.1
--------
* Add instructions for configuring LBaaS V2 with ODL
* First cut at LBaaS V2 driver for ODL
* Remove session\_timeout parameter
* Add new LBaaS V2 API shim
* Bump version to 2015.1.1
* Update to distribution-karaf-0.2.3-Helium-SR3
* Add unit tests for L3, LBaaS, and FWaaS
* Update L3, LBaaS, and FWaaS code
* Remove vlan\_transparent and mtu for network APIs
* Correct test\_update\_port\_mac test
* Sync the latest oslo incubated libraries and use oslo.log
* Revert "Limit the tempest tests we run"
* Update ODL port logic to work with Lithium (cont.)
* Limit the tempest tests we run
* Update ODL port logic to work with Lithium
* Wipe out the data directory
* Allow the java memory parameters to be configurable
* Fix karaf logging
* Fix the check/merge jobs for ODL
* Update oslotest version in test-requirements.txt
* Fix documentation files
* Few corrections in devstack/settings in networking-odl
* Use git.openstack.org URL in devstack README
* Fix plugin.sh to handle OFFLINE=True mode
* Add odl-router entrypoint
* ODL\_MODE for plugin settings
* Use HTTP BASIC AUTH exclusively (no longer use JSESSIONID)
* Add pluggable devstack for networking-odl
* Fix oslo imports
* Use in-module ODL driver
* Fix the syntax of the cache module
* This adds existing L3, LBaas and FWaaS drivers
* Re-enable check for @author tag
* Bump hacking and allow author tags
* Fix unit tests for networking-odl
* Thin networking-odl driver
* Establish a successful baseline for CI jobs
* Fixups post split commit
* Rename module
* Updated from global requirements
* Move classes out of l3\_agent.py
* Prettify tox output for functional tests
* Services split, pass 2
* Remove TODO for H404
* Updated from global requirements
* Use comments rather than no-op string statements
* Workflow documentation is now in infra-manual
* tox.ini: Prevent casual addition of bash dependency
* Updated from global requirements
* Get rid of py26 references: OrderedDict, httplib, xml testing
* Updated the README.rst
* pretty\_tox.sh: Portablity improvement
* test\_dhcp\_agent: Fix no-op tests
* Enable undefined-loop-variable pylint check
* Fix incorrect exception order in \_execute\_request
* Migrate to oslo.i18n
* Migrate to oslo.middleware
* Migrate to oslo.utils
* Remove Python 2.6 classifier
* Remove ryu plugin
* Updated from global requirements
* Show progress output while running unit tests
* enable H401 hacking check
* enable H237 check
* Updated from global requirements
* Updated from global requirements
* Update i18n translation for neutron.agents log msg's
* enable F812 check for flake8
* enable F811 check for flake8
* Support pudb as a different post mortem debugger
* switch to oslo.serialization
* Add rootwrap filters for ofagent
* Remove openvswitch core plugin entry point
* Updated from global requirements
* Use correct base class for unit tests for ML2 drivers
* Updated from global requirements
* enable F402 check for flake8
* enable E713 in pep8 tests
* Hyper-V: Remove useless use of "else" clause on for loop
* Enable no-name-in-module pylint check
* Updated from global requirements
* Remove duplicate import of constants module
* Switch run-time import to using importutils.import\_module
* Enable assignment-from-no-return pylint check
* tox.ini: Avoid using bash where unnecessary
* Empty files should not contain copyright or license
* Remove single occurrence of lost-exception warning
* Updated fileutils and its dependencies
* remove E251 exemption from pep8 check
* mock.assert\_called\_once() is not a valid method
* Add pylint tox environment and disable all existing warnings
* Updated from global requirements
* Ignore top-level hidden dirs/files by default
* Remove some duplicate unit tests
* Drop sslutils and versionutils modules
* Removed kombu from requirements
* Updated from global requirements
* Updated from global requirements
* Remove sslutils from openstack.common
* remove linuxbridge plugin
* Open Kilo development
* Implement ModelsMigrationsSync test from oslo.db
* Fix entrypoint of OneConvergencePlugin plugin
* Set dsvm-functional job to use system packages
* Separate Configuration from Freescale SDN ML2 mechanism Driver
* Remove @author(s) from copyright statements
* Stop ignoring 400 errors returned by ODL
* Updated from global requirements
* Adds ipset support for Security Groups
* Add requests\_mock to test-requirements.txt
* Removed kombu from requirements
* Supply missing cisco\_cfg\_agent.ini file
* Updated from global requirements
* Work toward Python 3.4 support and testing
* Revert "Cisco DFA ML2 Mechanism Driver"
* Big Switch: Separate L3 functions into L3 service
* Remove reference to cisco\_cfg\_agent.ini from setup.cfg again
* Adds router service plugin for CSR1kv
* Support for extensions in ML2
* Cisco DFA ML2 Mechanism Driver
* Adding mechanism driver in ML2 plugin for Nuage Networks
* Fix state\_path in tests
* Remove ovs dependency in embrane plugin
* Use lockutils module for tox functional env
* Updated from global requirements
* Add unit tests covering single operations to ODL
* Add specific docs build option to tox
* Fix bigswitch setup.cfg lines
* Remove auto-generation of db schema from models at startup
* Updated from global requirements
* Use jsonutils instead of stdlib json
* Opencontrail plug-in implementation for core resources
* Add delete operations for the ODL MechanismDriver
* Add a tox test environment for random hashseed testing
* Updated from global requirements
* Remove reference to cisco\_cfg\_agent.ini from setup.cfg
* Removed configobj from test requirements
* Updated from global requirements
* Functional tests work fine with random PYTHONHASHSEED
* Set python hash seed to 0 in tox.ini
* Configuration agent for Cisco devices
* Updated from global requirements
* ML2 mechanism driver for SR-IOV capable NIC based switching, Part 2
* This patch changes the name of directory from mech\_arista to arista
* ML2 mechanism driver for SR-IOV capable NIC based switching, Part 1
* Allow to import \_LC, \_LE, \_LI and \_LW functions directly
* Make readme reference git.openstack.org not github
* Bump hacking to version 0.9.2
* Use auth\_token from keystonemiddleware
* Remove reference to setuptools\_git
* Add a gate-specific tox env for functional tests
* Add CONTRIBUTING.rst
* Updated from global requirements
* Updated from global requirements
* Updated from global requirements
* Fix example for running individual tests
* Switch to using of oslo.db
* remove unsupported middleware
* Add config for performance gate job
* Synced log module and its dependencies from olso-incubator
* don't ignore rules that are already enforced
* Updated from global requirements
* Updated from global requirements
* ofagent: move main module from ryu repository
* Remove the useless vim modelines
* Removed 'rpc' and 'notifier' incubator modules
* Use openstack.common.lockutils module for locks in tox functional tests
* Port to oslo.messaging
* Updated from global requirements
* Ignore emacs checkpoint files
* Added missing core\_plugins symbolic names
* remove pep8 E122 exemption and correct style
* remove E112 hacking exemption and fix errors
* Updated from global requirements
* Freescale SDN Mechanism Driver for ML2 Plugin
* Remove run-time version checking for openvswitch features
* Added missing plugin .ini files to setup.cfg
* Updated from global requirements
* Synced jsonutils from oslo-incubator
* Cisco APIC ML2 mechanism driver, part 2
* NSX: get rid of the last Nicira/NVP bits
* Allow vlan type usage for OpenDaylight ml2
* Add missing translation support
* Add mailmap entry
* Ensure core plugin deallocation after every test
* Updated from global requirements
* Remove explicit dependency on amqplib
* Remove duplicate module-rgx line in .pylintrc
* Fix H302 violations
* Updated from global requirements
* Improve ODL ML2 Exception Handling
* Updated from global requirements
* Exclude .ropeproject from flake8 checks
* Enable flake8 E711 and E712 checking
* Enforce required config params for ODL driver
* Updated from global requirements
* Sync service and systemd modules from oslo-incubator
* Move bash whitelisting to pep8 testenv
* ML2: ODL driver sets port status
* Fix Jenkins translation jobs
* ignore build directory for pep8
* Enable hacking H301 check
* Updated from global requirements
* Remove last parts of Quantum compatibility shim
* Open Juno development
* Start using oslosphinx theme for docs
* Updated from global requirements
* ML2: Remove validate\_port\_binding() and unbind\_port()
* add HEAD sentinel file that contains migration revision
* Bugfix and refactoring for ovs\_lib flow methods
* Updated from global requirements
* Updated from global requirements
* Updated from global requirements
* One Convergence Neutron Plugin l3 ext support
* One Convergence Neutron Plugin Implementation
* BigSwitch: Add SSL Certificate Validation
* Updated from global requirements
* Add OpenDaylight ML2 MechanismDriver
* Implementaion of Mechanism driver for Brocade VDX cluster of switches
* Implement Mellanox ML2 MechanismDriver
* Implement OpenFlow Agent mechanism driver
* Finish off rebranding of the Nicira NVP plugin
* BigSwitch: Add agent to support neutron sec groups
* Adds the new IBM SDN-VE plugin
* Updated from global requirements
* Developer documentation
* Rename Neutron core/service plugins for VMware NSX
* Updated from global requirements
* Sync minimum requirements
* Copy cache package from oslo-incubator
* Remove dependent module py3kcompat
* Add migration support from agent to NSX dhcp/metadata services
* Remove psutil dependency
* LBaaS: move agent based driver files into a separate dir
* mailmap: update .mailmap
* Return request-id in API response
* Prepare for multiple cisco ML2 mech drivers
* Support building wheels (PEP-427)
* Use oslo.rootwrap library instead of local copy
* Enables BigSwitch/Restproxy ML2 VLAN driver
* Add an explicit tox job for functional tests
* Base ML2 bulk support on the loaded drivers
* Enable hacking H233 rule
* Update RPC code from oslo
* Configure plugins by name
* Update lockutils and fixture in openstack.common
* Rename nicira configuration elements to match new naming structure
* Remove unused imports
* Rename check\_nvp\_config utility tool
* Corrects broken format strings in check\_i18n.py
* Updates tox.ini to use new features
* Updated from global requirements
* Sync global requirements to pin sphinx to sphinx>=1.1.2,<1.2
* Add fwaas\_driver.ini to setup.cfg
* Add vpnaas and debug filters to setup.cfg
* Updates .gitignore
* Update Zhenguo Niu's mailmap
* Replace stubout with fixtures
* Ensure get\_pid\_to\_kill works with rootwrap script
* Updated from global requirements
* Cleanup HACKING.rst
* Fix import log\_handler error with publish\_errors set
* Updated from global requirements
* Updated from global requirements
* Cleanup and make HACKING.rst DRYer
* Add support for managing async processes
* Remove obsolete redhat-eventlet.patch
* Open Icehouse development
* Updated from global requirements
* Require oslo.config 1.2.0 final
* Use built-in print() instead of print statement
* Add l2 population base classes
* Fix message i18n error
* Install metering\_agent.ini and vpn\_agent.ini
* fix conversion type missing
* Enclose command args in with\_venv.sh
* ML2 Mechanism Driver for Cisco Nexus
* Reference driver implementation (IPsec) for VPNaaS
* Implement ML2 port binding
* Arista ML2 Mechanism driver
* ML2 Mechanism Driver for Tail-f Network Control System (NCS)
* Default to not capturing log output in tests
* Add Neutron l3 metering agent
* Update mailmap
* Fix wrong example in HACKING.rst
* Bumps hacking to 0.7.0
* remove binaries under bin
* Fixes Windows setup dependency bug
* Restore Babel to requirements.txt
* Remove DHCP lease logic
* Remove last vestiges of nose
* Updated from global requirements
* Ignore pbr\*.egg directory
* Fix H102, H103 Apache 2.0 license hacking check error
* Remove openstack.common.exception usage
* Adds Babel dependency missing from 555d27c
* Fix the alphabetical order in requirement files
* Remove comments from requirements.txt (workaround pbr bug)
* remove netifaces dependency of ryu-agent
* Add gre tunneling support for the ML2 plugin
* Add VXLAN tunneling support for the ML2 plugin
* xenapi - rename quantum to neutron
* Fix issue with pip installing oslo.config-1.2.0
* Initial Modular L2 Mechanism Driver implementation
* Add cover/ to .gitignore
* fix some missing change from quantum to neutron
* git remove old non-working packaging files
* Rename Quantum to Neutron
* Rename quantum to neutron in .gitreview
* Sync install\_venv\_common from oslo
* Update to use OSLO db
* Require greenlet 0.3.2 (or later)
* Remove single-version-externally-managed in setup.cfg
* Fix single-version-externally-mananged typo in setup.cfg
* Allow use of lowercase section names in conf files
* Require pbr 0.5.16 or newer
* Update to the latest stevedore
* Rename agent\_loadbalancer directory to loadbalancer
* Remove unit tests that are no longer run
* Update with latest OSLO code
* Remove explicit distribute depend
* Fix and enable H90x tests
* Remove generic Exception when using assertRaises
* Add \*.swo/swp to .gitignore
* python3: Introduce py33 to tox.ini
* Rename README to README.rst
* Rename requires files to standard names
* Initial Modular L2 plugin implementation
* Revert dependency on oslo.config 1.2.0
* Perform a sync with oslo-incubator
* Require oslo.config 1.2.0a2
* update mailmap
* Revert "Fix ./run\_tests.sh --pep8"
* Move to pbr
* Docstrings formatted according to pep257
* relax amqplib and kombu version requirements
* Fix ./run\_tests.sh --pep8
* blueprint mellanox-quantum-plugin
* Update flake8 pinned versions
* Let the cover venv run individual tests
* Copy the RHEL6 eventlet workaround from Oslo
* Remove locals() from strings substitutions
* Enable automatic validation of many HACKING rules
* Shorten the path of the nicira nvp plugin
* Allow pdb debugging in manually-invoked tests
* Reformat openstack-common.conf
* Switch to flake8 from pep8
* Parallelize quantum unit testing:
* blueprint cisco-single-config
* Add lbaas\_agent files to setup.py
* Add VIRTUAL\_ENV key to environment passed to patch\_tox\_env
* Pin SQLAlchemy to 0.7.x
* Sync latest Oslo components for updated copyright
* drop rfc.sh
* Replace "OpenStack LLC" with "OpenStack Foundation"
* First havana commit
* remove references to netstack in setup.py
* Switch to final 1.1.0 oslo.config release
* Update to Quantum Client 2.2.0
* Update tox.ini to support RHEL 6.x
* Switch to oslo.config
* Add common test base class to hold common things
* Pin pep8 to 1.3.3
* Add initial testr support
* LBaaS Agent Reference Implementation
* Bump python-quantumclient version to 2.1.2
* Add scheduling feature basing on agent management extension
* Remove compat cfg wrapper
* Unpin PasteDeploy dependency version
* Use testtools instead of unittest or unittest2
* Add midonet to setup.py
* Sync latest install\_venv\_common.py with olso
* Add check-nvp-config utility
* Add unit test for ryu-agent
* Use oslo-config-2013.1b3
* Adds Brocade Plugin implementation
* Synchronize code from oslo
* PLUMgrid quantum plugin
* Update .coveragerc
* Allow tools/install\_venv\_common.py to be run from within the source directory
* Updated to latest oslo-version code
* Use install\_venv\_common.py from oslo
* Cisco plugin cleanup
* Use babel to generate translation file
* Update WebOb version to >=1.2
* Update latest OSLO
* Adding multi switch support to the Cisco Nexus plugin
* Adds support for deploying Quantum on Windows
* Latest OSLO updates
* Port to argparse based cfg
* Add migration support to Quantum
* Undo change to require WebOb 1.2.3, instead, require only >=1.0.8
* .gitignore cleanup
* Upgrade WebOb to 1.2.3
* Logging module cleanup
* Add OVS cleanup utility
* Add tox artifacts to .gitignore
* Add restproxy.ini to config\_path in setup.py
* Add script for checking i18n message
* l3 agent rpc
* Add metadata\_agent.ini to config\_path in setup.py
* Remove \_\_init\_\_.py from bin/ and tools/
* add metadata proxy support for Quantum Networks
* Use auth\_token middleware in keystoneclient
* Add QUANTUM\_ prefix for env used by quantum-debug
* Make tox.ini run pep8 checks on bin
* Explicitly include versioninfo in tarball
* Import lockutils and fileutils from openstack-common
* Updated openstack-common setup and version code
* Ensure that the anyjson version is correct
* Add eventlet\_backdoor and threadgroup from openstack-common
* Add loopingcall from openstack-common
* Added service from openstack-common
* Drop lxml dependency
* Add uuidutils module
* Import order clean-up
* pin sqlalchemy to 0.7
* Correct Intended Audience
* Add OpenStack trove classifier for PyPI
* Improve unit test times
* l3\_nat\_agent was renamed to l3\_agent and this was missed
* Support for several HA RabbitMQ servers
* add missing files from setup.py
* Create .mailmap file
* Lower webob dep from v1.2.0 to v1.0.8
* Implements agent for Quantum Networking testing
* Create utility to clean-up netns
* Update rootwrap; track changes in nova/cinder
* Execute unit tests for Cisco plugin with Quantum tests
* Add lease expiration script support for dnsmasq
* Add nosehtmloutput as a test dependency
* quantum l3 + floating IP support
* Updates pip requirements
* NEC OpenFlow plugin support
* remove old gflags config code
* RPC support for OVS Plugin and Agent
* Initial implemention of MetaPlugin
* RPC support for Linux Bridge Plugin and Agent
* Exempt openstack-common from pep8 check
* fix bug lp:1025526,update iniparser.py to accept empty value
* Introduce files from openstack common
* fix bug lp:1019230,update rpc from openstack-common
* implement dhcp agent for quantum
* Use setuptools git plugin for file inclusion
* Remove paste configuration details to a seperate file. blueprint use-common-cfg
* Implements the blueprint use-common-cfg for the quantum service. More specifically uses global CONF for the quantum.conf file
* Add authZ through incorporation of policy checks
* Bug #1013967 - Quantum is breaking on tests with pep 1.3
* Use openstack.common.exception
* API v2: improve validation of post/put, rename few attributes
* Add API v2 support
* Fix up test running to match jenkins expectation
* Add build\_sphinx options
* Quantum should use openstack.common.jsonutils
* Remove hardcoded version for pep8 from tools/test-requires
* Quantum should use openstack.common.importutils
* PEP8 fixes
* Bug #1002605
* Parse linuxbridge plugins using openstack.common.cfg
* Add HACKING.rst to tarball generation bug 1001220
* Include AUTHORS in release package
* Change Resource.\_\_call\_\_() to not leak internal errors
* Removed simplejson from pip-requires
* Remove dependency on python-quantumclient
* Add sphinx to the test build deps
* Add HACKING.rst coding style doc
* bug 963152: add a few missing files to sdist tarball
* Fix path to python-quantumclient
* Split out pip requires and aligned tox file
* Fix missing files in sdist package [bug 954906]
* Downgraded required version of WebOb to 1.0.8
* more files missing in sdist tarball
* make sure pip-requires is included in setup.py sdist
* remove pep8 and strict lxml version from setup.py
* plugin: introduce ryu plugin
* bug 934459: pip no longer supports -E
* blueprint quantum-ovs-tunnel-agent
* Initial commit: nvp plugin
* Cleanup the source distribution
* blueprint quantum-linux-bridge-plugin
* Remove quantum CLI console script
* Bug 925372: remove deprecated webob attributes (and also specify stable webob version in pip-requires)
* Make tox config work
* Pin versions to standard versions
* Split out quantum.client and quantum.common
* Quantum was missing depend on lxml
* moving batch config out of quantum-server repo
* Getting ready for the client split
* Removed erroneous print from setup.py
* Base version.py on glance
* Fix lp bug 897882
* Install a good version of pip in the venv
* Rename .quantum-venv to .venv
* Remove plugin pip-requires
* Bug #890028
* Fix for bug 900316
* Second round of packaging changes
* Changes to make pip-based tests work with jenkins
* Fix for bug 888811
* Fix for Bug #888820 - pip-requires file support for plugins
* blueprint quantum-packaging
* Add .gitreview config file for gerrit
* Add code-coverage support to run\_tests.sh (lp860160)
2011.3
------
* Add rfc.sh to help with gerrit workflow
* merge tyler's unit tests for cisco plugin changes lp845140
* merge salv's no-cheetah CLI branch lp 842190
* merge sumit's branch for lp837752
* Merging latest from lp:quantum
* Merging lo:~salvatore-orlando/quantum/quantum-api-auth
* Updating CLI for not using Cheetah anymore. Now using a mechanism based on Python built-in templates
* Merging Sumit's changes including fixes for multinic support, and CLI module for working with extensions
* Merging from Cisco branch
* Merging from lp:quantum
* merge cisco consolidated plugin changes
* Merging lp:~salvatore-orlando/quantum/bug834449
* merge trunk
* Merging from lp:quantum
* merge salvatore's new cli code
* Addressing comments from Dan
* Merging from quantum
* merge cisco extensions branch
* Merging from Sumit's branch, changes to VIF-driver and Scheduler; extension action names have been changed in response to Salvatore's review comments in the extensions branch review
* Syncing with Cisco extensions branch
* Merging from Sumit's branch, import ordering related changes
* Merging the Cisco branch
* Finishing cli work Fixing bug with XML deserialization
* Merging lp:~salvatore-orlando/quantum/quantum-api-alignment
* merge latest quantum branch and resolve conflicts
* Merging lp:~asomya/quantum/lp833163 Fix for Bug #833163: Pep8 violations in recent packaging changes that were merged into trunk (Critical)
* PEP8 fixes for setup.py
* Merging lp:~cisco-openstack/quantum/802dot1qbh-vifdriver-scheduler
* Merging lp:~cisco-openstack/quantum/l2network-plugin-persistence
* Merging lp:quantum
* merging with lp:quantum
* Making Keystone version configurable
* Merging branch: lp:~danwent/quantum/test-refactor
* Syncing with lp:quantum
* Merging fixes and changes batch-config script. Thanks lp:danwent !
* Merging lp:~asomya/quantum/lp824145 Fix for Bug#824145 : Adding a setup script for quantum
* merge trunk pep8 fixes adapting CLI to API v1.0 Fixing wsgi to avoid failure with extensions
* merge trunk
* Pulling in changes from lp:quantum
* Merging Cisco's contribution to Quantum. Thanks to various folks at Cisco Systems, Quantum will have plugins to integrate with Cisco UCS blade servers using 802.1Qbh, Cisco Nexus family of switches and the ability for Quantum plugin to have multiple switches/devices within a single Quantum plugin
* Merging from Sumit's branch pylint fixes and incorporating review comments
* Merging from cisco branch
* Merging from lp:quantum
* Introducing cheetah Updating list\_nets in CLI Writing unit tests for list\_nets Stubbing out with FakeConnection now
* Merging quantum extenions framework into trunk. Thanks rajaram vinkesh, deepak & santhosh for the great work!
* lp Bug#824145 : Adding a setup script for quantum
* skeleton for cli unit tests
* merge trunk
* Merged quantum trunk
* - Adding setup script
* force batch\_config.py to use json, as XML has issues (see bug: 798262)
* update batch\_config.py to use new client lib, hooray for deleting code
* Merging changes addressing Bug # 802772. Thanks lp:danwent !
* Merging bugfix for Bug 822890 - Added License file for Quantum code distribution
* L2 Network Plugin Framework merge
* Adding Apache Version 2.0 license file. This is the official license agreement under which Quantum code is available to the Open Source community
* merge
* merge heckj's pip-requires fixes
* updates to pip-requires for CI
* Merged quantum trunk
* Merging changes from lp:quantum
* Completing API spec alignment Unit tests aligned with changes in the API spec
* Merging the brand new Quantum-client-library feature
* Merging lp:quantum updates
* persistence of l2network & ucs plugins using mysql - db\_conn.ini - configuration details of making a connection to the database - db\_test\_plugin.py - contains abstraction methods for storing database values in a dict and unit test cases for DB testing - l2network\_db.py - db methods for l2network models - l2network\_models.py - class definitions for the l2 network tables - ucs\_db.py - db methods for ucs models - ucs\_models.py - class definition for the ucs tables dynamic loading of the 2nd layer plugin db's based on passed arguments Create, Delete, Get, Getall, Update database methods at - Quantum, L2Network and Ucs Unit test cases for create, delete, getall and update operations for L2Network and Ucs plugins pep8 checks done branch based off revision 34 plugin-framework
* Merged from trunk
* merged the latest changes from plugin-framework branch - revision 39 conforming to the new cisco plugin directory structure and moving all db related modules into cisco/db folder updated db\_test\_plugin.py - added import of cisco constants module - added LOG.getLogger for logging component name - updated import module paths for l2network\_models/db and ucs\_models/db to use the new directory structure - updated (rearranged) imports section to obey openstack alphabetical placement convention updated db\_conn.ini - updated database name from cisco\_naas to quantum\_l2network unit test cases ran successfully and pep8 checks done again
* merge branch for to fix bug817826
* Merging the latest changes from lp:quantum
* fix bug 817826 and similar error in batch\_config.py
* merge Salvatore's api branch with fixes for tests. Tweaking branch to remove unwanted bin/quantum.py as part of merge
* Santhosh/Rajaram|latest merge from quantum and made extensions use options to load plugin
* Apply fix for bug #797419 merging lp:~salvatore-orlando/quantum/bug797419
* Merging branch lp:~netstack/quantum/quantum-unit-tests
* Merged from quantum trunk
* Adapated plugin infrastructure to allow API to pass options to plugins Now using in-memory sqlite db for tests on FakePlugin teardown() now 'resets' the in-memory db Adding unit tests for APIs
* Adding Routes>=1.12.3 to tools/pip-requires
* Merging dan wendlandt's bugfixes for Bug #800466 and improvements that enable Quantum to seamlessly run on KVM!
* more pep8 goodness
* refactor batch\_config, allow multiple attaches with the empty string
* merge and pep8 cleanup
* Merging latest changes from parent repo - lp:network-service , Parent repo had approved merge proposal for merging lp:~santhom/network-service/quantum\_testing\_framework , which has now been merged into lp:network-service
* Merging pep8 and functional test related changes lp:~santhom/network-service/quantum\_testing\_framework branch
* add example to usage string for batch\_config.py
* Bug fixes and clean-up, including supporting libvirt
* Santhosh/Vinkesh | Added the testing framework. Moved the smoketest to tests/functional
* Pushing initial started code based on Glance project and infrastructure work done by the melange team
* Merging in latest changes from lp:quantum
networking-odl-16.0.0/.mailmap0000664000175000017500000000121013656750541016212 0ustar zuulzuul00000000000000# Format is:
#
#
lawrancejing
Jiajun Liu
Zhongyue Luo
Kun Huang
Zhenguo Niu
Isaku Yamahata
Isaku Yamahata
Morgan Fainberg
Michel Peterson
networking-odl-16.0.0/networking_odl.egg-info/0000775000175000017500000000000013656750617021322 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl.egg-info/entry_points.txt0000664000175000017500000000217613656750616024625 0ustar zuulzuul00000000000000[console_scripts]
neutron-odl-analyze-journal-logs = networking_odl.cmd.analyze_journal:main
neutron-odl-ovs-hostconfig = networking_odl.cmd.set_ovs_hostconfigs:main
[network.statistics.drivers]
opendaylight.v2 = networking_odl.ceilometer.network.statistics.opendaylight_v2.driver:OpenDaylightDriver
[networking_odl.ml2.port_binding_controllers]
legacy-port-binding = networking_odl.ml2.legacy_port_binding:LegacyPortBindingManager
pseudo-agentdb-binding = networking_odl.ml2.pseudo_agentdb_binding:PseudoAgentDBBindingController
[networking_sfc.flowclassifier.drivers]
odl_v2 = networking_odl.sfc.flowclassifier.sfc_flowclassifier_v2:OpenDaylightSFCFlowClassifierDriverV2
[networking_sfc.sfc.drivers]
odl_v2 = networking_odl.sfc.sfc_driver_v2:OpenDaylightSFCDriverV2
[neutron.db.alembic_migrations]
networking-odl = networking_odl.db.migration:alembic_migrations
[neutron.ml2.mechanism_drivers]
opendaylight_v2 = networking_odl.ml2.mech_driver_v2:OpenDaylightMechanismDriver
[neutron.service_plugins]
odl-router_v2 = networking_odl.l3.l3_odl_v2:OpenDaylightL3RouterPlugin
[oslo.config.opts]
ml2_odl = networking_odl.common.config:list_opts
networking-odl-16.0.0/networking_odl.egg-info/dependency_links.txt0000664000175000017500000000000113656750616025367 0ustar zuulzuul00000000000000
networking-odl-16.0.0/networking_odl.egg-info/requires.txt0000664000175000017500000000110013656750616023711 0ustar zuulzuul00000000000000pbr>=4.0.0
Babel>=2.5.3
stevedore>=1.28.0
debtcollector>=1.19.0
neutron-lib>=2.0.0
websocket-client>=0.47.0
neutron>=16.0.0.0b1
networking-l2gw>=12.0.0
networking-sfc>=10.0.0.0b1
networking-bgpvpn>=10.0.0b1
[ceilometer]
ceilometer>=11.0.0
[test]
hacking!=0.13.0,<0.14,>=0.12.0
coverage>=4.5.1
doc8>=0.8.0
flake8-import-order>=0.17.1
python-subunit>=1.2.0
oslotest>=3.3.0
stestr>=2.0.0
pecan>=1.3.2
testresources>=2.0.1
testscenarios>=0.5.0
testtools>=2.3.0
bandit!=1.6.0,>=1.4.0
bashate>=0.5.1
ceilometer>=11.0.0
[test:(python_version>="3.0")]
pylint==2.2.0
astroid==2.1.0
networking-odl-16.0.0/networking_odl.egg-info/pbr.json0000664000175000017500000000006013656750616022773 0ustar zuulzuul00000000000000{"git_version": "56d5d9594", "is_release": true}networking-odl-16.0.0/networking_odl.egg-info/PKG-INFO0000664000175000017500000000427313656750616022424 0ustar zuulzuul00000000000000Metadata-Version: 2.1
Name: networking-odl
Version: 16.0.0
Summary: OpenStack Networking
Home-page: https://docs.openstack.org/networking-odl/latest/
Author: OpenStack
Author-email: openstack-discuss@lists.openstack.org
License: UNKNOWN
Description: ==========================
Welcome to networking-odl!
==========================
.. Team and repository tags
.. image:: http://governance.openstack.org/badges/networking-odl.svg
:target: http://governance.openstack.org/reference/tags/index.html
.. Change things from this point on
Summary
-------
OpenStack networking-odl is a library of drivers and plugins that integrates
OpenStack Neutron API with OpenDaylight Backend. For example it has ML2
driver and L3 plugin to enable communication of OpenStack Neutron L2
and L3 resources API to OpenDaylight Backend.
To report and discover bugs in networking-odl the following
link can be used:
https://bugs.launchpad.net/networking-odl
Any new code submission or proposal must follow the development
guidelines detailed in HACKING.rst and for further details this
link can be checked:
https://docs.openstack.org/networking-odl/latest/
The OpenDaylight homepage:
https://www.opendaylight.org/
Release notes for the project can be found at:
https://docs.openstack.org/releasenotes/networking-odl/
The project source code repository is located at:
https://opendev.org/openstack/networking-odl
Platform: UNKNOWN
Classifier: Environment :: OpenStack
Classifier: Intended Audience :: Information Technology
Classifier: Intended Audience :: System Administrators
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Operating System :: POSIX :: Linux
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Requires-Python: >=3.6
Provides-Extra: ceilometer
Provides-Extra: test
networking-odl-16.0.0/networking_odl.egg-info/top_level.txt0000664000175000017500000000001713656750616024051 0ustar zuulzuul00000000000000networking_odl
networking-odl-16.0.0/networking_odl.egg-info/not-zip-safe0000664000175000017500000000000113656750616023547 0ustar zuulzuul00000000000000
networking-odl-16.0.0/networking_odl.egg-info/SOURCES.txt0000664000175000017500000003524213656750617023214 0ustar zuulzuul00000000000000.coveragerc
.mailmap
.pylintrc
.stestr.conf
AUTHORS
CONTRIBUTING.rst
ChangeLog
HACKING.rst
LICENSE
README.rst
TESTING.rst
babel.cfg
bindep.txt
lower-constraints.txt
requirements.txt
setup.cfg
setup.py
tempest-blacklist.txt
test-requirements.txt
tox.ini
.zuul.d/jobs.yaml
.zuul.d/project.yaml
devstack/README.rst
devstack/devstackgaterc
devstack/entry_points
devstack/functions
devstack/jetty-legacy.patch
devstack/local.conf.example
devstack/override-defaults
devstack/plugin.sh
devstack/settings
devstack/settings.odl
devstack/setup_java.sh
devstack/files/debs/networking-odl
devstack/files/rpms/networking-odl
devstack/odl-etc/opendaylight/datastore/initial/config/netvirt-impl-config_netvirt-impl-config.xml
devstack/odl-releases/README.rst
devstack/odl-releases/common
devstack/odl-releases/fluorine-latest
devstack/odl-releases/fluorine-snapshot-0.9
devstack/odl-releases/fluorine-snapshot-0.9.0
devstack/odl-releases/latest-release
devstack/odl-releases/latest-snapshot
devstack/odl-releases/neon-latest
devstack/odl-releases/neon-snapshot-0.10.2
devstack/odl-releases/sodium-latest
devstack/upgrade/resources.sh
devstack/upgrade/settings
devstack/upgrade/upgrade.sh
doc/requirements.txt
doc/source/conf.py
doc/source/index.rst
doc/source/admin/index.rst
doc/source/admin/reference_architecture.rst
doc/source/configuration/index.rst
doc/source/configuration/samples/ml2_odl.rst
doc/source/contributor/contributing.rst
doc/source/contributor/drivers_architecture.rst
doc/source/contributor/hostconfig.rst
doc/source/contributor/index.rst
doc/source/contributor/maintenance.rst
doc/source/contributor/quickstart.rst
doc/source/contributor/testing.rst
doc/source/contributor/usage.rst
doc/source/contributor/specs/index.rst
doc/source/contributor/specs/newton/qos-driver.rst
doc/source/contributor/specs/newton/sfc-driver.rst
doc/source/contributor/specs/ocata/journal-recovery.rst
doc/source/contributor/specs/pike/dep-validations-on-create.rst
doc/source/contributor/specs/pike/neutron-port-dhcp.rst
doc/source/install/devstack.rst
doc/source/install/index.rst
doc/source/install/installation.rst
doc/source/reference/index.rst
doc/source/reference/newton.rst
doc/source/reference/ocata.rst
doc/source/reference/pike.rst
etc/policy.json
etc/neutron/plugins/ml2/ml2_conf_odl.ini
networking_odl/__init__.py
networking_odl/_i18n.py
networking_odl.egg-info/PKG-INFO
networking_odl.egg-info/SOURCES.txt
networking_odl.egg-info/dependency_links.txt
networking_odl.egg-info/entry_points.txt
networking_odl.egg-info/not-zip-safe
networking_odl.egg-info/pbr.json
networking_odl.egg-info/requires.txt
networking_odl.egg-info/top_level.txt
networking_odl/bgpvpn/__init__.py
networking_odl/bgpvpn/odl_v2.py
networking_odl/ceilometer/__init__.py
networking_odl/ceilometer/network/__init__.py
networking_odl/ceilometer/network/statistics/__init__.py
networking_odl/ceilometer/network/statistics/opendaylight_v2/__init__.py
networking_odl/ceilometer/network/statistics/opendaylight_v2/client.py
networking_odl/ceilometer/network/statistics/opendaylight_v2/driver.py
networking_odl/cmd/__init__.py
networking_odl/cmd/analyze_journal.py
networking_odl/cmd/set_ovs_hostconfigs.py
networking_odl/cmd/test_setup_hostconfigs.sh
networking_odl/common/__init__.py
networking_odl/common/callback.py
networking_odl/common/client.py
networking_odl/common/config.py
networking_odl/common/constants.py
networking_odl/common/exceptions.py
networking_odl/common/filters.py
networking_odl/common/lightweight_testing.py
networking_odl/common/odl_features.py
networking_odl/common/postcommit.py
networking_odl/common/utils.py
networking_odl/common/websocket_client.py
networking_odl/db/__init__.py
networking_odl/db/db.py
networking_odl/db/head.py
networking_odl/db/models.py
networking_odl/db/migration/__init__.py
networking_odl/db/migration/alembic_migrations/README
networking_odl/db/migration/alembic_migrations/__init__.py
networking_odl/db/migration/alembic_migrations/env.py
networking_odl/db/migration/alembic_migrations/script.py.mako
networking_odl/db/migration/alembic_migrations/versions/CONTRACT_HEAD
networking_odl/db/migration/alembic_migrations/versions/EXPAND_HEAD
networking_odl/db/migration/alembic_migrations/versions/b89a299e19f9_initial_branchpoint.py
networking_odl/db/migration/alembic_migrations/versions/mitaka/contract/383acb0d38a0_initial_contract.py
networking_odl/db/migration/alembic_migrations/versions/mitaka/expand/247501328046_initial_expand.py
networking_odl/db/migration/alembic_migrations/versions/mitaka/expand/37e242787ae5_opendaylight_neutron_mechanism_driver_.py
networking_odl/db/migration/alembic_migrations/versions/newton/contract/fa0c536252a5_update_opendayligut_journal.py
networking_odl/db/migration/alembic_migrations/versions/newton/expand/3d560427d776_add_sequence_number_to_journal.py
networking_odl/db/migration/alembic_migrations/versions/newton/expand/703dbf02afde_add_journal_maintenance_table.py
networking_odl/db/migration/alembic_migrations/versions/pike/contract/7cbef5a56298_drop_created_at_column.py
networking_odl/db/migration/alembic_migrations/versions/pike/contract/eccd865b7d3a_drop_opendaylight_maintenance_table.py
networking_odl/db/migration/alembic_migrations/versions/pike/expand/0472f56ff2fb_add_journal_dependencies_table.py
networking_odl/db/migration/alembic_migrations/versions/pike/expand/43af357fd638_added_version_id_for_optimistic_locking.py
networking_odl/db/migration/alembic_migrations/versions/pike/expand/6f7dfb241354_create_opendaylight_preiodic_task_table.py
networking_odl/dhcp/__init__.py
networking_odl/dhcp/odl_dhcp_driver.py
networking_odl/dhcp/odl_dhcp_driver_base.py
networking_odl/hacking/__init__.py
networking_odl/hacking/checks.py
networking_odl/journal/__init__.py
networking_odl/journal/base_driver.py
networking_odl/journal/cleanup.py
networking_odl/journal/dependency_validations.py
networking_odl/journal/full_sync.py
networking_odl/journal/journal.py
networking_odl/journal/periodic_task.py
networking_odl/journal/recovery.py
networking_odl/journal/worker.py
networking_odl/l2gateway/__init__.py
networking_odl/l2gateway/driver_v2.py
networking_odl/l3/__init__.py
networking_odl/l3/l3_flavor.py
networking_odl/l3/l3_odl_v2.py
networking_odl/locale/en_GB/LC_MESSAGES/networking_odl.po
networking_odl/ml2/README.odl
networking_odl/ml2/__init__.py
networking_odl/ml2/legacy_port_binding.py
networking_odl/ml2/mech_driver_v2.py
networking_odl/ml2/port_binding.py
networking_odl/ml2/port_status_update.py
networking_odl/ml2/pseudo_agentdb_binding.py
networking_odl/qos/__init__.py
networking_odl/qos/qos_driver_v2.py
networking_odl/qos/qos_utils.py
networking_odl/sfc/__init__.py
networking_odl/sfc/sfc_driver_v2.py
networking_odl/sfc/flowclassifier/__init__.py
networking_odl/sfc/flowclassifier/sfc_flowclassifier_v2.py
networking_odl/tests/__init__.py
networking_odl/tests/base.py
networking_odl/tests/match.py
networking_odl/tests/functional/__init__.py
networking_odl/tests/functional/base.py
networking_odl/tests/functional/requirements.txt
networking_odl/tests/functional/test_bgpvpn.py
networking_odl/tests/functional/test_l2gateway.py
networking_odl/tests/functional/test_l3.py
networking_odl/tests/functional/test_ml2_drivers.py
networking_odl/tests/functional/test_odl_dhcp_driver.py
networking_odl/tests/functional/test_qos.py
networking_odl/tests/functional/test_trunk_drivers.py
networking_odl/tests/functional/db/__init__.py
networking_odl/tests/functional/db/test_migrations.py
networking_odl/tests/unit/__init__.py
networking_odl/tests/unit/base_v2.py
networking_odl/tests/unit/test_base_db.py
networking_odl/tests/unit/bgpvpn/__init__.py
networking_odl/tests/unit/bgpvpn/test_odl_v2.py
networking_odl/tests/unit/ceilometer/__init__.py
networking_odl/tests/unit/ceilometer/network/__init__.py
networking_odl/tests/unit/ceilometer/network/statistics/__init__.py
networking_odl/tests/unit/ceilometer/network/statistics/opendaylight_v2/__init__.py
networking_odl/tests/unit/ceilometer/network/statistics/opendaylight_v2/test_client.py
networking_odl/tests/unit/ceilometer/network/statistics/opendaylight_v2/test_driver.py
networking_odl/tests/unit/cmd/__init__.py
networking_odl/tests/unit/cmd/test_analyze_journal.py
networking_odl/tests/unit/cmd/test_set_ovs_hostconfigs.py
networking_odl/tests/unit/common/__init__.py
networking_odl/tests/unit/common/test_callback.py
networking_odl/tests/unit/common/test_client.py
networking_odl/tests/unit/common/test_filters.py
networking_odl/tests/unit/common/test_lightweight_testing.py
networking_odl/tests/unit/common/test_odl_features.py
networking_odl/tests/unit/common/test_postcommit.py
networking_odl/tests/unit/common/test_utils.py
networking_odl/tests/unit/common/test_websocket_client.py
networking_odl/tests/unit/db/__init__.py
networking_odl/tests/unit/db/test_db.py
networking_odl/tests/unit/dhcp/__init__.py
networking_odl/tests/unit/dhcp/test_odl_dhcp_driver.py
networking_odl/tests/unit/dhcp/test_odl_dhcp_driver_base.py
networking_odl/tests/unit/journal/__init__.py
networking_odl/tests/unit/journal/helper.py
networking_odl/tests/unit/journal/test_base_driver.py
networking_odl/tests/unit/journal/test_cleanup.py
networking_odl/tests/unit/journal/test_dependency_validations.py
networking_odl/tests/unit/journal/test_full_sync.py
networking_odl/tests/unit/journal/test_journal.py
networking_odl/tests/unit/journal/test_periodic_task.py
networking_odl/tests/unit/journal/test_recovery.py
networking_odl/tests/unit/l2gateway/__init__.py
networking_odl/tests/unit/l2gateway/test_driver_v2.py
networking_odl/tests/unit/l3/__init__.py
networking_odl/tests/unit/l3/test_l3_flavor.py
networking_odl/tests/unit/l3/test_l3_odl_v2.py
networking_odl/tests/unit/ml2/__init__.py
networking_odl/tests/unit/ml2/config-ovs-external_ids.sh
networking_odl/tests/unit/ml2/odl_teststub.js
networking_odl/tests/unit/ml2/test_legacy_port_binding.py
networking_odl/tests/unit/ml2/test_mechanism_odl_v2.py
networking_odl/tests/unit/ml2/test_port_binding.py
networking_odl/tests/unit/ml2/test_port_status_update.py
networking_odl/tests/unit/ml2/test_pseudo_agentdb_binding.py
networking_odl/tests/unit/qos/__init__.py
networking_odl/tests/unit/qos/test_qos_driver_v2.py
networking_odl/tests/unit/sfc/__init__.py
networking_odl/tests/unit/sfc/constants.py
networking_odl/tests/unit/sfc/test_sfc_driver_v2.py
networking_odl/tests/unit/sfc/flowclassifier/__init__.py
networking_odl/tests/unit/sfc/flowclassifier/test_sfc_flowclassifier_v2.py
networking_odl/tests/unit/trunk/__init__.py
networking_odl/tests/unit/trunk/test_trunk_driver_v2.py
networking_odl/trunk/__init__.py
networking_odl/trunk/constants.py
networking_odl/trunk/trunk_driver_v2.py
playbooks/multinode-setup.yaml
playbooks/devstack/pre.yaml
playbooks/devstack-tox/post.yaml
playbooks/devstack-tox/pre.yaml
playbooks/devstack-tox/run.yaml
playbooks/functional/pre.yaml
playbooks/legacy/grenade-dsvm-networking-odl/post.yaml
playbooks/legacy/grenade-dsvm-networking-odl/run.yaml
playbooks/tempest/post.yaml
playbooks/tempest/pre.yaml
playbooks/tempest/run.yaml
rally-jobs/README.rst
rally-jobs/odl.yaml
rally-jobs/extra/README.rst
rally-jobs/plugins/README.rst
rally-jobs/plugins/__init__.py
releasenotes/notes/.placeholder
releasenotes/notes/add-analyze-journal-cmd-189eae2cac4d60a5.yaml
releasenotes/notes/add-beryllium-sr4-7eced33ec292bcc8.yaml
releasenotes/notes/add-host-config-8fb45d7f9732a795.yaml
releasenotes/notes/bgpvpn-driver-v2-36c0772d510587f4.yaml
releasenotes/notes/bgpvpn-vni-support-0804d0c0789cd1db.yaml
releasenotes/notes/delete-completed-rows-immediately-d3aee2ff5278b3f4.yaml
releasenotes/notes/deprecate-qos-driver-v1-96bce9842413700b.yaml
releasenotes/notes/deprecate-v1-0dd4f07c68a4a0a4.yaml
releasenotes/notes/deprecate_ceilometer-0d2830fa1fc6ba4e.yaml
releasenotes/notes/devstack-default-driver-v2-6ae6ce789b4a6cc9.yaml
releasenotes/notes/drop-py27-support-3bc8094e1823cfcf.yaml
releasenotes/notes/fix-sfc-full-sync-4eafe97d27b8b33e.yaml
releasenotes/notes/fix-sfcv2-urlpath-f339357bed1a538c.yaml
releasenotes/notes/fix-tls-websocket-3bee50093c3e90cf.yaml
releasenotes/notes/fix-ws-ssl-timeout-e16cd41779c05d42.yaml
releasenotes/notes/flat-network-support-7c032aabc21902b1.yaml
releasenotes/notes/full-sync-f6b7ec1bd9ea0e52.yaml
releasenotes/notes/functional-test-b0855d6f1d85da30.yaml
releasenotes/notes/ignore_agent_aliveness-935a1aa8c285dfa2.yaml
releasenotes/notes/journal-recovery-88e583ad2db22bcc.yaml
releasenotes/notes/l2gw-driver-v2-b32aacf882ed446c.yaml
releasenotes/notes/lbaas-driver-v2-46bf34992f4785d1.yaml
releasenotes/notes/maintenance-thread-e54c3b4bd7c03546.yaml
releasenotes/notes/make-ceilometer-dependency-optional-fb0407dd2d367599.yaml
releasenotes/notes/make_sync_timeout_float-490072005e3f3413.yaml
releasenotes/notes/network-statistics-from-opendaylight-057a6b3c30626527.yaml
releasenotes/notes/neutron-dhcp-port-dcbc3a1008f45cc2.yaml
releasenotes/notes/new-netvirt-default-0eccc77d3cb54484.yaml
releasenotes/notes/nuke-lbaasv1-driver-fce366522350fe21.yaml
releasenotes/notes/odl-feature-negotiation-ece3201a6e9f8f74.yaml
releasenotes/notes/odl-l3-flavor-f093e6c0fb4e9dd8.yaml
releasenotes/notes/odl_features-option-type-change-367385ae7d1e949e.yaml
releasenotes/notes/ovs_hardware_offload_support-38d2b0b7386b8ca7.yaml
releasenotes/notes/port-binding-default-b5f24ad350b47eb0.yaml
releasenotes/notes/pseudo-agent-port-binding-0a3d1d193b99293e.yaml
releasenotes/notes/qos-driver-v1-711698186ca693c4.yaml
releasenotes/notes/qos-driver-v2-4c869a6f0b8e3a4d.yaml
releasenotes/notes/remove-network-topology-67daff08f3d6ff14.yaml
releasenotes/notes/remove-neutron-lbaas-6afe0b0f7b61290a.yaml
releasenotes/notes/remove-v1-driver-df408f9916fc5e5d.yaml
releasenotes/notes/remove_qos_driver_v1-2bfbf1f979082b07.yaml
releasenotes/notes/set-ovs-hostconfig-flat-default-a3c189858304e2ed.yaml
releasenotes/notes/sfc-driver-v1-d11fd5fd17114f2c.yaml
releasenotes/notes/sfc-driver-v2-9378b0db810b6fcb.yaml
releasenotes/notes/trunk-drivers-3592691bdd08929e.yaml
releasenotes/notes/version-bump-16230eadac71cbb0.yaml
releasenotes/notes/vlan-transparency-63c153d310eacc5d.yaml
releasenotes/notes/websocket-client-7c8117671aeea181.yaml
releasenotes/source/conf.py
releasenotes/source/index.rst
releasenotes/source/newton.rst
releasenotes/source/ocata.rst
releasenotes/source/pike.rst
releasenotes/source/queens.rst
releasenotes/source/rocky.rst
releasenotes/source/stein.rst
releasenotes/source/train.rst
releasenotes/source/unreleased.rst
releasenotes/source/_static/.placeholder
releasenotes/source/_templates/.placeholder
releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po
releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po
roles/show-odl-info/README.rst
roles/show-odl-info/defaults/main.yaml
roles/show-odl-info/tasks/main.yaml
roles/show-odl-info/tasks/ovs_flows.yaml
tools/check_bash.sh
tools/check_i18n.py
tools/check_i18n_test_case.txt
tools/clean.sh
tools/coding-checks.sh
tools/configure_for_func_testing.sh
tools/i18n_cfg.py
tools/install_venv.py
tools/with_venv.shnetworking-odl-16.0.0/etc/0000775000175000017500000000000013656750617015356 5ustar zuulzuul00000000000000networking-odl-16.0.0/etc/neutron/0000775000175000017500000000000013656750617017050 5ustar zuulzuul00000000000000networking-odl-16.0.0/etc/neutron/plugins/0000775000175000017500000000000013656750617020531 5ustar zuulzuul00000000000000networking-odl-16.0.0/etc/neutron/plugins/ml2/0000775000175000017500000000000013656750617021223 5ustar zuulzuul00000000000000networking-odl-16.0.0/etc/neutron/plugins/ml2/ml2_conf_odl.ini0000664000175000017500000000401613656750541024256 0ustar zuulzuul00000000000000[DEFAULT]
[ml2_odl]
#
# From ml2_odl
#
# HTTP URL of OpenDaylight REST interface. (string value)
#url =
# HTTP username for authentication. (string value)
#username =
# HTTP password for authentication. (string value)
#password =
# HTTP timeout in seconds. (integer value)
#timeout = 10
# Tomcat session timeout in minutes. (integer value)
#session_timeout = 30
# Sync thread timeout in seconds. (integer value)
#sync_timeout = 10
# Number of times to retry a row before failing. (integer value)
#retry_count = 5
# Journal maintenance operations interval in seconds. (integer value)
#maintenance_interval = 300
# Time to keep completed rows (in seconds).
# For performance reasons it's not recommended to change this from the default
# value (0) which indicates completed rows aren't kept.
# This value will be checked every maintenance_interval by the cleanup
# thread. To keep completed rows indefinitely, set the value to -1
# (integer value)
#completed_rows_retention = 0
# Test without real ODL. (boolean value)
#enable_lightweight_testing = false
# Name of the controller to be used for port binding. (string value)
#port_binding_controller = pseudo-agentdb-binding
# Time in seconds to wait before a processing row is
# marked back to pending. (integer value)
#processing_timeout = 100
# Path for ODL host configuration REST interface (string value)
#odl_hostconf_uri = /restconf/operational/neutron:neutron/hostconfigs
# Poll interval in seconds for getting ODL hostconfig (integer value)
#restconf_poll_interval = 30
# Enable websocket for pseudo-agent-port-binding. (boolean value)
#enable_websocket_pseudo_agentdb = false
# Wait this many seconds before retrying the odl features fetch
# (integer value)
#odl_features_retry_interval = 5
# A list of features supported by ODL (list value)
#odl_features =
# Enables the networking-odl driver to supply special neutron ports of
# "dhcp" type to OpenDaylight Controller for its use in providing DHCP
# Service. (boolean value)
#enable_dhcp_service = false
networking-odl-16.0.0/etc/policy.json0000664000175000017500000001465213656750541017554 0ustar zuulzuul00000000000000{
"context_is_admin": "role:admin",
"admin_or_owner": "rule:context_is_admin or tenant_id:%(tenant_id)s",
"context_is_advsvc": "role:advsvc",
"admin_or_network_owner": "rule:context_is_admin or tenant_id:%(network:tenant_id)s",
"admin_only": "rule:context_is_admin",
"regular_user": "",
"shared": "field:networks:shared=True",
"shared_firewalls": "field:firewalls:shared=True",
"external": "field:networks:router:external=True",
"default": "rule:admin_or_owner",
"create_subnet": "rule:admin_or_network_owner",
"get_subnet": "rule:admin_or_owner or rule:shared",
"update_subnet": "rule:admin_or_network_owner",
"delete_subnet": "rule:admin_or_network_owner",
"create_network": "",
"get_network": "rule:admin_or_owner or rule:shared or rule:external or rule:context_is_advsvc",
"get_network:router:external": "rule:regular_user",
"get_network:segments": "rule:admin_only",
"get_network:provider:network_type": "rule:admin_only",
"get_network:provider:physical_network": "rule:admin_only",
"get_network:provider:segmentation_id": "rule:admin_only",
"get_network:queue_id": "rule:admin_only",
"create_network:shared": "rule:admin_only",
"create_network:router:external": "rule:admin_only",
"create_network:segments": "rule:admin_only",
"create_network:provider:network_type": "rule:admin_only",
"create_network:provider:physical_network": "rule:admin_only",
"create_network:provider:segmentation_id": "rule:admin_only",
"update_network": "rule:admin_or_owner",
"update_network:segments": "rule:admin_only",
"update_network:shared": "rule:admin_only",
"update_network:provider:network_type": "rule:admin_only",
"update_network:provider:physical_network": "rule:admin_only",
"update_network:provider:segmentation_id": "rule:admin_only",
"update_network:router:external": "rule:admin_only",
"delete_network": "rule:admin_or_owner",
"create_port": "",
"create_port:mac_address": "rule:admin_or_network_owner or rule:context_is_advsvc",
"create_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc",
"create_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
"create_port:binding:host_id": "rule:admin_only",
"create_port:binding:profile": "rule:admin_only",
"create_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
"get_port": "rule:admin_or_owner or rule:context_is_advsvc",
"get_port:queue_id": "rule:admin_only",
"get_port:binding:vif_type": "rule:admin_only",
"get_port:binding:vif_details": "rule:admin_only",
"get_port:binding:host_id": "rule:admin_only",
"get_port:binding:profile": "rule:admin_only",
"update_port": "rule:admin_or_owner or rule:context_is_advsvc",
"update_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc",
"update_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
"update_port:binding:host_id": "rule:admin_only",
"update_port:binding:profile": "rule:admin_only",
"update_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
"delete_port": "rule:admin_or_owner or rule:context_is_advsvc",
"get_router:ha": "rule:admin_only",
"create_router": "rule:regular_user",
"create_router:external_gateway_info:enable_snat": "rule:admin_only",
"create_router:distributed": "rule:admin_only",
"create_router:ha": "rule:admin_only",
"get_router": "rule:admin_or_owner",
"get_router:distributed": "rule:admin_only",
"update_router:external_gateway_info:enable_snat": "rule:admin_only",
"update_router:distributed": "rule:admin_only",
"update_router:ha": "rule:admin_only",
"delete_router": "rule:admin_or_owner",
"add_router_interface": "rule:admin_or_owner",
"remove_router_interface": "rule:admin_or_owner",
"create_router:external_gateway_info:external_fixed_ips": "rule:admin_only",
"update_router:external_gateway_info:external_fixed_ips": "rule:admin_only",
"create_firewall": "",
"get_firewall": "rule:admin_or_owner",
"create_firewall:shared": "rule:admin_only",
"get_firewall:shared": "rule:admin_only",
"update_firewall": "rule:admin_or_owner",
"update_firewall:shared": "rule:admin_only",
"delete_firewall": "rule:admin_or_owner",
"create_firewall_policy": "",
"get_firewall_policy": "rule:admin_or_owner or rule:shared_firewalls",
"create_firewall_policy:shared": "rule:admin_or_owner",
"update_firewall_policy": "rule:admin_or_owner",
"delete_firewall_policy": "rule:admin_or_owner",
"create_firewall_rule": "",
"get_firewall_rule": "rule:admin_or_owner or rule:shared_firewalls",
"update_firewall_rule": "rule:admin_or_owner",
"delete_firewall_rule": "rule:admin_or_owner",
"create_qos_queue": "rule:admin_only",
"get_qos_queue": "rule:admin_only",
"update_agent": "rule:admin_only",
"delete_agent": "rule:admin_only",
"get_agent": "rule:admin_only",
"create_dhcp-network": "rule:admin_only",
"delete_dhcp-network": "rule:admin_only",
"get_dhcp-networks": "rule:admin_only",
"create_l3-router": "rule:admin_only",
"delete_l3-router": "rule:admin_only",
"get_l3-routers": "rule:admin_only",
"get_dhcp-agents": "rule:admin_only",
"get_l3-agents": "rule:admin_only",
"get_loadbalancer-agent": "rule:admin_only",
"get_loadbalancer-pools": "rule:admin_only",
"create_floatingip": "rule:regular_user",
"create_floatingip:floating_ip_address": "rule:admin_only",
"update_floatingip": "rule:admin_or_owner",
"delete_floatingip": "rule:admin_or_owner",
"get_floatingip": "rule:admin_or_owner",
"create_network_profile": "rule:admin_only",
"update_network_profile": "rule:admin_only",
"delete_network_profile": "rule:admin_only",
"get_network_profiles": "",
"get_network_profile": "",
"update_policy_profiles": "rule:admin_only",
"get_policy_profiles": "",
"get_policy_profile": "",
"create_metering_label": "rule:admin_only",
"delete_metering_label": "rule:admin_only",
"get_metering_label": "rule:admin_only",
"create_metering_label_rule": "rule:admin_only",
"delete_metering_label_rule": "rule:admin_only",
"get_metering_label_rule": "rule:admin_only",
"get_service_provider": "rule:regular_user",
"get_lsn": "rule:admin_only",
"create_lsn": "rule:admin_only"
}
networking-odl-16.0.0/HACKING.rst0000664000175000017500000000256313656750541016403 0ustar zuulzuul00000000000000Neutron Style Commandments
==========================
- Step 1: Read the OpenStack Style Commandments
https://docs.openstack.org/hacking/latest/
- Step 2: Read on
Neutron Specific Commandments
-----------------------------
- [N319] Validate that debug level logs are not translated
- [N320] Validate that LOG messages, except debug ones, have translations
- [N321] Validate that jsonutils module is used instead of json
- [N322] We do not use @authors tags in source files. We have git to track
authorship.
- [N323] Detect common errors with assert_called_once_with
Creating Unit Tests
-------------------
For every new feature, unit tests should be created that both test and
(implicitly) document the usage of said feature. If submitting a patch for a
bug that had no unit test, a new passing unit test should be added. If a
submitted bug fix does have a unit test, be sure to add a new one that fails
without the patch and passes with the patch.
All unittest classes must ultimately inherit from testtools.TestCase. In the
Neutron test suite, this should be done by inheriting from
neutron.tests.base.BaseTestCase.
All setUp and tearDown methods must upcall using the super() method.
tearDown methods should be avoided and addCleanup calls should be preferred.
Never manually create tempfiles. Always use the tempfile fixtures from
the fixture library to ensure that they are cleaned up.
networking-odl-16.0.0/TESTING.rst0000664000175000017500000001503413656750541016451 0ustar zuulzuul00000000000000Testing Networking-odl + neutron
================================
Overview
--------
The unit tests (networking_odl/tests/unit/) are meant to cover as much code as
possible and should be executed without the service running. They are
designed to test the various pieces of the neutron tree to make sure
any new changes don't break existing functionality.
# TODO (Manjeet): Update functional testing doc.
Development process
-------------------
It is expected that any new changes that are proposed for merge
come with tests for that feature or code area. Ideally any bugs
fixes that are submitted also have tests to prove that they stay
fixed! In addition, before proposing for merge, all of the
current tests should be passing.
Virtual environments
~~~~~~~~~~~~~~~~~~~~
Testing OpenStack projects, including Neutron, is made easier with `DevStack `_.
Create a machine (such as a VM or Vagrant box) running a distribution supported
by DevStack and install DevStack there. For example, there is a Vagrant script
for DevStack at https://github.com/bcwaldon/vagrant_devstack.
.. note::
If you prefer not to use DevStack, you can still check out source code on your local
machine and develop from there.
Running unit tests
------------------
There are two mechanisms for running tests: tox, and nose. Before submitting
a patch for review you should always ensure all tests pass; a tox run is
triggered by the jenkins gate executed on gerrit for each patch pushed for
review.
With these mechanisms you can either run the tests in the standard
environment or create a virtual environment to run them in.
By default after running all of the tests, any pep8 errors
found in the tree will be reported.
With `nose`
~~~~~~~~~~~
You can use `nose`_ to run individual tests, as well as use for debugging
portions of your code::
. .venv/bin/activate
pip install nose
nosetests
There are disadvantages to running Nose - the tests are run sequentially, so
race condition bugs will not be triggered, and the full test suite will
take significantly longer than tox & testr. The upside is that testr has
some rough edges when it comes to diagnosing errors and failures, and there is
no easy way to set a breakpoint in the Neutron code, and enter an
interactive debugging session while using testr.
.. _nose: https://nose.readthedocs.org/en/latest/index.html
With `tox`
~~~~~~~~~~
Networking-odl, like other OpenStack projects, uses `tox`_ for managing the virtual
environments for running test cases. It uses `Testr`_ for managing the running
of the test cases.
Tox handles the creation of a series of `virtualenvs`_ that target specific
versions of Python (2.6, 2.7, 3.3, etc).
Testr handles the parallel execution of series of test cases as well as
the tracking of long-running tests and other things.
Running unit tests is as easy as executing this in the root directory of the
Neutron source code::
tox
Running tests for syntax and style check for written code::
tox -e pep8
For more information on the standard Tox-based test infrastructure used by
OpenStack and how to do some common test/debugging procedures with Testr,
see this wiki page:
https://wiki.openstack.org/wiki/Testr
.. _Testr: https://wiki.openstack.org/wiki/Testr
.. _tox: http://tox.readthedocs.org/en/latest/
.. _virtualenvs: https://pypi.org/project/virtualenv/
Tests written can also be debugged by adding pdb break points. Normally if you add
a break point and just run the tests with normal flags they will end up in failing.
There is debug flag you can use to run after adding pdb break points in the tests.
Set break points in your test code and run::
tox -e debug networking_odl.tests.unit.db.test_db.DbTestCase.test_validate_updates_same_object_uuid
The package oslotest was used to enable debugging in the tests. For more
information see the link:
https://docs.openstack.org/oslotest/latest/user/features.html
Running individual tests
~~~~~~~~~~~~~~~~~~~~~~~~
For running individual test modules or cases, you just need to pass
the dot-separated path to the module you want as an argument to it.
For executing a specific test case, specify the name of the test case
class separating it from the module path with a colon.
For example, the following would run only the TestUtils tests from
networking_odl/tests/unit/common/test_utils.py ::
$ tox -e py37 networking_odl.tests.unit.common.test_utils.TestUtils
Adding more tests
~~~~~~~~~~~~~~~~~
There might not be full coverage yet. New patches for adding tests
which are not there are always welcome.
To get a grasp of the areas where tests are needed, you can check
current coverage by running::
$ tox -e cover
Debugging
---------
It's possible to debug tests in a tox environment::
$ tox -e venv -- python -m testtools.run [test module path]
Tox-created virtual environments (venv's) can also be activated
after a tox run and reused for debugging::
$ tox -e venv
$ . .tox/venv/bin/activate
$ python -m testtools.run [test module path]
Tox packages and installs the neutron source tree in a given venv
on every invocation, but if modifications need to be made between
invocation (e.g. adding more pdb statements), it is recommended
that the source tree be installed in the venv in editable mode::
# run this only after activating the venv
$ pip install --editable .
Editable mode ensures that changes made to the source tree are
automatically reflected in the venv, and that such changes are not
overwritten during the next tox run.
Running functional tests
------------------------
Neutron defines different classes of test cases. One of them is functional
test. It requires pre-configured environment. But it's lighter than
running devstack or openstack deployment.
For definitions of functional tests, please refer to:
https://docs.openstack.org/neutron/latest/contributor/index.html
A script is provided to set up the environment.
First, make sure you have the latest version of the pip command::
# ensure you have the latest version of pip command
# for example on ubuntu
$ sudo apt-get install python-pip
$ sudo pip install --upgrade pip
And then run functional test as follows::
# assuming devstack is setup with networking-odl
$ cd networking-odl
$ ./tools/configure_for_func_testing.sh /path/to/devstack
$ tox -e dsvm-functional
For setting up devstack, please refer to neutron documentation:
* https://wiki.openstack.org/wiki/NeutronDevstack
* https://docs.openstack.org/neutron/latest/contributor/index.html
* https://docs.openstack.org/neutron/latest/contributor/testing/testing.html
networking-odl-16.0.0/.pylintrc0000664000175000017500000000627713656750541016460 0ustar zuulzuul00000000000000# The format of this file isn't really documented; just use --generate-rcfile
[MASTER]
# Add to the black list. It should be a base name, not a
# path. You may set this option multiple times.
#
ignore=.git,tests
[MESSAGES CONTROL]
# NOTE(gus): This is a long list. A number of these are important and
# should be re-enabled once the offending code is fixed (or marked
# with a local disable)
disable=
# "F" Fatal errors that prevent further processing
import-error,
# "I" Informational noise
locally-disabled,
# "E" Error for important programming issues (likely bugs)
access-member-before-definition,
no-member,
no-method-argument,
no-self-argument,
# "W" Warnings for stylistic problems or minor programming issues
abstract-method,
arguments-differ,
attribute-defined-outside-init,
bad-builtin,
bad-indentation,
broad-except,
cyclic-import,
dangerous-default-value,
deprecated-lambda,
expression-not-assigned,
fixme,
global-statement,
no-init,
non-parent-init-called,
protected-access,
redefined-builtin,
redefined-outer-name,
signature-differs,
star-args,
super-init-not-called,
unpacking-non-sequence,
unused-argument,
unused-import,
unused-variable,
# "C" Coding convention violations
bad-continuation,
invalid-name,
missing-docstring,
superfluous-parens,
# "R" Refactor recommendations
abstract-class-little-used,
abstract-class-not-used,
duplicate-code,
interface-not-implemented,
no-self-use,
too-few-public-methods,
too-many-ancestors,
too-many-arguments,
too-many-branches,
too-many-instance-attributes,
too-many-lines,
too-many-locals,
too-many-public-methods,
too-many-return-statements,
too-many-statements,
# new for python3 version of pylint
chained-comparison,
consider-using-dict-comprehension,
consider-using-in,
consider-using-set-comprehension,
unnecessary-pass,
useless-object-inheritance,
self-cls-assignment,
no-else-return,
[BASIC]
# Variable names can be 1 to 31 characters long, with lowercase and underscores
variable-rgx=[a-z_][a-z0-9_]{0,30}$
# Argument names can be 2 to 31 characters long, with lowercase and underscores
argument-rgx=[a-z_][a-z0-9_]{1,30}$
# Method names should be at least 3 characters long
# and be lowercased with underscores
method-rgx=([a-z_][a-z0-9_]{2,}|setUp|tearDown)$
# Module names matching neutron-* are ok (files in bin/)
module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+)|(neutron-[a-z0-9_-]+))$
# Don't require docstrings on tests.
no-docstring-rgx=((__.*__)|([tT]est.*)|setUp|tearDown)$
[FORMAT]
# Maximum number of characters on a single line.
max-line-length=79
[VARIABLES]
# List of additional names supposed to be defined in builtins. Remember that
# you should avoid to define new builtins when possible.
# _ is used by our localization
additional-builtins=_
[CLASSES]
# List of interface methods to ignore, separated by a comma.
ignore-iface-methods=
[IMPORTS]
# Deprecated modules which should not be used, separated by a comma
deprecated-modules=
# should use oslo_serialization.jsonutils
json
[TYPECHECK]
# List of module names for which member attributes should not be checked
ignored-modules=six.moves,_MovedItems
[REPORTS]
# Tells whether to display a full report or only the messages
reports=no
networking-odl-16.0.0/releasenotes/0000775000175000017500000000000013656750617017274 5ustar zuulzuul00000000000000networking-odl-16.0.0/releasenotes/notes/0000775000175000017500000000000013656750617020424 5ustar zuulzuul00000000000000networking-odl-16.0.0/releasenotes/notes/qos-driver-v2-4c869a6f0b8e3a4d.yaml0000664000175000017500000000045413656750541026232 0ustar zuulzuul00000000000000---
prelude: >
QoS Driver V2 for networking-odl
features:
  - A new version of QoS driver that integrates OpenStack
neutron QoS API with OpenDaylight backend. This driver
uses journaling mechanism unlike v1 driver, which will
first log the operation in journal table before execution.
networking-odl-16.0.0/releasenotes/notes/fix-sfcv2-urlpath-f339357bed1a538c.yaml0000664000175000017500000000010613656750541027006 0ustar zuulzuul00000000000000---
fixes:
- |
Fixes ODL Neutron NB URL path for SFC v2 Driver.
networking-odl-16.0.0/releasenotes/notes/.placeholder0000664000175000017500000000000013656750541022671 0ustar zuulzuul00000000000000networking-odl-16.0.0/releasenotes/notes/full-sync-f6b7ec1bd9ea0e52.yaml0000664000175000017500000000110513656750541025651 0ustar zuulzuul00000000000000---
prelude: >
Full sync supports and ODL controller with no Neutron
resources on it.
This support is for the V2 driver, as V1 driver already
supports this.
features:
- The full sync process looks for a "canary" network on
the ODL controller side.
If such a network is found, it doesn't do anything.
If the network is missing then all the neutron
resources are re-created on ODL.
This supports cases when ODL controller comes online
with no Neutron resources on it (also referred to as
"cold reboot", but can happen on various cases).
networking-odl-16.0.0/releasenotes/notes/flat-network-support-7c032aabc21902b1.yaml0000664000175000017500000000024113656750541027614 0ustar zuulzuul00000000000000---
prelude: >
Added FLAT type networks support.
features:
- In addition to existing supported types, networks
of type FLAT can be also used with ODL.
networking-odl-16.0.0/releasenotes/notes/add-host-config-8fb45d7f9732a795.yaml0000664000175000017500000000063513656750541026440 0ustar zuulzuul00000000000000---
prelude: >
  Host Configuration data population from agentless
  OpenDaylight.
features:
- This configuration is used to get the information
about physical host type and other config data like
supported vnic types stored in ovsdb. Networking-odl
can fetch this info from OpenDaylight via REST API
request and feed agents_db table in neutron, which
will be used by neutron scheduler.
networking-odl-16.0.0/releasenotes/notes/make_sync_timeout_float-490072005e3f3413.yaml0000664000175000017500000000045213656750541030112 0ustar zuulzuul00000000000000---
prelude: >
  The config parameter sync_timeout sometimes needs a
  fractional value, but because it was defined as IntOpt,
  using a fractional value could end up in an error.
upgrade:
- |
    Making the config parameter sync_timeout of FloatOpt type to allow
use of fraction values for timeouts.
networking-odl-16.0.0/releasenotes/notes/network-statistics-from-opendaylight-057a6b3c30626527.yaml0000664000175000017500000000032713656750541032604 0ustar zuulzuul00000000000000---
prelude: >
Network Statistics From OpenDaylight.
features:
- Add a ceilometer driver to collect network
statistics information using REST APIs exposed by
network-statistics module in OpenDaylight.
networking-odl-16.0.0/releasenotes/notes/sfc-driver-v2-9378b0db810b6fcb.yaml0000664000175000017500000000130513656750541026170 0ustar zuulzuul00000000000000---
prelude: >
Networking SFC V2 driver for networking-odl.
features:
- Second version of the driver to support networking-sfc
API through OpenDaylight controller. This driver
support CRUD operation for flow classifier, port-pair,
port-pair-group and port-pair-chain. This is version 2
driver and it does support the journal based
implementation, where operations are committed in the
    data store first and then the journal thread syncs them with
    OpenDaylight. This implementation guarantees the ordering
of the CRUD events.
networking-sfc ocata or later is required.
https://review.opendev.org/#/c/363893/ is the corresponding patch
of networking-sfc in Ocata cycle.
networking-odl-16.0.0/releasenotes/notes/deprecate-v1-0dd4f07c68a4a0a4.yaml0000664000175000017500000000110213656750541026040 0ustar zuulzuul00000000000000---
prelude: >
  V1 drivers are marked deprecated beginning with Queens cycle, to be removed
in Rocky cycle.
deprecations:
- The V1 drivers are not actively maintained by the networking-odl team for a
few cycles already and aren't guaranteed to even work.
As such, the networking-odl team has decided that the drivers will be
marked as deprecated beginning Queens cycle, and removed in the beginning
of the Rocky cycle.
If you're still using the V1 drivers, please switch to using the V2 drivers
by updating the appropriate configuration values.
networking-odl-16.0.0/releasenotes/notes/odl-l3-flavor-f093e6c0fb4e9dd8.yaml0000664000175000017500000000102613656750541026253 0ustar zuulzuul00000000000000---
prelude: >
OpenStack neutron allows L3 flavors to enable multiple L3
backends in the same cloud. This is ODL L3 flavor driver to
implement L3 flavors for OpenStack Neutron and OpenDaylight
integration.
features:
- |
L3 flavor driver to implement L3 resource operation callbacks
related to router and floating ip create delete and update.
upgrade:
- |
The configuration upgrades are required to enable l3 flavors,
service_providers should be added to neutron.conf based on
flavor used.
networking-odl-16.0.0/releasenotes/notes/ignore_agent_aliveness-935a1aa8c285dfa2.yaml0000664000175000017500000000137413656750541030314 0ustar zuulzuul00000000000000---
prelude: >
Agent aliveness will be ignored during port binding.
other:
- |
During scale tests we saw that neutron agent aliveness mechanism is not
working properly and was marking agents as down and thus failing on port
binding. We assessed that aliveness in our context is not actually
interesting, as we only use the agentdb mechanism to store the information
we need for port binding.
As a result of this assessment we decided to remove the aliveness
awareness from the code and try to bind the port disregarding that. The
consequence of this is that a "neutron agent-list" call might show the
agent as dead or alive but that's not information we should depend on to
understand if we are binding to that node.
networking-odl-16.0.0/releasenotes/notes/trunk-drivers-3592691bdd08929e.yaml0000664000175000017500000000052213656750541026215 0ustar zuulzuul00000000000000---
prelude: >
Trunk Drivers v1 and v2 for networking-odl.
features:
- |
    A new driver to integrate OpenStack TrunkPort API with OpenDaylight
backend. It supports CRUD operations for TrunkPorts. The version v2
driver will first log the call in journal table before execution.
Version v1 driver doesn't log any calls.
networking-odl-16.0.0/releasenotes/notes/fix-ws-ssl-timeout-e16cd41779c05d42.yaml0000664000175000017500000000034613656750541027145 0ustar zuulzuul00000000000000---
fixes:
- |
Fixes an issue with SSL websocket connections where a read timeout was
causing the client to close the connection. Read timeout is normal when
no port status update is being sent by the server (ODL).
networking-odl-16.0.0/releasenotes/notes/fix-sfc-full-sync-4eafe97d27b8b33e.yaml0000664000175000017500000000007113656750541027141 0ustar zuulzuul00000000000000---
fixes:
- Fixes full sync errors with SFCv2 driver.
networking-odl-16.0.0/releasenotes/notes/websocket-client-7c8117671aeea181.yaml0000664000175000017500000000034313656750541026702 0ustar zuulzuul00000000000000---
prelude: >
  Websocket-client provides a framework to create
  websocket clients for ODL.
features:
- Features include callback on new notifications
and callback on reconnection which includes
status information.networking-odl-16.0.0/releasenotes/notes/odl_features-option-type-change-367385ae7d1e949e.yaml0000664000175000017500000000117613656750541031654 0ustar zuulzuul00000000000000---
prelude: >
The config option odl_features_json has been added to allow specifying
features in the same format ODL returns during negotiation.
features:
- |
The odl_features_json option accepts a JSON compatible with the JSON
response from ODL's API for retrieving features
("/restconf/operational/neutron:neutron/features").
If this option is configured, networking_odl will not query ODL for
its feature support and will instead use the configured value. If
odl_features and odl_features_json are both specified, odl_features_json
will take precedence and odl_features will not be used at all.
networking-odl-16.0.0/releasenotes/notes/remove-v1-driver-df408f9916fc5e5d.yaml0000664000175000017500000000070313656750541026734 0ustar zuulzuul00000000000000---
prelude: >
The v1 drivers, which were deprecated in the Queens cycle, are removed.
All existing usages should be updated to use the v2 drivers.
upgrade:
- |
If you've been using v1 drivers, update your configuration to use the
v2 drivers.
Otherwise, neutron won't boot properly if v1 drivers are still used.
critical:
- |
The v1 drivers are removed. If you're still using v1 drivers, migrate to
use the v2 drivers.
networking-odl-16.0.0/releasenotes/notes/journal-recovery-88e583ad2db22bcc.yaml0000664000175000017500000000047413656750541027172 0ustar zuulzuul00000000000000---
prelude: >
Journal recovery for the V2 driver handles failed
journal entries.
features:
- The journal recovery mechanism handles failed journal
entries by inspecting ODL and deciding on the correct
course of action.
This support should be sufficient for the majority of
entry failures.
networking-odl-16.0.0/releasenotes/notes/functional-test-b0855d6f1d85da30.yaml0000664000175000017500000000057113656750541026641 0ustar zuulzuul00000000000000---
prelude: >
The new class of test cases, functional test, has been
  added, along with helper scripts to set up the necessary environment.
other:
- The functional tests were added. It's new class of test cases,
which requires pre-configured environment. Environment to
  run such tests can be configured by the tool at
  networking-odl/tools/configure_for_func_testing.sh
networking-odl-16.0.0/releasenotes/notes/nuke-lbaasv1-driver-fce366522350fe21.yaml0000664000175000017500000000035413656750541027221 0ustar zuulzuul00000000000000---
prelude: >
Remove LbaaS v1 driver, as LbaaS removed v1 API.
upgrade:
- Upgrade to use LBaaS v2 driver and migrate to use
LBaaS v2 driver.
deprecations:
- LBaaS v1 API driver for ODL is removed.
* LBaaS v2 API driver
networking-odl-16.0.0/releasenotes/notes/add-analyze-journal-cmd-189eae2cac4d60a5.yaml0000664000175000017500000000053113656750541030254 0ustar zuulzuul00000000000000---
prelude: >
Add command line tool to analyze logs
features:
- This tool can be used to analyze logs and determine the journal's
operation efficiency.
The tool tracks a journal entry's recording and processing to determine
how much time it took since a journal entry was recorded until it was
processed and sent to ODL.
networking-odl-16.0.0/releasenotes/notes/sfc-driver-v1-d11fd5fd17114f2c.yaml0000664000175000017500000000056513656750541026167 0ustar zuulzuul00000000000000---
prelude: >
Networking SFC V1 driver for networking-odl.
features:
- First version of the driver to support networking-sfc
API through OpenDaylight controller. This driver
support CRUD operation for flow classifier, port-pair,
port-pair-group and port-pair-chain. This is version 1
driver and does not support the journal based
implementation.
networking-odl-16.0.0/releasenotes/notes/set-ovs-hostconfig-flat-default-a3c189858304e2ed.yaml0000664000175000017500000000077313656750541031564 0ustar zuulzuul00000000000000---
prelude: >
update the default value of supported network
type for ovs-set-hostconfig. enable 'flat' by default
'flat' type wasn't enabled because legacy netvirt
doesn't support it. Now new netvirt is introduced
  to deprecate legacy netvirt and the new netvirt supports
flat. So update default value for network type
to reflect it.
upgrade:
- If you're still using legacy netvirt, you need to
disable flat network type explicitly when issuing
set-ovs-hostconfig command.
networking-odl-16.0.0/releasenotes/notes/l2gw-driver-v2-b32aacf882ed446c.yaml0000664000175000017500000000061613656750541026353 0ustar zuulzuul00000000000000---
prelude: >
L2Gateway Driver v2 or networking-odl.
features:
- |
    A new version of L2Gateway driver that integrates OpenStack neutron
L2Gateway API with OpenDaylight backend. It supports CRUD operations
for l2gateway and l2gateway_connection. This driver uses journalling
mechanism, unlike v1 driver, which will first log the operation in
journal table before execution.
networking-odl-16.0.0/releasenotes/notes/pseudo-agent-port-binding-0a3d1d193b99293e.yaml0000664000175000017500000000106613656750541030432 0ustar zuulzuul00000000000000---
prelude: >
Agentless Port binding controller using agentdb
for persistency with ODL provided host configuration.
features:
- Reads host configuration from ODL using a REST/get
and stores the information in Neutron agentdb for
persistency. This host configuration is read back
from agentdb and applied during port binding.
Without this feature several out-of-sync race
conditions were caused due to incorrect host
information.
fixes:
- Includes the following bug fixes
Bug 1608659 - pseudo_agentdb_binding AttributeError.
networking-odl-16.0.0/releasenotes/notes/vlan-transparency-63c153d310eacc5d.yaml0000664000175000017500000000145513656750541027237 0ustar zuulzuul00000000000000---
prelude: >
Support for vlan-transparency.
features:
- The extension `vlan-transparent` is supported for Newton
release, unconditionally only vxlan is considered to
support its extension independent of ODL openstack
provider. It's future work to allow ODL openstack
provider to report list of supported network types
at start up statically.
issues:
- Currently only network type of VXLAN is statically
considered to support vlan-transparent independently
of OpenDaylight openstack provider.
It should use capability report by OpenDaylight
openstack provider statically instead of static hard
code.
other:
- For details please read
'VLAN trunking networks for NFV
'_.
networking-odl-16.0.0/releasenotes/notes/lbaas-driver-v2-46bf34992f4785d1.yaml0000664000175000017500000000105513656750541026302 0ustar zuulzuul00000000000000---
prelude: >
Complement the implementation of odl lbaas driver_v2.
features:
- Complement the implementation of odl lbaas driver_v2.
It supports CRUD operations for loadbalancer, listener,
pool, member and healthmonitor.
fixes:
- Includes the following bug fixes
Bug 1640076 - Using odl lbaas driver_v2 to create listener failed.
Bug 1633030 - Using odl lbaas driver_v2 to create loadbalancer failed.
Bug 1613583 - Odl lbaas driver_v2 Line 61 url_path error.
Bug 1613583 - Using ODL lbaas driver_v2 to create member failed.networking-odl-16.0.0/releasenotes/notes/maintenance-thread-e54c3b4bd7c03546.yaml0000664000175000017500000000103013656750541027236 0ustar zuulzuul00000000000000---
prelude: >
Maintenance thread for the V2 driver.
features:
- The maintenance thread was introduced in the V2 driver
in order to perform various journal maintenance tasks,
such as
* Stale lock release
* Completed entry cleanup
* Full sync
* Journal recovery
The thread runs in a configurable interval and is HA
safe so at most one will be executing regardless of how
many threads are running concurrently.
upgrade:
- Maintenance lock table was added to synchronize multiple
threads.
networking-odl-16.0.0/releasenotes/notes/new-netvirt-default-0eccc77d3cb54484.yaml0000664000175000017500000000066413656750541027514 0ustar zuulzuul00000000000000---
prelude: >
The default setting for OpenDaylight openstack
service provider was changed from ovsdb netvirt
(odl-ovsdb-openstack) to new
netvirt(odl-netvirt-openstack) for OpenDaylight
Boron/Carbon or later.
other:
- With devstack by default with OpenDaylight after
Boron version, new netvirt openstack service
provider(odl-netvirt-openstack) is used instead
of legacy netvirt(odl-ovsdb-openstack).
networking-odl-16.0.0/releasenotes/notes/bgpvpn-driver-v2-36c0772d510587f4.yaml0000664000175000017500000000066013656750541026421 0ustar zuulzuul00000000000000---
prelude: >
BGPVPN Version 2 Driver for OpenDaylight.
features:
- |
A new version of BGPVPN driver that integrate OpenStack Neutron
BGPVPN API with OpenDaylight backend. It supports CRUD operations
for BGPVPN and enables networks and routers to be associated to
such BGPVPNs. This driver uses journaling mechanism, unlike v1 driver,
which will first log the operation in journal table before execution.
networking-odl-16.0.0/releasenotes/notes/qos-driver-v1-711698186ca693c4.yaml0000664000175000017500000000051313656750541025733 0ustar zuulzuul00000000000000---
prelude: >
QoS Driver V1 for networking-odl.
features:
- A new driver to integrate OpenStack neutron QoS API with
OpenDayLight backend. It supports CRUD operations for
QoS policy and its associated rules. The QoS driver in
tree is of version v1, which does not log the operation
request in journal table.
networking-odl-16.0.0/releasenotes/notes/neutron-dhcp-port-dcbc3a1008f45cc2.yaml0000664000175000017500000000121313656750541027226 0ustar zuulzuul00000000000000---
prelude: >
Allocate a neutron port for each subnet to service DHCP requests within
OpenDaylight controller DHCP service.
features:
- |
The feature is to be enabled only for ml2 mechanism V2 Driver, when config
parameter enable_dhcp_service is set to True in ml2_conf.ini. Creates a new
DHCP Neutron port to be serviced by OpenDaylight Netvirt when a Subnet is
created or updated with enable-dhcp parameter. The allocated port is to be
removed when the Subnet is deleted or updated with disable-dhcp parameter.
The port is identified with device-id as OpenDaylight-<subnet-id> and
device-owner as network:dhcp.
networking-odl-16.0.0/releasenotes/notes/delete-completed-rows-immediately-d3aee2ff5278b3f4.yaml0000664000175000017500000000035613656750541032377 0ustar zuulzuul00000000000000---
prelude: >
Completed rows are deleted by default.
upgrade:
- Completed rows will now be immediately deleted upon completion.
To retain the completed rows, set the completed_rows_retention
configuration value explicitly.
networking-odl-16.0.0/releasenotes/notes/version-bump-16230eadac71cbb0.yaml0000664000175000017500000000030313656750541026254 0ustar zuulzuul00000000000000---
prelude: >
networking-odl adopts version number aligned
with neutron from Pike release.
The version number is bumped 11.x.x.
other:
- version is bumped to 11:pike from 4:ocata.
networking-odl-16.0.0/releasenotes/notes/remove-network-topology-67daff08f3d6ff14.yaml0000664000175000017500000000062013656750541030527 0ustar zuulzuul00000000000000---
prelude: >
Eliminate network topology based port binding
upgrade:
- If network topology based port binding,
network-topology, is used, migrate to pseudo agent
based port binding, pseudo-agentdb-binding.
deprecations:
- network topology based port binding was removed.
So is network-topology value for port_binding_controllers.
Migrate pseudo-agentdb-binding port binding.
networking-odl-16.0.0/releasenotes/notes/port-binding-default-b5f24ad350b47eb0.yaml0000664000175000017500000000117113656750541027603 0ustar zuulzuul00000000000000---
prelude: >
Change the default value of port_binding_controller
from network-topology to pseudo-agentdb-binding
as networking-topology will be deprecated.
upgrade:
- pseudo-agentdb-binding is supported by the version
of OpenDaylight Boron(0.5.x) or later.
So for the version of OpenDaylight Beryllium or earlier,
the option, port_binding_controller, needs to be
explicitly configured to be legacy-port-binding or
network-topology(deprecated).
deprecations:
- port binding controller, network-topology, is
deprecated with OpenStack Ocata and will be removed
in future openstack version.
networking-odl-16.0.0/releasenotes/notes/deprecate-qos-driver-v1-96bce9842413700b.yaml0000664000175000017500000000016613656750541027736 0ustar zuulzuul00000000000000---
deprecations:
- The QoS V1 driver is deprecated in the Pike cycle and will be removed
in the Queens release.networking-odl-16.0.0/releasenotes/notes/fix-tls-websocket-3bee50093c3e90cf.yaml0000664000175000017500000000013213656750541027144 0ustar zuulzuul00000000000000---
fixes:
- |
Fixes using TLS secured websocket when HTTPS is used in ML2 ODL URL.
networking-odl-16.0.0/releasenotes/notes/devstack-default-driver-v2-6ae6ce789b4a6cc9.yaml0000664000175000017500000000040613656750541030743 0ustar zuulzuul00000000000000---
prelude: >
Changed devstack default to V2 driver.
other:
- Starting with Ocata, Devstack will use V2 drivers (where available) by
default. To force the use of V1 architecture drivers you can specify
'ODL_V2DRIVER=False' in the local.conf file.
networking-odl-16.0.0/releasenotes/notes/bgpvpn-vni-support-0804d0c0789cd1db.yaml0000664000175000017500000000112213656750541027306 0ustar zuulzuul00000000000000---
prelude: >
Support for bgpvpn-vni.
features:
- |
BGPVPN OpenDaylight v2 driver will be enhanced to support
bgpvpn-vni extension. Bgpvpn VNI resource represents the VNI to
use on VXLAN encapsulated packets transferred to or from ODL
managed computes themselves or for traffic from ODL-managed
computers towards the DC-Gateway. Acceptance and realisation
of the vni attribute in a bgpvpn is available in OpenDaylight(ODL)
controller from Neon release of ODL
other:
- OpenDaylight changes are available at
https://git.opendaylight.org/gerrit/#/c/63405/
networking-odl-16.0.0/releasenotes/notes/drop-py27-support-3bc8094e1823cfcf.yaml0000664000175000017500000000033213656750541027056 0ustar zuulzuul00000000000000---
upgrade:
- |
Python 2.7 support has been dropped. Last release of networking-odl to
support python 2.7 is OpenStack Train. The minimum version of Python now
supported by networking-odl is Python 3.6.
networking-odl-16.0.0/releasenotes/notes/deprecate_ceilometer-0d2830fa1fc6ba4e.yaml0000664000175000017500000000066713656750541030022 0ustar zuulzuul00000000000000---
prelude: >
Ceilometer is marked as deprecated beginning with Stein cycle to be removed
in release T
deprecations:
- |
Ceilometer driver should not be part of networking-odl because it forces a
hard dependency with ceilometer into the project which does not really make
sense. As such, it was decided the driver will be marked as deprecated
beginning with Stein cycle and removed in the beginning of release T
networking-odl-16.0.0/releasenotes/notes/remove_qos_driver_v1-2bfbf1f979082b07.yaml0000664000175000017500000000050113656750541027662 0ustar zuulzuul00000000000000---
prelude: >
As the QoS v2 driver adopted the new framework from OpenStack neutron's
qos driver framework, QoS v1 driver using notification_drivers is no
longer needed.
upgrade:
- Removing QoS V1 driver which is using deprecated notification
driver framework from OpenStack Neutron's QoS driver base.
networking-odl-16.0.0/releasenotes/notes/ovs_hardware_offload_support-38d2b0b7386b8ca7.yaml0000664000175000017500000000045313656750541031477 0ustar zuulzuul00000000000000---
features:
- The ``opendaylight`` mechanism driver now supports hardware offload via
SR-IOV. It allows binding direct (SR-IOV) ports. Using ``openvswitch``
2.8.0 and 'Linux Kernel' 4.12 allows to control the SR-IOV VF
via OpenFlow control plane and gain accelerated 'Open vSwitch'.
networking-odl-16.0.0/releasenotes/notes/odl-feature-negotiation-ece3201a6e9f8f74.yaml0000664000175000017500000000234513656750541030341 0ustar zuulzuul00000000000000---
prelude: >
OpenDaylight feature negotiation allows for networking_odl to adapt its
behavior to the features supported by the specific ODL version.
features:
- Networking-odl first attempts to read the ODL features from the
odl_features config value. If this config value is not present,
networking-odl requests the features from ODL via REST call. Note that this
occurs during the plugin initialize and if ODL is unreachable
networking-odl will keep trying until successful, essentially blocking
networking-odl initialization (and functionality) until successful.
As such, it is recommended that in production environments you manually
configure the odl_features config value. If you are not sure which features
your ODL supports, please consult the ODL documentation or you can retrieve
the list like this,
$ curl -u <user>:<password> http://<odl-host>:8080/restconf/operational/neutron:neutron/features | python -mjson.tool
Note that the features returned in the json have a namespace which should
be omitted from the config value. So, if you got to features, say
neutron-extensions:feature1 and neutron-extensions:feature2, the config
file should have,
odl_features=feature1,feature2
networking-odl-16.0.0/releasenotes/notes/make-ceilometer-dependency-optional-fb0407dd2d367599.yaml0000664000175000017500000000074713656750541032463 0ustar zuulzuul00000000000000---
prelude: >
Ceilometer becomes an optional dependency
features:
- |
If a user wants to utilize the ceilometer driver when installing
networking-odl, the user should install networking-odl with the following
syntax ==> networking-odl[ceilometer]
In case, that driver is not needed, then only networking-odl should be
used
This makes sense because ceilometer driver is only loaded when neutron
cfg requires network.statistics.driver and not by default
networking-odl-16.0.0/releasenotes/notes/add-beryllium-sr4-7eced33ec292bcc8.yaml0000664000175000017500000000037513656750541027211 0ustar zuulzuul00000000000000---
prelude: >
Add ODL Beryllium SR4 release definition.
features:
- Add OpenDaylight Beryllium SR4 release and Beryllium 0.4.5 snapshot
definition and remove Beryllium 0.4.4 snapshot as OpenDaylight
Beryllium 0.4.4 SR4 has been released.
networking-odl-16.0.0/releasenotes/notes/remove-neutron-lbaas-6afe0b0f7b61290a.yaml0000664000175000017500000000036013656750541027633 0ustar zuulzuul00000000000000---
deprecations:
- |
Neutron-lbaas is retired in Train cycle, so all the related dependencies
must be removed from networking-odl, for details see:
http://lists.openstack.org/pipermail/openstack-discuss/2019-May/006158.html
networking-odl-16.0.0/releasenotes/source/0000775000175000017500000000000013656750617020574 5ustar zuulzuul00000000000000networking-odl-16.0.0/releasenotes/source/train.rst0000664000175000017500000000017613656750541022443 0ustar zuulzuul00000000000000==========================
Train Series Release Notes
==========================
.. release-notes::
:branch: stable/train
networking-odl-16.0.0/releasenotes/source/_static/0000775000175000017500000000000013656750617022222 5ustar zuulzuul00000000000000networking-odl-16.0.0/releasenotes/source/_static/.placeholder0000664000175000017500000000000013656750541024467 0ustar zuulzuul00000000000000networking-odl-16.0.0/releasenotes/source/conf.py0000664000175000017500000002064213656750541022073 0ustar zuulzuul00000000000000# -*- coding: utf-8 -*-
#
# Networking OpenDaylight Release Notes documentation build configuration file, created by
# sphinx-quickstart on Fri Jul 22 14:54:21 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# NOTE: sys and os are only used by the optional sys.path.insert() example
# below; they are kept so that example can be enabled by uncommenting it.
import sys
import os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
# 'reno.sphinxext' provides the `release-notes` directive used by the
# per-series .rst pages in this directory.
extensions = [
    'openstackdocstheme',
    'reno.sphinxext',
]
# openstackdocstheme options
# These identify the project so the theme can render the correct
# "report a bug" links in the page footer.
repository_name = 'openstack/networking-odl'
bug_project = 'networking-odl'
bug_tag = 'doc'
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Networking OpenDaylight Release Notes'
copyright = u'2016, networking-odl developers'
# Release notes are version independent, so both values are deliberately
# left empty rather than tracking the package version.
# The full version, including alpha/beta/rc tags.
release = ''
# The short X.Y version.
version = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'openstackdocs'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# " v documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%Y-%m-%d %H:%M'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'NetworkingOpenDaylightReleaseNotesdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',
    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',
    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
    ('index', 'NetworkingOpenDaylightReleaseNotes.tex', u'Networking OpenDaylight Release Notes Documentation',
     u'networking-odl developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'networkingopendaylightreleasenotes', u'Networking OpenDaylight Release Notes Documentation',
     [u'networking-odl developers'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'NetworkingOpenDaylightReleaseNotes', u'Networking OpenDaylight Release Notes Documentation',
     u'networking-odl developers', 'NetworkingOpenDaylightReleaseNotes', 'One line description of project.',
     'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
# -- Options for Internationalization output ------------------------------
# Directories searched for translated message catalogs (the locale/ tree
# alongside this file holds the fr and en_GB .po catalogs).
locale_dirs = ['locale/']
networking-odl-16.0.0/releasenotes/source/stein.rst0000664000175000017500000000022113656750541022437 0ustar zuulzuul00000000000000===================================
Stein Series Release Notes
===================================
.. release-notes::
:branch: stable/stein
networking-odl-16.0.0/releasenotes/source/queens.rst0000664000175000017500000000022313656750541022617 0ustar zuulzuul00000000000000===================================
Queens Series Release Notes
===================================
.. release-notes::
:branch: stable/queens
networking-odl-16.0.0/releasenotes/source/unreleased.rst0000664000175000017500000000015313656750541023450 0ustar zuulzuul00000000000000============================
Current Series Release Notes
============================
.. release-notes::
networking-odl-16.0.0/releasenotes/source/rocky.rst0000664000175000017500000000022113656750541022444 0ustar zuulzuul00000000000000===================================
Rocky Series Release Notes
===================================
.. release-notes::
:branch: stable/rocky
networking-odl-16.0.0/releasenotes/source/index.rst0000664000175000017500000000076413656750541022440 0ustar zuulzuul00000000000000.. Networking OpenDaylight Release Notes documentation master file, created by
sphinx-quickstart on Fri Jul 22 14:54:21 2016.
You can adapt this file completely to your liking, but it should at least
contain the root `toctree` directive.
Welcome to Networking OpenDaylight Release Notes's documentation!
=================================================================
Contents:
.. toctree::
:maxdepth: 2
unreleased
train
stein
rocky
queens
pike
ocata
newton
networking-odl-16.0.0/releasenotes/source/ocata.rst0000664000175000017500000000022113656750541022404 0ustar zuulzuul00000000000000===================================
Ocata Series Release Notes
===================================
.. release-notes::
:branch: stable/ocata
networking-odl-16.0.0/releasenotes/source/newton.rst0000664000175000017500000000022313656750541022631 0ustar zuulzuul00000000000000===================================
Newton Series Release Notes
===================================
.. release-notes::
:branch: stable/newton
networking-odl-16.0.0/releasenotes/source/_templates/0000775000175000017500000000000013656750617022731 5ustar zuulzuul00000000000000networking-odl-16.0.0/releasenotes/source/_templates/.placeholder0000664000175000017500000000000013656750541025176 0ustar zuulzuul00000000000000networking-odl-16.0.0/releasenotes/source/locale/0000775000175000017500000000000013656750617022033 5ustar zuulzuul00000000000000networking-odl-16.0.0/releasenotes/source/locale/fr/0000775000175000017500000000000013656750617022442 5ustar zuulzuul00000000000000networking-odl-16.0.0/releasenotes/source/locale/fr/LC_MESSAGES/0000775000175000017500000000000013656750617024227 5ustar zuulzuul00000000000000networking-odl-16.0.0/releasenotes/source/locale/fr/LC_MESSAGES/releasenotes.po0000664000175000017500000000145313656750541027257 0ustar zuulzuul00000000000000# Gérald LONLAS , 2016. #zanata
msgid ""
msgstr ""
"Project-Id-Version: Networking OpenDaylight Release Notes\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2018-02-09 19:46+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2016-10-22 05:33+0000\n"
"Last-Translator: Gérald LONLAS \n"
"Language-Team: French\n"
"Language: fr\n"
"X-Generator: Zanata 3.9.6\n"
"Plural-Forms: nplurals=2; plural=(n > 1)\n"
msgid "Contents:"
msgstr "Contenu :"
msgid "Current Series Release Notes"
msgstr "Note de la release actuelle"
msgid "Welcome to Networking OpenDaylight Release Notes's documentation!"
msgstr ""
"Bienvenue dans la documentation de la note de Release de Networking "
"OpenDaylight"
networking-odl-16.0.0/releasenotes/source/locale/en_GB/0000775000175000017500000000000013656750617023005 5ustar zuulzuul00000000000000networking-odl-16.0.0/releasenotes/source/locale/en_GB/LC_MESSAGES/0000775000175000017500000000000013656750617024572 5ustar zuulzuul00000000000000networking-odl-16.0.0/releasenotes/source/locale/en_GB/LC_MESSAGES/releasenotes.po0000664000175000017500000006577413656750541027642 0ustar zuulzuul00000000000000# Andi Chandler , 2017. #zanata
msgid ""
msgstr ""
"Project-Id-Version: Networking OpenDaylight Release Notes\n"
"Report-Msgid-Bugs-To: \n"
"POT-Creation-Date: 2018-03-07 20:20+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2017-12-13 01:16+0000\n"
"Last-Translator: Andi Chandler \n"
"Language-Team: English (United Kingdom)\n"
"Language: en_GB\n"
"X-Generator: Zanata 4.3.3\n"
"Plural-Forms: nplurals=2; plural=(n != 1)\n"
msgid "11.0.0"
msgstr "11.0.0"
msgid "3.0.0"
msgstr "3.0.0"
msgid "3.1.0"
msgstr "3.1.0"
msgid "4.0.0"
msgstr "4.0.0"
msgid ""
"A new driver to integrate OpenStack TrunkPort API with OpenDayLight backend. "
"It supports CRUD operations for TrunkPorts. The version v2 driver will first "
"log the call in journal table before execution. Version v1 driver doesn't "
"log any calls."
msgstr ""
"A new driver to integrate OpenStack TrunkPort API with OpenDayLight backend. "
"It supports CRUD operations for TrunkPorts. The version v2 driver will first "
"log the call in journal table before execution. Version v1 driver doesn't "
"log any calls."
msgid ""
"A new driver to integrate OpenStack neutron QoS API with OpenDayLight "
"backend. It supports CRUD operations for QoS policy and its associated "
"rules. The QoS driver in tree is of version v1, which does not log the "
"operation request in journal table."
msgstr ""
"A new driver to integrate OpenStack neutron QoS API with OpenDayLight "
"backend. It supports CRUD operations for QoS policy and its associated "
"rules. The QoS driver in tree is of version v1, which does not log the "
"operation request in journal table."
msgid ""
"A new version of BGPVPN driver that integrate OpenStack Neutron BGPVPN API "
"with OpenDaylight backend. It supports CRUD operations for BGPVPN and "
"enables networks and routers to be associated to such BGPVPNs. This driver "
"uses journaling mechanism, unlike v1 driver, which will first log the "
"operation in journal table before execution."
msgstr ""
"A new version of BGPVPN driver that integrate OpenStack Neutron BGPVPN API "
"with OpenDaylight backend. It supports CRUD operations for BGPVPN and "
"enables networks and routers to be associated to such BGPVPNs. This driver "
"uses journaling mechanism, unlike v1 driver, which will first log the "
"operation in journal table before execution."
msgid ""
"A new version of L2Gateway driver that integrate OpenStack neutron L2Gateway "
"API with OpenDaylight backend. It supports CRUD operations for l2gateway and "
"l2gateway_connection. This driver uses journalling mechanism, unlike v1 "
"driver, which will first log the operation in journal table before execution."
msgstr ""
"A new version of L2Gateway driver that integrate OpenStack neutron L2Gateway "
"API with OpenDaylight backend. It supports CRUD operations for l2gateway and "
"l2gateway_connection. This driver uses journalling mechanism, unlike v1 "
"driver, which will first log the operation in journal table before execution."
msgid ""
"A new version of QoS driver that integrate OpenStack neutron QoS API with "
"OpenDaylight backend. This driver uses journaling mechanism unlike v1 "
"driver, which will first log the operation in journal table before execution."
msgstr ""
"A new version of QoS driver that integrate OpenStack neutron QoS API with "
"OpenDaylight backend. This driver uses journaling mechanism unlike v1 "
"driver, which will first log the operation in journal table before execution."
msgid "Add ODL Beryllium SR4 release definition."
msgstr "Add ODL Beryllium SR4 release definition."
msgid ""
"Add OpenDaylight Beryllium SR4 release and Beryllium 0.4.5 snapshot "
"definition and remove Beryllium 0.4.4 snapshot as OpenDaylight Beryllium "
"0.4.4 SR4 has been released."
msgstr ""
"Add OpenDaylight Beryllium SR4 release and Beryllium 0.4.5 snapshot "
"definition and remove Beryllium 0.4.4 snapshot as OpenDaylight Beryllium "
"0.4.4 SR4 has been released."
msgid ""
"Add a ceilometer driver to collect network statistics information using REST "
"APIs exposed by network-statistics module in OpenDaylight."
msgstr ""
"Add a Ceilometer driver to collect network statistics information using REST "
"APIs exposed by network-statistics module in OpenDaylight."
msgid "Added FLAT type networks support."
msgstr "Added FLAT type networks support."
msgid ""
"Agentless Port binding controller using agentdb for persistency with ODL "
"provided host configuration."
msgstr ""
"Agentless Port binding controller using agentdb for persistence with ODL "
"provided host configuration."
msgid ""
"Allocate a neutron port for each subnet to service DHCP requests within "
"OpenDaylight controller DHCP service."
msgstr ""
"Allocate a Neutron port for each subnet to service DHCP requests within "
"OpenDaylight controller DHCP service."
msgid ""
"As the QoS v2 driver adapted new framework from OpenStack neutron's qos "
"driver framework, QoS v1 driver using notification_drivers is no longer "
"needed."
msgstr ""
"As the QoS v2 driver adapted new framework from OpenStack Neutron's QoS "
"driver framework, QoS v1 driver using notification_drivers is no longer "
"needed."
msgid "BGPVPN Version 2 Driver for OpenDaylight."
msgstr "BGPVPN Version 2 Driver for OpenDaylight."
msgid "Bug Fixes"
msgstr "Bug Fixes"
msgid ""
"Change the default value of port_binding_controller from network-topology to "
"pseudo-agentdb-binding as networking-topology will be deprecated."
msgstr ""
"Change the default value of port_binding_controller from network-topology to "
"pseudo-agentdb-binding as networking-topology will be deprecated."
msgid "Changed devstack default to V2 driver."
msgstr "Changed devstack default to V2 driver."
msgid "Complement the implementation of odl lbaas driver_v2."
msgstr "Complement the implementation of ODL LBaaS driver_v2."
msgid ""
"Complement the implementation of odl lbaas driver_v2. It supports CRUD "
"operations for loadbalancer, listener, pool, member and healthmonitor."
msgstr ""
"Complement the implementation of ODL LBaaS driver_v2. It supports CRUD "
"operations for load balancer, listener, pool, member and health monitor."
msgid "Completed rows are deleted by default."
msgstr "Completed rows are deleted by default."
msgid ""
"Completed rows will now be immediately deleted upon completion. To retain "
"the completed rows, set the completed_rows_retention configuration value "
"explicitly."
msgstr ""
"Completed rows will now be immediately deleted upon completion. To retain "
"the completed rows, set the completed_rows_retention configuration value "
"explicitly."
msgid "Contents:"
msgstr "Contents:"
msgid "Current Series Release Notes"
msgstr "Current Series Release Notes"
msgid ""
"Currently only network type of VXLAN is statically considered to support "
"vlan-transparent independently of OpenDaylight openstack provider. It should "
"use capability report by OpenDaylight openstack provider statically instead "
"of static hard code."
msgstr ""
"Currently only the network type of VXLAN is statically considered to support "
"vlan-transparent, independently of the OpenDaylight OpenStack provider. It "
"should use the capability report by the OpenDaylight OpenStack provider "
"instead of a static hard code."
msgid "Deprecation Notes"
msgstr "Deprecation Notes"
msgid "Eliminate network topology based port binding"
msgstr "Eliminate network topology based port binding"
msgid ""
"Features include callback on new notifications and callback on reconnection "
"which includes status information."
msgstr ""
"Features include callback on new notifications and callback on reconnection "
"which includes status information."
msgid ""
"First version of the driver to support networking-sfc API through "
"OpenDaylight controller. This driver support CRUD operation for flow "
"classifier, port-pair, port-pair-group and port-pair-chain. This is version "
"1 driver and does not support the journal based implementation."
msgstr ""
"First version of the driver to support the networking-sfc API through the "
"OpenDaylight controller. This driver supports CRUD operations for flow "
"classifier, port-pair, port-pair-group and port-pair-chain. This is the "
"version 1 driver and does not support the journal based implementation."
msgid "Fixes ODL Neutron NB URL path for SFC v2 Driver."
msgstr "Fixes ODL Neutron NB URL path for SFC v2 Driver."
msgid "Fixes full sync errors with SFCv2 driver."
msgstr "Fixes full sync errors with SFCv2 driver."
msgid ""
"For details please read 'VLAN trunking networks for NFV '_."
msgstr ""
"For details please read 'VLAN trunking networks for NFV '_."
msgid ""
"Full sync supports and ODL controller with no Neutron resources on it. This "
"support is for the V2 driver, as V1 driver already supports this."
msgstr ""
"Full sync supports an ODL controller with no Neutron resources on it. This "
"support is for the V2 driver, as V1 driver already supports this."
msgid "Host Configuration data population from agentless OpenDayLight."
msgstr "Host Configuration data populated from agentless OpenDayLight."
msgid ""
"If network topology based port binding, network-topology, is used, migrate "
"to pseodu agent based port binding, pseudo-agentdb-binding."
msgstr ""
"If network topology based port binding, network-topology, is used, migrate "
"to pseudo agent based port binding, pseudo-agentdb-binding."
msgid ""
"If you're still using legacy netvirt, you need to disable flat network type "
"explicitly when issuing set-ovs-hostconfig command."
msgstr ""
"If you're still using legacy netvirt, you need to disable flat network type "
"explicitly when issuing set-ovs-hostconfig command."
msgid ""
"In addition to existing supported types, networks of type FLAT can be also "
"used with ODL."
msgstr ""
"In addition to existing supported types, networks of type FLAT can be also "
"used with ODL."
msgid ""
"Includes the following bug fixes Bug 1608659 - pseudo_agentdb_binding "
"AttributeError."
msgstr ""
"Includes the following bug fix: Bug 1608659 - pseudo_agentdb_binding "
"AttributeError."
msgid "Journal recovery for the V2 driver handles failed journal entries."
msgstr "Journal recovery for the V2 driver handles failed journal entries."
msgid "Known Issues"
msgstr "Known Issues"
msgid "L2Gateway Driver v2 or networking-odl."
msgstr "L2Gateway Driver v2 or networking-odl."
msgid "LBaaS v1 API driver for ODL is removed. * LBaaS v2 API driver"
msgstr "LBaaS v1 API driver for ODL is removed. * LBaaS v2 API driver"
msgid "Maintenace lock table was added to synchronize multiple threads."
msgstr "Maintenance lock table was added to synchronise multiple threads."
msgid "Maintenance thread for the V2 driver."
msgstr "Maintenance thread for the V2 driver."
msgid "Network Statistics From OpenDaylight."
msgstr "Network Statistics From OpenDaylight."
msgid "Networking SFC V1 driver for networking-odl."
msgstr "Networking SFC V1 driver for networking-odl."
msgid "Networking SFC V2 driver for networking-odl."
msgstr "Networking SFC V2 driver for networking-odl."
msgid ""
"Networking-odl first attempts to read the ODL features from the odl_features "
"config value. If this config value is not present, networking-odl requests "
"the features from ODL via REST call. Note that this occurs during the plugin "
"initialize and if ODL is unreachable networking-odl will keep trying until "
"successful, essentially blocking networking-odl initialization (and "
"functionality) until successful. As such, it is recommended that in "
"production environments you manually configure the odl_features config "
"value. If you are not sure which features your ODL supports, please consult "
"the ODL documentation or you can retrieve the list like this, $ curl -u "
": http://:8080/restconf/operational/neutron:neutron/"
"features | python -mjson.tool Note that the features returned in the json "
"have a namespace which should be omitted from the config value. So, if you "
"got to features, say neutron-extensions:feature1 and neutron-extensions:"
"feature2, the config file should have, odl_features=feature1,feature2"
msgstr ""
"Networking-odl first attempts to read the ODL features from the odl_features "
"config value. If this config value is not present, networking-odl requests "
"the features from ODL via REST call. Note that this occurs during the plugin "
"initialise and if ODL is unreachable networking-odl will keep trying until "
"successful, essentially blocking networking-odl initialisation (and "
"functionality) until successful. As such, it is recommended that in "
"production environments you manually configure the odl_features config "
"value. If you are not sure which features your ODL supports, please consult "
"the ODL documentation or you can retrieve the list like this, $ curl -u "
": http://:8080/restconf/operational/neutron:neutron/"
"features | python -mjson.tool Note that the features returned in the JSON "
"have a namespace which should be omitted from the config value. So, if you "
"got to features, say neutron-extensions:feature1 and neutron-extensions:"
"feature2, the config file should have, odl_features=feature1,feature2"
msgid "New Features"
msgstr "New Features"
msgid "Newton Series Release Notes"
msgstr "Newton Series Release Notes"
msgid "Ocata Series Release Notes"
msgstr "Ocata Series Release Notes"
msgid ""
"OpenDaylight feature negotiation allows for networking_odl to adapt its "
"behavior to the features supported by the specific ODL version."
msgstr ""
"OpenDaylight feature negotiation allows for networking_odl to adapt its "
"behaviour to the features supported by the specific ODL version."
msgid "Other Notes"
msgstr "Other Notes"
msgid "Pike Series Release Notes"
msgstr "Pike Series Release Notes"
msgid "Prelude"
msgstr "Prelude"
msgid "QoS Driver V1 for networking-odl."
msgstr "QoS Driver V1 for networking-odl."
msgid "QoS Driver V2 for networking-odl"
msgstr "QoS Driver V2 for networking-odl"
msgid ""
"Reads host configuration from ODL using a REST/get and stores the "
"information in Neutron agentdb for persistency. This host configuration is "
"read back from agentdb and applied during port binding. Without this feature "
"several out-of-sync race conditions were caused due to incorrect host "
"information."
msgstr ""
"Reads host configuration from ODL using a REST/get and stores the "
"information in Neutron agentdb for persistence. This host configuration is "
"read back from agentdb and applied during port binding. Without this feature "
"several out-of-sync race conditions were caused due to incorrect host "
"information."
msgid "Remove LbaaS v1 driver, as LbaaS removed v1 API."
msgstr "Remove LBaaS v1 driver, as LBaaS removed v1 API."
msgid ""
"Removing QoS V1 driver which is using deprecated notification driver "
"framework from OpenStack Neutron's QoS driver base."
msgstr ""
"Removing QoS V1 driver which is using deprecated notification driver "
"framework from OpenStack Neutron's QoS driver base."
msgid ""
"Second version of the driver to support networking-sfc API through "
"OpenDaylight controller. This driver support CRUD operation for flow "
"classifier, port-pair, port-pair-group and port-pair-chain. This is version "
"2 driver and it does support the journal based implementation, where "
"operations are committed in the data store first and then journal thread "
"sycn it with OpenDaylight. This implementation guarantee the ordering of the "
"CRUD events. networking-sfc ocata or later is required. https://review."
"openstack.org/#/c/363893/ is the corresponding patch of networking-sfc in "
"Ocata cycle."
msgstr ""
"Second version of the driver to support the networking-sfc API through the "
"OpenDaylight controller. This driver supports CRUD operations for flow "
"classifier, port-pair, port-pair-group and port-pair-chain. This is the "
"version 2 driver and it does support the journal based implementation, where "
"operations are committed in the data store first and then the journal thread "
"syncs them with OpenDaylight. This implementation guarantees the ordering of "
"the CRUD events. networking-sfc Ocata or later is required. https://review."
"openstack.org/#/c/363893/ is the corresponding patch of networking-sfc in "
"the Ocata cycle."
msgid ""
"Starting with Ocata, Devstack will use V2 drivers (where available) by "
"default. To force the use of V1 architecture drivers you can specify "
"'ODL_V2DRIVER=False' in the local.conf file."
msgstr ""
"Starting with Ocata, Devstack will use V2 drivers (where available) by "
"default. To force the use of V1 architecture drivers you can specify "
"'ODL_V2DRIVER=False' in the local.conf file."
msgid "Support for vlan-transparency."
msgstr "Support for VLAN-transparency."
msgid ""
"The QoS V1 driver is deprecated in the Pike cycle and will be removed in the "
"Queens release."
msgstr ""
"The QoS V1 driver is deprecated in the Pike cycle and will be removed in the "
"Queens release."
msgid ""
"The V1 drivers are not actively maintained by the networking-odl team for a "
"few cycles already and aren't guaranteed to even work. As such, the "
"networking-odl team has decided that the drivers will be marked as "
"deprecated beginning Queens cycle, and removed in the beginning of the Rocky "
"cycle. If you're still using the V1 drivers, please switch to using the V2 "
"drivers by updating the appropriate configuration values."
msgstr ""
"The V1 drivers are not actively maintained by the networking-odl team for a "
"few cycles already and aren't guaranteed to even work. As such, the "
"networking-odl team has decided that the drivers will be marked as "
"deprecated beginning Queens cycle, and removed in the beginning of the Rocky "
"cycle. If you're still using the V1 drivers, please switch to using the V2 "
"drivers by updating the appropriate configuration values."
msgid ""
"The ``opendaylight`` mechanism driver now supports hardware offload via SR-"
"IOV. It allows binding direct (SR-IOV) ports. Using ``openvswitch`` 2.8.0 "
"and 'Linux Kernel' 4.12 allows to control the SR-IOV VF via OpenFlow control "
"plane and gain accelerated 'Open vSwitch'."
msgstr ""
"The ``opendaylight`` mechanism driver now supports hardware offload via SR-"
"IOV. It allows binding direct (SR-IOV) ports. Using ``openvswitch`` 2.8.0 "
"and 'Linux Kernel' 4.12 allows to control the SR-IOV VF via OpenFlow control "
"plane and gain accelerated 'Open vSwitch'."
msgid ""
"The default setting for OpenDayligut openstack service provider was changed "
"from ovsdb netvirt (odl-ovsdb-openstack) to new netvirt(odl-netvirt-"
"openstack) for OpenDaylight Boron/Carbon or later."
msgstr ""
"The default setting for the OpenDaylight OpenStack service provider was "
"changed from ovsdb netvirt (odl-ovsdb-openstack) to new netvirt (odl-"
"netvirt-openstack) for OpenDaylight Boron/Carbon or later."
msgid ""
"The extension `vlan-transparent` is supported for Newton release, "
"unconditionally only vxlan is considered to support its extension "
"independent of ODL openstack provider. It's future work to allow ODL "
"openstack provider to report list of supported network types at start up "
"statically."
msgstr ""
"The extension `vlan-transparent` is supported for the Newton release. "
"Unconditionally only VXLAN is considered to support this extension, "
"independent of the ODL OpenStack provider. It is future work to allow the "
"ODL OpenStack provider to report the list of supported network types at "
"start-up."
msgid ""
"The feature is to be enabled only for ml2 mechanism V2 Driver, when config "
"parameter enable_dhcp_service is set to True in ml2_conf.ini. Creates a new "
"DHCP Neutron port to be serviced by OpenDaylight Netvirt when a Subnet is "
"created or updated with enable-dhcp parameter. The allocated port is to be "
"removed when the Subnet is deleted or updated with disbale-dhcp parameter. "
"The port is identifed with device-id as OpenDaylight- and device-"
"owner as network:dhcp."
msgstr ""
"The feature is to be enabled only for the ml2 mechanism V2 Driver, when the "
"config parameter enable_dhcp_service is set to True in ml2_conf.ini. It "
"creates a new DHCP Neutron port to be serviced by OpenDaylight Netvirt when "
"a Subnet is created or updated with the enable-dhcp parameter. The allocated "
"port is removed when the Subnet is deleted or updated with the disable-dhcp "
"parameter. The port is identified with device-id as OpenDaylight- "
"and device-owner as network:dhcp."
msgid ""
"The full sync process looks for a \"canary\" network on the ODL controller "
"side. If such a network is found, it doesn't do anything. If the network is "
"missing then all the neutron resources are re-created on ODL. This supports "
"cases when ODL controller comes online with no Neutron resources on it (also "
"referred to as \"cold reboot\", but can happen on various cases)."
msgstr ""
"The full sync process looks for a \"canary\" network on the ODL controller "
"side. If such a network is found, it doesn't do anything. If the network is "
"missing then all the Neutron resources are re-created on ODL. This supports "
"cases when the ODL controller comes online with no Neutron resources on it "
"(also referred to as \"cold reboot\", but can happen in various cases)."
msgid ""
"The functional tests were added. It's new class of test cases, which "
"requires pre-configured environment. Environment to run such tests can be "
"configured by tool in networking-odl/tools.configure_for_func_testing.sh"
msgstr ""
"The functional tests were added. They are a new class of test cases, which "
"requires pre-configured environment. The environment to run such tests can "
"be configured by tool in networking-odl/tools.configure_for_func_testing.sh"
msgid ""
"The journal recovery mechanism handles failed journal entries by inspecting "
"ODL and deciding on the correct course of action. This support should be "
"sufficient for the majority of entry failures."
msgstr ""
"The journal recovery mechanism handles failed journal entries by inspecting "
"ODL and deciding on the correct course of action. This support should be "
"sufficient for the majority of entry failures."
msgid ""
"The maintenance thread was introduced in the V2 driver in order to perform "
"various journal maintenance tasks, such as * Stale lock release * Completed "
"entry cleanup * Full sync * Journal recovery The thread runs in a "
"configurable interval and is HA safe so at most one will be executing "
"regardless of how many threads are running concurrently."
msgstr ""
"The maintenance thread was introduced in the V2 driver in order to perform "
"various journal maintenance tasks, such as * Stale lock release * Completed "
"entry cleanup * Full sync * Journal recovery The thread runs in a "
"configurable interval and is HA safe so at most one will be executing "
"regardless of how many threads are running concurrently."
msgid ""
"The new class of test cases, functional test, has been added. So was help "
"scripts to setup necessary environment."
msgstr ""
"The new class of test cases, functional tests, has been added, along with "
"helper scripts to set up the necessary environment."
msgid ""
"This configuration is used to get the information about physical host type "
"and other config data like supported vnic types stored in ovsdb. Networking-"
"odl can fetch this info from OpenDaylight via REST API request and feed "
"agents_db table in neutron, which will be used by neutron scheduler."
msgstr ""
"This configuration is used to get the information about physical host type "
"and other config data like supported VNIC types stored in ovsdb. Networking-"
"odl can fetch this info from OpenDaylight via REST API request and feed "
"agents_db table in neutron, which will be used by Neutron scheduler."
msgid "Trunk Drivers v1 and v2 for networking-odl."
msgstr "Trunk Drivers v1 and v2 for networking-odl."
msgid "Upgrade Notes"
msgstr "Upgrade Notes"
msgid "Upgrade to use LBaaS v2 driver and migrate to use LBaaS v2 driver."
msgstr "Upgrade to use LBaaS v2 driver and migrate to use LBaaS v2 driver."
msgid ""
"V1 drivers are marked depracated beginning with Queens cycle, to be removed "
"in Rocky cycle."
msgstr ""
"V1 drivers are marked deprecated beginning with the Queens cycle, to be "
"removed in Rocky cycle."
msgid "Websocket-client provides framework to create webscket clients for ODL."
msgstr ""
"Websocket-client provides a framework to create websocket clients for ODL."
msgid "Welcome to Networking OpenDaylight Release Notes's documentation!"
msgstr "Welcome to Networking OpenDaylight release notes documentation!"
msgid ""
"With devstack by default with OpenDaylight after Boron version, new netvirt "
"openstack service provider(odl-netvirt-openstack) is used instead of legacy "
"netvirt(odl-ovsdb-openstack)."
msgstr ""
"With DevStack, by default with OpenDaylight after the Boron version, the "
"new netvirt OpenStack service provider (odl-netvirt-openstack) is used "
"instead of the legacy netvirt (odl-ovsdb-openstack)."
msgid ""
"network topology based port binding was removed. So is network-topology "
"value for port_binding_controllers. Migrate pseudo-agentdb-binding port "
"binding."
msgstr ""
"Network topology based port binding was removed, as was the network-"
"topology value for port_binding_controllers. Migrate to pseudo-agentdb-"
"binding port binding."
msgid ""
"networking-odl adopts version number aligned with neutron from Pike release. "
"The version number is bumped 11.x.x."
msgstr ""
"networking-odl adopts version number aligned with neutron from Pike release. "
"The version number is bumped 11.x.x."
msgid ""
"port binding controller, network-topology, is deprecated with OpenStack "
"Ocata and will be removed in future openstack version."
msgstr ""
"The port binding controller, network-topology, is deprecated with OpenStack "
"Ocata and will be removed in a future OpenStack version."
msgid ""
"pseudo-agentdb-binding is supported by the version of OpenDaylight Boron(0.5."
"x) or later. So for the version of OpenDaylight Beryllium or earlier, the "
"option, port_binding_controller, needs to be explicitly configured to be "
"legacy-port-binding or network-topology(deprecated)."
msgstr ""
"pseudo-agentdb-binding is supported by the version of OpenDaylight Boron(0.5."
"x) or later. So for the version of OpenDaylight Beryllium or earlier, the "
"option, port_binding_controller, needs to be explicitly configured to be "
"legacy-port-binding or network-topology(deprecated)."
msgid ""
"update the default value of supported network type for ovs-set-hostconfig. "
"enable 'flat' by default 'flat' type wasn't enabled because legacy netvirt "
"doesn't support it. Now new netvirt is introduced to deprecate legacy "
"netvirt and New netvirt supports flat. So update default value for network "
"type to reflect it."
msgstr ""
"Update the default value of the supported network type for ovs-set-"
"hostconfig and enable 'flat' by default. The 'flat' type wasn't enabled "
"because legacy netvirt doesn't support it. Now new netvirt is introduced to "
"deprecate legacy netvirt, and new netvirt supports flat, so the default "
"value for the network type is updated to reflect it."
msgid "version is bumped to 11:pike from 4:ocata."
msgstr "version is bumped to 11:pike from 4:ocata."
networking-odl-16.0.0/releasenotes/source/pike.rst0000664000175000017500000000021713656750541022252 0ustar zuulzuul00000000000000===================================
Pike Series Release Notes
===================================
.. release-notes::
:branch: stable/pike
networking-odl-16.0.0/PKG-INFO0000664000175000017500000000427313656750617015706 0ustar zuulzuul00000000000000Metadata-Version: 2.1
Name: networking-odl
Version: 16.0.0
Summary: OpenStack Networking
Home-page: https://docs.openstack.org/networking-odl/latest/
Author: OpenStack
Author-email: openstack-discuss@lists.openstack.org
License: UNKNOWN
Description: ==========================
Welcome to networking-odl!
==========================
.. Team and repository tags
.. image:: http://governance.openstack.org/badges/networking-odl.svg
:target: http://governance.openstack.org/reference/tags/index.html
.. Change things from this point on
Summary
-------
OpenStack networking-odl is a library of drivers and plugins that integrates
OpenStack Neutron API with OpenDaylight Backend. For example it has ML2
driver and L3 plugin to enable communication of OpenStack Neutron L2
and L3 resources API to OpenDayLight Backend.
To report and discover bugs in networking-odl the following
link can be used:
https://bugs.launchpad.net/networking-odl
Any new code submission or proposal must follow the development
guidelines detailed in HACKING.rst and for further details this
link can be checked:
https://docs.openstack.org/networking-odl/latest/
The OpenDaylight homepage:
https://www.opendaylight.org/
Release notes for the project can be found at:
https://docs.openstack.org/releasenotes/networking-odl/
The project source code repository is located at:
https://opendev.org/openstack/networking-odl
Platform: UNKNOWN
Classifier: Environment :: OpenStack
Classifier: Intended Audience :: Information Technology
Classifier: Intended Audience :: System Administrators
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Operating System :: POSIX :: Linux
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Requires-Python: >=3.6
Provides-Extra: ceilometer
Provides-Extra: test
networking-odl-16.0.0/bindep.txt0000664000175000017500000000051013656750541016575 0ustar zuulzuul00000000000000# This overrides the default fallback that can be located at:
# https://opendev.org/openstack/project-config/src/branch/master/nodepool/elements/bindep-fallback.txt
isc-dhcp-client [platform:ubuntu]
netcat-openbsd [platform:ubuntu]
iputils-arping [platform:ubuntu test]
dhclient [platform:fedora]
arping [platform:fedora test]
networking-odl-16.0.0/.zuul.d/0000775000175000017500000000000013656750617016102 5ustar zuulzuul00000000000000networking-odl-16.0.0/.zuul.d/jobs.yaml0000664000175000017500000002177213656750541017730 0ustar zuulzuul00000000000000- job:
name: networking-odl-config-job
description: Fake job to hold configuration settings for jobs
vars:
odl_version_map:
oxygen: &oxygen oxygen-latest
fluorine: &fluorine fluorine-snapshot-0.9
neon: &neon neon-latest
sodium: &sodium sodium-snapshot-0.11
common_devstack_vars: &devstack_vars
devstack_localrc:
ODL_TIMEOUT: 60
ODL_RELEASE: latest-snapshot
# Set here which ODL openstack service provider to use
ODL_NETVIRT_KARAF_FEATURE: odl-neutron-service,odl-restconf-all,odl-aaa-authn,odl-dlux-core,odl-mdsal-apidocs,odl-netvirt-openstack,odl-neutron-logger,odl-neutron-hostconfig-ovs
# Switch to using the ODL's L3 implementation
ODL_L3: True
# public network connectivity
ODL_PROVIDER_MAPPINGS: public:br-ex
PUBLIC_PHYSICAL_NETWORK: public
PUBLIC_BRIDGE: br-ex
Q_USE_PUBLIC_VETH: False
# Enable debug logs for odl ovsdb
ODL_NETVIRT_DEBUG_LOGS: True
# Database
MYSQL_PASSWORD: secretmysql
DATABASE_QUERY_LOGGING: True
OS_LOG_PATH: '{{ zuul.executor.log_root }}'
IS_GATE: True
devstack_services: &devstack_services
c-api: True
c-bak: True
c-sch: True
c-vol: True
cinder: True
dstat: True
g-api: True
g-reg: True
horizon: False
key: True
mysql: True
n-api-meta: True
n-api: True
n-cauth: False
n-cond: True
n-cpu: True
n-crt: True
n-novnc: False
n-obj: True
n-sch: True
neutron: True
neutron-agent: False
neutron-api: True
neutron-dhcp: True
neutron-l3: False
neutron-metadata-agent: True
neutron-qos: True
placement-api: True
placement-client: False
q-agt: False
q-dhcp: False
q-l3: False
q-meta: False
q-svc: False
rabbit: True
devstack_plugins: &devstack_plugins
networking-odl: https://opendev.org/openstack/networking-odl
- job:
name: networking-odl-tempest-base
parent: devstack-tempest
description: |
Base job for tempest-based tests
pre-run: playbooks/tempest/pre.yaml
run: playbooks/tempest/run.yaml
post-run: playbooks/tempest/post.yaml
voting: false
required-projects: &required-projects
- openstack/ceilometer
- openstack/networking-odl
- openstack/networking-l2gw
- openstack/networking-sfc
- openstack/networking-bgpvpn
- openstack/neutron-fwaas
- openstack/devstack-gate
- openstack/neutron
- openstack/tempest
- openstack/neutron-tempest-plugin
roles:
- zuul: openstack/devstack
timeout: 10800
irrelevant-files: &irrelevant_files
- ^.*\.rst$
- ^doc/.*$
- ^releasenotes/.*$
vars:
<<: *devstack_vars
tox_envlist: all-plugin
zuul_copy_output:
'{{ devstack_log_dir }}/screen-karaf.log': 'logs'
devstack_services:
q-svc: true
extensions_to_txt:
log: True
ini: True
devstack_plugins:
<<: *devstack_plugins
neutron-tempest-plugin: https://opendev.org/openstack/neutron-tempest-plugin.git
# Only scenario tests and some compute API tests actually verify ODL is working
# Any API networking tests don't bring up any VMs and just verify that stuff happened in the
# Neutron DB, so they don't actually fail even if ODL is not running at all.
tempest_test_regex: tempest\.(api.compute|scenario|thirdparty)|neutron_tempest_plugin.scenario
tempest_test_blacklist: "{{ ansible_user_dir }}/{{ zuul.project.src_dir }}/tempest-blacklist.txt"
- job:
name: networking-odl-tempest-multinode-base
parent: tempest-multinode-full-py3
required-projects: *required-projects
roles:
- zuul: zuul/zuul-jobs
- zuul: openstack/neutron-tempest-plugin
timeout: 10800
pre-run: playbooks/multinode-setup.yaml
irrelevant-files: *irrelevant_files
vars:
<<: *devstack_vars
tox_envlist: all-plugin
zuul_copy_output:
'{{ devstack_log_dir }}/screen-karaf.log': 'logs'
devstack_services:
q-svc: true
extensions_to_txt:
log: True
ini: True
devstack_plugins:
<<: *devstack_plugins
neutron-tempest-plugin: https://opendev.org/openstack/neutron-tempest-plugin.git
# Only scenario tests and some compute API tests actually verify ODL is working
# Any API networking tests don't bring up any VMs and just verify that stuff happened in the
# Neutron DB, so they don't actually fail even if ODL is not running at all.
tempest_test_regex: tempest\.(api.compute|scenario|thirdparty)|neutron_tempest_plugin.scenario
tempest_test_blacklist: "{{ ansible_user_dir }}/{{ zuul.project.src_dir }}/tempest-blacklist.txt"
- job:
name: networking-odl-tempest-neon
parent: networking-odl-tempest-base
vars:
devstack_localrc:
ODL_RELEASE: *neon
- job:
name: networking-odl-tempest-sodium
parent: networking-odl-tempest-base
vars:
devstack_localrc:
ODL_RELEASE: *sodium
- job:
name: networking-odl-devstack-base
parent: devstack
description: |
Base job for devstack-based tests
pre-run: playbooks/devstack/pre.yaml
required-projects:
- openstack/ceilometer
- openstack/networking-odl
- openstack/networking-l2gw
- openstack/networking-sfc
- openstack/networking-bgpvpn
- openstack/neutron-fwaas
roles:
- zuul: openstack/devstack
timeout: 9000
irrelevant-files: *irrelevant_files
vars:
<<: *devstack_vars
run_devstack: True
- job:
name: networking-odl-devstack-base-tox
parent: networking-odl-devstack-base
pre-run: playbooks/devstack-tox/pre.yaml
run: playbooks/devstack-tox/run.yaml
post-run: playbooks/devstack-tox/post.yaml
description: |
Base job for devstack tests that use a tox environment
- job:
name: networking-odl-functional-base
parent: networking-odl-devstack-base-tox
pre-run: playbooks/functional/pre.yaml
description: |
Base job for functional tests
timeout: 1800
roles:
- zuul: openstack/devstack
vars:
tox_envlist: functional
zuul_copy_output:
'{{ devstack_log_dir }}/functional-logs': 'logs'
'{{ devstack_log_dir }}/screen-karaf.log': 'logs'
extensions_to_txt:
log: True
devstack_localrc:
HOST_IP: 127.0.0.1
UNSTACK_KEEP_ODL: True
run_devstack: False
- job:
name: networking-odl-functional-neon
parent: networking-odl-functional-base
vars:
devstack_localrc:
ODL_RELEASE: *neon
- job:
name: networking-odl-functional-sodium
parent: networking-odl-functional-base
vars:
devstack_localrc:
ODL_RELEASE: *sodium
- job:
name: networking-odl-tempest-neon-multinode
parent: networking-odl-tempest-multinode-base
- job:
name: networking-odl-tempest-sodium-multinode
parent: networking-odl-tempest-multinode-base
- job:
name: networking-odl-rally-neon
parent: rally-task-at-devstack
vars:
devstack_localrc:
ODL_RELEASE: *neon
devstack_plugins:
rally-openstack: https://opendev.org/openstack/rally-openstack
networking-odl: https://opendev.org/openstack/networking-odl
rally_task: rally-jobs/odl.yaml
timeout: 7500
required-projects:
- openstack/devstack
- openstack/devstack-gate
- openstack/networking-odl
- openstack/rally
- openstack/rally-openstack
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
- ^neutron/locale/.*$
- ^neutron/tests/unit/.*$
- ^releasenotes/.*$
- ^tools/.*$
- ^tox.ini$
- job:
name: networking-odl-rally-sodium
parent: rally-task-at-devstack
vars:
devstack_localrc:
ODL_RELEASE: *sodium
devstack_plugins:
rally-openstack: https://opendev.org/openstack/rally-openstack
networking-odl: https://opendev.org/openstack/networking-odl
rally_task: rally-jobs/odl.yaml
timeout: 7500
required-projects:
- openstack/devstack
- openstack/devstack-gate
- openstack/networking-odl
- openstack/rally
- openstack/rally-openstack
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
- ^neutron/locale/.*$
- ^neutron/tests/unit/.*$
- ^releasenotes/.*$
- ^tools/.*$
- ^tox.ini$
# >>> LEGACY JOBS TO REPLACE
- job:
name: networking-odl-grenade
parent: legacy-dsvm-base
run: playbooks/legacy/grenade-dsvm-networking-odl/run.yaml
post-run: playbooks/legacy/grenade-dsvm-networking-odl/post.yaml
timeout: 9000
required-projects:
- openstack/grenade
- openstack/devstack-gate
- openstack/networking-odl
# <<< LEGACY JOBS TO REPLACE
networking-odl-16.0.0/.zuul.d/project.yaml0000664000175000017500000000726713656750541020444 0ustar zuulzuul00000000000000- project:
templates:
- openstack-python3-ussuri-jobs-neutron
- release-notes-jobs-python3
- periodic-stable-jobs-neutron
- publish-openstack-docs-pti
- check-requirements
check:
jobs:
- openstack-tox-pep8:
required-projects:
- openstack/ceilometer
- openstack/neutron-fwaas
- openstack/networking-l2gw
- openstack/networking-sfc
- openstack/networking-bgpvpn
- openstack-tox-cover:
required-projects:
- openstack/ceilometer
- openstack/neutron
- openstack/neutron-fwaas
- openstack/networking-l2gw
- openstack/networking-sfc
- openstack/networking-bgpvpn
- openstack-tox-docs:
required-projects:
- openstack/ceilometer
- openstack/neutron-fwaas
- openstack/networking-l2gw
- openstack/networking-sfc
- openstack/networking-bgpvpn
- openstack-tox-lower-constraints:
required-projects:
- openstack/ceilometer
- openstack/neutron
- openstack/neutron-fwaas
- openstack/networking-l2gw
- openstack/networking-sfc
- openstack/networking-bgpvpn
- openstack-tox-py36:
required-projects:
- openstack/ceilometer
- openstack/neutron-fwaas
- openstack/networking-l2gw
- openstack/networking-sfc
- openstack/networking-bgpvpn
- networking-odl-grenade:
voting: false
irrelevant-files:
- ^(test-|)requirements.txt$
- ^.*\.rst$
- ^doc/.*$
- ^releasenotes/.*$
- ^setup.cfg$
- networking-odl-tempest-neon
- networking-odl-tempest-sodium
- networking-odl-tempest-neon-multinode:
voting: false
- networking-odl-tempest-sodium-multinode:
voting: false
- networking-odl-rally-neon:
voting: false
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
- ^releasenotes/.*$
- networking-odl-rally-sodium:
voting: false
irrelevant-files:
- ^.*\.rst$
- ^doc/.*$
- ^releasenotes/.*$
- networking-odl-functional-neon
- networking-odl-functional-sodium
gate:
jobs:
- openstack-tox-pep8:
required-projects:
- openstack/ceilometer
- openstack/neutron-fwaas
- openstack/networking-l2gw
- openstack/networking-sfc
- openstack/networking-bgpvpn
- openstack-tox-docs:
required-projects:
- openstack/ceilometer
- openstack/neutron-fwaas
- openstack/networking-l2gw
- openstack/networking-sfc
- openstack/networking-bgpvpn
- openstack-tox-lower-constraints:
required-projects:
- openstack/ceilometer
- openstack/neutron
- openstack/neutron-fwaas
- openstack/networking-l2gw
- openstack/networking-sfc
- openstack/networking-bgpvpn
- openstack-tox-py36:
required-projects:
- openstack/ceilometer
- openstack/neutron-fwaas
- openstack/networking-l2gw
- openstack/networking-sfc
- openstack/networking-bgpvpn
- networking-odl-functional-neon
- networking-odl-functional-sodium
networking-odl-16.0.0/test-requirements.txt0000664000175000017500000000137313656750541021044 0ustar zuulzuul00000000000000# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0
coverage>=4.5.1 # Apache-2.0
doc8>=0.8.0 # Apache-2.0
flake8-import-order>=0.17.1 # LGPLv3
python-subunit>=1.2.0 # Apache-2.0/BSD
oslotest>=3.3.0 # Apache-2.0
stestr>=2.0.0 # Apache-2.0
pecan>=1.3.2 # BSD
pylint==2.2.0;python_version>="3.0" # GPLv2
testresources>=2.0.1 # Apache-2.0/BSD
testscenarios>=0.5.0 # Apache-2.0/BSD
testtools>=2.3.0 # MIT
bandit!=1.6.0,>=1.4.0 # Apache-2.0
bashate>=0.5.1 # Apache-2.0
astroid==2.1.0;python_version>="3.0" # LGPLv2.1
# To test ceilometer client
ceilometer>=11.0.0
networking-odl-16.0.0/networking_odl/0000775000175000017500000000000013656750617017630 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/__init__.py0000664000175000017500000000125613656750541021741 0ustar zuulzuul00000000000000# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import gettext
gettext.install('networking_odl')
networking-odl-16.0.0/networking_odl/dhcp/0000775000175000017500000000000013656750617020546 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/dhcp/__init__.py0000664000175000017500000000000013656750541022641 0ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/dhcp/odl_dhcp_driver_base.py0000664000175000017500000000747613656750541025253 0ustar zuulzuul00000000000000# Copyright (c) 2017 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib import constants as n_const
from neutron_lib.plugins import utils as p_utils
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
OPENDAYLIGHT_DEVICE_ID = 'OpenDaylight'
class OdlDhcpDriverBase(object):
# NOTE:(Karthik Prasad/karthik.prasad) Not validating based on value change
# of enable_dhcp in case of subnet update event, instead validating on
# port_id presence in DB by locking the session, this will enable user to
# reissue the same command in case of failure.
def create_or_delete_dhcp_port(self, subnet_context):
# NOTE:(Achuth) Fixes bug 1746715
# DHCP port to be created for IPv4 subnets only, since ODL doesn't
# support IPv6 neutron port ARP responses. This prevents validations
# in ODL and avoids processing these ports incorrectly.
if subnet_context.current['ip_version'] != 4:
LOG.warning("ODL DHCP port is supported only for IPv4 subnet %s",
subnet_context.current['id'])
return
port_id = self.get_dhcp_port_if_exists(subnet_context)
plugin = subnet_context._plugin
if not port_id and subnet_context.current['enable_dhcp']:
LOG.debug("Creating ODL DHCP port for subnet %s of network %s",
subnet_context.current['id'],
subnet_context.current['network_id'])
port = self._make_dhcp_port_dict(subnet_context)
# TODO(boden): rehome and consume from neutron-lib
p_utils.create_port(plugin, subnet_context._plugin_context, port)
if port_id and not subnet_context.current['enable_dhcp']:
self._delete_port(plugin, subnet_context._plugin_context, port_id)
    def _delete_port(self, plugin, context, port_id):
        """Delete a (DHCP) port through the given core plugin.

        :param plugin: core plugin providing delete_port()
        :param context: request context forwarded to the plugin
        :param port_id: id of the port to remove
        """
        LOG.debug("Deleting ODL DHCP port with id %s", port_id)
        plugin.delete_port(context, port_id)
def _make_dhcp_port_dict(self, subnet_context):
subnet_id = subnet_context.current['id']
port_dict = dict(
name='',
admin_state_up=True,
device_id=OPENDAYLIGHT_DEVICE_ID + '-' + subnet_id,
device_owner=n_const.DEVICE_OWNER_DHCP,
network_id=subnet_context.current['network_id'],
fixed_ips=[dict(subnet_id=subnet_id)],
tenant_id=subnet_context.network.current['tenant_id'])
return {'port': port_dict}
    def get_dhcp_port_if_exists(self, subnet_context):
        """Look up the ODL-owned DHCP port for this subnet.

        Matches on network id, the "OpenDaylight-<subnet_id>" device_id
        written by _make_dhcp_port_dict(), and the DHCP device owner.
        Returns the port id of the first match, or None if absent.
        """
        plugin = subnet_context._plugin
        plugin_context = subnet_context._plugin_context
        network_id = subnet_context._subnet['network_id']
        subnet_id = subnet_context.current['id']
        device_id = OPENDAYLIGHT_DEVICE_ID + '-' + subnet_id
        LOG.debug("Retrieving ODL DHCP port for subnet %s", subnet_id)
        # Filter values are lists, per the core plugin's get_ports API.
        filters = {
            'network_id': [network_id],
            'device_id': [device_id],
            'device_owner': [n_const.DEVICE_OWNER_DHCP]
        }
        ports = plugin.get_ports(plugin_context, filters=filters)
        if ports:
            # device_id is unique per subnet, so the first hit suffices.
            port = ports[0]
            LOG.debug("Retrieved ODL owned port %s for subnet %s",
                      port['id'], subnet_id)
            return port['id']
networking-odl-16.0.0/networking_odl/dhcp/odl_dhcp_driver.py0000664000175000017500000000651013656750541024245 0ustar zuulzuul00000000000000# Copyright (c) 2017 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.callbacks import registry
from neutron_lib import constants as n_const
from neutron_lib.plugins import directory
from oslo_log import log as logging
from neutron.plugins.ml2 import driver_context
from networking_odl.common import constants
from networking_odl.dhcp import odl_dhcp_driver_base as driver_base
LOG = logging.getLogger(__name__)
@registry.has_registry_receivers
class OdlDhcpDriver(driver_base.OdlDhcpDriverBase):
    """Journal-event driven DHCP port driver.

    Receives networking-odl BEFORE_COMPLETE journal events for subnets
    and ports, and creates or deletes the ODL-owned DHCP port to match.
    """

    # Length of the "OpenDaylight-" device_id prefix (constant plus the
    # '-' separator, see driver_base._make_dhcp_port_dict). Computed from
    # the constant instead of the previous hard-coded 13 so it cannot
    # silently drift if OPENDAYLIGHT_DEVICE_ID ever changes.
    _DEVICE_ID_PREFIX_LEN = len(driver_base.OPENDAYLIGHT_DEVICE_ID) + 1

    @registry.receives(constants.ODL_SUBNET, [constants.BEFORE_COMPLETE])
    def handle_subnet_event(self, resource, event, trigger, context=None,
                            operation=None, row=None, **kwargs):
        """On subnet create/update, reconcile the subnet's ODL DHCP port."""
        if operation in (constants.ODL_CREATE, constants.ODL_UPDATE):
            try:
                subnet_ctxt = self._get_subnet_context(context,
                                                       row.data['network_id'],
                                                       row.data['id'])
                self.create_or_delete_dhcp_port(subnet_ctxt)
            except Exception as e:
                # Log and swallow so a DHCP-port failure never breaks
                # journal completion for the subnet itself.
                LOG.error("Error while processing %s subnet %s: %s", operation,
                          row.data['id'], e)

    @registry.receives(constants.ODL_PORT, [constants.BEFORE_COMPLETE])
    def handle_port_update_event(self, resource, event, trigger,
                                 context=None, operation=None,
                                 row=None, **kwargs):
        """On port update, drop the ODL DHCP port if it lost its fixed IPs."""
        if operation == constants.ODL_UPDATE:
            try:
                self._delete_if_dhcp_port(context, row)
            except Exception as e:
                device_id = row.data['device_id']
                # device_id is "<OPENDAYLIGHT_DEVICE_ID>-<subnet_id>";
                # strip the prefix and separator to recover the subnet id
                # for the error message.
                subnet_id = (device_id[self._DEVICE_ID_PREFIX_LEN:]
                             if device_id else '')
                LOG.error("Error while processing %s port %s of subnet %s: %s",
                          operation, row.data['id'], subnet_id, e)

    def _get_subnet_context(self, context, network_id, subnet_id):
        """Build an ML2 SubnetContext from current plugin DB state."""
        plugin = directory.get_plugin()
        network = plugin.get_network(context, network_id)
        subnet = plugin.get_subnet(context, subnet_id)
        return driver_context.SubnetContext(plugin, context,
                                            subnet, network)

    def _delete_if_dhcp_port(self, context, row):
        """Delete the port iff it is an ODL DHCP port with no fixed IPs."""
        device_owner = row.data['device_owner']
        device_id = row.data['device_id']
        fixed_ips = row.data['fixed_ips']
        device_id_type = driver_base.OPENDAYLIGHT_DEVICE_ID
        if (device_owner and device_owner == n_const.DEVICE_OWNER_DHCP and
                device_id and
                device_id.startswith(device_id_type) and not fixed_ips):
            plugin = directory.get_plugin()
            self._delete_port(plugin, context, row.data['id'])
networking-odl-16.0.0/networking_odl/l3/0000775000175000017500000000000013656750617020146 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/l3/__init__.py0000664000175000017500000000000013656750541022241 0ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/l3/l3_odl_v2.py0000664000175000017500000001673513656750541022313 0ustar zuulzuul00000000000000# Copyright (c) 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from neutron.db import extraroute_db
from neutron.db import l3_agentschedulers_db
from neutron.db import l3_dvr_db
from neutron.db import l3_gwmode_db
from neutron_lib import constants as q_const
from neutron_lib.db import api as db_api
from neutron_lib.plugins import constants as plugin_constants
from oslo_log import log as logging
from networking_odl.common import config # noqa
from networking_odl.common import constants as odl_const
from networking_odl.journal import full_sync
from networking_odl.journal import journal
LOG = logging.getLogger(__name__)
L3_RESOURCES = {
odl_const.ODL_ROUTER: odl_const.ODL_ROUTERS,
odl_const.ODL_FLOATINGIP: odl_const.ODL_FLOATINGIPS
}
@db_api.retry_if_session_inactive()
@db_api.CONTEXT_WRITER.savepoint
def _record_in_journal(context, object_type, operation, object_id, data):
    # Thin wrapper so journal writes get DB retry-on-inactive-session and
    # savepoint semantics. Note the deliberate argument reorder: callers
    # pass (operation, object_id) but journal.record() takes
    # (object_id, operation).
    journal.record(context, object_type, object_id, operation, data)
class OpenDaylightL3RouterPlugin(
    extraroute_db.ExtraRoute_db_mixin,
    l3_dvr_db.L3_NAT_with_dvr_db_mixin,
    l3_gwmode_db.L3_NAT_db_mixin,
    l3_agentschedulers_db.L3AgentSchedulerDbMixin):

    """Implementation of the OpenDaylight L3 Router Service Plugin.

    This class implements a L3 service plugin that provides
    router and floatingip resources and manages associated
    request/response.

    Every mutating call follows the same pattern: apply the change to the
    Neutron DB via the inherited mixins, then record the result in the
    networking-odl journal for later replay to OpenDaylight.
    """

    supported_extension_aliases = ["dvr", "router", "ext-gw-mode",
                                   "extraroute"]

    def __init__(self):
        super(OpenDaylightL3RouterPlugin, self).__init__()
        # Journal thread that syncs recorded entries to OpenDaylight.
        self.journal = journal.OpenDaylightJournalThread()
        full_sync.register(plugin_constants.L3, L3_RESOURCES)

    def get_plugin_type(self):
        return plugin_constants.L3

    def get_plugin_description(self):
        """Returns string description of the plugin."""
        return ("L3 Router Service Plugin for basic L3 forwarding "
                "using OpenDaylight.")

    @journal.call_thread_on_end
    def create_router(self, context, router):
        """Create the router in the DB, then journal an ODL create."""
        router_dict = super(
            OpenDaylightL3RouterPlugin, self).create_router(context, router)
        _record_in_journal(
            context, odl_const.ODL_ROUTER, odl_const.ODL_CREATE,
            router_dict['id'], router_dict)
        return router_dict

    @journal.call_thread_on_end
    def update_router(self, context, router_id, router):
        """Update the router in the DB, then journal an ODL update."""
        router_dict = super(
            OpenDaylightL3RouterPlugin, self).update_router(
                context, router_id, router)
        _record_in_journal(
            context, odl_const.ODL_ROUTER, odl_const.ODL_UPDATE,
            router_id, router_dict)
        return router_dict

    @journal.call_thread_on_end
    def delete_router(self, context, router_id):
        """Delete the router; journal the delete with its gw port as dep.

        The gateway port id must be read before the DB delete, and is
        journaled as the delete entry's dependency list.
        """
        router_dict = self.get_router(context, router_id)
        dependency_list = [router_dict['gw_port_id']]
        super(OpenDaylightL3RouterPlugin, self).delete_router(context,
                                                              router_id)
        _record_in_journal(
            context, odl_const.ODL_ROUTER, odl_const.ODL_DELETE,
            router_id, dependency_list)

    @journal.call_thread_on_end
    def create_floatingip(self, context, floatingip,
                          initial_status=q_const.FLOATINGIP_STATUS_ACTIVE):
        """Create a floating IP; it starts DOWN when no port is attached."""
        fip = floatingip['floatingip']
        if fip.get('port_id') is None:
            initial_status = q_const.FLOATINGIP_STATUS_DOWN
        fip_dict = super(
            OpenDaylightL3RouterPlugin, self).create_floatingip(
                context, floatingip, initial_status)
        _record_in_journal(
            context, odl_const.ODL_FLOATINGIP, odl_const.ODL_CREATE,
            fip_dict['id'], fip_dict)
        return fip_dict

    @journal.call_thread_on_end
    def update_floatingip(self, context, floatingip_id, floatingip):
        """Update a floating IP and recompute status from its association."""
        fip_dict = super(
            OpenDaylightL3RouterPlugin, self).update_floatingip(
                context, floatingip_id, floatingip)
        # Update status based on association
        if fip_dict.get('port_id') is None:
            fip_dict['status'] = q_const.FLOATINGIP_STATUS_DOWN
        else:
            fip_dict['status'] = q_const.FLOATINGIP_STATUS_ACTIVE
        self.update_floatingip_status(context, floatingip_id,
                                      fip_dict['status'])
        _record_in_journal(
            context, odl_const.ODL_FLOATINGIP, odl_const.ODL_UPDATE,
            floatingip_id, fip_dict)
        return fip_dict

    @journal.call_thread_on_end
    def delete_floatingip(self, context, floatingip_id):
        """Delete a floating IP; journal router/network ids as deps."""
        floatingip_dict = self.get_floatingip(context, floatingip_id)
        dependency_list = [floatingip_dict['router_id'],
                           floatingip_dict['floating_network_id']]
        super(OpenDaylightL3RouterPlugin, self).delete_floatingip(
            context, floatingip_id)
        _record_in_journal(
            context, odl_const.ODL_FLOATINGIP, odl_const.ODL_DELETE,
            floatingip_id, dependency_list)

    def disassociate_floatingips(self, context, port_id, do_notify=True):
        """Detach all FIPs from a port, mark them DOWN, journal updates."""
        fip_dicts = self.get_floatingips(context,
                                         filters={'port_id': [port_id]})
        router_ids = super(
            OpenDaylightL3RouterPlugin, self).disassociate_floatingips(
                context, port_id, do_notify)
        for fip_dict in fip_dicts:
            # Re-read each FIP so the journaled data reflects the
            # post-disassociation state before forcing status to DOWN.
            fip_dict = self.get_floatingip(context, fip_dict['id'])
            fip_dict['status'] = q_const.FLOATINGIP_STATUS_DOWN
            self.update_floatingip_status(context, fip_dict['id'],
                                          fip_dict['status'])
            _record_in_journal(
                context, odl_const.ODL_FLOATINGIP, odl_const.ODL_UPDATE,
                fip_dict['id'], fip_dict)
        return router_ids

    @journal.call_thread_on_end
    def add_router_interface(self, context, router_id, interface_info):
        # No journal record is made here; the decorator presumably kicks
        # the journal thread after the DB change. NOTE(review): confirm
        # the interface's port changes are journaled elsewhere.
        new_router = super(
            OpenDaylightL3RouterPlugin, self).add_router_interface(
                context, router_id, interface_info)
        return new_router

    @journal.call_thread_on_end
    def remove_router_interface(self, context, router_id, interface_info):
        # See add_router_interface: DB change only, no journal record here.
        new_router = super(
            OpenDaylightL3RouterPlugin, self).remove_router_interface(
                context, router_id, interface_info)
        return new_router

    # One-shot flag so the debug message below is emitted at most once
    # per process.
    dvr_deletens_if_no_port_warned = False

    def dvr_deletens_if_no_port(self, context, port_id):
        # TODO(yamahata): implement this method or delete this logging
        # For now, this is defined to avoid attribute exception
        # Since ODL L3 does not create namespaces, this is always going to
        # be a noop. When it is confirmed, delete this comment and logging
        if not self.dvr_deletens_if_no_port_warned:
            LOG.debug('dvr is not suported yet. '
                      'this method needs to be implemented')
            self.dvr_deletens_if_no_port_warned = True
        return []
networking-odl-16.0.0/networking_odl/l3/l3_flavor.py0000664000175000017500000002015713656750541022410 0ustar zuulzuul00000000000000# Copyright 2018 Intel Corporation.
# Copyright 2018 Isaku Yamahata
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from neutron.objects import router as l3_obj
from neutron.services.l3_router.service_providers import base
from neutron_lib.callbacks import events
from neutron_lib.callbacks import priority_group
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants as q_const
from neutron_lib.plugins import constants as plugin_constants
from neutron_lib.plugins import directory
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
from networking_odl.common import constants as odl_const
from networking_odl.journal import full_sync
from networking_odl.journal import journal
LOG = logging.getLogger(__name__)
L3_RESOURCES = {
odl_const.ODL_ROUTER: odl_const.ODL_ROUTERS,
odl_const.ODL_FLOATINGIP: odl_const.ODL_FLOATINGIPS
}
@registry.has_registry_receivers
class ODLL3ServiceProvider(base.L3ServiceProvider):
    """L3 flavor service provider that journals router/FIP events to ODL.

    Precommit callbacks record journal entries only for routers whose
    flavor resolves to this provider (_validate_l3_flavor); postcommit
    callbacks wake the journal sync thread.
    """

    @log_helpers.log_method_call
    def __init__(self, l3_plugin):
        super(ODLL3ServiceProvider, self).__init__(l3_plugin)
        self.journal = journal.OpenDaylightJournalThread()
        # TODO(yamahata): add method for fullsync to retrieve
        # all the router with odl service provider.
        # other router with other service provider should be filtered.
        full_sync.register(plugin_constants.L3, L3_RESOURCES)
        # Dotted path of this class; compared against the flavor's
        # configured driver string in _validate_l3_flavor().
        self.odl_provider = __name__ + "." + self.__class__.__name__

    @property
    def _flavor_plugin(self):
        # Lazily resolve and cache the FLAVORS plugin on first access.
        try:
            return self._flavor_plugin_ref
        except AttributeError:
            self._flavor_plugin_ref = directory.get_plugin(
                plugin_constants.FLAVORS)
            return self._flavor_plugin_ref

    def _validate_l3_flavor(self, context, router_id):
        """Return True iff the router's flavor driver is this provider."""
        if router_id is None:
            return False
        router = l3_obj.Router.get_object(context, id=router_id)
        flavor = self._flavor_plugin.get_flavor(context, router.flavor_id)
        provider = self._flavor_plugin.get_flavor_next_provider(
            context, flavor['id'])[0]
        return str(provider['driver']) == self.odl_provider

    def _update_floatingip_status(self, context, fip_dict):
        """Persist FIP status: ACTIVE when bound to a port, else DOWN."""
        port_id = fip_dict.get('port_id')
        status = q_const.ACTIVE if port_id else q_const.DOWN
        l3_obj.FloatingIP.update_object(context, {'status': status},
                                        id=fip_dict['id'])

    @registry.receives(resources.ROUTER_CONTROLLER,
                       [events.PRECOMMIT_ADD_ASSOCIATION])
    @log_helpers.log_method_call
    def _router_add_association(self, resource, event, trigger, payload=None):
        """Journal a router create once it is associated with this flavor."""
        context = payload.context
        router_dict = payload.request_body
        # Propagate the gateway port id so the journaled data is complete.
        router_dict['gw_port_id'] = payload.latest_state.gw_port_id
        router_id = payload.resource_id
        if not self._validate_l3_flavor(context, router_id):
            return
        journal.record(context, odl_const.ODL_ROUTER, router_dict['id'],
                       odl_const.ODL_CREATE, router_dict)

    @registry.receives(resources.ROUTER, [events.PRECOMMIT_UPDATE],
                       priority_group.PRIORITY_ROUTER_DRIVER)
    @log_helpers.log_method_call
    def _router_update_precommit(self, resource, event, trigger, **kwargs):
        """Journal a router update for routers using this flavor."""
        # NOTE(manjeets) router update bypasses the driver controller
        # and argument type is different.
        payload = kwargs.get('payload', None)
        if payload:
            context = payload.context
            router_id = payload.states[0]['id']
            router_dict = payload.request_body
            gw_port_id = payload.states[0]['gw_port_id']
        else:
            # TODO(manjeets) Remove this shim once payload is fully adapted
            # https://bugs.launchpad.net/neutron/+bug/1747747
            context = kwargs['context']
            router_id = kwargs['router_db'].id
            router_dict = kwargs['router']
            gw_port_id = kwargs['router_db'].gw_port_id
        if not self._validate_l3_flavor(context, router_id):
            return
        # Backfill gw_port_id when the update request did not carry it.
        if 'gw_port_id' not in router_dict:
            router_dict['gw_port_id'] = gw_port_id
        journal.record(context, odl_const.ODL_ROUTER,
                       router_id, odl_const.ODL_UPDATE, router_dict)

    @registry.receives(resources.ROUTER_CONTROLLER,
                       [events.PRECOMMIT_DELETE_ASSOCIATIONS])
    @log_helpers.log_method_call
    def _router_del_association(self, resource, event, trigger, payload=None):
        """Journal a router delete with the gw port as its dependency."""
        router_id = payload.latest_state.id
        context = payload.context
        if not self._validate_l3_flavor(context, router_id):
            return
        # TODO(yamahata): process floating ip etc. or just raise error?
        dependency_list = [payload.latest_state.gw_port_id]
        journal.record(context, odl_const.ODL_ROUTER, router_id,
                       odl_const.ODL_DELETE, dependency_list)

    @registry.receives(resources.FLOATING_IP, [events.PRECOMMIT_CREATE])
    @log_helpers.log_method_call
    def _floatingip_create_precommit(self, resource, event, trigger, **kwargs):
        """Journal a FIP create (deep copy so the request is untouched)."""
        context = kwargs['context']
        fip_dict = copy.deepcopy(kwargs['floatingip'])
        router_id = kwargs['floatingip_db'].router_id
        if not self._validate_l3_flavor(context, router_id):
            return
        fip_dict['id'] = kwargs['floatingip_id']
        self._update_floatingip_status(context, fip_dict)
        # The address may have been allocated by the DB layer rather than
        # supplied in the request; read it back from the DB row.
        if fip_dict['floating_ip_address'] is None:
            fip_dict['floating_ip_address'] = \
                kwargs['floatingip_db'].floating_ip_address
        journal.record(context, odl_const.ODL_FLOATINGIP, fip_dict['id'],
                       odl_const.ODL_CREATE, fip_dict)

    @registry.receives(resources.FLOATING_IP, [events.PRECOMMIT_UPDATE])
    @log_helpers.log_method_call
    def _floatingip_update_precommit(self, resource, event, trigger, **kwargs):
        """Journal a FIP update and refresh its status."""
        context = kwargs['context']
        fip_dict = kwargs['floatingip']
        router_id = kwargs['floatingip_db'].router_id
        fip_dict['id'] = kwargs['floatingip_db'].id
        if not self._validate_l3_flavor(context, router_id):
            return
        self._update_floatingip_status(context, fip_dict)
        journal.record(context, odl_const.ODL_FLOATINGIP, fip_dict['id'],
                       odl_const.ODL_UPDATE, fip_dict)

    @registry.receives(resources.FLOATING_IP, [events.PRECOMMIT_DELETE])
    @log_helpers.log_method_call
    def _floatingip_delete_precommit(self, resource, event, trigger, **kwargs):
        """Journal a FIP delete with router/network ids as dependencies."""
        context = kwargs['context']
        # Look the FIP up by its underlying floating port id.
        fip_data = l3_obj.FloatingIP.get_objects(
            context,
            floating_port_id=kwargs['port']['id'])[0]
        if not self._validate_l3_flavor(context, fip_data.router_id):
            return
        dependency_list = [fip_data.router_id, fip_data.floating_network_id]
        journal.record(context, odl_const.ODL_FLOATINGIP, fip_data.id,
                       odl_const.ODL_DELETE, dependency_list)

    @registry.receives(resources.FLOATING_IP, [events.AFTER_CREATE,
                                               events.AFTER_UPDATE,
                                               events.AFTER_DELETE])
    @registry.receives(resources.ROUTER, [events.AFTER_CREATE,
                                          events.AFTER_UPDATE,
                                          events.AFTER_DELETE])
    @log_helpers.log_method_call
    def _l3_postcommit(self, resource, event, trigger, **kwargs):
        """Wake the journal thread after any committed router/FIP change."""
        self.journal.set_sync_event()
networking-odl-16.0.0/networking_odl/trunk/0000775000175000017500000000000013656750617020773 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/trunk/__init__.py0000664000175000017500000000000013656750541023066 0ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/trunk/trunk_driver_v2.py0000664000175000017500000002113113656750541024464 0ustar zuulzuul00000000000000# Copyright (c) 2017 Ericsson India Global Service Pvt Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants as n_const
from neutron_lib import context
from neutron_lib.plugins import directory
from neutron_lib.services.trunk import constants as t_consts
from oslo_config import cfg
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
from neutron.services.trunk.drivers import base as trunk_base
from networking_odl.common import config as odl_conf
from networking_odl.common import constants as odl_const
from networking_odl.journal import full_sync
from networking_odl.journal import journal
from networking_odl.trunk import constants as odltrunk_const
LOG = logging.getLogger(__name__)
TRUNK_RESOURCES = {
odl_const.ODL_TRUNK: odl_const.ODL_TRUNKS
}
@registry.has_registry_receivers
class OpenDaylightTrunkHandlerV2(object):
    """Callback handlers that journal trunk operations to OpenDaylight.

    Precommit handlers record journal entries; postcommit handlers wake
    the journal thread and keep subport port statuses in sync with the
    parent port.
    """

    def __init__(self):
        cfg.CONF.register_opts(odl_conf.odl_opts, "ml2_odl")
        self.journal = journal.OpenDaylightJournalThread()
        full_sync.register(resources.TRUNK, TRUNK_RESOURCES)
        LOG.info('initialized trunk driver for OpendayLight')

    @staticmethod
    def _record_in_journal(context, trunk_id, operation, data):
        journal.record(context, odl_const.ODL_TRUNK, trunk_id, operation, data)

    # TODO(vthapar) Revisit status updates once websockets are fully
    # implemented - https://review.opendev.org/#/c/421127/
    @log_helpers.log_method_call
    def trunk_create_precommit(self, resource, event, trunk_plugin, payload):
        """Journal a trunk create, forcing status to ACTIVE."""
        data = payload.current_trunk.to_dict()
        data['status'] = t_consts.TRUNK_ACTIVE_STATUS
        self._record_in_journal(payload.context, payload.trunk_id,
                                odl_const.ODL_CREATE, data)

    @log_helpers.log_method_call
    def trunk_update_precommit(self, resource, event,
                               trunk_plugin, payload=None):
        """Journal a trunk update (also used for subport add/remove)."""
        if isinstance(payload, events.EventPayload):
            # TODO(boden): remove shim once all callbacks use lib payloads
            payload.desired_state.update(status=t_consts.TRUNK_ACTIVE_STATUS)
            data = payload.desired_state.to_dict()
            trunk_id = payload.resource_id
        else:
            # Legacy (pre-EventPayload) callback object.
            payload.current_trunk.update(status=t_consts.TRUNK_ACTIVE_STATUS)
            data = payload.current_trunk.to_dict()
            trunk_id = payload.trunk_id
        self._record_in_journal(payload.context, trunk_id,
                                odl_const.ODL_UPDATE, data)

    @log_helpers.log_method_call
    def trunk_delete_precommit(self, resource, event, trunk_plugin, payload):
        """Journal a trunk delete with parent/subport ids as dependencies."""
        # fill in data with parent ids, will be used in parent validations
        trunk_dict = payload.original_trunk.to_dict()
        data = [subport['port_id'] for subport in trunk_dict['sub_ports']]
        data.append(trunk_dict['port_id'])
        self._record_in_journal(payload.context, payload.trunk_id,
                                odl_const.ODL_DELETE, data)

    @log_helpers.log_method_call
    def trunk_create_postcommit(self, resource, event, trunk_plugin, payload):
        """Mark the trunk ACTIVE and wake the journal thread."""
        payload.current_trunk.update(status=t_consts.TRUNK_ACTIVE_STATUS)
        self.journal.set_sync_event()

    @log_helpers.log_method_call
    def trunk_update_postcommit(self, resource, event, trunk_plugin, payload):
        """Mark the trunk ACTIVE and wake the journal thread."""
        payload.current_trunk.update(status=t_consts.TRUNK_ACTIVE_STATUS)
        self.journal.set_sync_event()

    @log_helpers.log_method_call
    def trunk_delete_postcommit(self, resource, event, trunk_plugin, payload):
        """Wake the journal thread after a committed trunk delete."""
        self.journal.set_sync_event()

    @log_helpers.log_method_call
    def trunk_subports_set_status(self, resource, event, trunk_plugin,
                                  payload):
        """Set subport status: DOWN on removal, parent's status on add."""
        core_plugin = directory.get_plugin()
        admin_context = context.get_admin_context()
        if event == events.AFTER_DELETE:
            status = n_const.PORT_STATUS_DOWN
        else:
            parent_id = payload.current_trunk.port_id
            parent_port = core_plugin._get_port(admin_context, parent_id)
            status = parent_port['status']
        for subport in payload.subports:
            self._set_subport_status(core_plugin, admin_context,
                                     subport.port_id, status)

    @log_helpers.log_method_call
    def trunk_subports_update_status(self, resource, event, trigger, **kwargs):
        """Propagate a parent port's status change to all its subports."""
        core_plugin = directory.get_plugin()
        admin_context = context.get_admin_context()
        port = kwargs['port']
        original_port = kwargs['original_port']
        # Only act on actual status transitions.
        if port['status'] == original_port['status']:
            return
        for subport_id in self._get_subports_ids(port['id']):
            self._set_subport_status(core_plugin, admin_context, subport_id,
                                     port['status'])

    def _set_subport_status(self, plugin, admin_context, port_id, status):
        """Persist a single subport's status via the core plugin."""
        plugin.update_port_status(admin_context, port_id, status)

    def _get_subports_ids(self, port_id):
        """Yield subport ids of the trunk whose parent port is port_id."""
        trunk_plugin = directory.get_plugin('trunk')
        filters = {'port_id': port_id}
        trunks = trunk_plugin.get_trunks(context.get_admin_context(),
                                         filters=filters)
        if not trunks:
            return ()
        # port_id is a trunk's parent port, so at most one trunk matches.
        trunk = trunks[0]
        return (subport['port_id'] for subport in trunk['sub_ports'])
@registry.has_registry_receivers
class OpenDaylightTrunkDriverV2(trunk_base.DriverBase):
    """Trunk driver that wires OpenDaylightTrunkHandlerV2 into callbacks."""

    @property
    def is_loaded(self):
        # Active only when the ODL v2 ML2 mechanism driver is configured.
        try:
            return (odl_const.ODL_ML2_MECH_DRIVER_V2 in
                    cfg.CONF.ml2.mechanism_drivers)
        except cfg.NoSuchOptError:
            return False

    @registry.receives(resources.TRUNK_PLUGIN, [events.AFTER_INIT])
    def register(self, resource, event, trigger, payload=None):
        """Subscribe the handler to trunk/subport/port lifecycle events."""
        super(OpenDaylightTrunkDriverV2, self).register(
            resource, event, trigger, payload=payload)
        self._handler = OpenDaylightTrunkHandlerV2()
        registry.subscribe(self._handler.trunk_create_precommit,
                           resources.TRUNK, events.PRECOMMIT_CREATE)
        registry.subscribe(self._handler.trunk_create_postcommit,
                           resources.TRUNK, events.AFTER_CREATE)
        registry.subscribe(self._handler.trunk_update_precommit,
                           resources.TRUNK, events.PRECOMMIT_UPDATE)
        registry.subscribe(self._handler.trunk_update_postcommit,
                           resources.TRUNK, events.AFTER_UPDATE)
        registry.subscribe(self._handler.trunk_delete_precommit,
                           resources.TRUNK, events.PRECOMMIT_DELETE)
        registry.subscribe(self._handler.trunk_delete_postcommit,
                           resources.TRUNK, events.AFTER_DELETE)
        # Subport add/remove is journaled as a trunk update, so the same
        # update handlers are subscribed to SUBPORTS events.
        for event_ in (events.PRECOMMIT_CREATE, events.PRECOMMIT_DELETE):
            registry.subscribe(self._handler.trunk_update_precommit,
                               resources.SUBPORTS, event_)
        for event_ in (events.AFTER_CREATE, events.AFTER_DELETE):
            registry.subscribe(self._handler.trunk_update_postcommit,
                               resources.SUBPORTS, event_)
            # Upon subport creation/deletion we need to set the right port
            # status:
            # 1. Set it to parent status when it is attached to the trunk
            # 2. Set it to down when is removed from the trunk
            registry.subscribe(self._handler.trunk_subports_set_status,
                               resources.SUBPORTS, event_)
        # NOTE(ltomasbo): if the status of the parent port changes, the
        # subports need to update their status too
        registry.subscribe(self._handler.trunk_subports_update_status,
                           resources.PORT, events.AFTER_UPDATE)

    @classmethod
    def create(cls):
        """Factory used by the trunk plugin to instantiate this driver."""
        return cls(odl_const.ODL_ML2_MECH_DRIVER_V2,
                   odltrunk_const.SUPPORTED_INTERFACES,
                   odltrunk_const.SUPPORTED_SEGMENTATION_TYPES,
                   None,
                   can_trunk_bound_port=True)
networking-odl-16.0.0/networking_odl/trunk/constants.py0000664000175000017500000000163613656750541023363 0ustar zuulzuul00000000000000# Copyright (c) 2017 Ericsson India Global Service Pvt Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from neutron_lib.api.definitions import portbindings
from neutron_lib.services.trunk import constants as t_consts
# VIF types accepted by the OpenDaylight trunk driver for trunk ports.
SUPPORTED_INTERFACES = (
    portbindings.VIF_TYPE_OVS,
    portbindings.VIF_TYPE_VHOST_USER,
)
# Segmentation types accepted for trunk subports (VLAN only here).
SUPPORTED_SEGMENTATION_TYPES = (
    t_consts.SEGMENTATION_TYPE_VLAN,
)
networking-odl-16.0.0/networking_odl/bgpvpn/0000775000175000017500000000000013656750617021124 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/bgpvpn/__init__.py0000664000175000017500000000000013656750541023217 0ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/bgpvpn/odl_v2.py0000664000175000017500000001417213656750541022664 0ustar zuulzuul00000000000000#
# Copyright (C) 2017 Ericsson India Global Services Pvt Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from neutron_lib.api.definitions import bgpvpn as bgpvpn_const
from neutron_lib.api.definitions import bgpvpn_vni as bgpvpn_vni_def
from neutron_lib.plugins import directory
from oslo_config import cfg
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
from networking_bgpvpn.neutron.extensions import bgpvpn as bgpvpn_ext
from networking_bgpvpn.neutron.services.service_drivers import driver_api
from networking_odl.common import constants as odl_const
from networking_odl.common import odl_features
from networking_odl.common import postcommit
from networking_odl.journal import full_sync
from networking_odl.journal import journal
cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')
LOG = logging.getLogger(__name__)
# ODL feature flag checked via odl_features.has(); when present the
# driver extends its supported extension aliases with bgpvpn_vni_def.ALIAS.
BGPVPN_VNI = 'bgpvpn-vni'
# Journal resource type (singular) -> plural name; the plural is used to
# derive the plugin getter name ('get_<plural>') during full sync.
BGPVPN_RESOURCES = {
    odl_const.ODL_BGPVPN: odl_const.ODL_BGPVPNS,
    odl_const.ODL_BGPVPN_NETWORK_ASSOCIATION:
        odl_const.ODL_BGPVPN_NETWORK_ASSOCIATIONS,
    odl_const.ODL_BGPVPN_ROUTER_ASSOCIATION:
        odl_const.ODL_BGPVPN_ROUTER_ASSOCIATIONS
}
@postcommit.add_postcommit('bgpvpn', 'net_assoc', 'router_assoc')
class OpenDaylightBgpvpnDriver(driver_api.BGPVPNDriver):
    """OpenDaylight BGPVPN Driver

    This code is the backend implementation for the OpenDaylight BGPVPN
    driver for Openstack Neutron.

    The *_precommit hooks record the operation in the journal inside the
    ongoing DB transaction; the matching *_postcommit hooks (generated by
    @postcommit.add_postcommit) just wake the journal thread.
    """

    @log_helpers.log_method_call
    def __init__(self, service_plugin):
        LOG.info("Initializing OpenDaylight BGPVPN v2 driver")
        super(OpenDaylightBgpvpnDriver, self).__init__(service_plugin)
        self.journal = journal.OpenDaylightJournalThread()
        full_sync.register(bgpvpn_const.ALIAS, BGPVPN_RESOURCES,
                           self.get_resources)
        # Advertise the bgpvpn-vni extension only when ODL reports support.
        if odl_features.has(BGPVPN_VNI):
            self.more_supported_extension_aliases = [bgpvpn_vni_def.ALIAS]

    @staticmethod
    def get_resources(context, resource_type):
        """Return all resources of *resource_type* for full sync."""
        plugin = directory.get_plugin(bgpvpn_const.ALIAS)
        method_name = 'get_%s' % BGPVPN_RESOURCES[resource_type]
        if resource_type == odl_const.ODL_BGPVPN:
            return getattr(plugin, method_name)(context)
        # Associations need their parent bgpvpn id to be fetched.
        return full_sync.get_resources_require_id(plugin, context,
                                                  plugin.get_bgpvpns,
                                                  method_name)

    @log_helpers.log_method_call
    def create_bgpvpn_precommit(self, context, bgpvpn):
        journal.record(context, odl_const.ODL_BGPVPN,
                       bgpvpn['id'], odl_const.ODL_CREATE, bgpvpn)

    @log_helpers.log_method_call
    def update_bgpvpn_precommit(self, context, bgpvpn):
        journal.record(context, odl_const.ODL_BGPVPN,
                       bgpvpn['id'], odl_const.ODL_UPDATE, bgpvpn)

    @log_helpers.log_method_call
    def delete_bgpvpn_precommit(self, context, bgpvpn):
        # Payload is irrelevant for a delete; record an empty one.
        journal.record(context, odl_const.ODL_BGPVPN,
                       bgpvpn['id'], odl_const.ODL_DELETE, [])

    @log_helpers.log_method_call
    def create_net_assoc_precommit(self, context, net_assoc):
        """Journal the network association as an update of its BGPVPN.

        ODL models associations as part of the bgpvpn object and only
        allows a network to be associated with one BGPVPN; any conflicting
        association raises BGPVPNNetworkAssocExistsAnotherBgpvpn.
        """
        our_bgpvpn = None
        for bgpvpn in self.get_bgpvpns(context):
            if bgpvpn['id'] == net_assoc['bgpvpn_id']:
                our_bgpvpn = bgpvpn
            elif bgpvpn['networks'] and (net_assoc['network_id'] in
                                         bgpvpn['networks']):
                raise bgpvpn_ext.BGPVPNNetworkAssocExistsAnotherBgpvpn(
                    driver="OpenDaylight V2",
                    network=net_assoc['network_id'],
                    bgpvpn=bgpvpn['id'])
        journal.record(context, odl_const.ODL_BGPVPN,
                       our_bgpvpn['id'], odl_const.ODL_UPDATE, our_bgpvpn)

    @log_helpers.log_method_call
    def delete_net_assoc_precommit(self, context, net_assoc):
        bgpvpn = self.get_bgpvpn(context, net_assoc['bgpvpn_id'])
        # NOTE(yamahata): precommit is called within db transaction.
        # so removing network_id is still associated.
        # it needs to be removed explicitly from dict.
        bgpvpn['networks'].remove(net_assoc['network_id'])
        journal.record(context, odl_const.ODL_BGPVPN,
                       bgpvpn['id'], odl_const.ODL_UPDATE, bgpvpn)

    @log_helpers.log_method_call
    def create_router_assoc_precommit(self, context, router_assoc):
        """Allow at most one router association per BGPVPN (ODL limit)."""
        associated_routers = self.get_router_assocs(context,
                                                    router_assoc['bgpvpn_id'])
        for assoc_router in associated_routers:
            if router_assoc['router_id'] != assoc_router['router_id']:
                raise bgpvpn_ext.BGPVPNMultipleRouterAssocNotSupported(
                    driver="OpenDaylight V2")
        bgpvpn = self.get_bgpvpn(context, router_assoc['bgpvpn_id'])
        journal.record(context, odl_const.ODL_BGPVPN,
                       bgpvpn['id'], odl_const.ODL_UPDATE, bgpvpn)

    @log_helpers.log_method_call
    def delete_router_assoc_precommit(self, context, router_assoc):
        bgpvpn = self.get_bgpvpn(context, router_assoc['bgpvpn_id'])
        # NOTE(yamahata): precommit is called within db transaction.
        # so removing router_id is still associated.
        # it needs to be removed explicitly from dict.
        bgpvpn['routers'].remove(router_assoc['router_id'])
        journal.record(context, odl_const.ODL_BGPVPN,
                       bgpvpn['id'], odl_const.ODL_UPDATE, bgpvpn)
networking-odl-16.0.0/networking_odl/common/0000775000175000017500000000000013656750617021120 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/common/utils.py0000664000175000017500000000314413656750541022630 0ustar zuulzuul00000000000000# Copyright (c) 2014 Red Hat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from six.moves import urllib_parse as urlparse
from networking_odl.common import constants as odl_const
cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')
def try_del(d, keys):
    """Remove *keys* from dict *d*, silently skipping missing ones."""
    for key in keys:
        d.pop(key, None)
def make_url_object(object_type):
    """Return the URL path segment used by ODL for *object_type*.

    Explicit mappings in odl_const.RESOURCE_URL_MAPPINGS win; anything
    else falls back to the pluralized, hyphenated neutron form.
    """
    mapped = odl_const.RESOURCE_URL_MAPPINGS.get(object_type)
    if mapped is not None:
        return mapped
    return neutronify(object_type + 's')
# TODO(manjeets) consolidate this method with make_url_object
def neutronify(name):
    """Adjust the resource name for use with Neutron's API"""
    return '-'.join(name.split('_'))
def get_odl_url(path=''):
    """Build a URL for an ODL resource at *path*.

    Keeps scheme and host of the configured ml2_odl.url, replacing its
    path (and dropping params/query/fragment).
    """
    split_url = urlparse.urlsplit(cfg.CONF.ml2_odl.url)
    return urlparse.urlunparse(
        (split_url.scheme, split_url.netloc, path, '', '', ''))
networking-odl-16.0.0/networking_odl/common/__init__.py0000664000175000017500000000000013656750541023213 0ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/common/config.py0000664000175000017500000000746113656750541022743 0ustar zuulzuul00000000000000# Copyright (c) 2014 Red Hat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from networking_odl._i18n import _
# Configuration options for the [ml2_odl] group.
odl_opts = [
    cfg.StrOpt('url',
               help=_("HTTP URL of OpenDaylight REST interface.")),
    cfg.StrOpt('username',
               help=_("HTTP username for authentication.")),
    cfg.StrOpt('password', secret=True,
               help=_("HTTP password for authentication.")),
    cfg.IntOpt('timeout', default=10,
               help=_("HTTP timeout in seconds.")),
    cfg.IntOpt('session_timeout', default=30,
               help=_("Tomcat session timeout in minutes.")),
    cfg.FloatOpt('sync_timeout', default=10,
                 help=_("Sync thread timeout in seconds or fraction.")),
    cfg.IntOpt('retry_count', default=5,
               help=_("Number of times to retry a row before failing.")),
    cfg.IntOpt('maintenance_interval', default=300,
               help=_("Journal maintenance operations interval in seconds.")),
    cfg.IntOpt('completed_rows_retention', default=0,
               help=_("Time to keep completed rows (in seconds). "
                      "For performance reasons it's not recommended to "
                      "change this from the default value (0) which "
                      "indicates completed rows aren't kept. "
                      "This value will be checked every maintenance_interval "
                      "by the cleanup thread. To keep completed rows "
                      "indefinitely, set the value to -1")),
    cfg.BoolOpt('enable_lightweight_testing',
                default=False,
                help=_('Test without real ODL.')),
    cfg.StrOpt('port_binding_controller',
               default='pseudo-agentdb-binding',
               help=_('Name of the controller to be used for port binding.')),
    # NOTE: default must be an int (was the string '100'); IntOpt does not
    # coerce default values, so a string default would leak through to
    # consumers of this option.
    cfg.IntOpt('processing_timeout', default=100,
               help=_("Time in seconds to wait before a "
                      "processing row is marked back to pending.")),
    cfg.StrOpt('odl_hostconf_uri',
               help=_("Path for ODL host configuration REST interface"),
               default="/restconf/operational/neutron:neutron/hostconfigs"),
    cfg.IntOpt('restconf_poll_interval', default=30,
               help=_("Poll interval in seconds for getting ODL hostconfig")),
    cfg.BoolOpt('enable_websocket_pseudo_agentdb', default=False,
                help=_('Enable websocket for pseudo-agent-port-binding.')),
    cfg.IntOpt('odl_features_retry_interval', default=5,
               help=_("Wait this many seconds before retrying the odl features"
                      " fetch")),
    cfg.ListOpt('odl_features',
                help=_('A list of features supported by ODL.')),
    cfg.StrOpt('odl_features_json',
               help=_('Features supported by ODL, in the json format returned '
                      'by ODL. Note: This config option takes precedence over '
                      'odl_features.')),
    cfg.BoolOpt('enable_dhcp_service', default=False,
                help=_('Enables the networking-odl driver to supply special'
                       ' neutron ports of "dhcp" type to OpenDaylight'
                       ' Controller for its use in providing DHCP Service.')),
]

cfg.CONF.register_opts(odl_opts, "ml2_odl")
def list_opts():
    """Return this module's (group, options) pairs for option discovery."""
    return [('ml2_odl', odl_opts)]
networking-odl-16.0.0/networking_odl/common/websocket_client.py0000664000175000017500000003307613656750541025023 0ustar zuulzuul00000000000000# Copyright (c) 2017 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import ssl
import threading
import time
from oslo_config import cfg
from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import excutils
from requests import codes
from requests import exceptions
import websocket
from networking_odl._i18n import _
from networking_odl.common import client as odl_client
cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')
LOG = log.getLogger(__name__)

# Datastore names sent to ODL's create-data-change-event-subscription RPC.
ODL_OPERATIONAL_DATASTORE = "OPERATIONAL"
ODL_CONFIGURATION_DATASTORE = "CONFIGURATION"
# Subscription scope values for the same RPC.
ODL_NOTIFICATION_SCOPE_BASE = "BASE"
ODL_NOTIFICATION_SCOPE_ONE = "ONE"
ODL_NOTIFICATION_SCOPE_SUBTREE = "SUBTREE"
# Connection states reported through the optional status_cb callback.
ODL_WEBSOCKET_DISCONNECTED = "ODL_WEBSOCKET_DISCONNECTED"
ODL_WEBSOCKET_CONNECTING = "ODL_WEBSOCKET_CONNECTING"
ODL_WEBSOCKET_CONNECTED = "ODL_WEBSOCKET_CONNECTED"
class OpenDaylightWebsocketClient(object):
    """Thread for the OpenDaylight Websocket """

    def __init__(self, odl_rest_client, path, datastore, scope, leaf_node_only,
                 packet_handler, timeout, status_cb=None):
        self.odl_rest_client = odl_rest_client
        self.path = path
        self.datastore = datastore
        self.scope = scope
        self.leaf_node_only = leaf_node_only
        self.packet_handler = packet_handler
        self.timeout = timeout
        self.exit_websocket_thread = False
        self.status_cb = status_cb
        # NOTE(review): current_status is initialized here but never updated
        # by _set_websocket_status -- looks vestigial, confirm before use.
        self.current_status = ODL_WEBSOCKET_DISCONNECTED
        # The listener thread starts as a side effect of construction.
        self._odl_sync_thread = self.start_odl_websocket_thread()

    @classmethod
    def odl_create_websocket(cls, odl_url, path, datastore, scope,
                             packet_handler, status_cb=None,
                             leaf_node_only=False):
        """Create a websocket connection with ODL.

        This method will create a websocket client based on path,
        datastore and scope params. On data recv from websocket
        packet_handler callback is called. status_cb callback can be
        provided if notifications are required for socket status
        changes
        """
        if odl_url is None:
            LOG.error("invalid odl url", exc_info=True)
            raise ValueError(_("Invalid ODL URL"))

        odl_rest_client = odl_client.OpenDaylightRestClient.create_client(
            odl_url)
        return cls(
            odl_rest_client, path, datastore, scope, leaf_node_only,
            packet_handler, cfg.CONF.ml2_odl.timeout, status_cb
        )

    def start_odl_websocket_thread(self):
        # Start the websocket thread
        LOG.debug("starting a new websocket thread")
        odl_websocket_thread = threading.Thread(
            name='websocket',
            target=self.run_websocket_thread)
        odl_websocket_thread.start()
        return odl_websocket_thread

    def set_exit_flag(self, value=True):
        # set flag to exit
        self.exit_websocket_thread = value

    def run_websocket_thread(self, exit_after_run=False):
        """Main loop: (re)connect to ODL and dispatch received packets.

        Runs until set_exit_flag() is called; *exit_after_run* makes the
        loop execute a single iteration (used by unit tests).
        """
        # TBD connections are persistent so there is really no way to know
        # when it is a "first connection". We need to wait for the
        # dis/reconnect logic to be able to know this
        first_connection = True
        ws = None
        while not self.exit_websocket_thread:
            if exit_after_run:
                # Permanently waiting thread model breaks unit tests
                # Adding this arg to exit after one run for unit tests
                self.set_exit_flag()
            # connect if necessary
            if ws is None:
                try:
                    ws = self._connect_ws()
                except ValueError:
                    LOG.error("websocket irrecoverable error ")
                    return
                if ws is None:
                    time.sleep(cfg.CONF.ml2_odl.restconf_poll_interval)
                    continue
            # read off the websocket
            try:
                data = ws.recv()
                if not data:
                    LOG.warning("websocket received 0 bytes")
                    continue
            except websocket.WebSocketTimeoutException:
                continue
            except ssl.SSLError as e:
                message = e.args[0] if e.args else None
                # TODO(trozet): Workaround due to SSL Timeout not being caught
                # in websocket-client lib (issue 387). Remove when fixed in
                # websocket-client lib.
                if message and 'timed out' in message:
                    continue
                else:
                    LOG.error("SSL websocket unexpected exception, "
                              "closing and restarting...", exc_info=True)
                    # TODO(rsood): Websocket reconnect can cause race
                    # conditions
                    self._close_ws(ws)
                    ws = None
                    continue
            except websocket.WebSocketConnectionClosedException:
                # per websocket-client, "If remote host closed the connection
                # or some network error happened"
                LOG.warning("websocket connection closed or IO error",
                            exc_info=True)
                self._close_ws(ws)
                ws = None
                continue
            except Exception:
                # Connection closed trigger reconnection
                LOG.error("websocket unexpected exception, "
                          "closing and restarting...", exc_info=True)
                # TODO(rsood): Websocket reconnect can cause race conditions
                self._close_ws(ws)
                ws = None
                continue

            # Call handler for data received
            try:
                self.packet_handler(data, first_connection)
                first_connection = False
            except Exception:
                LOG.error("Error in packet_handler callback",
                          exc_info=True)

        self._close_ws(ws)

    def _set_websocket_status(self, status):
        # Notify the (optional) status callback; never let it kill us.
        LOG.info("websocket transition to status %s", status)
        try:
            if self.status_cb:
                self.status_cb(status)
        except Exception:
            LOG.error("Error in status_cb", exc_info=True)

    def _subscribe_websocket(self):
        """ODL Websocket change notification subscription"""
        # Check ODL URL for details on this process
        # https://wiki.opendaylight.org/view/OpenDaylight_Controller:MD-SAL:Restconf:Change_event_notification_subscription#rpc_create-data-change-event-subscription # noqa: E501 # pylint: disable=line-too-long

        # Invoke rpc create-data-change-event-subscription
        ws_create_dce_subs_url = ("restconf/operations/sal-remote:"
                                  "create-data-change-event-subscription")
        odl_subscription_data = {'input': {
            'path': self.path,
            'sal-remote-augment:datastore': self.datastore,
            'sal-remote-augment:scope': self.scope,
            'sal-remote-augment:notification-output-type': 'JSON'
        }}
        try:
            response = self.odl_rest_client.sendjson('post',
                                                     ws_create_dce_subs_url,
                                                     odl_subscription_data)
            response.raise_for_status()
        except exceptions.ConnectionError:
            LOG.error("cannot connect to the opendaylight controller")
            return None
        except exceptions.HTTPError as e:
            # restconf returns 400 on operation when path is not available
            if e.response.status_code == codes.bad_request:
                LOG.debug("response code bad_request (400) "
                          "check path for websocket connection")
                raise ValueError(_("bad_request (http400),check path."))
            LOG.warning("websocket connection failed",
                        exc_info=True)
            return None
        except Exception:
            LOG.error("websocket subscription failed", exc_info=True)
            return None

        # Subscribing to stream. Returns websocket URL to listen to
        ws_dce_subs_url = """restconf/streams/stream/"""
        try:
            stream_name = response.json()
            stream_name = stream_name['output']['stream-name']
            url = ws_dce_subs_url + stream_name
            if self.leaf_node_only:
                url += "?odl-leaf-nodes-only=true"
            response = self.odl_rest_client.get(url)
            response.raise_for_status()
            stream_url = response.headers['location']
            LOG.debug("websocket stream URL: %s", stream_url)
            return stream_url
        except exceptions.ConnectionError:
            LOG.error("cannot connect to the opendaylight controller")
            return None
        except exceptions.HTTPError as e:
            # restconf returns 404 on operation when there is no entry
            if e.response.status_code == codes.not_found:
                LOG.debug("response code not_found (404) "
                          "unable to websocket connection url")
                # Bug fix: this branch handles a 404; the message used to
                # incorrectly claim http400.
                raise ValueError(_("not_found (http404), check path."))
            LOG.warning("websocket connection failed")
            return None
        except ValueError:
            with excutils.save_and_reraise_exception():
                LOG.error("websocket subscribe got invalid stream name")
        except KeyError:
            LOG.error("websocket subscribe got bad stream data")
            raise ValueError(_("websocket subscribe bad stream data"))
        except Exception:
            LOG.error("websocket subscription failed", exc_info=True)
            return None

    def _socket_create_connection(self, stream_url):
        """Open the websocket at *stream_url*; return it, or None."""
        ws = None
        try:
            ws = websocket.create_connection(stream_url,
                                             timeout=self.timeout)
        except ValueError:
            with excutils.save_and_reraise_exception():
                LOG.error("websocket create connection invalid URL")
        except Exception:
            # Although a number of exceptions can occur here
            # we handle them all the same way, return None.
            # As such, enough to just "except Exception."
            # LOG.exception() already records exc_info; the redundant
            # exc_info=True argument was dropped.
            LOG.exception("websocket create connection failed")
            return None
        if ws is None or not ws.connected:
            LOG.error("websocket create connection unsuccessful")
            return None

        LOG.debug("websocket connection established")
        return ws

    def _connect_ws(self):
        """Subscribe with ODL and open the notification websocket."""
        self._set_websocket_status(ODL_WEBSOCKET_CONNECTING)
        stream_url = self._subscribe_websocket()
        if stream_url is None:
            return None
        if 'https:' in self.odl_rest_client.url and 'wss:' not in stream_url:
            LOG.warning('TLS ODL URL detected, but websocket URL is not. '
                        'Forcing websocket URL to TLS')
            stream_url = stream_url.replace('ws:', 'wss:')
        # Delay here causes websocket notification lose (ODL Bug 8299)
        ws = self._socket_create_connection(stream_url)
        if ws is not None:
            self._set_websocket_status(ODL_WEBSOCKET_CONNECTED)
        return ws

    def _close_ws(self, ws):
        """Best-effort close of *ws*; always reports DISCONNECTED."""
        LOG.debug("closing websocket")
        try:
            if ws is not None:
                ws.close()
        except Exception:
            LOG.error("Error while closing websocket", exc_info=True)
        self._set_websocket_status(ODL_WEBSOCKET_DISCONNECTED)
class EventDataParser(object):
    """Helper class to parse websocket notification data"""

    NOTIFICATION_TAG = 'notification'
    DC_NOTIFICATION_TAG = 'data-changed-notification'
    DC_EVENT_TAG = 'data-change-event'

    OPERATION_DELETE = 'deleted'
    OPERATION_CREATE = 'created'
    OPERATION_UPDATE = 'updated'

    def __init__(self, item):
        self.item = item

    @classmethod
    def get_item(cls, payload):
        """Yield one parser per data-change event found in *payload*."""
        try:
            decoded = jsonutils.loads(payload)
        except ValueError:
            LOG.warning("invalid websocket notification")
            return
        try:
            change_events = (decoded[cls.NOTIFICATION_TAG]
                             [cls.DC_NOTIFICATION_TAG]
                             [cls.DC_EVENT_TAG])
        except KeyError:
            LOG.warning("invalid JSON for websocket notification")
            return
        # A single event arrives bare, not wrapped in a list.
        if not isinstance(change_events, list):
            change_events = [change_events]
        for change_event in change_events:
            yield cls(change_event)

    def get_fields(self):
        """Return (operation, path, data) for this event."""
        return (self.get_operation(), self.get_path(), self.get_data())

    def get_path(self):
        return self.item.get('path')

    def get_data(self):
        return self.item.get('data')

    def get_operation(self):
        return self.item.get('operation')

    @staticmethod
    def extract_field(text, key):
        """Return the value of ``[key=value]`` inside *text*, else None."""
        found = re.search(r'\[' + key + r'=(.*?)\]', text)
        return found.group(1) if found else None
networking-odl-16.0.0/networking_odl/common/postcommit.py0000664000175000017500000000400313656750541023661 0ustar zuulzuul00000000000000# Copyright (c) 2017 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import types
from oslo_log import helpers as log_helpers
import six
def _build_func(client_method):
    """Build a *_postcommit method body that just wakes the journal."""
    @log_helpers.log_method_call
    def wrapped(self, *args, **kwargs):
        self.journal.set_sync_event()

    wrapped.__name__ = client_method
    return wrapped
def _unboundmethod(func, cls):
    """Make *func* attachable to *cls* on both python 2 and 3."""
    if not six.PY3:
        # python 2.x needs a real unbound method object
        return types.MethodType(func, None, cls)
    # python 3.x doesn't have unbound methods
    func.__qualname__ = cls.__qualname__ + '.' + func.__name__  # PEP 3155
    return func
def _get_method_name(op, resource):
return op + '_' + resource + '_postcommit'
def _build_method(cls, resource):
    """Generate create/update/delete *_postcommit hooks on *cls*.

    Each generated method is equivalent to:

        @log_helpers.log_method_call
        def <op>_<resource>_postcommit(self, *args, **kwargs):
            self.journal.set_sync_event()

    A hook is only generated when the name exists on a base class but is
    not already overridden in cls itself.
    """
    for operation in ('create', 'update', 'delete'):
        method_name = _get_method_name(operation, resource)
        if hasattr(cls, method_name) and method_name not in cls.__dict__:
            generated = _unboundmethod(_build_func(method_name), cls)
            setattr(cls, method_name, generated)
def _build_methods(cls, *resources):
    """Generate postcommit hooks on *cls* for every resource name."""
    for resource_name in resources:
        _build_method(cls, resource_name)
def add_postcommit(*resources):
    """Class decorator generating trivial *_postcommit methods.

    Usage: @add_postcommit('bgpvpn', 'net_assoc', ...)
    """
    def decorate(cls):
        _build_methods(cls, *resources)
        return cls

    return decorate
networking-odl-16.0.0/networking_odl/common/client.py0000664000175000017500000001131113656750541022741 0ustar zuulzuul00000000000000# Copyright (c) 2014 Red Hat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
from oslo_config import cfg
from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import excutils
import requests
from requests import sessions
from networking_odl.common import utils
LOG = log.getLogger(__name__)

cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')


class OpenDaylightRestClient(object):
    """HTTP(S) client for the OpenDaylight REST API.

    Wraps a requests Session configured with [ml2_odl] credentials and
    provides JSON request/response helpers with uniform error logging.
    """

    @staticmethod
    def _check_opt(url):
        # Validate the effective URL plus the required [ml2_odl] options.
        # NOTE(review): 'url' in required_opts re-checks the *config* value
        # even when an explicit url was passed in -- confirm intended.
        if not url:
            raise cfg.RequiredOptError('url', cfg.OptGroup('ml2_odl'))
        required_opts = ('url', 'username', 'password')
        for opt in required_opts:
            if not getattr(cfg.CONF.ml2_odl, opt):
                raise cfg.RequiredOptError(opt, cfg.OptGroup('ml2_odl'))

    @classmethod
    def create_client(cls, url=None):
        # Build a client from config; switches to the lightweight-testing
        # fake client when enable_lightweight_testing is set.
        if cfg.CONF.ml2_odl.enable_lightweight_testing:
            LOG.debug("ODL lightweight testing is enabled, "
                      "returning a OpenDaylightLwtClient instance")
            # Have to import at here, otherwise we create a dependency loop
            from networking_odl.common import lightweight_testing as lwt
            cls = lwt.OpenDaylightLwtClient
        url = url or cfg.CONF.ml2_odl.url
        cls._check_opt(url)
        return cls(
            url,
            cfg.CONF.ml2_odl.username,
            cfg.CONF.ml2_odl.password,
            cfg.CONF.ml2_odl.timeout)

    def __init__(self, url, username, password, timeout):
        super(OpenDaylightRestClient, self).__init__()
        self.url = url
        self.timeout = timeout
        # Session with HTTP basic auth, reused across requests.
        self.session = sessions.Session()
        self.session.auth = (username, password)

    def get_resource(self, resource_type, resource_id):
        # Fetch a single resource as parsed JSON; None when ODL says 404.
        response = self.get(utils.make_url_object(resource_type) + '/' +
                            resource_id)
        if response.status_code == requests.codes.not_found:
            return None
        return self._check_response(response).json()

    def get(self, urlpath='', data=None):
        return self.request('get', urlpath, data)

    def request(self, method, urlpath='', data=None):
        # Issue a raw request; *data* is a pre-serialized JSON body.
        headers = {'Content-Type': 'application/json'}
        url = '/'.join([self.url, urlpath])
        LOG.debug(
            "Sending METHOD (%(method)s) URL (%(url)s) JSON (%(data)s)",
            {'method': method, 'url': url, 'data': data})
        return self.session.request(
            method, url=url, headers=headers, data=data, timeout=self.timeout)

    def sendjson(self, method, urlpath, obj):
        """Send json to the OpenDaylight controller."""
        data = jsonutils.dumps(obj, indent=2) if obj else None
        try:
            return self._check_response(
                self.request(method, urlpath, data))
        except Exception:
            # Log the failed request before re-raising to the caller.
            with excutils.save_and_reraise_exception():
                LOG.error("REST request ( %(method)s ) to "
                          "url ( %(urlpath)s ) is failed. "
                          "Request body : [%(body)s] service",
                          {'method': method,
                           'urlpath': urlpath,
                           'body': obj})

    def _check_response(self, response):
        # Raise requests.HTTPError on 4xx/5xx, logging the body either way.
        try:
            response.raise_for_status()
        except requests.HTTPError as error:
            with excutils.save_and_reraise_exception():
                LOG.debug("Exception from ODL: %(e)s %(text)s",
                          {'e': error, 'text': response.text}, exc_info=1)
        else:
            LOG.debug("Got response:\n"
                      "(%(response)s)", {'response': response.text})
            return response
class OpenDaylightRestClientGlobal(object):
    """Lazily-created, shared ODL REST client holder.

    Creation of OpenDaylightRestClient is deferred until first use so that
    configuration values are loaded before the client reads them.
    """

    def __init__(self):
        super(OpenDaylightRestClientGlobal, self).__init__()
        self._lock = threading.Lock()
        self._client = None

    def get_client(self):
        """Return the shared client, creating it on first call."""
        with self._lock:
            client = self._client
            if client is None:
                client = OpenDaylightRestClient.create_client()
                self._client = client
        return client
networking-odl-16.0.0/networking_odl/common/callback.py0000664000175000017500000001226413656750541023227 0ustar zuulzuul00000000000000# Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib.db import api as db_api
from oslo_log import log as logging
from oslo_utils import excutils
from networking_odl.common import constants as odl_const
LOG = logging.getLogger(__name__)

# Pair of (singular, plural) ODL resource names for a neutron resource.
ODLResource = collections.namedtuple('ODLResource', ('singular', 'plural'))
# Neutron callback resource -> ODL resource names.
_RESOURCE_MAPPING = {
    resources.SECURITY_GROUP: ODLResource(odl_const.ODL_SG, odl_const.ODL_SGS),
    resources.SECURITY_GROUP_RULE: ODLResource(odl_const.ODL_SG_RULE,
                                               odl_const.ODL_SG_RULES),
}
# Neutron callback event -> ODL journal operation.
_OPERATION_MAPPING = {
    events.PRECOMMIT_CREATE: odl_const.ODL_CREATE,
    events.PRECOMMIT_UPDATE: odl_const.ODL_UPDATE,
    events.PRECOMMIT_DELETE: odl_const.ODL_DELETE,
    events.AFTER_CREATE: odl_const.ODL_CREATE,
    events.AFTER_UPDATE: odl_const.ODL_UPDATE,
    events.AFTER_DELETE: odl_const.ODL_DELETE,
}
# %-format template used by _log_on_callback below.
LOG_TEMPLATE = ("(%(msg)s) with ODL_OPS (%(op)s) ODL_RES_TYPE (%(res_type)s) "
                "ODL_RES_ID (%(res_id)s)) ODL_RES_DICT (%(res_dict)s) "
                "DATA (%(data)s)")
def _log_on_callback(lvl, msg, op, res_type, res_id, res_dict, data):
    """Log a security-group callback event at level *lvl*.

    Attaches exception info when logging at ERROR or above. Bug fix:
    'exc_info' was previously placed inside the %-format mapping, where
    it never reached the logging call; it must be a keyword argument.
    """
    LOG.log(lvl, LOG_TEMPLATE,
            {'msg': msg, 'op': op, 'res_type': res_type, 'res_id': res_id,
             'res_dict': res_dict, 'data': data},
            exc_info=lvl >= logging.ERROR)
class OdlSecurityGroupsHandler(object):
def __init__(self, precommit, postcommit):
assert postcommit is not None
self._precommit = precommit
self._postcommit = postcommit
self._subscribe()
def _subscribe(self):
if self._precommit is not None:
for event in (events.PRECOMMIT_CREATE, events.PRECOMMIT_DELETE):
registry.subscribe(self.sg_callback_precommit,
resources.SECURITY_GROUP, event)
registry.subscribe(self.sg_callback_precommit,
resources.SECURITY_GROUP_RULE, event)
registry.subscribe(
self.sg_callback_precommit, resources.SECURITY_GROUP,
events.PRECOMMIT_UPDATE)
for event in (events.AFTER_CREATE, events.AFTER_DELETE):
registry.subscribe(self.sg_callback_postcommit,
resources.SECURITY_GROUP, event)
registry.subscribe(self.sg_callback_postcommit,
resources.SECURITY_GROUP_RULE, event)
registry.subscribe(self.sg_callback_postcommit,
resources.SECURITY_GROUP, events.AFTER_UPDATE)
def _sg_callback(self, callback, resource, event, trigger, **kwargs):
if 'payload' in kwargs:
# TODO(boden): remove shim once all callbacks use payloads
context = kwargs['payload'].context
res = kwargs['payload'].desired_state
res_id = kwargs['payload'].resource_id
copy_kwargs = kwargs
else:
context = kwargs['context']
res = kwargs.get(resource)
res_id = kwargs.get("%s_id" % resource)
copy_kwargs = kwargs.copy()
copy_kwargs.pop('context')
if res_id is None:
res_id = res.get('id')
odl_res_type = _RESOURCE_MAPPING[resource]
odl_ops = _OPERATION_MAPPING[event]
odl_res_dict = None if res is None else {odl_res_type.singular: res}
_log_on_callback(logging.DEBUG, "Calling callback", odl_ops,
odl_res_type, res_id, odl_res_dict, copy_kwargs)
try:
callback(context, odl_ops, odl_res_type, res_id, odl_res_dict,
**copy_kwargs)
except Exception as e:
# In case of precommit, neutron registry notification caller
# doesn't log its exception. In networking-odl case, we don't
# normally throw exception. So log it here for debug
with excutils.save_and_reraise_exception():
if not db_api.is_retriable(e):
_log_on_callback(logging.ERROR, "Exception from callback",
odl_ops, odl_res_type, res_id,
odl_res_dict, copy_kwargs)
    def sg_callback_precommit(self, resource, event, trigger, **kwargs):
        # Registry entry point for PRECOMMIT_* events; dispatches the
        # notification to the precommit handler via _sg_callback.
        self._sg_callback(self._precommit, resource, event, trigger, **kwargs)
    def sg_callback_postcommit(self, resource, event, trigger, **kwargs):
        # Registry entry point for AFTER_* events; dispatches the
        # notification to the postcommit handler via _sg_callback.
        self._sg_callback(self._postcommit, resource, event, trigger, **kwargs)
networking-odl-16.0.0/networking_odl/common/odl_features.py0000664000175000017500000000745413656750541024154 0ustar zuulzuul00000000000000# Copyright (c) 2017 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import itertools
import time
from oslo_config import cfg
from oslo_log import log
from oslo_serialization import jsonutils
from requests import exceptions
from networking_odl.common import client as odl_client
from networking_odl.common import utils
cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')
LOG = log.getLogger(__name__)
# Feature name advertised by ODL when it maintains operational port status.
OPERATIONAL_PORT_STATUS = 'operational-port-status'
# Sentinel meaning "no features known"; always copied before use so the
# shared instance is never mutated.
EMPTY_FEATURES = {}
# Module-level cache mapping feature name -> configuration, filled by init().
feature_configs = copy.copy(EMPTY_FEATURES)
def init():
    """Initialize the odl_features cache.

    Feature data is taken from the first available source, in order:
    the odl_features_json option, the odl_features option, and finally
    a REST pull from ODL, retried until it succeeds.
    """
    global feature_configs
    feature_configs = None
    features_json = cfg.CONF.ml2_odl.odl_features_json
    if features_json is not None:
        feature_configs = _load_features(jsonutils.loads(features_json))
        return
    features_list = cfg.CONF.ml2_odl.odl_features
    if features_list is not None:
        # Features listed by name only carry an empty configuration.
        feature_configs = dict.fromkeys(features_list, '')
        return
    wait_interval = cfg.CONF.ml2_odl.odl_features_retry_interval
    attempt = 0
    while True:
        feature_configs = _fetch_features()
        if feature_configs is not None:
            return
        LOG.warning('Failed to retrieve ODL features, attempt %i', attempt)
        time.sleep(wait_interval)
        attempt += 1
def has(feature):
    """Return True if *feature* is enabled (requires a prior init())."""
    return feature in feature_configs
def get_config(feature):
    """Return *feature*'s configuration; raises KeyError if not present."""
    return feature_configs[feature]
def deinit():
    """Reset odl_features back to its pre-initialized state."""
    global feature_configs
    # A fresh shallow copy, so EMPTY_FEATURES itself is never shared.
    feature_configs = dict(EMPTY_FEATURES)
def _load_features(json):
    """Parse the features JSON document and return a {name: config} dict.

    :param json: decoded JSON as returned by ODL's neutron:features
                 REST resource.
    :returns: mapping of feature name to its configuration payload
              (empty string when the feature carries no configuration),
              or a copy of EMPTY_FEATURES when no features are listed.
    """
    # documentation on the JSON received can be found at:
    # https://github.com/opendaylight/neutron/blob/master/model/src/main/yang/neutron-extensions.yang
    features = json['features']
    if 'feature' not in features:
        return copy.copy(EMPTY_FEATURES)
    LOG.info('Retrieved ODL features %s', features)
    response = {}
    for feature in features['feature']:
        # NOTE: named 'feature_config' (not 'cfg' as before) so it does not
        # shadow the module-level oslo_config 'cfg' import.
        feature_config = feature.get('configuration', '')
        # 'service-provider-feature' is namespaced, e.g. 'ns:feature-name';
        # only the local part after ':' is used as the key.
        name = feature['service-provider-feature'].split(':')[1]
        response[name] = feature_config
    return response
def _fetch_features():
    '''Fetch the list of features declared by ODL.

    This function should be called once during initialization

    :returns: the parsed feature dict on success, a copy of EMPTY_FEATURES
              when ODL reports no features (or predates feature
              negotiation), or None on error so the caller can retry.
    '''
    path = 'restconf/operational/neutron:neutron/neutron:features'
    features_url = utils.get_odl_url(path)
    client = odl_client.OpenDaylightRestClient.create_client(features_url)
    try:
        response = client.request('get')
    except exceptions.ConnectionError:
        LOG.error("Error connecting to ODL to retrieve features",
                  exc_info=True)
        return None
    # 400 means the controller predates feature negotiation; 404 means
    # negotiation is supported but nothing is configured. Both are treated
    # as "no features" rather than as errors.
    if response.status_code == 400:
        LOG.debug('ODL does not support feature negotiation')
        return copy.copy(EMPTY_FEATURES)
    if response.status_code == 404:
        LOG.debug('No features configured')
        return copy.copy(EMPTY_FEATURES)
    if response.status_code != 200:
        LOG.warning('error fetching features: %i',
                    response.status_code)
        return None
    return _load_features(response.json())
networking-odl-16.0.0/networking_odl/common/lightweight_testing.py0000664000175000017500000001403613656750541025546 0ustar zuulzuul00000000000000# Copyright (c) 2015 Intel Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from copy import deepcopy
import requests
import six
from oslo_log import log as logging
from oslo_serialization import jsonutils
from networking_odl._i18n import _
from networking_odl.common import client
from networking_odl.common import constants as odl_const
LOG = logging.getLogger(__name__)
# HTTP status codes used when fabricating fake responses below.
OK = requests.codes.ok
NO_CONTENT = requests.codes.no_content
NOT_ALLOWED = requests.codes.not_allowed
NOT_FOUND = requests.codes.not_found
BAD_REQUEST = requests.codes.bad_request
class OpenDaylightLwtClient(client.OpenDaylightRestClient):
    """Lightweight testing client

    In-memory stand-in for the ODL REST API: resources live in the
    class-level ``lwt_dict`` and CRUD requests are answered with fake
    ``requests`` responses, so tests can run without a live controller.
    Errors are signalled the same way the real client sees them: by
    raising ``requests.exceptions.HTTPError`` with a fake response.
    """
    # Class-level store shared by all instances: resource type -> {id: dict}.
    lwt_dict = {odl_const.ODL_NETWORKS: {},
                odl_const.ODL_SUBNETS: {},
                odl_const.ODL_PORTS: {},
                odl_const.ODL_SGS: {},
                odl_const.ODL_SG_RULES: {}}
    @classmethod
    def _make_response(cls, status_code=OK, content=None):
        """Only supports 'content-type': 'application/json'"""
        response = requests.models.Response()
        response.status_code = status_code
        if content:
            # requests reads the body from .raw; serialize content as JSON.
            response.raw = six.BytesIO(
                jsonutils.dumps(content).encode('utf-8'))
        return response
    @classmethod
    def _get_resource_id(cls, urlpath):
        # resource ID is the last element of urlpath
        return str(urlpath).rsplit('/', 1)[-1]
    @classmethod
    def post(cls, resource_type, resource_dict, urlpath, resource_list):
        """No ID in URL, elements in resource_list must have ID"""
        if resource_list is None:
            raise ValueError(_("resource_list can not be None"))
        for resource in resource_list:
            if resource['id'] in resource_dict:
                # Duplicate create -> 405, mirroring ODL behavior.
                LOG.debug("%s %s already exists", resource_type,
                          resource['id'])
                response = cls._make_response(NOT_ALLOWED)
                raise requests.exceptions.HTTPError(response=response)
            # Deep copy so later caller-side mutation can't alter the store.
            resource_dict[resource['id']] = deepcopy(resource)
        return cls._make_response(NO_CONTENT)
    @classmethod
    def put(cls, resource_type, resource_dict, urlpath, resource_list):
        """Update one resource (id in URL) or several (ids in payload)."""
        resource_id = cls._get_resource_id(urlpath)
        if resource_list is None:
            raise ValueError(_("resource_list can not be None"))
        if resource_id and len(resource_list) != 1:
            # An id-addressed URL can only carry a single resource.
            LOG.debug("Updating %s with multiple resources", urlpath)
            response = cls._make_response(BAD_REQUEST)
            raise requests.exceptions.HTTPError(response=response)
        for resource in resource_list:
            res_id = resource_id or resource['id']
            if res_id in resource_dict:
                resource_dict[res_id].update(deepcopy(resource))
            else:
                LOG.debug("%s %s does not exist", resource_type, res_id)
                response = cls._make_response(NOT_FOUND)
                raise requests.exceptions.HTTPError(response=response)
        return cls._make_response(NO_CONTENT)
    @classmethod
    def delete(cls, resource_type, resource_dict, urlpath, resource_list):
        """Delete by id from URL, or each id listed in the payload."""
        if resource_list is None:
            resource_id = cls._get_resource_id(urlpath)
            id_list = [resource_id]
        else:
            id_list = [res['id'] for res in resource_list]
        for res_id in id_list:
            removed = resource_dict.pop(res_id, None)
            if removed is None:
                LOG.debug("%s %s does not exist", resource_type, res_id)
                response = cls._make_response(NOT_FOUND)
                raise requests.exceptions.HTTPError(response=response)
        return cls._make_response(NO_CONTENT)
    @classmethod
    def get(cls, resource_type, resource_dict, urlpath, resource_list=None):
        """Fetch one resource (id in URL) or list all of the given type."""
        resource_id = cls._get_resource_id(urlpath)
        if resource_id:
            resource = resource_dict.get(resource_id)
            if resource is None:
                LOG.debug("%s %s does not exist", resource_type, resource_id)
                response = cls._make_response(NOT_FOUND)
                raise requests.exceptions.HTTPError(response=response)
            else:
                # When getting single resource, return value is a dict
                # keyed by the singular name (naive de-pluralization [:-1]).
                r_list = {resource_type[:-1]: deepcopy(resource)}
            return cls._make_response(OK, r_list)
        r_list = [{resource_type[:-1]: deepcopy(res)}
                  for res in resource_dict.values()]
        return cls._make_response(OK, r_list)
    def sendjson(self, method, urlpath, obj=None):
        """Lightweight testing without ODL"""
        if '/' not in urlpath:
            urlpath += '/'
        # First path segment selects the resource store; URL dashes map to
        # underscores in lwt_dict keys.
        resource_type = str(urlpath).split('/', 1)[0]
        resource_type = resource_type.replace('-', '_')
        resource_dict = self.lwt_dict.get(resource_type)
        if resource_dict is None:
            LOG.debug("Resource type %s is not supported", resource_type)
            response = self._make_response(NOT_FOUND)
            raise requests.exceptions.HTTPError(response=response)
        # Dispatch to the matching classmethod: get/post/put/delete.
        func = getattr(self, str(method).lower())
        resource_list = None
        if obj:
            # If obj is not None, it can only have one entry
            assert len(obj) == 1, "Obj can only have one entry"
            key, resource_list = list(obj.items())[0]
            if not isinstance(resource_list, list):
                # Need to transform resource_list to a real list, i.e. [res]
                resource_list = [resource_list]
        return func(resource_type, resource_dict, urlpath, resource_list)
networking-odl-16.0.0/networking_odl/common/constants.py0000664000175000017500000000564713656750541023516 0ustar zuulzuul00000000000000# Copyright (c) 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Singular/plural resource names as used in ODL's neutron northbound URLs
# and payloads. Core resources:
ODL_NETWORK = 'network'
ODL_NETWORKS = 'networks'
ODL_SUBNET = 'subnet'
ODL_SUBNETS = 'subnets'
ODL_PORT = 'port'
ODL_PORTS = 'ports'
ODL_SG = 'security_group'
ODL_SGS = 'security_groups'
ODL_SG_RULE = 'security_group_rule'
ODL_SG_RULES = 'security_group_rules'
# L3 resources:
ODL_ROUTER = 'router'
ODL_ROUTERS = 'routers'
ODL_FLOATINGIP = 'floatingip'
ODL_FLOATINGIPS = 'floatingips'
# QoS resources:
ODL_QOS = 'qos'
ODL_QOS_POLICY = 'policy'
ODL_QOS_POLICIES = 'policies'
# Service function chaining resources, spelled as ODL expects them:
ODL_SFC = 'sfc'
ODL_SFC_FLOW_CLASSIFIER = 'flowclassifier'
ODL_SFC_FLOW_CLASSIFIERS = 'flowclassifiers'
ODL_SFC_PORT_PAIR = 'portpair'
ODL_SFC_PORT_PAIRS = 'portpairs'
ODL_SFC_PORT_PAIR_GROUP = 'portpairgroup'
ODL_SFC_PORT_PAIR_GROUPS = 'portpairgroups'
ODL_SFC_PORT_CHAIN = 'portchain'
ODL_SFC_PORT_CHAINS = 'portchains'
# The same SFC resources as named by networking-sfc (underscored form):
NETWORKING_SFC_FLOW_CLASSIFIER = 'flow_classifier'
NETWORKING_SFC_FLOW_CLASSIFIERS = 'flow_classifiers'
NETWORKING_SFC_PORT_PAIR = 'port_pair'
NETWORKING_SFC_PORT_PAIRS = 'port_pairs'
NETWORKING_SFC_PORT_PAIR_GROUP = 'port_pair_group'
NETWORKING_SFC_PORT_PAIR_GROUPS = 'port_pair_groups'
NETWORKING_SFC_PORT_CHAIN = 'port_chain'
NETWORKING_SFC_PORT_CHAINS = 'port_chains'
# Trunk, L2 gateway and BGPVPN resources:
ODL_TRUNK = 'trunk'
ODL_TRUNKS = 'trunks'
ODL_L2GATEWAY = 'l2_gateway'
ODL_L2GATEWAYS = 'l2_gateways'
ODL_L2GATEWAY_CONNECTION = 'l2gateway_connection'
ODL_L2GATEWAY_CONNECTIONS = 'l2_gateway_connections'
ODL_BGPVPN = 'bgpvpn'
ODL_BGPVPNS = 'bgpvpns'
ODL_BGPVPN_NETWORK_ASSOCIATION = 'bgpvpn_network_association'
ODL_BGPVPN_NETWORK_ASSOCIATIONS = 'bgpvpn_network_associations'
ODL_BGPVPN_ROUTER_ASSOCIATION = 'bgpvpn_router_association'
ODL_BGPVPN_ROUTER_ASSOCIATIONS = 'bgpvpn_router_associations'
# Alias of the v2 ML2 mechanism driver entry point:
ODL_ML2_MECH_DRIVER_V2 = "opendaylight_v2"
# Journal operation types:
ODL_CREATE = 'create'
ODL_UPDATE = 'update'
ODL_DELETE = 'delete'
# Constants for journal operation states
PENDING = 'pending'
PROCESSING = 'processing'
FAILED = 'failed'
COMPLETED = 'completed'
# Journal Callback events
BEFORE_COMPLETE = 'before_complete'
# dict to store url mappings
RESOURCE_URL_MAPPINGS = {
    ODL_QOS_POLICY: "%s/%s" % (ODL_QOS, ODL_QOS_POLICIES),
    ODL_SFC_FLOW_CLASSIFIER: "%s/%s" % (ODL_SFC, ODL_SFC_FLOW_CLASSIFIERS),
    ODL_SFC_PORT_CHAIN: "%s/%s" % (ODL_SFC, ODL_SFC_PORT_CHAINS),
    ODL_SFC_PORT_PAIR: "%s/%s" % (ODL_SFC, ODL_SFC_PORT_PAIRS),
    ODL_SFC_PORT_PAIR_GROUP: "%s/%s" % (ODL_SFC, ODL_SFC_PORT_PAIR_GROUPS)
}
networking-odl-16.0.0/networking_odl/common/exceptions.py0000664000175000017500000000365413656750541023657 0ustar zuulzuul00000000000000# Copyright (c) 2017 NEC Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib import exceptions
from neutron._i18n import _
class NetworkingODLException(exceptions.NeutronException):
    """Base Networking-ODL exception.

    All networking-odl specific exceptions derive from this class so that
    callers can catch the entire family with a single except clause.
    """
    pass
class FullSyncError(NetworkingODLException):
    """Base exception for Full Sync"""
    pass
class UnsupportedResourceType(NetworkingODLException):
    """An exception for unsupported resource for full sync and recovery"""
    # :param resource: The unsupported resource type name.
    message = _("unsupported resource type: %(resource)s")
class PluginMethodNotFound(NetworkingODLException, AttributeError):
    """An exception indicating plugin method was not found.

    Specialization of AttributeError and NetworkingODLException indicating
    requested plugin method could not be found.

    :param method: Name of the method being accessed.
    :param plugin: Plugin name expected to have required method.
    """
    message = _("%(method)s not found in %(plugin)s")
class ResourceNotRegistered(FullSyncError):
    """An exception indicating resource is not registered for maintenance task.

    Specialization of FullSync error indicating resource is not registered
    for maintenance tasks full sync and recovery.

    :param resource_type: Resource type not registered for maintenance task.
    """
    message = _("%(resource_type)s resource is not registered for maintenance")
networking-odl-16.0.0/networking_odl/common/filters.py0000664000175000017500000001610313656750541023137 0ustar zuulzuul00000000000000# Copyright (c) 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib import constants as n_const
from oslo_log import log
from oslo_serialization import jsonutils
from networking_odl.common import constants as odl_const
from networking_odl.common import utils as odl_utils
LOG = log.getLogger(__name__)
# NOTE(yamahata): As neutron keystone v3 support, tenant_id would be renamed to
# project_id. In order to keep compatibility, populate both
# 'project_id' and 'tenant_id'
# for details refer to
# https://specs.openstack.org/openstack/neutron-specs/specs/newton/moving-to-keystone-v3.html
def _populate_project_id_and_tenant_id(resource_dict):
# NOTE(yamahata): l3 plugin passes data as dependency_list as python list
# delete_router, delete_floatingip
if not isinstance(resource_dict, dict):
return
project_id = resource_dict.get('project_id',
resource_dict.get('tenant_id'))
if project_id is not None:
# NOTE(yamahata): project_id can be ""(empty string)
resource_dict.setdefault('project_id', project_id)
resource_dict.setdefault('tenant_id', project_id)
def _filter_unmapped_null(resource_dict, unmapped_keys):
    """Drop None-valued *unmapped_keys* from *resource_dict* in place."""
    # NOTE(yamahata): bug work around
    # https://bugs.eclipse.org/bugs/show_bug.cgi?id=475475
    # Null-value for an unmapped element causes next mapped
    # collection to contain a null value
    # JSON: { "unmappedField": null, "mappedCollection": [ "a" ] }
    #
    # Java Object:
    # class Root {
    # Collection mappedCollection = new ArrayList;
    # }
    #
    # Result:
    # Field B contains one element; null
    #
    # TODO(yamahata): update along side with neutron and ODL
    # add when neutron adds more extensions
    # delete when ODL neutron northbound supports it
    # TODO(yamahata): do same thing for other resources
    keys_to_del = [key for key in unmapped_keys
                   if resource_dict.get(key) is None]
    if keys_to_del:
        odl_utils.try_del(resource_dict, keys_to_del)
# Keys not mapped in ODL's neutron northbound YANG model; their null
# values must be stripped before sending (see _filter_unmapped_null).
_NETWORK_UNMAPPED_KEYS = ['qos_policy_id']
_SUBNET_UNMAPPED_KEYS = ['segment_id', 'subnetpool_id']
_PORT_UNMAPPED_KEYS = ['binding:profile', 'dns_name',
                       'port_security_enabled', 'qos_policy_id']
_FIP_UNMAPPED_KEYS = ['port_id', 'fixed_ip_address', 'router_id']
def _filter_network_create(network):
    """Filter out network attributes not required for a create."""
    odl_utils.try_del(network, ['status', 'subnets'])
    _filter_unmapped_null(network, _NETWORK_UNMAPPED_KEYS)
def _filter_network_update(network):
    """Filter out immutable network attributes for an update."""
    odl_utils.try_del(network, ['id', 'status', 'subnets',
                                'tenant_id', 'project_id'])
    _filter_unmapped_null(network, _NETWORK_UNMAPPED_KEYS)
def _filter_floatingip(fip):
    """Strip unmapped null keys from a floating IP dict."""
    _filter_unmapped_null(fip, _FIP_UNMAPPED_KEYS)
def _filter_subnet_create(subnet):
    """Strip unmapped null keys from a subnet create payload."""
    _filter_unmapped_null(subnet, _SUBNET_UNMAPPED_KEYS)
def _filter_subnet_update(subnet):
    """Filter out immutable subnet attributes for an update."""
    odl_utils.try_del(subnet, ['id', 'network_id', 'ip_version', 'cidr',
                               'tenant_id', 'project_id'])
    _filter_unmapped_null(subnet, _SUBNET_UNMAPPED_KEYS)
def _convert_value_to_str(dictionary, key):
    """Serialize dictionary[key] to a JSON string, in place.

    Missing keys are only logged; the dict is left untouched then.
    """
    if key not in dictionary:
        LOG.warning("key %s is not present in dict %s", key, dictionary)
        return
    # use jsonutils to convert unicode & ascii
    dictionary[key] = jsonutils.dumps(dictionary[key])
def _filter_port(port, attributes):
    """Remove *attributes* plus unmapped null keys from *port* in place."""
    odl_utils.try_del(port, attributes)
    _filter_unmapped_null(port, _PORT_UNMAPPED_KEYS)
    # ODL expects binding:profile to be a string, not a dict
    _convert_value_to_str(port, key='binding:profile')
def _filter_port_create(port):
    """Filter out port attributes not required for a create."""
    _filter_port(port, ['status'])
def _filter_port_update(port):
    """Filter out port attributes for an update operation."""
    _filter_port(port, ['network_id', 'id', 'status', 'tenant_id',
                        'project_id'])
def _filter_router_update(router):
    """Filter out attributes for an update operation."""
    odl_utils.try_del(router, ['id', 'tenant_id', 'project_id', 'status'])
# neutron has multiple ICMPv6 names
# https://bugs.launchpad.net/tempest/+bug/1671366
# REVISIT(yamahata): once neutron upstream is fixed to store unified form,
# this can be removed.
# All spellings neutron accepts for ICMPv6; each is normalized to the
# legacy name by _sgrule_scrub_icmpv6_name below.
_ICMPv6_NAMES = (
    n_const.PROTO_NAME_ICMP,
    n_const.PROTO_NAME_IPV6_ICMP,
    n_const.PROTO_NAME_IPV6_ICMP_LEGACY,
)
def _sgrule_scrub_icmpv6_name(sgrule):
    """Normalize any ICMPv6 protocol alias to the legacy name, in place."""
    is_ipv6 = sgrule.get('ethertype') == n_const.IPv6
    if is_ipv6 and sgrule.get('protocol') in _ICMPv6_NAMES:
        sgrule['protocol'] = n_const.PROTO_NAME_IPV6_ICMP_LEGACY
# ODL neutron northbound knows the following protocol names.
# It's safe to pass those names
_ODL_KNOWN_PROTOCOL_NAMES = (
    n_const.PROTO_NAME_TCP,
    n_const.PROTO_NAME_UDP,
    n_const.PROTO_NAME_ICMP,
    n_const.PROTO_NAME_IPV6_ICMP_LEGACY,
)
def _sgrule_scrub_unknown_protocol_name(protocol):
    """Convert an unknown protocol name to the actual integer.

    OpenDaylight doesn't want to keep catching up with the list of
    protocol names, so networking-odl converts any protocol name it does
    not know into the corresponding IP protocol number. Values neutron
    cannot map either are passed through unchanged.
    """
    if protocol in _ODL_KNOWN_PROTOCOL_NAMES:
        return protocol
    # Single lookup with passthrough default replaces the previous
    # membership test + separate indexing.
    return n_const.IP_PROTOCOL_MAP.get(protocol, protocol)
def _filter_security_group_rule(sg_rule):
    """Normalize the protocol naming of a security group rule for ODL."""
    _sgrule_scrub_icmpv6_name(sg_rule)
    protocol = sg_rule.get('protocol')
    if protocol:
        sg_rule['protocol'] = _sgrule_scrub_unknown_protocol_name(protocol)
# (resource type, operation) -> in-place filter applied before the payload
# is handed to ODL; pairs missing here are sent unfiltered.
_FILTER_MAP = {
    (odl_const.ODL_NETWORK, odl_const.ODL_CREATE): _filter_network_create,
    (odl_const.ODL_NETWORK, odl_const.ODL_UPDATE): _filter_network_update,
    (odl_const.ODL_SUBNET, odl_const.ODL_CREATE): _filter_subnet_create,
    (odl_const.ODL_SUBNET, odl_const.ODL_UPDATE): _filter_subnet_update,
    (odl_const.ODL_PORT, odl_const.ODL_CREATE): _filter_port_create,
    (odl_const.ODL_PORT, odl_const.ODL_UPDATE): _filter_port_update,
    (odl_const.ODL_ROUTER, odl_const.ODL_UPDATE): _filter_router_update,
    (odl_const.ODL_SG_RULE, odl_const.ODL_CREATE): _filter_security_group_rule,
    (odl_const.ODL_SG_RULE, odl_const.ODL_UPDATE): _filter_security_group_rule,
    (odl_const.ODL_FLOATINGIP, odl_const.ODL_UPDATE): _filter_floatingip,
}
def filter_for_odl(object_type, operation, data):
    """Scrub *data* in place before it is sent to ODL.

    Applies the per (type, operation) attribute filter, if any, and
    always mirrors project_id/tenant_id for keystone v3 compatibility.
    """
    filter_func = _FILTER_MAP.get((object_type, operation))
    if filter_func is not None:
        filter_func(data)
    _populate_project_id_and_tenant_id(data)
networking-odl-16.0.0/networking_odl/qos/0000775000175000017500000000000013656750617020432 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/qos/__init__.py0000664000175000017500000000000013656750541022525 0ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/qos/qos_utils.py0000664000175000017500000000256513656750541023032 0ustar zuulzuul00000000000000# Copyright (c) 2016 Intel Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
def enforce_policy_format(policy):
    """Ensure both ODL rule list keys exist on *policy*, in place."""
    policy.setdefault('bandwidth_limit_rules', [])
    policy.setdefault('dscp_marking_rules', [])
    return policy
# NOTE(manjeets) keeping common methods for formatting
# qos data in qos_utils for code reuse.
def convert_rules_format(data):
    """Reshape a neutron QoS policy dict into ODL's per-type rule lists.

    The flat 'rules' list becomes one '<type>_rules' list per rule type,
    each rule tagged with the policy's tenant_id. The input *data* is not
    modified (previously the original rule dicts were mutated in place).
    """
    policy = copy.deepcopy(data)
    policy.pop('tenant_id', None)
    # Iterate the deep-copied rules so the caller's dicts stay untouched.
    rules = policy.pop('rules', None) or []
    for rule in rules:
        rule_type = rule['type'] + '_rules'
        rule.pop('type', None)
        rule.pop('qos_policy_id', None)
        rule['tenant_id'] = data['tenant_id']
        policy[rule_type] = [rule]
    return enforce_policy_format(policy)
networking-odl-16.0.0/networking_odl/qos/qos_driver_v2.py0000664000175000017500000000752313656750541023573 0ustar zuulzuul00000000000000# Copyright (c) 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import portbindings
from neutron_lib import constants
from neutron_lib.db import constants as db_const
from neutron_lib.plugins import constants as nlib_const
from neutron_lib.services.qos import base
from neutron_lib.services.qos import constants as qos_consts
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
from networking_odl.common import constants as odl_const
from networking_odl.common import odl_features
from networking_odl.journal import full_sync
from networking_odl.journal import journal
from networking_odl.qos import qos_utils
LOG = logging.getLogger(__name__)
# Port binding VIF/VNIC types this QoS driver handles.
VIF_TYPES = [portbindings.VIF_TYPE_OVS, portbindings.VIF_TYPE_VHOST_USER]
VNIC_TYPES = [portbindings.VNIC_NORMAL]
# Resources registered for full sync: singular name -> plural URL name.
QOS_RESOURCES = {
    odl_const.ODL_QOS_POLICY: odl_const.ODL_QOS_POLICIES
}
# Rule support assumed when ODL does not advertise 'qos-rules'
# (see OpenDaylightQosDriver.create): egress bandwidth limiting only.
DEFAULT_QOS_RULES = {
    qos_consts.RULE_TYPE_BANDWIDTH_LIMIT: {
        qos_consts.MAX_KBPS: {
            'type:range': [0, db_const.DB_INTEGER_MAX_VALUE]},
        qos_consts.MAX_BURST: {
            'type:range': [0, db_const.DB_INTEGER_MAX_VALUE]},
        qos_consts.DIRECTION: {
            'type:values': [constants.EGRESS_DIRECTION]}
    }
}
# odl_features key under which ODL advertises its supported QoS rules.
QOS_RULES = 'qos-rules'
class OpenDaylightQosDriver(base.DriverBase):
    """OpenDaylight QOS Driver

    This code is backend implementation for OpenDaylight Qos
    driver for Openstack Neutron.
    Precommit hooks record the operation in the journal inside the DB
    transaction; postcommit hooks wake the journal thread to sync.
    """
    @staticmethod
    def create():
        # Use the rule set advertised by ODL when available, otherwise
        # fall back to the conservative defaults.
        try:
            supported_rules = odl_features.get_config(QOS_RULES)
        except KeyError:
            supported_rules = DEFAULT_QOS_RULES
        return OpenDaylightQosDriver(supported_rules=supported_rules)
    def __init__(self, supported_rules,
                 name='OpenDaylightQosDriver',
                 vif_types=VIF_TYPES,
                 vnic_types=VNIC_TYPES,
                 requires_rpc_notifications=False):
        super(OpenDaylightQosDriver, self).__init__(
            name, vif_types, vnic_types, supported_rules,
            requires_rpc_notifications)
        LOG.debug("Initializing OpenDaylight Qos driver")
        self.journal = journal.OpenDaylightJournalThread()
        full_sync.register(nlib_const.QOS, QOS_RESOURCES)
    def _record_in_journal(self, context, op_const, qos_policy):
        # Convert the policy to ODL's per-type rule-list format before
        # persisting the journal entry.
        data = qos_utils.convert_rules_format(qos_policy.to_dict())
        journal.record(context, odl_const.ODL_QOS_POLICY,
                       data['id'], op_const, data)
    @log_helpers.log_method_call
    def create_policy_precommit(self, context, qos_policy):
        self._record_in_journal(context, odl_const.ODL_CREATE, qos_policy)
    @log_helpers.log_method_call
    def update_policy_precommit(self, context, qos_policy):
        self._record_in_journal(context, odl_const.ODL_UPDATE, qos_policy)
    @log_helpers.log_method_call
    def delete_policy_precommit(self, context, qos_policy):
        self._record_in_journal(context, odl_const.ODL_DELETE, qos_policy)
    @log_helpers.log_method_call
    def create_policy(self, context, policy):
        # Postcommit: nudge the journal thread to push the recorded entry.
        self.journal.set_sync_event()
    @log_helpers.log_method_call
    def update_policy(self, context, policy):
        self.journal.set_sync_event()
    @log_helpers.log_method_call
    def delete_policy(self, context, policy):
        self.journal.set_sync_event()
networking-odl-16.0.0/networking_odl/hacking/0000775000175000017500000000000013656750617021234 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/hacking/__init__.py0000664000175000017500000000000013656750541023327 0ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/hacking/checks.py0000664000175000017500000001103113656750541023036 0ustar zuulzuul00000000000000# Copyright 2017 Intel Corporation.
# Copyright 2017 Isaku Yamahata
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import tokenize
from hacking.checks import docstrings
# TODO(yamahata): enable neutron checking
# from neutron.hacking import checks
from neutron_lib.hacking import checks
# ND01: the project name must be spelled with a capital D.
_ND01_MSG = (
    "ND01: use OpenDaylight (capital D) instead of Opendaylight") # noqa
_ND01_OPENDAYLIGHT = 'Opendaylight' # noqa
# ND02: tests must change config through the oslo config fixture.
_ND02_MSG = (
    "ND02: use the config fixture provided by oslo_config and use config()"
    " instead of %s") # noqa
_ND02_REGEXP_DIRECT = re.compile(r'cfg\.CONF\..* =')
# ND03: "import x as x" aliases are redundant.
_ND03_MSG = (
    "ND03: The import of %s has a redundant alias."
)
_ND03_REGEXP_REDUNDANT_IMPORT_ALIAS = re.compile(r'.*import (.+) as \1$')
def check_opendaylight_lowercase(logical_line, filename, noqa):
    """ND01 - Enforce using OpenDaylight."""
    if noqa:
        return
    pos = logical_line.find(_ND01_OPENDAYLIGHT)
    if pos >= 0:
        yield (pos, _ND01_MSG)
def _check_opendaylight_lowercase(logical_line, tokens, noqa, token_type):
    """ND01 - Enforce using OpenDaylight in given token."""
    if noqa:
        return
    for tok_type, text, start_index, _end, _line in tokens:
        if tok_type != token_type:
            continue
        pos = text.find(_ND01_OPENDAYLIGHT)
        if pos < 0:
            continue
        msg = "{} in {}".format(
            _ND01_MSG, tokenize.tok_name[token_type].lower())
        yield (start_index[1] + pos, msg)
def check_opendaylight_lowercase_comment(logical_line, tokens, noqa):
    """ND01 - Enforce using OpenDaylight in comment."""
    # Python 3 generator delegation replaces the manual re-yield loop.
    yield from _check_opendaylight_lowercase(
        logical_line, tokens, noqa, tokenize.COMMENT)
def check_opendaylight_lowercase_string(logical_line, tokens, noqa):
    """ND01 - Enforce using OpenDaylight in string."""
    # Python 3 generator delegation replaces the manual re-yield loop.
    yield from _check_opendaylight_lowercase(
        logical_line, tokens, noqa, tokenize.STRING)
def check_opendaylight_lowercase_docstring(
        physical_line, previous_logical, tokens):
    """ND01 - Enforce using OpenDaylight in docstring."""
    docstring = docstrings.is_docstring(tokens, previous_logical)
    if docstring and _ND01_OPENDAYLIGHT in docstring:
        # NOTE(review): the match is found in the whole docstring but pos is
        # searched in this physical line only; for a multi-line docstring
        # pos may be -1. Confirm whether per-line reporting is intended.
        pos = physical_line.find(_ND01_OPENDAYLIGHT)
        return (pos, _ND01_MSG + " in docstring")
    return None
def check_config_over_set_override(logical_line, filename, noqa):
    """ND02 - Enforcement of config fixture

    Test code must not call cfg.CONF.set_override(); it must use the
    config() helper of the oslo_config fixture instead.
    """
    if noqa:
        return
    if 'networking_odl/tests/' not in filename:
        return
    if 'cfg.CONF.set_override' not in logical_line:
        return
    yield (0, _ND02_MSG % "using cfg.CONF.set_override()")
def check_config_over_direct_override(logical_line, filename, noqa):
    """ND02 - Enforcement of config fixture

    Test code must not assign to cfg.CONF attributes directly; it must
    use the config() helper of the oslo_config fixture instead.
    """
    if noqa or 'networking_odl/tests/' not in filename:
        return
    if _ND02_REGEXP_DIRECT.match(logical_line):
        yield (0, _ND02_MSG % "overriding it directly.")
def check_redundant_import_alias(logical_line):
    """ND03 - Checking no redundant import alias.

    ND03: from neutron.plugins.ml2 import driver_context as driver_context
    OK: from neutron.plugins.ml2 import driver_context
    """
    # The pattern is pre-compiled; call its match() method directly
    # instead of routing it back through re.match().
    match = _ND03_REGEXP_REDUNDANT_IMPORT_ALIAS.match(logical_line)
    if match:
        yield (0, _ND03_MSG % match.group(1))
def factory(register):
    """Register neutron-lib's checks plus all networking-odl ND checks."""
    checks.factory(register)
    local_checks = (
        check_opendaylight_lowercase,
        check_opendaylight_lowercase_comment,
        check_opendaylight_lowercase_string,
        check_opendaylight_lowercase_docstring,
        check_config_over_set_override,
        check_config_over_direct_override,
        check_redundant_import_alias,
    )
    for check in local_checks:
        register(check)
networking-odl-16.0.0/networking_odl/tests/0000775000175000017500000000000013656750617020772 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/tests/match.py0000664000175000017500000000251313656750541022435 0ustar zuulzuul00000000000000# Copyright (c) 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fnmatch
import re
from oslo_serialization import jsonutils
def json(obj):
    """Return a matcher equal to any JSON text that decodes to *obj*."""
    return MatchJson(obj)
class MatchJson(object):
    """Equality matcher comparing a python object against JSON text."""
    def __init__(self, obj):
        self._obj = obj
    def __eq__(self, json_text):
        decoded = jsonutils.loads(json_text)
        return decoded == self._obj
    def __repr__(self):
        return "MatchJson({})".format(repr(self._obj))
def wildcard(text):
    """Return a matcher equal to any string matching the glob *text*."""
    return MatchWildcard(text)
class MatchWildcard(object):
    """Equality matcher based on a shell-style (fnmatch) wildcard pattern."""
    def __init__(self, obj):
        pattern = str(obj)
        self._text = pattern
        # fnmatch.translate anchors the pattern, so the whole string
        # must match, not just a prefix.
        self._reg = re.compile(fnmatch.translate(pattern))
    def __eq__(self, obj):
        return self._reg.match(str(obj))
    def __repr__(self):
        return "MatchWildcard({})".format(self._text)
networking-odl-16.0.0/networking_odl/tests/__init__.py0000664000175000017500000000000013656750541023065 0ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/tests/unit/0000775000175000017500000000000013656750617021751 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/tests/unit/__init__.py0000664000175000017500000000000013656750541024044 0ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/tests/unit/dhcp/0000775000175000017500000000000013656750617022667 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/tests/unit/dhcp/__init__.py0000664000175000017500000000000013656750541024762 0ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/tests/unit/dhcp/test_odl_dhcp_driver_base.py0000664000175000017500000003074613656750541030427 0ustar zuulzuul00000000000000# Copyright (c) 2017 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testscenarios
from neutron_lib import constants as n_const
from neutron_lib.plugins import directory
from networking_odl.dhcp import odl_dhcp_driver_base as driver_base
from networking_odl.ml2 import mech_driver_v2
from networking_odl.tests import base as odl_base
from networking_odl.tests.unit import base_v2
from oslo_config import cfg
from oslo_utils import uuidutils
# Required to generate tests from scenarios. Not compatible with nose.
load_tests = testscenarios.load_tests_apply_scenarios
# Tenant/project id shared by every network and subnet built in these tests.
ODL_TENANT_ID = uuidutils.generate_uuid()
# Register the [ml2_odl] option group before any test reads or tweaks it.
cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')
class OdlDhcpDriverTestBase(base_v2.OpenDaylightConfigBase):
    """Shared helpers that build network/subnet contexts for DHCP tests."""

    def setUp(self):
        # Neutralize ODL feature probing, the journal thread and the
        # pseudo-agent pre-population before the config base sets up.
        self.useFixture(odl_base.OpenDaylightFeaturesFixture())
        self.useFixture(odl_base.OpenDaylightJournalThreadFixture())
        self.useFixture(odl_base.OpenDaylightPseudoAgentPrePopulateFixture())
        super(OdlDhcpDriverTestBase, self).setUp()
    def get_network_and_subnet_context(self, cidr, dhcp_flag, create_subnet,
                                       create_network, ipv4=True):
        """Build (and optionally persist) a network plus subnet for *cidr*.

        Returns a dict carrying the generated ids, the core plugin, the
        request context and - when created - the resource dicts and
        their plugin contexts.
        """
        data = {}
        network_id = uuidutils.generate_uuid()
        subnet_id = uuidutils.generate_uuid()
        plugin = directory.get_plugin()
        data['network_id'] = network_id
        data['subnet_id'] = subnet_id
        data['context'] = self.context
        data['plugin'] = plugin
        network, network_context = \
            self.get_network_context(network_id, create_network, ipv4)
        if create_network:
            data['network_context'] = network_context
            data['network'] = network
        subnet, subnet_context = \
            self.get_subnet_context(network_id, subnet_id, cidr,
                                    dhcp_flag, create_subnet, ipv4)
        if create_subnet:
            data['subnet_context'] = subnet_context
            data['subnet'] = subnet
        return data
    def get_subnet_context(self, network_id, subnet_id, cidr,
                           dhcp_flag, create_subnet, ipv4=True):
        """Build a subnet request body; optionally persist it in the DB.

        NOTE(review): a (subnet, subnet_context) pair is returned only
        when create_subnet is True; otherwise a bare subnet dict is
        returned, which would break callers that unpack two values.
        All current callers pass create_subnet=True - confirm intended.
        """
        if ipv4:
            # e.g. '10.0.10.0/24' -> pool 10.0.10.2-10.0.10.254,
            # gateway 10.0.10.1.
            index = cidr.rfind('.') + 1
            ip_range = cidr[:index]
            cidr_end = ip_range + str(254)
            ipv6_ramode = None
            ipv6_addmode = None
            ipversion = 4
        else:
            # IPv6 subnets are configured for SLAAC address assignment.
            index = cidr.rfind(':') + 1
            ip_range = cidr[:index]
            cidr_end = cidr[:index - 1] + 'ffff:ffff:ffff:fffe'
            ipv6_ramode = 'slaac'
            ipv6_addmode = 'slaac'
            ipversion = 6
        current = {'ipv6_ra_mode': ipv6_ramode,
                   'allocation_pools': [{'start': ip_range + str(2),
                                         'end': cidr_end}],
                   'host_routes': [],
                   'ipv6_address_mode': ipv6_addmode,
                   'cidr': cidr,
                   'id': subnet_id,
                   'name': '',
                   'enable_dhcp': dhcp_flag,
                   'network_id': network_id,
                   'tenant_id': ODL_TENANT_ID,
                   'project_id': ODL_TENANT_ID,
                   'dns_nameservers': [],
                   'gateway_ip': ip_range + str(1),
                   'ip_version': ipversion,
                   'shared': False}
        subnet = {'subnet': AttributeDict(current)}
        if create_subnet:
            plugin = directory.get_plugin()
            result, subnet_context = plugin._create_subnet_db(self.context,
                                                              subnet)
            return subnet, subnet_context
        else:
            return subnet
    def get_network_context(self, network_id, create_network, ipv4=True):
        """Build a network request body; optionally persist it in the DB.

        NOTE(review): returns [network, network_context] when
        create_network is True but a bare network dict otherwise; the
        caller always unpacks two values - confirm intended.
        """
        netwrk = 'netv4'
        if not ipv4:
            netwrk = 'netv6'
        current = {'status': 'ACTIVE',
                   'subnets': [],
                   'name': netwrk,
                   'provider:physical_network': None,
                   'admin_state_up': True,
                   'tenant_id': ODL_TENANT_ID,
                   'project_id': ODL_TENANT_ID,
                   'provider:network_type': 'local',
                   'router:external': False,
                   'shared': False,
                   'id': network_id,
                   'provider:segmentation_id': None}
        network = {'network': AttributeDict(current)}
        if create_network:
            plugin = directory.get_plugin()
            result, network_context = plugin._create_network_db(
                self.context, network)
            return [network, network_context]
        return network
    def get_port_id(self, plugin, plugin_context, network_id, subnet_id):
        """Return the id of the ODL DHCP port on (network, subnet), or None.

        Looks the port up by the device id the DHCP driver derives from
        the subnet and the reserved DHCP device owner.
        """
        device_id = driver_base.OPENDAYLIGHT_DEVICE_ID + '-' + subnet_id
        filters = {
            'network_id': [network_id],
            'device_id': [device_id],
            'device_owner': [n_const.DEVICE_OWNER_DHCP]
        }
        ports = plugin.get_ports(plugin_context, filters=filters)
        if ports:
            port = ports[0]
            return port['id']
class OdlDhcpDriverBaseTestCase(OdlDhcpDriverTestBase):
    """Direct tests of OdlDhcpDriverBase.create_or_delete_dhcp_port.

    Each test builds a persisted network/subnet pair, drives the DHCP
    port lifecycle through the driver and checks whether the ODL DHCP
    port exists afterwards.
    """

    def setUp(self):
        super(OdlDhcpDriverBaseTestCase, self).setUp()
    def test_dhcp_driver_not_loaded_without_flag(self):
        # Without enable_dhcp_service the mechanism driver must not grow
        # a dhcp_driver attribute.
        mech = mech_driver_v2.OpenDaylightMechanismDriver()
        mech.initialize()
        args = [mech, 'dhcp_driver']
        self.assertRaises(AttributeError, args, getattr, *args) if False else \
            self.assertRaises(AttributeError, getattr, *args)
    def test_dhcp_port_create(self):
        dhcp_driver = driver_base.OdlDhcpDriverBase()
        data = self.get_network_and_subnet_context('10.0.10.0/24', True, True,
                                                   True)
        dhcp_driver.create_or_delete_dhcp_port(data['subnet_context'])
        port = self.get_port_id(data['plugin'],
                                data['context'],
                                data['network_id'],
                                data['subnet_id'])
        self.assertIsNotNone(port)
    def test_dhcp_port_create_v6network(self):
        # IPv6 (SLAAC) subnets must not get an ODL DHCP port.
        dhcp_driver = driver_base.OdlDhcpDriverBase()
        data = self.get_network_and_subnet_context('2001:db8:abcd:0012::0/64',
                                                   True, True, True, False)
        dhcp_driver.create_or_delete_dhcp_port(data['subnet_context'])
        port = self.get_port_id(data['plugin'],
                                data['context'],
                                data['network_id'],
                                data['subnet_id'])
        self.assertIsNone(port)
    def test_dhcp_create_without_dhcp_flag(self):
        dhcp_driver = driver_base.OdlDhcpDriverBase()
        data = self.get_network_and_subnet_context('10.0.20.0/24', False, True,
                                                   True)
        dhcp_driver.create_or_delete_dhcp_port(data['subnet_context'])
        port = self.get_port_id(data['plugin'],
                                data['context'],
                                data['network_id'],
                                data['subnet_id'])
        self.assertIsNone(port)
    def test_dhcp_port_create_with_multiple_create_request(self):
        dhcp_driver = driver_base.OdlDhcpDriverBase()
        data = self.get_network_and_subnet_context('10.0.30.0/24', True, True,
                                                   True)
        dhcp_driver.create_or_delete_dhcp_port(data['subnet_context'])
        dhcp_driver.create_or_delete_dhcp_port(data['subnet_context'])
        # If multiple ports were created, one_or_none would raise
        # MultipleResultsFound - so reaching the lookup proves there is
        # at most one.
        port = self.get_port_id(data['plugin'],
                                data['context'],
                                data['network_id'],
                                data['subnet_id'])
        self.assertIsNotNone(port)
    def test_dhcp_update_from_disable_to_enable(self):
        dhcp_driver = driver_base.OdlDhcpDriverBase()
        data = self.get_network_and_subnet_context('10.0.40.0/24', False, True,
                                                   True)
        subnet_context = data['subnet_context']
        dhcp_driver.create_or_delete_dhcp_port(subnet_context)
        subnet_context.current['enable_dhcp'] = True
        dhcp_driver.create_or_delete_dhcp_port(subnet_context)
        port = self.get_port_id(data['plugin'],
                                data['context'],
                                data['network_id'],
                                data['subnet_id'])
        self.assertIsNotNone(port)
    def test_dhcp_update_from_enable_to_enable(self):
        dhcp_driver = driver_base.OdlDhcpDriverBase()
        data = self.get_network_and_subnet_context('10.0.50.0/24', True, True,
                                                   True)
        subnet_context = data['subnet_context']
        dhcp_driver.create_or_delete_dhcp_port(subnet_context)
        subnet_context.current['enable_dhcp'] = True
        dhcp_driver.create_or_delete_dhcp_port(subnet_context)
        port = self.get_port_id(data['plugin'],
                                data['context'],
                                data['network_id'],
                                data['subnet_id'])
        self.assertIsNotNone(port)
    def test_dhcp_update_from_enable_to_disable(self):
        dhcp_driver = driver_base.OdlDhcpDriverBase()
        data = self.get_network_and_subnet_context('10.0.60.0/24', True, True,
                                                   True)
        subnet_context = data['subnet_context']
        dhcp_driver.create_or_delete_dhcp_port(subnet_context)
        subnet_context.current['enable_dhcp'] = False
        dhcp_driver.create_or_delete_dhcp_port(subnet_context)
        port = self.get_port_id(data['plugin'],
                                data['context'],
                                data['network_id'],
                                data['subnet_id'])
        self.assertIsNone(port)
    def test_dhcp_update_from_disable_to_disable(self):
        dhcp_driver = driver_base.OdlDhcpDriverBase()
        data = self.get_network_and_subnet_context('10.0.70.0/24', False, True,
                                                   True)
        subnet_context = data['subnet_context']
        dhcp_driver.create_or_delete_dhcp_port(subnet_context)
        subnet_context.current['enable_dhcp'] = False
        dhcp_driver.create_or_delete_dhcp_port(subnet_context)
        port = self.get_port_id(data['plugin'],
                                data['context'],
                                data['network_id'],
                                data['subnet_id'])
        self.assertIsNone(port)
    def test_dhcp_delete_when_dhcp_enabled(self):
        # NOTE(review): same flow as
        # test_dhcp_update_from_enable_to_disable - confirm whether a
        # distinct scenario was intended here.
        dhcp_driver = driver_base.OdlDhcpDriverBase()
        data = self.get_network_and_subnet_context('10.0.80.0/24', True, True,
                                                   True)
        subnet_context = data['subnet_context']
        dhcp_driver.create_or_delete_dhcp_port(subnet_context)
        subnet_context.current['enable_dhcp'] = False
        dhcp_driver.create_or_delete_dhcp_port(subnet_context)
        port = self.get_port_id(data['plugin'],
                                data['context'],
                                data['network_id'],
                                data['subnet_id'])
        self.assertIsNone(port)
    def test_dhcp_delete_when_dhcp_delete(self):
        dhcp_driver = driver_base.OdlDhcpDriverBase()
        data = self.get_network_and_subnet_context('10.0.90.0/24', False, True,
                                                   True)
        subnet_context = data['subnet_context']
        dhcp_driver.create_or_delete_dhcp_port(subnet_context)
        dhcp_driver.create_or_delete_dhcp_port(subnet_context)
        port = self.get_port_id(data['plugin'],
                                data['context'],
                                data['network_id'],
                                data['subnet_id'])
        self.assertIsNone(port)
class AttributeDict(dict):
    """Dictionary whose items are also readable and writable as attributes."""

    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        # Alias the instance __dict__ to the mapping itself, so every
        # key becomes an attribute and every attribute becomes a key.
        self.__dict__ = self
networking-odl-16.0.0/networking_odl/tests/unit/dhcp/test_odl_dhcp_driver.py0000664000175000017500000001012313656750541027420 0ustar zuulzuul00000000000000# Copyright (c) 2017 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import testscenarios
from networking_odl.common import constants as odl_const
from networking_odl.dhcp import odl_dhcp_driver
from networking_odl.ml2 import mech_driver_v2
from networking_odl.tests.unit.dhcp import test_odl_dhcp_driver_base
from oslo_config import cfg
# Required to generate tests from scenarios. Not compatible with nose.
load_tests = testscenarios.load_tests_apply_scenarios
# Register the [ml2_odl] option group before setUp tweaks it.
cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')
class OdlDhcpDriverTestCase(test_odl_dhcp_driver_base.OdlDhcpDriverTestBase):
    """Journal-driven tests of the ODL DHCP port driver wiring."""

    def setUp(self):
        super(OdlDhcpDriverTestCase, self).setUp()
        # Opt into the ODL-managed DHCP service so the mechanism driver
        # instantiates its DHCP driver during initialize().
        self.cfg.config(enable_dhcp_service=True, group='ml2_odl')
        self.mech = mech_driver_v2.OpenDaylightMechanismDriver()
        self.mech.initialize()
    def test_dhcp_flag_test(self):
        """The enable_dhcp_service option is effective for this case."""
        self.assertTrue(cfg.CONF.ml2_odl.enable_dhcp_service)
    def test_dhcp_driver_load(self):
        """The mechanism driver loads an OdlDhcpDriver instance."""
        # assertIsInstance reports the actual type on failure, unlike
        # assertTrue(isinstance(...)) which only reports "False".
        self.assertIsInstance(self.mech.dhcp_driver,
                              odl_dhcp_driver.OdlDhcpDriver)
    def test_dhcp_port_create_on_subnet_event(self):
        """A subnet-create journal entry yields an ODL DHCP port."""
        data = self.get_network_and_subnet_context('10.0.50.0/24', True, True,
                                                   True)
        subnet_context = data['subnet_context']
        mech_driver_v2.OpenDaylightMechanismDriver._record_in_journal(
            subnet_context, odl_const.ODL_SUBNET, odl_const.ODL_CREATE)
        self.mech.journal.sync_pending_entries()
        port = self.get_port_id(data['plugin'],
                                data['context'],
                                data['network_id'],
                                data['subnet_id'])
        self.assertIsNotNone(port)
    def test_dhcp_port_create_on_v6subnet_event(self):
        """No DHCP port is created for an IPv6 (SLAAC) subnet."""
        data = self.get_network_and_subnet_context('2001:db8:abcd:0012::0/64',
                                                   True, True, True, False)
        subnet_context = data['subnet_context']
        mech_driver_v2.OpenDaylightMechanismDriver._record_in_journal(
            subnet_context, odl_const.ODL_SUBNET, odl_const.ODL_CREATE)
        self.mech.journal.sync_pending_entries()
        port = self.get_port_id(data['plugin'],
                                data['context'],
                                data['network_id'],
                                data['subnet_id'])
        self.assertIsNone(port)
    def test_dhcp_delete_on_port_update_event(self):
        """Clearing the DHCP port's fixed IPs removes it on port update."""
        data = self.get_network_and_subnet_context('10.0.50.0/24', True, True,
                                                   True)
        subnet_context = data['subnet_context']
        plugin = data['plugin']
        self.mech.dhcp_driver.create_or_delete_dhcp_port(subnet_context)
        port_id = self.get_port_id(data['plugin'],
                                   data['context'],
                                   data['network_id'],
                                   data['subnet_id'])
        self.assertIsNotNone(port_id)
        port = plugin.get_port(data['context'], port_id)
        port['fixed_ips'] = []
        ports = {'port': port}
        plugin.update_port(data['context'], port_id, ports)
        mech_driver_v2.OpenDaylightMechanismDriver._record_in_journal(
            subnet_context, odl_const.ODL_PORT, odl_const.ODL_UPDATE, port)
        self.mech.journal.sync_pending_entries()
        port_id = self.get_port_id(data['plugin'],
                                   data['context'],
                                   data['network_id'],
                                   data['subnet_id'])
        self.assertIsNone(port_id)
networking-odl-16.0.0/networking_odl/tests/unit/l3/0000775000175000017500000000000013656750617022267 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/tests/unit/l3/__init__.py0000664000175000017500000000000013656750541024362 0ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/tests/unit/l3/test_l3_odl_v2.py0000664000175000017500000005274113656750541025470 0ustar zuulzuul00000000000000# Copyright (c) 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import mock
import requests
from neutron.db import l3_db
from neutron.plugins.ml2 import plugin
from neutron.tests import base
from neutron.tests.unit.db import test_db_base_plugin_v2
from neutron.tests.unit import testlib_api
from neutron_lib.api.definitions import external_net
from neutron_lib.plugins import constants
from neutron_lib.plugins import directory
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from networking_odl.common import client
from networking_odl.common import constants as odl_const
from networking_odl.common import filters
from networking_odl.db import db
from networking_odl.journal import journal
from networking_odl.l3 import l3_odl_v2
from networking_odl.ml2 import mech_driver_v2
from networking_odl.tests import base as odl_base
from networking_odl.tests.unit import test_base_db
# Canonical dependency payload for a router with no gateway port.
EMPTY_DEP = {'gw_port_id': None}
# Stable random ids reused across the dependency-validation tests below.
FLOATINGIP_ID = uuidutils.generate_uuid()
NETWORK_ID = uuidutils.generate_uuid()
ROUTER_ID = uuidutils.generate_uuid()
SUBNET_ID = uuidutils.generate_uuid()
PORT_ID = uuidutils.generate_uuid()
class OpenDayLightMechanismConfigTests(testlib_api.SqlTestCase):
    """Validate that Ml2Plugin enforces the required [ml2_odl] options."""

    def setUp(self):
        self.useFixture(odl_base.OpenDaylightRestClientFixture())
        self.useFixture(odl_base.OpenDaylightFeaturesFixture())
        self.cfg = self.useFixture(config_fixture.Config())
        self.useFixture(odl_base.OpenDaylightJournalThreadFixture())
        super(OpenDayLightMechanismConfigTests, self).setUp()
        self.cfg.config(mechanism_drivers=[
            'logger', 'opendaylight_v2'], group='ml2')
        self.cfg.config(
            port_binding_controller='legacy-port-binding', group='ml2_odl')
    def _set_config(self, url='http://127.0.0.1:9999', username='someuser',
                    password='somepass'):
        """Set the three mandatory [ml2_odl] options (None to unset one)."""
        self.cfg.config(url=url, group='ml2_odl')
        self.cfg.config(username=username, group='ml2_odl')
        self.cfg.config(password=password, group='ml2_odl')
    def _test_missing_config(self, **kwargs):
        """Assert plugin construction fails when an option is missing."""
        self._set_config(**kwargs)
        self.assertRaisesRegex(cfg.RequiredOptError,
                               r'value required for option \w+ in group '
                               r'\[ml2_odl\]',
                               plugin.Ml2Plugin)
    def test_valid_config(self):
        self._set_config()
        plugin.Ml2Plugin()
    def test_missing_url_raises_exception(self):
        self._test_missing_config(url=None)
    def test_missing_username_raises_exception(self):
        self._test_missing_config(username=None)
    def test_missing_password_raises_exception(self):
        self._test_missing_config(password=None)
class DataMatcher(object):
    """Matcher comparing an ODL-filtered resource dict to JSON payloads.

    Equality holds when the candidate JSON text, once decoded, carries
    under the ``object_type`` key the same body as the dict captured
    (and filtered for ODL) at construction time.
    """

    def __init__(self, operation, object_type, object_dict):
        self._object_type = object_type
        self._data = object_dict.copy()
        # Apply the same outbound filtering the driver applies before
        # sending, so the comparison mirrors the wire format.
        filters.filter_for_odl(object_type, operation, self._data)

    def __eq__(self, s):
        payload = jsonutils.loads(s)
        return self._data == payload[self._object_type]

    def __ne__(self, s):
        return not self.__eq__(s)
class OpenDaylightL3TestCase(test_db_base_plugin_v2.NeutronDbPluginV2TestCase,
                             test_base_db.ODLBaseDbTestCase,
                             base.BaseTestCase):
    """End-to-end tests of the ODL L3 (router / floating IP) journal flow."""

    def setUp(self):
        self.cfg = self.useFixture(config_fixture.Config())
        self.cfg.config(core_plugin='neutron.plugins.ml2.plugin.Ml2Plugin')
        self.cfg.config(mechanism_drivers=[
            'logger', 'opendaylight_v2'], group='ml2')
        self.useFixture(odl_base.OpenDaylightRestClientFixture())
        self.cfg.config(service_plugins=['odl-router_v2'])
        core_plugin = cfg.CONF.core_plugin
        service_plugins = {'l3_plugin_name': 'odl-router_v2'}
        self.useFixture(odl_base.OpenDaylightJournalThreadFixture())
        # Stub the ML2 driver's journal recording and callback sync so
        # only the L3 plugin's own journal entries are exercised here.
        mock.patch.object(mech_driver_v2.OpenDaylightMechanismDriver,
                          '_record_in_journal').start()
        mock.patch.object(mech_driver_v2.OpenDaylightMechanismDriver,
                          'sync_from_callback_precommit').start()
        mock.patch.object(mech_driver_v2.OpenDaylightMechanismDriver,
                          'sync_from_callback_postcommit').start()
        self.useFixture(odl_base.OpenDaylightPeriodicTaskFixture())
        self.useFixture(odl_base.OpenDaylightFeaturesFixture())
        self.useFixture(odl_base.OpenDaylightPseudoAgentPrePopulateFixture())
        super(OpenDaylightL3TestCase, self).setUp(
            plugin=core_plugin, service_plugins=service_plugins)
        self.plugin = directory.get_plugin()
        # Treat every network as external so router gateways are allowed.
        self.plugin._network_is_external = mock.Mock(return_value=True)
        self.driver = directory.get_plugin(constants.L3)
        self.thread = journal.OpenDaylightJournalThread()
    @staticmethod
    def _get_mock_router_operation_info(network, subnet):
        """Return a minimal router request body gatewayed on *network*."""
        router = {odl_const.ODL_ROUTER:
                  {'name': 'router1',
                   'admin_state_up': True,
                   'tenant_id': network['network']['tenant_id'],
                   'external_gateway_info': {'network_id':
                                             network['network']['id']}}}
        return router
    @staticmethod
    def _get_mock_floatingip_operation_info(network, subnet):
        """Return a minimal floating IP request body on *network*."""
        floatingip = {odl_const.ODL_FLOATINGIP:
                      {'floating_network_id': network['network']['id'],
                       'tenant_id': network['network']['tenant_id'],
                       'subnet_id': None,
                       'floating_ip_address': None}}
        return floatingip
    @staticmethod
    def _get_mock_router_interface_operation_info(network, subnet):
        """Return a minimal router-interface body for *network*/*subnet*."""
        router_intf_dict = {'subnet_id': subnet['subnet']['id'],
                            'id': network['network']['id']}
        return router_intf_dict
    @classmethod
    def _get_mock_operation_info(cls, object_type, *args):
        # Dispatch to _get_mock_<object_type>_operation_info by name.
        getter = getattr(cls, '_get_mock_' + object_type + '_operation_info')
        return getter(*args)
    @classmethod
    def _get_mock_request_response(cls, status_code):
        """Return a mock requests response honouring raise_for_status."""
        response = mock.Mock(status_code=status_code)
        response.raise_for_status = mock.Mock() if status_code < 400 else (
            mock.Mock(side_effect=requests.exceptions.HTTPError(status_code)))
        return response
    def _test_operation(self, status_code, expected_calls, *args, **kwargs):
        """Run the journal once and verify the HTTP request sent to ODL."""
        request_response = self._get_mock_request_response(status_code)
        with mock.patch('requests.sessions.Session.request',
                        return_value=request_response) as mock_method:
            self.thread.sync_pending_entries()
        if expected_calls:
            mock_method.assert_called_with(
                headers={'Content-Type': 'application/json'},
                timeout=cfg.CONF.ml2_odl.timeout, *args, **kwargs)
        self.assertEqual(expected_calls, mock_method.call_count)
    def _call_operation_object(self, operation, object_type, object_id,
                               network, subnet):
        """Invoke <operation>_<object_type> on the L3 driver."""
        object_dict = self._get_mock_operation_info(
            object_type, network, subnet)
        method = getattr(self.driver, operation + '_' + object_type)
        if operation == odl_const.ODL_CREATE:
            new_object_dict = method(self.db_context, object_dict)
        elif operation == odl_const.ODL_UPDATE:
            new_object_dict = method(self.db_context, object_id, object_dict)
        else:
            new_object_dict = method(self.db_context, object_id)
        return new_object_dict
    def _test_operation_thread_processing(self, object_type, operation,
                                          network, subnet, object_id,
                                          expected_calls=1):
        """Perform one L3 operation and verify the resulting ODL request."""
        http_requests = {odl_const.ODL_CREATE: 'post',
                         odl_const.ODL_UPDATE: 'put',
                         odl_const.ODL_DELETE: 'delete'}
        status_codes = {odl_const.ODL_CREATE: requests.codes.created,
                        odl_const.ODL_UPDATE: requests.codes.ok,
                        odl_const.ODL_DELETE: requests.codes.no_content}
        http_request = http_requests[operation]
        status_code = status_codes[operation]
        # Create database entry.
        new_object_dict = self._call_operation_object(
            operation, object_type, object_id, network, subnet)
        # Setup expected results.
        if operation in [odl_const.ODL_UPDATE, odl_const.ODL_DELETE]:
            url = (cfg.CONF.ml2_odl.url + '/' + object_type + 's/' +
                   object_id)
        else:
            url = cfg.CONF.ml2_odl.url + '/' + object_type + 's'
        if operation in [odl_const.ODL_CREATE, odl_const.ODL_UPDATE]:
            kwargs = {
                'url': url,
                'data': DataMatcher(operation, object_type, new_object_dict)}
        else:
            kwargs = {'url': url, 'data': None}
        # Call threading routine to process database entry. Test results.
        self._test_operation(status_code, expected_calls, http_request,
                             **kwargs)
        return new_object_dict
    def _test_thread_processing(self, object_type):
        """Drive create/update/delete through the journal for a resource."""
        # Create network and subnet.
        kwargs = {'arg_list': (external_net.EXTERNAL,),
                  external_net.EXTERNAL: True}
        with self.network(**kwargs) as network:
            with self.subnet(network=network, cidr='10.0.0.0/24'):
                # Add and process create request.
                new_object_dict = self._test_operation_thread_processing(
                    object_type, odl_const.ODL_CREATE, network, None, None)
                object_id = new_object_dict['id']
                rows = db.get_all_db_rows_by_state(self.db_context,
                                                   odl_const.COMPLETED)
                self.assertEqual(1, len(rows))
                # Add and process 'update' request. Adds to database.
                self._test_operation_thread_processing(
                    object_type, odl_const.ODL_UPDATE, network, None,
                    object_id)
                rows = db.get_all_db_rows_by_state(self.db_context,
                                                   odl_const.COMPLETED)
                self.assertEqual(2, len(rows))
                # Add and process 'delete' request. Adds to database.
                self._test_operation_thread_processing(
                    object_type, odl_const.ODL_DELETE, network, None,
                    object_id)
                rows = db.get_all_db_rows_by_state(self.db_context,
                                                   odl_const.COMPLETED)
                self.assertEqual(3, len(rows))
    def _test_db_results(self, object_id, operation, object_type):
        """Assert exactly one journal row matches the expected triple."""
        rows = db.get_all_db_rows(self.db_context)
        self.assertEqual(1, len(rows))
        self.assertEqual(operation, rows[0]['operation'])
        self.assertEqual(object_type, rows[0]['object_type'])
        self.assertEqual(object_id, rows[0]['object_uuid'])
        self._db_cleanup()
    @contextlib.contextmanager
    def _prepare_resource(self, resource_type):
        """Yield a mock request body backed by a real network + subnet."""
        # Create network and subnet for testing.
        kwargs = {'arg_list': (external_net.EXTERNAL,),
                  external_net.EXTERNAL: True}
        with self.network(**kwargs) as network:
            with self.subnet(network=network):
                yield self._get_mock_operation_info(
                    resource_type, network, None)
    def _test_object_db(self, object_type):
        """Verify create/update/delete each record a single journal row."""
        with self._prepare_resource(object_type) as object_dict:
            # Add and test 'create' database entry.
            method = getattr(self.driver,
                             odl_const.ODL_CREATE + '_' + object_type)
            new_object_dict = method(self.db_context, object_dict)
            object_id = new_object_dict['id']
            self._test_db_results(object_id, odl_const.ODL_CREATE, object_type)
            # Add and test 'update' database entry.
            method = getattr(self.driver,
                             odl_const.ODL_UPDATE + '_' + object_type)
            method(self.db_context, object_id, object_dict)
            self._test_db_results(object_id, odl_const.ODL_UPDATE, object_type)
            # Add and test 'delete' database entry.
            method = getattr(self.driver,
                             odl_const.ODL_DELETE + '_' + object_type)
            method(self.db_context, object_id)
            self._test_db_results(object_id, odl_const.ODL_DELETE, object_type)
    def _test_dependency_processing(
            self, test_operation, test_object, test_id, test_data,
            dep_operation, dep_object, dep_id, dep_data):
        """Verify a journal entry is deferred while its dependency is busy."""
        # Mock sendjson to verify that it never gets called.
        mock_sendjson = mock.patch.object(client.OpenDaylightRestClient,
                                          'sendjson').start()
        # Create dependency db row and mark as 'processing' so it won't
        # be processed by the journal thread.
        ctxt = self.db_context
        journal.record(ctxt, dep_object, dep_id, dep_operation, dep_data)
        row = db.get_all_db_rows_by_state(self.db_context, odl_const.PENDING)
        db.update_db_row_state(self.db_context, row[0], odl_const.PROCESSING)
        # Create test row with dependent ID.
        journal.record(ctxt, test_object, test_id, test_operation, test_data)
        # Call journal thread.
        self.thread.sync_pending_entries()
        # Verify that dependency row is still set at 'processing'.
        rows = db.get_all_db_rows_by_state(self.db_context,
                                           odl_const.PROCESSING)
        self.assertEqual(1, len(rows))
        # Verify that the test row was processed and set back to 'pending'
        # to be processed again.
        rows = db.get_all_db_rows_by_state(self.db_context, odl_const.PENDING)
        self.assertEqual(1, len(rows))
        # Verify that _json_data was not called.
        self.assertFalse(mock_sendjson.call_count)
    def test_router_db(self):
        self._test_object_db(odl_const.ODL_ROUTER)
    def test_floatingip_db(self):
        self._test_object_db(odl_const.ODL_FLOATINGIP)
    def test_router_threading(self):
        self._test_thread_processing(odl_const.ODL_ROUTER)
    def test_floatingip_threading(self):
        self._test_thread_processing(odl_const.ODL_FLOATINGIP)
    def test_delete_network_validate_ext_delete_router_dep(self):
        router_context = [NETWORK_ID]
        self._test_dependency_processing(
            odl_const.ODL_DELETE, odl_const.ODL_NETWORK, NETWORK_ID, None,
            odl_const.ODL_DELETE, odl_const.ODL_ROUTER, ROUTER_ID,
            router_context)
    def test_create_router_validate_ext_create_port_dep(self):
        router_context = {'gw_port_id': PORT_ID}
        self._test_dependency_processing(
            odl_const.ODL_CREATE, odl_const.ODL_ROUTER, ROUTER_ID,
            router_context,
            odl_const.ODL_CREATE, odl_const.ODL_PORT, PORT_ID,
            {'fixed_ips': [], 'network_id': None, odl_const.ODL_SGS: None,
             'tenant_id': 'tenant'})
    def test_delete_router_validate_ext_delete_floatingip_dep(self):
        floatingip_context = [ROUTER_ID]
        self._test_dependency_processing(
            odl_const.ODL_DELETE, odl_const.ODL_ROUTER, ROUTER_ID, None,
            odl_const.ODL_DELETE, odl_const.ODL_FLOATINGIP, FLOATINGIP_ID,
            floatingip_context)
    def test_delete_router_validate_self_create_dep(self):
        self._test_dependency_processing(
            odl_const.ODL_DELETE, odl_const.ODL_ROUTER, ROUTER_ID, EMPTY_DEP,
            odl_const.ODL_CREATE, odl_const.ODL_ROUTER, ROUTER_ID, EMPTY_DEP)
    def test_delete_router_validate_self_update_dep(self):
        self._test_dependency_processing(
            odl_const.ODL_DELETE, odl_const.ODL_ROUTER, ROUTER_ID, EMPTY_DEP,
            odl_const.ODL_UPDATE, odl_const.ODL_ROUTER, ROUTER_ID, EMPTY_DEP)
    def test_update_router_validate_self_create_dep(self):
        self._test_dependency_processing(
            odl_const.ODL_UPDATE, odl_const.ODL_ROUTER, ROUTER_ID, EMPTY_DEP,
            odl_const.ODL_CREATE, odl_const.ODL_ROUTER, ROUTER_ID, EMPTY_DEP)
    def test_create_floatingip_validate_ext_create_network_dep(self):
        floatingip_context = {'floating_network_id': NETWORK_ID}
        self._test_dependency_processing(
            odl_const.ODL_CREATE, odl_const.ODL_FLOATINGIP, FLOATINGIP_ID,
            floatingip_context,
            odl_const.ODL_CREATE, odl_const.ODL_NETWORK, NETWORK_ID, {})
    def test_update_floatingip_validate_self_create_dep(self):
        floatingip_context = {'floating_network_id': NETWORK_ID}
        self._test_dependency_processing(
            odl_const.ODL_UPDATE, odl_const.ODL_FLOATINGIP, FLOATINGIP_ID,
            floatingip_context,
            odl_const.ODL_CREATE, odl_const.ODL_FLOATINGIP, FLOATINGIP_ID,
            EMPTY_DEP)
    def test_delete_floatingip_validate_self_create_dep(self):
        self._test_dependency_processing(
            odl_const.ODL_DELETE, odl_const.ODL_FLOATINGIP, FLOATINGIP_ID,
            EMPTY_DEP,
            odl_const.ODL_CREATE, odl_const.ODL_FLOATINGIP, FLOATINGIP_ID,
            {})
    def test_delete_floatingip_validate_self_update_dep(self):
        self._test_dependency_processing(
            odl_const.ODL_DELETE, odl_const.ODL_FLOATINGIP, FLOATINGIP_ID,
            EMPTY_DEP,
            odl_const.ODL_UPDATE, odl_const.ODL_FLOATINGIP, FLOATINGIP_ID,
            {})
    @mock.patch.object(journal, 'record')
    def test__record_in_journal_retries(self, record_mock):
        self._test_retry_exceptions(
            l3_odl_v2._record_in_journal, record_mock, True)
    def _assert_record_in_journal(self, record_in_journal, resource_type,
                                  operation):
        # NOTE(review): appears unused here;
        # _call_and_assert_recorded_in_journal inlines the same
        # assertion - confirm before removing.
        record_in_journal.assert_called_with(
            mock.ANY, resource_type, operation, mock.ANY, mock.ANY)
    def _call_and_assert_recorded_in_journal(
            self, resource_type, operation, function, *args):
        """Call *function* and assert it recorded the expected journal op."""
        with mock.patch.object(l3_odl_v2,
                               '_record_in_journal') as record_in_journal:
            function(self.db_context, *args)
        record_in_journal.assert_called_with(
            mock.ANY, resource_type, operation, mock.ANY, mock.ANY)
    def test_create_router_records_in_journal(self):
        with self._prepare_resource(odl_const.ODL_ROUTER) as router:
            self._call_and_assert_recorded_in_journal(
                odl_const.ODL_ROUTER, odl_const.ODL_CREATE,
                self.driver.create_router, router)
    def test_update_router_records_in_journal(self):
        with self._prepare_resource(odl_const.ODL_ROUTER) as router:
            result = self.driver.create_router(self.db_context, router)
            self._call_and_assert_recorded_in_journal(
                odl_const.ODL_ROUTER, odl_const.ODL_UPDATE,
                self.driver.update_router, result['id'], router)
    def test_delete_router_records_in_journal(self):
        with self._prepare_resource(odl_const.ODL_ROUTER) as router:
            result = self.driver.create_router(self.db_context, router)
            self._call_and_assert_recorded_in_journal(
                odl_const.ODL_ROUTER, odl_const.ODL_DELETE,
                self.driver.delete_router, result['id'])
    def test_create_fip_records_in_journal(self):
        with self._prepare_resource(odl_const.ODL_FLOATINGIP) as fip:
            self._call_and_assert_recorded_in_journal(
                odl_const.ODL_FLOATINGIP, odl_const.ODL_CREATE,
                self.driver.create_floatingip, fip)
    def test_update_fip_records_in_journal(self):
        with self._prepare_resource(odl_const.ODL_FLOATINGIP) as fip:
            result = self.driver.create_floatingip(self.db_context, fip)
            self._call_and_assert_recorded_in_journal(
                odl_const.ODL_FLOATINGIP, odl_const.ODL_UPDATE,
                self.driver.update_floatingip, result['id'], fip)
    def test_delete_fip_records_in_journal(self):
        with self._prepare_resource(odl_const.ODL_FLOATINGIP) as fip:
            result = self.driver.create_floatingip(self.db_context, fip)
            self._call_and_assert_recorded_in_journal(
                odl_const.ODL_FLOATINGIP, odl_const.ODL_DELETE,
                self.driver.delete_floatingip, result['id'])
    @mock.patch.object(l3_db.L3_NAT_dbonly_mixin, 'disassociate_floatingips')
    @mock.patch.object(l3_odl_v2.OpenDaylightL3RouterPlugin, 'get_floatingips')
    def test_disassociate_floatingips_records_in_journal(
            self, get_fips, disassociate_floatingips):
        with self._prepare_resource(odl_const.ODL_FLOATINGIP) as fip:
            result = self.driver.create_floatingip(self.db_context, fip)
            get_fips.return_value = [result]
            self._call_and_assert_recorded_in_journal(
                odl_const.ODL_FLOATINGIP, odl_const.ODL_UPDATE,
                self.driver.disassociate_floatingips, 'fake_id')
            self.assertTrue(disassociate_floatingips.called)
networking-odl-16.0.0/networking_odl/tests/unit/l3/test_l3_flavor.py0000664000175000017500000002053413656750541025567 0ustar zuulzuul00000000000000# Copyright (c) 2018 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.objects import router as l3_obj
from neutron_lib.callbacks import events
from neutron_lib.callbacks import resources
from oslo_config import fixture as config_fixture
from oslo_utils import uuidutils
from networking_odl.common import constants as odl_const
from networking_odl.db import db
from networking_odl.l3 import l3_flavor
from networking_odl.tests import base
from networking_odl.tests.unit import base_v2
# Maps the short operation tokens used in handler method names
# ('_router_<op>_<event>') to the canonical journal operation constants.
_operation_map = {'del': odl_const.ODL_DELETE,
                  'update': odl_const.ODL_UPDATE,
                  'add': odl_const.ODL_CREATE}
class OpenDaylightL3FlavorTestCase(base_v2.OpenDaylightConfigBase):
    """Tests for the OpenDaylight L3 flavor service provider.

    Exercises the router and floating IP callbacks registered by
    ``l3_flavor.ODLL3ServiceProvider``, both when the resource belongs to
    the ODL flavor and when it belongs to a different one (in which case
    nothing must be recorded in the journal).
    """

    def setUp(self):
        # Keep the journal thread from running so recorded rows stay
        # PENDING and can be inspected by the assertions below.
        self.useFixture(base.OpenDaylightJournalThreadFixture())
        self.cfg = self.useFixture(config_fixture.Config())
        self.cfg.config(service_plugins=['router'])
        super(OpenDaylightL3FlavorTestCase, self).setUp()
        # The provider argument is only stored by the driver, so a plain
        # MagicMock is sufficient here.
        self.flavor_driver = l3_flavor.ODLL3ServiceProvider(mock.MagicMock())

    def _get_mock_fip_kwargs(self):
        """Build the kwargs dict a floating IP callback receives.

        Returns a dict containing a mocked ``floatingip_db`` object plus
        the request-style ``floatingip`` dict the callbacks expect.
        """
        fipid = uuidutils.generate_uuid()
        # NOTE(review): the same UUID is reused for both the FIP id and
        # its floating_network_id; the assertions only compare ids, so
        # this appears intentional — confirm if ever extended.
        fip_db = mock.Mock(floating_ip_address='192.168.1.2',
                           router_id=None, id=fipid,
                           floating_network_id=fipid)
        projectid = uuidutils.generate_uuid()
        floating_data = {'floatingip_id': str(fipid),
                         'router_id': None,
                         'context': self.db_context,
                         'floatingip_db': fip_db,
                         'floatingip': {'project_id': str(projectid),
                                        'floating_ip_address': '172.24.5.4',
                                        'port_id': None,
                                        'id': fip_db.id,
                                        'router_id': None,
                                        'status': 'DOWN',
                                        'floating_network_id': str(fipid)
                                        }}
        return floating_data

    def _get_mock_router_kwargs(self):
        """Build the kwargs dict a router callback receives."""
        router_db = mock.Mock(gw_port_id=uuidutils.generate_uuid(),
                              id=uuidutils.generate_uuid())
        router = {odl_const.ODL_ROUTER:
                  {'name': 'router1',
                   'admin_state_up': True,
                   'tenant_id': uuidutils.generate_uuid(),
                   'id': router_db.id,
                   'external_gateway_info': {'network_id':
                                             uuidutils.generate_uuid()}},
                  'context': self.db_context,
                  "router_db": router_db}
        return router

    def _test_fip_operation(self, event, operation, fip, ops=True):
        """Invoke a floating IP callback and check the journal.

        :param event: callback stage, e.g. 'precommit'
        :param operation: operation token used to build the handler name
        :param fip: kwargs forwarded to the handler
        :param ops: when False, assert nothing was journaled (resource
                    belongs to another flavor)
        """
        method = getattr(self.flavor_driver,
                         '_floatingip_%s_%s' % (operation, event))
        method(odl_const.ODL_FLOATINGIP, mock.ANY, mock.ANY, **fip)
        row = db.get_oldest_pending_db_row_with_lock(self.db_context)
        if ops:
            # DELETE rows do not carry the full floatingip dict.
            if operation != odl_const.ODL_DELETE:
                self.assertEqual(fip['floatingip'], row.data)
            self.assertEqual(odl_const.ODL_FLOATINGIP, row.object_type)
            self.assertEqual(fip['floatingip_id'], row.object_uuid)
        else:
            self.assertIsNone(row)

    def _test_router_operation(self, event, operation, router, ops=True):
        """Invoke a router callback and check the journal.

        Association events are delivered via a DBEventPayload object while
        precommit events still use plain keyword arguments.
        """
        method = getattr(self.flavor_driver,
                         '_router_%s_%s' % (operation, event))
        if event == 'precommit':
            method(odl_const.ODL_ROUTER, mock.ANY, mock.ANY, **router)
        else:
            payload = events.DBEventPayload(
                router.get('context'), states=(router.get('router_db'),),
                request_body=router.get(resources.ROUTER),
                resource_id=router.get(resources.ROUTER).get('id'))
            method(odl_const.ODL_ROUTER, mock.ANY, mock.ANY, payload=payload)
        row = db.get_oldest_pending_db_row_with_lock(self.db_context)
        if ops:
            if operation in ['del', odl_const.ODL_DELETE]:
                self.assertEqual(router['router_id'], row.object_uuid)
            else:
                self.assertEqual(router['router'], row.data)
            self.assertEqual(_operation_map[operation], row.operation)
        else:
            self.assertIsNone(row)

    def test_router_add_association(self):
        """Adding an ODL-flavor router association is journaled."""
        with mock.patch.object(self.flavor_driver,
                               '_validate_l3_flavor',
                               return_value=True):
            router = self._get_mock_router_kwargs()
            # Driver association payload is different and expects
            # router_id.
            router['router_id'] = router['router']['id']
            self._test_router_operation("association", "add", router)

    def test_l3_operations_for_different_flavor(self):
        """Routers of a different flavor must not be journaled."""
        with mock.patch.object(self.flavor_driver,
                               '_validate_l3_flavor',
                               return_value=False):
            router = self._get_mock_router_kwargs()
            router['router_id'] = router['router']['id']
            self._test_router_operation("association", "add", router, False)
            self._test_router_operation("association", "del", router, False)

    def test_l3_router_update_precommit(self):
        """Router update precommit records an UPDATE journal row."""
        with mock.patch.object(self.flavor_driver,
                               '_validate_l3_flavor',
                               return_value=True):
            router = self._get_mock_router_kwargs()
            router['router_id'] = router['router']['id']
            self._test_router_operation("precommit", "update", router)

    def test_router_del_association(self):
        """Removing an ODL-flavor router association is journaled."""
        with mock.patch.object(self.flavor_driver,
                               '_validate_l3_flavor',
                               return_value=True):
            router = self._get_mock_router_kwargs()
            router['router_id'] = router['router']['id']
            self._test_router_operation("association", "del", router)

    def test_fip_precommit_create(self):
        """Floating IP create precommit records a CREATE journal row."""
        with mock.patch.object(self.flavor_driver,
                               '_validate_l3_flavor',
                               return_value=True):
            fip = self._get_mock_fip_kwargs()
            self._test_fip_operation("precommit", odl_const.ODL_CREATE, fip)

    def test_l3_fip_different_flavor(self):
        """Floating IPs of a different flavor must not be journaled."""
        with mock.patch.object(self.flavor_driver,
                               '_validate_l3_flavor',
                               return_value=False):
            fip = self._get_mock_fip_kwargs()
            fip['old_floatingip'] = fip['floatingip']
            self._test_fip_operation("precommit",
                                     odl_const.ODL_CREATE, fip, False)
            self._test_fip_operation("precommit",
                                     odl_const.ODL_UPDATE, fip, False)

    def test_fip_precommit_delete(self):
        # As precommit delete gets port data, build a port payload and let
        # the FloatingIP object lookup return our mocked DB record.
        fip = self._get_mock_fip_kwargs()
        port = {'port': {'id': uuidutils.generate_uuid()},
                'context': self.db_context,
                'floatingip_id': fip['floatingip_id']}
        with mock.patch.object(l3_obj.FloatingIP, 'get_objects',
                               return_value=[fip['floatingip_db']]):
            with mock.patch.object(self.flavor_driver,
                                   '_validate_l3_flavor',
                                   return_value=True):
                self._test_fip_operation("precommit",
                                         odl_const.ODL_DELETE, port)

    def test_fip_precommit_update(self):
        """Floating IP update precommit records an UPDATE journal row."""
        with mock.patch.object(self.flavor_driver,
                               '_validate_l3_flavor',
                               return_value=True):
            fip = self._get_mock_fip_kwargs()
            self._test_fip_operation("precommit", odl_const.ODL_UPDATE, fip)
networking-odl-16.0.0/networking_odl/tests/unit/trunk/0000775000175000017500000000000013656750617023114 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/tests/unit/trunk/__init__.py0000664000175000017500000000000013656750541025207 0ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/tests/unit/trunk/test_trunk_driver_v2.py0000664000175000017500000002364213656750541027655 0ustar zuulzuul00000000000000# Copyright (c) 2017 Ericsson India Global Service Pvt Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.services.trunk import callbacks
from neutron.services.trunk import models
from neutron_lib.callbacks import events
from neutron_lib.callbacks import resources
from neutron_lib import constants as n_const
from neutron_lib.db import api as db_api
from neutron_lib.plugins import directory
from neutron_lib.services.trunk import constants as trunk_consts
from networking_odl.common import constants as odl_const
from networking_odl.db import db
from networking_odl.tests.unit import base_v2
from networking_odl.trunk import trunk_driver_v2 as trunk_driver
# Trunk resource as serialized by Trunk.to_dict(); both subports reference
# the same parent port id with distinct VLAN segmentation ids.
FAKE_TRUNK = {
    'status': 'ACTIVE',
    'sub_ports': [{'segmentation_type': 'vlan',
                   'port_id': 'fake_port_id',
                   'segmentation_id': 101},
                  {'segmentation_type': 'vlan',
                   'port_id': 'fake_port_id',
                   'segmentation_id': 102}],
    'name': 'trunk0',
    'admin_state_up': 'true',
    'tenant_id': 'fake_tenant_id',
    'updated_at': '2016-11-16T10:17:44Z',
    'revision_number': 2,
    'project_id': 'fake_project_id',
    'port_id': 'fake_port_id',
    'id': 'fake_id',
    'description': 'fake trunk port'}

# Parent (trunk) port record; tests copy this and flip 'status' to drive
# the subport status propagation paths.
FAKE_PARENT = {
    'id': 'fake_parent_id',
    'tenant_id': 'fake_tenant_id',
    'name': 'parent_port',
    'admin_state_up': 'true',
    'status': 'ACTIVE'}
class TestTrunkHandler(base_v2.OpenDaylightConfigBase):
    """Tests for OpenDaylightTrunkHandlerV2.

    Covers journal recording for trunk create/update/delete events and the
    propagation of the parent port status to subports.
    """

    def setUp(self):
        super(TestTrunkHandler, self).setUp()
        self.handler = (trunk_driver.
                        OpenDaylightTrunkHandlerV2())

    def _fake_trunk_payload(self):
        """Build a TrunkPayload whose trunk objects serialize to FAKE_TRUNK."""
        payload = callbacks.TrunkPayload(
            self.db_context, 'fake_id',
            mock.Mock(return_value=FAKE_TRUNK),
            mock.Mock(return_value=FAKE_TRUNK),
            mock.Mock(return_value=FAKE_TRUNK['sub_ports']))
        payload.current_trunk.status = trunk_consts.TRUNK_DOWN_STATUS
        payload.current_trunk.to_dict = mock.Mock(return_value=FAKE_TRUNK)
        payload.original_trunk.status = trunk_consts.TRUNK_DOWN_STATUS
        payload.original_trunk.to_dict = mock.Mock(return_value=FAKE_TRUNK)
        return payload

    def _call_operation_object(self, operation, timing, fake_payload):
        # Dispatches to e.g. trunk_create_precommit / trunk_delete_postcommit.
        method = getattr(self.handler, 'trunk_%s_%s' % (operation, timing))
        method(mock.ANY, mock.ANY, mock.ANY, fake_payload)

    def _test_event(self, operation, timing):
        """Fire a trunk event and verify the journal state afterwards.

        A precommit event must record a journal row matching the trunk; a
        postcommit event fired on its own must leave the journal empty,
        since the row is created by the matching precommit (not called
        here).
        """
        with db_api.CONTEXT_WRITER.using(self.db_context):
            fake_payload = self._fake_trunk_payload()
            self._call_operation_object(operation, timing, fake_payload)
            if timing == 'precommit':
                self.db_context.session.flush()

        row = db.get_oldest_pending_db_row_with_lock(self.db_context)
        if timing == 'precommit':
            self.assertEqual(operation, row['operation'])
            self.assertEqual(odl_const.ODL_TRUNK, row['object_type'])
            self.assertEqual(fake_payload.trunk_id, row['object_uuid'])
        elif timing == 'postcommit':
            # Bug fix: this branch previously tested for the never-passed
            # value 'after', so the postcommit tests asserted nothing.
            self.assertIsNone(row)

    def test_trunk_create_precommit(self):
        self._test_event("create", "precommit")

    def test_trunk_create_postcommit(self):
        self._test_event("create", "postcommit")

    def test_trunk_update_precommit(self):
        self._test_event("update", "precommit")

    def test_trunk_update_postcommit(self):
        self._test_event("update", "postcommit")

    def test_trunk_delete_precommit(self):
        self._test_event("delete", "precommit")

    def test_trunk_delete_postcommit(self):
        self._test_event("delete", "postcommit")

    @mock.patch.object(trunk_driver.OpenDaylightTrunkHandlerV2,
                       '_set_subport_status')
    def test_trunk_subports_set_status_create_parent_active(
            self, mock_set_subport_status):
        """Subports added under an ACTIVE parent become ACTIVE."""
        resource = resources.SUBPORTS
        event_type = events.AFTER_CREATE
        fake_payload = self._fake_trunk_payload()
        core_plugin = directory.get_plugin()
        fake_payload.subports = [models.SubPort(port_id='fake_port_id',
                                                segmentation_id=101,
                                                segmentation_type='vlan',
                                                trunk_id='fake_id')]
        parent_port = FAKE_PARENT
        with mock.patch.object(core_plugin, '_get_port') as gp:
            gp.return_value = parent_port
            self.handler.trunk_subports_set_status(resource, event_type,
                                                   mock.ANY, fake_payload)
            mock_set_subport_status.assert_called_once_with(
                core_plugin, mock.ANY, 'fake_port_id',
                n_const.PORT_STATUS_ACTIVE)

    @mock.patch.object(trunk_driver.OpenDaylightTrunkHandlerV2,
                       '_set_subport_status')
    def test_trunk_subports_set_status_create_parent_down(
            self, mock_set_subport_status):
        """Subports added under a DOWN parent become DOWN."""
        resource = resources.SUBPORTS
        event_type = events.AFTER_CREATE
        fake_payload = self._fake_trunk_payload()
        core_plugin = directory.get_plugin()
        fake_payload.subports = [models.SubPort(port_id='fake_port_id',
                                                segmentation_id=101,
                                                segmentation_type='vlan',
                                                trunk_id='fake_id')]
        parent_port = FAKE_PARENT.copy()
        parent_port['status'] = n_const.PORT_STATUS_DOWN
        with mock.patch.object(core_plugin, '_get_port') as gp:
            gp.return_value = parent_port
            self.handler.trunk_subports_set_status(resource, event_type,
                                                   mock.ANY, fake_payload)
            mock_set_subport_status.assert_called_once_with(
                core_plugin, mock.ANY, 'fake_port_id',
                n_const.PORT_STATUS_DOWN)

    @mock.patch.object(trunk_driver.OpenDaylightTrunkHandlerV2,
                       '_set_subport_status')
    def test_trunk_subports_set_status_delete(self, mock_set_subport_status):
        """Subports removed from a trunk are set DOWN."""
        resource = resources.SUBPORTS
        event_type = events.AFTER_DELETE
        fake_payload = self._fake_trunk_payload()
        fake_payload.subports = [models.SubPort(port_id='fake_port_id',
                                                segmentation_id=101,
                                                segmentation_type='vlan',
                                                trunk_id='fake_id')]
        self.handler.trunk_subports_set_status(resource, event_type, mock.ANY,
                                               fake_payload)
        mock_set_subport_status.assert_called_once_with(
            mock.ANY, mock.ANY, 'fake_port_id', n_const.PORT_STATUS_DOWN)

    @mock.patch.object(trunk_driver.OpenDaylightTrunkHandlerV2,
                       '_get_subports_ids')
    @mock.patch.object(trunk_driver.OpenDaylightTrunkHandlerV2,
                       '_set_subport_status')
    def test_trunk_subports_update_status_parent_down_to_active(
            self, mock_set_subport_status, mock_get_subports_ids):
        """Parent going DOWN->ACTIVE propagates ACTIVE to subports."""
        resource = resources.PORT
        event_type = events.AFTER_UPDATE
        core_plugin = directory.get_plugin()
        port = FAKE_PARENT.copy()
        original_port = FAKE_PARENT.copy()
        original_port['status'] = n_const.PORT_STATUS_DOWN
        port_kwargs = {'port': port, 'original_port': original_port}
        mock_get_subports_ids.return_value = ['fake_port_id']
        self.handler.trunk_subports_update_status(resource, event_type,
                                                  mock.ANY, **port_kwargs)
        mock_set_subport_status.assert_called_once_with(
            core_plugin, mock.ANY, 'fake_port_id', n_const.PORT_STATUS_ACTIVE)

    @mock.patch.object(trunk_driver.OpenDaylightTrunkHandlerV2,
                       '_get_subports_ids')
    @mock.patch.object(trunk_driver.OpenDaylightTrunkHandlerV2,
                       '_set_subport_status')
    def test_trunk_subports_update_status_parent_active_to_down(
            self, mock_set_subport_status, mock_get_subports_ids):
        """Parent going ACTIVE->DOWN propagates DOWN to subports."""
        resource = resources.PORT
        event_type = events.AFTER_UPDATE
        core_plugin = directory.get_plugin()
        port = FAKE_PARENT.copy()
        original_port = FAKE_PARENT.copy()
        port['status'] = n_const.PORT_STATUS_DOWN
        port_kwargs = {'port': port, 'original_port': original_port}
        mock_get_subports_ids.return_value = ['fake_port_id']
        self.handler.trunk_subports_update_status(resource, event_type,
                                                  mock.ANY, **port_kwargs)
        mock_set_subport_status.assert_called_once_with(
            core_plugin, mock.ANY, 'fake_port_id', n_const.PORT_STATUS_DOWN)
class TestTrunkDriver(base_v2.OpenDaylightConfigBase):
    """Checks the loading conditions of the v2 trunk driver."""

    def setUp(self):
        super(TestTrunkDriver, self).setUp()

    def test_is_loaded(self):
        """is_loaded must track the configured ML2 mechanism drivers."""
        odl_trunk_driver = trunk_driver.OpenDaylightTrunkDriverV2.create()
        # Loaded while the ODL v2 mechanism driver is configured.
        self.cfg.config(
            mechanism_drivers=["logger", odl_const.ODL_ML2_MECH_DRIVER_V2],
            group='ml2')
        self.assertTrue(odl_trunk_driver.is_loaded)
        # No longer loaded once the ODL mechanism driver is removed.
        self.cfg.config(mechanism_drivers=['logger'], group='ml2')
        self.assertFalse(odl_trunk_driver.is_loaded)
        # Still not loaded when a non-ML2 core plugin is configured.
        self.cfg.config(core_plugin='some_plugin')
        self.assertFalse(odl_trunk_driver.is_loaded)
networking-odl-16.0.0/networking_odl/tests/unit/bgpvpn/0000775000175000017500000000000013656750617023245 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/tests/unit/bgpvpn/__init__.py0000664000175000017500000000000013656750541025340 0ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/tests/unit/bgpvpn/test_odl_v2.py0000664000175000017500000002047613656750541026050 0ustar zuulzuul00000000000000#
# Copyright (C) 2017 Ericsson India Global Services Pvt Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
from networking_odl.bgpvpn import odl_v2 as driverv2
from networking_odl.common import constants as odl_const
from networking_odl.common import odl_features
from networking_odl.db import db
from networking_odl.tests.unit import base_v2
from neutron_lib.api.definitions import bgpvpn_vni as bgpvpn_vni_def
class OpenDaylightBgpvpnDriverTestCase(base_v2.OpenDaylightConfigBase):
    """Tests for the OpenDaylight BGPVPN v2 driver.

    Each test drives a precommit hook, verifies the resulting pending
    journal row, runs journal processing and then checks that the row is
    gone.
    """

    def setUp(self):
        super(OpenDaylightBgpvpnDriverTestCase, self).setUp()
        self.driver = driverv2.OpenDaylightBgpvpnDriver(service_plugin=None)

    def _get_fake_bgpvpn(self, net=False, router=False):
        """Return a BGPVPN dict, optionally associated to a net/router."""
        net_id = []
        router_id = []
        if router:
            router_id = ['ROUTER_ID']
        if net:
            net_id = ['NET_ID']
        fake_bgpvpn = {'route_targets': '100:1',
                       'route_distinguishers': ['100:1'],
                       'id': 'BGPVPN_ID',
                       'networks': net_id,
                       'routers': router_id}
        return fake_bgpvpn

    def _get_fake_router_assoc(self):
        """Return a router association dict referencing the fake BGPVPN."""
        fake_router_assoc = {'id': 'ROUTER_ASSOC_ID',
                             'bgpvpn_id': 'BGPVPN_ID',
                             'router_id': 'ROUTER_ID'}
        return fake_router_assoc

    def _get_fake_net_assoc(self):
        """Return a network association dict referencing the fake BGPVPN."""
        fake_net_assoc = {'id': 'NET_ASSOC_ID',
                         'bgpvpn_id': 'BGPVPN_ID',
                         'network_id': 'NET_ID'}
        return fake_net_assoc

    def _assert_op(self, operation, object_type, data, precommit=True):
        """Assert journal content after a driver call.

        With ``precommit=True`` the oldest pending row must match the
        given operation/object_type/data; with ``precommit=False`` the
        journal must be empty (the row was already processed).
        """
        rows = sorted(db.get_all_db_rows_by_state(self.db_context,
                                                  odl_const.PENDING),
                      key=lambda x: x.seqnum)
        if precommit:
            # NOTE(review): flushing after the query looks reversed, but
            # the query above presumably triggers an autoflush already —
            # confirm whether this flush is still needed.
            self.db_context.session.flush()
            self.assertEqual(operation, rows[0]['operation'])
            self.assertEqual(object_type, rows[0]['object_type'])
            self.assertEqual(data['id'], rows[0]['object_uuid'])
        else:
            self.assertEqual([], rows)

    def test_create_bgpvpn(self):
        """BGPVPN create journals a CREATE row which is then processed."""
        fake_data = self._get_fake_bgpvpn()
        self.driver.create_bgpvpn_precommit(self.db_context, fake_data)
        self._assert_op(odl_const.ODL_CREATE, odl_const.ODL_BGPVPN,
                        fake_data)
        self.run_journal_processing()
        self._assert_op(odl_const.ODL_CREATE, odl_const.ODL_BGPVPN,
                        fake_data, False)

    def test_update_bgpvpn(self):
        """BGPVPN update journals an UPDATE row which is then processed."""
        fake_data = self._get_fake_bgpvpn()
        self.driver.update_bgpvpn_precommit(self.db_context, fake_data)
        self._assert_op(odl_const.ODL_UPDATE, odl_const.ODL_BGPVPN,
                        fake_data)
        self.run_journal_processing()
        self._assert_op(odl_const.ODL_UPDATE, odl_const.ODL_BGPVPN,
                        fake_data, False)

    def test_delete_bgpvpn(self):
        """BGPVPN delete journals a DELETE row which is then processed."""
        fake_data = self._get_fake_bgpvpn()
        self.driver.delete_bgpvpn_precommit(self.db_context, fake_data)
        self._assert_op(odl_const.ODL_DELETE, odl_const.ODL_BGPVPN,
                        fake_data)
        self.run_journal_processing()
        self._assert_op(odl_const.ODL_DELETE, odl_const.ODL_BGPVPN,
                        fake_data, False)

    def test_create_router_assoc(self):
        """A router association is journaled as a BGPVPN UPDATE."""
        fake_rtr_assoc_data = self._get_fake_router_assoc()
        fake_rtr_upd_bgpvpn_data = self._get_fake_bgpvpn(router=True)
        with mock.patch.object(self.driver, 'get_router_assocs',
                               return_value=[]), \
                mock.patch.object(self.driver, 'get_bgpvpn',
                                  return_value=fake_rtr_upd_bgpvpn_data):
            self.driver.create_router_assoc_precommit(self.db_context,
                                                      fake_rtr_assoc_data)
            self._assert_op(odl_const.ODL_UPDATE,
                            odl_const.ODL_BGPVPN,
                            fake_rtr_upd_bgpvpn_data)
            self.run_journal_processing()
            self._assert_op(odl_const.ODL_UPDATE,
                            odl_const.ODL_BGPVPN,
                            fake_rtr_upd_bgpvpn_data, False)

    def test_delete_router_assoc(self):
        """Removing a router association is journaled as a BGPVPN UPDATE."""
        fake_rtr_assoc_data = self._get_fake_router_assoc()
        fake_bgpvpn_data = self._get_fake_bgpvpn(router=True)
        with mock.patch.object(self.driver, 'get_bgpvpn',
                               return_value=fake_bgpvpn_data):
            self.driver.delete_router_assoc_precommit(self.db_context,
                                                      fake_rtr_assoc_data)
            self._assert_op(odl_const.ODL_UPDATE,
                            odl_const.ODL_BGPVPN,
                            fake_bgpvpn_data)
            self.run_journal_processing()
            self._assert_op(odl_const.ODL_UPDATE,
                            odl_const.ODL_BGPVPN,
                            fake_bgpvpn_data, False)

    def test_create_net_assoc(self):
        """A network association is journaled as a BGPVPN UPDATE."""
        fake_net_assoc_data = self._get_fake_net_assoc()
        fake_net_upd_bgpvpn_data = self._get_fake_bgpvpn(net=True)
        # todo(vivekanandan) add check for case when assoc already exists
        with mock.patch.object(self.driver, 'get_bgpvpns',
                               return_value=[fake_net_upd_bgpvpn_data]):
            self.driver.create_net_assoc_precommit(self.db_context,
                                                   fake_net_assoc_data)
            self._assert_op(odl_const.ODL_UPDATE,
                            odl_const.ODL_BGPVPN,
                            fake_net_upd_bgpvpn_data)
            self.run_journal_processing()
            self._assert_op(odl_const.ODL_UPDATE,
                            odl_const.ODL_BGPVPN,
                            fake_net_upd_bgpvpn_data, False)

    def test_delete_net_assoc(self):
        """Removing a network association is journaled as a BGPVPN UPDATE."""
        fake_net_assoc_data = self._get_fake_net_assoc()
        fake_bgpvpn_data = self._get_fake_bgpvpn(net=True)
        with mock.patch.object(self.driver, 'get_bgpvpn',
                               return_value=fake_bgpvpn_data):
            self.driver.delete_net_assoc_precommit(self.db_context,
                                                   fake_net_assoc_data)
            self._assert_op(odl_const.ODL_UPDATE,
                            odl_const.ODL_BGPVPN,
                            fake_bgpvpn_data)
            self.run_journal_processing()
            self._assert_op(odl_const.ODL_UPDATE,
                            odl_const.ODL_BGPVPN,
                            fake_bgpvpn_data, False)

    def _get_bgpvpn_driver_with_vni(self):
        """Build a driver with the ODL 'bgpvpn-vni' feature enabled."""
        feature_json = """{"features": {"feature":
                        [{"service-provider-feature":
                        "neutron-extensions:operational-port-status"},
                        {"service-provider-feature":
                        "neutron-extensions:bgpvpn-vni"}]}}"""
        self.cfg.config(odl_features_json=feature_json, group='ml2_odl')
        odl_features.init()
        bgpvpn_driver = driverv2.OpenDaylightBgpvpnDriver(service_plugin=None)
        return bgpvpn_driver

    def test_bgpvpn_vni_feature(self):
        """The vni extension alias is advertised when ODL supports it."""
        bgpvpn_driver = self._get_bgpvpn_driver_with_vni()
        self.assertIn(bgpvpn_vni_def.ALIAS,
                      bgpvpn_driver.more_supported_extension_aliases)

    def test_bgpvpn_vni_create_with_vni(self):
        """BGPVPN create carrying a vni is journaled and processed."""
        bgpvpn_driver = self._get_bgpvpn_driver_with_vni()
        fake_data = self._get_fake_bgpvpn()
        fake_data['vni'] = 100
        bgpvpn_driver.create_bgpvpn_precommit(self.db_context, fake_data)
        self._assert_op(odl_const.ODL_CREATE, odl_const.ODL_BGPVPN,
                        fake_data)
        self.run_journal_processing()
        self._assert_op(odl_const.ODL_CREATE, odl_const.ODL_BGPVPN,
                        fake_data, False)
networking-odl-16.0.0/networking_odl/tests/unit/common/0000775000175000017500000000000013656750617023241 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/tests/unit/common/__init__.py0000664000175000017500000000000013656750541025334 0ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/tests/unit/common/test_websocket_client.py0000664000175000017500000003151413656750541030176 0ustar zuulzuul00000000000000# Copyright (c) 2017 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import fixture as config_fixture
from oslo_serialization import jsonutils
import requests
import websocket
from networking_odl.common.client import OpenDaylightRestClient as odl_client
from networking_odl.common import websocket_client as wsc
from networking_odl.tests import base
class TestWebsocketClient(base.DietTestCase):
    """Test class for Websocket Client."""

    # Valid RPC reply carrying the created stream name.
    FAKE_WEBSOCKET_STREAM_NAME_DATA = {
        'output': {
            'stream-name': 'data-change-event-subscription/neutron:neutron/'
            'neutron:hostconfigs/datastore=OPERATIONAL/scope=SUBTREE'
        }}
    # Same reply with a wrong top-level key ('outputs'); drives the
    # error path of _subscribe_websocket.
    INVALID_WEBSOCKET_STREAM_NAME_DATA = {
        'outputs': {
            'stream-name': 'data-change-event-subscription/neutron:neutron/'
            'neutron:hostconfigs/datastore=OPERATIONAL/scope=SUBTREE'
        }}
    # Subscription response headers: the websocket location to connect to.
    FAKE_WEBSOCKET_SUBS_DATA = {
        'location': 'ws://localhost:8185/data-change-event-subscription/'
                    'neutron:neutron/neutron:hostconfigs/datastore=OPERATIONAL'
                    '/scope=SUBTREE'}
    ODL_URI = "http://localhost:8080/"
    WEBSOCKET_URI = (
        "ws://localhost:8185/" +
        "data-change-event-subscription/neutron:neutron/" +
        "neutron:hostconfigs/datastore=OPERATIONAL/scope=SUBTREE")
    WEBSOCKET_SSL_URI = (
        "wss://localhost:8185/" +
        "data-change-event-subscription/neutron:neutron/" +
        "neutron:hostconfigs/datastore=OPERATIONAL/scope=SUBTREE")
    # NOTE(review): class-level mock shared by every test in this class;
    # assert_called_once in test_run_websocket_thread assumes no other
    # test fires the callback — confirm if tests are ever reordered.
    mock_callback_handler = mock.MagicMock()

    def setUp(self):
        """Setup test."""
        self.useFixture(base.OpenDaylightRestClientFixture())
        # Keep the real websocket thread from being spawned on creation.
        mock.patch.object(wsc.OpenDaylightWebsocketClient,
                          'start_odl_websocket_thread').start()
        self.cfg = self.useFixture(config_fixture.Config())
        super(TestWebsocketClient, self).setUp()
        self.mgr = wsc.OpenDaylightWebsocketClient.odl_create_websocket(
            TestWebsocketClient.ODL_URI,
            "restconf/operational/neutron:neutron/hostconfigs",
            wsc.ODL_OPERATIONAL_DATASTORE, wsc.ODL_NOTIFICATION_SCOPE_SUBTREE,
            TestWebsocketClient.mock_callback_handler
        )

    def _get_raised_response(self, status_code):
        """Return a real requests.Response with the given status code."""
        response = requests.Response()
        response.status_code = status_code
        return response

    @classmethod
    def _get_mock_request_response(cls, status_code):
        """Return a mocked response whose raise_for_status matches the code."""
        response = mock.Mock(status_code=status_code)
        side_effect = None
        # NOTE(rajivk): requests.codes.bad_request constant value is 400,
        # so it filters requests where client(4XX) or server(5XX) has erred.
        if status_code >= requests.codes.bad_request:
            side_effect = requests.exceptions.HTTPError()
        response.raise_for_status = mock.Mock(side_effect=side_effect)
        return response

    @mock.patch.object(odl_client, 'sendjson')
    def test_subscribe_websocket_sendjson(self, mocked_sendjson):
        """Exercise the sendjson error paths of _subscribe_websocket."""
        # 401: subscription silently fails, returns None.
        request_response = self._get_raised_response(
            requests.codes.unauthorized)
        mocked_sendjson.return_value = request_response
        stream_url = self.mgr._subscribe_websocket()
        self.assertIsNone(stream_url)
        # 400: invalid request raises.
        request_response = self._get_raised_response(
            requests.codes.bad_request)
        mocked_sendjson.return_value = request_response
        self.assertRaises(ValueError, self.mgr._subscribe_websocket)
        # 200 with a malformed body (wrong key) raises.
        request_response = self._get_mock_request_response(requests.codes.ok)
        request_response.json = mock.Mock(
            return_value=(TestWebsocketClient.
                          INVALID_WEBSOCKET_STREAM_NAME_DATA))
        mocked_sendjson.return_value = request_response
        self.assertRaises(ValueError, self.mgr._subscribe_websocket)
        # 200 with a non-dict body returns None.
        request_response = self._get_mock_request_response(requests.codes.ok)
        request_response.json = mock.Mock(return_value={""})
        mocked_sendjson.return_value = request_response
        self.assertIsNone(self.mgr._subscribe_websocket())

    @mock.patch.object(odl_client, 'get')
    def test_subscribe_websocket_get(self, mocked_get):
        """Exercise the GET error paths of _subscribe_websocket."""
        request_response = self._get_raised_response(requests.codes.not_found)
        mocked_get.return_value = request_response
        self.assertRaises(ValueError, self.mgr._subscribe_websocket)
        request_response = self._get_raised_response(
            requests.codes.bad_request)
        mocked_get.return_value = request_response
        stream_url = self.mgr._subscribe_websocket()
        self.assertIsNone(stream_url)
        request_response = self._get_raised_response(
            requests.codes.unauthorized)
        mocked_get.return_value = request_response
        stream_url = self.mgr._subscribe_websocket()
        self.assertIsNone(stream_url)

    @mock.patch.object(odl_client, 'sendjson')
    @mock.patch.object(odl_client, 'get')
    def test_subscribe_websocket(self, mocked_get, mocked_sendjson):
        """A successful subscription returns the websocket location."""
        request_response = self._get_mock_request_response(requests.codes.ok)
        request_response.json = mock.Mock(
            return_value=TestWebsocketClient.FAKE_WEBSOCKET_STREAM_NAME_DATA)
        mocked_sendjson.return_value = request_response
        request_response = self._get_mock_request_response(requests.codes.ok)
        request_response.headers = TestWebsocketClient.FAKE_WEBSOCKET_SUBS_DATA
        mocked_get.return_value = request_response
        stream_url = self.mgr._subscribe_websocket()
        self.assertEqual(TestWebsocketClient.WEBSOCKET_URI, stream_url)

    @mock.patch.object(websocket, 'create_connection')
    def test_create_connection(self, mock_create_connection):
        mock_create_connection.return_value = None
        return_value = self.mgr._socket_create_connection("localhost")
        self.assertIsNone(return_value)

    @mock.patch.object(websocket, 'create_connection',
                       side_effect=Exception("something went wrong"))
    def test_create_connection_handles_exception(self, mock_create_connection):
        # Connection failures must be swallowed and reported as None.
        self.assertIsNone(self.mgr._socket_create_connection("localhost"))

    def test_websocket_connect(self):
        self.mgr._subscribe_websocket = mock.MagicMock(
            return_value=TestWebsocketClient.WEBSOCKET_URI)
        self.mgr._socket_create_connection = mock.MagicMock(return_value=True)
        self.mgr._connect_ws()
        self.mgr._socket_create_connection.assert_called_with(
            TestWebsocketClient.WEBSOCKET_URI)

    def test_websocket_connect_ssl(self):
        self.mgr._subscribe_websocket = mock.MagicMock(
            return_value=TestWebsocketClient.WEBSOCKET_SSL_URI)
        self.mgr._socket_create_connection = mock.MagicMock(return_value=True)
        self.mgr._connect_ws()
        self.mgr._socket_create_connection.assert_called_with(
            TestWebsocketClient.WEBSOCKET_SSL_URI)

    def test_websocket_connect_ssl_negative_uri(self):
        # An https ODL endpoint must upgrade a ws:// stream URL to wss://.
        self.mgr._subscribe_websocket = mock.MagicMock(
            return_value=TestWebsocketClient.WEBSOCKET_URI)
        self.mgr._socket_create_connection = mock.MagicMock(return_value=True)
        self.mgr.odl_rest_client.url = self.mgr.odl_rest_client.url.replace(
            'http:', 'https:')
        self.mgr._connect_ws()
        self.mgr._socket_create_connection.assert_called_with(
            TestWebsocketClient.WEBSOCKET_SSL_URI)

    def test_run_websocket_thread(self):
        # No websocket: the loop exits after one connect attempt.
        self.mgr._connect_ws = mock.MagicMock(return_value=None)
        self.cfg.config(restconf_poll_interval=0, group='ml2_odl')
        self.mgr.run_websocket_thread(True)
        # Bug fix: was a bare 'assert', which is stripped under python -O
        # and produces no useful failure message.
        self.assertEqual(1, self.mgr._connect_ws.call_count)

        # A websocket without recv support logs an error.
        self.mgr.set_exit_flag(False)
        self.mgr._connect_ws = mock.MagicMock(return_value=1)
        with mock.patch.object(wsc, 'LOG') as mock_log:
            self.mgr.run_websocket_thread(True)
            self.assertTrue(mock_log.error.called)

        # An empty recv logs a warning and closes the socket.
        self.mgr.set_exit_flag(False)
        ws = mock.MagicMock()
        ws.recv.return_value = None
        self.mgr._connect_ws = mock.MagicMock(return_value=ws)
        self.mgr._close_ws = mock.MagicMock(return_value=None)
        with mock.patch.object(wsc, 'LOG') as mock_log:
            self.mgr.run_websocket_thread(True)
            self.assertTrue(mock_log.warning.called)

        # Received data is handed to the registered callback.
        self.mgr.set_exit_flag(False)
        ws = mock.MagicMock()
        ws.recv.return_value = "Test Data"
        self.mgr._connect_ws = mock.MagicMock(return_value=ws)
        self.mgr._close_ws = mock.MagicMock(return_value=None)
        self.mgr.run_websocket_thread(True)
        TestWebsocketClient.mock_callback_handler.assert_called_once()
class TestEventDataParser(base.DietTestCase):
    """Tests for EventDataParser notification payload handling."""

    # Sample data-change notification carrying a LIST of two change
    # events for the same port (its uuid and its status).
    sample_port_status_payload = """{"notification":
        {"xmlns":"urn:ietf:params:xml:ns:netconf:notification:1.0",
        "data-changed-notification": { "xmlns":
        "urn:opendaylight:params:xml:ns:yang:controller:md:sal:remote",
        "data-change-event":
        [{"path":
        "/neutron:neutron/neutron:ports/neutron:port\
[neutron:uuid='a51e439f-4d02-4e76-9b0d-08f6c08855dd']\
/neutron:uuid",
        "data":{"uuid":{"xmlns":"urn:opendaylight:neutron",
        "content":"a51e439f-4d02-4e76-9b0d-08f6c08855dd"}},
        "operation":"created"},
        {"path":
        "/neutron:neutron/neutron:ports/neutron:port\
[neutron:uuid='a51e439f-4d02-4e76-9b0d-08f6c08855dd']\
/neutron:status",
        "data":{"status":{"xmlns":"urn:opendaylight:neutron",
        "content":"ACTIVE"}},
        "operation":"created"}
        ]},
        "eventTime":"2017-03-23T09:28:55.379-07:00"}}"""

    # Same notification shape but with a SINGLE event object instead of a
    # list; the parser must handle both forms.
    sample_port_status_payload_one_item = """{"notification":
        {"xmlns": "urn:ietf:params:xml:ns:netconf:notification:1.0",
        "data-changed-notification": {
        "data-change-event": {
        "data": { "status": {
        "content": "ACTIVE",
        "xmlns": "urn:opendaylight:neutron"
        }},
        "operation": "updated",
        "path": "/neutron:neutron/neutron:ports/neutron:port\
[neutron:uuid='d6e6335d-9568-4949-aef1-4107e34c5f28']\
/neutron:status"
        },
        "xmlns": "urn:opendaylight:params:xml:ns:yang:controller:md:\
sal:remote"
        },
        "eventTime": "2017-02-22T02:27:32+02:00" }}"""

    def setUp(self):
        """Setup test."""
        super(TestEventDataParser, self).setUp()

    def test_get_item_port_status_payload(self):
        """get_item yields events matching the list-form payload."""
        sample = jsonutils.loads(self.sample_port_status_payload)
        expected_events = (sample
                           [wsc.EventDataParser.NOTIFICATION_TAG]
                           [wsc.EventDataParser.DC_NOTIFICATION_TAG]
                           [wsc.EventDataParser.DC_EVENT_TAG])
        event_0 = expected_events[0]
        event = wsc.EventDataParser.get_item(self.sample_port_status_payload)
        operation, path, data = next(event).get_fields()
        self.assertEqual(event_0.get('operation'), operation)
        self.assertEqual(event_0.get('path'), path)
        self.assertEqual(event_0.get('data'), data)
        # The uuid is extracted quoted, exactly as it appears in the path.
        uuid = wsc.EventDataParser.extract_field(path, "neutron:uuid")
        self.assertEqual("'a51e439f-4d02-4e76-9b0d-08f6c08855dd'", uuid)
        # Unknown keys yield None rather than raising.
        uuid = wsc.EventDataParser.extract_field(path, "invalidkey")
        self.assertIsNone(uuid)

    def test_get_item_port_status_payload_one_item(self):
        """get_item also yields the event from the single-item payload."""
        sample = jsonutils.loads(self.sample_port_status_payload_one_item)
        expected_events = (sample
                           [wsc.EventDataParser.NOTIFICATION_TAG]
                           [wsc.EventDataParser.DC_NOTIFICATION_TAG]
                           [wsc.EventDataParser.DC_EVENT_TAG])
        event = (wsc.EventDataParser.
                 get_item(self.sample_port_status_payload_one_item))
        operation, path, data = next(event).get_fields()
        self.assertEqual(expected_events.get('operation'), operation)
        self.assertEqual(expected_events.get('path'), path)
        self.assertEqual(expected_events.get('data'), data)
        uuid = wsc.EventDataParser.extract_field(path, "neutron:uuid")
        self.assertEqual("'d6e6335d-9568-4949-aef1-4107e34c5f28'", uuid)
networking-odl-16.0.0/networking_odl/tests/unit/common/test_postcommit.py0000664000175000017500000000507113656750541027047 0ustar zuulzuul00000000000000# Copyright (c) 2017 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from networking_odl.common import postcommit
from neutron.tests import base
class BaseTest(object):
    """Fixture class whose postcommit hooks all alias one no-op method.

    All six hooks deliberately share a single underlying function object,
    so the ``add_postcommit`` decorator under test can detect and rewrap
    each of them individually.
    """

    def create_resource1_postcommit(self):
        pass

    # Every remaining hook points at the same no-op function above.
    create_resource2_postcommit = create_resource1_postcommit
    update_resource1_postcommit = create_resource1_postcommit
    update_resource2_postcommit = create_resource1_postcommit
    delete_resource1_postcommit = create_resource1_postcommit
    delete_resource2_postcommit = create_resource1_postcommit
class TestPostCommit(base.DietTestCase):
    """Tests for the ``postcommit.add_postcommit`` class decorator."""

    def _get_class(self, *resources):
        """Return a BaseTest subclass decorated for *resources*."""
        @postcommit.add_postcommit(*resources)
        class TestClass(BaseTest):
            pass
        return TestClass

    def _get_methods_name(self, resources):
        """Expected hook names, ordered create -> update -> delete."""
        return ['%s_%s_postcommit' % (op, resource)
                for op in ('create', 'update', 'delete')
                for resource in resources]

    def _assert_method_names(self, cls, names):
        """Each generated hook must carry its own method name."""
        for name in names:
            self.assertEqual(name, getattr(cls, name).__name__)

    def test_with_one_resource(self):
        cls = self._get_class('resource1')
        self._assert_method_names(cls, self._get_methods_name(['resource1']))

    def test_with_two_resource(self):
        cls = self._get_class('resource1', 'resource2')
        self._assert_method_names(
            cls, self._get_methods_name(['resource1', 'resource2']))

    def test_with_two_resource_create_defined_for_one(self):
        names = self._get_methods_name(['resource1', 'resource2'])

        @postcommit.add_postcommit('resource1', 'resource2')
        class TestClass(BaseTest):
            def create_resource1_postcommit(self):
                pass
            create_resource1_postcommit.__name__ = 'test_method'

        # The explicitly defined hook keeps its custom name; every other
        # hook is (re)generated by the decorator with its canonical name.
        self._assert_method_names(TestClass, names[1:])
        self.assertEqual('test_method',
                         getattr(TestClass, names[0]).__name__)
networking-odl-16.0.0/networking_odl/tests/unit/common/test_utils.py0000664000175000017500000000536113656750541026013 0ustar zuulzuul00000000000000# Copyright (c) 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.tests import base
from oslo_config import fixture as config_fixture
from networking_odl.common import constants as odl_const
from networking_odl.common import utils
class TestUtils(base.DietTestCase):
    """Tests for networking_odl.common.utils helpers."""

    def setUp(self):
        self.cfg = self.useFixture(config_fixture.Config())
        super(TestUtils, self).setUp()

    # TODO(manjeets) remove this test once neutronify is
    # consolidated with make_plural
    def test_neutronify(self):
        self.assertEqual('a-b-c', utils.neutronify('a_b_c'))

    def test_neutronify_empty(self):
        self.assertEqual('', utils.neutronify(''))

    @staticmethod
    def _get_resources():
        """Map every ODL object type to its expected URL fragment."""
        return {odl_const.ODL_SG: 'security-groups',
                odl_const.ODL_SG_RULE: 'security-group-rules',
                odl_const.ODL_NETWORK: 'networks',
                odl_const.ODL_SUBNET: 'subnets',
                odl_const.ODL_ROUTER: 'routers',
                odl_const.ODL_PORT: 'ports',
                odl_const.ODL_FLOATINGIP: 'floatingips',
                odl_const.ODL_QOS_POLICY: 'qos/policies',
                odl_const.ODL_TRUNK: 'trunks',
                odl_const.ODL_BGPVPN: 'bgpvpns',
                odl_const.ODL_SFC_FLOW_CLASSIFIER: 'sfc/flowclassifiers',
                odl_const.ODL_SFC_PORT_PAIR: 'sfc/portpairs',
                odl_const.ODL_SFC_PORT_PAIR_GROUP: 'sfc/portpairgroups',
                odl_const.ODL_SFC_PORT_CHAIN: 'sfc/portchains',
                odl_const.ODL_L2GATEWAY: 'l2-gateways',
                odl_const.ODL_L2GATEWAY_CONNECTION: 'l2gateway-connections'}

    def test_all_resources_url(self):
        # Every known object type must translate to its canonical URL.
        for object_type, expected_url in self._get_resources().items():
            self.assertEqual(utils.make_url_object(object_type),
                             expected_url)

    def test_get_odl_url(self):
        """get_odl_url keeps the host from ml2_odl.url, swaps the path."""
        self.cfg.config(url='http://localhost:8080/controller/nb/v2/neutron',
                        group='ml2_odl')
        test_path = '/restconf/neutron:neutron/hostconfigs'
        self.assertEqual(
            "http://localhost:8080/restconf/neutron:neutron/hostconfigs",
            utils.get_odl_url(path=test_path))
networking-odl-16.0.0/networking_odl/tests/unit/common/test_client.py0000664000175000017500000000416613656750541026133 0ustar zuulzuul00000000000000# Copyright (c) 2015 Intel Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from networking_odl.common import client
from neutron.tests import base
class ClientTestCase(base.DietTestCase):
    """Tests for OpenDaylightRestClient required-option validation."""

    def setUp(self):
        self.cfg = self.useFixture(config_fixture.Config())
        self.cfg.config(mechanism_drivers=[
            'logger', 'opendaylight_v2'], group='ml2')
        super(ClientTestCase, self).setUp()

    def _set_config(self, url='http://127.0.0.1:9999', username='someuser',
                    password='somepass'):
        """Override the three ml2_odl options the client validates."""
        for option, value in (('url', url),
                              ('username', username),
                              ('password', password)):
            self.cfg.config(group='ml2_odl', **{option: value})

    def _test_missing_config(self, **kwargs):
        # A None value for any required option must raise RequiredOptError.
        self._set_config(**kwargs)
        self.assertRaisesRegex(cfg.RequiredOptError,
                               r'value required for option \w+ in group '
                               r'\[ml2_odl\]',
                               client.OpenDaylightRestClient._check_opt,
                               cfg.CONF.ml2_odl.url)

    def test_valid_config(self):
        # With all options set, validation must pass silently.
        self._set_config()
        client.OpenDaylightRestClient._check_opt(cfg.CONF.ml2_odl.url)

    def test_missing_url_raises_exception(self):
        self._test_missing_config(url=None)

    def test_missing_username_raises_exception(self):
        self._test_missing_config(username=None)

    def test_missing_password_raises_exception(self):
        self._test_missing_config(password=None)
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
from neutron.tests import base
from neutron_lib import constants as n_const
from networking_odl.common import filters
# A port binding profile as a dict and its expected JSON-serialized form;
# the filters under test must turn the former into exactly the latter.
PROFILE = {"capabilities": ["switchdev"]}
PROFILE_STR = '{"capabilities": ["switchdev"]}'
# Minimal Neutron port dict used as input for the port create/update filters.
FAKE_PORT = {'status': 'DOWN',
             'binding:host_id': '',
             'allowed_address_pairs': [],
             'device_owner': 'fake_owner',
             'binding:profile': {"capabilities": ["switchdev"]},
             'fixed_ips': [],
             'id': '72c56c48-e9b8-4dcf-b3a7-0813bb3bd839',
             'security_groups': [],
             'device_id': 'fake_device',
             'name': '',
             'admin_state_up': True,
             'network_id': 'c13bba05-eb07-45ba-ace2-765706b2d701',
             'tenant_id': 'bad_tenant_id',
             'binding:vif_details': {},
             'binding:vnic_type': 'normal',
             'binding:vif_type': 'unbound',
             'mac_address': '12:34:56:78:21:b6'}
class TestFilters(base.DietTestCase):
    """Tests for the request filters applied before data is sent to ODL."""

    def _check_id(self, resource, project_id):
        filters._populate_project_id_and_tenant_id(resource)
        for key in ('project_id', 'tenant_id'):
            self.assertIn(resource[key], project_id)

    def _test_populate_project_id_and_tenant_id(self, project_id):
        # Either id alone, or both together, must yield both keys.
        for resource in ({'project_id': project_id},
                         {'tenant_id': project_id},
                         {'project_id': project_id,
                          'tenant_id': project_id}):
            self._check_id(resource, project_id)

    def test_populate_project_id_and_tenant_id_with_id(self):
        self._test_populate_project_id_and_tenant_id(
            '01234567-890a-bcde-f012-3456789abcde')
        self._test_populate_project_id_and_tenant_id("")

    def test_populate_project_id_and_tenant_id_without_id(self):
        # A dict without either id must be left without both keys.
        resource = {}
        filters._populate_project_id_and_tenant_id(resource)
        self.assertNotIn('project_id', resource)
        self.assertNotIn('tenant_id', resource)

    def test_populate_project_id_and_tenant_id_with_router(self):
        # test case for OpenDaylightL3RouterPlugin.delete_router()
        # it passes data as dependency_list as list, not dict
        original = ['gw_port_id']
        candidate = list(original)
        filters._populate_project_id_and_tenant_id(candidate)
        self.assertEqual(original, candidate)

    def test_populate_project_id_and_tenant_id_with_floatingip(self):
        # test case for OpenDaylightL3RouterPlugin.delete_floatingip()
        # it passes data as dependency_list as list, not dict.
        original = ['router_uuid', 'floatingip_uuid']
        candidate = list(original)
        filters._populate_project_id_and_tenant_id(candidate)
        self.assertEqual(original, candidate)

    def test_sgrule_scrub_unknown_protocol_name(self):
        scrub = filters._sgrule_scrub_unknown_protocol_name
        # Known protocol names pass through untouched.
        for protocol_name in (n_const.PROTO_NAME_TCP,
                              n_const.PROTO_NAME_UDP,
                              n_const.PROTO_NAME_ICMP,
                              n_const.PROTO_NAME_IPV6_ICMP_LEGACY):
            self.assertEqual(protocol_name, scrub(protocol_name))
        # Other names are mapped to their protocol number; numeric
        # strings are kept as given.
        self.assertEqual(n_const.PROTO_NUM_AH,
                         scrub(n_const.PROTO_NAME_AH))
        self.assertEqual("1", scrub("1"))

    def test_sgrule_scrub_icmpv6_name(self):
        # Every ICMP spelling on an IPv6 rule collapses to the legacy name.
        for protocol_name in (n_const.PROTO_NAME_ICMP,
                              n_const.PROTO_NAME_IPV6_ICMP,
                              n_const.PROTO_NAME_IPV6_ICMP_LEGACY):
            rule = {'ethertype': n_const.IPv6,
                    'protocol': protocol_name}
            filters._sgrule_scrub_icmpv6_name(rule)
            self.assertEqual(n_const.PROTO_NAME_IPV6_ICMP_LEGACY,
                             rule['protocol'])

    def test_convert_value_to_string(self):
        # Only the named key is serialized; other values stay untouched.
        port = {"binding:profile": PROFILE,
                "other_param": ["some", "values"]}
        filters._convert_value_to_str(port, 'binding:profile')
        self.assertIs(type(port['binding:profile']), str)
        self.assertEqual(port['binding:profile'], PROFILE_STR)
        self.assertIsNot(type(port['other_param']), str)

    def test_convert_value_to_string_unicode(self):
        port = {"binding:profile": {u"capabilities": [u"switchdev"]}}
        filters._convert_value_to_str(port, "binding:profile")
        self.assertEqual(port["binding:profile"], PROFILE_STR)

    def test_convert_value_to_string_missing_key_is_logged(self):
        port = {}
        with mock.patch.object(filters, 'LOG') as mock_log:
            filters._convert_value_to_str(port, 'invalid_key')
        mock_log.warning.assert_called_once_with(
            "key %s is not present in dict %s", 'invalid_key', port)

    def _filter_port_func_binding_profile_to_string(self, func):
        """Run *func* on a copy of FAKE_PORT; profile must be stringified."""
        port = copy.deepcopy(FAKE_PORT)
        func(port)
        self.assertEqual(port["binding:profile"], PROFILE_STR)

    def test_filter_port_create_binding_profile_string(self):
        self._filter_port_func_binding_profile_to_string(
            filters._filter_port_create)

    def test_filter_port_update_binding_profile_string(self):
        self._filter_port_func_binding_profile_to_string(
            filters._filter_port_update)
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from networking_odl.common import callback
from networking_odl.common import constants as odl_const
from networking_odl.tests import base
import mock
from neutron_lib.callbacks import events
from neutron_lib.callbacks import resources
import testtools
# Placeholder resource id shared by the update/delete callback tests below.
FAKE_ID = 'fakeid'
class ODLCallbackTestCase(testtools.TestCase):
    """Tests for callback.OdlSecurityGroupsHandler.

    The handler is built with two journal callables (precommit and
    postcommit). Each test fires a security-group or security-group-rule
    callback through the handler and verifies the corresponding callable
    was invoked with the operation, mapped resource type, object id and
    payload exactly as composed by the handler.
    """
    def setUp(self):
        self.useFixture(base.OpenDaylightRestClientFixture())
        super(ODLCallbackTestCase, self).setUp()
        # Stand-ins for the journal entry points the handler must call.
        self._precommit = mock.Mock()
        self._postcommit = mock.Mock()
        self.sgh = callback.OdlSecurityGroupsHandler(self._precommit,
                                                     self._postcommit)
    def _test_callback_precommit_for_sg(self, event, op, sg, sg_id):
        """Fire a SG precommit callback; check the precommit call args."""
        plugin_context_mock = mock.Mock()
        # Delete callbacks pass sg=None, in which case no dict is forwarded.
        expected_dict = ({resources.SECURITY_GROUP: sg}
                         if sg is not None else None)
        self.sgh.sg_callback_precommit(resources.SECURITY_GROUP,
                                       event,
                                       None,
                                       context=plugin_context_mock,
                                       security_group=sg,
                                       security_group_id=sg_id)
        self._precommit.assert_called_with(
            plugin_context_mock, op,
            callback._RESOURCE_MAPPING[resources.SECURITY_GROUP], sg_id,
            expected_dict, security_group=sg, security_group_id=sg_id)
    def _test_callback_postcommit_for_sg(self, event, op, sg, sg_id):
        """Fire a SG postcommit callback; check the postcommit call args."""
        plugin_context_mock = mock.Mock()
        expected_dict = ({resources.SECURITY_GROUP: sg}
                         if sg is not None else None)
        self.sgh.sg_callback_postcommit(resources.SECURITY_GROUP,
                                        event,
                                        None,
                                        context=plugin_context_mock,
                                        security_group=sg,
                                        security_group_id=sg_id)
        self._postcommit.assert_called_with(
            plugin_context_mock, op,
            callback._RESOURCE_MAPPING[resources.SECURITY_GROUP], sg_id,
            expected_dict, security_group=sg, security_group_id=sg_id)
    def test_callback_precommit_sg_create(self):
        sg = mock.Mock()
        sg_id = sg.get('id')
        self._test_callback_precommit_for_sg(
            events.PRECOMMIT_CREATE, odl_const.ODL_CREATE, sg, sg_id)
    def test_callback_postcommit_sg_create(self):
        sg = mock.Mock()
        sg_id = sg.get('id')
        self._test_callback_postcommit_for_sg(
            events.AFTER_CREATE, odl_const.ODL_CREATE, sg, sg_id)
    def test_callback_precommit_sg_update(self):
        self._test_callback_precommit_for_sg(
            events.PRECOMMIT_UPDATE, odl_const.ODL_UPDATE, mock.Mock(),
            FAKE_ID)
    def test_callback_postcommit_sg_update(self):
        self._test_callback_postcommit_for_sg(
            events.AFTER_UPDATE, odl_const.ODL_UPDATE, mock.Mock(), FAKE_ID)
    def test_callback_precommit_sg_delete(self):
        # Delete events carry no security group body, only the id.
        self._test_callback_precommit_for_sg(
            events.PRECOMMIT_DELETE, odl_const.ODL_DELETE, None, FAKE_ID)
    def test_callback_postcommit_sg_delete(self):
        self._test_callback_postcommit_for_sg(
            events.AFTER_DELETE, odl_const.ODL_DELETE, None, FAKE_ID)
    def _test_callback_precommit_for_sg_rules(
            self, event, op, sg_rule, sg_rule_id):
        """Fire a SG-rule precommit callback; check the precommit args."""
        plugin_context_mock = mock.Mock()
        expected_dict = ({resources.SECURITY_GROUP_RULE: sg_rule}
                         if sg_rule is not None else None)
        self.sgh.sg_callback_precommit(resources.SECURITY_GROUP_RULE,
                                       event,
                                       None,
                                       context=plugin_context_mock,
                                       security_group_rule=sg_rule,
                                       security_group_rule_id=sg_rule_id)
        self._precommit.assert_called_with(
            plugin_context_mock, op,
            callback._RESOURCE_MAPPING[resources.SECURITY_GROUP_RULE],
            sg_rule_id, expected_dict, security_group_rule=sg_rule,
            security_group_rule_id=sg_rule_id)
    def _test_callback_postcommit_for_sg_rules(
            self, event, op, sg_rule, sg_rule_id):
        """Fire a SG-rule postcommit callback; check the postcommit args."""
        plugin_context_mock = mock.Mock()
        expected_dict = ({resources.SECURITY_GROUP_RULE: sg_rule}
                         if sg_rule is not None else None)
        self.sgh.sg_callback_postcommit(resources.SECURITY_GROUP_RULE,
                                        event,
                                        None,
                                        context=plugin_context_mock,
                                        security_group_rule=sg_rule,
                                        security_group_rule_id=sg_rule_id)
        self._postcommit.assert_called_with(
            plugin_context_mock, op,
            callback._RESOURCE_MAPPING[resources.SECURITY_GROUP_RULE],
            sg_rule_id, expected_dict,
            security_group_rule=sg_rule, security_group_rule_id=sg_rule_id,
            )
    def test_callback_precommit_sg_rules_create(self):
        rule = mock.Mock()
        rule_id = rule.get('id')
        self._test_callback_precommit_for_sg_rules(
            events.PRECOMMIT_CREATE, odl_const.ODL_CREATE, rule, rule_id)
    def test_callback_postcommit_sg_rules_create(self):
        rule = mock.Mock()
        rule_id = rule.get('id')
        self._test_callback_postcommit_for_sg_rules(
            events.AFTER_CREATE, odl_const.ODL_CREATE, rule, rule_id)
    def test_callback_precommit_sg_rules_delete(self):
        self._test_callback_precommit_for_sg_rules(
            events.PRECOMMIT_DELETE, odl_const.ODL_DELETE, None, FAKE_ID)
    def test_callback_postcommit_sg_rules_delete(self):
        self._test_callback_postcommit_for_sg_rules(
            events.AFTER_DELETE, odl_const.ODL_DELETE, None, FAKE_ID)
    def test_callback_exception(self):
        """A callback failure must be logged and then re-raised."""
        class TestException(Exception):
            def __init__(self):
                pass
        self._precommit.side_effect = TestException()
        resource = callback._RESOURCE_MAPPING[resources.SECURITY_GROUP_RULE]
        op = callback._OPERATION_MAPPING[events.PRECOMMIT_CREATE]
        rule = mock.Mock()
        rule_id = rule.get('id')
        with mock.patch.object(callback, 'LOG') as log_mock:
            self.assertRaises(TestException,
                              self._test_callback_precommit_for_sg_rules,
                              events.PRECOMMIT_CREATE, odl_const.ODL_CREATE,
                              rule, rule_id)
            # The handler logs via LOG.log with its structured template.
            log_mock.log.assert_called_with(
                logging.ERROR, callback.LOG_TEMPLATE,
                {'msg': 'Exception from callback', 'op': op,
                 'res_type': resource, 'res_id': rule_id,
                 'res_dict': {odl_const.ODL_SG_RULE: rule},
                 'data': {odl_const.ODL_SG_RULE: rule,
                          'security_group_rule_id': rule_id},
                 'exc_info': True})
networking-odl-16.0.0/networking_odl/tests/unit/common/test_odl_features.py0000664000175000017500000001607513656750541027333 0ustar zuulzuul00000000000000# Copyright (c) 2017 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import fixture as config_fixture
from oslo_serialization import jsonutils
from requests import exceptions
from networking_odl.common.client import OpenDaylightRestClient
from networking_odl.common import odl_features
from networking_odl.tests import base
class TestOdlFeatures(base.DietTestCase):
    """Basic tests for odl_features.

    Covers the three sources of feature configuration: fetched from the
    ODL controller (with various HTTP outcomes), the odl_features_json
    config option, and the legacy odl_features comma-separated list.
    """
    # Controller-style JSON: one bare feature plus one carrying a
    # configuration payload.
    feature_json = """{"features": {"feature":
                    [{"service-provider-feature":
                    "neutron-extensions:operational-port-status"},
                    {"service-provider-feature":
                    "neutron-extensions:feature-with-config",
                    "configuration": "steal-your-face"}]}}"""
    # Legacy comma-separated feature list format.
    feature_list = 'thing1, thing2'
    def setUp(self):
        self.features_fixture = base.OpenDaylightFeaturesFixture()
        self.useFixture(self.features_fixture)
        self.cfg = self.useFixture(config_fixture.Config())
        super(TestOdlFeatures, self).setUp()
        # odl_features keeps module-global state; reset it between tests.
        self.addCleanup(odl_features.deinit)
    @mock.patch.object(OpenDaylightRestClient, 'request')
    def test_fetch_exception(self, mocked_client):
        # A connection failure yields None so the caller can retry later.
        mocked_client.side_effect = exceptions.ConnectionError()
        self.assertIsNone(odl_features._fetch_features())
    @mock.patch.object(OpenDaylightRestClient, 'request')
    def test_fetch_404(self, mocked_client):
        # 404 means an older ODL without the feature endpoint: a fresh
        # (non-shared) empty feature set is returned.
        mocked_client.return_value = mock.MagicMock(status_code=404)
        self.assertNotEqual(id(odl_features._fetch_features()),
                            id(odl_features.EMPTY_FEATURES))
    @mock.patch.object(OpenDaylightRestClient, 'request')
    def test_fetch_400(self, mocked_client):
        mocked_client.return_value = mock.MagicMock(status_code=400)
        self.assertNotEqual(id(odl_features._fetch_features()),
                            id(odl_features.EMPTY_FEATURES))
    @mock.patch.object(OpenDaylightRestClient, 'request')
    def test_fetch_500(self, mocked_client):
        # Server errors are treated as transient: None, not empty.
        mocked_client.return_value = mock.MagicMock(status_code=500)
        self.assertIsNone(odl_features._fetch_features())
    @mock.patch.object(OpenDaylightRestClient, 'request')
    def test_init(self, mocked_client):
        # With no config overrides, init() pulls features from ODL.
        self.cfg.config(odl_features=None, group='ml2_odl')
        self.cfg.config(odl_features_json=None, group='ml2_odl')
        response = mock.MagicMock()
        response.status_code = 200
        response.json = mock.MagicMock(
            return_value=jsonutils.loads(self.feature_json))
        mocked_client.return_value = response
        self._assert_odl_feature_config({
            odl_features.OPERATIONAL_PORT_STATUS: '',
            'feature-with-config': 'steal-your-face',
        })
    def _assert_odl_feature_config(self, features):
        """init() then verify each feature exists with its config value."""
        odl_features.init()
        for k, v in features.items():
            self.assertTrue(odl_features.has(k))
            self.assertEqual(odl_features.get_config(k), v)
    def test_init_from_config_json(self):
        self.cfg.config(odl_features_json=self.feature_json, group='ml2_odl')
        self._assert_odl_feature_config({
            odl_features.OPERATIONAL_PORT_STATUS: '',
            'feature-with-config': 'steal-your-face',
        })
    @mock.patch.object(odl_features, '_fetch_features')
    def test_init_without_config_calls__fetch_features(self, mock_fetch):
        self.cfg.config(odl_features_json=None, group='ml2_odl')
        self.cfg.config(odl_features=None, group='ml2_odl')
        odl_features.init()
        mock_fetch.assert_called_once()
    @mock.patch.object(odl_features, '_fetch_features')
    def test_init_from_config_list(self, mock_fetch):
        self.cfg.config(odl_features_json=None, group='ml2_odl')
        self.cfg.config(odl_features=self.feature_list, group='ml2_odl')
        odl_features.init()
        self.assertTrue(odl_features.has('thing1'))
        self.assertTrue(odl_features.has('thing2'))
        mock_fetch.assert_not_called()
    @mock.patch.object(odl_features, '_fetch_features')
    def test_init_from_json_overrides_list(self, mock_fetch):
        # When both options are set, the JSON option wins outright.
        self.cfg.config(odl_features=self.feature_list, group='ml2_odl')
        self.cfg.config(odl_features_json=self.feature_json, group='ml2_odl')
        odl_features.init()
        self.assertFalse(odl_features.has('thing1'))
        self.assertTrue(odl_features.has('operational-port-status'))
        mock_fetch.assert_not_called()
    @mock.patch.object(odl_features, '_fetch_features')
    def test_init_with_config_does_not_call__fetch_features(self, mock_fetch):
        self.cfg.config(odl_features_json=self.feature_json, group='ml2_odl')
        odl_features.init()
        mock_fetch.assert_not_called()
    def test_init_from_config_malformed_json_raises_exception(self):
        malformed_json = ")]}'" + self.feature_json
        self.cfg.config(odl_features_json=malformed_json, group='ml2_odl')
        self.assertRaises(ValueError, odl_features.init)
    def test_init_from_config_json_not_in_odl_format_raises_exception(self):
        self.cfg.config(odl_features_json='{}', group='ml2_odl')
        self.assertRaises(KeyError, odl_features.init)
class TestOdlFeaturesNoFixture(base.DietTestCase):
    """Basic tests for odl_features that don't call init()."""

    def setUp(self):
        super(TestOdlFeaturesNoFixture, self).setUp()
        self.addCleanup(odl_features.deinit)

    def test_feature_configs_does_not_mutate_default_features(self):
        # After deinit() the live configs equal the defaults...
        odl_features.deinit()
        self.assertEqual(odl_features.feature_configs,
                         odl_features.EMPTY_FEATURES)
        # ...but mutating them must not leak into EMPTY_FEATURES.
        odl_features.feature_configs['test'] = True
        self.assertNotEqual(odl_features.feature_configs,
                            odl_features.EMPTY_FEATURES)

    def test_deinit_does_not_mutate_default_features(self):
        # we call it before initing anything, to force the reassignment
        # of the global features variable.
        odl_features.deinit()
        odl_features.feature_configs['test'] = True
        self.assertNotEqual(odl_features.feature_configs,
                            odl_features.EMPTY_FEATURES)
        # now we do it again, to make sure that it assigns it to default
        # values
        odl_features.deinit()
        self.assertEqual(odl_features.feature_configs,
                         odl_features.EMPTY_FEATURES)

    def test_deinit_resets_to_default_features(self):
        odl_features.deinit()
        self.assertEqual(odl_features.EMPTY_FEATURES,
                         odl_features.feature_configs)
networking-odl-16.0.0/networking_odl/tests/unit/common/test_lightweight_testing.py0000664000175000017500000002047013656750541030725 0ustar zuulzuul00000000000000# Copyright (c) 2015 Intel Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.tests import base
from oslo_config import fixture as config_fixture
from networking_odl.common import lightweight_testing as lwt
from networking_odl.tests import base as odl_base
class LightweightTestingTestCase(base.DietTestCase):
    """Tests for the in-memory lightweight-testing ODL client.

    OpenDaylightLwtClient stores resources in the class-level lwt_dict;
    each test seeds that dict via mock.patch.dict so state never leaks
    between tests, then exercises one HTTP verb through sendjson().
    """
    def setUp(self):
        self.useFixture(odl_base.OpenDaylightRestClientFixture())
        self.cfg = self.useFixture(config_fixture.Config())
        super(LightweightTestingTestCase, self).setUp()
    def test_create_client_with_lwt_enabled(self):
        """Have to do the importation here, otherwise there will be a loop"""
        from networking_odl.common import client as odl_client
        self.cfg.config(enable_lightweight_testing=True, group='ml2_odl')
        # DietTestCase does not automatically cleans configuration overrides
        self.addCleanup(odl_client.cfg.CONF.reset)
        client = odl_client.OpenDaylightRestClient.create_client()
        self.assertIsInstance(client, lwt.OpenDaylightLwtClient)
    def test_create_client_with_lwt_disabled(self):
        """Have to do the importation here, otherwise there will be a loop"""
        from networking_odl.common import client as odl_client
        self.cfg.config(enable_lightweight_testing=False, group='ml2_odl')
        # DietTestCase does not automatically cleans configuration overrides
        self.addCleanup(odl_client.cfg.CONF.reset)
        client = odl_client.OpenDaylightRestClient.create_client()
        self.assertIsInstance(client, odl_client.OpenDaylightRestClient)
    @mock.patch.dict(lwt.OpenDaylightLwtClient.lwt_dict,
                     {'networks': {}}, clear=True)
    def test_post_single_resource(self):
        # POST with a single dict stores it keyed by its id.
        client = lwt.OpenDaylightLwtClient.create_client()
        fake_network1 = {'id': 'fakeid1', 'name': 'fake_network1'}
        obj = {'networks': fake_network1}
        response = client.sendjson('post', 'networks', obj)
        self.assertEqual(lwt.NO_CONTENT, response.status_code)
        lwt_dict = lwt.OpenDaylightLwtClient.lwt_dict
        self.assertEqual(lwt_dict['networks']['fakeid1'],
                         fake_network1)
    @mock.patch.dict(lwt.OpenDaylightLwtClient.lwt_dict,
                     {'networks': {}}, clear=True)
    def test_post_multiple_resources(self):
        # POST with a list stores each element keyed by its id.
        client = lwt.OpenDaylightLwtClient.create_client()
        fake_network1 = {'id': 'fakeid1', 'name': 'fake_network1'}
        fake_network2 = {'id': 'fakeid2', 'name': 'fake_network2'}
        obj = {'networks': [fake_network1, fake_network2]}
        response = client.sendjson('post', 'networks', obj)
        self.assertEqual(lwt.NO_CONTENT, response.status_code)
        lwt_dict = lwt.OpenDaylightLwtClient.lwt_dict
        self.assertEqual(lwt_dict['networks']['fakeid1'],
                         fake_network1)
        self.assertEqual(lwt_dict['networks']['fakeid2'],
                         fake_network2)
    @mock.patch.dict(lwt.OpenDaylightLwtClient.lwt_dict,
                     {'ports': {'fakeid1': {'id': 'fakeid1',
                                            'name': 'fake_port1'}}},
                     clear=True)
    def test_get_single_resource(self):
        client = lwt.OpenDaylightLwtClient.create_client()
        url_path = 'ports/fakeid1'
        response = client.sendjson('get', url_path, None)
        self.assertEqual(lwt.OK, response.status_code)
        res = response.json()
        # For single resource, the return value is a dict
        self.assertEqual(res['port']['name'], 'fake_port1')
    @mock.patch.dict(lwt.OpenDaylightLwtClient.lwt_dict,
                     {'ports': {'fakeid1': {'id': 'fakeid1',
                                            'name': 'fake_port1'},
                                'fakeid2': {'id': 'fakeid2',
                                            'name': 'fake_port2'}}},
                     clear=True)
    def test_get_multiple_resources(self):
        # GET on the collection URL returns a list of singular-keyed dicts.
        client = lwt.OpenDaylightLwtClient.create_client()
        url_path = 'ports/'
        response = client.sendjson('get', url_path, None)
        self.assertEqual(lwt.OK, response.status_code)
        res = response.json()
        for port in res:
            self.assertIn(port['port']['name'],
                          ['fake_port1', 'fake_port2'])
    @mock.patch.dict(lwt.OpenDaylightLwtClient.lwt_dict,
                     {'subnets': {'fakeid1': {'id': 'fakeid1',
                                              'name': 'fake_subnet1'}}},
                     clear=True)
    def test_put_single_resource(self):
        client = lwt.OpenDaylightLwtClient.create_client()
        changed = {'id': 'fakeid1', 'name': 'fake_subnet1_changed'}
        obj = {'subnets': changed}
        url_path = 'subnets/fakeid1'
        response = client.sendjson('put', url_path, obj)
        self.assertEqual(lwt.NO_CONTENT, response.status_code)
        lwt_dict = lwt.OpenDaylightLwtClient.lwt_dict
        self.assertEqual('fake_subnet1_changed',
                         lwt_dict['subnets']['fakeid1']['name'])
        """Check the client does not change the parameter"""
        self.assertEqual('fakeid1', changed['id'])
        self.assertEqual('fake_subnet1_changed', changed['name'])
    @mock.patch.dict(lwt.OpenDaylightLwtClient.lwt_dict,
                     {'subnets': {'fakeid1': {'id': 'fakeid1',
                                              'name': 'fake_subnet1'},
                                  'fakeid2': {'id': 'fakeid2',
                                              'name': 'fake_subnet2'}}},
                     clear=True)
    def test_put_multiple_resources(self):
        client = lwt.OpenDaylightLwtClient.create_client()
        changed1 = {'id': 'fakeid1', 'name': 'fake_subnet1_changed'}
        changed2 = {'id': 'fakeid2', 'name': 'fake_subnet2_changed'}
        obj = {'subnets': [changed1, changed2]}
        url_path = 'subnets/'
        response = client.sendjson('put', url_path, obj)
        self.assertEqual(lwt.NO_CONTENT, response.status_code)
        lwt_dict = lwt.OpenDaylightLwtClient.lwt_dict
        self.assertEqual('fake_subnet1_changed',
                         lwt_dict['subnets']['fakeid1']['name'])
        self.assertEqual('fake_subnet2_changed',
                         lwt_dict['subnets']['fakeid2']['name'])
    @mock.patch.dict(lwt.OpenDaylightLwtClient.lwt_dict,
                     {'networks': {'fakeid1': {'id': 'fakeid1',
                                               'name': 'fake_network1'}}},
                     clear=True)
    def test_delete_single_resource(self):
        client = lwt.OpenDaylightLwtClient.create_client()
        url_path = 'networks/fakeid1'
        response = client.sendjson('delete', url_path, None)
        self.assertEqual(lwt.NO_CONTENT, response.status_code)
        lwt_dict = lwt.OpenDaylightLwtClient.lwt_dict
        network = lwt_dict['networks'].get('fakeid1')
        self.assertIsNone(network)
    @mock.patch.dict(lwt.OpenDaylightLwtClient.lwt_dict,
                     {'networks': {'fakeid1': {'id': 'fakeid1',
                                               'name': 'fake_network1'},
                                   'fakeid2': {'id': 'fakeid2',
                                               'name': 'fake_network2'}}},
                     clear=True)
    def test_delete_multiple_resources(self):
        # DELETE on the collection URL with a body removes each listed id.
        client = lwt.OpenDaylightLwtClient.create_client()
        network1 = {'id': 'fakeid1'}
        network2 = {'id': 'fakeid2'}
        obj = {'networks': [network1, network2]}
        response = client.sendjson('delete', 'networks/', obj)
        self.assertEqual(lwt.NO_CONTENT, response.status_code)
        lwt_dict = lwt.OpenDaylightLwtClient.lwt_dict
        network = lwt_dict['networks'].get('fakeid1')
        self.assertIsNone(network)
        network = lwt_dict['networks'].get('fakeid2')
        self.assertIsNone(network)
networking-odl-16.0.0/networking_odl/tests/unit/qos/0000775000175000017500000000000013656750617022553 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/tests/unit/qos/__init__.py0000664000175000017500000000000013656750541024646 0ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/tests/unit/qos/test_qos_driver_v2.py0000664000175000017500000001003213656750541026740 0ustar zuulzuul00000000000000# Copyright (c) 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron_lib.db import api as db_api
from networking_odl.common import constants as odl_const
from networking_odl.common import odl_features
from networking_odl.db import db
from networking_odl.qos import qos_driver_v2 as qos_driver
from networking_odl.tests import base
from networking_odl.tests.unit import base_v2
class OpenDaylightQosDriverTestCase(base_v2.OpenDaylightConfigBase):
    """Unit tests for the OpenDaylight QoS v2 driver.

    The precommit hooks are expected to record a journal row for every
    QoS policy operation; these tests drive the hooks with mocked policy
    data and then inspect the oldest pending journal entry.
    """

    def setUp(self):
        self.useFixture(base.OpenDaylightJournalThreadFixture())
        super(OpenDaylightQosDriverTestCase, self).setUp()
        self.qos_driver = qos_driver.OpenDaylightQosDriver({})
        self.addCleanup(odl_features.deinit)

    def test_qos_supported_rules_are_fetched_from_odl_feature(self):
        feature_json = """{"features": {"feature":
                        [{"service-provider-feature":
                        "neutron-extensions:operational-port-status"},
                        {"service-provider-feature":
                        "neutron-extensions:qos-rules",
                        "configuration": {"key": "value"}}]}}"""
        self.cfg.config(odl_features_json=feature_json, group='ml2_odl')
        odl_features.init()
        created_driver = qos_driver.OpenDaylightQosDriver.create()
        # The qos-rules feature's "configuration" blob becomes the
        # driver's supported-rules map verbatim.
        self.assertDictEqual(created_driver.supported_rules,
                             {'key': 'value'})

    def test_default_values_for_supported_rules(self):
        self.cfg.config(odl_features='key', group='ml2_odl')
        odl_features.init()
        created_driver = qos_driver.OpenDaylightQosDriver.create()
        self.assertDictEqual(created_driver.supported_rules,
                             qos_driver.DEFAULT_QOS_RULES)

    def _get_mock_context(self, session=None):
        # The driver only reads ``current`` and ``session`` from the
        # context, so a bare Mock carrying those attributes is enough.
        ctx = mock.Mock(current={'tenant_id': 'tenant_id'})
        ctx.session = session
        return ctx

    def _get_mock_qos_operation_data(self):
        # Stand-in for a QoS policy object: only ``to_dict()`` is used.
        policy_dict = {'description': u"qos_policy",
                       'rules': [],
                       'tenant_id': 'test-tenant',
                       'shared': False,
                       'id': 'qos-policy1',
                       'name': u"policy1"}
        policy = mock.Mock()
        policy.to_dict = mock.Mock(return_value=policy_dict)
        return policy

    def _call_operation_object(self, operation, object_type):
        # Only precommit hooks run inside the writer transaction.
        assert object_type.endswith("precommit")
        policy = self._get_mock_qos_operation_data()
        handler = getattr(self.qos_driver,
                          '%s_%s' % (operation, object_type))
        with db_api.CONTEXT_WRITER.using(self.db_context):
            handler(self._get_mock_context(self.db_context.session), policy)

    def _test_qos_policy(self, operation):
        self._call_operation_object(operation=operation,
                                    object_type='policy_precommit')
        expected_uuid = self._get_mock_qos_operation_data().to_dict()['id']
        row = db.get_oldest_pending_db_row_with_lock(self.db_context)
        self.assertEqual(operation, row['operation'])
        self.assertEqual(expected_uuid, row['object_uuid'])

    def test_qos_policy_create(self):
        self._test_qos_policy(odl_const.ODL_CREATE)

    def test_qos_policy_update(self):
        self._test_qos_policy(odl_const.ODL_UPDATE)

    def test_qos_policy_delete(self):
        self._test_qos_policy(odl_const.ODL_DELETE)
networking-odl-16.0.0/networking_odl/tests/unit/l2gateway/0000775000175000017500000000000013656750617023650 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/tests/unit/l2gateway/__init__.py0000664000175000017500000000000013656750541025743 0ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/tests/unit/l2gateway/test_driver_v2.py0000664000175000017500000001356313656750541027167 0ustar zuulzuul00000000000000#
# Copyright (C) 2017 Ericsson India Global Services Pvt Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from networking_odl.common import constants as odl_const
from networking_odl.db import db
from networking_odl.l2gateway import driver_v2 as driverv2
from networking_odl.tests.unit import base_v2
class OpenDaylightL2GWDriverTestCase(base_v2.OpenDaylightConfigBase):
    """Unit tests for the OpenDaylight L2 gateway v2 driver.

    Every operation is checked twice: the precommit hook must record a
    journal row, and the postcommit hook must leave no pending row once
    the journal entry has been processed.
    """

    def setUp(self):
        super(OpenDaylightL2GWDriverTestCase, self).setUp()
        self.driver = driverv2.OpenDaylightL2gwDriver(service_plugin=None)

    def _get_fake_l2_gateway(self):
        # Static two-device gateway payload.
        return {
            "tenant_id": "de0a7495-05c4-4be0-b796-1412835c6820",
            "id": "5227c228-6bba-4bbe-bdb8-6942768ff0f1",
            "name": "test-gateway",
            "devices": [
                {
                    "device_name": "switch1",
                    "interfaces": [
                        {"name": "port1", "segmentation_id": [100]},
                        {"name": "port2", "segmentation_id": [151, 152]},
                    ],
                },
                {
                    "device_name": "switch2",
                    "interfaces": [
                        {"name": "port5", "segmentation_id": [200]},
                        {"name": "port6", "segmentation_id": [251, 252]},
                    ],
                },
            ],
        }

    def _get_fake_l2_gateway_connection(self):
        # Static connection payload referencing the gateway above.
        return {
            "tenant_id": "de0a7495-05c4-4be0-b796-1412835c6820",
            "id": "5227c228-6bba-4bbe-bdb8-6942768ff02f",
            "network_id": "be0a7495-05c4-4be0-b796-1412835c6830",
            "default_segmentation_id": 77,
            "l2_gateway_id": "5227c228-6bba-4bbe-bdb8-6942768ff0f1",
        }

    def _assert_op(self, operation, object_type, data, precommit=True):
        journal_row = db.get_oldest_pending_db_row_with_lock(self.db_context)
        if not precommit:
            # After postcommit no pending journal entry may remain.
            self.assertIsNone(journal_row)
            return
        self.db_context.session.flush()
        self.assertEqual(operation, journal_row['operation'])
        self.assertEqual(object_type, journal_row['object_type'])
        self.assertEqual(data['id'], journal_row['object_uuid'])

    def _run_both_phases(self, precommit, postcommit, odl_op, odl_type,
                         data, arg):
        # Drive precommit then postcommit with ``arg`` and verify the
        # journal state after each phase.
        precommit(self.db_context, arg)
        self._assert_op(odl_op, odl_type, data)
        postcommit(self.db_context, arg)
        self._assert_op(odl_op, odl_type, data, False)

    def test_create_l2_gateway(self):
        gw = self._get_fake_l2_gateway()
        self._run_both_phases(self.driver.create_l2_gateway_precommit,
                              self.driver.create_l2_gateway_postcommit,
                              odl_const.ODL_CREATE, odl_const.ODL_L2GATEWAY,
                              gw, gw)

    def test_delete_l2_gateway(self):
        gw = self._get_fake_l2_gateway()
        self._run_both_phases(self.driver.delete_l2_gateway_precommit,
                              self.driver.delete_l2_gateway_postcommit,
                              odl_const.ODL_DELETE, odl_const.ODL_L2GATEWAY,
                              gw, gw['id'])

    def test_update_l2_gateway(self):
        gw = self._get_fake_l2_gateway()
        self._run_both_phases(self.driver.update_l2_gateway_precommit,
                              self.driver.update_l2_gateway_postcommit,
                              odl_const.ODL_UPDATE, odl_const.ODL_L2GATEWAY,
                              gw, gw)

    def test_create_l2_gateway_connection(self):
        conn = self._get_fake_l2_gateway_connection()
        self._run_both_phases(
            self.driver.create_l2_gateway_connection_precommit,
            self.driver.create_l2_gateway_connection_postcommit,
            odl_const.ODL_CREATE, odl_const.ODL_L2GATEWAY_CONNECTION,
            conn, conn)

    def test_delete_l2_gateway_connection(self):
        conn = self._get_fake_l2_gateway_connection()
        self._run_both_phases(
            self.driver.delete_l2_gateway_connection_precommit,
            self.driver.delete_l2_gateway_connection_postcommit,
            odl_const.ODL_DELETE, odl_const.ODL_L2GATEWAY_CONNECTION,
            conn, conn['id'])
networking-odl-16.0.0/networking_odl/tests/unit/ceilometer/0000775000175000017500000000000013656750617024101 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/tests/unit/ceilometer/__init__.py0000664000175000017500000000000013656750541026174 0ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/tests/unit/ceilometer/network/0000775000175000017500000000000013656750617025572 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/tests/unit/ceilometer/network/__init__.py0000664000175000017500000000000013656750541027665 0ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/tests/unit/ceilometer/network/statistics/0000775000175000017500000000000013656750617027764 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/tests/unit/ceilometer/network/statistics/__init__.py0000664000175000017500000000000013656750541032057 0ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/tests/unit/ceilometer/network/statistics/opendaylight_v2/0000775000175000017500000000000013656750617033062 5ustar zuulzuul00000000000000././@LongLink0000000000000000000000000000015200000000000011213 Lustar 00000000000000networking-odl-16.0.0/networking_odl/tests/unit/ceilometer/network/statistics/opendaylight_v2/__init__.pynetworking-odl-16.0.0/networking_odl/tests/unit/ceilometer/network/statistics/opendaylight_v2/__init0000664000175000017500000000000013656750541034230 0ustar zuulzuul00000000000000././@LongLink0000000000000000000000000000015500000000000011216 Lustar 00000000000000networking-odl-16.0.0/networking_odl/tests/unit/ceilometer/network/statistics/opendaylight_v2/test_client.pynetworking-odl-16.0.0/networking_odl/tests/unit/ceilometer/network/statistics/opendaylight_v2/test_c0000664000175000017500000001074513656750541034271 0ustar zuulzuul00000000000000#
# Copyright 2017 Ericsson India Global Services Pvt Ltd.. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import fixture as config_fixture
from oslotest import base
from requests import auth as req_auth
import six
from six.moves.urllib import parse as urlparse
from ceilometer.i18n import _
from ceilometer import service as ceilometer_service
from networking_odl.ceilometer.network.statistics.opendaylight_v2 import client
class TestClientHTTPBasicAuth(base.BaseTestCase):
    """Tests for the ODL v2 statistics REST client over HTTP basic auth.

    Subclasses override ``auth_way``/``scheme`` to re-run the same test
    bodies with digest authentication and/or HTTPS.
    """

    auth_way = 'basic'
    scheme = 'http'

    def setUp(self):
        super(TestClientHTTPBasicAuth, self).setUp()
        # Patches here and in test_other_error() are started with
        # ``.start()``; without an explicit stopall they would leak the
        # patched ``requests.Session.get`` into later test classes.
        self.addCleanup(mock.patch.stopall)
        conf = ceilometer_service.prepare_service(argv=[], config_files=[])
        self.CONF = self.useFixture(config_fixture.Config(conf)).conf
        self.parsed_url = urlparse.urlparse(
            'http://127.0.0.1:8080/controller/statistics?'
            'auth=%s&user=admin&password=admin_pass&'
            'scheme=%s' % (self.auth_way, self.scheme))
        self.params = urlparse.parse_qs(self.parsed_url.query)
        # Rebuild the endpoint with the class' scheme and without the
        # query string; auth data travels via ``odl_params`` instead.
        self.endpoint = urlparse.urlunparse(
            urlparse.ParseResult(self.scheme,
                                 self.parsed_url.netloc,
                                 self.parsed_url.path,
                                 None, None, None))
        odl_params = {'auth': self.params.get('auth')[0],
                      'user': self.params.get('user')[0],
                      'password': self.params.get('password')[0]}
        self.client = client.Client(self.CONF, self.endpoint, odl_params)

        # Canned successful HTTP response served by the patched session.
        self.resp = mock.MagicMock()
        self.get = mock.patch('requests.Session.get',
                              return_value=self.resp).start()
        self.resp.raw.version = 1.1
        self.resp.status_code = 200
        self.resp.reason = 'OK'
        self.resp.headers = {}
        self.resp.content = 'dummy'

    def _test_request(self, method, url):
        """Invoke ``method`` and verify the HTTP request it issued."""
        data = method()
        call_args = self.get.call_args_list[0][0]
        call_kwargs = self.get.call_args_list[0][1]
        # check url
        real_url = url % {'scheme': self.scheme}
        self.assertEqual(real_url, call_args[0])
        # check auth parameters
        auth = call_kwargs.get('auth')
        if self.auth_way == 'digest':
            self.assertIsInstance(auth, req_auth.HTTPDigestAuth)
        else:
            self.assertIsInstance(auth, req_auth.HTTPBasicAuth)
        self.assertEqual('admin', auth.username)
        self.assertEqual('admin_pass', auth.password)
        # check header
        self.assertEqual(
            {'Accept': 'application/json'},
            call_kwargs['headers'])
        # check return value
        self.assertEqual(self.get().json(), data)

    def test_switch_statistics(self):
        self._test_request(
            self.client.switch_statistics.get_statistics,
            '%(scheme)s://127.0.0.1:8080/controller'
            '/statistics/flow-capable-switches')

    def test_http_error(self):
        self.resp.status_code = 404
        self.resp.reason = 'Not Found'
        try:
            self.client.switch_statistics.get_statistics()
            # Previously failed with an empty message, which made the
            # "exception not raised" case impossible to diagnose.
            self.fail('OpenDaylightRESTAPIFailed not raised for HTTP 404')
        except client.OpenDaylightRESTAPIFailed as e:
            self.assertEqual(
                _('OpenDaylight API returned %(status)s %(reason)s') %
                {'status': self.resp.status_code,
                 'reason': self.resp.reason},
                six.text_type(e))

    def test_other_error(self):
        class _Exception(Exception):
            pass

        # Non-HTTP failures from the session must propagate unchanged.
        self.get = mock.patch('requests.Session.get',
                              side_effect=_Exception).start()
        self.assertRaises(_Exception,
                          lambda:
                          self.client.switch_statistics.get_statistics())
class TestClientHTTPDigestAuth(TestClientHTTPBasicAuth):
    # Re-run the whole basic-auth suite using HTTP digest authentication.
    auth_way = 'digest'
class TestClientHTTPSBasicAuth(TestClientHTTPBasicAuth):
    # Re-run the basic-auth suite over HTTPS.
    scheme = 'https'
class TestClientHTTPSDigestAuth(TestClientHTTPDigestAuth):
    # Re-run the digest-auth suite over HTTPS.
    scheme = 'https'
././@LongLink0000000000000000000000000000015500000000000011216 Lustar 00000000000000networking-odl-16.0.0/networking_odl/tests/unit/ceilometer/network/statistics/opendaylight_v2/test_driver.pynetworking-odl-16.0.0/networking_odl/tests/unit/ceilometer/network/statistics/opendaylight_v2/test_d0000664000175000017500000006264513656750541034300 0ustar zuulzuul00000000000000#
# Copyright 2017 Ericsson India Global Services Pvt Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import mock
from oslotest import base
import six
from six.moves.urllib import parse as url_parse
from ceilometer import service
from networking_odl.ceilometer.network.statistics.opendaylight_v2 import driver
from oslo_utils import uuidutils
# Fixture identifiers shared by the driver test cases below: the admin
# project id plus two neutron ports and their owning tenants.  Fresh UUIDs
# are generated each test run; only identity/equality matters to the tests.
ADMIN_ID = uuidutils.generate_uuid()
PORT_1_TENANT_ID = uuidutils.generate_uuid()
PORT_2_TENANT_ID = uuidutils.generate_uuid()
PORT_1_ID = uuidutils.generate_uuid()
PORT_2_ID = uuidutils.generate_uuid()
@six.add_metaclass(abc.ABCMeta)
class _Base(base.BaseTestCase):
    """Shared scaffolding for the OpenDaylight v2 statistics driver tests.

    Subclasses supply ``switch_data`` — the canned REST payload returned
    by the patched statistics client — plus the individual test methods.
    """

    @abc.abstractproperty
    def switch_data(self):
        # Canned statistics payload served by the mocked client below.
        pass

    # Pre-parsed pipeline source URL and query parameters handed to
    # ``get_sample_data`` by every test.
    fake_odl_url = url_parse.ParseResult('opendaylight.v2',
                                         'localhost:8080',
                                         'controller/statistics',
                                         None,
                                         None,
                                         None)
    fake_params = url_parse.parse_qs('user=admin&password=admin&scheme=http&'
                                     'auth=basic')

    def setUp(self):
        super(_Base, self).setUp()
        # Patches below are started manually via ``.start()``; undo all
        # of them when the test finishes.
        self.addCleanup(mock.patch.stopall)
        conf = service.prepare_service([], [])
        self.driver = driver.OpenDaylightDriver(conf)
        # Keystone client double: project lookups resolve to ADMIN_ID.
        ks_client = mock.Mock(auth_token='fake_token')
        ks_client.projects.find.return_value = mock.Mock(name='admin',
                                                         id=ADMIN_ID)
        self.ks_client = mock.patch('ceilometer.keystone_client.get_client',
                                    return_value=ks_client).start()
        # Statistics client double returning the subclass' fixture data.
        self.get_statistics = mock.patch(
            'networking_odl.ceilometer.network.statistics.opendaylight_v2.'
            'client.SwitchStatisticsAPIClient.get_statistics',
            return_value=self.switch_data).start()

    def _test_for_meter(self, meter_name, expected_data):
        """Assert that ``meter_name`` yields exactly ``expected_data``."""
        sample_data = self.driver.get_sample_data(meter_name,
                                                  self.fake_odl_url,
                                                  self.fake_params,
                                                  {})
        self.assertEqual(expected_data, list(sample_data))
class TestOpenDayLightDriverInvalid(_Base):
    """Driver behaviour for unknown meters, caching and ODL failures."""

    switch_data = {"flow_capable_switches": []}

    def _get_samples(self, meter_name, cache):
        return self.driver.get_sample_data(meter_name,
                                           self.fake_odl_url,
                                           self.fake_params,
                                           cache)

    def test_not_implemented_meter(self):
        # Unknown meters, flat or dotted, yield no sample generator.
        self.assertIsNone(self._get_samples('egg', {}))
        self.assertIsNone(self._get_samples('switch.table.egg', {}))

    def test_cache(self):
        # A shared cache dict means a single REST call per polling cycle.
        cache = {}
        self._get_samples('switch', cache)
        self._get_samples('switch', cache)
        self.assertEqual(1, self.get_statistics.call_count)
        # A fresh cache forces a new REST call.
        self._get_samples('switch', {})
        self.assertEqual(2, self.get_statistics.call_count)

    def test_http_error(self):
        mock.patch(
            'networking_odl.ceilometer.network.statistics.opendaylight_v2.'
            'client.SwitchStatisticsAPIClient.get_statistics',
            side_effect=Exception()).start()
        # A failing REST call yields an empty sample list, not an error.
        self.assertEqual(0, len(self._get_samples('switch', {})))
        mock.patch(
            'networking_odl.ceilometer.network.statistics.opendaylight_v2.'
            'client.SwitchStatisticsAPIClient.get_statistics',
            side_effect=[Exception(), self.switch_data]).start()
        cache = {}
        self._get_samples('switch', cache)
        # The cache is populated once a later call succeeds.
        self.assertIn('network.statistics.opendaylight_v2', cache)
class TestOpenDayLightDriverSimple(_Base):
    """Meter extraction from a minimal payload: one switch, one port."""

    switch_data = {
        "flow_capable_switches": [{
            "packet_in_messages_received": 501,
            "packet_out_messages_sent": 300,
            "ports": 1,
            "flow_datapath_id": 55120148545607,
            "switch_port_counters": [{
                "bytes_received": 0,
                "bytes_sent": 0,
                "duration": 600,
                "packets_internal_received": 444,
                "packets_internal_sent": 0,
                "packets_received": 0,
                "packets_received_drop": 0,
                "packets_received_error": 0,
                "packets_sent": 0,
                "port_id": 4,
                "tenant_id": PORT_1_TENANT_ID,
                "uuid": PORT_1_ID
            }],
            "table_counters": [{
                "flow_count": 90,
                "table_id": 0
            }]
        }]
    }

    @staticmethod
    def _switch_port_sample(value):
        # The fixture's only port: OF port 4, backed by neutron PORT_1.
        return (value, '55120148545607:4', {
            'controller': 'OpenDaylight_V2',
            'port_number_on_switch': 4,
            'neutron_port_id': PORT_1_ID,
            'switch': '55120148545607',
        }, ADMIN_ID)

    @staticmethod
    def _port_sample(value):
        return (value, str(PORT_1_ID),
                {'controller': 'OpenDaylight_V2'},
                PORT_1_TENANT_ID)

    def test_meter_switch(self):
        self._test_for_meter(
            'switch',
            [(1, '55120148545607', {'controller': 'OpenDaylight_V2'},
              ADMIN_ID)])

    def test_meter_switch_ports(self):
        self._test_for_meter(
            'switch.ports',
            [(1, '55120148545607', {'controller': 'OpenDaylight_V2'},
              ADMIN_ID)])

    def test_meter_switch_port(self):
        self._test_for_meter('switch.port', [self._switch_port_sample(1)])

    def test_meter_switch_port_uptime(self):
        self._test_for_meter('switch.port.uptime',
                             [self._switch_port_sample(600)])

    def test_meter_switch_port_receive_packets(self):
        self._test_for_meter('switch.port.receive.packets',
                             [self._switch_port_sample(0)])

    def test_meter_switch_port_transmit_packets(self):
        self._test_for_meter('switch.port.transmit.packets',
                             [self._switch_port_sample(0)])

    def test_meter_switch_port_receive_bytes(self):
        self._test_for_meter('switch.port.receive.bytes',
                             [self._switch_port_sample(0)])

    def test_meter_switch_port_transmit_bytes(self):
        self._test_for_meter('switch.port.transmit.bytes',
                             [self._switch_port_sample(0)])

    def test_meter_switch_port_receive_drops(self):
        self._test_for_meter('switch.port.receive.drops',
                             [self._switch_port_sample(0)])

    def test_meter_switch_port_receive_errors(self):
        self._test_for_meter('switch.port.receive.errors',
                             [self._switch_port_sample(0)])

    def test_meter_port(self):
        self._test_for_meter('port', [self._port_sample(1)])

    def test_meter_port_uptime(self):
        self._test_for_meter('port.uptime', [self._port_sample(600)])

    def test_meter_port_receive_packets(self):
        self._test_for_meter('port.receive.packets', [self._port_sample(0)])

    def test_meter_port_transmit_packets(self):
        self._test_for_meter('port.transmit.packets', [self._port_sample(0)])

    def test_meter_port_receive_bytes(self):
        self._test_for_meter('port.receive.bytes', [self._port_sample(0)])

    def test_meter_port_transmit_bytes(self):
        self._test_for_meter('port.transmit.bytes', [self._port_sample(0)])

    def test_meter_port_receive_drops(self):
        self._test_for_meter('port.receive.drops', [self._port_sample(0)])

    def test_meter_port_receive_errors(self):
        self._test_for_meter('port.receive.errors', [self._port_sample(0)])

    def test_meter_switch_table_active_entries(self):
        self._test_for_meter(
            'switch.table.active.entries',
            [(90, '55120148545607:table:0',
              {'switch': '55120148545607',
               'controller': 'OpenDaylight_V2'}, ADMIN_ID)])
class TestOpenDayLightDriverComplex(_Base):
    """Meter extraction from a richer payload: two switches, three ports.

    Port 3 on the first switch has no neutron port attached; the second
    switch contributes only table counters.
    """

    switch_data = {
        "flow_capable_switches": [{
            "packet_in_messages_received": 501,
            "packet_out_messages_sent": 300,
            "ports": 3,
            "flow_datapath_id": 55120148545607,
            "switch_port_counters": [{
                "bytes_received": 0,
                "bytes_sent": 512,
                "duration": 200,
                "packets_internal_received": 444,
                "packets_internal_sent": 0,
                "packets_received": 10,
                "packets_received_drop": 0,
                "packets_received_error": 0,
                "packets_sent": 0,
                "port_id": 3,
            }, {
                "bytes_received": 9800,
                "bytes_sent": 6540,
                "duration": 150,
                "packets_internal_received": 0,
                "packets_internal_sent": 7650,
                "packets_received": 20,
                "packets_received_drop": 0,
                "packets_received_error": 0,
                "packets_sent": 0,
                "port_id": 2,
                "tenant_id": PORT_2_TENANT_ID,
                "uuid": PORT_2_ID
            }, {
                "bytes_received": 100,
                "bytes_sent": 840,
                "duration": 100,
                "packets_internal_received": 984,
                "packets_internal_sent": 7950,
                "packets_received": 9900,
                "packets_received_drop": 1500,
                "packets_received_error": 1000,
                "packets_sent": 7890,
                "port_id": 1,
                "tenant_id": PORT_1_TENANT_ID,
                "uuid": PORT_1_ID
            }],
            "table_counters": [{
                "flow_count": 90,
                "table_id": 10
            }, {
                "flow_count": 80,
                "table_id": 20
            }],
        }, {
            "packet_in_messages_received": 0,
            "packet_out_messages_sent": 0,
            "ports": 0,
            "flow_datapath_id": 55120148545555,
            "table_counters": [{
                "flow_count": 5,
                "table_id": 10
            }, {
                "flow_count": 3,
                "table_id": 20
            }],
        }]
    }

    @staticmethod
    def _sw_port_sample(value, port_num, neutron_id=None):
        # One switch.port.* sample; ports without a neutron port carry
        # no 'neutron_port_id' key in their metadata.
        metadata = {'controller': 'OpenDaylight_V2',
                    'port_number_on_switch': port_num,
                    'switch': '55120148545607'}
        if neutron_id is not None:
            metadata['neutron_port_id'] = neutron_id
        return (value, '55120148545607:%d' % port_num, metadata, ADMIN_ID)

    def _sw_port_samples(self, v_port3, v_port2, v_port1):
        # Samples are emitted in fixture order: ports 3, 2, 1.
        return [self._sw_port_sample(v_port3, 3),
                self._sw_port_sample(v_port2, 2, PORT_2_ID),
                self._sw_port_sample(v_port1, 1, PORT_1_ID)]

    @staticmethod
    def _port_samples(v_port2, v_port1):
        # port.* samples only exist for ports backed by a neutron port.
        return [(v_port2, str(PORT_2_ID),
                 {'controller': 'OpenDaylight_V2'}, PORT_2_TENANT_ID),
                (v_port1, str(PORT_1_ID),
                 {'controller': 'OpenDaylight_V2'}, PORT_1_TENANT_ID)]

    @staticmethod
    def _table_sample(flow_count, switch, table_id):
        return (flow_count, '%s:table:%d' % (switch, table_id),
                {'switch': switch, 'controller': 'OpenDaylight_V2'},
                ADMIN_ID)

    def test_meter_switch(self):
        self._test_for_meter(
            'switch',
            [(1, '55120148545607', {'controller': 'OpenDaylight_V2'},
              ADMIN_ID),
             (1, '55120148545555', {'controller': 'OpenDaylight_V2'},
              ADMIN_ID)])

    def test_meter_switch_ports(self):
        self._test_for_meter(
            'switch.ports',
            [(3, '55120148545607', {'controller': 'OpenDaylight_V2'},
              ADMIN_ID),
             (0, '55120148545555', {'controller': 'OpenDaylight_V2'},
              ADMIN_ID)])

    def test_meter_switch_port(self):
        self._test_for_meter('switch.port', self._sw_port_samples(1, 1, 1))

    def test_meter_switch_port_uptime(self):
        self._test_for_meter('switch.port.uptime',
                             self._sw_port_samples(200, 150, 100))

    def test_meter_switch_port_receive_packets(self):
        self._test_for_meter('switch.port.receive.packets',
                             self._sw_port_samples(10, 20, 9900))

    def test_meter_switch_port_transmit_packets(self):
        self._test_for_meter('switch.port.transmit.packets',
                             self._sw_port_samples(0, 0, 7890))

    def test_meter_switch_port_receive_bytes(self):
        self._test_for_meter('switch.port.receive.bytes',
                             self._sw_port_samples(0, 9800, 100))

    def test_meter_switch_port_transmit_bytes(self):
        self._test_for_meter('switch.port.transmit.bytes',
                             self._sw_port_samples(512, 6540, 840))

    def test_meter_switch_port_receive_drops(self):
        self._test_for_meter('switch.port.receive.drops',
                             self._sw_port_samples(0, 0, 1500))

    def test_meter_switch_port_receive_errors(self):
        self._test_for_meter('switch.port.receive.errors',
                             self._sw_port_samples(0, 0, 1000))

    def test_meter_port(self):
        self._test_for_meter('port', self._port_samples(1, 1))

    def test_meter_port_uptime(self):
        self._test_for_meter('port.uptime', self._port_samples(150, 100))

    def test_meter_port_receive_packets(self):
        self._test_for_meter('port.receive.packets',
                             self._port_samples(20, 9900))

    def test_meter_port_transmit_packets(self):
        self._test_for_meter('port.transmit.packets',
                             self._port_samples(0, 7890))

    def test_meter_port_receive_bytes(self):
        self._test_for_meter('port.receive.bytes',
                             self._port_samples(9800, 100))

    def test_meter_port_transmit_bytes(self):
        self._test_for_meter('port.transmit.bytes',
                             self._port_samples(6540, 840))

    def test_meter_port_receive_drops(self):
        self._test_for_meter('port.receive.drops',
                             self._port_samples(0, 1500))

    def test_meter_port_receive_errors(self):
        self._test_for_meter('port.receive.errors',
                             self._port_samples(0, 1000))

    def test_meter_switch_table_active_entries(self):
        self._test_for_meter(
            'switch.table.active.entries',
            [self._table_sample(90, '55120148545607', 10),
             self._table_sample(80, '55120148545607', 20),
             self._table_sample(5, '55120148545555', 10),
             self._table_sample(3, '55120148545555', 20)])
networking-odl-16.0.0/networking_odl/tests/unit/journal/0000775000175000017500000000000013656750617023423 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/tests/unit/journal/__init__.py0000664000175000017500000000000013656750541025516 0ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/tests/unit/journal/test_recovery.py0000664000175000017500000002376013656750541026676 0ustar zuulzuul00000000000000#
# Copyright (C) 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
from neutron_lib import exceptions as nexc
from neutron_lib.plugins import constants as plugin_constants
from neutron_lib.plugins import directory
from networking_odl.common import constants as odl_const
from networking_odl.common import exceptions
from networking_odl.db import db
from networking_odl.journal import full_sync
from networking_odl.journal import recovery
from networking_odl.l3 import l3_odl_v2
from networking_odl.ml2 import mech_driver_v2
from networking_odl.tests import base
from networking_odl.tests.unit.db import test_db
from networking_odl.tests.unit.journal import helper
from networking_odl.tests.unit import test_base_db
class RecoveryTestCase(test_base_db.ODLBaseDbTestCase):
    """Unit tests for the journal recovery maintenance operation.

    These tests exercise networking_odl.journal.recovery against an in-memory
    journal DB; the ODL REST client is replaced by a global fixture so no
    HTTP requests are actually made.
    """

    def setUp(self):
        # Install the fake REST client before the base class wires up the DB.
        self.useFixture(
            base.OpenDaylightRestClientGlobalFixture(recovery._CLIENT))
        super(RecoveryTestCase, self).setUp()
        self._CLIENT = recovery._CLIENT.get_client()
        self.addCleanup(self.clean_registered_resources)

    @staticmethod
    def clean_registered_resources():
        # Reset the module-global resource registry so tests don't leak
        # registrations into each other.
        full_sync.ALL_RESOURCES = {}

    def _mock_resource(self, plugin, resource_type):
        # Make plugin.get_<resource_type>() return a sentinel mock and
        # hand that sentinel back for later equality assertions.
        mock_resource = mock.MagicMock()
        get_func = getattr(plugin, 'get_{}'.format(resource_type))
        get_func.return_value = mock_resource
        return mock_resource

    def _mock_row(self, resource_type):
        # A stand-in journal row carrying only the object_type attribute.
        return mock.MagicMock(object_type=resource_type)

    def _test__get_latest_resource(self, plugin, resource_type):
        # Register the L2 resources and verify _get_latest_resource routes
        # the row to the matching plugin getter.
        l2 = mech_driver_v2.OpenDaylightMechanismDriver.RESOURCES
        full_sync.ALL_RESOURCES[plugin_constants.CORE] = l2
        mock_resource = self._mock_resource(plugin, resource_type)
        mock_row = self._mock_row(resource_type)
        resource = recovery._get_latest_resource(self.db_context.session,
                                                 mock_row)
        self.assertEqual(mock_resource, resource)

    @mock.patch.object(directory, 'get_plugin')
    def test__get_latest_resource_l2(self, plugin_mock):
        for resource_type in (
                mech_driver_v2.OpenDaylightMechanismDriver.RESOURCES):
            plugin = plugin_mock.return_value
            self._test__get_latest_resource(plugin, resource_type)

    @mock.patch.object(directory, 'get_plugin')
    def test__get_latest_resource_l3(self, plugin_mock):
        full_sync.ALL_RESOURCES[plugin_constants.L3] = l3_odl_v2.L3_RESOURCES
        for resource_type in l3_odl_v2.L3_RESOURCES:
            plugin = plugin_mock.return_value
            self._test__get_latest_resource(plugin, resource_type)

    def test__get_latest_resource_unsupported(self):
        # Rows whose type was never registered must raise explicitly.
        mock_row = self._mock_row('aaa')
        self.assertRaises(
            exceptions.UnsupportedResourceType, recovery._get_latest_resource,
            self.db_context.session, mock_row)

    @mock.patch.object(directory, 'get_plugin')
    def test__get_latest_resource_none(self, plugin_mock):
        # The plugin's NotFound must propagate to the caller.
        plugin_mock.return_value.get_network.side_effect = nexc.NotFound()
        l2 = mech_driver_v2.OpenDaylightMechanismDriver.RESOURCES
        full_sync.ALL_RESOURCES[plugin_constants.CORE] = l2
        mock_row = self._mock_row(odl_const.ODL_NETWORK)
        self.assertRaises(
            nexc.NotFound, recovery._get_latest_resource,
            self.db_context.session, mock_row)

    def test_journal_recovery_retries_exceptions(self):
        db.create_pending_row(self.db_context, odl_const.ODL_NETWORK,
                              'id', odl_const.ODL_DELETE, {})
        created_row = db.get_all_db_rows(self.db_context)[0]
        db.update_db_row_state(self.db_context, created_row,
                               odl_const.FAILED)
        with mock.patch.object(db, 'update_db_row_state') as m:
            self._test_retry_exceptions(recovery.journal_recovery, m)

    def test_journal_recovery_no_rows(self):
        # With no FAILED rows, recovery must not even query ODL.
        recovery.journal_recovery(self.db_context)
        self.assertFalse(self._CLIENT.get_resource.called)

    @test_db.in_session
    def _test_recovery(self, operation, odl_resource, expected_state):
        """Drive one recovery pass over a single FAILED row.

        ``odl_resource`` is what the fake ODL client reports for the row;
        ``expected_state`` is the state the row should end up in, or None
        when the row is expected to have been purged entirely.
        """
        db.create_pending_row(self.db_context, odl_const.ODL_NETWORK,
                              'id', operation, {})
        created_row = db.get_all_db_rows(self.db_context)[0]
        db.update_db_row_state(self.db_context, created_row, odl_const.FAILED)

        self._CLIENT.get_resource.return_value = odl_resource

        recovery.journal_recovery(self.db_context)

        if expected_state is None:
            completed_rows = db.get_all_db_rows_by_state(
                self.db_context, odl_const.COMPLETED)
            self.assertEqual([], completed_rows)
        else:
            row = db.get_all_db_rows_by_state(self.db_context,
                                              expected_state)[0]
            self.assertEqual(created_row['seqnum'], row['seqnum'])

        return created_row

    def _disable_retention(self):
        # retention == 0 means completed rows are deleted immediately.
        self.cfg.config(completed_rows_retention=0, group='ml2_odl')

    def test_journal_recovery_handles_failure_quietly(self):
        class TestException(Exception):
            pass

        # Any error talking to ODL leaves the row FAILED, without raising.
        self._CLIENT.get_resource.side_effect = TestException('')
        self._test_recovery(
            odl_const.ODL_DELETE, None, odl_const.FAILED)

    def test_journal_recovery_deleted_row_not_in_odl(self):
        self._test_recovery(odl_const.ODL_DELETE, None, odl_const.COMPLETED)

    def test_journal_recovery_deleted_row_not_in_odl_purged(self):
        self._disable_retention()
        self._test_recovery(odl_const.ODL_DELETE, None, None)

    def test_journal_recovery_created_row_exists_in_odl(self):
        self._test_recovery(odl_const.ODL_CREATE, {}, odl_const.COMPLETED)

    def test_journal_recovery_created_row_exists_in_odl_purged(self):
        self._disable_retention()
        self._test_recovery(odl_const.ODL_CREATE, {}, None)

    def test_journal_recovery_deleted_row_exists_in_odl(self):
        # A delete that still exists in ODL is retried (back to PENDING).
        self._test_recovery(odl_const.ODL_DELETE, {}, odl_const.PENDING)

    @mock.patch.object(recovery, '_get_latest_resource')
    def _test_recovery_creates_operation(
            self, operation, resource, odl_resource, expected_operation,
            recovery_mock):
        # ``resource`` None simulates the object missing from Neutron's DB.
        if resource is not None:
            recovery_mock.return_value = resource
        else:
            recovery_mock.side_effect = nexc.NotFound

        original_row = self._test_recovery(
            operation, odl_resource, odl_const.COMPLETED)

        # Recovery should have queued a corrective operation for the object.
        pending_row = db.get_all_db_rows_by_state(
            self.db_context, odl_const.PENDING)[0]
        self.assertEqual(expected_operation, pending_row['operation'])
        self.assertEqual(original_row['object_type'],
                         pending_row['object_type'])
        self.assertEqual(original_row['object_uuid'],
                         pending_row['object_uuid'])

    def test_recovery_created_row_not_in_odl(self):
        self._test_recovery_creates_operation(
            odl_const.ODL_CREATE, {}, None, odl_const.ODL_CREATE)

    def test_recovery_updated_row_not_in_odl(self):
        self._test_recovery_creates_operation(
            odl_const.ODL_UPDATE, {}, None, odl_const.ODL_CREATE)

    def test_recovery_updated_resource_missing_but_exists_in_odl(self):
        self._test_recovery_creates_operation(
            odl_const.ODL_UPDATE, None, {}, odl_const.ODL_DELETE)

    @mock.patch.object(recovery, '_get_latest_resource')
    def test_recovery_created_resource_missing_and_not_in_odl(self, rmock):
        rmock.side_effect = nexc.NotFound
        self._test_recovery(odl_const.ODL_CREATE, None, odl_const.COMPLETED)

    @mock.patch.object(recovery, '_get_latest_resource')
    def test_recovery_created_resource_missing_and_not_in_odl_purged(
            self, rmock):
        rmock.side_effect = nexc.NotFound
        self._disable_retention()
        self._test_recovery(odl_const.ODL_CREATE, None, None)

    @mock.patch.object(recovery, '_get_latest_resource')
    def test_recovery_updated_resource_missing_and_not_in_odl(self, rmock):
        rmock.side_effect = nexc.NotFound
        self._test_recovery(odl_const.ODL_UPDATE, None, odl_const.COMPLETED)

    @mock.patch.object(recovery, '_get_latest_resource')
    def test_recovery_updated_resource_missing_and_not_in_odl_purged(
            self, rmock):
        rmock.side_effect = nexc.NotFound
        self._disable_retention()
        self._test_recovery(odl_const.ODL_UPDATE, None, None)

    def _test_get_latest_resource(self, resource_type):
        # Drivers needs to be initialized to register resources for recovery
        # and full sync mechanism.
        helper.TestDriver()
        directory.add_plugin(helper.TEST_PLUGIN, helper.TestPlugin())
        self.addCleanup(directory.add_plugin, helper.TEST_PLUGIN, None)
        return db.create_pending_row(self.db_context, resource_type,
                                     'id', odl_const.ODL_DELETE, {})

    def test_get_latest_resource(self):
        row = self._test_get_latest_resource(helper.TEST_RESOURCE1)
        plugin = directory.get_plugin(helper.TEST_PLUGIN)
        resource = recovery.get_latest_resource(self.db_context, row)
        self.assertDictEqual(resource,
                             plugin.get_test_resource1(self.db_context, 'id'))

    def test_get_unsupported_latest_resource(self):
        row = self._test_get_latest_resource(helper.TEST_RESOURCE1)
        row.object_type = helper.INVALID_RESOURCE
        self.assertRaises(exceptions.UnsupportedResourceType,
                          recovery.get_latest_resource,
                          self.db_context, row)
networking-odl-16.0.0/networking_odl/tests/unit/journal/helper.py0000664000175000017500000000310413656750541025246 0ustar zuulzuul00000000000000# Copyright (c) 2017 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from networking_odl.journal import base_driver
# Canned identifiers shared by the journal unit tests below.  The
# *_SUFFIX values are the URL suffixes the journal uses for each
# registered resource type; INVALID_* values exercise error paths.
TEST_UUID = 'bd8db3a8-2b30-4083-a8b3-b3fd46401142'
TEST_PLUGIN = 'test_plugin'
TEST_RESOURCE1 = 'test_resource1'
TEST_RESOURCE2 = 'test_resource2'
TEST_RESOURCE1_SUFFIX = 'test_resource1s'
TEST_RESOURCE2_SUFFIX = 'test_resource2s'
INVALID_RESOURCE = 'invalid_resource'
INVALID_PLUGIN = 'invalid_plugin'
INVALID_METHOD = 'invalid_method_name'
class TestPlugin(object):
    """Minimal fake plugin exposing canned resource getters for the tests.

    The plural getters return fixed two-element lists; the singular getter
    simply echoes back a record for the requested id.
    """

    def get_test_resource1s(self, context):
        # Two fixed resource1 records, keyed only by id.
        return [{'id': ident} for ident in ('test_id1', 'test_id2')]

    def get_test_resource2s(self, context):
        # Two fixed resource2 records, keyed only by id.
        return [{'id': ident} for ident in ('test_id3', 'test_id4')]

    def get_test_resource1(self, context, id_):
        return {'id': id_}
class TestDriver(base_driver.ResourceBaseDriver):
    """Fake journal driver registering two test resource types.

    Instantiating it registers RESOURCES with the journal machinery via
    ResourceBaseDriver.__init__ (side effect relied on by the tests).
    """

    # Maps resource type name -> URL suffix used when building ODL URLs.
    RESOURCES = {
        TEST_RESOURCE1: TEST_RESOURCE1_SUFFIX,
        TEST_RESOURCE2: TEST_RESOURCE2_SUFFIX
    }
    # Plugin alias under which the fake plugin is registered in the
    # neutron-lib plugin directory.
    plugin_type = TEST_PLUGIN

    def __init__(self):
        super(TestDriver, self).__init__()
networking-odl-16.0.0/networking_odl/tests/unit/journal/test_cleanup.py0000664000175000017500000000367313656750541026470 0ustar zuulzuul00000000000000#
# Copyright (C) 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
from networking_odl.db import db
from networking_odl.journal import cleanup
from networking_odl.tests.unit import test_base_db
class JournalCleanupTestCase(test_base_db.ODLBaseDbTestCase):
    """Tests for the journal cleanup maintenance operations."""

    def test_delete_completed_rows_retries_exceptions(self):
        self.cfg.config(completed_rows_retention=1, group='ml2_odl')
        with mock.patch.object(db, 'delete_rows_by_state_and_time') as db_mock:
            self._test_retry_exceptions(cleanup.delete_completed_rows, db_mock)

    def test_cleanup_processsing_rows_retries_exceptions(self):
        with mock.patch.object(db, 'reset_processing_rows') as db_mock:
            self._test_retry_exceptions(cleanup.cleanup_processing_rows,
                                        db_mock)

    @mock.patch.object(db, 'delete_rows_by_state_and_time')
    def _test_delete_completed_rows(self, retention, expected_call, mock_db):
        # With the given retention setting, check whether the DB delete
        # helper is (or is not) invoked.
        self.cfg.config(completed_rows_retention=retention, group='ml2_odl')
        cleanup.delete_completed_rows(self.db_context)
        self.assertEqual(expected_call, mock_db.called)

    def test_delete_completed_rows_with_retention(self):
        self._test_delete_completed_rows(1, True)

    def test_delete_completed_rows_zero_retention(self):
        # Retention 0: completed rows are deleted on completion, so the
        # periodic purge has nothing to do.
        self._test_delete_completed_rows(0, False)

    def test_delete_completed_rows_indefinite_retention(self):
        self._test_delete_completed_rows(-1, False)
networking-odl-16.0.0/networking_odl/tests/unit/journal/test_journal.py0000664000175000017500000005430113656750541026505 0ustar zuulzuul00000000000000# Copyright (c) 2017 NEC Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import signal
import fixtures
import mock
from neutron.common import utils
from oslo_db import exception
from oslo_log import log as logging
from oslo_service.tests import test_service
from oslo_utils import uuidutils
from networking_odl.common import client
from networking_odl.common import constants as odl_const
from networking_odl.db import db
from networking_odl.db import models
from networking_odl.journal import cleanup
from networking_odl.journal import dependency_validations
from networking_odl.journal import full_sync
from networking_odl.journal import journal
from networking_odl.journal import periodic_task
from networking_odl.journal import recovery
from networking_odl.journal import worker
from networking_odl.tests import base
from networking_odl.tests.unit import base_v2
from networking_odl.tests.unit.db import test_db
PROCESS_RUNNING_STATUSES = ('S', 'R', 'D')
class JournalPeriodicProcessorTest(base_v2.OpenDaylightConfigBase,
                                   test_service.ServiceTestBase):
    """Tests for the JournalPeriodicProcessor worker.

    Several tests fork a real service process and use an OS pipe to learn
    whether a patched method was invoked in the child (see
    create_ipc_for_mock), since ordinary mock assertions cannot cross the
    process boundary.
    """

    def setUp(self):
        super(JournalPeriodicProcessorTest, self).setUp()
        self.periodic_task_fixture = self.useFixture(
            base.OpenDaylightPeriodicTaskFixture())
        # Short sync timeout so the processor ticks quickly during tests.
        self.cfg.config(sync_timeout=0.1, group='ml2_odl')

    def _create_periodic_processor(self):
        periodic_processor = worker.JournalPeriodicProcessor()
        self.addCleanup(periodic_processor.stop)
        return periodic_processor

    def _get_pid_status(self, pid):
        """Allows to query a system process based on the PID

        It will use `ps` to query the pid, it's state and the command.

        :param pid: An integer with the Process ID number
        :returns: A tuple of strings with the command and the running status
                  in a single char as defined in the manpage PS(1) under
                  PROCESS STATE CODES.
        """
        with os.popen('ps ax -o pid,state,cmd') as f:
            # Skip ps header
            f.readline()
            processes = (line.strip().split()[:3] for line in f)
            return next(((c, s) for p, s, c in processes if int(p) == pid),
                        (None, None))

    def _kill_process(self, pid):
        # Cleanup helper: forcefully kill the child if it is still alive.
        if self._get_pid_status(pid)[1] in PROCESS_RUNNING_STATUSES:
            os.kill(pid, signal.SIGKILL)

    def mock_object_with_ipc(self, target, attribute, pre_hook=None):
        patcher = mock.patch.object(target, attribute, autospec=True)
        c2p_read = self.create_ipc_for_mock(patcher, pre_hook)
        return c2p_read

    def create_ipc_for_mock(self, patcher, pre_hook=None):
        # NOTE(mpeterson): The following pipe is being used because this is
        # testing something inter-process and we need to have a value on
        # the side of the test process to know it succeeded with the
        # operation. A pipe provides a way for two processes to communicate.
        # The was_called method will be called by the worker process while
        # the test process will read the result on c2p_read.
        c2p_read, c2p_write = os.pipe()

        def close_pipe_end(fd):
            try:
                os.close(fd)
            except OSError:
                print('failed closing: %s' % fd)

        # First we want to close the write, to unlock any running read()
        # (cleanups run LIFO, so c2p_write is closed before c2p_read).
        self.addCleanup(close_pipe_end, c2p_read)
        self.addCleanup(close_pipe_end, c2p_write)

        mock_ = patcher.start()
        self.addCleanup(patcher.stop)

        def was_called(*args, **kwargs):
            # OSError is caught because start is called twice on the worker
            # and the second time the pipe is already closed.
            try:
                os.close(c2p_read)
                try:
                    if pre_hook:
                        pre_hook(*args, **kwargs)
                    os.write(c2p_write, b'1')
                except Exception:
                    # This is done so any read on the pipe is unblocked.
                    os.write(c2p_write, b'0')
                finally:
                    os.close(c2p_write)
            except OSError:
                pass

        mock_.side_effect = was_called
        return c2p_read

    def assert_ipc_mock_called(self, c2p_read):
        # If it timeouts on the read then it means the function was
        # not called.
        called = int(os.read(c2p_read, 1))
        self.assertEqual(called, 1,
                         'The IPC mock was called but during the '
                         'execution an exception was raised')

    @mock.patch.object(journal.OpenDaylightJournalThread, 'set_sync_event')
    def test_processing(self, mock_journal):
        periodic_processor = self._create_periodic_processor()
        periodic_processor.start()
        # The processor should keep poking the journal thread periodically.
        utils.wait_until_true(lambda: mock_journal.call_count > 1, 5, 0.1)

    @mock.patch.object(journal.OpenDaylightJournalThread, 'start')
    @mock.patch.object(journal.OpenDaylightJournalThread, 'stop')
    def test_stops_journal_sync_thread(self, mock_stop, mock_start):
        # NOTE: stacked patch decorators inject mocks bottom-up, hence
        # mock_stop (bottom decorator) comes first.
        periodic_processor = self._create_periodic_processor()
        periodic_processor.start()
        periodic_processor.stop()
        mock_stop.assert_called_once()
        mock_start.assert_called_once()

    def test_allow_multiple_starts_gracefully(self):
        periodic_processor = self._create_periodic_processor()
        periodic_processor.start()
        periodic_processor.stop()

        try:
            periodic_processor.start()
        except RuntimeError:
            self.fail('Calling a start() after a stop() should be allowed')

    def test_multiple_starts_without_stop_throws_exception(self):
        periodic_processor = self._create_periodic_processor()
        periodic_processor.start()
        self.assertRaises(RuntimeError, periodic_processor.start)

    def test_call_stop_without_calling_start(self):
        periodic_processor = self._create_periodic_processor()
        try:
            periodic_processor.stop()
        except AttributeError:
            self.fail('start() was not called before calling stop()')

    def assert_process_running(self, pid):
        cmd, state = self._get_pid_status(pid)
        self.assertIn(state, PROCESS_RUNNING_STATUSES)
        return cmd

    def _create_periodic_processor_ipc_fork(self, target, pre_hook=None):
        """Spawn the worker in a child process with ``target`` IPC-mocked.

        Returns the child PID and the read end of the pipe signalling that
        ``target`` was invoked in the child.
        """
        self._setup_mocks_for_periodic_task()

        real_start = worker.JournalPeriodicProcessor.start
        pipe_start = self.mock_object_with_ipc(worker.JournalPeriodicProcessor,
                                               'start', real_start)

        c2p_read = self.mock_object_with_ipc(worker.JournalPeriodicProcessor,
                                             target, pre_hook)

        pid = self._spawn_service(
            service_maker=lambda: worker.JournalPeriodicProcessor())
        self.addCleanup(self._kill_process, pid)

        # Allow the process to spawn and signal handling to be registered
        self.assert_ipc_mock_called(pipe_start)

        return pid, c2p_read

    @mock.patch.object(periodic_task.PeriodicTask, 'execute_ops',
                       new=mock.Mock())
    @mock.patch.object(journal.OpenDaylightJournalThread,
                       'sync_pending_entries', new=mock.Mock())
    def test_handle_sighup_gracefully(self):
        real_reset = worker.JournalPeriodicProcessor.reset
        pid, c2p_read = self._create_periodic_processor_ipc_fork('reset',
                                                                 real_reset)

        cmd = self.assert_process_running(pid)

        os.kill(pid, signal.SIGHUP)

        self.assert_ipc_mock_called(c2p_read)

        # The service must survive SIGHUP as the same process/command.
        new_cmd = self.assert_process_running(pid)
        self.assertEqual(cmd, new_cmd)

    def _setup_mocks_for_periodic_task(self, executed_recently=False):
        mock_db_module = mock.MagicMock(spec=db)
        mock_db_module.was_periodic_task_executed_recently.return_value = \
            executed_recently
        mock_db = mock.patch('networking_odl.journal.periodic_task.db',
                             mock_db_module)
        mock_db.start()
        self.addCleanup(mock_db.stop)

    @mock.patch.object(cleanup, 'delete_completed_rows')
    @mock.patch.object(cleanup, 'cleanup_processing_rows')
    @mock.patch.object(full_sync, 'full_sync')
    @mock.patch.object(recovery, 'journal_recovery')
    # ^^ The above mocks represent the required calling order starting from
    # top. Use decorators *only* to specify the stack order.
    def test_maintenance_task_correctly_registered(self, *stack_order):
        # Mocks arrive bottom-up, so reversing restores the documented
        # top-to-bottom registration order.
        calls = []
        for item in reversed(stack_order):
            calls.append(mock.call(item))

        with mock.patch.object(
                periodic_task.PeriodicTask,
                'register_operation') as register_operation_mock:
            periodic_processor = self._create_periodic_processor()
            periodic_processor._start_maintenance_task()
            register_operation_mock.assert_has_calls(calls)

    def test_maintenance_task_started(self):
        self.periodic_task_fixture.task_start_mock.stop()
        mock_start = self.periodic_task_fixture.task_start_mock.start()
        periodic_processor = self._create_periodic_processor()
        periodic_processor.start()
        periodic_processor._maintenance_task = mock.MagicMock()
        mock_start.assert_called_once()

    @mock.patch.object(periodic_task.PeriodicTask, 'execute_ops',
                       new=mock.Mock())
    def test_reset_called_on_sighup(self):
        pid, c2p_read = self._create_periodic_processor_ipc_fork('reset')

        self.assert_process_running(pid)

        os.kill(pid, signal.SIGHUP)

        self.assert_ipc_mock_called(c2p_read)

    @mock.patch.object(periodic_task.PeriodicTask, 'execute_ops')
    def test_reset_fires_maintenance_task(self, execute_mock):
        periodic_processor = self._create_periodic_processor()
        periodic_processor._start_maintenance_task()
        execute_mock.reset_mock()
        periodic_processor.reset()
        execute_mock.assert_has_calls([mock.call(forced=True)])

    def test_reset_succeeeds_when_maintenance_task_not_setup(self):
        periodic_processor = self._create_periodic_processor()
        # NOTE(mpeterson): This tests that if calling reset without setting up
        # the maintenance task then it would not raise an exception and just
        # proceed as usual.
        periodic_processor.reset()

    @mock.patch.object(periodic_task.PeriodicTask, 'execute_ops')
    def test_start_fires_maintenance_task(self, execute_mock):
        periodic_processor = self._create_periodic_processor()
        periodic_processor.start()
        # FIX: the original used execute_mock.called_once_with(...), which is
        # not a Mock assertion method and silently asserted nothing.  Assert
        # the forced execution for real, mirroring the reset test above.
        execute_mock.assert_has_calls([mock.call(forced=True)])

    def test_creates_pidfile(self):
        periodic_processor = self._create_periodic_processor()
        periodic_processor._create_pidfile()
        pidfile = str(periodic_processor.pidfile)
        self.assertTrue(os.path.isfile(pidfile))
        with open(pidfile) as f:
            pid = int(f.readline())
            self.assertEqual(pid, os.getpid())

        # NOTE(mpeterson): to avoid showing an expected exception while
        # running the next assert
        with mock.patch('neutron.agent.linux.daemon.LOG', autospec=True):
            self.assertRaises(
                SystemExit,
                worker.JournalPeriodicProcessor()._create_pidfile
            )

    @mock.patch.object(worker.JournalPeriodicProcessor, '_create_pidfile')
    @mock.patch.object(worker.JournalPeriodicProcessor, '_delete_pidfile')
    def test_pidfile_handling_on_start_stop(self, mock_delete, mock_create):
        # FIX: patch decorators inject mocks bottom-up, so the first argument
        # is the '_delete_pidfile' mock; the original had the names swapped
        # (the test still passed, but any failure would be misattributed).
        periodic_processor = self._create_periodic_processor()
        periodic_processor.start()
        periodic_processor.stop()
        mock_create.assert_called_once()
        mock_delete.assert_called_once()

    def test_deletes_pidfile(self):
        atexit_mock = self.journal_thread_fixture.remock_atexit()
        periodic_processor = self._create_periodic_processor()
        periodic_processor.start()
        pidfile = str(periodic_processor.pidfile)
        self.assertTrue(os.path.isfile(pidfile))
        periodic_processor._delete_pidfile()
        self.assertFalse(os.path.isfile(pidfile))
        atexit_mock.assert_called_once_with(periodic_processor._delete_pidfile)

    def test_atexit_delete_pidfile_registered_only_once(self):
        atexit_mock = self.journal_thread_fixture.remock_atexit()
        periodic_processor = self._create_periodic_processor()
        for _ in range(0, 2):
            periodic_processor.start()
            periodic_processor.stop()
        atexit_mock.assert_called_once()
class OpenDaylightJournalThreadTest(base_v2.OpenDaylightTestCase):
    """Tests for the OpenDaylightJournalThread sync machinery."""

    def setUp(self):
        super(OpenDaylightJournalThreadTest, self).setUp()
        self.journal = journal.OpenDaylightJournalThread()
        self.addCleanup(self.cleanup)

    @staticmethod
    def cleanup():
        # Registered URL builders are module-global; clear between tests.
        journal.MAKE_URL.clear()

    def test_json_data(self):
        # Default URL building pluralizes the object type ("testobject"s).
        object_type = 'testobject'
        data = 'testdata'
        row = models.OpenDaylightJournal(object_type=object_type,
                                         object_uuid=uuidutils.generate_uuid(),
                                         operation=odl_const.ODL_CREATE,
                                         data=data)
        self.assertEqual("%ss" % object_type, self.journal._json_data(row)[1])

    def test_json_data_customized_url(self):
        # A registered builder overrides the default pluralizing scheme.
        object_type = 'randomtestobject'
        data = 'testdata'
        journal.register_url_builder(object_type, lambda row: row.object_type)
        row = models.OpenDaylightJournal(object_type=object_type,
                                         object_uuid=uuidutils.generate_uuid(),
                                         operation=odl_const.ODL_CREATE,
                                         data=data)
        url_param = self.journal._json_data(row)
        self.assertEqual(object_type, url_param[1])

    def test_entry_reset_retries_exceptions(self):
        with mock.patch.object(db, 'update_db_row_state') as m:
            self._test_retry_exceptions(journal.entry_reset, m)

    @test_db.in_session
    @mock.patch.object(client.OpenDaylightRestClient, 'sendjson',
                       mock.Mock(side_effect=Exception))
    def test__sync_entry_update_state_by_retry_count_on_exception(self):
        # With max retries of 1: first failure keeps the row PENDING and
        # bumps the retry count; the second failure moves it to FAILED.
        entry = db.create_pending_row(self.db_context, *self.UPDATE_ROW)
        self.journal._max_retry_count = 1

        self.assertEqual(entry.retry_count, 0)

        self.journal._sync_entry(self.db_context, entry)
        self.assertEqual(entry.retry_count, 1)
        self.assertEqual(entry.state, odl_const.PENDING)

        self.journal._sync_entry(self.db_context, entry)
        self.assertEqual(entry.retry_count, 1)
        self.assertEqual(entry.state, odl_const.FAILED)

    def _test__sync_entry_logs(self, log_type):
        # Run one sync and assert the given log marker was emitted.
        entry = db.create_pending_row(self.db_context, *self.UPDATE_ROW)
        logger = self.useFixture(fixtures.FakeLogger())

        self.journal._sync_entry(self.db_context, entry)

        self.assertIn(log_type, logger.output)

    def test__sync_entry_logs_processing(self):
        self._test__sync_entry_logs(journal.LOG_PROCESSING)

    def test__sync_entry_logs_completed(self):
        self._test__sync_entry_logs(journal.LOG_COMPLETED)

    @mock.patch.object(client.OpenDaylightRestClient, 'sendjson',
                       mock.Mock(side_effect=Exception))
    def test__sync_entry_logs_failed(self):
        self._test__sync_entry_logs(journal.LOG_ERROR_PROCESSING)

    @mock.patch.object(journal.OpenDaylightJournalThread,
                       'sync_pending_entries')
    def test_terminate_journal_thread_correctly(self, mock_journal):
        # Temporarily undo the fixture's thread mock to exercise a real
        # journal thread start/stop cycle.
        self.journal_thread_fixture.journal_thread_mock.stop()
        self.addCleanup(self.journal_thread_fixture.journal_thread_mock.start)
        journal_thread = journal.OpenDaylightJournalThread(start_thread=True)

        journal_thread.stop(5)

        self.assertTrue(not journal_thread._odl_sync_thread.is_alive())
        mock_journal.assert_called_once()

    @mock.patch.object(journal.OpenDaylightJournalThread,
                       'sync_pending_entries')
    def test_allow_multiple_starts_gracefully(self, mock_journal):
        self.journal_thread_fixture.journal_thread_mock.stop()
        self.addCleanup(self.journal_thread_fixture.journal_thread_mock.start)
        journal_thread = journal.OpenDaylightJournalThread(start_thread=False)
        self.addCleanup(journal_thread.stop)
        journal_thread.start()

        try:
            journal_thread.start()
        except RuntimeError:
            self.fail('OpenDaylightJournalThread started twice')
def _raise_DBReferenceError(*args, **kwargs):
    """Side-effect helper that always raises oslo_db's DBReferenceError.

    The incoming arguments are ignored; DBReferenceError's four required
    constructor arguments are satisfied with mocks.
    """
    ctor_args = [mock.Mock(unsafe=True) for _ in range(4)]
    raise exception.DBReferenceError(*ctor_args)
class JournalTest(base_v2.OpenDaylightTestCase):
    """Tests for the journal module's row bookkeeping helpers."""

    @mock.patch.object(dependency_validations, 'calculate')
    @mock.patch.object(journal.db, 'create_pending_row',
                       side_effect=_raise_DBReferenceError)
    def test_record_triggers_retry_on_reference_error(self, mock_create_row,
                                                      mock_calculate):
        # A DB reference error while recording must be surfaced as a
        # RetryRequest so the API layer retries the whole operation.
        args = [mock.Mock(unsafe=True)] * 5
        self.assertRaises(exception.RetryRequest, journal.record, *args)

    def test_entry_complete_retries_exceptions(self):
        with mock.patch.object(db, 'update_db_row_state') as m:
            self._test_retry_exceptions(journal.entry_complete, m)

    @test_db.in_session
    def _test_entry_complete(self, retention, expected_length):
        # Complete one row under the given retention setting and check how
        # many rows remain (0 retention deletes completed rows immediately).
        self.cfg.config(completed_rows_retention=retention, group='ml2_odl')
        db.create_pending_row(self.db_context, *test_db.DbTestCase.UPDATE_ROW)
        entry = db.get_all_db_rows(self.db_context)[-1]
        journal.entry_complete(self.db_context, entry)
        rows = db.get_all_db_rows(self.db_context)
        self.assertEqual(expected_length, len(rows))
        self.assertTrue(
            all(row.state == odl_const.COMPLETED for row in rows))

    def test_entry_complete_no_retention(self):
        self._test_entry_complete(0, 0)

    def test_entry_complete_with_retention(self):
        self._test_entry_complete(1, 1)

    def test_entry_complete_with_indefinite_retention(self):
        self._test_entry_complete(-1, 1)

    @test_db.in_session
    def test_entry_complete_with_retention_deletes_dependencies(self):
        # Even when the completed row is retained, its dependency links
        # must be severed so dependants become runnable.
        self.cfg.config(completed_rows_retention=1, group='ml2_odl')
        db.create_pending_row(self.db_context, *test_db.DbTestCase.UPDATE_ROW)
        entry = db.get_all_db_rows(self.db_context)[-1]
        db.create_pending_row(self.db_context, *test_db.DbTestCase.UPDATE_ROW,
                              depending_on=[entry])
        dependant = db.get_all_db_rows(self.db_context)[-1]

        journal.entry_complete(self.db_context, entry)
        rows = db.get_all_db_rows(self.db_context)
        self.assertIn(entry, rows)
        self.assertEqual([], entry.dependencies)
        self.assertEqual([], dependant.depending_on)

    def test_entry_reset_retries_exceptions(self):
        with mock.patch.object(db, 'update_db_row_state') as m:
            self._test_retry_exceptions(journal.entry_reset, m)

    @test_db.in_session
    def test_entry_reset(self):
        # A PROCESSING row is pushed back to PENDING by entry_reset.
        db.create_pending_row(self.db_context, *test_db.DbTestCase.UPDATE_ROW)
        db.create_pending_row(self.db_context, *test_db.DbTestCase.UPDATE_ROW)
        entry = db.get_all_db_rows(self.db_context)[-1]
        entry.state = odl_const.PROCESSING
        self.db_context.session.merge(entry)
        self.db_context.session.flush()
        entry = db.get_all_db_rows(self.db_context)[-1]
        self.assertEqual(entry.state, odl_const.PROCESSING)
        journal.entry_reset(self.db_context, entry)
        rows = db.get_all_db_rows(self.db_context)
        self.assertEqual(2, len(rows))
        self.assertTrue(all(row.state == odl_const.PENDING for row in rows))

    def test_entry_set_retry_count_retries_exceptions(self):
        with mock.patch.object(db, 'update_pending_db_row_retry') as m:
            self._test_retry_exceptions(
                journal.entry_update_state_by_retry_count, m)

    @test_db.in_session
    def test_entry_set_retry_count(self):
        # With max retries of 1: first update stays PENDING, second flips to
        # FAILED; the untouched baseline row must be unaffected.
        db.create_pending_row(self.db_context, *test_db.DbTestCase.UPDATE_ROW)
        entry_baseline = db.get_all_db_rows(self.db_context)[-1]

        db.create_pending_row(self.db_context, *test_db.DbTestCase.UPDATE_ROW)
        entry_target = db.get_all_db_rows(self.db_context)[-1]

        self.assertEqual(entry_target.retry_count, 0)
        self.assertEqual(entry_target.retry_count, entry_baseline.retry_count)
        self.assertEqual(entry_target.state, entry_baseline.state)

        journal.entry_update_state_by_retry_count(
            self.db_context, entry_target, 1)
        self.assertEqual(entry_target.retry_count, 1)
        self.assertEqual(entry_target.state, odl_const.PENDING)

        journal.entry_update_state_by_retry_count(
            self.db_context, entry_target, 1)
        self.assertEqual(entry_target.retry_count, 1)
        self.assertEqual(entry_target.state, odl_const.FAILED)

        self.assertNotEqual(entry_target.state, entry_baseline.state)
        self.assertNotEqual(entry_target.retry_count,
                            entry_baseline.retry_count)

    def test_record_logs_recording(self):
        logger = self.useFixture(fixtures.FakeLogger())
        journal.record(self.db_context, *self.UPDATE_ROW)
        for arg in self.UPDATE_ROW[0:3]:
            self.assertIn(arg, logger.output)

    def test_record_logs_dependencies(self):
        entry = db.create_pending_row(self.db_context, *self.UPDATE_ROW)

        logger = self.useFixture(fixtures.FakeLogger(level=logging.DEBUG))
        journal.record(self.db_context, *self.UPDATE_ROW)
        self.assertIn(str(entry.seqnum), logger.output)
networking-odl-16.0.0/networking_odl/tests/unit/journal/test_dependency_validations.py0000664000175000017500000005552113656750541031553 0ustar zuulzuul00000000000000#
# Copyright (C) 2016 Intel Corp. Isaku Yamahata
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import testscenarios
from networking_odl.common import constants as const
from networking_odl.db import db
from networking_odl.journal import dependency_validations
from networking_odl.tests.unit import test_base_db
# Enable testscenarios-style scenario expansion for this module.
load_tests = testscenarios.load_tests_apply_scenarios

# Canned resource identifiers and payloads used to build journal rows in
# the dependency-validation scenarios below.
_NET_ID = 'NET_ID'
_NET_DATA = {'id': _NET_ID}
_SUBNET_ID = 'SUBNET_ID'
_SUBNET_DATA = {'network_id': _NET_ID}
_PORT_ID = 'PORT_ID'
_PORT_DATA = {'network_id': _NET_ID,
              'fixed_ips': [{'subnet_id': _SUBNET_ID}]}
# Port with the same subnet listed twice, to exercise de-duplication of
# subnet dependencies.
_PORT_DATA_DUPLICATE_SUBNET = {
    'network_id': _NET_ID,
    'fixed_ips': [{'subnet_id': _SUBNET_ID},
                  {'subnet_id': _SUBNET_ID}]
}
_ROUTER_ID = 'ROUTER_ID'
_ROUTER_DATA = {'id': 'ROUTER_ID',
                'gw_port_id': 'GW_PORT_ID'}
_L2GW_ID = 'l2gw_id'
_L2GW_DATA = {'id': _L2GW_ID}
_L2GWCONN_ID = 'l2gwconn_id'
_L2GWCONN_DATA = {'id': _L2GWCONN_ID,
                  'network_id': _NET_ID,
                  'gateway_id': _L2GW_ID}
_TRUNK_ID = 'TRUNK_ID'
_SUBPORT_ID = 'CPORT_ID'
_TRUNK_DATA = {'trunk_id': _TRUNK_ID,
               'port_id': _PORT_ID,
               'sub_ports': [{'port_id': _SUBPORT_ID}]}
_BGPVPN_ID = 'BGPVPN_ID'
_SG_ID = 'SG_ID'
_SG_DATA = {'id': _SG_ID}
_SG_RULE_ID = 'SG_RULE_ID'
_SG_RULE_DATA = {'id': _SG_RULE_ID,
                 'security_group_id': _SG_ID}
def get_data(res_type, operation):
    """Return the journal payload(s) used for *res_type* / *operation*.

    Delete operations carry lists of dependent ids; create/update carry
    resource dicts.  Unknown resource types yield a single empty payload.
    """
    is_delete = operation == const.ODL_DELETE
    # Types whose payload does not depend on the operation.
    fixed_payloads = {
        const.ODL_NETWORK: [_NET_DATA],
        const.ODL_ROUTER: [_ROUTER_DATA],
        const.ODL_L2GATEWAY: [_L2GW_DATA],
        const.ODL_L2GATEWAY_CONNECTION: [_L2GWCONN_DATA],
        const.ODL_SG: [_SG_DATA],
    }
    if res_type in fixed_payloads:
        return fixed_payloads[res_type]
    if res_type == const.ODL_SUBNET:
        return [[_NET_ID]] if is_delete else [_SUBNET_DATA]
    if res_type == const.ODL_PORT:
        # TODO(yamahata): test case of (ODL_port, ODL_DELETE) is missing
        if is_delete:
            return [[_NET_ID, _SUBNET_ID]]
        return [_PORT_DATA, _PORT_DATA_DUPLICATE_SUBNET]
    if res_type == const.ODL_TRUNK:
        return [[_PORT_ID, _SUBPORT_ID]] if is_delete else [_TRUNK_DATA]
    if res_type == const.ODL_BGPVPN:
        if is_delete:
            return [[_NET_ID, _ROUTER_ID]]
        # Only an update carries associated routers/networks.
        updating = operation == const.ODL_UPDATE
        return [{'id': _BGPVPN_ID,
                 'networks': [_NET_ID] if updating else [],
                 'routers': [_ROUTER_ID] if updating else [],
                 'route_distinguishers': ['100:1']}]
    if res_type == const.ODL_SG_RULE:
        return [[_SG_RULE_ID]] if is_delete else [_SG_RULE_DATA]
    return [[]]
def subnet_fail_network_dep(net_op, subnet_op):
    """Scenario: subnet op must wait on an older network op (one dep)."""
    return dict(expected=1,
                first_type=const.ODL_NETWORK,
                first_operation=net_op,
                first_id=_NET_ID,
                second_type=const.ODL_SUBNET,
                second_operation=subnet_op,
                second_id=_SUBNET_ID)
def subnet_succeed_network_dep(net_op, subnet_op):
    """Scenario: subnet op precedes the network op, so no dependency."""
    return dict(expected=0,
                first_type=const.ODL_SUBNET,
                first_operation=subnet_op,
                first_id=_SUBNET_ID,
                second_type=const.ODL_NETWORK,
                second_operation=net_op,
                second_id=_NET_ID)
# TODO(vthapar) add tests for l2gw dependency validations
class BaseDependencyValidationsTestCase(object):
    """Mixin asserting how many dependencies a second journal entry has.

    Scenario attributes (first_*/second_*/expected) are injected by
    testscenarios on the concrete subclasses.
    """

    def test_dependency(self):
        first_payload = get_data(self.first_type, self.first_operation)
        db.create_pending_row(self.db_context, self.first_type,
                              self.first_id, self.first_operation,
                              first_payload)
        for payload in get_data(self.second_type, self.second_operation):
            dependencies = dependency_validations.calculate(
                self.db_context, self.second_operation, self.second_type,
                self.second_id, payload)
            self.assertEqual(self.expected, len(dependencies))
class SubnetDependencyValidationsTestCase(
        test_base_db.ODLBaseDbTestCase, BaseDependencyValidationsTestCase):
    """Subnet operations depend only on *older* operations on their network."""

    scenarios = (
        ("subnet_create_depends_on_older_network_create",
         subnet_fail_network_dep(const.ODL_CREATE, const.ODL_CREATE)),
        ("subnet_create_depends_on_older_network_update",
         subnet_fail_network_dep(const.ODL_UPDATE, const.ODL_CREATE)),
        ("subnet_create_depends_on_older_network_delete",
         subnet_fail_network_dep(const.ODL_DELETE, const.ODL_CREATE)),
        ("subnet_create_doesnt_depend_on_newer_network_create",
         subnet_succeed_network_dep(const.ODL_CREATE, const.ODL_CREATE)),
        ("subnet_create_doesnt_depend_on_newer_network_update",
         subnet_succeed_network_dep(const.ODL_UPDATE, const.ODL_CREATE)),
        ("subnet_create_doesnt_depend_on_newer_network_delete",
         subnet_succeed_network_dep(const.ODL_DELETE, const.ODL_CREATE)),
        ("subnet_update_depends_on_older_network_create",
         subnet_fail_network_dep(const.ODL_CREATE, const.ODL_UPDATE)),
        ("subnet_update_depends_on_older_network_update",
         subnet_fail_network_dep(const.ODL_UPDATE, const.ODL_UPDATE)),
        ("subnet_update_depends_on_older_network_delete",
         subnet_fail_network_dep(const.ODL_DELETE, const.ODL_UPDATE)),
        ("subnet_update_doesnt_depend_on_newer_network_create",
         subnet_succeed_network_dep(const.ODL_CREATE, const.ODL_UPDATE)),
        ("subnet_update_doesnt_depend_on_newer_network_update",
         subnet_succeed_network_dep(const.ODL_UPDATE, const.ODL_UPDATE)),
        ("subnet_update_doesnt_depend_on_newer_network_delete",
         subnet_succeed_network_dep(const.ODL_DELETE, const.ODL_UPDATE)),
        # Subnet delete never waits on network operations.
        ("subnet_delete_doesnt_depend_on_older_network_create",
         subnet_succeed_network_dep(const.ODL_CREATE, const.ODL_DELETE)),
        ("subnet_delete_doesnt_depend_on_older_network_update",
         subnet_succeed_network_dep(const.ODL_UPDATE, const.ODL_DELETE)),
        ("subnet_delete_doesnt_depend_on_newer_network_create",
         subnet_succeed_network_dep(const.ODL_CREATE, const.ODL_DELETE)),
        ("subnet_delete_doesnt_depend_on_newer_network_update",
         subnet_succeed_network_dep(const.ODL_UPDATE, const.ODL_DELETE)),
    )
def security_rule_fail_security_group_dep(sg_op, sgr_op):
    """Scenario: SG rule op must wait on an older SG op (one dep)."""
    return dict(expected=1,
                first_type=const.ODL_SG,
                first_operation=sg_op,
                first_id=_SG_ID,
                second_type=const.ODL_SG_RULE,
                second_operation=sgr_op,
                second_id=_SG_RULE_ID)
def security_rule_succeed_security_group_dep(sg_op, sgr_op):
    """Scenario: SG rule op precedes the SG op, so no dependency."""
    return dict(expected=0,
                first_type=const.ODL_SG_RULE,
                first_operation=sgr_op,
                first_id=_SG_RULE_ID,
                second_type=const.ODL_SG,
                second_operation=sg_op,
                second_id=_SG_ID)
class SecurityRuleDependencyValidationsTestCase(
        test_base_db.ODLBaseDbTestCase, BaseDependencyValidationsTestCase):
    """SG-rule ops depend only on *older* ops on their security group."""

    scenarios = (
        ("security_rule_create_depends_on_older_security_group_create",
         security_rule_fail_security_group_dep(const.ODL_CREATE,
                                               const.ODL_CREATE)),
        ("security_rule_create_depends_on_older_security_group_update",
         security_rule_fail_security_group_dep(const.ODL_UPDATE,
                                               const.ODL_CREATE)),
        ("security_rule_create_depends_on_older_security_group_delete",
         security_rule_fail_security_group_dep(const.ODL_DELETE,
                                               const.ODL_CREATE)),
        ("security_rule_create_doesnt_depend_on_newer_security_group_create",
         security_rule_succeed_security_group_dep(const.ODL_CREATE,
                                                  const.ODL_CREATE)),
        ("security_rule_create_doesnt_depend_on_newer_security_group_update",
         security_rule_succeed_security_group_dep(const.ODL_UPDATE,
                                                  const.ODL_CREATE)),
        ("security_rule_create_doesnt_depend_on_newer_security_group_delete",
         security_rule_succeed_security_group_dep(const.ODL_DELETE,
                                                  const.ODL_CREATE)),
        ("security_rule_update_depends_on_older_security_group_create",
         security_rule_fail_security_group_dep(const.ODL_CREATE,
                                               const.ODL_UPDATE)),
        ("security_rule_update_depends_on_older_security_group_update",
         security_rule_fail_security_group_dep(const.ODL_UPDATE,
                                               const.ODL_UPDATE)),
        ("security_rule_update_depends_on_older_security_group_delete",
         security_rule_fail_security_group_dep(const.ODL_DELETE,
                                               const.ODL_UPDATE)),
        ("security_rule_update_doesnt_depend_on_newer_security_group_create",
         security_rule_succeed_security_group_dep(const.ODL_CREATE,
                                                  const.ODL_UPDATE)),
        ("security_rule_update_doesnt_depend_on_newer_security_group_update",
         security_rule_succeed_security_group_dep(const.ODL_UPDATE,
                                                  const.ODL_UPDATE)),
        ("security_rule_update_doesnt_depend_on_newer_security_group_delete",
         security_rule_succeed_security_group_dep(const.ODL_DELETE,
                                                  const.ODL_UPDATE)),
        # SG-rule delete never waits on security-group operations.
        ("security_rule_delete_doesnt_depend_on_older_security_group_create",
         security_rule_succeed_security_group_dep(const.ODL_CREATE,
                                                  const.ODL_DELETE)),
        ("security_rule_delete_doesnt_depend_on_older_security_group_update",
         security_rule_succeed_security_group_dep(const.ODL_UPDATE,
                                                  const.ODL_DELETE)),
        ("security_rule_delete_doesnt_depend_on_newer_security_group_create",
         security_rule_succeed_security_group_dep(const.ODL_CREATE,
                                                  const.ODL_DELETE)),
        ("security_rule_delete_doesnt_depend_on_newer_security_group_update",
         security_rule_succeed_security_group_dep(const.ODL_UPDATE,
                                                  const.ODL_DELETE)),
    )
def port_fail_network_dep(net_op, port_op):
    """Scenario: port op must wait on an older network op (one dep)."""
    return dict(expected=1,
                first_type=const.ODL_NETWORK,
                first_operation=net_op,
                first_id=_NET_ID,
                second_type=const.ODL_PORT,
                second_operation=port_op,
                second_id=_PORT_ID)
def port_succeed_network_dep(net_op, port_op):
    """Scenario: port op precedes the network op, so no dependency."""
    return dict(expected=0,
                first_type=const.ODL_PORT,
                first_operation=port_op,
                first_id=_PORT_ID,
                second_type=const.ODL_NETWORK,
                second_operation=net_op,
                second_id=_NET_ID)
def port_fail_subnet_dep(subnet_op, port_op):
    """Scenario: port op must wait on an older subnet op (one dep)."""
    return dict(expected=1,
                first_type=const.ODL_SUBNET,
                first_operation=subnet_op,
                first_id=_SUBNET_ID,
                second_type=const.ODL_PORT,
                second_operation=port_op,
                second_id=_PORT_ID)
def port_succeed_subnet_dep(subnet_op, port_op):
    """Scenario: port op precedes the subnet op, so no dependency."""
    return dict(expected=0,
                first_type=const.ODL_PORT,
                first_operation=port_op,
                first_id=_PORT_ID,
                second_type=const.ODL_SUBNET,
                second_operation=subnet_op,
                second_id=_SUBNET_ID)
class PortDependencyValidationsTestCase(
        test_base_db.ODLBaseDbTestCase, BaseDependencyValidationsTestCase):
    """Port ops depend on *older* ops on both their network and subnet."""

    scenarios = (
        ("port_create_depends_on_older_network_create",
         port_fail_network_dep(const.ODL_CREATE, const.ODL_CREATE)),
        ("port_create_depends_on_older_network_update",
         port_fail_network_dep(const.ODL_UPDATE, const.ODL_CREATE)),
        ("port_create_depends_on_older_network_delete",
         port_fail_network_dep(const.ODL_DELETE, const.ODL_CREATE)),
        ("port_create_doesnt_depend_on_newer_network_create",
         port_succeed_network_dep(const.ODL_CREATE, const.ODL_CREATE)),
        ("port_create_doesnt_depend_on_newer_network_update",
         port_succeed_network_dep(const.ODL_UPDATE, const.ODL_CREATE)),
        ("port_create_doesnt_depend_on_newer_network_delete",
         port_succeed_network_dep(const.ODL_DELETE, const.ODL_CREATE)),
        ("port_update_depends_on_older_network_create",
         port_fail_network_dep(const.ODL_CREATE, const.ODL_UPDATE)),
        ("port_update_depends_on_older_network_update",
         port_fail_network_dep(const.ODL_UPDATE, const.ODL_UPDATE)),
        ("port_update_depends_on_older_network_delete",
         port_fail_network_dep(const.ODL_DELETE, const.ODL_UPDATE)),
        ("port_update_doesnt_depend_on_newer_network_create",
         port_succeed_network_dep(const.ODL_CREATE, const.ODL_UPDATE)),
        ("port_update_doesnt_depend_on_newer_network_update",
         port_succeed_network_dep(const.ODL_UPDATE, const.ODL_UPDATE)),
        ("port_update_doesnt_depend_on_newer_network_delete",
         port_succeed_network_dep(const.ODL_DELETE, const.ODL_UPDATE)),
        ("port_create_depends_on_older_subnet_create",
         port_fail_subnet_dep(const.ODL_CREATE, const.ODL_CREATE)),
        ("port_create_depends_on_older_subnet_update",
         port_fail_subnet_dep(const.ODL_UPDATE, const.ODL_CREATE)),
        ("port_create_depends_on_older_subnet_delete",
         port_fail_subnet_dep(const.ODL_DELETE, const.ODL_CREATE)),
        ("port_create_doesnt_depend_on_newer_subnet_create",
         port_succeed_subnet_dep(const.ODL_CREATE, const.ODL_CREATE)),
        ("port_create_doesnt_depend_on_newer_subnet_update",
         port_succeed_subnet_dep(const.ODL_UPDATE, const.ODL_CREATE)),
        ("port_create_doesnt_depend_on_newer_subnet_delete",
         port_succeed_subnet_dep(const.ODL_DELETE, const.ODL_CREATE)),
        ("port_update_depends_on_older_subnet_create",
         port_fail_subnet_dep(const.ODL_CREATE, const.ODL_UPDATE)),
        ("port_update_depends_on_older_subnet_update",
         port_fail_subnet_dep(const.ODL_UPDATE, const.ODL_UPDATE)),
        ("port_update_depends_on_older_subnet_delete",
         port_fail_subnet_dep(const.ODL_DELETE, const.ODL_UPDATE)),
        ("port_update_doesnt_depend_on_newer_subnet_create",
         port_succeed_subnet_dep(const.ODL_CREATE, const.ODL_UPDATE)),
        ("port_update_doesnt_depend_on_newer_subnet_update",
         port_succeed_subnet_dep(const.ODL_UPDATE, const.ODL_UPDATE)),
        ("port_update_doesnt_depend_on_newer_subnet_delete",
         port_succeed_subnet_dep(const.ODL_DELETE, const.ODL_UPDATE)),
    )
def trunk_dep(first_type, second_type, first_op, second_op, result,
              sub_port=False):
    """Build a trunk<->port scenario; result 'fail' expects one dependency.

    When *sub_port* is true the port side uses the subport id instead of
    the parent port id.
    """
    ids = {
        const.ODL_PORT: _SUBPORT_ID if sub_port else _PORT_ID,
        const.ODL_TRUNK: _TRUNK_ID,
    }
    return dict(expected={'fail': 1, 'pass': 0}[result],
                first_type=first_type,
                first_operation=first_op,
                first_id=ids[first_type],
                second_type=second_type,
                second_operation=second_op,
                second_id=ids[second_type])
class TrunkDependencyValidationsTestCase(
        test_base_db.ODLBaseDbTestCase, BaseDependencyValidationsTestCase):
    """Trunk ops depend on older trunk/parent-port ops, never newer ones."""

    scenarios = (
        ("trunk_create_depends_on_older_port_create",
         trunk_dep(const.ODL_PORT, const.ODL_TRUNK,
                   const.ODL_CREATE, const.ODL_CREATE, 'fail')),
        ("trunk_create_doesnt_depend_on_newer_port_create",
         trunk_dep(const.ODL_TRUNK, const.ODL_PORT,
                   const.ODL_CREATE, const.ODL_CREATE, 'pass')),
        ("trunk_create_doesnt_depend_on_port_update",
         trunk_dep(const.ODL_TRUNK, const.ODL_PORT,
                   const.ODL_CREATE, const.ODL_UPDATE, 'pass')),
        ("trunk_create_doesnt_depend_on_newer_port_delete",
         trunk_dep(const.ODL_TRUNK, const.ODL_PORT,
                   const.ODL_CREATE, const.ODL_DELETE, 'pass')),
        # TODO(vthapar): add more/better validations for subport
        # trunk update means subport add/delete
        ("trunk_update_depends_on_older_trunk_create",
         trunk_dep(const.ODL_TRUNK, const.ODL_TRUNK,
                   const.ODL_CREATE, const.ODL_UPDATE, 'fail', True)),
        ("trunk_update_depends_on_older_port_create",
         trunk_dep(const.ODL_PORT, const.ODL_TRUNK,
                   const.ODL_CREATE, const.ODL_UPDATE, 'fail', True)),
        ("trunk_update_doesnt_depend_on_newer_port_create",
         trunk_dep(const.ODL_TRUNK, const.ODL_PORT,
                   const.ODL_UPDATE, const.ODL_CREATE, 'pass', True)),
        ("trunk_update_doesnt_depend_on_port_update",
         trunk_dep(const.ODL_TRUNK, const.ODL_PORT,
                   const.ODL_UPDATE, const.ODL_UPDATE, 'pass', True)),
        ("trunk_update_doesnt_depend_on_newer_port_delete",
         trunk_dep(const.ODL_TRUNK, const.ODL_PORT,
                   const.ODL_UPDATE, const.ODL_DELETE, 'pass', True)),
        # trunk delete cases
        ("trunk_delete_depends_on_older_trunk_create",
         trunk_dep(const.ODL_TRUNK, const.ODL_TRUNK,
                   const.ODL_CREATE, const.ODL_DELETE, 'fail', True)),
        ("trunk_delete_depends_on_older_trunk_update",
         trunk_dep(const.ODL_TRUNK, const.ODL_TRUNK,
                   const.ODL_UPDATE, const.ODL_DELETE, 'fail', True)),
        ("trunk_delete_doesnt_depend_on_older_port_create",
         trunk_dep(const.ODL_PORT, const.ODL_TRUNK,
                   const.ODL_CREATE, const.ODL_DELETE, 'pass')),
    )
def l2gw_dep(first_type, second_type, first_op, second_op, result):
    """Build an L2 gateway scenario; result 'fail' expects one dependency."""
    ids = {
        const.ODL_NETWORK: _NET_ID,
        const.ODL_L2GATEWAY: _L2GW_ID,
        const.ODL_L2GATEWAY_CONNECTION: _L2GWCONN_ID,
    }
    return dict(expected={'fail': 1, 'pass': 0}[result],
                first_type=first_type,
                first_operation=first_op,
                first_id=ids[first_type],
                second_type=second_type,
                second_operation=second_op,
                second_id=ids[second_type])
class L2GWDependencyValidationsTestCase(
        test_base_db.ODLBaseDbTestCase, BaseDependencyValidationsTestCase):
    """L2GW connections depend on older network and gateway creations."""

    scenarios = (
        ("L2GWConn_create_depends_on_older_network_create",
         l2gw_dep(const.ODL_NETWORK, const.ODL_L2GATEWAY_CONNECTION,
                  const.ODL_CREATE, const.ODL_CREATE, 'fail')),
        ("L2GWConn_create_depends_on_older_L2GW_create",
         l2gw_dep(const.ODL_L2GATEWAY, const.ODL_L2GATEWAY_CONNECTION,
                  const.ODL_CREATE, const.ODL_CREATE, 'fail')),
        ("L2GWConn_create_doesnt_depend_on_newer_network_create",
         l2gw_dep(const.ODL_L2GATEWAY_CONNECTION, const.ODL_NETWORK,
                  const.ODL_CREATE, const.ODL_CREATE, 'pass')),
        ("L2GWConn_create_doesnt_depend_on_newer_L2GW_create",
         l2gw_dep(const.ODL_L2GATEWAY_CONNECTION, const.ODL_L2GATEWAY,
                  const.ODL_CREATE, const.ODL_CREATE, 'pass')),
    )
# TODO(vthapar): Refactor *_dep into a common method
def bgpvpn_dep(first_type, second_type, first_op, second_op, result):
    """Build a BGPVPN scenario; result 'fail' expects one dependency."""
    ids = {
        const.ODL_NETWORK: _NET_ID,
        const.ODL_ROUTER: _ROUTER_ID,
        const.ODL_BGPVPN: _BGPVPN_ID,
    }
    return dict(expected={'fail': 1, 'pass': 0}[result],
                first_type=first_type,
                first_operation=first_op,
                first_id=ids[first_type],
                second_type=second_type,
                second_operation=second_op,
                second_id=ids[second_type])
class BGPVPNDependencyValidationsTestCase(
        test_base_db.ODLBaseDbTestCase, BaseDependencyValidationsTestCase):
    """BGPVPN update/delete depend on older BGPVPN/network/router ops."""

    scenarios = (
        ("bgpvpn_create_doesnt_depend_on_older_network_create",
         bgpvpn_dep(const.ODL_NETWORK, const.ODL_BGPVPN,
                    const.ODL_CREATE, const.ODL_CREATE, 'pass')),
        ("bgpvpn_create_doesnt_depend_on_newer_network_create",
         bgpvpn_dep(const.ODL_BGPVPN, const.ODL_NETWORK,
                    const.ODL_CREATE, const.ODL_CREATE, 'pass')),
        ("bgpvpn_create_doesnt_depend_on_older_router_create",
         bgpvpn_dep(const.ODL_ROUTER, const.ODL_BGPVPN,
                    const.ODL_CREATE, const.ODL_CREATE, 'pass')),
        ("bgpvpn_create_doesnt_depend_on_newer_router_create",
         bgpvpn_dep(const.ODL_BGPVPN, const.ODL_ROUTER,
                    const.ODL_CREATE, const.ODL_CREATE, 'pass')),
        ("bgpvpn_update_depends_on_older_bgpvpn_create",
         bgpvpn_dep(const.ODL_BGPVPN, const.ODL_BGPVPN,
                    const.ODL_CREATE, const.ODL_UPDATE, 'fail')),
        ("bgpvpn_update_depends_on_older_network_create",
         bgpvpn_dep(const.ODL_NETWORK, const.ODL_BGPVPN,
                    const.ODL_CREATE, const.ODL_UPDATE, 'fail')),
        ("bgpvpn_update_doesnt_depend_on_newer_network_create",
         bgpvpn_dep(const.ODL_BGPVPN, const.ODL_NETWORK,
                    const.ODL_UPDATE, const.ODL_CREATE, 'pass')),
        ("bgpvpn_update_depends_on_older_router_create",
         bgpvpn_dep(const.ODL_ROUTER, const.ODL_BGPVPN,
                    const.ODL_CREATE, const.ODL_UPDATE, 'fail')),
        ("bgpvpn_update_doesnt_depend_on_newer_router_create",
         bgpvpn_dep(const.ODL_BGPVPN, const.ODL_ROUTER,
                    const.ODL_UPDATE, const.ODL_CREATE, 'pass')),
        # bgpvpn delete cases
        ("bgpvpn_delete_depends_on_older_bgpvpn_create",
         bgpvpn_dep(const.ODL_BGPVPN, const.ODL_BGPVPN,
                    const.ODL_CREATE, const.ODL_DELETE, 'fail')),
        ("bgpvpn_delete_depends_on_older_bgpvpn_update",
         bgpvpn_dep(const.ODL_BGPVPN, const.ODL_BGPVPN,
                    const.ODL_UPDATE, const.ODL_DELETE, 'fail')),
        ("bgpvpn_delete_doesnt_depend_on_older_network_create",
         bgpvpn_dep(const.ODL_NETWORK, const.ODL_BGPVPN,
                    const.ODL_CREATE, const.ODL_DELETE, 'pass')),
        ("bgpvpn_delete_doesnt_depend_on_older_router_create",
         bgpvpn_dep(const.ODL_ROUTER, const.ODL_BGPVPN,
                    const.ODL_CREATE, const.ODL_DELETE, 'pass')),
    )
networking-odl-16.0.0/networking_odl/tests/unit/journal/test_base_driver.py0000664000175000017500000000720413656750541027320 0ustar zuulzuul00000000000000# Copyright (c) 2017 NEC Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.plugins import directory
from networking_odl.common import constants
from networking_odl.common import exceptions
from networking_odl.db import db
from networking_odl.journal import base_driver
from networking_odl.tests.unit.journal import helper
from networking_odl.tests.unit import test_base_db
class BaseDriverTestCase(test_base_db.ODLBaseDbTestCase):
    """Tests for journal base_driver registration/lookup and recovery."""

    def setUp(self):
        super(BaseDriverTestCase, self).setUp()
        # Instantiating TestDriver registers it for its RESOURCES types.
        self.test_driver = helper.TestDriver()
        self.plugin = helper.TestPlugin()
        directory.add_plugin(helper.TEST_PLUGIN, self.plugin)
        # Deregister the plugin after the test to avoid cross-test leakage.
        self.addCleanup(directory.add_plugin, helper.TEST_PLUGIN, None)

    def test_get_resource_driver(self):
        """Every registered resource type resolves back to the driver."""
        for resource, resource_suffix in self.test_driver.RESOURCES.items():
            driver = base_driver.get_driver(resource)
            self.assertEqual(driver, self.test_driver)
            self.assertEqual(driver.plugin_type, helper.TEST_PLUGIN)
            self.assertEqual(self.test_driver.RESOURCES.get(resource),
                             resource_suffix)

    def non_existing_plugin_cleanup(self):
        # Restore the valid plugin type mutated by test_non_existing_plugin.
        self.test_driver.plugin_type = helper.TEST_PLUGIN

    def test_non_existing_plugin(self):
        """driver.plugin is None when its plugin type is not registered."""
        self.test_driver.plugin_type = helper.INVALID_PLUGIN
        self.addCleanup(self.non_existing_plugin_cleanup)
        self.assertIsNone(self.test_driver.plugin)

    def test_get_non_existing_resource_driver(self):
        """Looking up an unregistered resource type raises."""
        self.assertRaises(exceptions.ResourceNotRegistered,
                          base_driver.get_driver, helper.INVALID_RESOURCE)

    def test_get_resources_for_full_sync(self):
        """Full-sync fetch returns every resource the plugin exposes."""
        received_resources = self.test_driver.get_resources_for_full_sync(
            self.db_context,
            helper.TEST_RESOURCE1)
        resources = self.plugin.get_test_resource1s(self.db_context)
        for resource in resources:
            self.assertIn(resource, received_resources)

    def test_get_non_existing_resources_for_full_sync(self):
        """Full-sync fetch of an unknown resource type raises."""
        self.assertRaises(exceptions.UnsupportedResourceType,
                          self.test_driver.get_resources_for_full_sync,
                          self.db_context, helper.INVALID_RESOURCE)

    def test_get_resource(self):
        """Recovery fetch returns the resource matching a journal row."""
        row = db.create_pending_row(self.db_context, helper.TEST_RESOURCE1,
                                    helper.TEST_UUID, constants.ODL_CREATE,
                                    {'id': helper.TEST_UUID})
        resource = self.test_driver.get_resource_for_recovery(self.db_context,
                                                              row)
        self.assertEqual(resource['id'], helper.TEST_UUID)

    def test_get_unsupported_resource(self):
        """Recovery fetch raises when no plugin getter exists for the row."""
        row = db.create_pending_row(self.db_context, helper.INVALID_RESOURCE,
                                    helper.TEST_UUID, constants.ODL_CREATE,
                                    {'id': helper.TEST_UUID})
        self.assertRaises(exceptions.PluginMethodNotFound,
                          self.test_driver.get_resource_for_recovery,
                          self.db_context, row)
networking-odl-16.0.0/networking_odl/tests/unit/journal/test_full_sync.py0000664000175000017500000004647713656750541027050 0ustar zuulzuul00000000000000#
# Copyright (C) 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import mock
import requests
from networking_l2gw.services.l2gateway.common import constants as l2gw_const
from networking_sfc.extensions import flowclassifier as fc_const
from networking_sfc.extensions import sfc as sfc_const
from neutron_lib.api.definitions import bgpvpn as bgpvpn_const
from neutron_lib.callbacks import resources
from neutron_lib.plugins import constants
from neutron_lib.plugins import directory
from networking_odl.bgpvpn import odl_v2 as bgpvpn_driver
from networking_odl.common import constants as odl_const
from networking_odl.common import exceptions
from networking_odl.db import db
from networking_odl.journal import base_driver
from networking_odl.journal import full_sync
from networking_odl.journal import journal
from networking_odl.l2gateway import driver_v2 as l2gw_driver
from networking_odl.l3 import l3_odl_v2
from networking_odl.ml2 import mech_driver_v2
from networking_odl.qos import qos_driver_v2 as qos_driver
from networking_odl.sfc.flowclassifier import sfc_flowclassifier_v2
from networking_odl.sfc import sfc_driver_v2 as sfc_driver
from networking_odl.tests import base
from networking_odl.tests.unit.journal import helper
from networking_odl.tests.unit import test_base_db
from networking_odl.trunk import trunk_driver_v2 as trunk_driver
class FullSyncTestCase(test_base_db.ODLBaseDbTestCase):
def setUp(self):
    """Install a mocked ODL REST client and register per-test cleanups."""
    self.useFixture(
        base.OpenDaylightRestClientGlobalFixture(full_sync._CLIENT))
    super(FullSyncTestCase, self).setUp()
    self._CLIENT = full_sync._CLIENT.get_client()
    # Empty the module-level handler registry after each test.
    self.addCleanup(full_sync.FULL_SYNC_RESOURCES.clear)
    # NOTE(rajivk): workaround - fixtures are torn down only after the
    # whole test class completes, but plugin registrations must be undone
    # after each individual test case.
    self.addCleanup(self._clean_registered_plugins)
def _clean_registered_plugins(self):
    """Deregister every plugin type this test suite may have installed."""
    # Iterating the dict directly yields its keys; no .keys() call needed.
    for plugin_type in self._get_all_plugins():
        directory.add_plugin(plugin_type, None)
def test_no_full_sync_when_canary_exists(self):
    """No journal rows are created when ODL already has the canary net."""
    full_sync.full_sync(self.db_context)
    self.assertEqual([], db.get_all_db_rows(self.db_context))
def _filter_out_canary(self, rows):
    """Return *rows* without the canary-network journal row."""
    remaining = []
    for row in rows:
        if row['object_uuid'] != full_sync._CANARY_NETWORK_ID:
            remaining.append(row)
    return remaining
def _mock_l2_resources(self):
    """Register a mocked core plugin exposing one net/subnet/port.

    Returns a dict mapping resource type to the id expected to appear in
    the journal after a full sync.
    """
    expected_journal = {odl_const.ODL_NETWORK: '1',
                        odl_const.ODL_SUBNET: '2',
                        odl_const.ODL_PORT: '3'}
    network_id = expected_journal[odl_const.ODL_NETWORK]
    plugin = mock.Mock()
    plugin.get_networks.return_value = [{'id': network_id}]
    plugin.get_subnets.return_value = [
        {'id': expected_journal[odl_const.ODL_SUBNET],
         'network_id': network_id}]
    port = {'id': expected_journal[odl_const.ODL_PORT],
            odl_const.ODL_SGS: None,
            'tenant_id': '123',
            'fixed_ips': [],
            'network_id': network_id}
    # First get_ports call returns the port, the second an empty page.
    plugin.get_ports.side_effect = ([port], [])
    directory.add_plugin(constants.CORE, plugin)
    return expected_journal
def _test_no_full_sync_when_canary_in_journal(self, state):
    """Full sync is skipped while a canary row in *state* is journaled."""
    self._mock_canary_missing()
    self._mock_l2_resources()
    db.create_pending_row(self.db_context, odl_const.ODL_NETWORK,
                          full_sync._CANARY_NETWORK_ID,
                          odl_const.ODL_CREATE, {})
    row = db.get_all_db_rows(self.db_context)[0]
    db.update_db_row_state(self.db_context, row, state)
    full_sync.full_sync(self.db_context)
    # Only the canary row itself may remain; nothing new was journaled.
    rows = db.get_all_db_rows(self.db_context)
    self.assertEqual([], self._filter_out_canary(rows))
def test_no_full_sync_when_canary_pending_creation(self):
    """A PENDING canary row suppresses a new full sync."""
    self._test_no_full_sync_when_canary_in_journal(odl_const.PENDING)
def test_no_full_sync_when_canary_is_processing(self):
    """A PROCESSING canary row suppresses a new full sync."""
    self._test_no_full_sync_when_canary_in_journal(odl_const.PROCESSING)
@staticmethod
def _get_all_resources():
    """All (resource type, plugin alias) pairs, in expected sync order.

    The ordering matters: test_sync_resource_order asserts that
    full_sync processes resources in exactly this sequence.
    """
    return (
        (odl_const.ODL_SG, constants.CORE),
        (odl_const.ODL_SG_RULE, constants.CORE),
        (odl_const.ODL_NETWORK, constants.CORE),
        (odl_const.ODL_SUBNET, constants.CORE),
        (odl_const.ODL_ROUTER, constants.L3),
        (odl_const.ODL_PORT, constants.CORE),
        (odl_const.ODL_FLOATINGIP, constants.L3),
        (odl_const.ODL_QOS_POLICY, constants.QOS),
        (odl_const.ODL_TRUNK, resources.TRUNK),
        (odl_const.ODL_BGPVPN, bgpvpn_const.ALIAS),
        (odl_const.ODL_BGPVPN_NETWORK_ASSOCIATION, bgpvpn_const.ALIAS),
        (odl_const.ODL_BGPVPN_ROUTER_ASSOCIATION, bgpvpn_const.ALIAS),
        (odl_const.ODL_SFC_FLOW_CLASSIFIER, fc_const.FLOW_CLASSIFIER_EXT),
        (odl_const.ODL_SFC_PORT_PAIR, sfc_const.SFC_EXT),
        (odl_const.ODL_SFC_PORT_PAIR_GROUP, sfc_const.SFC_EXT),
        (odl_const.ODL_SFC_PORT_CHAIN, sfc_const.SFC_EXT),
        (odl_const.ODL_L2GATEWAY, l2gw_const.L2GW),
        (odl_const.ODL_L2GATEWAY_CONNECTION, l2gw_const.L2GW))
@mock.patch.object(db, 'delete_pending_rows')
@mock.patch.object(full_sync, '_full_sync_needed')
@mock.patch.object(full_sync, '_sync_resources')
@mock.patch.object(journal, 'record')
def test_sync_resource_order(
        self, record_mock, _sync_resources_mock, _full_sync_needed_mock,
        delete_pending_rows_mock):
    """full_sync() must process resource types in the documented order."""
    all_resources = self._get_all_resources()
    # Mutate FULL_SYNC_RESOURCES in place rather than rebinding the
    # module attribute: setUp registered addCleanup(...clear) on the
    # original dict object, so rebinding would leave the stubbed dict
    # leaking into subsequent tests.
    full_sync.FULL_SYNC_RESOURCES.clear()
    full_sync.FULL_SYNC_RESOURCES.update(
        {resource_type: mock.Mock()
         for resource_type, _ in all_resources})
    # Configure the patched _full_sync_needed mock itself.  The previous
    # code set `.return_value` on an unrelated child attribute
    # (_full_sync_needed_mock._full_sync_needed) and only passed because
    # a bare Mock() result is truthy.
    _full_sync_needed_mock.return_value = True
    context = mock.MagicMock()
    full_sync.full_sync(context)
    _sync_resources_mock.assert_has_calls(
        [mock.call(mock.ANY, object_type, mock.ANY)
         for object_type, _ in all_resources])
def test_client_error_propagates(self):
    """Errors from the REST client bubble out of full_sync unchanged."""
    class TestException(Exception):
        def __init__(self):
            pass

    self._CLIENT.get.side_effect = TestException()
    self.assertRaises(TestException, full_sync.full_sync, self.db_context)
def _mock_canary_missing(self):
    """Make the mocked client report the canary network as absent (404)."""
    get_return = mock.MagicMock()
    get_return.status_code = requests.codes.not_found
    self._CLIENT.get.return_value = get_return
def _assert_canary_created(self):
    """Assert a canary-network row was journaled; return all rows."""
    rows = db.get_all_db_rows(self.db_context)
    self.assertTrue(any(r['object_uuid'] == full_sync._CANARY_NETWORK_ID
                        for r in rows))
    return rows
def _test_full_sync_resources(self, expected_journal):
    """Run a full sync and check journaled rows match *expected_journal*.

    *expected_journal* maps object_type -> expected object_uuid.
    """
    self._mock_canary_missing()
    directory.add_plugin(constants.CORE, mock.Mock())
    full_sync.full_sync(self.db_context)
    rows = self._assert_canary_created()
    rows = self._filter_out_canary(rows)
    # NOTE(review): assertItemsEqual was removed from unittest on py3;
    # presumably provided here by the testtools-based base class - verify.
    self.assertItemsEqual(expected_journal.keys(),
                          [row['object_type'] for row in rows])
    for row in rows:
        self.assertEqual(expected_journal[row['object_type']],
                         row['object_uuid'])
def test_full_sync_removes_pending_rows(self):
    """Pre-existing pending journal rows are purged by a full sync."""
    db.create_pending_row(self.db_context, odl_const.ODL_NETWORK, "uuid",
                          odl_const.ODL_CREATE, {'foo': 'bar'})
    self._test_full_sync_resources({})
def test_full_sync_no_resources(self):
    """A full sync with no resources journals nothing but the canary."""
    self._test_full_sync_resources({})
@staticmethod
def _get_mocked_security_groups(context):
    """Fixture: security groups as returned by the core plugin."""
    return [{'description': u'description',
             'security_group_rules': ['security_grp_rules'],
             'id': 'test_uuid', 'name': u'default'}]
@staticmethod
def _get_mocked_security_group_rules(context):
    """Fixture: security group rules as returned by the core plugin."""
    return [{'direction': 'egress', 'protocol': None,
             'description': 'description', 'port_range_max': None,
             'id': 'test_uuid', 'security_group_id': 'test_uuid'}]
@staticmethod
def _get_mocked_networks(context):
    """Fixture: networks as returned by the core plugin."""
    return [{'id': 'test_uuid', 'project_id': u'project_id',
             'status': u'ACTIVE', 'subnets': [], 'description': u'',
             'name': u'network0'}]
@staticmethod
def _get_mocked_subnets(context):
    """Fixture: subnets as returned by the core plugin."""
    return [{'description': u'', 'cidr': u'test-cidr', 'id': 'test_uuid',
             'name': u'test-subnet', 'network_id': 'test_uuid',
             'gateway_ip': u'gateway_ip'}]
@staticmethod
def _get_mocked_routers(context):
    """Fixture: routers as returned by the L3 plugin."""
    return [{'status': u'ACTIVE', 'description': u'', 'name': u'router1',
             'id': 'test_uuid'}]
@staticmethod
def _get_mocked_ports(context):
    """Fixture: ports as returned by the core plugin."""
    return [{'status': u'DOWN', 'description': None, 'id': 'test_uuid',
             'name': u'loadbalancer-27', 'network_id': 'test_uuid',
             'mac_address': u'fa:16:3e:69:4e:33'}]
@staticmethod
def _get_mocked_loadbalancers(context):
    """Fixture: load balancers as returned by the LBaaS plugin."""
    return [{'description': '', 'tenant_id': 'tenant_id',
             'vip_subnet_id': 'subnet_id', 'listeners': [],
             'vip_address': '10.1.0.11', 'vip_port_id': 'port_id',
             'pools': [], 'id': 'test_uuid', 'name': 'test-lb'}]
@staticmethod
def _get_mocked_listeners(context):
    """Fixture: LBaaS listeners (plural accessor variant)."""
    return [{'admin_state_up': True, 'project_id': 'test_uuid',
             'id': 'test_uuid'}]
@staticmethod
def _get_mocked_trunks(context):
    """Fixture consumed for ODL_TRUNK full sync.

    NOTE(review): the payload shape (routers/networks/route_targets)
    looks copied from a BGPVPN fixture rather than trunk data; only the
    'id' field is asserted on, so the mismatch is harmless - confirm.
    """
    return [{'routers': [], 'id': 'test_uuid', 'name': u'',
             'tenant_id': u'project_id', 'networks': [], 'route_targets': [
                 u'64512:1'], 'project_id': u'project_id', 'type': 'l3'},
            {'routers': [], 'id': 'test_uuid', 'name': u'',
             'tenant_id': u'tenant_id', 'networks': [], 'route_targets': [
                 u'64512:1'], 'project_id': u'project_id', 'type': 'l3'}]
@staticmethod
def _get_mocked_bgpvpns(context):
    """Fixture consumed for ODL_BGPVPN full sync.

    NOTE(review): the keys resemble a network-association payload;
    only 'id' is asserted on, so this appears intentional - confirm.
    """
    return [{'network_id': 'test_uuid', 'bgpvpn_id': 'test_uuid',
             'project_id': 'test_uuid', 'id': 'test_uuid'}]
@staticmethod
def _get_mocked_l2_gateways(context):
    """Fixture: L2 gateways as returned by the l2gw plugin."""
    return [{'tenant_id': u'test_tenant_id', 'id': 'test_uuid',
             'devices': [{'interfaces': [{'name': u'eth3'}],
                          'id': 'test_uuid', 'device_name': u'vtep0'}],
             'name': u'test-gateway'}]
@staticmethod
def _get_mocked_l2_gateway_connections(context):
    """Fixture: L2 gateway connections as returned by the l2gw plugin."""
    return [{'network_id': 'test_uuid', 'tenant_id': 'test_uuid',
             'l2_gateway_id': 'test_uuid', 'id': 'test_uuid'}]
@staticmethod
def _get_mocked_pools(context):
    """Fixture: LBaaS pools."""
    return [{'name': 'pool1', 'admin_state_up': True,
             'project_id': 'test_uuid', 'id': 'test_uuid'}]
@staticmethod
def _get_mocked_pool_members(context, pool_id):
    """Fixture: members of an LBaaS pool (pool_id is ignored)."""
    return [{'name': 'pool1', 'admin_state_up': True,
             'project_id': 'test_uuid', 'id': 'test_uuid'}]
@staticmethod
def _get_mocked_healthmonitors(context):
    """Fixture: LBaaS health monitors."""
    return [{'type': 'HTTP', 'admin_state_up': True,
             'project_id': 'test_uuid', 'id': 'test_uuid',
             'name': 'monitor1'}]
@staticmethod
def _get_mocked_listener(context):
    """Fixture: LBaaS listeners (singular accessor variant)."""
    return [{'admin_state_up': True, 'project_id': 'test_uuid',
             'id': 'test_uuid'}]
@staticmethod
def _get_mocked_floatingips(context):
    """Fixture: floating IPs as returned by the L3 plugin."""
    return [{'floating_network_id': 'test_uuid', 'tenant_id': 'test_uuid',
             'dns_name': '', 'dns_domain': '', 'id': 'test_uuid'}]
@staticmethod
def _get_mocked_policies(context):
    """Fixture: QoS policies as returned by the QoS plugin."""
    return [{'id': 'test_uuid', 'project_id': 'test_uuid',
             'name': 'test-policy', 'description': 'Policy description',
             'shared': True, 'is_default': False}]
@staticmethod
def _get_mocked_bgpvpn_network_associations(context, bgpvpn_id):
    """Fixture: network associations of a BGPVPN (bgpvpn_id ignored)."""
    return [{'network_id': 'test_uuid', 'tenant_id': 'test_uuid',
             'id': 'test_uuid'}]
@staticmethod
def _get_mocked_bgpvpn_router_associations(context, bgpvpn_id):
    """Fixture: router associations of a BGPVPN (bgpvpn_id ignored)."""
    return [{'router_id': 'test_uuid', 'tenant_id': 'test_uuid',
             'id': 'test_uuid'}]
@staticmethod
def _get_mocked_port_chains(context):
    """Fixture: SFC port chains."""
    tenant_id = 'test_uuid'
    return [{'tenant_id': tenant_id, 'project_id': tenant_id,
             'id': 'test_uuid'}]
@staticmethod
def _get_mocked_port_pair_groups(context):
    """Fixture: SFC port pair groups."""
    tenant_id = 'test_uuid'
    return [{'tenant_id': tenant_id, 'project_id': tenant_id,
             'id': 'test_uuid'}]
@staticmethod
def _get_mocked_port_pairs(context):
    """Fixture: SFC port pairs."""
    tenant_id = 'test_uuid'
    return [{'tenant_id': tenant_id, 'project_id': tenant_id,
             'id': 'test_uuid'}]
@staticmethod
def _get_mocked_flowclassifiers(context):
    """Fixture: SFC flow classifiers."""
    tenant_id = 'test_uuid'
    return [{'tenant_id': tenant_id, 'project_id': tenant_id,
             'id': 'test_uuid'}]
@staticmethod
def _get_all_plugins():
    """Map plugin alias -> (mocked plugin instance, networking-odl driver)."""
    return {
        constants.CORE: (mock.Mock(),
                         mech_driver_v2.OpenDaylightMechanismDriver),
        constants.L3: (mock.Mock(), l3_odl_v2.OpenDaylightL3RouterPlugin),
        resources.TRUNK: (mock.Mock(),
                          trunk_driver.OpenDaylightTrunkHandlerV2),
        constants.QOS: (mock.Mock(), qos_driver.OpenDaylightQosDriver),
        sfc_const.SFC_EXT: (mock.Mock(),
                            sfc_driver.OpenDaylightSFCDriverV2),
        bgpvpn_const.ALIAS: (mock.Mock(),
                             bgpvpn_driver.OpenDaylightBgpvpnDriver),
        fc_const.FLOW_CLASSIFIER_EXT: (
            mock.Mock(),
            sfc_flowclassifier_v2.OpenDaylightSFCFlowClassifierDriverV2),
        l2gw_const.L2GW: (mock.Mock(), l2gw_driver.OpenDaylightL2gwDriver)
    }
@staticmethod
def _get_name(resource_type):
mapping = {
odl_const.ODL_QOS_POLICY: odl_const.ODL_QOS_POLICIES,
odl_const.ODL_SFC_PORT_PAIR:
odl_const.NETWORKING_SFC_FLOW_CLASSIFIERS,
odl_const.ODL_SFC_PORT_PAIR:
odl_const.NETWORKING_SFC_PORT_PAIRS,
odl_const.ODL_SFC_PORT_PAIR_GROUP:
odl_const.NETWORKING_SFC_PORT_PAIR_GROUPS,
odl_const.ODL_SFC_PORT_CHAIN: odl_const.NETWORKING_SFC_PORT_CHAINS,
odl_const.ODL_L2GATEWAY_CONNECTION:
odl_const.ODL_L2GATEWAY_CONNECTIONS}
return ('_get_mocked_%s' % mapping.get(
resource_type, resource_type + 's'))
    def _add_side_effect(self):
        """Attach the mocked ``get_<resource>`` getters to each plugin mock.

        ``name[12:]`` strips the leading ``'_get_mocked_'`` prefix so the
        plugin mock exposes the getter under the real plugin API name.
        A plugin is registered in the directory only when no plugin of that
        type is registered yet.
        """
        plugins = self._get_all_plugins()
        resources = self._get_all_resources()
        for resource_type, plugin_name in resources:
            name = self._get_name(resource_type)
            setattr(plugins[plugin_name][0], "get_%s" % name[12:],
                    getattr(self, name))
            if directory.get_plugin(plugin_name) is None:
                directory.add_plugin(plugin_name, plugins[plugin_name][0])
    @mock.patch.object(journal, 'record')
    def _test_sync_resources(self, object_type, plugin_type, mocked_record):
        """Verify _sync_resources records a CREATE entry for each resource.

        Drivers that define a ``get_resources`` hook use it; otherwise the
        default handler (``full_sync.get_resources``) is exercised.
        """
        plugins = self._get_all_plugins()
        driver = plugins[plugin_type][1]
        args = [mock.Mock()]
        # BGPVPN association getters take an extra bgpvpn_id argument.
        if object_type in [odl_const.ODL_BGPVPN_ROUTER_ASSOCIATION,
                           odl_const.ODL_BGPVPN_NETWORK_ASSOCIATION]:
            args.append(mock.Mock())
        resources = getattr(self, self._get_name(object_type))(*args)
        context = mock.Mock()

        def _test_get_default_handler(context, resource_type,
                                      plugin_type=plugin_type):
            # Mirrors the default handler: resolve the plugin getter name
            # (stripping the '_get_mocked_' prefix) and delegate.
            resource_type = self._get_name(resource_type)[12:]
            return full_sync.get_resources(context, plugin_type=plugin_type,
                                           resource_type=resource_type)
        handler = getattr(driver, 'get_resources', _test_get_default_handler)
        full_sync._sync_resources(context, object_type, handler)
        mocked_record.assert_has_calls(
            [mock.call(context, object_type, resource['id'],
                       odl_const.ODL_CREATE,
                       resource) for resource in resources])
def test_sync_all_resources(self):
self._add_side_effect()
resources = self._get_all_resources()
for obj_type, plugin_name in resources:
self._test_sync_resources(obj_type, plugin_name)
def test_full_sync_retries_exceptions(self):
with mock.patch.object(full_sync, '_full_sync_needed') as m:
self._test_retry_exceptions(full_sync.full_sync, m)
    def test_object_not_registered(self):
        """Syncing an unknown resource type raises and journals nothing."""
        self.assertRaises(exceptions.ResourceNotRegistered,
                          full_sync.sync_resources,
                          self.db_context,
                          'test-object-type')
        # No journal rows may be left behind after the failure.
        self.assertEqual([], db.get_all_db_rows(self.db_context))
    def _register_resources(self):
        """Register the helper test driver's resources.

        Instantiating TestDriver registers its resources as a side effect;
        the global registry is cleared again on test cleanup.
        """
        helper.TestDriver()
        self.addCleanup(base_driver.ALL_RESOURCES.clear)
    def add_plugin(self, plugin_type, plugin):
        """Register *plugin* in the plugin directory (cleanup helper)."""
        directory.add_plugin(plugin_type, plugin)
    def test_plugin_not_registered(self):
        """Sync fails cleanly when the owning plugin is not registered."""
        self._register_resources()
        # NOTE(rajivk): workaround, as we don't have delete method for plugin
        plugin = directory.get_plugin(helper.TEST_PLUGIN)
        directory.add_plugin(helper.TEST_PLUGIN, None)
        # Restore the original plugin when the test finishes.
        self.addCleanup(self.add_plugin, helper.TEST_PLUGIN, plugin)
        self.assertRaises(exceptions.PluginMethodNotFound,
                          full_sync.sync_resources,
                          self.db_context,
                          helper.TEST_RESOURCE1)
        # The failed sync must not leave journal rows behind.
        self.assertEqual([], db.get_all_db_rows(self.db_context))
    def test_sync_resources(self):
        """sync_resources journals exactly the rows the plugin returns."""
        self._register_resources()
        plugin = helper.TestPlugin()
        self.add_plugin(helper.TEST_PLUGIN, plugin)
        resources = plugin.get_test_resource1s(self.db_context)
        full_sync.sync_resources(self.db_context,
                                 helper.TEST_RESOURCE1)
        entries = [entry.data for entry in db.get_all_db_rows(self.db_context)]
        # Every resource must be journalled, and nothing extra.
        for resource in resources:
            self.assertIn(resource, entries)
        self.assertEqual(len(resources), len(entries))
    @mock.patch.object(base_driver.ResourceBaseDriver,
                       'get_resources_for_full_sync')
    def test_get_resources_failed(self, mock_get_resources):
        """A failing resource getter propagates and journals nothing."""
        self._register_resources()
        mock_get_resources.side_effect = exceptions.UnsupportedResourceType()
        resource_name = helper.TEST_RESOURCE1
        self.assertRaises(exceptions.UnsupportedResourceType,
                          full_sync.sync_resources, self.db_context,
                          resource_name)
        mock_get_resources.assert_called_once_with(self.db_context,
                                                   resource_name)
        self.assertEqual([], db.get_all_db_rows(self.db_context))
networking-odl-16.0.0/networking_odl/tests/unit/journal/test_periodic_task.py0000664000175000017500000002020213656750541027644 0ustar zuulzuul00000000000000#
# Copyright (C) 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import threading
import mock
from neutron.common import utils
from neutron_lib import context
from networking_odl.common import constants as odl_const
from networking_odl.db import db
from networking_odl.db import models
from networking_odl.journal import periodic_task
from networking_odl.tests.unit import test_base_db
# Task name and (seconds) interval for the periodic task exercised below.
TEST_TASK_NAME = 'test-maintenance'
TEST_TASK_INTERVAL = 0.1
class PeriodicTaskThreadTestCase(test_base_db.ODLBaseDbTestCase):
    """Tests for periodic_task.PeriodicTask backed by the journal DB."""

    def setUp(self):
        super(PeriodicTaskThreadTestCase, self).setUp()
        # Seed the periodic-task table with a PENDING row so the task under
        # test has state to lock and update.
        row = models.OpenDaylightPeriodicTask(task=TEST_TASK_NAME,
                                              state=odl_const.PENDING)
        self.db_context.session.add(row)
        self.db_context.session.flush()
        self.thread = periodic_task.PeriodicTask(TEST_TASK_NAME,
                                                 TEST_TASK_INTERVAL)
        self.addCleanup(self.thread.cleanup)

    def test__execute_op_no_exception(self):
        """A successful operation is logged at info, never as exception."""
        with mock.patch.object(periodic_task, 'LOG') as mock_log:
            operation = mock.MagicMock()
            operation.__name__ = "test"
            self.thread.register_operation(operation)
            self.thread._execute_op(operation, self.db_context)
            operation.assert_called()
            mock_log.info.assert_called()
            mock_log.exception.assert_not_called()

    def test__execute_op_with_exception(self):
        """A raising operation is caught and logged via LOG.exception."""
        with mock.patch.object(periodic_task, 'LOG') as mock_log:
            operation = mock.MagicMock(side_effect=Exception())
            operation.__name__ = "test"
            self.thread._execute_op(operation, self.db_context)
            mock_log.exception.assert_called()

    def test_thread_works(self):
        """The task thread invokes registered operations repeatedly."""
        callback_event = threading.Event()
        count = 0

        def callback_op(*args):
            nonlocal count
            count += 1
            # The following should be true on the second call, so we're making
            # sure that the thread runs more than once.
            if count > 1:
                callback_event.set()
        self.thread.register_operation(callback_op)
        self.thread.start()
        # Make sure the callback event was called and not timed out
        self.assertTrue(callback_event.wait(timeout=5))

    def test_thread_continues_after_exception(self):
        """An exception in one operation must not stop later operations."""
        exception_event = threading.Event()
        callback_event = threading.Event()

        def exception_op(*args):
            # Raise only on the first invocation.
            if not exception_event.is_set():
                exception_event.set()
                raise Exception()

        def callback_op(*args):
            callback_event.set()
        for op in [exception_op, callback_op]:
            self.thread.register_operation(op)
        self.thread.start()
        # Make sure the callback event was called and not timed out
        self.assertTrue(callback_event.wait(timeout=5))

    def test_multiple_thread_work(self):
        """Two distinct periodic tasks can run side by side."""
        self.thread1 = periodic_task.PeriodicTask(TEST_TASK_NAME + '1',
                                                  TEST_TASK_INTERVAL)
        callback_event = threading.Event()
        callback_event1 = threading.Event()
        self.addCleanup(self.thread1.cleanup)

        def callback_op(*args):
            callback_event.set()

        def callback_op1(*args):
            callback_event1.set()
        self.thread.register_operation(callback_op)
        self.thread.register_operation(callback_op1)
        self.thread.start()
        self.assertTrue(callback_event.wait(timeout=5))
        self.thread1.start()
        self.assertTrue(callback_event1.wait(timeout=5))

    @mock.patch.object(db, "was_periodic_task_executed_recently")
    def test_back_to_back_job(self, mock_status_method):
        """Execution is skipped while the task ran recently, then resumes."""
        callback_event = threading.Event()
        continue_event = threading.Event()

        def callback_op(*args):
            callback_event.set()
        return_value = True

        def continue_(*args, **kwargs):
            # Reads the enclosing return_value cell; flipping it below
            # switches the "executed recently" answer mid-test.
            continue_event.set()
            return return_value
        mock_status_method.side_effect = continue_
        self.thread.register_operation(callback_op)
        msg = ("Periodic %s task executed after periodic "
               "interval Skipping execution.")
        with mock.patch.object(periodic_task.LOG, 'info') as mock_log_info:
            self.thread.start()
            self.assertTrue(continue_event.wait(timeout=1))
            continue_event.clear()
            mock_log_info.assert_called_with(msg, TEST_TASK_NAME)
            self.assertFalse(callback_event.is_set())
            self.assertTrue(continue_event.wait(timeout=1))
            continue_event.clear()
            mock_log_info.assert_called_with(msg, TEST_TASK_NAME)
            # Now report "not executed recently" and expect the callback.
            return_value = False
            self.assertTrue(callback_event.wait(timeout=2))

    def test_set_operation_retries_exceptions(self):
        """_set_operation retries on recoverable DB exceptions."""
        with mock.patch.object(db, 'update_periodic_task') as m:
            self._test_retry_exceptions(self.thread._set_operation, m)

    def test_lock_task_retries_exceptions(self):
        """_lock_task retries on recoverable DB exceptions."""
        with mock.patch.object(db, 'lock_periodic_task') as m:
            self._test_retry_exceptions(self.thread._lock_task, m)

    def test_clear_and_unlock_task_retries_exceptions(self):
        """_clear_and_unlock_task retries on recoverable DB exceptions."""
        with mock.patch.object(db, 'update_periodic_task') as m:
            self._test_retry_exceptions(self.thread._clear_and_unlock_task, m)

    @mock.patch.object(db, "was_periodic_task_executed_recently",
                       return_value=False)
    def test_no_multiple_executions_simultaneously(self, mock_exec_recently):
        """While the task row is locked, a second run must not execute."""
        continue_event = threading.Event()
        trigger_event = threading.Event()
        count = 0

        def wait_until_event(context):
            nonlocal count
            trigger_event.set()
            if continue_event.wait(2):
                count += 1
        self.thread.register_operation(wait_until_event)

        def task_locked():
            # True when the task row is marked PROCESSING (i.e. locked).
            session = self.db_context.session
            row = (session.query(models.OpenDaylightPeriodicTask)
                   .filter_by(state=odl_const.PROCESSING,
                              task=TEST_TASK_NAME)
                   .one_or_none())
            return (row is not None)
        self.thread.start()
        utils.wait_until_true(trigger_event.is_set, 5, 0.01)
        self.assertEqual(count, 0)
        self.assertTrue(task_locked())
        # A concurrent execute_ops must be a no-op while locked.
        self.thread.execute_ops()
        self.assertEqual(count, 0)
        self.assertTrue(task_locked())
        continue_event.set()
        trigger_event.clear()
        utils.wait_until_true(trigger_event.is_set, 5, 0.01)
        self.thread.cleanup()
        self.assertFalse(task_locked())
        self.assertGreaterEqual(count, 1)

    @mock.patch.object(db, "was_periodic_task_executed_recently",
                       return_value=True)
    def test_forced_execution(self, mock_status_method):
        """forced=True bypasses the executed-recently check."""
        operation = mock.MagicMock()
        operation.__name__ = "test"
        self.thread.register_operation(operation)
        self.thread.execute_ops(forced=True)
        operation.assert_called()

    @mock.patch.object(db, "was_periodic_task_executed_recently",
                       return_value=True)
    def test_context_is_passed_as_args(self, _):
        """Operations receive exactly one positional arg: a Context."""
        operation = mock.MagicMock()
        operation.__name__ = 'test'
        self.thread.register_operation(operation)
        self.thread.execute_ops(forced=True)
        # This tests that only ONE args is passed, and no kwargs
        operation.assert_called_with(mock.ANY)
        # This tests that it's a context
        kall = operation.call_args
        args, kwargs = kall
        self.assertIsInstance(args[0], context.Context)
networking-odl-16.0.0/networking_odl/tests/unit/ml2/0000775000175000017500000000000013656750617022443 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/tests/unit/ml2/__init__.py0000664000175000017500000000000013656750541024536 0ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/tests/unit/ml2/test_port_status_update.py0000664000175000017500000000754013656750541030007 0ustar zuulzuul00000000000000# Copyright (c) 2017 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import threading
import mock
from networking_odl.common.client import OpenDaylightRestClient
from networking_odl.common import websocket_client as odl_ws_client
from networking_odl.common.websocket_client import OpenDaylightWebsocketClient
from networking_odl.ml2.port_status_update import OdlPortStatusUpdate
from networking_odl.tests import base
from neutron.db import provisioning_blocks
import neutron_lib.context
import neutron_lib.plugins.directory
class TestOdlPortStatusUpdate(base.DietTestCase):
    """Tests for OdlPortStatusUpdate websocket-driven port status sync."""

    # Sample ODL data-change notification; re.sub strips all whitespace so
    # the payload resembles what actually arrives on the websocket wire.
    WEBSOCK_NOTIFICATION = re.sub(r'\s*', '', """
        {
            "notification": {
                "data-changed-notification": {
                    "data-change-event": {
                        "data": {
                            "status": {
                                "content": "ACTIVE",
                                "xmlns": "urn:opendaylight:neutron"
                            }
                        },
                        "operation": "updated",
                        "path":
                            "/neutron:neutron/neutron:ports/neutron:port[
                            neutron:uuid='d6e6335d-9568-4949-aef1-4107e34c5f28']
                            /neutron:status"
                    },
                    "xmlns":
                    "urn:opendaylight:params:xml:ns:yang:controller:md:sal:remote"
                },
                "eventTime": "2017-02-22T02:27:32+02:00",
                "xmlns": "urn:ietf:params:xml:ns:netconf:notification:1.0"
            }
        }""")

    def setUp(self):
        self.useFixture(base.OpenDaylightFeaturesFixture())
        # NOTE(review): this patcher is created but never start()ed, so
        # odl_create_websocket does not appear to be patched during the
        # tests -- confirm whether start()/addCleanup(stop) was intended.
        self.mock_ws_client = mock.patch.object(
            OpenDaylightWebsocketClient, 'odl_create_websocket')
        super(TestOdlPortStatusUpdate, self).setUp()

    def test_object_create(self):
        """OdlPortStatusUpdate can be instantiated without error."""
        OdlPortStatusUpdate()

    @mock.patch.object(provisioning_blocks, 'provisioning_complete')
    def test_websock_recv(self, mocked_provisioning_complete):
        """A status notification completes provisioning for the port UUID."""
        updater = OdlPortStatusUpdate()
        updater._process_websocket_recv(self.WEBSOCK_NOTIFICATION, False)
        mocked_provisioning_complete.assert_called_once()
        # The port UUID is extracted from the notification path.
        self.assertEqual(mocked_provisioning_complete.call_args[0][1],
                         'd6e6335d-9568-4949-aef1-4107e34c5f28')

    @mock.patch.object(provisioning_blocks, 'provisioning_complete')
    @mock.patch.object(neutron_lib.context, 'get_admin_context')
    @mock.patch.object(OpenDaylightRestClient, 'get')
    @mock.patch.object(neutron_lib.plugins.directory, 'get_plugin')
    def test_pull_missed_statuses(self, mocked_get_plugin, mocked_get, ac, pc):
        """Missed statuses are pulled from ODL for each DOWN port."""
        uuid = 'd6e6335d-9568-4949-aef1-4107e34c5f28'
        plugin = mock.MagicMock()
        plugin.get_ports = mock.MagicMock(return_value=[{'id': uuid}])
        mocked_get_plugin.return_value = plugin
        updater = OdlPortStatusUpdate()
        updater._pull_missed_statuses()
        mocked_get.assert_called_with(uuid)

    @mock.patch.object(threading, 'Thread')
    def test_process_websocket_reconnect(self, mocked_thread):
        """Reconnecting the websocket spawns a catch-up thread."""
        updater = OdlPortStatusUpdate()
        updater._process_websocket_reconnect(
            odl_ws_client.ODL_WEBSOCKET_CONNECTED)
        mocked_thread.assert_called()
        mocked_thread.return_value.start.assert_called()
networking-odl-16.0.0/networking_odl/tests/unit/ml2/test_port_binding.py0000664000175000017500000000330513656750541026527 0ustar zuulzuul00000000000000# Copyright (c) 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from networking_odl.ml2 import legacy_port_binding
from networking_odl.ml2 import port_binding
from networking_odl.tests import base
class TestPortBindingManager(base.DietTestCase):
    """Tests for PortBindingManager controller creation and dispatch."""

    def test_create(self):
        """create() resolves the named controller implementation."""
        mgr = port_binding.PortBindingManager.create(
            name="legacy-port-binding")
        self.assertEqual("legacy-port-binding", mgr.name)
        self.assertIsInstance(mgr.controller,
                              legacy_port_binding.LegacyPortBindingManager)

    def test_create_with_nonexist_name(self):
        """create() with an unknown name must fail."""
        self.assertRaises(AssertionError,
                          port_binding.PortBindingManager.create,
                          name="nonexist-port-binding")

    @mock.patch.object(legacy_port_binding.LegacyPortBindingManager,
                       "bind_port")
    def test_bind_port(self, mock_method):
        """bind_port is delegated to the underlying controller."""
        port_context = mock.Mock()
        mgr = port_binding.PortBindingManager.create(
            name="legacy-port-binding")
        mgr.controller.bind_port(port_context)
        mock_method.assert_called_once_with(port_context)
networking-odl-16.0.0/networking_odl/tests/unit/ml2/config-ovs-external_ids.sh0000775000175000017500000000230213656750541027524 0ustar zuulzuul00000000000000#!/bin/sh
# Copyright (c) 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Capture the UUID of the local Open_vSwitch table row; the external_ids
# written below are all attached to this row.
uuid=$(sudo ovs-vsctl get Open_vSwitch . _uuid)
# Test data
sudo ovs-vsctl set Open_vSwitch $uuid \
    external_ids:odl_os_hostconfig_hostid="devstack"
# sudo ovs-vsctl set Open_vSwitch $uuid \
#     external_ids:odl_os_hostconfig_hosttype="ODL L2"
# Host-config JSON consumed by the ODL pseudo-agent port-binding driver.
config=$(cat <<____CONFIG
{"supported_vnic_types":[
    {"vnic_type":"normal","vif_type":"ovs","vif_details":{}}],
    "allowed_network_types":["local","vlan","vxlan","gre"],
    "bridge_mappings":{"physnet1":"br-ex"}}
____CONFIG
)
echo config: $config
sudo ovs-vsctl set Open_vSwitch $uuid \
    external_ids:odl_os_hostconfig_config_odl_l2="$config"
networking-odl-16.0.0/networking_odl/tests/unit/ml2/test_pseudo_agentdb_binding.py0000664000175000017500000007566213656750541030545 0ustar zuulzuul00000000000000# Copyright (c) 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from copy import deepcopy
from os import path as os_path
from string import Template
import fixtures
import mock
from oslo_serialization import jsonutils
from requests.exceptions import HTTPError
from neutron.db import provisioning_blocks
from neutron.plugins.ml2 import driver_context as ctx
from neutron.plugins.ml2 import plugin as ml2_plugin
from neutron.tests.unit.db import test_db_base_plugin_v2 as test_plugin
from neutron.tests.unit import testlib_api
from neutron_lib.api.definitions import portbindings
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants as n_const
from neutron_lib import fixture
from neutron_lib.plugins import constants as plugin_constants
from neutron_lib.plugins import directory
from neutron_lib.plugins.ml2 import api
from oslo_config import fixture as config_fixture
from networking_odl.common import odl_features
from networking_odl.common import websocket_client
from networking_odl.journal import periodic_task
from networking_odl.ml2 import pseudo_agentdb_binding
from networking_odl.tests import base
AGENTDB_BINARY = 'neutron-odlagent-portbinding'
L2_TYPE = "ODL L2"
# test data hostconfig and hostconfig-dbget
SAMPLE_ODL_HCONFIGS = {"hostconfigs": {"hostconfig": [
{"host-id": "devstack",
"host-type": "ODL L2",
"config": """{"supported_vnic_types": [
{"vnic_type": "normal", "vif_type": "ovs",
"vif_details": {}}],
"allowed_network_types": [
"local", "vlan", "vxlan", "gre"],
"bridge_mappings": {"physnet1": "br-ex"}}"""}
]}}
class OpenDaylightAgentDBFixture(fixtures.Fixture):
    """Fixture installing a mocked agents-db core plugin.

    Registers a MagicMock as the CORE plugin (inside a fresh plugin
    directory) so code under test can call create_or_update_agent
    without a real neutron core plugin.
    """

    def _setUp(self):
        super(OpenDaylightAgentDBFixture, self)._setUp()
        fake_agents_db = mock.MagicMock()
        fake_agents_db.create_or_update_agent = mock.MagicMock()
        self.useFixture(fixture.PluginDirectoryFixture())
        directory.add_plugin(plugin_constants.CORE, fake_agents_db)
class TestPseudoAgentDBBindingTaskBase(base.DietTestCase):
    """Test class for AgentDBPortBindingTaskBase."""

    def setUp(self):
        """Setup test."""
        self.useFixture(base.OpenDaylightRestClientFixture())
        self.useFixture(base.OpenDaylightPseudoAgentPrePopulateFixture())
        self.useFixture(OpenDaylightAgentDBFixture())
        super(TestPseudoAgentDBBindingTaskBase, self).setUp()
        self.worker = pseudo_agentdb_binding.PseudoAgentDBBindingWorker()
        self.task = pseudo_agentdb_binding.PseudoAgentDBBindingTaskBase(
            self.worker)

    def _get_raised_response(self, json_data, status_code):
        """Build a fake response whose raise_for_status raises HTTPError."""

        class MockHTTPError(HTTPError):
            def __init__(self, json_data, status_code):
                self.json_data = json_data
                self.status_code = status_code
                self.response = self

        class MockResponse(object):
            def __init__(self, json_data, status_code):
                self.raise_obj = MockHTTPError(json_data, status_code)

            def raise_for_status(self):
                raise self.raise_obj
        return MockResponse(json_data, status_code)

    def test_hostconfig_response_404(self):
        """A 404 from ODL yields an empty hostconfig list."""
        with mock.patch.object(self.task.odl_rest_client,
                               'get', return_value=self.
                               _get_raised_response({}, 404)):
            self.assertEqual(self.task._rest_get_hostconfigs(), [])
class TestPseudoAgentDBBindingPrePopulate(base.DietTestCase):
    """Tests for the BEFORE_CREATE port callback pre-populating agents."""

    KNOWN_HOST = 'known_host'
    AGENT_TYPE = pseudo_agentdb_binding.PseudoAgentDBBindingWorker.L2_TYPE

    def setUp(self):
        self.useFixture(base.OpenDaylightRestClientFixture())
        self.useFixture(OpenDaylightAgentDBFixture())
        super(TestPseudoAgentDBBindingPrePopulate, self).setUp()
        self.useFixture(fixture.CallbackRegistryFixture())
        self.ml2_plugin = mock.Mock()
        self.ml2_plugin.get_agents = mock.Mock(return_value=[])
        self.worker = mock.Mock()
        self.worker.known_agent = mock.Mock(return_value=False)
        self.worker.add_known_agent = mock.Mock()
        # Fix: was misspelled 'update_agetns_db_row'; the attribute that the
        # tests actually assert on ('update_agents_db_row') was only an
        # auto-created Mock attribute before.
        # NOTE(review): tests below assert on both add_known_agent and
        # add_known_agents -- confirm which is the real worker API.
        self.worker.update_agents_db_row = mock.Mock()
        self.prepopulate = (pseudo_agentdb_binding.
                            PseudoAgentDBBindingPrePopulate(self.worker))

    def _call_before_port_binding(self, host):
        """Fire the PORT BEFORE_CREATE callback for *host*."""
        kwargs = {
            'context': mock.Mock(),
            'port': {
                portbindings.HOST_ID: host
            }
        }
        registry.notify(resources.PORT, events.BEFORE_CREATE, self.ml2_plugin,
                        **kwargs)

    def test_unspecified(self):
        """An unspecified host short-circuits before any agent lookup."""
        self._call_before_port_binding(n_const.ATTR_NOT_SPECIFIED)
        self.worker.known_agent.assert_not_called()

    def test_empty_host(self):
        """An empty host short-circuits before any agent lookup."""
        self._call_before_port_binding('')
        self.worker.known_agent.assert_not_called()

    def test_known_agent(self):
        """A host already known to the worker skips the agents-db query."""
        self.worker.known_agent = mock.Mock(return_value=True)
        self._call_before_port_binding(self.KNOWN_HOST)
        self.worker.known_agent.assert_called()
        self.ml2_plugin.get_agents.assert_not_called()

    def test_agentdb_alive(self):
        """An alive agents-db row is cached; no DB row update occurs."""
        self.ml2_plugin.get_agents = mock.Mock(return_value=[
            {'host': self.KNOWN_HOST,
             'agent_type': self.AGENT_TYPE,
             'alive': True}])
        self._call_before_port_binding(self.KNOWN_HOST)
        self.worker.known_agent.assert_called()
        self.ml2_plugin.get_agents.assert_called()
        self.worker.add_known_agents.assert_called_with([
            {'host': self.KNOWN_HOST,
             'agent_type': self.AGENT_TYPE,
             'alive': True}])
        self.worker.update_agents_db_row.assert_not_called()

    def test_agentdb_dead(self):
        """A dead agents-db row is not cached as known."""
        self.ml2_plugin.get_agents = mock.Mock(return_value=[
            {'host': self.KNOWN_HOST,
             'agent_type': self.AGENT_TYPE,
             'alive': False}])
        self._call_before_port_binding(self.KNOWN_HOST)
        self.worker.known_agent.assert_called()
        self.ml2_plugin.get_agents.assert_called()
        self.worker.add_known_agents.assert_not_called()

    def test_unkown_hostconfig(self):
        """An unknown host triggers a hostconfig fetch and a DB row update."""
        with mock.patch.object(self.prepopulate,
                               'odl_rest_client') as mock_rest_client:
            mock_response = mock.Mock()
            mock_response.json = mock.Mock(
                return_value=SAMPLE_ODL_HCONFIGS['hostconfigs'])
            mock_rest_client.get = mock.Mock(return_value=mock_response)
            self._call_before_port_binding(self.KNOWN_HOST)
            self.worker.known_agent.assert_called()
            self.ml2_plugin.get_agents.assert_called()
            self.worker.add_known_agent.assert_not_called()
            self.worker.update_agents_db_row.assert_called_once()

    def test_http_error(self):
        """A REST failure is swallowed; no agent state is touched."""
        with mock.patch.object(self.prepopulate,
                               'odl_rest_client') as mock_rest_client:
            mock_rest_client.get = mock.Mock(side_effect=Exception('error'))
            self._call_before_port_binding(self.KNOWN_HOST)
            self.worker.known_agent.assert_called()
            self.ml2_plugin.get_agents.assert_called()
            self.worker.add_known_agent.assert_not_called()
            self.worker.update_agents_db_row.assert_not_called()
class TestPseudoAgentDBBindingWorker(base.DietTestCase):
    """Test class for AgentDBPortBinding."""

    def setUp(self):
        """Setup test."""
        self.useFixture(base.OpenDaylightRestClientFixture())
        self.useFixture(base.OpenDaylightPseudoAgentPrePopulateFixture())
        self.useFixture(OpenDaylightAgentDBFixture())
        super(TestPseudoAgentDBBindingWorker, self).setUp()
        self.worker = pseudo_agentdb_binding.PseudoAgentDBBindingWorker()

    def test_update_agents_db(self):
        """test agent update."""
        self.worker.update_agents_db(
            hostconfigs=SAMPLE_ODL_HCONFIGS['hostconfigs']['hostconfig'])
        # Each hostconfig must result in one agents-db create/update call.
        self.worker.agents_db.create_or_update_agent.assert_called_once()
class TestPseudoAgentDBBindingController(base.DietTestCase):
"""Test class for AgentDBPortBinding."""
# Test data for string interpolation of substitutable identifers
# e.g. $PORT_ID identifier in the configurations JSON string below shall
# be substituted with portcontext.current['id'] eliminating the check
# for specific vif_type making port-binding truly switch agnostic.
# Refer: Python string templates and interpolation (string.Template)
sample_hconf_str_tmpl_subs_vpp = {
"host": "devstack", # host-id in ODL JSON
"agent_type": "ODL L2", # host-type in ODL JSON
# config in ODL JSON
"alive": True,
"configurations": {"supported_vnic_types": [
{"vnic_type": "normal", "vif_type": "vhostuser",
"vif_details": {
"uuid": "TEST_UUID",
"has_datapath_type_netdev": True,
"support_vhost_user": True,
"port_prefix": "socket_",
"vhostuser_socket_dir": "/tmp",
"vhostuser_ovs_plug": True,
"vhostuser_mode": "server",
"vhostuser_socket":
"/tmp/socket_$PORT_ID"
}}],
"allowed_network_types": [
"local", "vlan", "vxlan", "gre"],
"bridge_mappings": {"physnet1": "br-ex"}}
}
sample_hconf_str_tmpl_subs_ovs = {
"host": "devstack", # host-id in ODL JSON
"agent_type": "ODL L2", # host-type in ODL JSON
# config in ODL JSON
"alive": True,
"configurations": {"supported_vnic_types": [
{"vnic_type": "normal", "vif_type": "vhostuser",
"vif_details": {
"uuid": "TEST_UUID",
"has_datapath_type_netdev": True,
"support_vhost_user": True,
"port_prefix": "vhu",
"vhostuser_socket_dir": "/var/run/openvswitch",
"vhostuser_ovs_plug": True,
"vhostuser_mode": "client",
"vhostuser_socket":
"/var/run/openvswitch/vhu$PORT_ID"
}}],
"allowed_network_types": [
"local", "vlan", "vxlan", "gre"],
"bridge_mappings": {"physnet1": "br-ex"}}
}
sample_hconf_str_tmpl_nosubs = {
"host": "devstack", # host-id in ODL JSON
"agent_type": "ODL L2", # host-type in ODL JSON
# config in ODL JSON
"configurations": {"supported_vnic_types": [
{"vnic_type": "normal", "vif_type": "ovs",
"vif_details": {
"uuid": "TEST_UUID",
"has_datapath_type_netdev": True,
"support_vhost_user": True,
"port_prefix": "socket_",
"vhostuser_socket_dir": "/tmp",
"vhostuser_ovs_plug": True,
"vhostuser_mode": "server",
"vhostuser_socket":
"/var/run/openvswitch/PORT_NOSUBS"
}}],
"allowed_network_types": [
"local", "vlan", "vxlan", "gre"],
"bridge_mappings": {"physnet1": "br-ex"}}
}
# Test data for vanilla OVS
sample_hconfig_dbget_ovs = {"configurations": {"supported_vnic_types": [
{"vnic_type": "normal", "vif_type": portbindings.VIF_TYPE_OVS,
"vif_details": {
"some_test_details": None
}}],
"allowed_network_types": ["local", "vlan", "vxlan", "gre"],
"bridge_mappings": {"physnet1": "br-ex"}}}
# Test data for vanilla OVS with SR-IOV offload
sample_hconfig_dbget_ovs_sriov_offload = {"configurations": {
"supported_vnic_types": [{
"vnic_type": "normal",
"vif_type": portbindings.VIF_TYPE_OVS,
"vif_details": {
"some_test_details": None}}, {
"vnic_type": "direct",
"vif_type": portbindings.VIF_TYPE_OVS,
"vif_details": {
"some_test_details": None
}}, ],
"allowed_network_types": ["local", "vlan", "vxlan", "gre"],
"bridge_mappings": {"physnet1": "br-ex"}}}
# Test data for OVS-DPDK
sample_hconfig_dbget_ovs_dpdk = {"configurations": {
"supported_vnic_types": [{
"vnic_type": "normal",
"vif_type": portbindings.VIF_TYPE_VHOST_USER,
"vif_details": {
"uuid": "TEST_UUID",
"has_datapath_type_netdev": True,
"support_vhost_user": True,
"port_prefix": "vhu",
# Assumption: /var/run mounted as tmpfs
"vhostuser_socket_dir": "/var/run/openvswitch",
"vhostuser_ovs_plug": True,
"vhostuser_mode": "client",
"vhostuser_socket": "/var/run/openvswitch/vhu$PORT_ID"}}],
"allowed_network_types": ["local", "vlan", "vxlan", "gre"],
"bridge_mappings": {"physnet1": "br-ex"}}}
# Test data for VPP
sample_hconfig_dbget_vpp = {"configurations": {"supported_vnic_types": [
{"vnic_type": "normal", "vif_type": portbindings.VIF_TYPE_VHOST_USER,
"vif_details": {
"uuid": "TEST_UUID",
"has_datapath_type_netdev": True,
"support_vhost_user": True,
"port_prefix": "socket_",
"vhostuser_socket_dir": "/tmp",
"vhostuser_ovs_plug": True,
"vhostuser_mode": "server",
"vhostuser_socket": "/tmp/socket_$PORT_ID"
}}],
"allowed_network_types": ["local", "vlan", "vxlan", "gre"],
"bridge_mappings": {"physnet1": "br-ex"}}}
# Test data for length of string
sample_odl_hconfigs_length = {
"host": "devstack", # host-id in ODL JSON
"agent_type": "ODL L2", # host-type in ODL JSON
# config in ODL JSON
"configurations": {"supported_vnic_types": [
{"vnic_type": "normal", "vif_type": "vhostuser",
"vif_details": {
"uuid": "TEST_UUID",
"has_datapath_type_netdev": True,
"support_vhost_user": True,
"port_prefix": "longprefix_",
"vhostuser_socket_dir": "/tmp",
"vhostuser_ovs_plug": True,
"vhostuser_mode": "server",
"vhostuser_socket":
"/tmp/longprefix_$PORT_ID"
}}],
"allowed_network_types": [
"local", "vlan", "vxlan", "gre"],
"bridge_mappings": {"physnet1": "br-ex"}}
}
# Raw test data for unicode/string comparison
sample_odl_hconfigs_length_raw = {
"host": "devstack",
"agent_type": "ODL L2",
"configurations": """{"supported_vnic_types": [
{"vnic_type": "normal", "vif_type": "vhostuser",
"vif_details": {
"uuid": "TEST_UUID",
"has_datapath_type_netdev": true,
"support_vhost_user": true,
"port_prefix": "prefix_",
"vhostuser_socket_dir": "/tmp",
"vhostuser_ovs_plug": true,
"vhostuser_mode": "server",
"vhostuser_socket":
"/tmp/prefix_$PORT_ID"
}}],
"allowed_network_types": [
"local", "vlan", "vxlan", "gre"],
"bridge_mappings": {"physnet1": "br-ex"}}"""
}
# test data valid and invalid segments
test_valid_segment = {
api.ID: 'API_ID',
api.NETWORK_TYPE: n_const.TYPE_LOCAL,
api.SEGMENTATION_ID: 'API_SEGMENTATION_ID',
api.PHYSICAL_NETWORK: 'API_PHYSICAL_NETWORK'}
test_invalid_segment = {
api.ID: 'API_ID',
api.NETWORK_TYPE: n_const.TYPE_NONE,
api.SEGMENTATION_ID: 'API_SEGMENTATION_ID',
api.PHYSICAL_NETWORK: 'API_PHYSICAL_NETWORK'}
    def setUp(self):
        """Setup test."""
        # Stub out REST, ODL feature probing and agent pre-population so the
        # controller can be constructed without a live ODL instance.
        self.useFixture(base.OpenDaylightRestClientFixture())
        self.useFixture(base.OpenDaylightFeaturesFixture())
        self.useFixture(base.OpenDaylightPseudoAgentPrePopulateFixture())
        self.useFixture(OpenDaylightAgentDBFixture())
        super(TestPseudoAgentDBBindingController, self).setUp()
        self.useFixture(fixture.CallbackRegistryFixture())
        self.cfg = self.useFixture(config_fixture.Config())
        self.mgr = pseudo_agentdb_binding.PseudoAgentDBBindingController()
    def test_is_valid_segment(self):
        """Validate the _check_segment method."""
        all_network_types = [n_const.TYPE_FLAT, n_const.TYPE_GRE,
                             n_const.TYPE_LOCAL, n_const.TYPE_VXLAN,
                             n_const.TYPE_VLAN, n_const.TYPE_NONE]
        # Collect the subset of segment types the controller accepts given
        # the host's allowed_network_types; FLAT and NONE must be rejected.
        valid_types = {
            network_type
            for network_type in all_network_types
            if self.mgr._is_valid_segment(
                {api.NETWORK_TYPE: network_type},
                {'allowed_network_types': [
                    n_const.TYPE_LOCAL, n_const.TYPE_GRE,
                    n_const.TYPE_VXLAN, n_const.TYPE_VLAN]})}
        self.assertEqual({
            n_const.TYPE_LOCAL, n_const.TYPE_GRE, n_const.TYPE_VXLAN,
            n_const.TYPE_VLAN}, valid_types)
def test_bind_port_with_vif_type_ovs(self):
"""test bind_port with vanilla ovs."""
port_context = self._fake_port_context(
fake_segments=[self.test_invalid_segment, self.test_valid_segment])
vif_type = portbindings.VIF_TYPE_OVS
vif_details = {'some_test_details': None}
self.mgr._hconfig_bind_port(
port_context, self.sample_hconfig_dbget_ovs)
port_context.set_binding.assert_called_once_with(
self.test_valid_segment[api.ID], vif_type,
vif_details, status=n_const.PORT_STATUS_ACTIVE)
def test_bind_port_with_vif_type_ovs_with_sriov_offload(self):
"""test bind_port with vanilla ovs with SR-IOV offload"""
port_context = self._fake_port_context(
fake_segments=[self.test_invalid_segment, self.test_valid_segment])
vif_type = portbindings.VIF_TYPE_OVS
vif_details = {'some_test_details': None}
self.mgr._hconfig_bind_port(
port_context, self.sample_hconfig_dbget_ovs_sriov_offload)
port_context.set_binding.assert_called_once_with(
self.test_valid_segment[api.ID], vif_type,
vif_details, status=n_const.PORT_STATUS_ACTIVE)
def _set_pass_vif_details(self, port_context, vif_details):
"""extract vif_details and update vif_details if needed."""
vhostuser_socket_dir = vif_details.get(
'vhostuser_socket_dir', '/var/run/openvswitch')
port_spec = vif_details.get(
'port_prefix', 'vhu') + port_context.current['id']
socket_path = os_path.join(vhostuser_socket_dir, port_spec)
vif_details.update({portbindings.VHOST_USER_SOCKET: socket_path})
return vif_details
def test_bind_port_with_vif_type_vhost_user(self):
"""test bind_port with ovs-dpdk."""
port_context = self._fake_port_context(
fake_segments=[self.test_invalid_segment, self.test_valid_segment],
host_agents=[deepcopy(self.sample_hconf_str_tmpl_subs_ovs)])
self.mgr.bind_port(port_context)
pass_vif_type = portbindings.VIF_TYPE_VHOST_USER
pass_vif_details = self.sample_hconfig_dbget_ovs_dpdk[
'configurations']['supported_vnic_types'][0]['vif_details']
self._set_pass_vif_details(port_context, pass_vif_details)
port_context.set_binding.assert_called_once_with(
self.test_valid_segment[api.ID], pass_vif_type,
pass_vif_details, status=n_const.PORT_STATUS_ACTIVE)
def _test_bind_port_succeed_when_agent_status(self, hconfig, agent_status):
hconfig['alive'] = agent_status
port_context = self._fake_port_context(
fake_segments=[self.test_invalid_segment, self.test_valid_segment],
host_agents=[hconfig])
self.mgr.bind_port(port_context)
port_context.set_binding.assert_called()
    # The four tests below assert that binding succeeds for both VPP and
    # OVS host configs regardless of the agent liveness flag.
    def test_bind_port_succeed_when_agent_dead_vpp(self):
        hconfig = deepcopy(self.sample_hconf_str_tmpl_subs_vpp)
        self._test_bind_port_succeed_when_agent_status(hconfig, False)
    def test_bind_port_succeed_when_agent_dead_ovs(self):
        hconfig = deepcopy(self.sample_hconf_str_tmpl_subs_ovs)
        self._test_bind_port_succeed_when_agent_status(hconfig, False)
    def test_bind_port_succeed_when_agent_alive_vpp(self):
        hconfig = deepcopy(self.sample_hconf_str_tmpl_subs_vpp)
        self._test_bind_port_succeed_when_agent_status(hconfig, True)
    def test_bind_port_succeed_when_agent_alive_ovs(self):
        hconfig = deepcopy(self.sample_hconf_str_tmpl_subs_ovs)
        self._test_bind_port_succeed_when_agent_status(hconfig, True)
def test_bind_port_with_vif_type_vhost_user_vpp(self):
"""test bind_port with vpp."""
port_context = self._fake_port_context(
fake_segments=[self.test_invalid_segment, self.test_valid_segment],
host_agents=[deepcopy(self.sample_hconf_str_tmpl_subs_vpp)])
self.mgr.bind_port(port_context)
pass_vif_type = portbindings.VIF_TYPE_VHOST_USER
pass_vif_details = self.sample_hconfig_dbget_vpp['configurations'][
'supported_vnic_types'][0]['vif_details']
self._set_pass_vif_details(port_context, pass_vif_details)
port_context.set_binding.assert_called_once_with(
self.test_valid_segment[api.ID], pass_vif_type,
pass_vif_details, status=n_const.PORT_STATUS_ACTIVE)
def test_bind_port_without_valid_segment(self):
"""test bind_port without a valid segment."""
port_context = self._fake_port_context(
fake_segments=[self.test_invalid_segment])
self.mgr._hconfig_bind_port(
port_context, self.sample_hconfig_dbget_ovs)
port_context.set_binding.assert_not_called()
def test_no_str_template_substitution_in_configuration_string(self):
"""Test for no identifier substituion in config JSON string."""
port_context = self._fake_port_context(
fake_segments=[self.test_invalid_segment, self.test_valid_segment])
hconf_dict = self.mgr._substitute_hconfig_tmpl(
port_context, self.sample_hconf_str_tmpl_nosubs)
test_string = hconf_dict['configurations'][
'supported_vnic_types'][0][
'vif_details'][portbindings.VHOST_USER_SOCKET]
expected_str = '/var/run/openvswitch/PORT_NOSUBS'
self.assertEqual(expected_str, test_string)
def test_str_template_substitution_in_configuration_string(self):
"""Test for identifier substitution in config JSON string."""
port_context = self._fake_port_context(
fake_segments=[self.test_invalid_segment, self.test_valid_segment])
hconf_dict = self.mgr._substitute_hconfig_tmpl(
port_context, self.sample_hconf_str_tmpl_subs_vpp)
test_string = hconf_dict['configurations'][
'supported_vnic_types'][0][
'vif_details'][portbindings.VHOST_USER_SOCKET]
expected_str = Template('/tmp/socket_$PORT_ID')
expected_str = expected_str.safe_substitute({
'PORT_ID': port_context.current['id']})
self.assertEqual(expected_str, test_string)
def test_str_template_substitution_length_in_configuration_string(self):
"""Test for identifier substitution in config JSON string."""
port_context = self._fake_port_context(
fake_segments=[self.test_invalid_segment, self.test_valid_segment])
hconf_dict = self.mgr._substitute_hconfig_tmpl(
port_context, self.sample_odl_hconfigs_length)
test_string = hconf_dict['configurations'][
'supported_vnic_types'][0][
'vif_details'][portbindings.VHOST_USER_SOCKET]
expected_str = Template('/tmp/longprefix_$PORT_ID')
expected_str = expected_str.safe_substitute({
'PORT_ID': port_context.current['id']})
self.assertNotEqual(expected_str, test_string)
self.assertEqual(len(test_string) - len('/tmp/'), 14)
def test_template_substitution_in_raw_configuration(self):
"""Test for identifier substitution in config string."""
port_context = self._fake_port_context(
fake_segments=[self.test_invalid_segment, self.test_valid_segment])
# Substitute raw string configuration with json
raw_configurations = self.sample_odl_hconfigs_length_raw[
'configurations']
raw_configurations_json = jsonutils.loads(raw_configurations)
self.sample_odl_hconfigs_length_raw['configurations'] = (
raw_configurations_json)
hconf_dict = self.mgr._substitute_hconfig_tmpl(
port_context, self.sample_odl_hconfigs_length_raw)
test_string = hconf_dict['configurations'][
'supported_vnic_types'][0][
'vif_details'][portbindings.VHOST_USER_SOCKET]
expected_str = Template('/tmp/prefix_$PORT_ID')
expected_str = expected_str.safe_substitute({
'PORT_ID': port_context.current['id']})
self.assertEqual(expected_str, test_string)
def _fake_port_context(self, fake_segments, host_agents=None):
network = mock.MagicMock(spec=api.NetworkContext)
return mock.MagicMock(
spec=ctx.PortContext,
current={'id': 'PORTID',
portbindings.VNIC_TYPE: portbindings.VNIC_NORMAL},
segments_to_bind=fake_segments, network=network,
host_agents=lambda agent_type: host_agents,
_plugin_context=mock.MagicMock()
)
@mock.patch.object(provisioning_blocks, 'add_provisioning_component')
def test_prepare_inital_port_status_no_websocket(
self, mocked_add_provisioning_component):
port_ctx = self._fake_port_context(
fake_segments=[self.test_valid_segment])
initial_port_status = self.mgr._prepare_initial_port_status(port_ctx)
self.assertEqual(initial_port_status, n_const.PORT_STATUS_ACTIVE)
mocked_add_provisioning_component.assert_not_called()
@mock.patch.object(provisioning_blocks, 'add_provisioning_component')
def test_prepare_inital_port_status_with_websocket(
self, mocked_add_provisioning_component):
feature_json = """{"features": {"feature":
[{"service-provider-feature":
"neutron-extensions:operational-port-status"}]}}"""
self.cfg.config(odl_features_json=feature_json, group='ml2_odl')
self.addCleanup(odl_features.deinit)
odl_features.init()
port_ctx = self._fake_port_context(
fake_segments=[self.test_valid_segment])
initial_port_status = self.mgr._prepare_initial_port_status(port_ctx)
self.assertEqual(initial_port_status, n_const.PORT_STATUS_DOWN)
mocked_add_provisioning_component.assert_called()
class TestPseudoAgentDBBindingControllerBug1608659(
        test_plugin.NeutronDbPluginV2TestCase):
    """Test class for Bug1608659.

    Regression test: updating the agents DB from host configs that contain
    two entries for the same host-id (different host-types) must not raise.
    """
    # test data hostconfig
    # Two entries share host-id "devstack-control": an L2 config with full
    # vif details and a minimal L3 config.
    sample_odl_hconfigs = {"hostconfigs": {"hostconfig": [
        {"host-id": "devstack-control",
         "host-type": "ODL L2",
         "config": """{"supported_vnic_types": [
             {"vnic_type": "normal", "vif_type": "vhostuser",
              "vif_details":
                  {"port_filter": "False",
                   "vhostuser_socket": "/var/run/openvswitch"}}],
             "allowed_network_types": [
                 "local", "vlan", "vxlan", "gre"],
             "bridge_mappings": {"physnet1": "br-ex"}}"""},
        {"host-id": "devstack-control",
         "host-type": "ODL L3",
         "config": """{ "some_details": "dummy_details" }"""}
    ]}}
    def setUp(self):
        # Stub REST client / agent pre-population before the ML2 plugin
        # is brought up by the base class.
        self.useFixture(base.OpenDaylightRestClientFixture())
        self.useFixture(base.OpenDaylightPseudoAgentPrePopulateFixture())
        self.useFixture(OpenDaylightAgentDBFixture())
        super(TestPseudoAgentDBBindingControllerBug1608659, self).setUp(
            plugin='ml2')
        self.worker = pseudo_agentdb_binding.PseudoAgentDBBindingWorker()
    def test_execute_no_exception(self):
        # Feeding the duplicate-host config list must not log an exception.
        with mock.patch.object(pseudo_agentdb_binding, 'LOG') as mock_log:
            self.worker.update_agents_db(
                self.sample_odl_hconfigs['hostconfigs']['hostconfig'])
            # Assert no exception happened
            self.assertFalse(mock_log.exception.called)
class TestPseudoAgentNeutronWorker(testlib_api.SqlTestCase):
    """Tests for the PseudoAgentDBBindingWorker neutron worker."""
    def setUp(self):
        self.useFixture(base.OpenDaylightRestClientFixture())
        self.useFixture(base.OpenDaylightJournalThreadFixture())
        self.useFixture(base.OpenDaylightFeaturesFixture())
        self.useFixture(base.OpenDaylightPseudoAgentPrePopulateFixture())
        self.cfg = self.useFixture(config_fixture.Config())
        # Prevent the periodic task from actually starting threads.
        self.mock_periodic_thread = mock.patch.object(
            periodic_task.PeriodicTask, 'start').start()
        super(TestPseudoAgentNeutronWorker, self).setUp()
        self.cfg.config(mechanism_drivers=['opendaylight_v2'], group='ml2')
        self.cfg.config(
            port_binding_controller='pseudo-agentdb-binding', group='ml2_odl')
    def test_get_worker(self):
        # The ML2 plugin must expose a PseudoAgentDBBindingWorker when the
        # pseudo-agentdb-binding controller is configured.
        workers = ml2_plugin.Ml2Plugin().get_workers()
        self.assertTrue(any(
            isinstance(worker,
                       pseudo_agentdb_binding.PseudoAgentDBBindingWorker)
            for worker in workers))
    def test_worker(self):
        # Lifecycle methods must be callable without error.
        worker = pseudo_agentdb_binding.PseudoAgentDBBindingWorker()
        worker.wait()
        worker.stop()
        worker.reset()
    def test_worker_start_websocket(self):
        # With websockets enabled, start() must open the ODL websocket.
        self.cfg.config(enable_websocket_pseudo_agentdb=True, group='ml2_odl')
        worker = pseudo_agentdb_binding.PseudoAgentDBBindingWorker()
        with mock.patch.object(
                websocket_client.OpenDaylightWebsocketClient,
                'odl_create_websocket') as mock_odl_create_websocket:
            worker.start()
            mock_odl_create_websocket.assert_called_once()
    def test_worker_start_periodic(self):
        # With websockets disabled, start() must fall back to polling.
        self.cfg.config(enable_websocket_pseudo_agentdb=False, group='ml2_odl')
        worker = pseudo_agentdb_binding.PseudoAgentDBBindingWorker()
        with mock.patch.object(
                periodic_task.PeriodicTask, 'start') as mock_start:
            worker.start()
            mock_start.assert_called_once()
networking-odl-16.0.0/networking_odl/tests/unit/ml2/test_mechanism_odl_v2.py0000664000175000017500000010510413656750541027262 0ustar zuulzuul00000000000000# Copyright (c) 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime
import operator
import mock
import requests
import testscenarios
from neutron.db.models import securitygroup
from neutron.db import segments_db
from neutron.plugins.ml2 import plugin
from neutron.tests.unit import testlib_api
from neutron_lib.api.definitions import multiprovidernet as mpnet_apidef
from neutron_lib.api.definitions import provider_net as providernet
from neutron_lib import constants as n_constants
from neutron_lib.db import api as db_api
from neutron_lib.plugins import constants as plugin_const
from neutron_lib.plugins import directory
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from networking_odl.common import callback
from networking_odl.common import constants as odl_const
from networking_odl.common import filters
from networking_odl.common import utils
from networking_odl.db import db
from networking_odl.journal import base_driver
from networking_odl.journal import cleanup
from networking_odl.journal import journal
from networking_odl.ml2 import mech_driver_v2
from networking_odl.tests import base
from networking_odl.tests.unit import base_v2
# Required to generate tests from scenarios. Not compatible with nose.
load_tests = testscenarios.load_tests_apply_scenarios
# Register the [ml2_odl] option group before any test reads it.
cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')
# Stable identifiers shared by the security-group fixtures below.
SECURITY_GROUP = '2f9244b4-9bee-4e81-bc4a-3f3c2045b3d7'
SG_FAKE_ID = uuidutils.generate_uuid()
SG_RULE_FAKE_ID = uuidutils.generate_uuid()
class OpenDayLightMechanismConfigTests(testlib_api.SqlTestCase):
    """Config validation: url/username/password are required options."""
    def setUp(self):
        self.useFixture(base.OpenDaylightFeaturesFixture())
        self.useFixture(base.OpenDaylightJournalThreadFixture())
        self.useFixture(base.OpenDaylightPseudoAgentPrePopulateFixture())
        self.cfg = self.useFixture(config_fixture.Config())
        super(OpenDayLightMechanismConfigTests, self).setUp()
        self.cfg.config(mechanism_drivers=[
            'logger', 'opendaylight_v2'], group='ml2')
        self.cfg.config(
            port_binding_controller='legacy-port-binding', group='ml2_odl')
    def _set_config(self, url='http://127.0.0.1:9999', username='someuser',
                    password='somepass'):
        # Apply the three [ml2_odl] connection options; pass None for one
        # of them to simulate it being missing.
        self.cfg.config(url=url, group='ml2_odl')
        self.cfg.config(username=username, group='ml2_odl')
        self.cfg.config(password=password, group='ml2_odl')
    def _test_missing_config(self, **kwargs):
        # Instantiating the ML2 plugin must fail when a required
        # [ml2_odl] option is unset.
        self._set_config(**kwargs)
        self.assertRaisesRegex(cfg.RequiredOptError,
                               r'value required for option \w+ in group '
                               r'\[ml2_odl\]',
                               plugin.Ml2Plugin)
    def test_valid_config(self):
        self._set_config()
        plugin.Ml2Plugin()
    def test_missing_url_raises_exception(self):
        self._test_missing_config(url=None)
    def test_missing_username_raises_exception(self):
        self._test_missing_config(username=None)
    def test_missing_password_raises_exception(self):
        self._test_missing_config(password=None)
class _OpenDaylightMechanismBase(base_v2.OpenDaylightTestCase):
    """Common setup for tests running the opendaylight_v2 mech driver."""
    _mechanism_drivers = ['logger', 'opendaylight_v2']
    # TODO(mpeterson): Add a test to make sure extension_drivers are honored.
    _extension_drivers = ['port_security', 'qos']
    def setUp(self):
        # Wrap initialize() with an autospec mock that still calls the
        # real implementation, so tests can assert it ran.
        mech_initialize_patcher = mock.patch.object(
            mech_driver_v2.OpenDaylightMechanismDriver,
            'initialize',
            autospec=True,
            side_effect=mech_driver_v2.OpenDaylightMechanismDriver.initialize
        )
        self.mech_initialize_mock = mech_initialize_patcher.start()
        mock.patch('networking_odl.common.odl_features.init').start()
        # NOTE(mpeterson): We cannot use stop in the following cleanup because
        # several of the following fixtures and setUp() add a cleanup for
        # stopall. The reason to add the stopall ourselves is to make sure
        # that it will be stopped if anything were to change in the future.
        self.addCleanup(mock.patch.stopall)
        self.useFixture(base.OpenDaylightPseudoAgentPrePopulateFixture())
        self.cfg = self.useFixture(config_fixture.Config())
        self.cfg.config(extension_drivers=self._extension_drivers, group='ml2')
        super(_OpenDaylightMechanismBase, self).setUp()
    def test_mechanism_driver_is_initialized(self):
        """Test that the mech driver is initialized.
        This test will allow us know if the mech driver is not initialized
        in case there is a change in the way Ml2PluginV2TestCase instantiate
        them
        """
        # NOTE(mpeterson): Because of the autospec the mock lacks
        # the helper assert_called_once
        msg = "The opendaylight_v2 ML2 Mechanism Driver was not initialized"
        self.assertTrue(self.mech_initialize_mock.called, msg)
class DataMatcher(object):
    """Equality matcher for the JSON payload sent to ODL.

    Built from a (operation, object_type, context) triple; compares equal
    to a JSON string whose [object_type] member matches the filtered
    context data.
    """
    def __init__(self, operation, object_type, context):
        if object_type in [odl_const.ODL_SG, odl_const.ODL_SG_RULE]:
            # SG/SG-rule contexts are plain dicts keyed by object type.
            self._data = copy.deepcopy(context[object_type])
        elif object_type == odl_const.ODL_PORT:
            # NOTE(yamahata): work around for journal._enrich_port()
            self._data = copy.deepcopy(context.current)
            if self._data.get(odl_const.ODL_SGS):
                self._data[odl_const.ODL_SGS] = [
                    {'id': id_} for id_ in self._data[odl_const.ODL_SGS]]
        else:
            self._data = copy.deepcopy(context.current)
        self._object_type = object_type
        # filter_for_odl mutates self._data to the shape ODL expects.
        filters.filter_for_odl(object_type, operation, self._data)
    def __eq__(self, s):
        # 's' is the serialized request body captured from the HTTP mock.
        data = jsonutils.loads(s)
        return self._data == data[self._object_type]
    def __ne__(self, s):
        # Explicit __ne__ for Python 2 compatibility.
        return not self.__eq__(s)
    def __repr__(self):
        # for debugging
        return 'DataMatcher(%(object_type)s, %(data)s)' % {
            'object_type': self._object_type,
            'data': self._data}
class AttributeDict(dict):
    """Dict whose items are also reachable as attributes."""
    def __init__(self, *args, **kwargs):
        dict.__init__(self, *args, **kwargs)
        # Alias the attribute namespace to the mapping itself so that
        # d['key'] and d.key stay in sync.
        self.__dict__ = self
class OpenDaylightMechanismDriverTestCase(base_v2.OpenDaylightConfigBase):
    def setUp(self):
        """Install fixtures and build an initialized mech driver."""
        self.useFixture(base.OpenDaylightFeaturesFixture())
        self.useFixture(base.OpenDaylightJournalThreadFixture())
        self.useFixture(base.OpenDaylightPseudoAgentPrePopulateFixture())
        super(OpenDaylightMechanismDriverTestCase, self).setUp()
        self.mech = mech_driver_v2.OpenDaylightMechanismDriver()
        self.mech.initialize()
def test_registered_plugin_type(self):
self.assertEqual(self.mech.plugin_type, plugin_const.CORE)
def test_registered_resources(self):
for resource in self.mech.RESOURCES:
self.assertIn(resource, base_driver.ALL_RESOURCES)
self.assertEqual(base_driver.ALL_RESOURCES[resource], self.mech)
    def _get_mock_network_operation_context(self):
        """Return a mock NetworkContext with a fixed network payload."""
        current = {'status': 'ACTIVE',
                   'subnets': [],
                   'name': 'net1',
                   'provider:physical_network': None,
                   'admin_state_up': True,
                   'tenant_id': 'test-tenant',
                   'provider:network_type': 'local',
                   'router:external': False,
                   'shared': False,
                   'id': 'd897e21a-dfd6-4331-a5dd-7524fa421c3e',
                   'provider:segmentation_id': None}
        context = mock.Mock(current=current)
        context._plugin_context = self.db_context
        return context
    def _get_mock_subnet_operation_context(self):
        """Return a mock SubnetContext tied to the mock network's id."""
        current = {'ipv6_ra_mode': None,
                   'allocation_pools': [{'start': '10.0.0.2',
                                         'end': '10.0.1.254'}],
                   'host_routes': [],
                   'ipv6_address_mode': None,
                   'cidr': '10.0.0.0/23',
                   'id': '72c56c48-e9b8-4dcf-b3a7-0813bb3bd839',
                   'name': '',
                   'enable_dhcp': True,
                   'network_id': 'd897e21a-dfd6-4331-a5dd-7524fa421c3e',
                   'tenant_id': 'test-tenant',
                   'dns_nameservers': [],
                   'gateway_ip': '10.0.0.1',
                   'ip_version': 4,
                   'shared': False}
        context = mock.Mock(current=current)
        context._plugin_context = self.db_context
        return context
    def _get_mock_port_operation_context(self):
        """Return an AttributeDict-based PortContext stand-in.

        Also patches the core plugin's get_port/get_network (and
        writer_get_security_group) so journal enrichment finds consistent
        data for this port.
        """
        current = {'status': 'DOWN',
                   'binding:host_id': '',
                   'allowed_address_pairs': [],
                   'device_owner': 'fake_owner',
                   'binding:profile': {},
                   'fixed_ips': [{
                       'subnet_id': '72c56c48-e9b8-4dcf-b3a7-0813bb3bd839'}],
                   'id': '83d56c48-e9b8-4dcf-b3a7-0813bb3bd940',
                   'security_groups': [SECURITY_GROUP],
                   'device_id': 'fake_device',
                   'name': '',
                   'admin_state_up': True,
                   'network_id': 'd897e21a-dfd6-4331-a5dd-7524fa421c3e',
                   'tenant_id': 'test-tenant',
                   'binding:vif_details': {},
                   'binding:vnic_type': 'normal',
                   'binding:vif_type': 'unbound',
                   'mac_address': '12:34:56:78:21:b6'}
        _network = self._get_mock_network_operation_context().current
        _plugin = directory.get_plugin()
        _plugin.writer_get_security_group = mock.Mock(
            return_value=SECURITY_GROUP)
        _plugin.get_port = mock.Mock(return_value=current)
        _plugin.get_network = mock.Mock(return_value=_network)
        _plugin_context_mock = {'session': self.db_context.session}
        _network_context_mock = {'_network': _network}
        context = {'current': AttributeDict(current),
                   '_plugin': _plugin,
                   '_plugin_context': AttributeDict(_plugin_context_mock),
                   '_network_context': AttributeDict(_network_context_mock)}
        return AttributeDict(context)
    def _get_mock_security_group_operation_context(self):
        """Return a plain-dict SG context keyed by the SG object type."""
        context = {odl_const.ODL_SG: {'name': 'test_sg',
                                      'project_id': 'test-tenant',
                                      'tenant_id': 'test-tenant',
                                      'description': 'test-description',
                                      'security_group_rules': [],
                                      'id': SG_FAKE_ID}}
        return context
    def _get_mock_security_group_rule_operation_context(self):
        """Return a plain-dict SG-rule context; patches rule lookup."""
        context = {odl_const.ODL_SG_RULE: {'security_group_id': SG_FAKE_ID,
                                           'id': SG_RULE_FAKE_ID}}
        _plugin = directory.get_plugin()
        # The driver looks the rule up on the plugin; return our fake.
        _plugin._get_security_group_rule = mock.Mock(
            return_value=AttributeDict(context[odl_const.ODL_SG_RULE]))
        return context
def _get_mock_operation_context(self, object_type):
getter = getattr(self, '_get_mock_%s_operation_context' % object_type)
return getter()
_status_code_msgs = {
200: '',
201: '',
204: '',
400: '400 Client Error: Bad Request',
401: '401 Client Error: Unauthorized',
403: '403 Client Error: Forbidden',
404: '404 Client Error: Not Found',
409: '409 Client Error: Conflict',
501: '501 Server Error: Not Implemented',
503: '503 Server Error: Service Unavailable',
}
@classmethod
def _get_mock_request_response(cls, status_code):
response = mock.Mock(status_code=status_code)
response.raise_for_status = mock.Mock() if status_code < 400 else (
mock.Mock(side_effect=requests.exceptions.HTTPError(
cls._status_code_msgs[status_code])))
return response
    def _test_operation(self, status_code, expected_calls,
                        *args, **kwargs):
        """Run the journal once against a mocked HTTP session.

        Asserts the session was called *expected_calls* times and, when
        non-zero, that the last call used the given args plus the standard
        JSON headers and configured timeout.
        """
        request_response = self._get_mock_request_response(status_code)
        with mock.patch('requests.sessions.Session.request',
                        return_value=request_response) as mock_method:
            self.run_journal_processing()
        if expected_calls:
            mock_method.assert_called_with(
                headers={'Content-Type': 'application/json'},
                timeout=cfg.CONF.ml2_odl.timeout, *args, **kwargs)
        self.assertEqual(expected_calls, mock_method.call_count)
    def _call_operation_object(self, operation, object_type):
        """Invoke the driver precommit path for one operation/object pair.

        Security groups and SG rules go through
        sync_from_callback_precommit (with extra kwargs working around
        upstream callback payload gaps); every other object type goes
        through the <operation>_<object_type>_precommit method.
        """
        context = self._get_mock_operation_context(object_type)
        if object_type in [odl_const.ODL_SG, odl_const.ODL_SG_RULE]:
            # Resolve the callback resource type whose singular name
            # matches this object type.
            res_type = [rt for rt in callback._RESOURCE_MAPPING.values()
                        if rt.singular == object_type][0]
            res_id = context[object_type]['id']
            # Delete operations carry no context payload.
            context_ = (copy.deepcopy(context)
                        if operation != odl_const.ODL_DELETE else None)
            plugin_context = self.db_context
            if (object_type == odl_const.ODL_SG and
                    operation in [odl_const.ODL_CREATE, odl_const.ODL_DELETE]):
                # TODO(yamahata): remove this work around once
                # https://review.opendev.org/#/c/281693/
                # is merged.
                if operation == odl_const.ODL_CREATE:
                    # Persist a real SG row so the precommit hook can
                    # find it in the session.
                    sg = securitygroup.SecurityGroup(
                        id=res_id, name=context_[object_type]['name'],
                        tenant_id=context_[object_type]['tenant_id'],
                        description=context_[object_type]['description'])
                    plugin_context.session.add(sg)
                    sg_dict = dict(sg)
                    sg_dict['security_group_rules'] = []
                    with db_api.CONTEXT_WRITER.using(plugin_context):
                        self.mech.sync_from_callback_precommit(
                            plugin_context, operation, res_type, res_id,
                            context_, security_group=sg_dict)
                if operation == odl_const.ODL_DELETE:
                    with db_api.CONTEXT_WRITER.using(plugin_context):
                        self.mech.sync_from_callback_precommit(
                            plugin_context, operation, res_type, res_id,
                            context_,
                            security_group={'security_group_rules':
                                            {'id': SG_RULE_FAKE_ID}},
                            security_group_rule_ids=[SG_RULE_FAKE_ID])
            elif (object_type == odl_const.ODL_SG_RULE and
                    operation == odl_const.ODL_DELETE):
                with db_api.CONTEXT_WRITER.using(plugin_context):
                    self.mech.sync_from_callback_precommit(
                        plugin_context, operation, res_type, res_id,
                        context_, security_group_id=SG_FAKE_ID)
            else:
                with db_api.CONTEXT_WRITER.using(plugin_context):
                    self.mech.sync_from_callback_precommit(
                        plugin_context, operation, res_type, res_id,
                        context_)
        else:
            method = getattr(self.mech, '%s_%s_precommit' % (operation,
                                                             object_type))
            # NOTE(review): the writer is opened on the mock ml2 context
            # itself here (not self.db_context) — presumably db_api
            # resolves the session via the context; confirm before
            # changing.
            with db_api.CONTEXT_WRITER.using(context):
                method(context)
def _test_operation_object(self, operation, object_type):
self._call_operation_object(operation, object_type)
context = self._get_mock_operation_context(object_type)
row = db.get_oldest_pending_db_row_with_lock(self.db_context)
self.assertEqual(operation, row['operation'])
self.assertEqual(object_type, row['object_type'])
self.assertEqual(context.current['id'], row['object_uuid'])
self._db_cleanup()
    def _test_thread_processing(self, operation, object_type,
                                expected_calls=1):
        """Record an operation, run the journal, and check the HTTP call.

        Maps the journal operation to its HTTP verb / success status code,
        builds the expected ODL URL (id-suffixed for update/delete), and
        delegates the call-count/payload assertions to _test_operation.
        """
        http_requests = {odl_const.ODL_CREATE: 'post',
                         odl_const.ODL_UPDATE: 'put',
                         odl_const.ODL_DELETE: 'delete'}
        status_codes = {odl_const.ODL_CREATE: requests.codes.created,
                        odl_const.ODL_UPDATE: requests.codes.ok,
                        odl_const.ODL_DELETE: requests.codes.no_content}
        http_request = http_requests[operation]
        status_code = status_codes[operation]
        self._call_operation_object(operation, object_type)
        context = self._get_mock_operation_context(object_type)
        url_object_type = utils.neutronify(object_type)
        if operation in [odl_const.ODL_UPDATE, odl_const.ODL_DELETE]:
            # SG/SG-rule contexts are dicts; the rest expose .current.
            if object_type in [odl_const.ODL_SG, odl_const.ODL_SG_RULE]:
                uuid = context[object_type]['id']
            else:
                uuid = context.current['id']
            url = '%s/%ss/%s' % (cfg.CONF.ml2_odl.url, url_object_type, uuid)
        else:
            url = '%s/%ss' % (cfg.CONF.ml2_odl.url, url_object_type)
        if (object_type == odl_const.ODL_SG and
                operation == odl_const.ODL_CREATE):
            context = copy.deepcopy(context)
        if operation in [odl_const.ODL_CREATE, odl_const.ODL_UPDATE]:
            kwargs = {
                'url': url,
                'data': DataMatcher(operation, object_type, context)}
        else:
            # Deletes carry no body.
            kwargs = {'url': url, 'data': None}
        self._test_operation(status_code, expected_calls, http_request,
                             **kwargs)
    def _test_object_type(self, object_type, delete_expected_calls=1):
        """Run a full create/update/delete cycle for *object_type*.

        After each phase, assert the cumulative number of COMPLETED
        journal rows. *delete_expected_calls* covers objects whose delete
        fans out into more than one HTTP call (e.g. security groups).
        """
        # Add and process create request.
        self._test_thread_processing(odl_const.ODL_CREATE, object_type)
        rows = db.get_all_db_rows_by_state(self.db_context,
                                           odl_const.COMPLETED)
        self.assertEqual(1, len(rows))
        # Add and process update request. Adds to database.
        self._test_thread_processing(odl_const.ODL_UPDATE, object_type)
        rows = db.get_all_db_rows_by_state(self.db_context,
                                           odl_const.COMPLETED)
        self.assertEqual(2, len(rows))
        # Add and process update request. Adds to database.
        self._test_thread_processing(odl_const.ODL_DELETE, object_type,
                                     delete_expected_calls)
        rows = db.get_all_db_rows_by_state(self.db_context,
                                           odl_const.COMPLETED)
        self.assertEqual(2 + delete_expected_calls, len(rows))
    def _test_object_type_pending_network(self, object_type):
        """Creating a child while its network is pending flushes both."""
        # Create a network (creates db row in pending state).
        self._call_operation_object(odl_const.ODL_CREATE,
                                    odl_const.ODL_NETWORK)
        # Create object_type database row and process. This results in both
        # the object_type and network rows being processed.
        self._test_thread_processing(odl_const.ODL_CREATE, object_type,
                                     expected_calls=2)
        # Verify both rows are now marked as completed.
        rows = db.get_all_db_rows_by_state(self.db_context,
                                           odl_const.COMPLETED)
        self.assertEqual(2, len(rows))
    def _test_object_type_processing_network(self, object_type):
        """Child create must wait while its network create is in flight."""
        self._test_object_operation_pending_another_object_operation(
            object_type, odl_const.ODL_CREATE, odl_const.ODL_NETWORK,
            odl_const.ODL_CREATE)
    def _test_object_operation_pending_object_operation(
            self, object_type, operation, pending_operation):
        """Same-object variant: the dependency is on the object itself."""
        self._test_object_operation_pending_another_object_operation(
            object_type, operation, object_type, pending_operation)
    def _test_object_operation_pending_another_object_operation(
            self, object_type, operation, pending_type, pending_operation):
        """An operation must not run while its dependency is PROCESSING.

        Records the dependency, pins its row in PROCESSING, then records
        the dependent operation and asserts the journal made no HTTP call
        and both rows remain (one PROCESSING, one PENDING).
        """
        # Create the object_type (creates db row in pending state).
        self._call_operation_object(pending_operation,
                                    pending_type)
        # Get pending row and mark as processing so that
        # this row will not be processed by journal thread.
        row = db.get_all_db_rows_by_state(self.db_context, odl_const.PENDING)
        db.update_db_row_state(self.db_context, row[0], odl_const.PROCESSING)
        # Create the object_type database row and process.
        # Verify that object request is not processed because the
        # dependent object operation has not been marked as 'completed'.
        self._test_thread_processing(operation,
                                     object_type,
                                     expected_calls=0)
        # Verify that all rows are still in the database.
        rows = db.get_all_db_rows_by_state(self.db_context,
                                           odl_const.PROCESSING)
        self.assertEqual(1, len(rows))
        rows = db.get_all_db_rows_by_state(self.db_context, odl_const.PENDING)
        self.assertEqual(1, len(rows))
    def _test_parent_delete_pending_child_delete(self, parent, child):
        """Parent delete must wait until the child delete completes."""
        self._test_object_operation_pending_another_object_operation(
            parent, odl_const.ODL_DELETE, child, odl_const.ODL_DELETE)
    def _test_cleanup_processing_rows(self, last_retried, expected_state):
        """Check cleanup's handling of a PROCESSING row by retry age.

        Pins a row in PROCESSING with the given last_retried timestamp,
        runs cleanup, and asserts the row ends in *expected_state*.
        """
        # Create a dummy network (creates db row in pending state).
        self._call_operation_object(odl_const.ODL_CREATE,
                                    odl_const.ODL_NETWORK)
        # Get pending row and mark as processing and update
        # the last_retried time
        row = db.get_all_db_rows_by_state(self.db_context,
                                          odl_const.PENDING)[0]
        row.last_retried = last_retried
        db.update_db_row_state(self.db_context, row, odl_const.PROCESSING)
        # Test if the cleanup marks this in the desired state
        # based on the last_retried timestamp
        cleanup.cleanup_processing_rows(self.db_context)
        # Verify that the Db row is in the desired state
        rows = db.get_all_db_rows_by_state(self.db_context, expected_state)
        self.assertEqual(1, len(rows))
def test_driver(self):
for operation in [odl_const.ODL_CREATE, odl_const.ODL_UPDATE,
odl_const.ODL_DELETE]:
for object_type in [odl_const.ODL_NETWORK, odl_const.ODL_SUBNET,
odl_const.ODL_PORT]:
self._test_operation_object(operation, object_type)
    def test_port_precommit_no_tenant(self):
        """A port without a tenant inherits its network's tenant_id."""
        context = self._get_mock_operation_context(odl_const.ODL_PORT)
        context.current['tenant_id'] = ''
        method = getattr(self.mech, 'create_port_precommit')
        method(context)
        self.db_context.session.flush()
        # Verify that the Db row has a tenant
        rows = db.get_all_db_rows_by_state(self.db_context, odl_const.PENDING)
        self.assertEqual(1, len(rows))
        _network = self._get_mock_network_operation_context().current
        self.assertEqual(_network['tenant_id'], rows[0]['data']['tenant_id'])
    # Network scenarios: full CRUD cycle plus ordering dependencies.
    def test_network(self):
        self._test_object_type(odl_const.ODL_NETWORK)
    def test_network_update_pending_network_create(self):
        self._test_object_operation_pending_object_operation(
            odl_const.ODL_NETWORK, odl_const.ODL_UPDATE, odl_const.ODL_CREATE)
    def test_network_delete_pending_network_create(self):
        self._test_object_operation_pending_object_operation(
            odl_const.ODL_NETWORK, odl_const.ODL_DELETE, odl_const.ODL_CREATE)
    def test_network_delete_pending_network_update(self):
        self._test_object_operation_pending_object_operation(
            odl_const.ODL_NETWORK, odl_const.ODL_DELETE, odl_const.ODL_UPDATE)
    def test_network_delete_pending_subnet_delete(self):
        self._test_parent_delete_pending_child_delete(
            odl_const.ODL_NETWORK, odl_const.ODL_SUBNET)
    def test_network_delete_pending_port_delete(self):
        self._test_parent_delete_pending_child_delete(
            odl_const.ODL_NETWORK, odl_const.ODL_PORT)
    # Subnet scenarios: full CRUD cycle plus ordering dependencies.
    def test_subnet(self):
        self._test_object_type(odl_const.ODL_SUBNET)
    def test_subnet_update_pending_subnet_create(self):
        self._test_object_operation_pending_object_operation(
            odl_const.ODL_SUBNET, odl_const.ODL_UPDATE, odl_const.ODL_CREATE)
    def test_subnet_delete_pending_subnet_create(self):
        self._test_object_operation_pending_object_operation(
            odl_const.ODL_SUBNET, odl_const.ODL_DELETE, odl_const.ODL_CREATE)
    def test_subnet_delete_pending_subnet_update(self):
        self._test_object_operation_pending_object_operation(
            odl_const.ODL_SUBNET, odl_const.ODL_DELETE, odl_const.ODL_UPDATE)
    def test_subnet_pending_network(self):
        self._test_object_type_pending_network(odl_const.ODL_SUBNET)
    def test_subnet_processing_network(self):
        self._test_object_type_processing_network(odl_const.ODL_SUBNET)
    def test_subnet_delete_pending_port_delete(self):
        self._test_parent_delete_pending_child_delete(
            odl_const.ODL_SUBNET, odl_const.ODL_PORT)
def test_port(self):
self._test_object_type(odl_const.ODL_PORT)
def test_port_update_pending_port_create(self):
self._test_object_operation_pending_object_operation(
odl_const.ODL_PORT, odl_const.ODL_UPDATE, odl_const.ODL_CREATE)
    def test_port_delete_pending_port_create(self):
        # Port DELETE queued while its CREATE is still pending.
        self._test_object_operation_pending_object_operation(
            odl_const.ODL_PORT, odl_const.ODL_DELETE, odl_const.ODL_CREATE)
    def test_port_delete_pending_port_update(self):
        # Port DELETE queued while its UPDATE is still pending.
        self._test_object_operation_pending_object_operation(
            odl_const.ODL_PORT, odl_const.ODL_DELETE, odl_const.ODL_UPDATE)
    def test_port_pending_network(self):
        # Port operation while its parent network op is still pending.
        self._test_object_type_pending_network(odl_const.ODL_PORT)
    def test_port_processing_network(self):
        # Port operation while its parent network op is being processed.
        self._test_object_type_processing_network(odl_const.ODL_PORT)
    def test_cleanup_processing_rows_time_not_expired(self):
        # A freshly-touched PROCESSING row must stay PROCESSING.
        self._test_cleanup_processing_rows(datetime.datetime.utcnow(),
                                           odl_const.PROCESSING)
    def test_cleanup_processing_rows_time_expired(self):
        # A PROCESSING row untouched for 24h is reset back to PENDING.
        old_time = datetime.datetime.utcnow() - datetime.timedelta(hours=24)
        self._test_cleanup_processing_rows(old_time, odl_const.PENDING)
def test_thread_call(self):
"""Verify that the sync thread method is called."""
with mock.patch.object(
journal.OpenDaylightJournalThread,
'start') as mock_sync_thread:
self.mech = mech_driver_v2.OpenDaylightMechanismDriver()
self.mech.initialize()
# Create any object that would spin up the sync thread via the
# decorator call_thread_on_end() used by all the event handlers.
self._call_operation_object(odl_const.ODL_CREATE,
odl_const.ODL_NETWORK)
# Verify that the thread call was made.
mock_sync_thread.assert_called()
    def test_sg(self):
        # Security-group journal cycle (2 journal entries expected).
        self._test_object_type(odl_const.ODL_SG, 2)
    def test_sg_rule(self):
        # Security-group-rule journal cycle.
        self._test_object_type(odl_const.ODL_SG_RULE)
    def test_sg_delete(self):
        """Deleting a security group records a journal entry for each of
        its rules followed by one for the (now rule-less) group itself.
        """
        with mock.patch.object(journal, 'record') as record:
            context = self._get_mock_operation_context(odl_const.ODL_SG)
            res_id = context[odl_const.ODL_SG]['id']
            # One fake rule attached to the group being deleted.
            rule = mock.Mock()
            rule.id = SG_RULE_FAKE_ID
            rule.security_group_id = SG_FAKE_ID
            sg = mock.Mock()
            sg.id = SG_FAKE_ID
            sg.security_group_rules = [rule]
            kwargs = {'security_group': sg,
                      'security_group_rule_ids': [SG_RULE_FAKE_ID]}
            with db_api.CONTEXT_WRITER.using(self.db_context):
                self.mech.sync_from_callback_precommit(
                    self.db_context, odl_const.ODL_DELETE,
                    callback._RESOURCE_MAPPING[odl_const.ODL_SG],
                    res_id, context, **kwargs)
            # Rule delete is recorded first (depending on the group id),
            # then the group delete with an emptied rule list.
            record.assert_has_calls(
                [mock.call(mock.ANY, 'security_group_rule',
                           SG_RULE_FAKE_ID, 'delete', [SG_FAKE_ID]),
                 mock.call(mock.ANY, 'security_group', SG_FAKE_ID,
                           'delete',
                           {'description': 'test-description',
                            'project_id': 'test-tenant',
                            'security_group_rules': [],
                            'tenant_id': 'test-tenant',
                            'id': SG_FAKE_ID, 'name': 'test_sg'})])
def test_sg_rule_delete(self):
with mock.patch.object(journal, 'record') as record:
context = self._get_mock_operation_context(odl_const.ODL_SG_RULE)
res_id = context[odl_const.ODL_SG_RULE]['id']
rule = mock.Mock()
rule.id = SG_RULE_FAKE_ID
rule.security_group_id = SG_FAKE_ID
kwargs = {'security_group_rule_id': SG_RULE_FAKE_ID,
'security_group_id': SG_FAKE_ID}
with db_api.CONTEXT_WRITER.using(self.db_context):
self.mech.sync_from_callback_precommit(
self.db_context, odl_const.ODL_DELETE,
callback._RESOURCE_MAPPING[odl_const.ODL_SG_RULE],
res_id, context, **kwargs)
record.assert_has_calls(
[mock.call(mock.ANY, 'security_group_rule',
SG_RULE_FAKE_ID, 'delete', [SG_FAKE_ID])])
    def test_subnet_allocation_pools(self):
        # allocation_pools recorded in the journal row must match the
        # value on the subnet context that triggered the update.
        context = self._get_mock_operation_context(odl_const.ODL_SUBNET)
        alloc_pool = context.current['allocation_pools']
        self._call_operation_object(odl_const.ODL_UPDATE,
                                    odl_const.ODL_SUBNET)
        row = db.get_oldest_pending_db_row_with_lock(self.db_context)
        self.assertEqual(alloc_pool, row.data['allocation_pools'])
    def test_sync_multiple_updates(self):
        # add 2 updates
        for i in range(2):
            self._call_operation_object(odl_const.ODL_UPDATE,
                                        odl_const.ODL_NETWORK)
        # get the first (oldest, lowest seqnum) update row
        rows = db.get_all_db_rows(self.db_context)
        rows.sort(key=operator.attrgetter("seqnum"))
        first_row = rows[0]
        # change the state to processing
        db.update_db_row_state(self.db_context, first_row,
                               odl_const.PROCESSING)
        # create 1 more operation to trigger the sync thread
        # verify that there are no calls to ODL controller, because the
        # first row was processing (exit_after_run = true)
        self._test_thread_processing(odl_const.ODL_UPDATE,
                                     odl_const.ODL_NETWORK, expected_calls=0)
        # validate that all the pending rows stays in 'pending' state
        # first row should be 'processing' because it was not processed
        processing = db.get_all_db_rows_by_state(self.db_context, 'processing')
        self.assertEqual(1, len(processing))
        rows = db.get_all_db_rows_by_state(self.db_context, 'pending')
        self.assertEqual(2, len(rows))
def test_update_port_filter(self):
"""Validate the filter code on update port operation"""
expected_items = ['fixed_ips', 'security_groups', 'device_id',
'security_groups', 'admin_state_up']
subnet = self._get_mock_operation_context(odl_const.ODL_SUBNET).current
port = self._get_mock_operation_context(odl_const.ODL_PORT).current
port['fixed_ips'] = [{'subnet_id': subnet['id'],
'ip_address': '10.0.0.10'}]
port['mac_address'] = port['mac_address'].upper()
orig_port = copy.deepcopy(port)
with mock.patch.object(segments_db, 'get_network_segments'):
filters.filter_for_odl(odl_const.ODL_PORT,
odl_const.ODL_UPDATE, port)
for key, value in orig_port.items():
if key in expected_items:
self.assertEqual(port[key], value)
class _OpenDaylightDriverVlanTransparencyBase(_OpenDaylightMechanismBase):
    """Shared fixture for the VLAN-transparency test cases below."""

    def _driver_context(self, network):
        # Minimal NetworkContext stand-in exposing only `.current`.
        return mock.MagicMock(current=network)
class TestOpenDaylightDriverVlanTransparencyNetwork(
        _OpenDaylightDriverVlanTransparencyBase):
    """VLAN-transparency checks for single provider-network types."""

    def _test_network_type(self, expected, network_type):
        # Build a network dict carrying only the provider network type
        # and ask the driver whether it is VLAN-transparent.
        network = {providernet.NETWORK_TYPE: network_type}
        result = self.mech.check_vlan_transparency(
            self._driver_context(network))
        self.assertEqual(expected, result)

    def test_none_network_type(self):
        # A network without any provider type is reported transparent.
        result = self.mech.check_vlan_transparency(self._driver_context({}))
        self.assertTrue(result)

    def test_vlan_transparency(self):
        # Only VXLAN is VLAN-transparent; every other type is not.
        transparency_by_type = {
            n_constants.TYPE_VXLAN: True,
            n_constants.TYPE_FLAT: False,
            n_constants.TYPE_GENEVE: False,
            n_constants.TYPE_GRE: False,
            n_constants.TYPE_LOCAL: False,
            n_constants.TYPE_VLAN: False,
        }
        for network_type, expected in transparency_by_type.items():
            self._test_network_type(expected, network_type)
class TestOpenDaylightDriverVlanTransparency(
        _OpenDaylightDriverVlanTransparencyBase):
    """Scenario-driven transparency checks for multi-segment networks.

    Per the scenarios below, a network is VLAN-transparent only when
    every one of its segments is VXLAN.
    """

    scenarios = [
        ("vxlan_vxlan",
         {'expected': True,
          'network_types': [n_constants.TYPE_VXLAN, n_constants.TYPE_VXLAN]}),
        ("gre_vxlan",
         {'expected': False,
          'network_types': [n_constants.TYPE_GRE, n_constants.TYPE_VXLAN]}),
        ("vxlan_vlan",
         {'expected': False,
          'network_types': [n_constants.TYPE_VXLAN, n_constants.TYPE_VLAN]}),
        ("vxlan_flat",
         {'expected': False,
          'network_types': [n_constants.TYPE_VXLAN, n_constants.TYPE_FLAT]}),
        ("vlan_vlan",
         {'expected': False,
          'network_types': [n_constants.TYPE_VLAN, n_constants.TYPE_VLAN]}),
    ]

    def test_network_segments(self):
        # Build a multiprovider network from the scenario's segment types.
        segments = [{providernet.NETWORK_TYPE: type_}
                    for type_ in self.network_types]
        context = self._driver_context({mpnet_apidef.SEGMENTS: segments})
        self.assertEqual(self.expected,
                         self.mech.check_vlan_transparency(context))
networking-odl-16.0.0/networking_odl/tests/unit/ml2/odl_teststub.js0000664000175000017500000000400713656750541025511 0ustar zuulzuul00000000000000/*
* Copyright (c) 2016 OpenStack Foundation
* All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* $nodejs odl_teststub.js
*
* local.conf or ml2_conf.ini should be set to the following:
*
* [ml2_odl]
* port_binding_controller = pseudo-agentdb-binding
* password = admin
* username = admin
* url = http://localhost:8080/controller/nb/v2/neutron
* restconf_uri = http://localhost:8125/ # for this stub
*
* To test with ODL *end to end* use below URL for restconf_uri and configure
* ovsdb external_ids using the test script: config-ovs-external_ids.sh
*
* http://localhost:8181/restconf/operational/neutron:neutron/hostconfigs
*/
// Tiny HTTP server that always answers with a canned ODL "hostconfigs"
// document, emulating the RESTCONF hostconfigs endpoint for local tests.
var http = require('http');
const PORT=8125;

// Single host entry: "devstack" of host-type "ODL L2" advertising an
// ovs vif for normal vnics, the allowed network types and one bridge
// mapping (physnet1 -> br-ex).
__test_odl_hconfig = {"hostconfigs": {"hostconfig": [
          {"host-id": "devstack",
           "host-type": "ODL L2",
           "config": {
               "supported_vnic_types": [
                   {"vnic_type": "normal",
                    "vif_type": "ovs",
                    "vif_details": {}}],
               "allowed_network_types": ["local", "vlan", "vxlan", "gre"],
               "bridge_mappings": {"physnet1":"br-ex"}
               }
          }]
     }}

// Every request (any method/path) receives the same JSON payload.
function handleRequest(req, res){
    res.setHeader('Content-Type', 'application/json');
    res.end(JSON.stringify(__test_odl_hconfig));
}

var server = http.createServer(handleRequest);

server.listen(PORT, function(){
    console.log("Server listening on: http://localhost:%s", PORT);
});
networking-odl-16.0.0/networking_odl/tests/unit/ml2/test_legacy_port_binding.py0000664000175000017500000000644613656750541030064 0ustar zuulzuul00000000000000# Copyright (c) 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.plugins.ml2 import driver_context as ctx
from neutron_lib.api.definitions import portbindings
from neutron_lib import constants as n_constants
from neutron_lib.plugins.ml2 import api
from networking_odl.ml2 import legacy_port_binding
from networking_odl.tests import base
class TestLegacyPortBindingManager(base.DietTestCase):
    """Unit tests for LegacyPortBindingManager segment checks and binding."""

    # valid and invalid segments
    valid_segment = {
        api.ID: 'API_ID',
        api.NETWORK_TYPE: n_constants.TYPE_LOCAL,
        api.SEGMENTATION_ID: 'API_SEGMENTATION_ID',
        api.PHYSICAL_NETWORK: 'API_PHYSICAL_NETWORK'}

    invalid_segment = {
        api.ID: 'API_ID',
        api.NETWORK_TYPE: n_constants.TYPE_NONE,
        api.SEGMENTATION_ID: 'API_SEGMENTATION_ID',
        api.PHYSICAL_NETWORK: 'API_PHYSICAL_NETWORK'}

    def test_check_segment(self):
        """Validate the _check_segment method."""
        all_network_types = [n_constants.TYPE_FLAT, n_constants.TYPE_GRE,
                             n_constants.TYPE_LOCAL, n_constants.TYPE_VXLAN,
                             n_constants.TYPE_VLAN, n_constants.TYPE_NONE]
        mgr = legacy_port_binding.LegacyPortBindingManager()
        # Every type except TYPE_NONE must be accepted as bindable.
        valid_types = {
            network_type
            for network_type in all_network_types
            if mgr._check_segment({api.NETWORK_TYPE: network_type})}
        self.assertEqual({
            n_constants.TYPE_FLAT, n_constants.TYPE_LOCAL,
            n_constants.TYPE_GRE, n_constants.TYPE_VXLAN,
            n_constants.TYPE_VLAN}, valid_types)

    def test_bind_port(self):
        # bind_port must pick the valid segment and set the binding with
        # ACTIVE status; the invalid (TYPE_NONE) segment is skipped.
        network = mock.MagicMock(spec=api.NetworkContext)
        port_context = mock.MagicMock(
            spec=ctx.PortContext, current={'id': 'CURRENT_CONTEXT_ID'},
            segments_to_bind=[self.valid_segment, self.invalid_segment],
            network=network)
        mgr = legacy_port_binding.LegacyPortBindingManager()
        vif_type = mgr._get_vif_type(port_context)
        mgr.bind_port(port_context)
        port_context.set_binding.assert_called_once_with(
            self.valid_segment[api.ID], vif_type,
            mgr.vif_details, status=n_constants.PORT_STATUS_ACTIVE)

    def test_bind_port_unsupported_vnic_type(self):
        # A port requesting VNIC_DIRECT must be left unbound.
        network = mock.MagicMock(spec=api.NetworkContext)
        port_context = mock.MagicMock(
            spec=ctx.PortContext,
            current={'id': 'CURRENT_CONTEXT_ID',
                     portbindings.VNIC_TYPE: portbindings.VNIC_DIRECT},
            segments_to_bind=[self.valid_segment, self.invalid_segment],
            network=network)
        mgr = legacy_port_binding.LegacyPortBindingManager()
        mgr.bind_port(port_context)
        port_context.set_binding.assert_not_called()
networking-odl-16.0.0/networking_odl/tests/unit/sfc/0000775000175000017500000000000013656750617022524 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/tests/unit/sfc/__init__.py0000664000175000017500000000000013656750541024617 0ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/tests/unit/sfc/flowclassifier/0000775000175000017500000000000013656750617025540 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/tests/unit/sfc/flowclassifier/__init__.py0000664000175000017500000000000013656750541027633 0ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/tests/unit/sfc/flowclassifier/test_sfc_flowclassifier_v2.py0000664000175000017500000000634413656750541033432 0ustar zuulzuul00000000000000# Copyright (c) 2017 Brocade Communication Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mock import patch
from neutron_lib.db import api as db_api
from networking_odl.common import constants as odl_const
from networking_odl.db import db
from networking_odl.sfc.flowclassifier import sfc_flowclassifier_v2 as sfc_fc
from networking_odl.tests import base as odl_base
from networking_odl.tests.unit import base_v2
from networking_odl.tests.unit.sfc import constants as sfc_const
class TestOpenDaylightSFCFlowClassifierDriverV2(
        base_v2.OpenDaylightConfigBase):
    """Journal-recording tests for the v2 SFC flow classifier driver."""

    def setUp(self):
        self.useFixture(odl_base.OpenDaylightRestClientFixture())
        super(TestOpenDaylightSFCFlowClassifierDriverV2, self).setUp()
        self.handler = sfc_fc.OpenDaylightSFCFlowClassifierDriverV2()
        self.handler.initialize()

    def _get_mock_context(self):
        # FlowClassifierContext whose current resource is the canned
        # FAKE_FLOW_CLASSIFIER and whose session is the test DB session.
        mocked_fc_context = patch(
            'networking_sfc.services.flowclassifier.common.context'
            '.FlowClassifierContext').start().return_value
        mocked_fc_context.current = sfc_const.FAKE_FLOW_CLASSIFIER
        mocked_fc_context.session = self.db_context.session
        mocked_fc_context._plugin_context = mocked_fc_context
        return mocked_fc_context

    def _call_operation_object(self, operation, timing):
        # Dispatch to e.g. self.handler.create_flow_classifier_precommit.
        method = getattr(self.handler,
                         '%s_flow_classifier_%s' % (operation, timing))
        method(self._get_mock_context())

    def _test_event(self, operation, timing):
        # precommit must leave a journal row recording the operation.
        # NOTE(review): callers pass timing 'postcommit', but the branch
        # below checks for 'after', so the postcommit case currently
        # asserts nothing about the journal -- confirm intent.
        with db_api.CONTEXT_WRITER.using(self.db_context):
            self._call_operation_object(operation, timing)
            if timing == 'precommit':
                self.db_context.session.flush()
            row = db.get_oldest_pending_db_row_with_lock(self.db_context)
            if timing == 'precommit':
                self.assertEqual(operation, row['operation'])
                self.assertEqual(
                    odl_const.ODL_SFC_FLOW_CLASSIFIER, row['object_type'])
            elif timing == 'after':
                self.assertIsNone(row)

    # TODO(yamahata): utilize test scenarios
    def test_create_flow_classifier_precommit(self):
        self._test_event("create", "precommit")

    def test_create_flow_classifier_postcommit(self):
        self._test_event("create", "postcommit")

    def test_update_flow_classifier_precommit(self):
        self._test_event("update", "precommit")

    def test_update_flow_classifier_postcommit(self):
        self._test_event("update", "postcommit")

    def test_delete_flow_classifier_precommit(self):
        self._test_event("delete", "precommit")

    def test_delete_flow_classifier_postcommit(self):
        self._test_event("delete", "postcommit")
networking-odl-16.0.0/networking_odl/tests/unit/sfc/test_sfc_driver_v2.py0000664000175000017500000001523013656750541026667 0ustar zuulzuul00000000000000# Copyright (c) 2017 Brocade Communication Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mock import patch
from neutron_lib.db import api as db_api
from networking_odl.common import constants as odl_const
from networking_odl.db import db
from networking_odl.sfc import sfc_driver_v2 as sfc
from networking_odl.tests import base as odl_base
from networking_odl.tests.unit import base_v2
from networking_odl.tests.unit.sfc import constants as sfc_const
class TestOpenDaylightSFCDriverV2(base_v2.OpenDaylightConfigBase):
    """Journal-recording tests for the v2 networking-sfc driver."""

    def setUp(self):
        self.useFixture(odl_base.OpenDaylightRestClientFixture())
        super(TestOpenDaylightSFCDriverV2, self).setUp()
        self.handler = sfc.OpenDaylightSFCDriverV2()
        self.handler.initialize()

    def _get_mock_portpair_operation_context(self):
        # PortPairContext backed by the canned FAKE_PORT_PAIR.
        mocked_fc_context = patch(
            'networking_sfc.services.sfc.common.context.PortPairContext'
        ).start().return_value
        mocked_fc_context.current = sfc_const.FAKE_PORT_PAIR
        mocked_fc_context.session = self.db_context.session
        mocked_fc_context._plugin_context = mocked_fc_context
        return mocked_fc_context

    def _get_mock_portpairgroup_operation_context(self):
        # PortPairGroupContext backed by FAKE_PORT_PAIR_GROUP.
        mocked_fc_context = patch(
            'networking_sfc.services.sfc.common.context.PortPairGroupContext'
        ).start().return_value
        mocked_fc_context.current = sfc_const.FAKE_PORT_PAIR_GROUP
        mocked_fc_context.session = self.db_context.session
        mocked_fc_context._plugin_context = mocked_fc_context
        return mocked_fc_context

    def _get_mock_portchain_operation_context(self):
        # PortChainContext backed by FAKE_PORT_CHAIN.
        mocked_fc_context = patch(
            'networking_sfc.services.sfc.common.context.PortChainContext'
        ).start().return_value
        mocked_fc_context.current = sfc_const.FAKE_PORT_CHAIN
        mocked_fc_context.session = self.db_context.session
        mocked_fc_context._plugin_context = mocked_fc_context
        return mocked_fc_context

    def _get_mock_operation_context(self, object_type):
        # Dispatch to _get_mock_<object_type>_operation_context above.
        getter = getattr(self, '_get_mock_%s_operation_context' % object_type)
        return getter()

    def _call_operation_object(self, operation, timing, resource_str, context):
        # e.g. self.handler.create_port_pair_precommit(context)
        method = getattr(self.handler,
                         '%s_%s_%s' % (operation, resource_str, timing))
        method(context)

    def _test_event(self, operation, timing, resource_str,
                    object_type):
        # precommit must leave a journal row recording the operation.
        # NOTE(review): callers pass timing 'postcommit', but the branch
        # below checks for 'after', so the postcommit case currently
        # asserts nothing about the journal -- confirm intent.
        with db_api.CONTEXT_WRITER.using(self.db_context):
            context = self._get_mock_operation_context(object_type)
            self._call_operation_object(operation, timing,
                                        resource_str, context)
            if timing == 'precommit':
                self.db_context.session.flush()
            row = db.get_oldest_pending_db_row_with_lock(self.db_context)
            if timing == 'precommit':
                self.assertEqual(operation, row['operation'])
                self.assertEqual(object_type, row['object_type'])
            elif timing == 'after':
                self.assertIsNone(row)

    # TODO(yamahata): utilize test scenarios
    def test_create_port_pair_precommit(self):
        self._test_event("create", "precommit", "port_pair",
                         odl_const.ODL_SFC_PORT_PAIR)

    def test_create_port_pair_postcommit(self):
        self._test_event("create", "postcommit", "port_pair",
                         odl_const.ODL_SFC_PORT_PAIR)

    def test_update_port_pair_precommit(self):
        self._test_event("update", "precommit", "port_pair",
                         odl_const.ODL_SFC_PORT_PAIR)

    def test_update_port_pair_postcommit(self):
        self._test_event("update", "postcommit", "port_pair",
                         odl_const.ODL_SFC_PORT_PAIR)

    def test_delete_port_pair_precommit(self):
        self._test_event("delete", "precommit", "port_pair",
                         odl_const.ODL_SFC_PORT_PAIR)

    def test_delete_port_pair_postcommit(self):
        self._test_event("delete", "postcommit", "port_pair",
                         odl_const.ODL_SFC_PORT_PAIR)

    def test_create_port_pair_group_precommit(self):
        self._test_event("create", "precommit", "port_pair_group",
                         odl_const.ODL_SFC_PORT_PAIR_GROUP)

    def test_create_port_pair_group_postcommit(self):
        self._test_event("create", "postcommit", "port_pair_group",
                         odl_const.ODL_SFC_PORT_PAIR_GROUP)

    def test_update_port_pair_group_precommit(self):
        self._test_event("update", "precommit", "port_pair_group",
                         odl_const.ODL_SFC_PORT_PAIR_GROUP)

    def test_update_port_pair_group_postcommit(self):
        self._test_event("update", "postcommit", "port_pair_group",
                         odl_const.ODL_SFC_PORT_PAIR_GROUP)

    def test_delete_port_pair_group_precommit(self):
        self._test_event("delete", "precommit", "port_pair_group",
                         odl_const.ODL_SFC_PORT_PAIR_GROUP)

    def test_delete_port_pair_group_postcommit(self):
        self._test_event("delete", "postcommit", "port_pair_group",
                         odl_const.ODL_SFC_PORT_PAIR_GROUP)

    def test_create_port_chain_precommit(self):
        self._test_event("create", "precommit", "port_chain",
                         odl_const.ODL_SFC_PORT_CHAIN)

    def test_create_port_chain_postcommit(self):
        self._test_event("create", "postcommit", "port_chain",
                         odl_const.ODL_SFC_PORT_CHAIN)

    def test_update_port_chain_precommit(self):
        self._test_event("update", "precommit", "port_chain",
                         odl_const.ODL_SFC_PORT_CHAIN)

    def test_update_port_chain_postcommit(self):
        self._test_event("update", "postcommit", "port_chain",
                         odl_const.ODL_SFC_PORT_CHAIN)

    def test_delete_port_chain_precommit(self):
        self._test_event("delete", "precommit", "port_chain",
                         odl_const.ODL_SFC_PORT_CHAIN)

    def test_delete_port_chain_postcommit(self):
        self._test_event("delete", "postcommit", "port_chain",
                         odl_const.ODL_SFC_PORT_CHAIN)
networking-odl-16.0.0/networking_odl/tests/unit/sfc/constants.py0000664000175000017500000000507713656750541025117 0ustar zuulzuul00000000000000# Copyright (c) 2016 Brocade Communication Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Canned SFC REST resources shared by the SFC unit tests.

# Flow classifiers
CLASSIFIERS_BASE_URI = 'sfc/flowclassifiers'
FAKE_FLOW_CLASSIFIER_ID = "4a334cd4-fe9c-4fae-af4b-321c5e2eb051"
FAKE_FLOW_CLASSIFIER = {
    "id": "4a334cd4-fe9c-4fae-af4b-321c5e2eb051",
    "name": "FC1",
    "tenant_id": "1814726e2d22407b8ca76db5e567dcf1",
    "description": "Flow rule for classifying TCP traffic",
    "protocol": "TCP",
    "source_port_range_min": 22,
    "source_port_range_max": 4000,
    "destination_port_range_min": 80,
    "destination_port_range_max": 80,
    "source_ip_prefix": "22.12.34.44",
    "destination_ip_prefix": "22.12.34.45"
}

# Port pairs
PORT_PAIRS_BASE_URI = 'sfc/portpairs'
FAKE_PORT_PAIR_ID = "78dcd363-fc23-aeb6-f44b-56dc5e2fb3ae"
FAKE_PORT_PAIR = {
    "name": "SF1",
    "id": "78dcd363-fc23-aeb6-f44b-56dc5e2fb3ae",
    "tenant_id": "d382007aa9904763a801f68ecf065cf5",
    "description": "Firewall SF instance",
    "ingress": "dace4513-24fc-4fae-af4b-321c5e2eb3d1",
    "egress": "aef3478a-4a56-2a6e-cd3a-9dee4e2ec345"
}

# Port pair groups (references FAKE_PORT_PAIR's id)
PORT_PAIR_GROUPS_BASE_URI = 'sfc/portpairgroups'
FAKE_PORT_PAIR_GROUP_ID = "4512d643-24fc-4fae-af4b-321c5e2eb3d1"
FAKE_PORT_PAIR_GROUP = {
    "name": "Firewall_PortPairGroup",
    "id": "4512d643-24fc-4fae-af4b-321c5e2eb3d1",
    "tenant_id": "d382007aa9904763a801f68ecf065cf5",
    "description": "Grouping Firewall SF instances",
    "port_pairs": ["78dcd363-fc23-aeb6-f44b-56dc5e2fb3ae"]
}

# Port chains (references the flow classifier and port pair group ids)
PORT_CHAINS_BASE_URI = 'sfc/portchains'
FAKE_PORT_CHAIN_ID = "1278dcd4-459f-62ed-754b-87fc5e4a6751"
FAKE_PORT_CHAIN = {
    "name": "PC2",
    "id": "1278dcd4-459f-62ed-754b-87fc5e4a6751",
    "tenant_id": "d382007aa9904763a801f68ecf065cf5",
    "description": "Steering TCP and UDP traffic first to Firewall "
                   "and then to Loadbalancer",
    "flow_classifiers": ["4a334cd4-fe9c-4fae-af4b-321c5e2eb051",
                         "105a4b0a-73d6-11e5-b392-2c27d72acb4c"],
    "port_pair_groups": ["4512d643-24fc-4fae-af4b-321c5e2eb3d1",
                         "4a634d49-76dc-4fae-af4b-321c5e23d651"]
}
networking-odl-16.0.0/networking_odl/tests/unit/db/0000775000175000017500000000000013656750617022336 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/tests/unit/db/__init__.py0000664000175000017500000000000013656750541024431 0ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/tests/unit/db/test_db.py0000664000175000017500000004634113656750541024340 0ustar zuulzuul00000000000000#
# Copyright (C) 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from datetime import timedelta
import functools
import mock
from neutron_lib.db import api as db_api
from networking_odl.common import constants as odl_const
from networking_odl.db import db
from networking_odl.db import models
from networking_odl.tests.unit import test_base_db
def in_session(fn):
    """Decorator: run the wrapped test method inside a writer
    transaction opened on the test case's ``db_context``.
    """
    @functools.wraps(fn)
    def _run_in_writer(self, *args, **kwargs):
        writer = db_api.CONTEXT_WRITER.using(self.db_context)
        with writer:
            return fn(self, *args, **kwargs)
    return _run_in_writer
class DbTestCase(test_base_db.ODLBaseDbTestCase):
    def setUp(self):
        super(DbTestCase, self).setUp()
        # NOTE(mpeterson): Due to how the pecan lib does introspection
        # to find a non-decorated function, it needs a function that will
        # be found in the end. The line below workarounds this limitation.
        # Target callable recorded by the retry-wrapper tests below.
        self._mock_function = mock.MagicMock()
    def _update_row(self, row):
        # Persist in-place modifications made to a journal row.
        self.db_context.session.merge(row)
        self.db_context.session.flush()
    def _test_validate_updates(self, first_entry, second_entry, expected_deps,
                               state=None):
        # Record first_entry (optionally forcing its state), then check
        # whether second_entry sees it as a pending/processing dependency.
        db.create_pending_row(self.db_context, *first_entry)
        if state:
            row = db.get_all_db_rows(self.db_context)[0]
            row.state = state
            self._update_row(row)
        deps = db.get_pending_or_processing_ops(
            self.db_context, second_entry[1], second_entry[2])
        self.assertEqual(expected_deps, len(deps) != 0)
    def _test_retry_count(self, retry_num, max_retry,
                          expected_retry_count, expected_state):
        # Drive update_pending_db_row_retry at a given retry count and
        # verify the resulting (state, retry_count) pair.
        # add new pending row
        db.create_pending_row(self.db_context, *self.UPDATE_ROW)
        # update the row with the requested retry_num
        row = db.get_all_db_rows(self.db_context)[0]
        row.retry_count = retry_num - 1
        db.update_pending_db_row_retry(self.db_context, row, max_retry)
        # validate the state and the retry_count of the row
        row = db.get_all_db_rows(self.db_context)[0]
        self.assertEqual(expected_state, row.state)
        self.assertEqual(expected_retry_count, row.retry_count)
    def _test_retry_wrapper(self, decorated_function):
        # NOTE(mpeterson): we want to make sure that it's configured
        # to MAX_RETRIES.
        # NOTE(review): reaches into the private db_api._retry_db_errors;
        # may break on a neutron-lib upgrade -- confirm there is no
        # public accessor.
        self.assertEqual(db_api._retry_db_errors.max_retries,
                         db_api.MAX_RETRIES)
        self._test_retry_exceptions(decorated_function,
                                    self._mock_function, False)
    # NOTE(mpeterson): The following function serves to workaround a
    # limitation in the discovery mechanism of pecan lib that does not allow
    # us to create a generic function that decorates on the fly. It needs to
    # be decorated through the decorator directive and not via function
    # composition
    @db_api.retry_if_session_inactive()
    def _decorated_retry_if_session_inactive(self, context):
        # Body only records the call; the retry behavior is under test.
        self._mock_function()
    def test_retry_if_session_inactive(self):
        # The decorated helper must retry per neutron-lib's retry policy.
        self._test_retry_wrapper(self._decorated_retry_if_session_inactive)
    @in_session
    def _test_update_row_state(self, from_state, to_state, dry_flush=False):
        # add new pending row
        db.create_pending_row(self.db_context, *self.UPDATE_ROW)

        # Spy on session.flush so callers can assert whether
        # update_db_row_state flushed (dry_flush=True passes flush=False).
        mock_flush = mock.MagicMock(
            side_effect=self.db_context.session.flush)
        if dry_flush:
            patch_flush = mock.patch.object(self.db_context.session,
                                            'flush',
                                            side_effect=mock_flush)

        row = db.get_all_db_rows(self.db_context)[0]
        for state in [from_state, to_state]:
            if dry_flush:
                patch_flush.start()

            try:
                # update the row state
                db.update_db_row_state(self.db_context, row, state,
                                       flush=not dry_flush)
            finally:
                if dry_flush:
                    patch_flush.stop()

            # validate the new state
            row = db.get_all_db_rows(self.db_context)[0]
            self.assertEqual(state, row.state)

        return mock_flush
    def test_updates_same_object_uuid(self):
        # Same object type and uuid: the earlier op counts as a dependency.
        self._test_validate_updates(self.UPDATE_ROW, self.UPDATE_ROW, True)
    def test_validate_updates_different_object_uuid(self):
        # A different uuid must not be seen as a dependency.
        other_row = list(self.UPDATE_ROW)
        other_row[1] += 'a'
        self._test_validate_updates(self.UPDATE_ROW, other_row, False)
    def test_validate_updates_different_object_type(self):
        # A different object type (and uuid) must not be a dependency.
        other_row = list(self.UPDATE_ROW)
        other_row[0] = odl_const.ODL_PORT
        other_row[1] += 'a'
        self._test_validate_updates(self.UPDATE_ROW, other_row, False)
    def test_check_for_older_ops_processing(self):
        # Rows already in PROCESSING still count as blocking dependencies.
        self._test_validate_updates(self.UPDATE_ROW, self.UPDATE_ROW, True,
                                    state=odl_const.PROCESSING)
    @in_session
    def test_get_oldest_pending_row_none_when_no_rows(self):
        # Empty journal: nothing to pick up.
        row = db.get_oldest_pending_db_row_with_lock(self.db_context)
        self.assertIsNone(row)
    @in_session
    def _test_get_oldest_pending_row_none(self, state):
        # A single row forced into a non-PENDING state is never selected.
        db.create_pending_row(self.db_context, *self.UPDATE_ROW)
        row = db.get_all_db_rows(self.db_context)[0]
        row.state = state
        self._update_row(row)
        row = db.get_oldest_pending_db_row_with_lock(self.db_context)
        self.assertIsNone(row)
    def test_get_oldest_pending_row_none_when_row_processing(self):
        # PROCESSING rows are skipped by the selection query.
        self._test_get_oldest_pending_row_none(odl_const.PROCESSING)
    def test_get_oldest_pending_row_none_when_row_failed(self):
        # FAILED rows are skipped by the selection query.
        self._test_get_oldest_pending_row_none(odl_const.FAILED)
    def test_get_oldest_pending_row_none_when_row_completed(self):
        # COMPLETED rows are skipped by the selection query.
        self._test_get_oldest_pending_row_none(odl_const.COMPLETED)
    def test_get_oldest_pending_row(self):
        # Selecting a pending row also flips it to PROCESSING (the lock).
        db.create_pending_row(self.db_context, *self.UPDATE_ROW)
        row = db.get_oldest_pending_db_row_with_lock(self.db_context)
        self.assertIsNotNone(row)
        self.assertEqual(odl_const.PROCESSING, row.state)
    @in_session
    def test_get_oldest_pending_row_order(self):
        # Of two pending rows, the one with the older last_retried wins.
        db.create_pending_row(self.db_context, *self.UPDATE_ROW)
        older_row = db.get_all_db_rows(self.db_context)[0]
        older_row.last_retried -= timedelta(minutes=1)
        self._update_row(older_row)
        db.create_pending_row(self.db_context, *self.UPDATE_ROW)
        row = db.get_oldest_pending_db_row_with_lock(self.db_context)
        self.assertEqual(older_row, row)
    def _test_get_oldest_pending_row_with_dep(self, dep_state):
        # Create a parent row in dep_state plus a child depending on it
        # and return whichever row the journal picks next; the parent
        # itself must never be returned.
        db.create_pending_row(self.db_context, *self.UPDATE_ROW)
        parent_row = db.get_all_db_rows(self.db_context)[0]
        db.update_db_row_state(self.db_context, parent_row, dep_state)
        db.create_pending_row(self.db_context, *self.UPDATE_ROW,
                              depending_on=[parent_row])
        row = db.get_oldest_pending_db_row_with_lock(self.db_context)
        if row is not None:
            self.assertNotEqual(parent_row.seqnum, row.seqnum)
        return row
    def test_get_oldest_pending_row_when_dep_completed(self):
        # A COMPLETED dependency no longer blocks the child row.
        row = self._test_get_oldest_pending_row_with_dep(odl_const.COMPLETED)
        self.assertEqual(odl_const.PROCESSING, row.state)
    def test_get_oldest_pending_row_when_dep_failed(self):
        # A FAILED dependency no longer blocks the child row.
        row = self._test_get_oldest_pending_row_with_dep(odl_const.FAILED)
        self.assertEqual(odl_const.PROCESSING, row.state)
    @in_session
    def test_get_oldest_pending_row_returns_parent_when_dep_pending(self):
        # With both parent and child pending, the parent is served first.
        db.create_pending_row(self.db_context, *self.UPDATE_ROW)
        parent_row = db.get_all_db_rows(self.db_context)[0]
        db.create_pending_row(self.db_context, *self.UPDATE_ROW,
                              depending_on=[parent_row])
        row = db.get_oldest_pending_db_row_with_lock(self.db_context)
        self.assertEqual(parent_row, row)
    def test_get_oldest_pending_row_none_when_dep_processing(self):
        # A dependency still PROCESSING blocks the child entirely.
        row = self._test_get_oldest_pending_row_with_dep(odl_const.PROCESSING)
        self.assertIsNone(row)
    def test_get_oldest_pending_row_retries_exceptions(self):
        # DB exceptions raised inside the selection query must be retried.
        with mock.patch.object(db, 'aliased') as m:
            self._test_retry_exceptions(db.get_oldest_pending_db_row_with_lock,
                                        m)
    @in_session
    def _test_delete_row(self, by_row=False, by_row_id=False, dry_flush=False):
        # Create two rows, delete the newest (by row object or seqnum)
        # and verify only the other remains.  dry_flush spies on
        # session.flush to let callers assert flush=False is honored.
        db.create_pending_row(self.db_context, *self.UPDATE_ROW)
        db.create_pending_row(self.db_context, *self.UPDATE_ROW)
        rows = db.get_all_db_rows(self.db_context)
        self.assertEqual(len(rows), 2)
        row = rows[-1]

        params = {'flush': not dry_flush}
        if by_row:
            params['row'] = row
        elif by_row_id:
            params['row_id'] = row.seqnum

        mock_flush = None
        if dry_flush:
            patch_flush = mock.patch.object(
                self.db_context.session, 'flush',
                side_effect=self.db_context.session.flush
            )
            mock_flush = patch_flush.start()

        try:
            db.delete_row(self.db_context, **params)
        finally:
            if dry_flush:
                patch_flush.stop()

        self.db_context.session.flush()

        rows = db.get_all_db_rows(self.db_context)
        self.assertEqual(len(rows), 1)
        self.assertNotEqual(row.seqnum, rows[0].seqnum)

        return mock_flush
def test_delete_row_by_row(self):
    """delete_row accepts the row object itself."""
    self._test_delete_row(by_row=True)
def test_delete_row_by_row_id(self):
    """delete_row accepts the row's seqnum via row_id."""
    self._test_delete_row(by_row_id=True)
def test_delete_row_by_row_without_flushing(self):
    """With flush=False, delete_row must not flush the session itself."""
    mock_flush = self._test_delete_row(by_row=True, dry_flush=True)
    mock_flush.assert_not_called()
def test_create_pending_row(self):
    """create_pending_row returns the new row and it appears in the journal."""
    row = db.create_pending_row(self.db_context, *self.UPDATE_ROW)
    self.assertIsNotNone(row)
    rows = db.get_all_db_rows(self.db_context)
    self.assertTrue(row in rows)
def _test_delete_rows_by_state_and_time(self, last_retried, row_retention,
                                        state, expected_rows,
                                        dry_delete=False):
    """Age a row, optionally purge old COMPLETED rows, then count rows.

    Creates one row in the given state whose last_retried lies
    last_retried seconds in the past; unless dry_delete, deletes
    COMPLETED rows older than row_retention seconds and asserts that
    expected_rows rows remain in the journal.
    """
    db.create_pending_row(self.db_context, *self.UPDATE_ROW)
    # update state and last retried
    row = db.get_all_db_rows(self.db_context)[-1]
    row.state = state
    row.last_retried = row.last_retried - timedelta(seconds=last_retried)
    self._update_row(row)
    if not dry_delete:
        db.delete_rows_by_state_and_time(self.db_context,
                                         odl_const.COMPLETED,
                                         timedelta(seconds=row_retention))
    # validate the number of rows in the journal
    rows = db.get_all_db_rows(self.db_context)
    self.assertEqual(expected_rows, len(rows))
def test_delete_completed_rows_no_new_rows(self):
    """A COMPLETED row younger than the retention window is kept."""
    self._test_delete_rows_by_state_and_time(0, 10, odl_const.COMPLETED, 1)
def test_delete_completed_rows_one_new_row(self):
    """A COMPLETED row older than the retention window is purged."""
    self._test_delete_rows_by_state_and_time(6, 5, odl_const.COMPLETED, 0)
def test_delete_completed_rows_wrong_state(self):
    """An old row in a non-COMPLETED state is not purged."""
    self._test_delete_rows_by_state_and_time(10, 8, odl_const.PENDING, 1)
@in_session
def test_delete_completed_rows_individually(self):
    """Old COMPLETED rows are removed with one session.delete() per row."""
    # First pass is a dry delete, leaving one aged COMPLETED row behind.
    self._test_delete_rows_by_state_and_time(
        6, 5, odl_const.COMPLETED, 1, True
    )
    patch_delete = mock.patch.object(
        self.db_context.session, 'delete',
        side_effect=self.db_context.session.delete
    )
    mock_delete = patch_delete.start()
    self.addCleanup(patch_delete.stop)
    # Second pass purges both aged rows; expect one delete() call each.
    self._test_delete_rows_by_state_and_time(
        6, 5, odl_const.COMPLETED, 0
    )
    self.assertEqual(mock_delete.call_count, 2)
@mock.patch.object(db, 'delete_row', side_effect=db.delete_row)
def test_delete_completed_rows_without_flush(self, mock_delete_row):
    """delete_rows_by_state_and_time passes flush=False to delete_row."""
    self._test_delete_rows_by_state_and_time(6, 5, odl_const.COMPLETED, 0)
    self.assertEqual({'flush': False}, mock_delete_row.call_args[1])
@in_session
def _test_reset_processing_rows(self, last_retried, max_timedelta,
                                quantity, dry_reset=False):
    """Age a PROCESSING row, optionally reset stale rows back to PENDING.

    Unless dry_reset, asserts reset_processing_rows returns quantity and
    that quantity rows end up in the expected state.
    """
    db.create_pending_row(self.db_context, *self.UPDATE_ROW)
    expected_state = odl_const.PROCESSING
    row = db.get_all_db_rows(self.db_context)[-1]
    row.state = expected_state
    row.last_retried = row.last_retried - timedelta(seconds=last_retried)
    self._update_row(row)
    if not dry_reset:
        expected_state = odl_const.PENDING
        reset = db.reset_processing_rows(self.db_context, max_timedelta)
        self.assertIsInstance(reset, int)
        self.assertEqual(reset, quantity)
    rows = db.get_all_db_rows_by_state(self.db_context, expected_state)
    self.assertEqual(len(rows), quantity)
    for row in rows:
        self.assertEqual(row.state, expected_state)
def test_reset_processing_rows(self):
    """A PROCESSING row older than the threshold is reset to PENDING."""
    self._test_reset_processing_rows(6, 5, 1)
def test_reset_processing_rows_no_new_rows(self):
    """A recently retried PROCESSING row is left untouched."""
    self._test_reset_processing_rows(0, 10, 0)
@mock.patch.object(db, 'update_db_row_state',
                   side_effect=db.update_db_row_state)
def test_reset_processing_rows_individually(self, mock_update_row):
    """Stale rows are reset via one update_db_row_state(flush=False) each."""
    self._test_reset_processing_rows(6, 5, 1, True)
    self._test_reset_processing_rows(6, 5, 2)
    self.assertEqual(mock_update_row.call_count, 2)
    self.assertEqual(mock_update_row.call_args[1], {'flush': False})
def test_valid_retry_count(self):
    """A row within the retry limit stays PENDING."""
    self._test_retry_count(1, 1, 1, odl_const.PENDING)
def test_invalid_retry_count(self):
    """A row that exceeds the retry limit transitions to FAILED."""
    self._test_retry_count(2, 1, 1, odl_const.FAILED)
def test_update_row_state_to_pending(self):
    """PROCESSING -> PENDING transition is allowed."""
    self._test_update_row_state(odl_const.PROCESSING, odl_const.PENDING)
def test_update_row_state_to_processing(self):
    """PENDING -> PROCESSING transition is allowed."""
    self._test_update_row_state(odl_const.PENDING, odl_const.PROCESSING)
def test_update_row_state_to_failed(self):
    """PROCESSING -> FAILED transition is allowed."""
    self._test_update_row_state(odl_const.PROCESSING, odl_const.FAILED)
def test_update_row_state_to_completed(self):
    """PROCESSING -> COMPLETED transition is allowed."""
    self._test_update_row_state(odl_const.PROCESSING, odl_const.COMPLETED)
def test_update_row_state_to_status_without_flush(self):
    """update_db_row_state with flush disabled only flushes via merge()."""
    mock_flush = self._test_update_row_state(odl_const.PROCESSING,
                                             odl_const.COMPLETED,
                                             dry_flush=True)
    # NOTE(mpeterson): call_count=2 because session.merge() calls flush()
    # and we are changing the status twice
    self.assertEqual(mock_flush.call_count, 2)
def _test_periodic_task_lock_unlock(self, db_func, existing_state,
                                    expected_state, expected_result,
                                    task='test_task'):
    """Seed one periodic-task row, call db_func, check result and state."""
    row = models.OpenDaylightPeriodicTask(state=existing_state,
                                          task=task)
    self.db_context.session.add(row)
    self.db_context.session.flush()
    self.assertEqual(expected_result, db_func(self.db_context,
                                              task))
    # Re-read the row from the DB to verify its resulting state.
    row = self.db_context.session.query(
        models.OpenDaylightPeriodicTask).filter_by(task=task).one()
    self.assertEqual(expected_state, row['state'])
def test_lock_periodic_task(self):
    """Locking a PENDING task succeeds and moves it to PROCESSING."""
    self._test_periodic_task_lock_unlock(db.lock_periodic_task,
                                         odl_const.PENDING,
                                         odl_const.PROCESSING,
                                         True)
def test_lock_periodic_task_fails_when_processing(self):
    """Locking a task that is already PROCESSING fails."""
    self._test_periodic_task_lock_unlock(db.lock_periodic_task,
                                         odl_const.PROCESSING,
                                         odl_const.PROCESSING,
                                         False)
def test_unlock_periodic_task(self):
    """Unlocking a PROCESSING task succeeds and moves it to PENDING."""
    self._test_periodic_task_lock_unlock(db.unlock_periodic_task,
                                         odl_const.PROCESSING,
                                         odl_const.PENDING,
                                         True)
def test_unlock_periodic_task_fails_when_pending(self):
    """Unlocking a task that is already PENDING fails."""
    self._test_periodic_task_lock_unlock(db.unlock_periodic_task,
                                         odl_const.PENDING,
                                         odl_const.PENDING,
                                         False)
def test_multiple_row_tasks(self):
    """Unlocking a PENDING task fails (single-row case)."""
    # NOTE(review): this is byte-identical to
    # test_unlock_periodic_task_fails_when_pending above, and despite its
    # name exercises no multi-row scenario -- possibly a leftover
    # duplicate; confirm intent before removing.
    self._test_periodic_task_lock_unlock(db.unlock_periodic_task,
                                         odl_const.PENDING,
                                         odl_const.PENDING,
                                         False)
def _add_tasks(self, tasks):
    """Insert one PENDING periodic-task row per task name, then verify."""
    created = [
        models.OpenDaylightPeriodicTask(state=odl_const.PENDING, task=name)
        for name in tasks
    ]
    for task_row in created:
        self.db_context.session.add(task_row)
    self.db_context.session.flush()
    rows = self.db_context.session.query(
        models.OpenDaylightPeriodicTask).all()
    self.assertEqual(len(tasks), len(rows))
def _perform_ops_on_all_rows(self, tasks, to_lock):
    """Lock (or unlock) each task in order, checking states after each op.

    After each successful operation, every already-processed task must be
    in the new state while the remaining ones stay in the old state.
    Finally, repeating the operation on the last task must fail since it
    was already performed.
    """
    if to_lock:
        curr_state = odl_const.PENDING
        exp_state = odl_const.PROCESSING
        func = db.lock_periodic_task
    else:
        exp_state = odl_const.PENDING
        curr_state = odl_const.PROCESSING
        func = db.unlock_periodic_task
    processed = []
    for task in tasks:
        row = self.db_context.session.query(
            models.OpenDaylightPeriodicTask).filter_by(task=task).one()
        self.assertEqual(row['state'], curr_state)
        self.assertTrue(func(self.db_context, task))
        rows = self.db_context.session.query(
            models.OpenDaylightPeriodicTask).filter_by().all()
        processed.append(task)
        for row in rows:
            if row['task'] in processed:
                self.assertEqual(exp_state, row['state'])
            else:
                self.assertEqual(curr_state, row['state'])
    self.assertFalse(func(self.db_context, tasks[-1]))
def test_multiple_row_tasks_lock_unlock(self):
    """Lock every periodic task row one by one, then unlock them all."""
    tasks = ['test_random_task', 'random_task_random', 'task_test_random']
    self._add_tasks(tasks)
    self._perform_ops_on_all_rows(tasks, to_lock=True)
    self._perform_ops_on_all_rows(tasks, to_lock=False)
networking-odl-16.0.0/networking_odl/tests/unit/base_v2.py0000664000175000017500000000424613656750541023646 0ustar zuulzuul00000000000000# Copyright (c) 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutron.tests.unit.plugins.ml2 import test_plugin
from networking_odl.common import client
from networking_odl.journal import base_driver
from networking_odl.journal import journal
from networking_odl.ml2 import mech_driver_v2
from networking_odl.tests import base
from networking_odl.tests.unit import test_base_db
class OpenDaylightConfigBase(test_plugin.Ml2PluginV2TestCase,
                             test_base_db.ODLBaseDbTestCase):
    """Base test case combining the ML2 plugin with the ODL journal DB."""

    def setUp(self):
        # Fixtures are installed before super().setUp() so the journal
        # thread, REST client and full-sync are already mocked out when
        # the ML2 plugin initializes.
        self.journal_thread_fixture = self.useFixture(
            base.OpenDaylightJournalThreadFixture())
        self.useFixture(base.OpenDaylightRestClientFixture())
        self.useFixture(base.OpenDaylightFullSyncFixture())
        super(OpenDaylightConfigBase, self).setUp()
        self.thread = journal.OpenDaylightJournalThread()
        self.addCleanup(base_driver.ALL_RESOURCES.clear)

    def run_journal_processing(self):
        """Cause the journal to process the first pending entry"""
        self.thread.sync_pending_entries()
class OpenDaylightTestCase(OpenDaylightConfigBase):
    """Config base with the ODL REST client's sendjson mocked out."""

    def setUp(self):
        self.mock_sendjson = mock.patch.object(client.OpenDaylightRestClient,
                                               'sendjson').start()
        super(OpenDaylightTestCase, self).setUp()
        self.port_create_status = 'DOWN'
        self.mech = mech_driver_v2.OpenDaylightMechanismDriver()
        self.mock_sendjson.side_effect = self.check_sendjson

    def check_sendjson(self, method, urlpath, obj):
        # URL paths handed to the client must be relative, never absolute.
        self.assertFalse(urlpath.startswith("http://"))
networking-odl-16.0.0/networking_odl/tests/unit/cmd/0000775000175000017500000000000013656750617022514 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/tests/unit/cmd/__init__.py0000664000175000017500000000000013656750541024607 0ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/tests/unit/cmd/test_set_ovs_hostconfigs.py0000664000175000017500000002502513656750541030215 0ustar zuulzuul00000000000000# Copyright (c) 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# pylint: disable=unused-argument, protected-access
from contextlib import contextmanager
import os
import sys
import tempfile
import mock
from oslo_serialization import jsonutils
import six
from networking_odl.cmd import set_ovs_hostconfigs
from networking_odl.tests import base
from networking_odl.tests import match
LOGGING_ENABLED = "Logging Enabled!"
LOGGING_PERMISSION_REQUIRED = "permissions are required to configure ovsdb"
@contextmanager
def capture(command, args):
    """Run command(args) capturing everything it writes to stdout.

    Yields the captured text; stdout is restored on exit even if the
    command raises.
    """
    out, sys.stdout = sys.stdout, six.StringIO()
    try:
        command(args)
        sys.stdout.seek(0)
        yield sys.stdout.read()
    finally:
        sys.stdout = out
class TestSetOvsHostconfigs(base.DietTestCase):
    """Tests for the set_ovs_hostconfigs command-line utility."""

    maxDiff = None

    def test_given_ovs_hostconfigs(self):
        """An explicit --ovs_hostconfigs JSON blob is passed through as-is."""
        # given
        self.patch_os_geteuid()
        ovs_hostconfigs = {
            "ODL L2": {"allowed_network_types": ["a", "b", "c"]}}
        args = ['--ovs_hostconfigs=' + jsonutils.dumps(ovs_hostconfigs),
                '--bridge_mappings=a:1,b:2']
        execute = self.patch_utils_execute()
        conf = set_ovs_hostconfigs.setup_conf(args)

        # when
        result = set_ovs_hostconfigs.main(args)

        # then
        self.assertEqual(0, result)
        execute.assert_has_calls([
            mock.call(
                ('ovs-vsctl', 'get', 'Open_vSwitch', '.', '_uuid')),
            mock.call(
                ('ovs-vsctl', 'set', 'Open_vSwitch', '',
                 'external_ids:odl_os_hostconfig_hostid=' + conf.host)),
            mock.call(
                ('ovs-vsctl', 'set', 'Open_vSwitch', '',
                 match.wildcard(
                     'external_ids:odl_os_hostconfig_config_odl_l2=*'))),
        ])
        expected = ovs_hostconfigs['ODL L2']
        _, actual_json = execute.call_args_list[2][0][0][4].split("=", 1)
        self.assertEqual(match.json(expected), actual_json)

    def test_given_no_args(self):
        self._test_given_args(tuple())

    def test_given_default_values(self):
        self._test_given_args([])

    def test_given_datapath_type_system(self):
        # NOTE(review): previously passed '--datapath_type=netdev',
        # duplicating the test below and leaving 'system' untested.
        self._test_given_args(['--datapath_type=system'])

    def test_given_datapath_type_netdev(self):
        self._test_given_args(['--datapath_type=netdev'])

    def test_given_datapath_type_vhostuser(self):
        self._test_given_args(['--datapath_type=dpdkvhostuser'])

    def test_given_ovs_dpdk(self):
        self._test_given_args(['--ovs_dpdk'])

    def test_given_noovs_dpdk(self):
        self._test_given_args(['--noovs_dpdk'])

    def test_given_ovs_sriov_offload(self):
        self._test_given_args(['--noovs_dpdk', '--ovs_sriov_offload'])

    def test_given_vhostuser_ovs_plug(self):
        self._test_given_args(['--vhostuser_ovs_plug'])

    def test_given_novhostuser_ovs_plug(self):
        self._test_given_args(['--novhostuser_ovs_plug'])

    def test_given_allowed_network_types(self):
        self._test_given_args(['--allowed_network_types=a,b,c'])

    def test_given_local_ip(self):
        self._test_given_args(['--local_ip=192.168.1.10', '--host='])

    def test_given_vhostuser_mode_server(self):
        self._test_given_args(
            ['--vhostuser_mode=server', '--datapath_type=netdev'])

    def test_given_vhostuser_mode_client(self):
        self._test_given_args(
            ['--vhostuser_mode=client', '--datapath_type=netdev'])

    def test_given_vhostuser_port_prefix_vhu(self):
        self._test_given_args(
            ['--vhostuser_port_prefix=vhu', '--datapath_type=netdev'])

    def test_given_vhostuser_port_prefix_socket(self):
        self._test_given_args(
            ['--vhostuser_port_prefix=socket', '--datapath_type=netdev'])

    def test_given_config_file(self):
        """Options may also come from a neutron config file."""
        file_descriptor, file_path = tempfile.mkstemp()
        try:
            os.write(file_descriptor, six.b("# dummy neutron config file\n"))
            os.close(file_descriptor)
            self._test_given_args(['--config-file={}'.format(file_path)])
        finally:
            os.remove(file_path)

    def _test_given_args(self, *args):
        """Run main() with the given CLI args and verify the host config."""
        # given
        self.patch_os_geteuid()
        execute = self.patch_utils_execute()
        conf = set_ovs_hostconfigs.setup_conf(*args)
        datapath_type = conf.datapath_type
        if datapath_type is None:
            # The default datapath depends on whether DPDK was requested.
            if conf.ovs_dpdk is False:
                datapath_type = "system"
            else:
                datapath_type = "netdev"

        # when
        result = set_ovs_hostconfigs.main(*args)

        # then
        self.assertEqual(0, result)
        execute.assert_has_calls([
            mock.call(
                ('ovs-vsctl', 'get', 'Open_vSwitch', '.', '_uuid')),
            mock.call(
                ('ovs-vsctl', 'get', 'Open_vSwitch', '.', 'datapath_types')),
            mock.call(
                ('ovs-vsctl', 'set', 'Open_vSwitch', '',
                 'external_ids:odl_os_hostconfig_hostid=' + conf.host)),
            mock.call(
                ('ovs-vsctl', 'set', 'Open_vSwitch', '',
                 match.wildcard(
                     'external_ids:odl_os_hostconfig_config_odl_l2=*'))),
        ])
        host_addresses = [conf.host or conf.local_ip]
        if datapath_type == "system":
            vif_type = "ovs"
            vif_details = {
                "uuid": '',
                "host_addresses": host_addresses,
                "has_datapath_type_netdev": False,
                "support_vhost_user": False
            }
        else:  # datapath_type in ["netdev", "dpdkvhostuser"]
            vif_type = "vhostuser"
            vif_details = {
                "uuid": '',
                "host_addresses": host_addresses,
                "has_datapath_type_netdev": True,
                "support_vhost_user": True,
                "port_prefix": conf.vhostuser_port_prefix,
                "vhostuser_mode": conf.vhostuser_mode,
                "vhostuser_ovs_plug": conf.vhostuser_ovs_plug,
                "vhostuser_socket_dir": conf.vhostuser_socket_dir,
                "vhostuser_socket": os.path.join(
                    conf.vhostuser_socket_dir,
                    conf.vhostuser_port_prefix + "$PORT_ID"),
            }
        _, actual_json = execute.call_args_list[3][0][0][4].split("=", 1)
        expected = {
            "allowed_network_types": conf.allowed_network_types,
            "bridge_mappings": conf.bridge_mappings,
            "datapath_type": datapath_type,
            "supported_vnic_types": [
                {
                    "vif_type": vif_type,
                    "vnic_type": "normal",
                    "vif_details": vif_details
                }
            ]
        }
        if vif_type == 'ovs' and conf.ovs_sriov_offload:
            direct_vnic = {
                "vif_details": vif_details,
                "vif_type": vif_type,
                "vnic_type": "direct",
            }
            expected["supported_vnic_types"].append(direct_vnic)
        self.assertEqual(match.json(expected), actual_json)

    def test_given_ovs_dpdk_undetected(self):
        """--ovs_dpdk fails cleanly when 'netdev' is not a datapath type."""
        # given
        LOG = self.patch(set_ovs_hostconfigs, 'LOG')
        args = ('--ovs_dpdk', '--bridge_mappings=a:1,b:2', '--debug')
        conf = set_ovs_hostconfigs.setup_conf(args)
        self.patch_os_geteuid()
        execute = self.patch_utils_execute(datapath_types="whatever")

        # when
        result = set_ovs_hostconfigs.main(args)

        # then
        self.assertEqual(1, result)
        execute.assert_has_calls([
            mock.call(
                ('ovs-vsctl', 'get', 'Open_vSwitch', '.', '_uuid')),
            mock.call(
                ('ovs-vsctl', 'get', 'Open_vSwitch', '.', 'datapath_types')),
        ])
        LOG.error.assert_called_once_with(
            "Fatal error: %s",
            match.wildcard(
                "--ovs_dpdk option was specified but the 'netdev' "
                "datapath_type was not enabled. To override use option "
                "--datapath_type=netdev"), exc_info=conf.debug)

    def test_bridge_mappings(self):
        """--bridge_mappings is parsed into a dict."""
        # when
        conf = set_ovs_hostconfigs.setup_conf(('--bridge_mappings=a:1,b:2',))
        self.assertEqual({'a': '1', 'b': '2'}, conf.bridge_mappings)

    def test_allowed_network_types(self):
        """--allowed_network_types is parsed into a list."""
        # when
        conf = set_ovs_hostconfigs.setup_conf(('--allowed_network_types=a,b',))
        self.assertEqual(['a', 'b'], conf.allowed_network_types)

    def patch_utils_execute(
            self, uuid='',
            datapath_types='netdev,dpdkvhostuser,system'):
        """Patch subprocess.check_output with a fake ovs-vsctl."""
        def execute(args):
            command, method, table, record, value = args
            self.assertEqual('ovs-vsctl', command)
            self.assertEqual('Open_vSwitch', table)
            self.assertIn(method, ['get', 'set'])
            if method == 'set':
                self.assertEqual(uuid, record)
                return ""
            elif method == 'get':
                self.assertEqual('.', record)
                self.assertIn(value, ['_uuid', 'datapath_types'])
                if value == '_uuid':
                    return uuid
                elif value == 'datapath_types':
                    return datapath_types
            self.fail('Unexpected command: ' + repr(args))

        return self.patch(
            set_ovs_hostconfigs.subprocess, "check_output",
            side_effect=execute)

    def patch_os_geteuid(self, return_value=0):
        """Patch os.geteuid; the default of 0 simulates running as root."""
        return self.patch(
            set_ovs_hostconfigs.os, "geteuid", return_value=return_value)

    def test_log_on_console_msg(self):
        """Without privileges a permission message is printed to stdout."""
        # NOTE(review): this test used to carry a stray @contextmanager
        # decorator, which turned it into a context-manager factory so its
        # body never executed under the test runner.
        with capture(set_ovs_hostconfigs.main, args=()) as output:
            self.assertNotEqual(-1, output.find(LOGGING_PERMISSION_REQUIRED))

    def test_log_in_file(self):
        """--log-file directs log output to the given file."""
        # NOTE(review): previously used tempfile.TemporaryFile (whose .name
        # is not a usable path on POSIX) and iterated fp.readline() -- a
        # single string -- unpacking characters, so nothing was verified.
        with tempfile.NamedTemporaryFile(mode='r+') as fp:
            set_ovs_hostconfigs.main(("--log-file=%s" % fp.name,))
            fp.seek(0)
            output = fp.read()
            for expected_log in (LOGGING_ENABLED,
                                 LOGGING_PERMISSION_REQUIRED):
                self.assertNotEqual(-1, output.find(expected_log))
networking-odl-16.0.0/networking_odl/tests/unit/cmd/test_analyze_journal.py0000664000175000017500000002215113656750541027317 0ustar zuulzuul00000000000000# Copyright (c) 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import random
import string
import fixtures
import mock
from six import StringIO
from networking_odl.cmd import analyze_journal
from networking_odl.journal import journal
from networking_odl.tests import base
def _random_string():
letters = string.ascii_lowercase
return ''.join(random.choice(letters)
for _ in range(random.randint(1, 10)))
def _generate_log_entry(log_type=None, entry_id=None):
    """Build a mock journal entry plus the log line journal._log_entry emits.

    Returns an (entry, log_line) tuple; the log line is wrapped in noise
    so parsers must locate it inside arbitrary output.
    """
    entry = mock.Mock()
    entry.seqnum = entry_id if entry_id else _random_string()
    entry.operation = _random_string()
    entry.object_type = _random_string()
    entry.object_uuid = _random_string()
    logger = fixtures.FakeLogger()
    with logger:
        journal._log_entry(log_type if log_type else _random_string(), entry)
    return entry, "noise %s noise" % logger.output
class TestAnalyzeJournal(base.DietTestCase):
    """Tests for the analyze_journal command-line tool."""

    def setUp(self):
        super(TestAnalyzeJournal, self).setUp()
        self.output = StringIO()

    def _assert_nothing_printed(self):
        self.assertEqual('', self.output.getvalue())

    def _assert_something_printed(self, expected=None):
        self.assertNotEqual('', self.output.getvalue())
        if expected:
            self.assertIn(str(expected), self.output.getvalue())

    def test_setup_conf_no_args(self):
        conf = analyze_journal.setup_conf(self.output, [])
        self.assertIsNotNone(conf)
        self._assert_nothing_printed()

    def test_setup_conf_h_flag(self):
        self.assertRaises(
            SystemExit, analyze_journal.setup_conf, self.output, ['-h'])
        self._assert_something_printed()

    def test_setup_conf_help_flag(self):
        self.assertRaises(
            SystemExit, analyze_journal.setup_conf, self.output, ['--help'])
        self._assert_something_printed()

    def test_setup_conf_file(self):
        file_name = _random_string()
        conf = analyze_journal.setup_conf(self.output, ['--file', file_name])
        self.assertEqual(file_name, conf.file)

    def test_setup_conf_slowest(self):
        slowest = random.randint(1, 10000)
        conf = analyze_journal.setup_conf(
            self.output, ['--slowest', str(slowest)])
        self.assertEqual(slowest, conf.slowest)

    def test_setup_conf_slowest_zero(self):
        self.assertRaises(SystemExit, analyze_journal.setup_conf,
                          self.output, ['--slowest', '0'])
        self._assert_nothing_printed()

    def test_parse_log_no_matched_content(self):
        self.assertEqual({}, analyze_journal.parse_log([]))
        self.assertEqual({}, analyze_journal.parse_log(['dummy']))

    def _test_parse_log_entry(self, recorded=False, completed=False):
        # Build log content holding recorded and/or completed lines for a
        # single entry id, then verify parse_log extracts its fields.
        content = []
        entry_id = _random_string()
        entry = None
        if recorded:
            entry, log = _generate_log_entry(log_type=journal.LOG_RECORDED,
                                             entry_id=entry_id)
            content.append(log)
        if completed:
            centry, log = _generate_log_entry(log_type=journal.LOG_COMPLETED,
                                              entry_id=entry_id)
            entry = centry if entry is None else entry
            content.append(log)
        entries = analyze_journal.parse_log(content)
        actual_entry = entries[entry_id]
        self.assertEqual(entry.operation, actual_entry['op'])
        self.assertEqual(entry.object_type, actual_entry['obj_type'])
        self.assertEqual(entry.object_uuid, actual_entry['obj_id'])
        if recorded:
            self.assertGreater(actual_entry[journal.LOG_RECORDED], 0)
        if completed:
            self.assertGreater(actual_entry[journal.LOG_COMPLETED], 0)

    def test_parse_log_entry_recorded(self):
        self._test_parse_log_entry(recorded=True)

    def test_parse_log_entry_completed(self):
        self._test_parse_log_entry(completed=True)

    def test_parse_log_entry_recorded_and_completed(self):
        self._test_parse_log_entry(recorded=True, completed=True)

    def test_analyze_entries_no_records(self):
        self.assertEqual([], analyze_journal.analyze_entries({}))

    def _generate_random_entry(self):
        return dict([(k, _random_string()) for k in analyze_journal.LOG_KEYS])

    def _entry_for_analyze_entries(self, recorded=False, completed=False):
        entry = self._generate_random_entry()
        if recorded:
            entry[journal.LOG_RECORDED] = random.uniform(1, 10)
        if completed:
            entry[journal.LOG_COMPLETED] = random.uniform(10, 20)
        return entry

    def test_analyze_entries_no_completed_time(self):
        entry = self._entry_for_analyze_entries(recorded=True)
        entries = {entry['entry_id']: entry}
        self.assertEqual([], analyze_journal.analyze_entries(entries))

    def test_analyze_entries_no_recorded_time(self):
        entry = self._entry_for_analyze_entries(completed=True)
        entries = {entry['entry_id']: entry}
        self.assertEqual([], analyze_journal.analyze_entries(entries))

    def test_analyze_entries(self):
        # Only the entry with both timestamps should produce a stats record.
        entry = self._entry_for_analyze_entries(recorded=True, completed=True)
        entry_only_recorded = self._entry_for_analyze_entries(recorded=True)
        entry_only_completed = self._entry_for_analyze_entries(completed=True)
        entries = {e['entry_id']: e for e in
                   (entry, entry_only_recorded, entry_only_completed)}
        entries_stats = analyze_journal.analyze_entries(entries)
        expected_time = (entry[journal.LOG_COMPLETED] -
                         entry[journal.LOG_RECORDED])
        expected_entry = analyze_journal.EntryStats(
            entry_id=entry['entry_id'], time=expected_time, op=entry['op'],
            obj_type=entry['obj_type'], obj_id=entry['obj_id'])
        self.assertIn(expected_entry, entries_stats)

    def _assert_percentile_printed(self, entries_stats, percentile):
        expected_percentile_format = "%sth percentile: %s"
        percentile_index = int(len(entries_stats) * (percentile / 100.0))
        entry = entries_stats[percentile_index]
        self._assert_something_printed(expected_percentile_format %
                                       (percentile, entry.time))

    def test_print_stats(self):
        entries_stats = []
        entries_count = 10
        slowest = random.randint(1, int(entries_count / 2))
        for i in range(entries_count):
            entry = self._generate_random_entry()
            entries_stats.append(
                analyze_journal.EntryStats(
                    entry_id=entry['entry_id'], time=i, op=entry['op'],
                    obj_type=entry['obj_type'], obj_id=entry['obj_id']))
        analyze_journal.print_stats(self.output, slowest, entries_stats)
        total_time = (entries_count * (entries_count - 1)) / 2
        avg_time = total_time / entries_count
        self._assert_something_printed(avg_time)
        self._assert_something_printed(slowest)
        self._assert_percentile_printed(entries_stats, 90)
        self._assert_percentile_printed(entries_stats, 99)
        self._assert_percentile_printed(entries_stats, 99.9)
        expected = ''
        for i in reversed(range(entries_count - slowest, entries_count)):
            entry = entries_stats[i]
            expected += '\n'
            expected += (analyze_journal.ENTRY_LOG_TEMPLATE %
                         (entry.entry_id, entry.time, entry.op, entry.obj_type,
                          entry.obj_id))
        self._assert_something_printed(expected)

    @contextlib.contextmanager
    def _setup_mocks_for_main(self, content):
        # Feed main() the given log content and a conf with slowest=10.
        with mock.patch.object(analyze_journal, 'get_content') as mgc, \
                mock.patch.object(analyze_journal, 'setup_conf') as msc:
            m = mock.MagicMock()
            m.__iter__.return_value = content
            mgc().__enter__.return_value = m
            conf = msc()
            conf.slowest = 10
            yield

    def test_main(self):
        entry_id = _random_string()
        _, entry_recorded = _generate_log_entry(journal.LOG_RECORDED, entry_id)
        _, entry_completed = _generate_log_entry(journal.LOG_COMPLETED,
                                                 entry_id)
        with self._setup_mocks_for_main((entry_recorded, entry_completed)):
            rc = analyze_journal.main(self.output)
        self.assertEqual(0, rc)
        self._assert_something_printed(entry_id)

    def test_main_no_entry_stats(self):
        with self._setup_mocks_for_main(('dummy',)):
            rc = analyze_journal.main(self.output)
        self.assertNotEqual(0, rc)
        self._assert_something_printed()
networking-odl-16.0.0/networking_odl/tests/unit/test_base_db.py0000664000175000017500000001647313656750541024750 0ustar zuulzuul00000000000000# Copyright 2016 Intel Corporation.
# Copyright 2016 Isaku Yamahata
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import mock
from pecan import util as p_util
from neutron.tests.unit.testlib_api import SqlTestCaseLight
from neutron_lib import context as neutron_context
from neutron_lib.db import api as db_api
from neutron_lib import fixture as lib_fixtures
from oslo_config import fixture as config_fixture
from oslo_db import exception as db_exc
import sqlalchemy
from sqlalchemy.orm import exc
from networking_odl.common import constants
from networking_odl.db import models
# DB exceptions treated as transient/retriable by the retry decorators
# exercised in these tests.
RETRIABLE_EXCEPTIONS = (db_exc.DBDeadlock, exc.StaleDataError,
                        db_exc.DBConnectionError, db_exc.DBDuplicateEntry,
                        db_exc.RetryRequest)
# Keep retries fast and few so the tests stay quick.
RETRY_INTERVAL = 0.001
RETRY_MAX = 2
class _InnerException(Exception):
    """Sentinel wrapped inside retriable DB errors raised by the tests."""
    pass
class ODLBaseDbTestCase(SqlTestCaseLight):
UPDATE_ROW = [constants.ODL_NETWORK, 'id', constants.ODL_UPDATE,
{'test': 'data'}]
def setUp(self):
super(ODLBaseDbTestCase, self).setUp()
self.db_context = neutron_context.get_admin_context()
self.cfg = self.useFixture(config_fixture.Config())
self.cfg.config(completed_rows_retention=-1, group='ml2_odl')
self._setup_retry_tracker_table()
def _setup_retry_tracker_table(self):
metadata = sqlalchemy.MetaData()
self.retry_table = sqlalchemy.Table(
'retry_tracker', metadata,
sqlalchemy.Column(
'id', sqlalchemy.Integer,
autoincrement=True,
primary_key=True,
),
)
metadata.create_all(self.engine)
self.addCleanup(metadata.drop_all, self.engine)
class RetryTracker(object):
pass
sqlalchemy.orm.mapper(RetryTracker, self.retry_table)
self.retry_tracker = RetryTracker
def _db_cleanup(self):
self.db_context.session.query(models.OpenDaylightJournal).delete()
self.db_context.session.query(models.OpenDaylightPeriodicTask).delete()
row0 = models.OpenDaylightPeriodicTask(
task='maintenance', state=constants.PENDING)
row1 = models.OpenDaylightPeriodicTask(
task='hostconfig', state=constants.PENDING)
self.db_context.session.merge(row0)
self.db_context.session.merge(row1)
self.db_context.session.flush()
def _test_db_exceptions_handled(self, method, mock_object, expect_retries):
# NOTE(mpeterson): make retries faster so it doesn't take a lot.
retry_fixture = lib_fixtures.DBRetryErrorsFixture(
max_retries=RETRY_MAX, retry_interval=RETRY_INTERVAL)
retry_fixture.setUp()
# NOTE(mpeterson): this test is very verbose, disabling logging
logging.disable(logging.CRITICAL)
self.addCleanup(logging.disable, logging.NOTSET)
exceptions = RETRIABLE_EXCEPTIONS
r_method = getattr(method, '__wrapped__', method)
r_method_args = p_util.getargspec(r_method).args
args_number = len(r_method_args) - (2 if r_method_args[0] == 'self'
else 1)
mock_arg = mock.MagicMock(unsafe=True)
# NOTE(mpeterson): workarounds for py3 compatibility and behavior
# expected by particular functions
mock_arg.__name__ = 'mock_arg'
mock_arg.retry_count = 1
mock_arg.__ge__.return_value = True
mock_arg.__gt__.return_value = True
mock_arg.__le__.return_value = True
mock_arg.__lt__.return_value = True
args = (mock_arg,) * args_number
def _assertRaises(exceptions, method, context, *args, **kwargs):
try:
method(context, *args, **kwargs)
except Exception as e:
if not isinstance(e, exceptions):
raise e
# TODO(mpeterson): For now the check with session.is_active is
# accepted, but when the enginefacade is the only accepted
# pattern then it should be changed to check that a session is
# attached to the context
session = context.session
if session.is_active and isinstance(e, _InnerException):
self.assertTrue(getattr(e, '_RETRY_EXCEEDED', False))
return
exc_names = (tuple(exc.__name__ for exc in exceptions)
if hasattr(exceptions, '__iter__') else
exceptions.__name__)
self.fail('%s did not raise %s' % (method.__name__, exc_names))
try:
raise _InnerException
except _InnerException as e:
_e = e
expected_retries = RETRY_MAX if expect_retries else 0
retry_counter = 0
for exception in exceptions:
def increase_retry_counter_and_except(*args, **kwargs):
nonlocal retry_counter
retry_counter += 1
self.db_context.session.add(self.retry_tracker())
self.db_context.session.flush()
raise exception(_e)
mock_object.side_effect = increase_retry_counter_and_except
_assertRaises((exception, _InnerException), method,
self.db_context, *args)
self.assertEqual(expected_retries, mock_object.call_count - 1)
mock_object.reset_mock()
retry_fixture.cleanUp()
return retry_counter
def _assertRetryCount(self, expected_count):
actual_count = \
self.db_context.session.query(self.retry_tracker).count()
self.assertEqual(expected_count, actual_count)
    def _test_retry_exceptions(self, method, mock_object,
                               assert_transaction=True):
        """Verify *method* retries on retriable DB exceptions.

        Runs the shared exception-handling helper twice: first outside any
        transaction (retries expected), then inside a writer transaction
        (no retries expected, since oslo.db does not retry nested
        transactions).

        :param method: retriable callable under test, invoked with a context.
        :param mock_object: mock whose side effect raises the DB exceptions.
        :param assert_transaction: when True, also assert that no
            retry_tracker rows leaked out of the rolled-back transactions.
        """
        retries = self._test_db_exceptions_handled(method, mock_object,
                                                   True)
        if assert_transaction:
            # It should be 0 as long as the retriable method creates save
            # points or transactions, which is the correct behavior
            self._assertRetryCount(0)
        # RETRIABLE * 3 when expect_retries=True since it will retry
        # twice as per the test, plus the original call.
        self.assertEqual(
            len(RETRIABLE_EXCEPTIONS) * (RETRY_MAX + 1),
            retries
        )
        # Re-run inside an outer writer transaction: a retriable error in a
        # nested scope must NOT be retried (the whole outer transaction
        # would have to be replayed, which the decorator cannot do).
        with db_api.CONTEXT_WRITER.using(self.db_context):
            retries = self._test_db_exceptions_handled(
                method, mock_object, False
            )
        if assert_transaction:
            self._assertRetryCount(0)
        # only once per exception when expect_retries=False
        self.assertEqual(len(RETRIABLE_EXCEPTIONS), retries)
networking-odl-16.0.0/networking_odl/tests/functional/0000775000175000017500000000000013656750617023134 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/tests/functional/__init__.py0000664000175000017500000000000013656750541025227 0ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/tests/functional/requirements.txt0000664000175000017500000000063713656750541026422 0ustar zuulzuul00000000000000# Additional requirements for functional tests
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
oslotest>=3.3.0 # Apache-2.0
psutil>=1.1.1,<2.0.0
psycopg2
python-subunit>=1.2.0 # Apache-2.0/BSD
PyMySQL>=0.6.2 # MIT License
stestr>=2.0.0 # Apache-2.0
networking-odl-16.0.0/networking_odl/tests/functional/test_l3.py0000664000175000017500000000711313656750541025061 0ustar zuulzuul00000000000000#
# Copyright (C) 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import functools
from neutron.tests.unit.extensions import test_l3
from neutron.tests.unit.plugins.ml2 import test_plugin
from neutron_lib import constants as q_const
from networking_odl.common import constants as odl_const
from networking_odl.tests.functional import base
class _TestL3Base(test_l3.L3NatTestCaseMixin, base.OdlTestsBase):
    """Shared L3 (router / floating IP) scenarios against OpenDaylight.

    Subclasses must define ``l3_plugin`` (the L3 service-plugin alias) and
    mix in an ML2 test-case base class.
    """

    # Override default behavior so that an extension manager is used;
    # otherwise the L3 (router/floating-IP) API extensions are not loaded.
    def setup_parent(self):
        """Perform parent setup with the common plugin configuration class."""
        ext_mgr = test_l3.L3TestExtensionManager()
        # Ensure that the parent setup can be called without arguments
        # by the common configuration setUp.
        parent_setup = functools.partial(
            super(test_plugin.Ml2PluginV2TestCase, self).setUp,
            plugin=test_plugin.PLUGIN_NAME,
            ext_mgr=ext_mgr,
            service_plugins={'l3_plugin_name': self.l3_plugin},
        )
        self.useFixture(test_plugin.Ml2ConfFixture(parent_setup))

    def test_router_create(self):
        # Creating a router must produce a matching ODL router resource.
        with self.router() as router:
            self.assert_resource_created(odl_const.ODL_ROUTER, router)

    def test_router_update(self):
        # Router updates must be propagated to ODL.
        with self.router() as router:
            self.resource_update_test(odl_const.ODL_ROUTER, router)

    def test_router_delete(self):
        # Router deletion must remove the ODL resource.
        with self.router() as router:
            self.resource_delete_test(odl_const.ODL_ROUTER, router)

    def test_floatingip_create(self):
        with self.floatingip_with_assoc() as fip:
            self.assert_resource_created(odl_const.ODL_FLOATINGIP, fip)
        # Test FIP was deleted since the code creating the FIP deletes it
        # once the context block exits.
        odl_fip = self.get_odl_resource(odl_const.ODL_FLOATINGIP, fip)
        self.assertIsNone(odl_fip)

    def test_floatingip_status_with_port(self):
        # A FIP associated with a port must report ACTIVE status.
        with self.floatingip_with_assoc() as fip:
            self.assertEqual(
                q_const.FLOATINGIP_STATUS_ACTIVE,
                fip['floatingip']['status'])

    def test_floatingip_status_without_port(self):
        with self.subnet() as subnet:
            with self.floatingip_no_assoc(subnet) as fip:
                # status should be down when floating ip
                # is not associated to any port
                self.assertEqual(
                    q_const.FLOATINGIP_STATUS_DOWN,
                    fip['floatingip']['status'])

    def test_floatingip_dissociate_port(self):
        # Deleting the associated port must clear the FIP's port binding
        # in the ODL representation.
        with self.floatingip_with_assoc() as fip:
            portid = fip['floatingip']['port_id']
            self.assertIsNotNone(portid)
            self._delete(odl_const.ODL_PORTS, portid)
            updated_fip = self.get_odl_resource(odl_const.ODL_FLOATINGIP, fip)
            self.assertNotIn('port_id', updated_fip['floatingip'].keys())
class TestL3PluginV2(base.V2DriverAdjustment, _TestL3Base,
                     test_plugin.Ml2PluginV2TestCase):
    """Run the shared L3 scenarios against the v2 ODL driver stack."""

    # ML2 mechanism driver under test.
    _mechanism_drivers = ['opendaylight_v2']
    # Service-plugin alias consumed by _TestL3Base.setup_parent().
    l3_plugin = 'odl-router_v2'
networking-odl-16.0.0/networking_odl/tests/functional/test_trunk_drivers.py0000664000175000017500000001075613656750541027453 0ustar zuulzuul00000000000000# Copyright (c) 2017 Ericsson India Global Service Pvt Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
from networking_odl.common import constants as odl_const
from networking_odl.tests.functional import base
from neutron.services.trunk import plugin as trunk_plugin
from neutron.tests.unit.plugins.ml2 import test_plugin
from neutron_lib.plugins import utils
from neutron_lib.services.trunk import constants
from oslo_utils import uuidutils
class _TrunkDriverTest(base.OdlTestsBase):
    """Trunk/subport CRUD scenarios against OpenDaylight.

    Subclasses must provide ``self.trunk_plugin`` (a TrunkPlugin instance)
    and ``self.context`` via their setUp.
    """

    def test_trunk_create(self):
        # Creating a trunk must produce a matching ODL trunk resource.
        with self.trunk() as trunk:
            self.assert_resource_created(odl_const.ODL_TRUNK, trunk)

    def test_trunk_update(self):
        # Updating admin_state_up must be reflected in the ODL resource.
        with self.trunk() as trunk:
            trunk['trunk'].update(admin_state_up=False)
            self.trunk_plugin.update_trunk(self.context,
                                           trunk['trunk']['id'], trunk)
            response = self.get_odl_resource(odl_const.ODL_TRUNK, trunk)
            self.assertFalse(response['trunk']['admin_state_up'])

    def test_subport_create(self):
        # Adding a subport to an existing trunk must show up in ODL.
        with self.trunk() as trunk:
            with self.subport() as subport:
                trunk_obj = self.trunk_plugin.add_subports(
                    self.context, trunk['trunk']['id'],
                    {'sub_ports': [subport]})
                response = self.get_odl_resource(odl_const.ODL_TRUNK,
                                                 {'trunk': trunk_obj})
                self.assertEqual(response['trunk']['sub_ports'][0]['port_id'],
                                 subport['port_id'])

    def test_subport_delete(self):
        # A trunk created with a subport loses it after remove_subports.
        with self.subport() as subport:
            with self.trunk([subport]) as trunk:
                response = self.get_odl_resource(odl_const.ODL_TRUNK, trunk)
                self.assertEqual(response['trunk']['sub_ports'][0]['port_id'],
                                 subport['port_id'])
                trunk_obj = self.trunk_plugin.remove_subports(
                    self.context, trunk['trunk']['id'],
                    {'sub_ports': [subport]})
                response = self.get_odl_resource(odl_const.ODL_TRUNK,
                                                 {'trunk': trunk_obj})
                self.assertEqual(response['trunk']['sub_ports'], [])

    def test_trunk_delete(self):
        # Deleting a trunk must remove the ODL resource.
        with self.trunk() as trunk:
            self.trunk_plugin.delete_trunk(self.context, trunk['trunk']['id'])
            self.assertIsNone(self.get_odl_resource(odl_const.ODL_TRUNK,
                                                    trunk))

    @contextlib.contextmanager
    def trunk(self, subports=None):
        """Yield a trunk dict backed by a fresh network/subnet/parent port.

        :param subports: optional list of subport dicts to create the trunk
            with; defaults to an empty list.
        """
        subports = subports if subports else []
        with self.network() as network:
            with self.subnet(network=network) as subnet:
                with self.port(subnet=subnet) as trunk_parent:
                    tenant_id = uuidutils.generate_uuid()
                    trunk = {'port_id': trunk_parent['port']['id'],
                             'tenant_id': tenant_id, 'project_id': tenant_id,
                             'admin_state_up': True,
                             'name': 'test_trunk', 'sub_ports': subports}
                    trunk_obj = self.trunk_plugin.create_trunk(
                        self.context, {'trunk': trunk})
                    yield {'trunk': trunk_obj}

    @contextlib.contextmanager
    def subport(self):
        """Yield a VLAN subport dict backed by a freshly created port."""
        with self.port() as child_port:
            subport = {'segmentation_type': 'vlan',
                       'segmentation_id': 123,
                       'port_id': child_port['port']['id']}
            yield subport
class TestTrunkV2Driver(base.V2DriverAdjustment, _TrunkDriverTest,
                        test_plugin.Ml2PluginV2TestCase):
    """Run the trunk scenarios against the v2 ODL mechanism driver."""

    _mechanism_drivers = ['opendaylight_v2']

    def setUp(self):
        super(TestTrunkV2Driver, self).setUp()
        # Build the trunk service plugin by hand and register the VLAN
        # segmentation type so trunk/subport operations validate correctly.
        self.trunk_plugin = trunk_plugin.TrunkPlugin()
        self.trunk_plugin.add_segmentation_type(
            constants.SEGMENTATION_TYPE_VLAN, utils.is_valid_vlan_tag)
networking-odl-16.0.0/networking_odl/tests/functional/test_l2gateway.py0000664000175000017500000001542713656750541026451 0ustar zuulzuul00000000000000#
# Copyright (C) 2017 Ericsson India Global Services Pvt Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import contextlib
import copy
import mock
import webob.exc
from neutron.api import extensions as api_extensions
from neutron.db import servicetype_db as sdb
from neutron.tests.unit.plugins.ml2 import test_plugin
from oslo_utils import uuidutils
from networking_l2gw import extensions as l2gw_extensions
from networking_l2gw.services.l2gateway.common import constants as l2gw_const
from networking_l2gw.services.l2gateway.plugin import L2GatewayPlugin
from networking_odl.common import constants as odl_const
from networking_odl.tests.functional import base
_uuid = uuidutils.generate_uuid
class L2GatewayTestCaseMixin(object):
    """Helpers for L2 gateway API tests: fixtures and request wrappers.

    Subclasses must define ``service_provider`` as a
    ``"<service_type>:<name>:<driver>"`` string; setUp registers it as the
    default provider before the parent setup runs.
    """

    # Sample gateway devices used as the default request payload.
    devices = [{'device_name': 's1',
                'interfaces': [{'name': 'int1'}]
                },
               {'device_name': 's2',
                'interfaces': [{'name': 'int2', 'segmentation_id': [10, 20]}]
                }]
    # Default L2 gateway create payload (evaluated once at class creation).
    l2gw_data = {l2gw_const.GATEWAY_RESOURCE_NAME: {'tenant_id': _uuid(),
                                                    'name': 'l2gw',
                                                    'devices': devices}}

    def setUp(self):
        """Perform parent setup with the common plugin configuration class."""
        # Ensure that the parent setup can be called without arguments
        # by the common configuration setUp.
        bits = self.service_provider.split(':')
        provider = {
            'service_type': bits[0],
            'name': bits[1],
            'driver': bits[2],
            'default': True
        }
        # override the default service provider; the patch must be in place
        # before super().setUp() initializes the service plugins
        self.service_providers = (
            mock.patch.object(sdb.ServiceTypeManager,
                              'get_service_providers').start())
        self.service_providers.return_value = [provider]
        super(L2GatewayTestCaseMixin, self).setUp()

    @contextlib.contextmanager
    def l2gateway(self, do_delete=True, **kwargs):
        """Create an L2 gateway via the API and yield the response dict.

        Keyword arguments are merged into the default payload, except for
        ``data`` which replaces the payload entirely. Raises
        HTTPClientError on any 4xx/5xx response. Deletes the gateway on
        exit unless *do_delete* is False.
        """
        req_data = copy.deepcopy(self.l2gw_data)
        fmt = 'json'
        if kwargs.get('data'):
            req_data = kwargs.get('data')
        else:
            req_data[l2gw_const.GATEWAY_RESOURCE_NAME].update(kwargs)
        l2gw_req = self.new_create_request(l2gw_const.L2_GATEWAYS,
                                           req_data, fmt=fmt)
        res = l2gw_req.get_response(self.ext_api)
        if res.status_int >= 400:
            raise webob.exc.HTTPClientError(code=res.status_int)
        l2gw = self.deserialize('json', res)
        yield l2gw
        if do_delete:
            self._delete(l2gw_const.L2_GATEWAYS,
                         l2gw[l2gw_const.GATEWAY_RESOURCE_NAME]['id'])

    @contextlib.contextmanager
    def l2gateway_connection(self, nw_id, l2gw_id,
                             do_delete=True, **kwargs):
        """Create an L2 gateway connection and yield the response dict.

        :param nw_id: ID of the network to connect.
        :param l2gw_id: ID of the gateway to connect it to.
        Semantics of ``data``/extra kwargs and *do_delete* mirror
        :meth:`l2gateway`.
        """
        req_data = {
            l2gw_const.CONNECTION_RESOURCE_NAME:
            {'tenant_id': _uuid(),
             'network_id': nw_id,
             'l2_gateway_id': l2gw_id}
        }
        fmt = 'json'
        if kwargs.get('data'):
            req_data = kwargs.get('data')
        else:
            req_data[l2gw_const.CONNECTION_RESOURCE_NAME].update(kwargs)
        l2gw_connection_req = self.new_create_request(
            l2gw_const.L2_GATEWAYS_CONNECTION, req_data, fmt=fmt)
        res = l2gw_connection_req.get_response(self.ext_api)
        if res.status_int >= 400:
            raise webob.exc.HTTPClientError(code=res.status_int)
        l2gw_connection = self.deserialize('json', res)
        yield l2gw_connection
        if do_delete:
            self._delete(l2gw_const.L2_GATEWAYS_CONNECTION,
                         l2gw_connection
                         [l2gw_const.CONNECTION_RESOURCE_NAME]['id'])

    @staticmethod
    def convert_to_odl_l2gw_connection(l2gw_connection_in):
        """Translate a Neutron connection dict to the ODL representation.

        ODL names the field ``gateway_id`` where Neutron uses
        ``l2_gateway_id``; the input is deep-copied, not mutated.
        """
        odl_l2_gw_conn_data = copy.deepcopy(
            l2gw_connection_in[l2gw_const.CONNECTION_RESOURCE_NAME])
        odl_l2_gw_conn_data['gateway_id'] = (
            odl_l2_gw_conn_data['l2_gateway_id'])
        odl_l2_gw_conn_data.pop('l2_gateway_id')
        return {odl_const.ODL_L2GATEWAY_CONNECTION: odl_l2_gw_conn_data}
class _TestL2GatewayBase(base.OdlTestsBase, L2GatewayTestCaseMixin):
    """L2 gateway CRUD scenarios run against OpenDaylight."""

    def get_ext_managers(self):
        # Load the l2gw API extensions so gateway resources are routable.
        extensions_path = ':'.join(l2gw_extensions.__path__)
        return api_extensions.PluginAwareExtensionManager(
            extensions_path,
            {'l2gw_plugin': L2GatewayPlugin()})

    def get_additional_service_plugins(self):
        # Register the l2gw service plugin alongside the core plugin.
        l2gw_plugin_str = ('networking_l2gw.services.l2gateway.plugin.'
                           'L2GatewayPlugin')
        service_plugin = {'l2gw_plugin': l2gw_plugin_str}
        return service_plugin

    def test_l2gateway_create(self):
        with self.l2gateway(name='mygateway') as l2gateway:
            self.assert_resource_created(odl_const.ODL_L2GATEWAY, l2gateway)

    def test_l2gateway_update(self):
        with self.l2gateway(name='gateway1') as l2gateway:
            self.resource_update_test(odl_const.ODL_L2GATEWAY, l2gateway)

    def test_l2gateway_delete(self):
        # do_delete=False: the delete helper under test removes it itself.
        with self.l2gateway(do_delete=False) as l2gateway:
            self.resource_delete_test(odl_const.ODL_L2GATEWAY, l2gateway)

    def test_l2gateway_connection_create_delete(self):
        # Connection must exist in ODL while the fixture is open and be
        # gone once the connection context exits (fixture deletes it).
        odl_l2gw_connection = {}
        with self.network() as network:
            with self.l2gateway() as l2gateway:
                net_id = network['network']['id']
                l2gw_id = l2gateway[odl_const.ODL_L2GATEWAY]['id']
                with (self.l2gateway_connection(net_id, l2gw_id)
                      ) as l2gw_connection:
                    odl_l2gw_connection = (
                        self.convert_to_odl_l2gw_connection(l2gw_connection))
                    self.assert_resource_created(
                        odl_const.ODL_L2GATEWAY_CONNECTION,
                        odl_l2gw_connection)
        self.assertIsNone(self.get_odl_resource(
            odl_const.ODL_L2GATEWAY_CONNECTION, odl_l2gw_connection))
class TestL2gatewayV2Driver(base.V2DriverAdjustment, _TestL2GatewayBase,
                            test_plugin.Ml2PluginV2TestCase):
    """Run the L2 gateway scenarios against the v2 ODL driver."""

    _mechanism_drivers = ['opendaylight_v2']
    # "<service_type>:<name>:<driver>" consumed by the mixin's setUp.
    service_provider = ('L2GW:OpenDaylight:networking_odl.l2gateway.driver_v2.'
                        'OpenDaylightL2gwDriver:default')
networking-odl-16.0.0/networking_odl/tests/functional/test_qos.py0000664000175000017500000000537513656750541025355 0ustar zuulzuul00000000000000# Copyright (C) 2017 Intel Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import contextlib
from oslo_utils import uuidutils
from neutron.extensions import qos as qos_ext
from neutron.services.qos import qos_plugin
from neutron.tests.unit.api import test_extensions
from neutron.tests.unit.plugins.ml2 import test_plugin
from neutron_lib import fixture as nlib_fixture
from neutron_lib.plugins import directory
from networking_odl.common import constants as odl_const
from networking_odl.tests.functional import base
class QoSTestExtensionManager(object):
    """Minimal extension manager exposing only the QoS API resources."""

    def get_resources(self):
        """Return the REST resources of the QoS extension."""
        resources = qos_ext.Qos.get_resources()
        return resources

    def get_actions(self):
        """This manager contributes no extra actions."""
        return list()

    def get_request_extensions(self):
        """This manager contributes no request extensions."""
        return list()
class _QoSDriverTestCase(base.OdlTestsBase):
    """QoS policy CRUD scenarios against the OpenDaylight backend."""

    def test_policy_create(self):
        """A created policy must appear as an ODL QoS policy resource."""
        with self.qos_policy() as policy:
            self.assert_resource_created(odl_const.ODL_QOS_POLICY, policy)

    def test_policy_update(self):
        """Policy updates must be propagated to ODL."""
        with self.qos_policy() as policy:
            self.resource_update_test(odl_const.ODL_QOS_POLICY, policy)

    def test_policy_delete(self):
        """Policy deletion must remove the ODL resource."""
        with self.qos_policy() as policy:
            self.resource_delete_test(odl_const.ODL_QOS_POLICY, policy)
class QoSDriverTests(base.V2DriverAdjustment,
                     _QoSDriverTestCase,
                     test_plugin.Ml2PluginV2TestCase):
    """Run the QoS scenarios against the v2 ODL mechanism driver."""

    _mechanism_drivers = ['opendaylight_v2']

    def setUp(self):
        # The plugin-directory fixture must be installed before the parent
        # setUp so the QoS plugin registration below is isolated per test.
        self.useFixture(nlib_fixture.PluginDirectoryFixture())
        super(QoSDriverTests, self).setUp()
        self.qos_plug = qos_plugin.QoSPlugin()
        directory.add_plugin('QOS', self.qos_plug)
        ext_mgr = QoSTestExtensionManager()
        # Map the 'policies' collection under the /qos URL prefix.
        self.resource_prefix_map = {'policies': '/qos'}
        self.ext_api = test_extensions.setup_extensions_middleware(ext_mgr)
        tenant_id = uuidutils.generate_uuid()
        # Default create payload used by the qos_policy fixture.
        self.policy_data = {
            'policy': {'name': 'test-policy', 'tenant_id': tenant_id}}

    @contextlib.contextmanager
    def qos_policy(self, fmt='json'):
        """Create a QoS policy via the API and yield the response dict.

        NOTE(review): no status check and no cleanup after yield — the
        policy is left behind; presumably acceptable for these tests.
        """
        po_res = self.new_create_request('policies', self.policy_data, fmt)
        po_rep = po_res.get_response(self.ext_api)
        policy = self.deserialize(fmt, po_rep)
        yield policy
networking-odl-16.0.0/networking_odl/tests/functional/test_bgpvpn.py0000664000175000017500000001736013656750541026044 0ustar zuulzuul00000000000000#
# Copyright (C) 2017 Ericsson India Global Services Pvt Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import webob.exc
from neutron.tests.unit.plugins.ml2 import test_plugin
from neutron.tests.unit import testlib_api
# BGPVPN Table metadata should be imported before
# sqlalchemy metadata.create_all call else tables
# will not be created.
from networking_bgpvpn.neutron.db import bgpvpn_db # noqa
from networking_bgpvpn.tests.unit.services import test_plugin as bgpvpn_plugin
from networking_odl.common import constants as odl_const
from networking_odl.tests.functional import base
class _TestBGPVPNBase(base.OdlTestsBase):
    """BGPVPN CRUD and network/router association scenarios against ODL."""

    # Route distinguishers used by tests that require explicit RDs.
    rds = ['100:1']

    def setUp(self, plugin=None, service_plugins=None,
              ext_mgr=None):
        # Force the ODL BGPVPN v2 driver as the default service provider.
        provider = {
            'service_type': 'BGPVPN',
            'name': 'OpenDaylight',
            'driver': 'networking_odl.bgpvpn.odl_v2.OpenDaylightBgpvpnDriver',
            'default': True
        }
        # NOTE(review): self.service_providers is expected to be set up by
        # an earlier setUp in the MRO (BgpvpnTestCaseMixin) — verify.
        self.service_providers.return_value = [provider]
        # Stash the arguments so the get_* hooks below can return them.
        self.plugin_arg = plugin
        self.service_plugin_arg = service_plugins
        self.ext_mgr_arg = ext_mgr
        super(_TestBGPVPNBase, self).setUp()

    def get_ext_managers(self):
        return self.ext_mgr_arg

    def get_plugins(self):
        return self.plugin_arg

    def get_additional_service_plugins(self):
        return self.service_plugin_arg

    def _assert_networks_associated(self, net_ids, bgpvpn):
        # Fetch the BGPVPN from ODL and compare its network list.
        response = self.get_odl_resource(odl_const.ODL_BGPVPN, bgpvpn)
        self.assertItemsEqual(net_ids,
                              response[odl_const.ODL_BGPVPN]['networks'])

    def _assert_routers_associated(self, router_ids, bgpvpn):
        # Fetch the BGPVPN from ODL and compare its router list.
        response = self.get_odl_resource(odl_const.ODL_BGPVPN, bgpvpn)
        self.assertItemsEqual(router_ids,
                              response[odl_const.ODL_BGPVPN]['routers'])

    def test_bgpvpn_create(self):
        with self.bgpvpn() as bgpvpn:
            self.assert_resource_created(odl_const.ODL_BGPVPN, bgpvpn)

    def test_bgpvpn_create_with_rds(self):
        # Route distinguishers supplied at create time must round-trip.
        with self.bgpvpn(route_distinguishers=self.rds) as bgpvpn:
            response = self.get_odl_resource(odl_const.ODL_BGPVPN, bgpvpn)
            self.assertItemsEqual(self.rds,
                                  response[odl_const.ODL_BGPVPN]
                                  ['route_distinguishers'])

    def test_bgpvpn_delete(self):
        with self.bgpvpn(do_delete=False) as bgpvpn:
            self._delete('bgpvpn/bgpvpns', bgpvpn['bgpvpn']['id'])
            self.assertIsNone(
                self.get_odl_resource(odl_const.ODL_BGPVPN, bgpvpn))

    def test_associate_dissociate_net(self):
        # Network shows while associated, list is empty after dissociation.
        with (self.network()) as net1, (
                self.bgpvpn(route_distinguishers=self.rds)) as bgpvpn:
            net_id = net1['network']['id']
            bgpvpn_id = bgpvpn['bgpvpn']['id']
            with self.assoc_net(bgpvpn_id, net_id):
                self._assert_networks_associated([net_id], bgpvpn)
            self._assert_networks_associated([], bgpvpn)

    def test_associate_multiple_networks(self):
        with (self.network()) as net1, (self.network()) as net2, (
                self.bgpvpn(route_distinguishers=self.rds)) as bgpvpn:
            net_id1 = net1['network']['id']
            net_id2 = net2['network']['id']
            bgpvpn_id = bgpvpn['bgpvpn']['id']
            with self.assoc_net(bgpvpn_id, net_id1), \
                    self.assoc_net(bgpvpn_id, net_id2):
                self._assert_networks_associated([net_id1, net_id2], bgpvpn)

    def test_assoc_multiple_networks_dissoc_one(self):
        # Dissociating one of two networks leaves the other associated.
        with (self.network()) as net1, (self.network()) as net2, (
                self.bgpvpn(route_distinguishers=self.rds)) as bgpvpn:
            net_id1 = net1['network']['id']
            net_id2 = net2['network']['id']
            bgpvpn_id = bgpvpn['bgpvpn']['id']
            with self.assoc_net(bgpvpn_id, net_id1):
                with self.assoc_net(bgpvpn_id, net_id2):
                    self._assert_networks_associated([net_id1, net_id2],
                                                     bgpvpn)
                self._assert_networks_associated([net_id1], bgpvpn)

    def test_associate_dissociate_router(self):
        with (self.router(tenant_id=self._tenant_id)) as router, (
                self.bgpvpn(route_distinguishers=self.rds)) as bgpvpn:
            router_id = router['router']['id']
            bgpvpn_id = bgpvpn['bgpvpn']['id']
            with self.assoc_router(bgpvpn_id, router_id):
                self._assert_routers_associated([router_id], bgpvpn)
            self._assert_routers_associated([], bgpvpn)

    def test_associate_multiple_routers(self):
        # A BGPVPN accepts only one router: the second association must
        # fail with 400 and the first association must remain intact.
        with (self.router(tenant_id=self._tenant_id, name='r1')) as r1, (
                self.router(tenant_id=self._tenant_id, name='r2')) as r2, (
                self.bgpvpn(route_distinguishers=self.rds)) as bgpvpn:
            router_id1 = r1['router']['id']
            router_id2 = r2['router']['id']
            bgpvpn_id = bgpvpn['bgpvpn']['id']
            with self.assoc_router(bgpvpn_id, router_id1):
                self._assert_routers_associated([router_id1], bgpvpn)
                with testlib_api.ExpectedException(
                        webob.exc.HTTPClientError) as ctx_manager:
                    with self.assoc_router(bgpvpn_id, router_id2):
                        pass
                self.assertEqual(webob.exc.HTTPBadRequest.code,
                                 ctx_manager.exception.code)
                self._assert_routers_associated([router_id1], bgpvpn)

    def test_assoc_router_multiple_bgpvpns(self):
        # One router may, however, belong to several BGPVPNs at once.
        with (self.router(tenant_id=self._tenant_id, name='r1')) as router, (
                self.bgpvpn(route_distinguishers=self.rds)) as bgpvpn1, (
                self.bgpvpn()) as bgpvpn2:
            router_id = router['router']['id']
            bgpvpn_id_1 = bgpvpn1['bgpvpn']['id']
            bgpvpn_id_2 = bgpvpn2['bgpvpn']['id']
            with (self.assoc_router(bgpvpn_id_1, router_id)), (
                    self.assoc_router(bgpvpn_id_2, router_id)):
                self._assert_routers_associated([router_id], bgpvpn1)
                self._assert_routers_associated([router_id], bgpvpn2)

    def test_associate_router_network(self):
        # A router and a network can be associated simultaneously.
        with (self.router(tenant_id=self._tenant_id)) as router, (
                self.network()) as net1, (
                self.bgpvpn(route_distinguishers=self.rds)) as bgpvpn:
            router_id = router['router']['id']
            net_id = net1['network']['id']
            bgpvpn_id = bgpvpn['bgpvpn']['id']
            with self.assoc_router(bgpvpn_id, router_id), \
                    self.assoc_net(bgpvpn_id, net_id):
                response = self.get_odl_resource(odl_const.ODL_BGPVPN, bgpvpn)
                self.assertItemsEqual([router_id],
                                      response[odl_const.ODL_BGPVPN]
                                      ['routers'])
                self.assertItemsEqual([net_id],
                                      response[odl_const.ODL_BGPVPN]
                                      ['networks'])
class TestBGPVPNV2Driver(base.V2DriverAdjustment,
                         bgpvpn_plugin.BgpvpnTestCaseMixin,
                         _TestBGPVPNBase, test_plugin.Ml2PluginV2TestCase):
    """Run the BGPVPN scenarios against the v2 ODL mechanism driver."""

    _mechanism_drivers = ['opendaylight_v2']
networking-odl-16.0.0/networking_odl/tests/functional/db/0000775000175000017500000000000013656750617023521 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/tests/functional/db/__init__.py0000664000175000017500000000000013656750541025614 0ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/tests/functional/db/test_migrations.py0000664000175000017500000001272013656750541027304 0ustar zuulzuul00000000000000# Copyright 2016 Intel Corporation.
# Copyright 2016 Isaku Yamahata
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from sqlalchemy import sql
from sqlalchemy.sql import schema
from neutron.db.migration.alembic_migrations import external
from neutron.db.migration import cli as migration
from neutron.tests.functional.db import test_migrations
from neutron.tests.unit import testlib_api
from networking_odl.db import head
# Tables owned by neutron-fwaas; present in the test DB but not managed
# by networking-odl's migrations.
FWAAS_TABLES = (
    'cisco_firewall_associations',
    'firewall_group_port_associations_v2',
    'firewall_groups_v2',
    'firewall_policies_v2',
    'firewall_policy_rule_associations_v2',
    'firewall_router_associations',
    'firewall_rules_v2',
)

# Tables owned by networking-l2gw.
L2GW_TABLES = (
    'l2gatewayconnections',
    'l2gatewaydevices',
    'l2gatewayinterfaces',
    'l2gateways',
    'l2gw_alembic_version',
    'logical_switches',
    'pending_ucast_macs_remotes',
    'physical_locators',
    'physical_ports',
    'physical_switches',
    'ucast_macs_locals',
    'ucast_macs_remotes',
    'vlan_bindings',
)

# Tables owned by networking-bgpvpn (including its bagpipe driver).
BGPVPN_TABLES = (
    'bgpvpns',
    'bgpvpn_network_associations',
    'bgpvpn_router_associations',
    'ml2_route_target_allocations',
    'sfc_bagpipe_ppg_rtnn_associations',
    'sfc_bagpipe_chain_hops',
)

# Tables from other repos that we depend on but do not manage.
IGNORED_TABLES_MATCH = set(
    FWAAS_TABLES +
    L2GW_TABLES +
    BGPVPN_TABLES
)

# EXTERNAL_TABLES should contain all names of tables that are not related to
# current repo.
EXTERNAL_TABLES = set(external.TABLES)
# Alembic version table used by networking-odl's own migration branch.
VERSION_TABLE = 'odl_alembic_version'
class _TestModelsMigrationsODL(test_migrations._TestModelsMigrations):
    """Check that alembic migrations match the declarative models."""

    def db_sync(self, engine):
        # Upgrade every registered alembic branch to its head against the
        # test engine so the resulting schema can be diffed vs. the models.
        self.cfg.config(connection=engine.url, group='database')
        for conf in migration.get_alembic_configs():
            self.alembic_config = conf
            self.alembic_config.neutron_config = cfg.CONF
            migration.do_alembic_command(conf, 'upgrade', 'heads')

    def get_metadata(self):
        # Metadata of all networking-odl models (the expected schema).
        return head.get_metadata()

    def include_object(self, object_, name, type_, reflected, compare_to):
        """Filter out objects not owned by networking-odl from the diff."""
        if type_ == 'table' and (name.startswith('alembic') or
                                 name == VERSION_TABLE or
                                 name in EXTERNAL_TABLES or
                                 name in IGNORED_TABLES_MATCH):
            return False
        # Auto-increment helper indexes created by the DB, not the models.
        if type_ == 'index' and reflected and name.startswith("idx_autoinc_"):
            return False
        return True

    def _filter_mysql_server_func_now(self, diff_elem):
        # TODO(yamahata): remove this bug work around once it's fixed
        # example:
        # when the column has server_default=sa.func.now(), the diff
        # includes the followings diff
        # [ ('modify_default',
        #    None,
        #    'opendaylightjournal',
        #    'created_at',
        #    {'existing_nullable': True,
        #     'existing_type': DATETIME()},
        #    DefaultClause(, for_update=False),
        #    DefaultClause(,
        #                  for_update=False))]
        # another example
        # [ ('modify_default',
        #    None,
        #    'opendaylightjournal',
        #    'created_at',
        #    {'existing_nullable': True,
        #     'existing_type': DATETIME()},
        #    None,
        #    DefaultClause(,
        #                  for_update=False))]
        meta_def = diff_elem[0][5]
        rendered_meta_def = diff_elem[0][6]
        if (diff_elem[0][0] == 'modify_default' and
                diff_elem[0][2] in ('opendaylightjournal',
                                    'opendaylight_periodic_task') and
                isinstance(meta_def, schema.DefaultClause) and
                isinstance(meta_def.arg, sql.elements.TextClause) and
                meta_def.reflected and
                meta_def.arg.text == u'CURRENT_TIMESTAMP' and
                isinstance(rendered_meta_def, schema.DefaultClause) and
                isinstance(rendered_meta_def.arg, sql.functions.now) and
                not rendered_meta_def.reflected and
                meta_def.for_update == rendered_meta_def.for_update):
            return False
        return True

    def filter_metadata_diff(self, diff):
        # Drop the spurious MySQL CURRENT_TIMESTAMP default diffs above.
        return list(filter(self._filter_mysql_server_func_now, diff))
class TestModelsMigrationsMysql(testlib_api.MySQLTestCaseMixin,
                                _TestModelsMigrationsODL,
                                testlib_api.SqlTestCaseLight):
    """Run the model/migration sync check against MySQL."""
    pass
class TestModelsMigrationsPostgresql(testlib_api.PostgreSQLTestCaseMixin,
                                     _TestModelsMigrationsODL,
                                     testlib_api.SqlTestCaseLight):
    """Run the model/migration sync check against PostgreSQL."""
    pass
networking-odl-16.0.0/networking_odl/tests/functional/test_ml2_drivers.py0000664000175000017500000001141013656750541026766 0ustar zuulzuul00000000000000#
# Copyright (C) 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import functools
from neutron.tests.unit.extensions import test_securitygroup
from neutron.tests.unit.plugins.ml2 import test_plugin
from networking_odl.common import constants as odl_const
from networking_odl.tests.functional import base
class _DriverTest(base.OdlTestsBase):
    """CRUD round-trip scenarios for core ML2 resources against ODL."""

    def test_network_create(self):
        """A created network must appear as an ODL network resource."""
        with self.network() as network:
            self.assert_resource_created(odl_const.ODL_NETWORK, network)

    def test_network_update(self):
        """Network updates must be propagated to ODL."""
        with self.network() as network:
            self.resource_update_test(odl_const.ODL_NETWORK, network)

    def test_network_delete(self):
        """Network deletion must remove the ODL resource."""
        with self.network() as network:
            self.resource_delete_test(odl_const.ODL_NETWORK, network)

    def test_subnet_create(self):
        """A created subnet must appear as an ODL subnet resource."""
        with self.network() as network, \
                self.subnet(network=network) as subnet:
            self.assert_resource_created(odl_const.ODL_SUBNET, subnet)

    def test_subnet_update(self):
        """Subnet updates must be propagated to ODL."""
        with self.network() as network, \
                self.subnet(network=network) as subnet:
            self.resource_update_test(odl_const.ODL_SUBNET, subnet)

    def test_subnet_delete(self):
        """Subnet deletion must remove the ODL resource."""
        with self.network() as network, \
                self.subnet(network=network) as subnet:
            self.resource_delete_test(odl_const.ODL_SUBNET, subnet)

    def test_port_create(self):
        """A created port must appear as an ODL port resource."""
        with self.network() as network, \
                self.subnet(network=network) as subnet, \
                self.port(subnet=subnet) as port:
            self.assert_resource_created(odl_const.ODL_PORT, port)

    def test_port_update(self):
        """Port updates must be propagated to ODL."""
        with self.network() as network, \
                self.subnet(network=network) as subnet, \
                self.port(subnet=subnet) as port:
            self.resource_update_test(odl_const.ODL_PORT, port)

    def test_port_delete(self):
        """Port deletion must remove the ODL resource."""
        with self.network() as network, \
                self.subnet(network=network) as subnet, \
                self.port(subnet=subnet) as port:
            self.resource_delete_test(odl_const.ODL_PORT, port)
class _DriverSecGroupsTests(base.OdlTestsBase):
    """Security-group CRUD scenarios against ODL."""

    # Override default behavior so that extension manager is used, otherwise
    # we can't test security groups.
    def setup_parent(self):
        """Perform parent setup with the common plugin configuration class."""
        sg_ext_mgr = (
            test_securitygroup.SecurityGroupTestExtensionManager())
        # Ensure that the parent setup can be called without arguments
        # by the common configuration setUp.
        parent_setup = functools.partial(
            super(test_plugin.Ml2PluginV2TestCase, self).setUp,
            plugin=test_plugin.PLUGIN_NAME,
            ext_mgr=sg_ext_mgr,
        )
        self.useFixture(test_plugin.Ml2ConfFixture(parent_setup))

    def test_security_group_create(self):
        """A created security group must appear as an ODL SG resource."""
        with self.security_group() as sg:
            self.assert_resource_created(odl_const.ODL_SG, sg)

    def test_security_group_update(self):
        """Security-group updates must be propagated to ODL."""
        with self.security_group() as sg:
            self.resource_update_test(odl_const.ODL_SG, sg)

    def test_security_group_delete(self):
        """Security-group deletion must remove the ODL resource."""
        with self.security_group() as sg:
            self.resource_delete_test(odl_const.ODL_SG, sg)

    def test_security_group_rule_create(self):
        """A created SG rule must appear as an ODL SG-rule resource."""
        with self.security_group() as sg:
            group_id = sg[odl_const.ODL_SG]['id']
            with self.security_group_rule(
                    security_group_id=group_id) as sg_rule:
                self.assert_resource_created(odl_const.ODL_SG_RULE, sg_rule)

    def test_security_group_rule_delete(self):
        """SG-rule deletion must remove the ODL resource."""
        with self.security_group() as sg:
            group_id = sg[odl_const.ODL_SG]['id']
            with self.security_group_rule(
                    security_group_id=group_id) as sg_rule:
                self.resource_delete_test(odl_const.ODL_SG_RULE, sg_rule)
class TestV2Driver(base.V2DriverAdjustment, _DriverTest,
                   test_plugin.Ml2PluginV2TestCase):
    """Run the core-resource scenarios against the v2 ODL driver."""

    _mechanism_drivers = ['opendaylight_v2']
# Run the security-group tests against the v2 (journal-based) ODL driver.
class TestV2DriverSecGroups(base.V2DriverAdjustment, _DriverSecGroupsTests,
                            test_securitygroup.SecurityGroupsTestCase,
                            test_plugin.Ml2PluginV2TestCase):
    _mechanism_drivers = ['opendaylight_v2']
networking-odl-16.0.0/networking_odl/tests/functional/test_odl_dhcp_driver.py0000664000175000017500000001044613656750541027675 0ustar zuulzuul00000000000000# Copyright (c) 2017 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.tests.unit.plugins.ml2 import test_plugin
from neutron_lib import constants as n_const
from neutron_lib.plugins import directory
from oslo_config import fixture as config_fixture
from networking_odl.common import constants as odl_const
from networking_odl.dhcp import odl_dhcp_driver_base as driver_base
from networking_odl.tests.functional import base
class TestOdlDhcpDriver(base.V2DriverAdjustment, base.OdlTestsBase,
                        test_plugin.Ml2PluginV2TestCase):
    """Functional tests for ODL-managed DHCP port lifecycle on subnets."""

    _mechanism_drivers = ['opendaylight_v2']

    def setUp(self):
        # Enable the ODL DHCP service before the plugin is loaded so the
        # driver creates/deletes DHCP ports on subnet operations.
        self.cfg = self.useFixture(config_fixture.Config())
        self.cfg.config(enable_dhcp_service=True, group='ml2_odl')
        super(TestOdlDhcpDriver, self).setUp()

    def get_port_data(self, network, subnet):
        """Look up the ODL-owned DHCP port for the given subnet.

        Returns {'port': {'id': <uuid>}} shaped like an API resource so the
        base-class assertion helpers can consume it, or None (implicitly)
        when no such port exists.
        """
        plugin = self.get_plugin()
        # The DHCP driver names its device id after the subnet id.
        device_id = driver_base.OPENDAYLIGHT_DEVICE_ID + \
            '-' + subnet[odl_const.ODL_SUBNET]['id']
        filters = {
            'network_id': [network[odl_const.ODL_NETWORK]['id']],
            'device_id': [device_id],
            'device_owner': [n_const.DEVICE_OWNER_DHCP]
        }
        ports = plugin.get_ports(self.context, filters=filters)
        if ports:
            port = ports[0]
            return {odl_const.ODL_PORT: {'id': port['id']}}

    def get_plugin(self):
        """Return the core (ML2) plugin from the plugin directory."""
        return directory.get_plugin()

    def test_subnet_create(self):
        """Creating a DHCP-enabled subnet creates a DHCP port in ODL."""
        with self.network() as network:
            with self.subnet(network=network) as subnet:
                self.get_odl_resource(odl_const.ODL_SUBNET, subnet)
                port = self.get_port_data(network, subnet)
                self.assert_resource_created(odl_const.ODL_PORT, port)

    def test_subnet_update_from_disable_to_enable(self):
        """Enabling DHCP on a subnet creates the DHCP port."""
        with self.network() as network:
            with self.subnet(network=network, enable_dhcp=False) as subnet:
                self.get_odl_resource(odl_const.ODL_SUBNET, subnet)
                plugin = self.get_plugin()
                # No DHCP port should exist while DHCP is disabled.
                port = self.get_port_data(network, subnet)
                self.assertIsNone(port)
                subnet[odl_const.ODL_SUBNET]['enable_dhcp'] = True
                plugin.update_subnet(
                    self.context, subnet[odl_const.ODL_SUBNET]['id'], subnet)
                self.get_odl_resource(odl_const.ODL_SUBNET, subnet)
                port = self.get_port_data(network, subnet)
                self.assert_resource_created(odl_const.ODL_PORT, port)

    def test_subnet_update_from_enable_to_disable(self):
        """Disabling DHCP on a subnet removes the DHCP port from ODL."""
        with self.network() as network:
            with self.subnet(network=network) as subnet:
                self.get_odl_resource(odl_const.ODL_SUBNET, subnet)
                plugin = self.get_plugin()
                port = self.get_port_data(network, subnet)
                self.assert_resource_created(odl_const.ODL_PORT, port)
                subnet[odl_const.ODL_SUBNET]['enable_dhcp'] = False
                plugin.update_subnet(
                    self.context, subnet[odl_const.ODL_SUBNET]['id'], subnet)
                resource = self.get_odl_resource(odl_const.ODL_PORT, port)
                self.assertIsNone(resource)

    def test_subnet_delete(self):
        """Deleting a subnet removes its DHCP port from ODL."""
        with self.network() as network:
            with self.subnet(network=network) as subnet:
                self.get_odl_resource(odl_const.ODL_SUBNET, subnet)
                plugin = self.get_plugin()
                port = self.get_port_data(network, subnet)
                self.assert_resource_created(odl_const.ODL_PORT, port)
                plugin.delete_subnet(
                    self.context, subnet[odl_const.ODL_SUBNET]['id'])
                resource = self.get_odl_resource(odl_const.ODL_PORT, port)
                self.assertIsNone(resource)
networking-odl-16.0.0/networking_odl/tests/functional/base.py0000664000175000017500000001143413656750541024417 0ustar zuulzuul00000000000000#
# Copyright (C) 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import functools
import os
from neutron.common import utils
from neutron.tests import base
from neutron.tests.common import helpers
from neutron.tests.unit.plugins.ml2 import test_plugin
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from networking_odl.common import client
from networking_odl.common import constants as odl_const
from networking_odl.common import utils as odl_utils
from networking_odl.db import db
from networking_odl.tests import base as test_base
from networking_odl.tests.unit import test_base_db
class OdlTestsBase(object):
    """Common configuration and assertion helpers for ODL functional tests.

    Mixed into Ml2PluginV2TestCase-based classes; relies on attributes such
    as _mechanism_drivers being provided by the concrete test class.
    """

    # this is stolen from neutron.tests.functional.base
    # This is the directory from which infra fetches log files
    # for functional tests.
    DEFAULT_LOG_DIR = os.path.join(helpers.get_test_log_path(),
                                   'functional-logs')

    def setUp(self):
        # Point the ODL driver at a locally-running controller; the
        # functional job is expected to provide it on this port.
        self.cfg = self.useFixture(config_fixture.Config())
        self.cfg.config(
            url='http://127.0.0.1:8181/controller/nb/v2/neutron',
            group='ml2_odl')
        self.cfg.config(username='admin', group='ml2_odl')
        self.cfg.config(password='admin', group='ml2_odl')
        self.cfg.config(mechanism_drivers=self._mechanism_drivers, group='ml2')
        self.cfg.config(extension_drivers=[
            'qos', 'port_security'], group='ml2')
        self.client = client.OpenDaylightRestClient.create_client()
        super(OdlTestsBase, self).setUp()
        base.setup_test_logging(
            cfg.CONF, self.DEFAULT_LOG_DIR, "%s.txt" % self.id())

    def setup_parent(self):
        """Perform parent setup with the common plugin configuration class."""
        # self.l3_plugin and get_additional_service_plugins() are supplied
        # by the Ml2PluginV2TestCase side of the MRO.
        service_plugins = {'l3_plugin_name': self.l3_plugin}
        service_plugins.update(self.get_additional_service_plugins())
        # Bind all arguments so the fixture can call setUp with none.
        parent_setup = functools.partial(
            super(test_plugin.Ml2PluginV2TestCase, self).setUp,
            plugin=self.get_plugins(),
            ext_mgr=self.get_ext_managers(),
            service_plugins=service_plugins
        )
        self.useFixture(test_plugin.Ml2ConfFixture(parent_setup))

    def get_plugins(self):
        """Return the core plugin name to load; subclasses may override."""
        return test_plugin.PLUGIN_NAME

    def get_ext_managers(self):
        """Return the extension manager to use; None means no extensions."""
        return None

    def get_odl_resource(self, resource_type, resource):
        """Fetch the given resource from ODL via the REST client."""
        return self.client.get_resource(
            resource_type, resource[resource_type]['id'])

    def assert_resource_created(self, resource_type, resource):
        """Assert that the resource exists in ODL."""
        odl_resource = self.get_odl_resource(resource_type, resource)
        self.assertIsNotNone(odl_resource)

    def resource_update_test(self, resource_type, resource):
        """Update the resource's name via the API and verify it in ODL."""
        update_field = 'name'
        update_value = 'bubu'
        resource = self.get_odl_resource(resource_type, resource)
        self.assertNotEqual(update_value,
                            resource[resource_type][update_field])
        # self._update is provided by the neutron test base classes.
        self._update(odl_utils.make_url_object(resource_type),
                     resource[resource_type]['id'],
                     {resource_type: {update_field: update_value}})
        resource = self.get_odl_resource(resource_type, resource)
        self.assertEqual(update_value, resource[resource_type][update_field])

    def resource_delete_test(self, resource_type, resource):
        """Delete the resource via the API and verify it is gone from ODL."""
        self._delete(odl_utils.make_url_object(resource_type),
                     resource[resource_type]['id'])
        self.assertIsNone(self.get_odl_resource(resource_type, resource))
class V2DriverAdjustment(test_base_db.ODLBaseDbTestCase):
    """Adapt ODL resource retrieval for the asynchronous v2 driver."""

    def setUp(self):
        super(V2DriverAdjustment, self).setUp()
        self.useFixture(test_base.JournalWorkerPidFileFixture())

    def get_odl_resource(self, resource_type, resource):
        # The v2 driver syncs through a journal, so wait for all pending
        # and in-flight journal rows to drain before querying ODL.
        def journal_is_empty():
            pending = db.get_all_db_rows_by_state(
                self.db_context, odl_const.PENDING)
            processing = db.get_all_db_rows_by_state(
                self.db_context, odl_const.PROCESSING)
            return not pending and not processing

        utils.wait_until_true(journal_is_empty, 5, 0.5)
        return super(V2DriverAdjustment, self).get_odl_resource(
            resource_type, resource)
networking-odl-16.0.0/networking_odl/tests/base.py0000664000175000017500000001331113656750541022251 0ustar zuulzuul00000000000000# Copyright (c) 2015-2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mock
from neutron.tests import base
from neutron_lib.callbacks import registry
from neutron_lib import fixture as nl_fixture
from oslo_config import cfg
from oslo_config import fixture as config_fixture
from networking_odl.common import odl_features
from networking_odl.journal import full_sync
from networking_odl.journal import journal
from networking_odl.journal import periodic_task
from networking_odl.ml2 import pseudo_agentdb_binding
class DietTestCase(base.DietTestCase):
    """Slim test case with a convenience wrapper around mock.patch.object."""

    def patch(self, target, name, *args, **kwargs):
        """Patch target.name for the test's duration and return the mock."""
        patcher = mock.patch.object(target, name, *args, **kwargs)
        mocked = patcher.start()
        self.addCleanup(patcher.stop)
        return mocked
class OpenDaylightRestClientFixture(fixtures.Fixture):
    """Configure ml2_odl so the REST client initializes without errors.

    URL/user/password are only set to satisfy required-option checks; they
    are never used because requests.sessions.Session.request is mocked out.
    """

    def _setUp(self):
        super(OpenDaylightRestClientFixture, self)._setUp()
        self.cfg = self.useFixture(config_fixture.Config())
        mock.patch('requests.sessions.Session.request').start()
        self.cfg.config(
            url='http://localhost:8080/controller/nb/v2/neutron',
            username='someuser',
            password='somepass',
            port_binding_controller='legacy-port-binding',
            group='ml2_odl')
class OpenDaylightRestClientGlobalFixture(fixtures.Fixture):
    """Mock out get_client() on a global REST client holder for a test."""

    def __init__(self, global_client):
        super(OpenDaylightRestClientGlobalFixture, self).__init__()
        # The shared client object whose get_client() will be patched.
        self._global_client = global_client

    def _setUp(self):
        super(OpenDaylightRestClientGlobalFixture, self)._setUp()
        mock.patch.object(self._global_client, 'get_client').start()
class OpenDaylightFeaturesFixture(fixtures.Fixture):
    """Initialize odl_features without contacting a real controller."""

    def _setUp(self):
        super(OpenDaylightFeaturesFixture, self)._setUp()
        self.cfg = self.useFixture(config_fixture.Config())
        # Only fill in options a previous fixture has not set already.
        defaults = (('url', 'http://127.0.0.1:9999'),
                    ('username', 'someuser'),
                    ('password', 'somepass'))
        for option, value in defaults:
            if getattr(cfg.CONF.ml2_odl, option) is None:
                self.cfg.config(group='ml2_odl', **{option: value})
        # Supplying the feature list via config ensures _fetch_features is
        # never called; fetching would block the main thread.
        self.cfg.config(odl_features_json='{"features": {"feature": []}}',
                        group='ml2_odl')
        odl_features.init()
        self.addCleanup(odl_features.deinit)
class OpenDaylightJournalThreadFixture(fixtures.Fixture):
    """Prevent the journal thread from starting during tests."""

    def _setUp(self):
        super(OpenDaylightJournalThreadFixture, self)._setUp()
        # Patch start() so no background journal thread is spawned.
        self.journal_thread_mock = mock.patch.object(
            journal.OpenDaylightJournalThread, 'start')
        self.journal_thread_mock.start()
        self.pidfile_fixture = self.useFixture(JournalWorkerPidFileFixture())

    def remock_atexit(self):
        """Restart the atexit patch and return the fresh mock.

        Lets a test make assertions about atexit registration after the
        original patch has already recorded calls.
        """
        self.pidfile_fixture.atexit_mock.stop()
        return self.pidfile_fixture.atexit_mock.start()
class JournalWorkerPidFileFixture(fixtures.Fixture):
    """Stub out atexit registration for journal worker pidfile cleanup."""

    def _setUp(self):
        super(JournalWorkerPidFileFixture, self)._setUp()
        # Every pidfile that is created for the JournalPeriodicProcessor
        # worker registers an operation to clean it when the interpreter
        # is about to exit. Tests each have a temporary directory where
        # they work, this directory is deleted after each test. That means
        # that by the time atexit is called the pidfile does not exist anymore
        # and therefore fails with an error. This avoids this problem.
        self.atexit_mock = mock.patch(
            'networking_odl.journal.worker.atexit.register'
        )
        self.atexit_mock.start()
class OpenDaylightPeriodicTaskFixture(fixtures.Fixture):
    """Prevent periodic tasks from spawning background threads in tests."""

    def _setUp(self):
        super(OpenDaylightPeriodicTaskFixture, self)._setUp()
        # Patch start() so PeriodicTask instances never actually run.
        self.task_start_mock = mock.patch.object(
            periodic_task.PeriodicTask, 'start')
        self.task_start_mock.start()
class OpenDaylightPseudoAgentPrePopulateFixture(
        nl_fixture.CallbackRegistryFixture):
    """Disable pseudo-agent DB pre-population callbacks during tests."""

    def _setUp(self):
        super(OpenDaylightPseudoAgentPrePopulateFixture, self)._setUp()
        mock.patch.object(
            pseudo_agentdb_binding.PseudoAgentDBBindingPrePopulate,
            'before_port_binding').start()

    # NOTE(yamahata): work around
    # CallbackRegistryFixture._restore causes stopping unstarted patcher
    # because some of base classes neutron test cases issue stop_all()
    # with tearDown method
    def _restore(self):
        registry._CALLBACK_MANAGER = self._orig_manager
        # mock.mock._is_started is a private mock helper; it guards against
        # stopping a patcher that stop_all() already stopped.
        if mock.mock._is_started(self.patcher):
            # this may cause RuntimeError('stop called on unstarted patcher')
            # due to stop_all called by base test cases
            self.patcher.stop()
class OpenDaylightFullSyncFixture(fixtures.Fixture):
    """Ensure full-sync resource registrations do not leak between tests."""

    def _setUp(self):
        super(OpenDaylightFullSyncFixture, self)._setUp()
        self.addCleanup(full_sync.FULL_SYNC_RESOURCES.clear)
networking-odl-16.0.0/networking_odl/l2gateway/0000775000175000017500000000000013656750617021527 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/l2gateway/__init__.py0000664000175000017500000000000013656750541023622 0ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/l2gateway/driver_v2.py0000664000175000017500000000735313656750541024007 0ustar zuulzuul00000000000000# Copyright (c) 2017 Ericsson India Global Service Pvt Ltd.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo_config import cfg
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
from networking_l2gw.services.l2gateway.common import constants
from networking_l2gw.services.l2gateway import service_drivers
from networking_odl.common import constants as odl_const
from networking_odl.common import postcommit
from networking_odl.journal import full_sync
from networking_odl.journal import journal
cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')
LOG = logging.getLogger(__name__)
# Maps each L2GW singular resource type to its plural form, as required by
# the full-sync registration performed in the driver below.
L2GW_RESOURCES = {
    odl_const.ODL_L2GATEWAY: odl_const.ODL_L2GATEWAYS,
    odl_const.ODL_L2GATEWAY_CONNECTION: odl_const.ODL_L2GATEWAY_CONNECTIONS
}
@postcommit.add_postcommit('l2_gateway', 'l2_gateway_connection')
class OpenDaylightL2gwDriver(service_drivers.L2gwDriver):
    """OpenDaylight L2Gateway Service Driver

    OpenStack-side driver that records L2 gateway operations in the
    journal, from where they are pushed asynchronously to the
    OpenDaylight L2GW facility.
    """

    def __init__(self, service_plugin, validator=None):
        super(OpenDaylightL2gwDriver, self).__init__(service_plugin, validator)
        self.service_plugin = service_plugin
        self.journal = journal.OpenDaylightJournalThread()
        full_sync.register(constants.L2GW, L2GW_RESOURCES)
        LOG.info("ODL: Started OpenDaylight L2Gateway V2 driver")

    @property
    def service_type(self):
        return constants.L2GW

    @log_helpers.log_method_call
    def create_l2_gateway_precommit(self, context, l2_gateway):
        # Record in the journal; the journal thread pushes the operation to
        # ODL after the enclosing transaction commits.
        journal.record(context, odl_const.ODL_L2GATEWAY,
                       l2_gateway['id'], odl_const.ODL_CREATE,
                       l2_gateway)

    @log_helpers.log_method_call
    def update_l2_gateway_precommit(self, context, l2_gateway):
        journal.record(context, odl_const.ODL_L2GATEWAY,
                       l2_gateway['id'], odl_const.ODL_UPDATE,
                       l2_gateway)

    @log_helpers.log_method_call
    def delete_l2_gateway_precommit(self, context, l2_gateway_id):
        journal.record(context, odl_const.ODL_L2GATEWAY,
                       l2_gateway_id, odl_const.ODL_DELETE,
                       l2_gateway_id)

    @log_helpers.log_method_call
    def create_l2_gateway_connection_precommit(self, context,
                                               l2_gateway_connection):
        # Rename 'l2_gateway_id' to 'gateway_id' in the journaled copy —
        # presumably the key name the ODL northbound API expects.
        odl_l2_gateway_connection = copy.deepcopy(l2_gateway_connection)
        odl_l2_gateway_connection['gateway_id'] = (
            l2_gateway_connection['l2_gateway_id'])
        odl_l2_gateway_connection.pop('l2_gateway_id')
        journal.record(context, odl_const.ODL_L2GATEWAY_CONNECTION,
                       odl_l2_gateway_connection['id'],
                       odl_const.ODL_CREATE,
                       odl_l2_gateway_connection)

    @log_helpers.log_method_call
    def delete_l2_gateway_connection_precommit(self, context,
                                               l2_gateway_connection_id):
        journal.record(context, odl_const.ODL_L2GATEWAY_CONNECTION,
                       l2_gateway_connection_id,
                       odl_const.ODL_DELETE,
                       l2_gateway_connection_id)
networking-odl-16.0.0/networking_odl/ceilometer/0000775000175000017500000000000013656750617021760 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/ceilometer/__init__.py0000664000175000017500000000000013656750541024053 0ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/ceilometer/network/0000775000175000017500000000000013656750617023451 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/ceilometer/network/__init__.py0000664000175000017500000000000013656750541025544 0ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/ceilometer/network/statistics/0000775000175000017500000000000013656750617025643 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/ceilometer/network/statistics/__init__.py0000664000175000017500000000000013656750541027736 0ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/ceilometer/network/statistics/opendaylight_v2/0000775000175000017500000000000013656750617030741 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/ceilometer/network/statistics/opendaylight_v2/__init__.py0000664000175000017500000000000013656750541033034 0ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/ceilometer/network/statistics/opendaylight_v2/driver.py0000664000175000017500000003001113656750541032575 0ustar zuulzuul00000000000000#
# Copyright 2017 Ericsson India Global Services Pvt Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from debtcollector import removals
from oslo_log import log
from six.moves import urllib_parse as urlparse
from ceilometer import keystone_client
from ceilometer.network.statistics import driver
from networking_odl.ceilometer.network.statistics.opendaylight_v2 import client
LOG = log.getLogger(__name__)
INT64_MAX_VALUE = (2 ** 64 / 2 - 1)
@removals.removed_class(
    'OpenDaylightDriver', version='Stein', removal_version='release T',
    message="Ceilometer driver is deprecated and will get removed from n-odl")
class OpenDaylightDriver(driver.Driver):
    """Driver of network info collector from OpenDaylight.

    This driver uses resources in "/etc/ceilometer/polling.yaml".
    Resource requires below conditions:

    * resource is url
    * scheme is "opendaylight.v2"

    This driver can be configured via query parameters.
    Supported parameters:

    * scheme:
      The scheme of request url to OpenDaylight REST API endpoint.
      (default http)
    * auth:
      Auth strategy of http.
      This parameter can be set basic or digest.(default None)
    * user:
      This is username that is used by auth.(default None)
    * password:
      This is password that is used by auth.(default None)

    e.g.::

      opendaylight.v2://127.0.0.1:8080/controller/statistics
      ?auth=basic&user=admin&password=admin&scheme=http

    In this case, the driver send request to below URLs:

      http://127.0.0.1:8080/controller/statistics/flow-capable-switches

    Example JSON response from OpenDaylight
        {
          flow_capable_switches: [{
            packet_in_messages_received: 501,
            packet_out_messages_sent: 300,
            ports: 1,
            flow_datapath_id: 55120148545607,
            tenant_id: ADMIN_ID,
            switch_port_counters: [{
              bytes_received: 1000,
              bytes_sent: 1000,
              duration: 600,
              packets_internal_received: 100,
              packets_internal_sent: 200,
              packets_received: 100,
              packets_received_drop: 0,
              packets_received_error: 0,
              packets_sent: 100,
              port_id: 4,
              tenant_id: PORT_1_TENANT_ID,
              uuid: PORT_1_ID
            }],
            table_counters: [{
              flow_count: 90,
              table_id: 0
            }]
          }]
        }
    """

    # Cached keystone project id of the 'admin' project; resolved lazily in
    # _prepare_cache and used as the fallback tenant for samples.
    admin_project_id = None

    @staticmethod
    def _get_int_sample(key, statistic, resource_id,
                        resource_meta, tenant_id):
        """Extract statistic[key] as a bounded int sample tuple.

        Returns None when the key is absent; values outside the signed
        64-bit range are clamped to 0 (treated as invalid counters).
        """
        if key not in statistic:
            return None
        value = int(statistic[key])
        if not (0 <= value <= INT64_MAX_VALUE):
            value = 0
        return value, resource_id, resource_meta, tenant_id

    def _prepare_cache(self, endpoint, params, cache):
        """Fetch switch statistics once per polling cycle and cache them.

        The cache dict is shared between meters of the same cycle, so the
        (possibly empty) result is stored even on failure to avoid
        re-contacting ODL for every meter.
        """
        if 'network.statistics.opendaylight_v2' in cache:
            return cache['network.statistics.opendaylight_v2']
        data = {}
        odl_params = {}
        # Query-string values arrive as lists; take the first occurrence.
        if 'auth' in params:
            odl_params['auth'] = params['auth'][0]
        if 'user' in params:
            odl_params['user'] = params['user'][0]
        if 'password' in params:
            odl_params['password'] = params['password'][0]
        cs = client.Client(self.conf, endpoint, odl_params)
        if not self.admin_project_id:
            try:
                ks_client = keystone_client.get_client(self.conf)
                project = ks_client.projects.find(name='admin')
                if project:
                    self.admin_project_id = project.id
            except Exception:
                # Without the admin tenant id samples cannot be attributed;
                # cache the empty result and bail out for this cycle.
                LOG.exception('Unable to fetch admin tenant id')
                cache['network.statistics.opendaylight_v2'] = data
                return data
        try:
            # get switch statistics
            data['switch'] = cs.switch_statistics.get_statistics()
            data['admin_tenant_id'] = self.admin_project_id
        except client.OpenDaylightRESTAPIFailed:
            LOG.exception('OpenDaylight REST API Failed. ')
        except Exception:
            LOG.exception('Failed to connect to OpenDaylight'
                          ' REST API')
        cache['network.statistics.opendaylight_v2'] = data
        return data

    def get_sample_data(self, meter_name, parse_url, params, cache):
        """Return a list of sample tuples for meter_name, or None.

        None means this driver has no way to produce the meter; an empty
        list means the data fetch yielded nothing.
        """
        extractor = self._get_extractor(meter_name)
        if extractor is None:
            # The way to getting meter is not implemented in this driver or
            # OpenDaylight REST API has not api to getting meter.
            return None
        # NOTE: 'iter' shadows the builtin here; kept for compatibility.
        iter = self._get_iter(meter_name)
        if iter is None:
            # The way to getting meter is not implemented in this driver or
            # OpenDaylight REST API has not api to getting meter.
            return None
        # Rebuild the endpoint URL with the scheme from the query params.
        parts = urlparse.ParseResult(params.get('scheme', ['http'])[0],
                                     parse_url.netloc,
                                     parse_url.path,
                                     None,
                                     None,
                                     None)
        endpoint = urlparse.urlunparse(parts)
        data = self._prepare_cache(endpoint, params, cache)
        samples = []
        if data:
            for sample in iter(extractor, data):
                if sample is not None:
                    # set controller name to resource_metadata
                    sample[2]['controller'] = 'OpenDaylight_V2'
                    samples.append(sample)
        return samples

    def _get_iter(self, meter_name):
        """Map a meter name to the generator that walks the statistics."""
        if meter_name == 'switch' or meter_name == 'switch.ports':
            return self._iter_switch
        elif meter_name.startswith('switch.table'):
            return self._iter_table
        elif meter_name.startswith('switch.port'):
            return self._iter_switch_port
        elif meter_name.startswith('port'):
            return self._iter_port
        return None

    def _get_extractor(self, meter_name):
        """Map a meter name to its '_<name>' extractor method, or None."""
        if (meter_name == 'switch.port' or
                meter_name.startswith('switch.port.')):
            # switch.port.* meters reuse the port extractors below.
            meter_name = meter_name.split('.', 1)[1]
        method_name = '_' + meter_name.replace('.', '_')
        return getattr(self, method_name, None)

    @staticmethod
    def _iter_switch(extractor, data):
        """Yield one sample per flow-capable switch."""
        for switch in data['switch']['flow_capable_switches']:
            yield (extractor(switch, str(switch['flow_datapath_id']), {},
                             (switch.get('tenant_id') or
                              data['admin_tenant_id'])))

    @staticmethod
    def _switch(statistic, resource_id,
                resource_meta, tenant_id):
        # Existence meter: each switch counts as 1.
        return 1, resource_id, resource_meta, tenant_id

    @staticmethod
    def _switch_ports(statistic, resource_id,
                      resource_meta, tenant_id):
        return OpenDaylightDriver._get_int_sample(
            'ports', statistic, resource_id,
            resource_meta, tenant_id)

    @staticmethod
    def _iter_switch_port(extractor, data):
        """Yield one sample per (switch, port) counter entry."""
        for switch in data['switch']['flow_capable_switches']:
            if 'switch_port_counters' in switch:
                switch_id = str(switch['flow_datapath_id'])
                tenant_id = (switch.get('tenant_id') or
                             data['admin_tenant_id'])
                for port_statistic in switch['switch_port_counters']:
                    port_id = port_statistic['port_id']
                    resource_id = '%s:%d' % (switch_id, port_id)
                    resource_meta = {'switch': switch_id,
                                     'port_number_on_switch': port_id}
                    if 'uuid' in port_statistic:
                        neutron_port_id = port_statistic['uuid']
                        resource_meta['neutron_port_id'] = neutron_port_id
                    yield extractor(port_statistic, resource_id,
                                    resource_meta, tenant_id)

    @staticmethod
    def _iter_port(extractor, data):
        """Yield one sample per port that maps to a neutron port (uuid)."""
        resource_meta = {}
        for switch in data['switch']['flow_capable_switches']:
            if 'switch_port_counters' in switch:
                for port_statistic in switch['switch_port_counters']:
                    if 'uuid' in port_statistic:
                        resource_id = port_statistic['uuid']
                        tenant_id = port_statistic.get('tenant_id')
                        yield extractor(
                            port_statistic, resource_id, resource_meta,
                            tenant_id or data['admin_tenant_id'])

    @staticmethod
    def _port(statistic, resource_id, resource_meta, tenant_id):
        # Existence meter: each port counts as 1.
        return 1, resource_id, resource_meta, tenant_id

    @staticmethod
    def _port_uptime(statistic, resource_id,
                     resource_meta, tenant_id):
        return OpenDaylightDriver._get_int_sample(
            'duration', statistic, resource_id,
            resource_meta, tenant_id)

    @staticmethod
    def _port_receive_packets(statistic, resource_id,
                              resource_meta, tenant_id):
        return OpenDaylightDriver._get_int_sample(
            'packets_received', statistic, resource_id,
            resource_meta, tenant_id)

    @staticmethod
    def _port_transmit_packets(statistic, resource_id,
                               resource_meta, tenant_id):
        return OpenDaylightDriver._get_int_sample(
            'packets_sent', statistic, resource_id,
            resource_meta, tenant_id)

    @staticmethod
    def _port_receive_bytes(statistic, resource_id,
                            resource_meta, tenant_id):
        return OpenDaylightDriver._get_int_sample(
            'bytes_received', statistic, resource_id,
            resource_meta, tenant_id)

    @staticmethod
    def _port_transmit_bytes(statistic, resource_id,
                             resource_meta, tenant_id):
        return OpenDaylightDriver._get_int_sample(
            'bytes_sent', statistic, resource_id,
            resource_meta, tenant_id)

    @staticmethod
    def _port_receive_drops(statistic, resource_id,
                            resource_meta, tenant_id):
        return OpenDaylightDriver._get_int_sample(
            'packets_received_drop', statistic, resource_id,
            resource_meta, tenant_id)

    @staticmethod
    def _port_receive_errors(statistic, resource_id,
                             resource_meta, tenant_id):
        return OpenDaylightDriver._get_int_sample(
            'packets_received_error', statistic,
            resource_id, resource_meta, tenant_id)

    @staticmethod
    def _iter_table(extractor, data):
        """Yield one sample per (switch, flow table) counter entry."""
        for switch_statistic in data['switch']['flow_capable_switches']:
            if 'table_counters' in switch_statistic:
                switch_id = str(switch_statistic['flow_datapath_id'])
                tenant_id = (switch_statistic.get('tenant_id') or
                             data['admin_tenant_id'])
                for table_statistic in switch_statistic['table_counters']:
                    resource_meta = {'switch': switch_id}
                    resource_id = ("%s:table:%d" %
                                   (switch_id, table_statistic['table_id']))
                    yield extractor(table_statistic, resource_id,
                                    resource_meta, tenant_id)

    @staticmethod
    def _switch_table_active_entries(statistic, resource_id,
                                     resource_meta, tenant_id):
        return OpenDaylightDriver._get_int_sample(
            'flow_count', statistic, resource_id,
            resource_meta, tenant_id)
networking-odl-16.0.0/networking_odl/ceilometer/network/statistics/opendaylight_v2/client.py0000664000175000017500000001036113656750541032566 0ustar zuulzuul00000000000000#
# Copyright 2017 Ericsson India Global Services Pvt Ltd. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from debtcollector import removals
from oslo_log import log
import requests
from requests import auth
import six
from ceilometer.i18n import _
LOG = log.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class _Base(object):
    """Base class of OpenDaylight REST APIs Clients."""

    # abc.abstractproperty is deprecated since Python 3.3; stacking
    # @property over @abc.abstractmethod is the supported, behavior-
    # compatible replacement (works on Python 2 as well).
    @property
    @abc.abstractmethod
    def base_url(self):
        """Returns base url for each REST API."""

    def __init__(self, client):
        # The HTTP client used to talk to the ODL endpoint.
        self.client = client

    def get_statistics(self):
        """Issue a GET for this API's base_url and return the parsed body."""
        return self.client.request(self.base_url)
class OpenDaylightRESTAPIFailed(Exception):
    """Raised when the OpenDaylight REST API returns a non-2xx status."""
    pass
@removals.removed_class(
    'SwitchStatisticsAPIClient', version='Stein', removal_version='release T',
    message="Ceilometer driver is deprecated and will get removed from n-odl")
class SwitchStatisticsAPIClient(_Base):
    """OpenDaylight Switch Statistics REST API Client.

    Base URL:
      {endpoint}/flow-capable-switches
    """
    base_url = '/flow-capable-switches'
@removals.removed_class(
    'Client', version='Stein', removal_version='release T',
    message="Ceilometer driver is deprecated and will get removed from n-odl")
class Client(object):
    """HTTP client for the OpenDaylight statistics REST API."""

    def __init__(self, conf, endpoint, params):
        # Per-API sub-clients hang off this object.
        self.switch_statistics = SwitchStatisticsAPIClient(self)
        self._endpoint = endpoint
        self.conf = conf
        self._req_params = self._get_req_params(params)
        self.session = requests.Session()

    def _get_req_params(self, params):
        """Build the kwargs passed to every session.get() call.

        Supports optional 'basic' or 'digest' HTTP auth via params.
        """
        req_params = {
            'headers': {
                'Accept': 'application/json'
            },
            'timeout': self.conf.http_timeout,
        }
        auth_way = params.get('auth')
        if auth_way in ['basic', 'digest']:
            user = params.get('user')
            password = params.get('password')
            if auth_way == 'basic':
                auth_class = auth.HTTPBasicAuth
            else:
                auth_class = auth.HTTPDigestAuth
            req_params['auth'] = auth_class(user, password)
        return req_params

    def _log_req(self, url):
        """Log the outgoing request as an equivalent curl command.

        The password is masked; only the username is shown.
        """
        curl_command = ['REQ: curl -i -X GET', '"%s"' % (url)]
        if 'auth' in self._req_params:
            auth_class = self._req_params['auth']
            if isinstance(auth_class, auth.HTTPBasicAuth):
                curl_command.append('--basic')
            else:
                curl_command.append('--digest')
            curl_command.append('--user "%s":"***"' % auth_class.username)
        for name, value in six.iteritems(self._req_params['headers']):
            curl_command.append('-H "%s: %s"' % (name, value))
        LOG.debug(' '.join(curl_command))

    @staticmethod
    def _log_res(resp):
        """Log the HTTP response status line, headers and body."""
        dump = ['RES: \n', 'HTTP %.1f %s %s\n' % (resp.raw.version,
                                                  resp.status_code,
                                                  resp.reason)]
        dump.extend('%s: %s\n' % (k, v)
                    for k, v in six.iteritems(resp.headers))
        dump.append('\n')
        if resp.content:
            dump.extend([resp.content, '\n'])
        LOG.debug(''.join(dump))

    def _http_request(self, url):
        """GET url and return the JSON body.

        :raises OpenDaylightRESTAPIFailed: on any non-2xx response.
        """
        if self.conf.debug:
            self._log_req(url)
        resp = self.session.get(url, **self._req_params)
        if self.conf.debug:
            self._log_res(resp)
        if resp.status_code // 100 != 2:
            raise OpenDaylightRESTAPIFailed(
                _('OpenDaylight API returned %(status)s %(reason)s') %
                {'status': resp.status_code, 'reason': resp.reason})
        return resp.json()

    def request(self, path):
        """GET the given path relative to the configured endpoint."""
        url = self._endpoint + path
        return self._http_request(url)
networking-odl-16.0.0/networking_odl/journal/0000775000175000017500000000000013656750617021302 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/journal/__init__.py0000664000175000017500000000000013656750541023375 0ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/journal/full_sync.py0000664000175000017500000001230713656750541023651 0ustar zuulzuul00000000000000#
# Copyright (C) 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from neutron_lib.db import api as db_api
from neutron_lib.plugins import directory
import requests
from networking_odl.common import client
from networking_odl.common import constants as odl_const
from networking_odl.db import db
from networking_odl.journal import base_driver
from networking_odl.journal import journal
# Sentinel ("canary") network used to detect whether ODL lost its data:
# a full sync is needed when this network is missing from ODL and is not
# already pending creation in the journal.
_CANARY_NETWORK_ID = "bd8db3a8-2b30-4083-a8b3-b3fd46401142"
_CANARY_TENANT_ID = "bd8db3a8-2b30-4083-a8b3-b3fd46401142"
_CANARY_NETWORK_DATA = {'id': _CANARY_NETWORK_ID,
                        'tenant_id': _CANARY_TENANT_ID,
                        'name': 'Sync Canary Network',
                        'admin_state_up': False}
# Define which pending operation types should be deleted before a full
# sync: create/update entries are superseded by the sync itself.
_OPS_TO_DELETE_ON_SYNC = (odl_const.ODL_CREATE, odl_const.ODL_UPDATE)
_CLIENT = client.OpenDaylightRestClientGlobal()
# Resource types in the order they are synced, so that referenced
# resources (e.g. networks) are recorded before their dependents
# (e.g. subnets, ports).
_ORDERED_ODL_RESOURCES = (
    odl_const.ODL_SG,
    odl_const.ODL_SG_RULE,
    odl_const.ODL_NETWORK,
    odl_const.ODL_SUBNET,
    odl_const.ODL_ROUTER,
    odl_const.ODL_PORT,
    odl_const.ODL_FLOATINGIP,
    odl_const.ODL_QOS_POLICY,
    odl_const.ODL_TRUNK,
    odl_const.ODL_BGPVPN,
    odl_const.ODL_BGPVPN_NETWORK_ASSOCIATION,
    odl_const.ODL_BGPVPN_ROUTER_ASSOCIATION,
    odl_const.ODL_SFC_FLOW_CLASSIFIER,
    odl_const.ODL_SFC_PORT_PAIR,
    odl_const.ODL_SFC_PORT_PAIR_GROUP,
    odl_const.ODL_SFC_PORT_CHAIN,
    odl_const.ODL_L2GATEWAY,
    odl_const.ODL_L2GATEWAY_CONNECTION,
)
# TODO(rajivk): Remove this variable, while fixing recovery
# Maps driver/plugin alias -> its resources dict (see register()).
ALL_RESOURCES = {}
# Maps resource type -> handler callable used to fetch all resources of
# that type during a full sync.
FULL_SYNC_RESOURCES = {}
def register(driver, resources, handler=None):
    """Register full-sync handling for *resources* owned by *driver*.

    :param driver: plugin alias used to look the plugin up in the directory
    :param resources: dict mapping resource type -> getter method suffix
    :param handler: optional callable(context, resource_type) returning all
                    resources of that type; when omitted, a default built on
                    the plugin's getters is used.
    """
    def _default_handler(context, resource_type):
        # Fetch every resource of this type via the registered plugin.
        return get_resources(context, driver, resources[resource_type])

    ALL_RESOURCES[driver] = resources
    handler = handler or _default_handler
    for resource_type in resources:
        FULL_SYNC_RESOURCES[resource_type] = handler
@db_api.retry_if_session_inactive()
@db_api.CONTEXT_WRITER.savepoint
def full_sync(context):
    """Maintenance phase: resync everything to ODL if ODL lost its data.

    When the canary network is absent from ODL (and not already queued),
    all pending create/update journal entries are dropped and a 'create'
    entry is recorded for every existing resource, in dependency order.
    Finally the canary network itself is recorded so subsequent runs see
    the sync as done (or in progress).
    """
    if not _full_sync_needed(context):
        return

    db.delete_pending_rows(context, _OPS_TO_DELETE_ON_SYNC)
    # Record resources in dependency order so parents precede children.
    for resource_type in _ORDERED_ODL_RESOURCES:
        handler = FULL_SYNC_RESOURCES.get(resource_type)
        if handler:
            _sync_resources(context, resource_type, handler)
    journal.record(context, odl_const.ODL_NETWORK, _CANARY_NETWORK_ID,
                   odl_const.ODL_CREATE, _CANARY_NETWORK_DATA)
def _full_sync_needed(context):
    """Return True when the canary network is gone from ODL and there is no
    pending journal entry that would recreate it."""
    if not _canary_network_missing_on_odl():
        return False
    return _canary_network_not_in_journal(context)
def _canary_network_missing_on_odl():
    """Check whether the canary network is absent from ODL.

    :returns: True when ODL is reachable and the canary network is not
        found; False when the network is present.
    :raises requests.HTTPError: for any other HTTP error status, since we
        don't know how to deal with it here.
    """
    # Try to reach the ODL server, sometimes it might be up & responding to
    # HTTP calls but inoperative..
    # NOTE: named odl_client so it doesn't shadow the module-level `client`
    # import.
    odl_client = _CLIENT.get_client()
    response = odl_client.get(odl_const.ODL_NETWORKS)
    response.raise_for_status()

    response = odl_client.get(odl_const.ODL_NETWORKS + "/" + _CANARY_NETWORK_ID)
    if response.status_code == requests.codes.not_found:
        return True

    # In case there was an error raise it up because we don't know how to deal
    # with it..
    response.raise_for_status()
    return False
def _canary_network_not_in_journal(context):
    """Return True if no pending/processing 'create' exists for the canary."""
    pending_creates = db.get_pending_or_processing_ops(
        context, _CANARY_NETWORK_ID, operation=odl_const.ODL_CREATE)
    return not pending_creates
def get_resources_require_id(plugin, context, get_resources_for_id,
                             method_name_for_resource):
    """Collect resources that must be fetched individually by ID.

    :param plugin: plugin exposing the per-ID getter method
    :param context: request context passed to the getters
    :param get_resources_for_id: callable(context) yielding dicts with 'id'
    :param method_name_for_resource: name of the plugin getter to call per ID
    :returns: flat list of all fetched resources
    """
    collected = []
    for id_holder in get_resources_for_id(context):
        getter = getattr(plugin, method_name_for_resource)
        fetched = getter(context, id_holder['id'])
        if fetched:
            collected.extend(fetched)
    return collected
def get_resources(context, plugin_type, resource_type):
    """Fetch all *resource_type* resources from the *plugin_type* plugin."""
    plugin = directory.get_plugin(plugin_type)
    getter_name = 'get_%s' % resource_type
    getter = getattr(plugin, getter_name)
    return getter(context)
def _sync_resources(context, object_type, handler):
    """Journal a 'create' entry for every existing resource of *object_type*."""
    for resource in handler(context, object_type):
        journal.record(context, object_type, resource['id'],
                       odl_const.ODL_CREATE, resource)
@db_api.retry_if_session_inactive()
# TODO(rajivk): Change name from sync_resource to _sync_resources
# once, we are completely moved to new sync mechanism to plug new syncing
# mechanism.
def sync_resources(context, resource_type):
    """Journal a 'create' for every existing resource of *resource_type*.

    Uses the resource's registered driver (see base_driver) to fetch the
    current resources, and records all of them within a single savepoint
    so a partial sync is rolled back as a unit.
    """
    driver = base_driver.get_driver(resource_type)
    resources = driver.get_resources_for_full_sync(context, resource_type)
    with db_api.CONTEXT_WRITER.savepoint.using(context):
        for resource in resources:
            journal.record(context, resource_type, resource['id'],
                           odl_const.ODL_CREATE, resource)
networking-odl-16.0.0/networking_odl/journal/worker.py0000664000175000017500000001063713656750541023170 0ustar zuulzuul00000000000000# Copyright (c) 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import atexit
import os
from neutron_lib import worker
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
# TODO(mpeterson): this should go back to the previous block once the problems
# with flake8-import-order are fixed.
from neutron.agent.linux import daemon
from networking_odl._i18n import _
from networking_odl.journal import cleanup
from networking_odl.journal import full_sync
from networking_odl.journal import journal
from networking_odl.journal import periodic_task
from networking_odl.journal import recovery
LOG = logging.getLogger(__name__)
class JournalPeriodicProcessor(worker.BaseWorker):
    """Responsible for running the periodic processing of the journal.

    This is a separate worker as the regular journal thread is called when an
    operation finishes and that run will take care of any and all entries
    that might be present in the journal, including the one relating to that
    operation.

    A periodic run over the journal is thus necessary for cases when journal
    entries in the aforementioned run didn't process correctly due to some
    error (usually a connection problem) and need to be retried.
    """

    def __init__(self):
        super(JournalPeriodicProcessor, self).__init__()
        # Journal thread is created stopped; start() launches it.
        self._journal = journal.OpenDaylightJournalThread(start_thread=False)
        # Seconds between periodic journal wake-ups.
        self._interval = cfg.CONF.ml2_odl.sync_timeout
        self._timer = None
        self._maintenance_task = None
        # None = never started, True = running, False = stopped; the
        # initial None also gates the one-time atexit registration below.
        self._running = None
        self.pidfile = None

    def _create_pidfile(self):
        """Write this worker's PID file and register its removal at exit."""
        pidfile = os.path.join(cfg.CONF.state_path,
                               type(self).__name__.lower() + '.pid')
        self.pidfile = daemon.Pidfile(pidfile, 'python')
        # NOTE(mpeterson): We want self._running to be None before the first
        # run so atexit is only registered once and not several times.
        if self._running is None:
            atexit.unregister(self._delete_pidfile)
            atexit.register(self._delete_pidfile)
        self.pidfile.write(os.getpid())

    def _delete_pidfile(self):
        """Unlock and remove the PID file, if one was created."""
        if self.pidfile is not None:
            self.pidfile.unlock()
            os.remove(str(self.pidfile))
            self.pidfile = None

    def start(self):
        """Start the journal thread, the periodic timer, the maintenance
        task and write the PID file.

        :raises RuntimeError: if the worker is already running.
        """
        if self._running:
            raise RuntimeError(
                _("Thread has to be stopped before started again")
            )
        super(JournalPeriodicProcessor, self).start()
        LOG.debug('JournalPeriodicProcessor starting')
        self._journal.start()
        self._timer = loopingcall.FixedIntervalLoopingCall(self._call_journal)
        self._timer.start(self._interval)
        self._start_maintenance_task()
        self._create_pidfile()
        self._running = True

    def stop(self):
        """Stop everything started by start(). Safe to call when stopped."""
        if not self._running:
            return
        LOG.debug('JournalPeriodicProcessor stopping')
        self._journal.stop()
        self._timer.stop()
        self._maintenance_task.cleanup()
        self._delete_pidfile()
        super(JournalPeriodicProcessor, self).stop()
        self._running = False

    def wait(self):
        # Nothing to join here; required by the BaseWorker interface.
        pass

    def reset(self):
        """Force an immediate maintenance run (worker reset hook)."""
        if self._maintenance_task is not None:
            self._maintenance_task.execute_ops(forced=True)

    def _call_journal(self):
        # Timer callback: wake the journal thread to process pending entries.
        self._journal.set_sync_event()

    def _start_maintenance_task(self):
        """Create the maintenance task, register its phases and start it."""
        self._maintenance_task = periodic_task.PeriodicTask(
            'maintenance', cfg.CONF.ml2_odl.maintenance_interval)
        # Phases execute in this order on every maintenance pass.
        for phase in (
                cleanup.delete_completed_rows,
                cleanup.cleanup_processing_rows,
                full_sync.full_sync,
                recovery.journal_recovery,
        ):
            self._maintenance_task.register_operation(phase)
        # Run once immediately so maintenance isn't delayed a full interval.
        self._maintenance_task.execute_ops(forced=True)
        self._maintenance_task.start()
networking-odl-16.0.0/networking_odl/journal/periodic_task.py0000664000175000017500000001010113656750541024461 0ustar zuulzuul00000000000000#
# Copyright (C) 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from neutron_lib import context as neutron_context
from neutron_lib.db import api as db_api
from oslo_log import log as logging
from oslo_service import loopingcall
from networking_odl.db import db
LOG = logging.getLogger(__name__)
class PeriodicTask(object):
    """Periodically executes registered operations ("phases") of a task.

    Execution is coordinated through the database: a DB lock ensures only
    one worker runs the task at a time, and a timestamp check skips runs
    that would happen within the configured interval of the previous one.
    """

    def __init__(self, task, interval):
        # Task name; used as the key for the DB lock/bookkeeping rows.
        self.task = task
        # Callables taking a context, run in registration order each pass.
        self.phases = []
        self.timer = loopingcall.FixedIntervalLoopingCall(self.execute_ops)
        # Seconds between runs; also the "recently executed" window.
        self.interval = interval

    def start(self):
        """Begin periodic execution every *interval* seconds."""
        self.timer.start(self.interval, stop_on_exception=False)

    def cleanup(self):
        """Stop the periodic timer and wait for it to finish."""
        # this method is used for unit test to tear down
        self.timer.stop()
        try:
            self.timer.wait()
        except AttributeError:
            # NOTE(yamahata): workaround
            # some tests call this cleanup without calling start
            pass

    @db_api.retry_if_session_inactive()
    @db_api.CONTEXT_WRITER.savepoint
    def _set_operation(self, context, operation):
        """Record in the DB which operation this task is currently running."""
        db.update_periodic_task(context, task=self.task,
                                operation=operation)

    def _execute_op(self, operation, context):
        """Run a single phase, logging start/finish and any failure."""
        op_details = operation.__name__
        if operation.__doc__:
            op_details += " (%s)" % operation.__doc__
        try:
            LOG.info("Starting %s phase of periodic task %s.",
                     op_details, self.task)
            self._set_operation(context, operation)
            operation(context)
            LOG.info("Finished %s phase of %s task.", op_details, self.task)
        except Exception:
            # A failing phase must not stop the remaining phases, so the
            # failure is only logged here.
            LOG.exception("Failed during periodic task operation %s.",
                          op_details)

    def task_already_executed_recently(self, context):
        """Return True if the task ran within the last *interval* seconds."""
        return db.was_periodic_task_executed_recently(
            context, self.task, self.interval)

    @db_api.retry_if_session_inactive()
    @db_api.CONTEXT_WRITER.savepoint
    def _clear_and_unlock_task(self, context):
        """Clear the current-operation marker and release the task lock."""
        db.update_periodic_task(context, task=self.task,
                                operation=None)
        db.unlock_periodic_task(context, self.task)

    @db_api.retry_if_session_inactive()
    @db_api.CONTEXT_WRITER.savepoint
    def _lock_task(self, context):
        """Try to take the DB lock for this task; return True on success."""
        return db.lock_periodic_task(context, self.task)

    def execute_ops(self, forced=False):
        """Run all registered phases once, honoring interval and lock.

        :param forced: when True, skip the "executed recently" check (the
                       DB lock is still honored).
        """
        LOG.info("Starting %s periodic task.", self.task)
        context = neutron_context.get_admin_context()

        # Lock make sure that periodic task is executed only after
        # specified interval. It makes sure that maintenance tasks
        # are not executed back to back.
        if not forced and self.task_already_executed_recently(context):
            LOG.info("Periodic %s task executed after periodic interval "
                     "Skipping execution.", self.task)
            return

        if not self._lock_task(context):
            LOG.info("Periodic %s task already running task", self.task)
            return

        try:
            for phase in self.phases:
                self._execute_op(phase, context)
        finally:
            # Always release the lock, even if a phase blew up.
            self._clear_and_unlock_task(context)
        LOG.info("%s task has been finished", self.task)

    def register_operation(self, phase):
        """Register a function to be run by the periodic task.

        :param phase: Function to call when the thread runs. The function will
                      receive a DB session to use for DB operations.
        """
        self.phases.append(phase)
        LOG.info("%s phase has been registered in %s task", phase, self.task)
networking-odl-16.0.0/networking_odl/journal/dependency_validations.py0000664000175000017500000001622713656750541026373 0ustar zuulzuul00000000000000# Copyright (c) 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from networking_odl._i18n import _
from networking_odl.common import constants as odl_const
from networking_odl.db import db
def _get_delete_dependencies(context, object_type, object_uuid):
    """Get dependent operations for a delete operation.

    Return any operations that pertain to the delete: Either create
    or update operations on the same object, or delete operations on other
    objects that depend on the deleted object.
    """
    # Pending/processing create or update operations on the object itself.
    deps = db.get_pending_or_processing_ops(
        context, object_uuid, operation=(odl_const.ODL_UPDATE,
                                         odl_const.ODL_CREATE))

    # Pending deletes of child resource types that reference this object.
    for child_type in _DELETE_DEPENDENCIES.get(object_type, ()):
        deps.extend(db.get_pending_delete_ops_with_parent(
            context, child_type, object_uuid))

    return deps
def _get_older_operations(context, object_ids):
    """Get any older operations.

    Return any operations still in the queue for the given ID(s).
    """
    # Accept either one ID or a list/tuple of IDs.
    if not isinstance(object_ids, (list, tuple)):
        object_ids = (object_ids,)

    older_ops = []
    for object_id in object_ids:
        older_ops += db.get_pending_or_processing_ops(context, object_id)
    return older_ops
def _generate_subnet_deps(data):
return data['network_id']
def _generate_port_deps(data):
object_ids = set(fixed_ip['subnet_id'] for fixed_ip in data['fixed_ips'])
object_ids = list(object_ids)
object_ids.append(data['network_id'])
qos_policy_id = data.get('qos_policy_id')
if qos_policy_id is not None:
object_ids.append(qos_policy_id)
return object_ids
def _generate_network_deps(data):
return data.get('qos_policy_id')
def _generate_sg_rule_deps(data):
return data['security_group_id']
def _generate_router_deps(data):
return data['gw_port_id']
def _generate_floatingip_deps(data):
object_ids = []
network_id = data.get('floating_network_id')
if network_id is not None:
object_ids.append(network_id)
port_id = data.get('port_id')
if port_id is not None:
object_ids.append(port_id)
router_id = data.get('router_id')
if router_id is not None:
object_ids.append(router_id)
return object_ids
def _generate_trunk_deps(data):
portids = [subport['port_id'] for subport in data['sub_ports']]
portids.append(data['port_id'])
return portids
def _generate_l2gateway_connection_deps(data):
object_ids = []
network_id = data.get('network_id')
if network_id is not None:
object_ids.append(network_id)
gateway_id = data.get('gateway_id')
if gateway_id is not None:
object_ids.append(gateway_id)
return object_ids
def _generate_sfc_port_pair_deps(data):
object_ids = []
ingress_port = data.get('ingress')
if ingress_port is not None:
object_ids.append(ingress_port)
egress_port = data.get('egress')
if egress_port is not None:
object_ids.append(egress_port)
return object_ids
def _generate_sfc_port_pair_group_deps(data):
return data['port_pairs']
def _generate_sfc_port_chain_deps(data):
object_ids = data['port_pair_groups'][:]
flow_classifiers = data['flow_classifiers'][:]
object_ids.extend(flow_classifiers)
return object_ids
def _generate_bgpvpn_deps(data):
object_ids = []
network_ids = data.get('networks')
if network_ids is not None:
object_ids.extend(network_ids)
router_ids = data.get('routers')
if router_ids is not None:
object_ids.extend(router_ids)
return object_ids
# Maps resource type -> function deriving the IDs of resources that a
# create/update of that resource type depends on (see calculate()).
_CREATE_OR_UPDATE_DEP_GENERATOR = {
    odl_const.ODL_NETWORK: _generate_network_deps,
    odl_const.ODL_SUBNET: _generate_subnet_deps,
    odl_const.ODL_PORT: _generate_port_deps,
    # TODO(yamahata): dependency between SG and PORT
    odl_const.ODL_SG_RULE: _generate_sg_rule_deps,
    odl_const.ODL_ROUTER: _generate_router_deps,
    odl_const.ODL_FLOATINGIP: _generate_floatingip_deps,
    odl_const.ODL_TRUNK: _generate_trunk_deps,
    odl_const.ODL_L2GATEWAY_CONNECTION: _generate_l2gateway_connection_deps,
    odl_const.ODL_SFC_PORT_PAIR: _generate_sfc_port_pair_deps,
    odl_const.ODL_SFC_PORT_PAIR_GROUP: _generate_sfc_port_pair_group_deps,
    odl_const.ODL_SFC_PORT_CHAIN: _generate_sfc_port_chain_deps,
    odl_const.ODL_BGPVPN: _generate_bgpvpn_deps,
}

# Maps resource type -> child resource types whose pending delete
# operations must complete before the parent itself may be deleted
# (see _get_delete_dependencies()).
_DELETE_DEPENDENCIES = {
    odl_const.ODL_NETWORK: (odl_const.ODL_SUBNET, odl_const.ODL_PORT,
                            odl_const.ODL_ROUTER,
                            odl_const.ODL_L2GATEWAY_CONNECTION,
                            odl_const.ODL_BGPVPN),
    odl_const.ODL_SUBNET: (odl_const.ODL_PORT,),
    odl_const.ODL_ROUTER: (odl_const.ODL_PORT, odl_const.ODL_FLOATINGIP,
                           odl_const.ODL_BGPVPN),
    odl_const.ODL_PORT: (odl_const.ODL_TRUNK,),
    # TODO(yamahata): dependency between SG and PORT
    odl_const.ODL_SG: (odl_const.ODL_SG_RULE,),
    odl_const.ODL_L2GATEWAY: (odl_const.ODL_L2GATEWAY_CONNECTION,),
    odl_const.ODL_SFC_FLOW_CLASSIFIER: (odl_const.ODL_SFC_PORT_CHAIN,),
    odl_const.ODL_SFC_PORT_PAIR: (odl_const.ODL_SFC_PORT_PAIR_GROUP,),
    odl_const.ODL_SFC_PORT_PAIR_GROUP: (odl_const.ODL_SFC_PORT_CHAIN,),
    odl_const.ODL_QOS_POLICY: (odl_const.ODL_PORT, odl_const.ODL_NETWORK),
}
def calculate(context, operation, object_type, object_uuid, data):
    """Calculate resource deps in journaled operations.

    As a rule of thumb validation takes into consideration only operations in
    pending or processing state, other states are irrelevant.

    :param context: enginefacade context
    :param operation: one of odl_const.ODL_CREATE/ODL_UPDATE/ODL_DELETE
    :param object_type: type of the journaled resource
    :param object_uuid: UUID of the journaled resource
    :param data: resource payload, used to derive create/update dependencies
    :returns: list of journal entries the new entry must wait for
    :raises ValueError: if *operation* is not one of the supported operations
    """
    deps = []
    if operation == odl_const.ODL_DELETE:
        return _get_delete_dependencies(context, object_type, object_uuid)
    elif operation == odl_const.ODL_UPDATE:
        # An update must wait for earlier create/update of the same object.
        deps.extend(
            db.get_pending_or_processing_ops(
                context, object_uuid,
                operation=(odl_const.ODL_CREATE, odl_const.ODL_UPDATE)))
    elif operation != odl_const.ODL_CREATE:
        raise ValueError(_("unsupported operation {}").format(operation))

    # Validate deps if there are any to validate.
    dep_generator = _CREATE_OR_UPDATE_DEP_GENERATOR.get(object_type)
    if dep_generator is not None:
        object_ids = dep_generator(data)
        if object_ids is not None:
            deps.extend(_get_older_operations(context, object_ids))

    return deps
networking-odl-16.0.0/networking_odl/journal/base_driver.py0000664000175000017500000000554213656750541024143 0ustar zuulzuul00000000000000# Copyright (c) 2017 NEC Corp
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.plugins import directory
from oslo_log import log as logging
from networking_odl.common import exceptions
LOG = logging.getLogger(__name__)
ALL_RESOURCES = {}
def get_driver(resource_type):
    """Return the driver registered for *resource_type*.

    :raises exceptions.ResourceNotRegistered: if no driver registered it.
    """
    driver = ALL_RESOURCES.get(resource_type)
    if driver is None:
        raise exceptions.ResourceNotRegistered(resource_type=resource_type)
    return driver
class ResourceBaseDriver(object):
    """Base class for all the drivers to support full sync

    ResourceBaseDriver class acts as base class for all the drivers and
    provides default behaviour for full sync functionality.

    A driver has to provide class or object attribute RESOURCES, specifying
    resources it manages. RESOURCES must be a dictionary, keys of the
    dictionary should be resource type and value should be method suffix
    or plural used for the resources.

    A driver has to provide plugin type for itself, as class or object
    attribute. Its value should be the same, as used by neutron to
    register plugin for the resources it manages.
    """
    RESOURCES = {}
    plugin_type = None

    def __init__(self, *args, **kwargs):
        super(ResourceBaseDriver, self).__init__(*args, **kwargs)
        # Register this driver instance for every resource type it manages,
        # so get_driver() can find it.
        for resource in self.RESOURCES:
            ALL_RESOURCES[resource] = self

    def _get_resource_getter(self, method_suffix):
        """Return the plugin's 'get_<method_suffix>' callable.

        :raises exceptions.PluginMethodNotFound: if the plugin has no such
            method.
        """
        method_name = "get_%s" % method_suffix
        try:
            return getattr(self.plugin, method_name)
        except AttributeError:
            raise exceptions.PluginMethodNotFound(plugin=self.plugin_type,
                                                  method=method_name)

    def get_resources_for_full_sync(self, context, resource_type):
        """Provide all resources of type resource_type """
        if resource_type not in self.RESOURCES:
            # NOTE(review): raises the exception class itself rather than an
            # instance; presumably UnsupportedResourceType requires no
            # kwargs here -- confirm against common/exceptions.
            raise exceptions.UnsupportedResourceType
        resource_getter = self._get_resource_getter(
            self.RESOURCES[resource_type])
        return resource_getter(context)

    @property
    def plugin(self):
        # The neutron plugin instance registered under plugin_type.
        return directory.get_plugin(self.plugin_type)

    def get_resource_for_recovery(self, context, obj):
        """Fetch the current Neutron resource for journal row *obj*."""
        resource_getter = self._get_resource_getter(obj.object_type)
        return resource_getter(context, obj.object_uuid)
networking-odl-16.0.0/networking_odl/journal/cleanup.py0000664000175000017500000000313213656750541023276 0ustar zuulzuul00000000000000#
# Copyright (C) 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from datetime import timedelta
from neutron_lib.db import api as db_api
from oslo_config import cfg
from oslo_log import log as logging
from networking_odl.common import constants as odl_const
from networking_odl.db import db
LOG = logging.getLogger(__name__)
@db_api.retry_if_session_inactive()
@db_api.CONTEXT_WRITER.savepoint
def delete_completed_rows(context):
    """Journal maintenance operation for deleting completed rows."""
    retention = cfg.CONF.ml2_odl.completed_rows_retention
    if retention > 0:
        # A non-positive retention means completed rows are kept forever.
        LOG.debug("Deleting completed rows")
        db.delete_rows_by_state_and_time(
            context, odl_const.COMPLETED,
            timedelta(seconds=retention))
@db_api.retry_if_session_inactive()
@db_api.CONTEXT_WRITER.savepoint
def cleanup_processing_rows(context):
    """Reset journal rows stuck in 'processing' back to pending."""
    reset_count = db.reset_processing_rows(
        context, cfg.CONF.ml2_odl.processing_timeout)
    if reset_count:
        LOG.info("Reset %(num)s orphaned rows back to pending",
                 {"num": reset_count})
networking-odl-16.0.0/networking_odl/journal/recovery.py0000664000175000017500000000752013656750541023512 0ustar zuulzuul00000000000000#
# Copyright (C) 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from neutron_lib.db import api as db_api
from neutron_lib import exceptions as nexc
from neutron_lib.plugins import directory
from oslo_log import log as logging
from networking_odl.common import client
from networking_odl.common import constants as odl_const
from networking_odl.common import exceptions
from networking_odl.db import db
from networking_odl.journal import base_driver
from networking_odl.journal import full_sync
from networking_odl.journal import journal
_CLIENT = client.OpenDaylightRestClientGlobal()
LOG = logging.getLogger(__name__)
@db_api.retry_if_session_inactive()
def journal_recovery(context):
    """Maintenance phase: try to resolve journal entries in FAILED state.

    For each failed entry the resource is looked up on ODL; depending on
    whether it exists there, the entry is completed, re-queued, or a
    compensating journal entry is recorded.
    """
    for row in db.get_all_db_rows_by_state(context, odl_const.FAILED):
        LOG.debug("Attempting recovery of journal entry %s.", row)
        try:
            odl_resource = _CLIENT.get_client().get_resource(
                row.object_type,
                row.object_uuid)
        except exceptions.UnsupportedResourceType:
            LOG.warning('Unsupported resource %s', row.object_type)
        except Exception:
            # Best effort: leave the row FAILED; it is retried next run.
            LOG.exception("Failure while recovering journal entry %s.", row)
        else:
            # Each row is resolved within its own savepoint so one failure
            # doesn't roll back the others.
            with db_api.CONTEXT_WRITER.savepoint.using(context):
                if odl_resource is not None:
                    _handle_existing_resource(context, row)
                else:
                    _handle_non_existing_resource(context, row)
def get_latest_resource(context, row):
    """Fetch the current Neutron state of the resource in journal *row*.

    :raises exceptions.UnsupportedResourceType: if no driver is registered
        for the row's resource type.
    """
    try:
        driver = base_driver.get_driver(row.object_type)
    except exceptions.ResourceNotRegistered:
        raise exceptions.UnsupportedResourceType(resource=row.object_type)
    else:
        return driver.get_resource_for_recovery(context, row)
# TODO(rajivk): Remove this method once recovery is fully supported
def _get_latest_resource(context, row):
    """Look the resource up in Neutron via the plugin that registered its
    type for full sync.

    :raises exceptions.UnsupportedResourceType: if no registered plugin
        handles the row's resource type.
    """
    object_type = row.object_type
    plugin_alias = next(
        (alias for alias, resources in full_sync.ALL_RESOURCES.items()
         if object_type in resources), None)
    if plugin_alias is None:
        raise exceptions.UnsupportedResourceType(resource=object_type)
    plugin = directory.get_plugin(plugin_alias)
    obj_getter = getattr(plugin, 'get_{}'.format(object_type))
    return obj_getter(context, row.object_uuid)
def _sync_resource_to_odl(context, row, operation_type, exists_on_odl):
    """Re-record *row* based on the resource's current state in Neutron.

    If Neutron still has the resource, a new journal entry with
    *operation_type* is recorded; if it is gone but still present on ODL,
    a compensating delete is recorded. In all cases the original failed
    entry is completed.
    """
    resource = None
    try:
        resource = _get_latest_resource(context, row)
    except nexc.NotFound:
        if exists_on_odl:
            # Gone from Neutron but present on ODL: delete it from ODL.
            journal.record(context, row.object_type,
                           row.object_uuid, odl_const.ODL_DELETE, [])
    else:
        journal.record(context, row.object_type, row.object_uuid,
                       operation_type, resource)
    journal.entry_complete(context, row)
def _handle_existing_resource(context, row):
    """Resolve a FAILED row whose resource does exist on ODL."""
    operation = row.operation
    if operation == odl_const.ODL_CREATE:
        # The create evidently succeeded; nothing more to do.
        journal.entry_complete(context, row)
        return
    if operation == odl_const.ODL_DELETE:
        # The delete didn't take effect; queue it again.
        db.update_db_row_state(context, row, odl_const.PENDING)
        return
    _sync_resource_to_odl(context, row, odl_const.ODL_UPDATE, True)
def _handle_non_existing_resource(context, row):
    """Resolve a FAILED row whose resource is absent from ODL."""
    if row.operation == odl_const.ODL_DELETE:
        # The deletion evidently succeeded; nothing more to do.
        journal.entry_complete(context, row)
        return
    _sync_resource_to_odl(context, row, odl_const.ODL_CREATE, False)
# TODO(mkolesni): Handle missing parent resources somehow.
networking-odl-16.0.0/networking_odl/journal/journal.py0000664000175000017500000002534313656750541023331 0ustar zuulzuul00000000000000# Copyright (c) 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from datetime import datetime
import threading
import time
from neutron_lib.callbacks import registry
from neutron_lib import context as nl_context
from neutron_lib.db import api as db_api
from neutron_lib.plugins import directory
from oslo_config import cfg
from oslo_db import exception
from oslo_log import log as logging
from requests import exceptions
from networking_odl.common import client
from networking_odl.common import constants as odl_const
from networking_odl.common import filters
from networking_odl.common import utils
from networking_odl.db import db
from networking_odl.journal import dependency_validations
LOG = logging.getLogger(__name__)
# Maps object_type -> custom URL builder (see register_url_builder());
# types not present here fall back to the default _make_url.
MAKE_URL = {}
# Template and tags for journal entry lifecycle log messages.
LOG_ENTRY_TEMPLATE = ("%(log_type)s (Entry ID: %(entry_id)s) - %(op)s "
                      "%(obj_type)s %(obj_id)s (Time stamp: %(timestamp)s)")
LOG_RECORDED = 'Recorded'
LOG_PROCESSING = 'Processing'
LOG_COMPLETED = 'Completed'
LOG_ERROR_PROCESSING = 'Error while processing'
def call_thread_on_end(func):
    """Decorator: after *func* returns, wake the instance's journal thread.

    The wrapped callable must be a method (or method-like callable) whose
    first argument exposes a ``journal`` attribute with a
    ``set_sync_event()`` method.
    """
    import functools

    # functools.wraps preserves the wrapped function's name/docstring,
    # which the bare wrapper previously clobbered.
    @functools.wraps(func)
    def new_func(obj, *args, **kwargs):
        return_value = func(obj, *args, **kwargs)
        obj.journal.set_sync_event()
        return return_value
    return new_func
def _enrich_port(plugin_context, ml2_context, object_type, operation, data):
    """Enrich the port with additional information needed by ODL.

    Returns a deep copy of *data*; the input dict is not mutated.
    """
    # NOTE(yamahata): work around of ODL neutron northbound
    # It passes security groups in port as list of dict for historical reasons.
    # keep its format for compatibility.
    # TODO(yamahata): drop this format conversion.
    if data[odl_const.ODL_SGS]:
        groups = [{'id': id_} for id_ in data['security_groups']]
    else:
        groups = []
    new_data = copy.deepcopy(data)
    new_data[odl_const.ODL_SGS] = groups

    # NOTE(yamahata): work around for port creation for router
    # tenant_id=''(empty string) is passed when port is created
    # by l3 plugin internally for router.
    # On the other hand, ODL doesn't accept empty string for tenant_id.
    # In that case, deduce tenant_id from network_id for now.
    # Right fix: modify Neutron so that don't allow empty string
    # for tenant_id even for port for internal use.
    # TODO(yamahata): eliminate this work around when neutron side
    # is fixed
    # assert port['tenant_id'] != ''
    if ('tenant_id' not in new_data or new_data['tenant_id'] == ''):
        if ml2_context:
            # Reuse the network already loaded on the ml2 driver context.
            network = ml2_context._network_context._network
        else:
            plugin = directory.get_plugin()
            network = plugin.get_network(plugin_context,
                                         new_data['network_id'])
        new_data['tenant_id'] = network['tenant_id']

    return new_data
def _log_entry(log_type, entry, log_level=logging.INFO, **kwargs):
    """Log a journal entry lifecycle event using LOG_ENTRY_TEMPLATE.

    Extra **kwargs are forwarded to LOG.log (e.g. exc_info).
    """
    timestamp = (datetime.now() - datetime.min).total_seconds()
    log_dict = {'log_type': log_type, 'op': entry.operation,
                'obj_type': entry.object_type, 'obj_id': entry.object_uuid,
                'entry_id': entry.seqnum, 'timestamp': timestamp}
    LOG.log(log_level, LOG_ENTRY_TEMPLATE, log_dict, **kwargs)
def record(plugin_context, object_type, object_uuid, operation, data,
           ml2_context=None):
    """Record a journal entry for *operation* on the given object.

    Port create/update payloads are first enriched with data ODL expects.
    Dependencies on other pending/processing entries are calculated so
    the journal thread processes entries in a safe order.

    :raises oslo_db.exception.RetryRequest: when a dependency row vanished
        between dependency calculation and row creation.
    """
    if (object_type == odl_const.ODL_PORT and
            operation in (odl_const.ODL_CREATE, odl_const.ODL_UPDATE)):
        data = _enrich_port(
            plugin_context, ml2_context, object_type, operation, data)

    # Calculate depending_on on other journal entries
    depending_on = dependency_validations.calculate(
        plugin_context, operation, object_type, object_uuid, data)

    # NOTE(mpeterson): Between the moment that a dependency is calculated and
    # the new entry is recorded in the journal, an operation can ocurr that
    # would make the dependency irrelevant. In that case we request a retry.
    # For more details, read the commit message that introduced this comment.
    try:
        entry = db.create_pending_row(
            plugin_context, object_type, object_uuid, operation, data,
            depending_on=depending_on)
    except exception.DBReferenceError as e:
        raise exception.RetryRequest(e)

    _log_entry(LOG_RECORDED, entry)
    LOG.debug('Entry with ID %(entry_id)s depends on these entries: '
              '%(depending_on)s',
              {'entry_id': entry.seqnum,
               'depending_on': [d.seqnum for d in depending_on]})
@db_api.retry_if_session_inactive()
@db_api.CONTEXT_WRITER.savepoint
def entry_complete(context, entry):
    """Mark *entry* done: delete it (or mark COMPLETED when retention is
    configured) and drop its dependency links."""
    if cfg.CONF.ml2_odl.completed_rows_retention == 0:
        db.delete_row(context, entry)
    else:
        db.update_db_row_state(context, entry, odl_const.COMPLETED)
    db.delete_dependency(context, entry)


@db_api.retry_if_session_inactive()
@db_api.CONTEXT_WRITER.savepoint
def entry_reset(context, entry):
    """Put *entry* back to PENDING so it gets processed again."""
    db.update_db_row_state(context, entry, odl_const.PENDING)


@db_api.retry_if_session_inactive()
@db_api.CONTEXT_WRITER.savepoint
def entry_update_state_by_retry_count(context, entry, retry_count):
    """Retry *entry*, presumably failing it once *retry_count* is exceeded
    (see db.update_pending_db_row_retry for the exact semantics)."""
    db.update_pending_db_row_retry(context, entry, retry_count)
def _make_url(row):
    """Default URL builder: the collection path for creates, the item path
    (collection/uuid) for everything else."""
    url_object = utils.make_url_object(row.object_type)
    if row.operation == odl_const.ODL_CREATE:
        return url_object
    return url_object + '/' + row.object_uuid
def register_url_builder(object_type, method):
    """Register *method* as the URL builder for *object_type* rows."""
    MAKE_URL[object_type] = method


def _build_url(row):
    """Build the REST URL for *row*, via a registered builder or _make_url."""
    return MAKE_URL.get(row.object_type, _make_url)(row)
class OpenDaylightJournalThread(object):
    """Thread worker for the OpenDaylight Journal Database.

    Owns a background thread that drains the journal table: each
    pending entry is turned into a REST call and sent to the ODL
    controller.  Producers wake the thread via :meth:`set_sync_event`
    after recording new entries.
    """
    # TODO: make these retry/backoff parameters configurable?
    _RETRY_SLEEP_MIN = 0.1
    _RETRY_SLEEP_MAX = 60
    def __init__(self, start_thread=True):
        # REST client used for all journal-driven calls to ODL.
        self.client = client.OpenDaylightRestClient.create_client()
        self._max_retry_count = cfg.CONF.ml2_odl.retry_count
        self._sleep_time = self._RETRY_SLEEP_MIN
        # Signals the sync thread that there is (possibly) new work.
        self.event = threading.Event()
        self._odl_sync_thread = self._create_odl_sync_thread()
        self._odl_sync_thread_stop = threading.Event()
        if start_thread:
            self.start()
    def _create_odl_sync_thread(self):
        # A fresh Thread object is needed on every (re)start because a
        # Python thread can only be started once.
        return threading.Thread(name='sync', target=self.run_sync_thread)
    def start(self):
        """Start (or restart after stop()) the background sync thread."""
        # Start the sync thread
        LOG.debug("Starting a new sync thread")
        if self._odl_sync_thread_stop.is_set():
            self._odl_sync_thread_stop.clear()
            self._odl_sync_thread = self._create_odl_sync_thread()
        if not self._odl_sync_thread.is_alive():
            self._odl_sync_thread.start()
    def stop(self, timeout=None):
        """Allows to stop the sync thread.

        Args:
            timeout (float): Time in seconds to wait for joining or None for
                no timeout.
        """
        # Stop the sync thread
        LOG.debug("Stopping the sync thread")
        if self._odl_sync_thread.is_alive():
            self._odl_sync_thread_stop.set()
            # Process the journal one last time before stopping.
            self.set_sync_event()
            self._odl_sync_thread.join(timeout)
    def set_sync_event(self):
        """Wake the sync thread so it processes pending journal entries."""
        self.event.set()
    @staticmethod
    def _json_data(row):
        """Map a journal row to (HTTP method, URL path, request body).

        NOTE(review): if ``row.operation`` were ever something other
        than create/update/delete, ``method``/``to_send`` would be
        unbound and the return would raise UnboundLocalError; the
        recording code appears to use only those three operations.
        """
        data = copy.deepcopy(row.data)
        filters.filter_for_odl(row.object_type, row.operation, data)
        if row.operation == odl_const.ODL_CREATE:
            method = 'post'
            to_send = {row.object_type: data}
        elif row.operation == odl_const.ODL_UPDATE:
            method = 'put'
            to_send = {row.object_type: data}
        elif row.operation == odl_const.ODL_DELETE:
            method = 'delete'
            to_send = None
        return method, _build_url(row), to_send
    def run_sync_thread(self):
        """Main loop of the sync thread: wait for a wakeup, then drain."""
        while not self._odl_sync_thread_stop.is_set():
            try:
                self.event.wait()
                self.event.clear()
                self.sync_pending_entries()
            except Exception:
                # Catch exceptions to protect the thread while running
                LOG.exception("Error on run_sync_thread")
    def sync_pending_entries(self):
        """Process pending journal rows, oldest first, until none remain."""
        LOG.debug("Start processing journal entries")
        context = nl_context.get_admin_context()
        entry = db.get_oldest_pending_db_row_with_lock(context)
        if entry is None:
            LOG.debug("No journal entries to process")
            return
        while entry is not None:
            stop_processing = self._sync_entry(context, entry)
            if stop_processing:
                break
            entry = db.get_oldest_pending_db_row_with_lock(context)
        LOG.debug("Finished processing journal entries")
    def _retry_sleep(self):
        # When something happened in the connection to ODL, don't busy loop
        # because it's likely to hit same issue.
        # Wait for a while for recovery
        time.sleep(self._sleep_time)
        # Exponential backoff, capped at _RETRY_SLEEP_MAX seconds.
        self._sleep_time = min(self._sleep_time * 2, self._RETRY_SLEEP_MAX)
    def _retry_reset(self):
        # A call succeeded: drop back to the minimum backoff delay.
        self._sleep_time = self._RETRY_SLEEP_MIN
    def _sync_entry(self, context, entry):
        """Send one journal entry to ODL.

        Returns True when processing should stop (connection trouble,
        so further entries would also fail), False otherwise.
        """
        _log_entry(LOG_PROCESSING, entry)
        method, urlpath, to_send = self._json_data(entry)
        # TODO(mkolesni): This logic is weirdly written, need to refactor it.
        try:
            self.client.sendjson(method, urlpath, to_send)
            registry.notify(entry.object_type, odl_const.BEFORE_COMPLETE,
                            self, context=context, operation=entry.operation,
                            row=entry)
            entry_complete(context, entry)
            self._retry_reset()
            _log_entry(LOG_COMPLETED, entry)
        except exceptions.ConnectionError:
            # Don't raise the retry count, just log an error & break
            entry_reset(context, entry)
            LOG.error("Cannot connect to the OpenDaylight Controller,"
                      " will not process additional entries")
            self._retry_sleep()
            return True
        except Exception:
            _log_entry(LOG_ERROR_PROCESSING, entry,
                       log_level=logging.ERROR, exc_info=True)
            entry_update_state_by_retry_count(
                context, entry, self._max_retry_count)
        return False
networking-odl-16.0.0/networking_odl/ml2/0000775000175000017500000000000013656750617020322 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/ml2/__init__.py0000664000175000017500000000000013656750541022415 0ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/ml2/mech_driver_v2.py0000664000175000017500000002334613656750541023576 0ustar zuulzuul00000000000000# Copyright (c) 2013-2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import multiprovidernet as mpnet_apidef
from neutron_lib.api.definitions import provider_net as providernet
from neutron_lib import constants as p_const
from neutron_lib.plugins import constants as nlib_const
from neutron_lib.plugins.ml2 import api
from oslo_config import cfg
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
from networking_odl.common import callback
from networking_odl.common import config as odl_conf
from networking_odl.common import constants as odl_const
from networking_odl.common import odl_features
from networking_odl.common import postcommit
from networking_odl.dhcp import odl_dhcp_driver as dhcp_driver
from networking_odl.journal import base_driver
from networking_odl.journal import full_sync
from networking_odl.journal import journal
from networking_odl.journal import worker
from networking_odl.ml2 import port_binding
from networking_odl.ml2 import port_status_update
from networking_odl.qos import qos_driver_v2 as qos_driver
from networking_odl.trunk import trunk_driver_v2 as trunk_driver
LOG = logging.getLogger(__name__)
@postcommit.add_postcommit('network', 'subnet', 'port')
class OpenDaylightMechanismDriver(api.MechanismDriver,
                                  base_driver.ResourceBaseDriver):
    """OpenDaylight Python Driver for Neutron.

    This code is the backend implementation for the OpenDaylight ML2
    MechanismDriver for OpenStack Neutron.

    The ``*_precommit`` hooks record each operation in the journal
    table inside the same DB transaction as the Neutron change; the
    matching ``*_postcommit`` hooks (generated by the class decorator)
    wake the journal thread, which replays the entries against the ODL
    controller asynchronously.
    """
    # Singular resource name -> plural collection name; registered for
    # full sync in initialize().
    RESOURCES = {
        odl_const.ODL_SG: odl_const.ODL_SGS,
        odl_const.ODL_SG_RULE: odl_const.ODL_SG_RULES,
        odl_const.ODL_NETWORK: odl_const.ODL_NETWORKS,
        odl_const.ODL_SUBNET: odl_const.ODL_SUBNETS,
        odl_const.ODL_PORT: odl_const.ODL_PORTS
    }
    plugin_type = nlib_const.CORE
    def initialize(self):
        """Wire up config options, callbacks, journal and sub-drivers."""
        LOG.debug("Initializing OpenDaylight ML2 driver")
        cfg.CONF.register_opts(odl_conf.odl_opts, "ml2_odl")
        self.sg_handler = callback.OdlSecurityGroupsHandler(
            self.sync_from_callback_precommit,
            self.sync_from_callback_postcommit)
        self.journal = journal.OpenDaylightJournalThread()
        self.port_binding_controller = port_binding.PortBindingManager.create()
        self.trunk_driver = trunk_driver.OpenDaylightTrunkDriverV2.create()
        if cfg.CONF.ml2_odl.enable_dhcp_service:
            self.dhcp_driver = dhcp_driver.OdlDhcpDriver()
        full_sync.register(nlib_const.CORE, self.RESOURCES)
        odl_features.init()
        if odl_const.ODL_QOS in cfg.CONF.ml2.extension_drivers:
            qos_driver.OpenDaylightQosDriver.create()
    def get_workers(self):
        """Return the neutron workers this driver needs running."""
        workers = [port_status_update.OdlPortStatusUpdate(),
                   worker.JournalPeriodicProcessor()]
        workers += self.port_binding_controller.get_workers()
        return workers
    @staticmethod
    def _record_in_journal(context, object_type, operation, data=None):
        # Default payload is the resource's current representation.
        if data is None:
            data = context.current
        journal.record(context._plugin_context, object_type,
                       context.current['id'], operation, data,
                       ml2_context=context)
    @log_helpers.log_method_call
    def create_network_precommit(self, context):
        OpenDaylightMechanismDriver._record_in_journal(
            context, odl_const.ODL_NETWORK, odl_const.ODL_CREATE)
    @log_helpers.log_method_call
    def create_subnet_precommit(self, context):
        OpenDaylightMechanismDriver._record_in_journal(
            context, odl_const.ODL_SUBNET, odl_const.ODL_CREATE)
    @log_helpers.log_method_call
    def create_port_precommit(self, context):
        OpenDaylightMechanismDriver._record_in_journal(
            context, odl_const.ODL_PORT, odl_const.ODL_CREATE)
    @log_helpers.log_method_call
    def update_network_precommit(self, context):
        OpenDaylightMechanismDriver._record_in_journal(
            context, odl_const.ODL_NETWORK, odl_const.ODL_UPDATE)
    @log_helpers.log_method_call
    def update_subnet_precommit(self, context):
        OpenDaylightMechanismDriver._record_in_journal(
            context, odl_const.ODL_SUBNET, odl_const.ODL_UPDATE)
    @log_helpers.log_method_call
    def update_port_precommit(self, context):
        OpenDaylightMechanismDriver._record_in_journal(
            context, odl_const.ODL_PORT, odl_const.ODL_UPDATE)
    @log_helpers.log_method_call
    def delete_network_precommit(self, context):
        # Deletes carry no resource payload; an empty list is recorded.
        OpenDaylightMechanismDriver._record_in_journal(
            context, odl_const.ODL_NETWORK, odl_const.ODL_DELETE, data=[])
    @log_helpers.log_method_call
    def delete_subnet_precommit(self, context):
        # Use the journal row's data field to store parent object
        # uuids. This information is required for validation checking
        # when deleting parent objects.
        new_context = [context.current['network_id']]
        OpenDaylightMechanismDriver._record_in_journal(
            context, odl_const.ODL_SUBNET, odl_const.ODL_DELETE,
            data=new_context)
    @log_helpers.log_method_call
    def delete_port_precommit(self, context):
        # Use the journal row's data field to store parent object
        # uuids. This information is required for validation checking
        # when deleting parent objects.
        new_context = [context.current['network_id']]
        for subnet in context.current['fixed_ips']:
            new_context.append(subnet['subnet_id'])
        OpenDaylightMechanismDriver._record_in_journal(
            context, odl_const.ODL_PORT, odl_const.ODL_DELETE,
            data=new_context)
    def _sync_security_group_create_precommit(
            self, context, operation, object_type, res_id, sg_dict):
        # Record the security group itself first so the rules below can
        # depend on it in the journal.
        journal.record(context, object_type, sg_dict['id'], operation, sg_dict)
        # NOTE(yamahata): when security group is created, default rules
        # are also created.
        for rule in sg_dict['security_group_rules']:
            journal.record(context, odl_const.ODL_SG_RULE, rule['id'],
                           odl_const.ODL_CREATE, rule)
    @log_helpers.log_method_call
    def sync_from_callback_precommit(self, context, operation, res_type,
                                     res_id, resource_dict, **kwargs):
        """Record a security-group / SG-rule change in the journal."""
        object_type = res_type.singular
        if resource_dict is not None:
            resource_dict = resource_dict[object_type]
        if (operation == odl_const.ODL_CREATE and
                object_type == odl_const.ODL_SG):
            self._sync_security_group_create_precommit(
                context, operation, object_type, res_id, resource_dict)
            return
        # For creates the id comes from the payload; otherwise from the
        # callback argument.
        object_uuid = (resource_dict.get('id')
                       if operation == 'create' else res_id)
        data = resource_dict
        if (operation == odl_const.ODL_DELETE):
            # NOTE(yamahata): DB auto deletion
            # Security Group Rule under this Security Group needs to
            # be deleted. At NeutronDB layer rules are auto deleted with
            # cascade='all,delete'.
            if (object_type == odl_const.ODL_SG):
                for rule_id in kwargs['security_group_rule_ids']:
                    journal.record(context, odl_const.ODL_SG_RULE,
                                   rule_id, odl_const.ODL_DELETE,
                                   [object_uuid])
            elif (object_type == odl_const.ODL_SG_RULE):
                # Set the parent security group id so that dependencies
                # to this security rule deletion can be properly found
                # in the journal.
                data = [kwargs['security_group_id']]
        assert object_uuid is not None
        journal.record(context, object_type, object_uuid,
                       operation, data)
    def sync_from_callback_postcommit(self, context, operation, res_type,
                                      res_id, resource_dict, **kwargs):
        self._postcommit(context)
    def _postcommit(self, context):
        # Wake the journal thread; the actual REST call happens there.
        self.journal.set_sync_event()
    @log_helpers.log_method_call
    def bind_port(self, port_context):
        """Set binding for a valid segments

        Delegates to the configured port binding controller.
        """
        return self.port_binding_controller.bind_port(port_context)
    def check_vlan_transparency(self, context):
        """Check VLAN transparency
        """
        # TODO(yamahata): This should be odl service provider dependent
        # introduce ODL yang model for ODL to report which network types
        # are vlan-transparent.
        # VLAN and FLAT cases, we don't know if the underlying network
        # supports QinQ or VLAN.
        # For now, netvirt supports only vxlan tunneling.
        VLAN_TRANSPARENT_NETWORK_TYPES = [p_const.TYPE_VXLAN]
        network = context.current
        # see TypeManager._extend_network_dict_provider()
        # single providernet
        if providernet.NETWORK_TYPE in network:
            return (network[providernet.NETWORK_TYPE] in
                    VLAN_TRANSPARENT_NETWORK_TYPES)
        # multi providernet
        segments = network.get(mpnet_apidef.SEGMENTS)
        if segments is None:
            return True
        return all(segment[providernet.NETWORK_TYPE]
                   in VLAN_TRANSPARENT_NETWORK_TYPES
                   for segment in segments)
networking-odl-16.0.0/networking_odl/ml2/README.odl0000664000175000017500000000343313656750541021756 0ustar zuulzuul00000000000000OpenDaylight ML2 MechanismDriver
================================
OpenDaylight is an Open Source SDN Controller developed by a plethora of
companies and hosted by the Linux Foundation. The OpenDaylight website
contains more information on the capabilities OpenDaylight provides:
http://www.opendaylight.org
Theory of operation
===================
The OpenStack Neutron integration with OpenDaylight consists of the ML2
MechanismDriver which acts as a REST proxy and passes all Neutron API
calls into OpenDaylight. OpenDaylight contains a NB REST service (called
the NeutronAPIService) which caches data from these proxied API calls and
makes it available to other services inside of OpenDaylight. One current
user of the SB side of the NeutronAPIService is the OVSDB code in
OpenDaylight. OVSDB uses the neutron information to isolate tenant networks
using GRE or VXLAN tunnels.
How to use the OpenDaylight ML2 MechanismDriver
===============================================
To use the ML2 MechanismDriver, you need to ensure you have it configured
as one of the "mechanism_drivers" in ML2:
mechanism_drivers=opendaylight
The next step is to setup the "[ml2_odl]" section in either the ml2_conf.ini
file or in a separate ml2_conf_odl.ini file. An example is shown below:
[ml2_odl]
password = admin
username = admin
url = http://192.168.100.1:8080/controller/nb/v2/neutron
When starting OpenDaylight, ensure you have the SimpleForwarding application
disabled or remove the .jar file from the plugins directory. Also ensure you
start OpenDaylight before you start OpenStack Neutron.
There is devstack support for this which will automatically pull down OpenDaylight
and start it as part of devstack as well. The patch for this will likely merge
around the same time as this patch merges.
networking-odl-16.0.0/networking_odl/ml2/legacy_port_binding.py0000664000175000017500000000602413656750541024674 0ustar zuulzuul00000000000000# Copyright (c) 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import portbindings
from neutron_lib import constants as n_const
from neutron_lib.plugins.ml2 import api
from oslo_log import log
from networking_odl.ml2 import port_binding
LOG = log.getLogger(__name__)
class LegacyPortBindingManager(port_binding.PortBindingController):
    """Legacy port-binding controller for the OpenDaylight driver.

    Binds ports of VNIC type "normal" to the first segment whose
    network type ODL supports, always reporting an OVS VIF type.
    """

    def __init__(self):
        self.vif_details = {portbindings.CAP_PORT_FILTER: True}
        self.supported_vnic_types = [portbindings.VNIC_NORMAL]

    def bind_port(self, port_context):
        """Bind the port to the first segment ODL can handle, if any."""
        vnic_type = port_context.current.get(portbindings.VNIC_TYPE,
                                             portbindings.VNIC_NORMAL)
        if vnic_type not in self.supported_vnic_types:
            LOG.debug("Refusing to bind due to unsupported vnic_type: %s",
                      vnic_type)
            return
        # Pick the first bindable segment; None when nothing qualifies.
        valid_segment = next(
            (candidate for candidate in port_context.segments_to_bind
             if self._check_segment(candidate)), None)
        if not valid_segment:
            return
        vif_type = self._get_vif_type(port_context)
        LOG.debug("Bind port %(port)s on network %(network)s with valid "
                  "segment %(segment)s and VIF type %(vif_type)r.",
                  {'port': port_context.current['id'],
                   'network': port_context.network.current['id'],
                   'segment': valid_segment, 'vif_type': vif_type})
        port_context.set_binding(
            valid_segment[api.ID], vif_type,
            self.vif_details,
            status=n_const.PORT_STATUS_ACTIVE)

    def _check_segment(self, segment):
        """Return True when ODL supports the segment's network type."""
        supported_types = (n_const.TYPE_FLAT, n_const.TYPE_LOCAL,
                           n_const.TYPE_GRE, n_const.TYPE_VXLAN,
                           n_const.TYPE_VLAN)
        return segment[api.NETWORK_TYPE] in supported_types

    def _get_vif_type(self, port_context):
        """Return the VIF type string for the given PortContext.

        Dummy implementation: always returns
        neutron_lib.api.definitions.portbindings.VIF_TYPE_OVS.
        """
        return portbindings.VIF_TYPE_OVS
networking-odl-16.0.0/networking_odl/ml2/port_status_update.py0000664000175000017500000001265213656750541024627 0ustar zuulzuul00000000000000# Copyright (c) 2017 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
from neutron_lib.callbacks import resources
from neutron_lib import constants as n_const
from neutron_lib import context
from neutron_lib.plugins import directory
from neutron_lib import worker
from oslo_log import log
from neutron.db import provisioning_blocks
from networking_odl.common import client as odl_client
from networking_odl.common import odl_features
from networking_odl.common import utils
from networking_odl.common import websocket_client as odl_ws_client
LOG = log.getLogger(__name__)
class OdlPortStatusUpdate(worker.BaseWorker):
    """Class to register and handle port status update"""
    PORT_PATH = "restconf/operational/neutron:neutron/ports/port"
    def __init__(self):
        super(OdlPortStatusUpdate, self).__init__()
        self.odl_websocket_client = None
    def start(self):
        """Start the worker; subscribe to ODL port updates if supported."""
        super(OdlPortStatusUpdate, self).start()
        LOG.debug('OdlPortStatusUpdate worker running')
        if odl_features.has(odl_features.OPERATIONAL_PORT_STATUS):
            self.run_websocket()
    def stop(self):
        """Request the websocket thread to exit."""
        if self.odl_websocket_client:
            self.odl_websocket_client.set_exit_flag()
    def wait(self):
        """Wait for service to complete."""
    @staticmethod
    def reset():
        pass
    def run_websocket(self):
        """Create the websocket subscription for port notifications."""
        # OpenDaylight path to receive websocket notifications on
        neutron_ports_path = "/neutron:neutron/neutron:ports"
        self.path_uri = utils.get_odl_url()
        self.odl_websocket_client = (
            odl_ws_client.OpenDaylightWebsocketClient.odl_create_websocket(
                self.path_uri, neutron_ports_path,
                odl_ws_client.ODL_OPERATIONAL_DATASTORE,
                odl_ws_client.ODL_NOTIFICATION_SCOPE_SUBTREE,
                self._process_websocket_recv,
                self._process_websocket_reconnect,
                True
            ))
    def _process_websocket_recv(self, payload, reconnect):
        # Callback for websocket notification
        LOG.debug("Websocket notification for port status update")
        for event in odl_ws_client.EventDataParser.get_item(payload):
            operation, path, data = event.get_fields()
            if ((operation in [event.OPERATION_UPDATE,
                               event.OPERATION_CREATE])):
                port_id = event.extract_field(path, "neutron:uuid")
                port_id = str(port_id).strip("'")
                status_field = data.get('status')
                if status_field is not None:
                    status = status_field.get('content')
                    LOG.debug("Update port for port id %s %s", port_id, status)
                    # for now we only support transition from DOWN->ACTIVE
                    # https://bugs.launchpad.net/networking-odl/+bug/1686023
                    if status == n_const.PORT_STATUS_ACTIVE:
                        # Release the provisioning block so neutron can
                        # move the port to ACTIVE.
                        provisioning_blocks.provisioning_complete(
                            context.get_admin_context(),
                            port_id, resources.PORT,
                            provisioning_blocks.L2_AGENT_ENTITY)
            if operation == event.OPERATION_DELETE:
                LOG.debug("PortStatus: Ignoring delete operation")
    def _process_websocket_reconnect(self, status):
        """On reconnect, pull statuses that may have been missed."""
        if status == odl_ws_client.ODL_WEBSOCKET_CONNECTED:
            # Get port data using restconf
            LOG.debug("Websocket notification on reconnection")
            reconn_thread = threading.Thread(
                name='websocket', target=self._pull_missed_statuses)
            reconn_thread.start()
    def _pull_missed_statuses(self):
        """Fetch DOWN/unbound ports and complete those ACTIVE in ODL."""
        LOG.debug("starting to pull pending statuses...")
        plugin = directory.get_plugin()
        filter = {"status": [n_const.PORT_STATUS_DOWN],
                  "vif_type": ["unbound"]}
        ports = plugin.get_ports(context.get_admin_context(), filter)
        if not ports:
            LOG.debug("no down ports found, done")
            return
        port_fetch_url = utils.get_odl_url(self.PORT_PATH)
        client = odl_client.OpenDaylightRestClient.create_client(
            url=port_fetch_url)
        for port in ports:
            port_id = port["id"]
            response = client.get(port_id)
            if response.status_code != 200:
                LOG.warning("Non-200 response code %s", str(response))
                continue
            odl_status = response.json()['port'][0]['status']
            if odl_status == n_const.PORT_STATUS_ACTIVE:
                # for now we only support transition from DOWN->ACTIVE
                # See https://bugs.launchpad.net/networking-odl/+bug/1686023
                provisioning_blocks.provisioning_complete(
                    context.get_admin_context(),
                    port_id, resources.PORT,
                    provisioning_blocks.L2_AGENT_ENTITY)
        LOG.debug("done pulling pending statuses")
networking-odl-16.0.0/networking_odl/ml2/pseudo_agentdb_binding.py0000664000175000017500000004504413656750541025354 0ustar zuulzuul00000000000000# Copyright (c) 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from string import Template
from neutron_lib.api.definitions import portbindings
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants as nl_const
from neutron_lib import context
from neutron_lib.plugins import directory
from neutron_lib.plugins.ml2 import api
from neutron_lib import worker
from oslo_config import cfg
from oslo_log import log
from oslo_serialization import jsonutils
from requests import codes
from requests import exceptions
from neutron.db import provisioning_blocks
from networking_odl.common import client as odl_client
from networking_odl.common import odl_features
from networking_odl.common import utils
from networking_odl.common import websocket_client as odl_ws_client
from networking_odl.journal import periodic_task
from networking_odl.ml2 import port_binding
cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')
LOG = log.getLogger(__name__)
class PseudoAgentDBBindingTaskBase(object):
    """Base class for tasks that fetch ODL hostconfigs over restconf.

    Holds the PseudoAgentDBBindingWorker (``_worker``), which owns the
    actual agentdb updates, and a REST client pointed at the ODL
    hostconfig URI.
    """
    def __init__(self, worker):
        super(PseudoAgentDBBindingTaskBase, self).__init__()
        self._worker = worker
        # extract host/port from ODL URL and append hostconf_uri path
        hostconf_uri = utils.get_odl_url(cfg.CONF.ml2_odl.odl_hostconf_uri)
        LOG.debug("ODLPORTBINDING hostconfigs URI: %s", hostconf_uri)
        # TODO(mzmalick): disable port-binding for ODL lightweight testing
        self.odl_rest_client = odl_client.OpenDaylightRestClient.create_client(
            url=hostconf_uri)
    def _rest_get_hostconfigs(self):
        """GET the hostconfig list from ODL.

        Returns the list of hostconfig dicts, [] when ODL answers 404
        (no entries yet), or None on any error (caller retries later).
        """
        try:
            response = self.odl_rest_client.get()
            response.raise_for_status()
            hostconfigs = response.json()['hostconfigs']['hostconfig']
        except exceptions.ConnectionError:
            LOG.error("Cannot connect to the OpenDaylight Controller",
                      exc_info=True)
            return None
        except exceptions.HTTPError as e:
            # restconf returns 404 on operation when there is no entry
            if e.response.status_code == codes.not_found:
                LOG.debug("Response code not_found (404)"
                          " treated as an empty list")
                return []
            LOG.warning("REST/GET odl hostconfig failed, ",
                        exc_info=True)
            return None
        except KeyError:
            LOG.error("got invalid hostconfigs", exc_info=True)
            return None
        except Exception:
            LOG.warning("REST/GET odl hostconfig failed, ",
                        exc_info=True)
            return None
        else:
            if LOG.isEnabledFor(logging.DEBUG):
                # NOTE(review): this serializes the Response object, not
                # the parsed body -- presumably response.json() was
                # intended; confirm before relying on this debug output.
                _hconfig_str = jsonutils.dumps(
                    response, sort_keys=True, indent=4, separators=(',', ': '))
                LOG.debug("ODLPORTBINDING hostconfigs:\n%s", _hconfig_str)
        return hostconfigs
    def _get_and_update_hostconfigs(self, context=None):
        """Fetch hostconfigs and hand them to the agentdb worker."""
        LOG.info("REST/GET hostconfigs from ODL")
        hostconfigs = self._rest_get_hostconfigs()
        if not hostconfigs:
            LOG.warning("ODL hostconfigs REST/GET failed, "
                        "will retry on next poll")
            return  # retry on next poll
        self._worker.update_agents_db(hostconfigs=hostconfigs)
@registry.has_registry_receivers
class PseudoAgentDBBindingPrePopulate(PseudoAgentDBBindingTaskBase):
    """Populate the agentdb on demand just before a port is bound.

    Listens for port BEFORE_CREATE/BEFORE_UPDATE events and, when the
    target host has no live "ODL L2" agent row yet, fetches that
    host's hostconfig from ODL and inserts the row so binding can
    succeed.
    """
    @registry.receives(resources.PORT,
                       [events.BEFORE_CREATE, events.BEFORE_UPDATE])
    def before_port_binding(self, resource, event, trigger, **kwargs):
        LOG.debug("before_port resource %s event %s %s",
                  resource, event, kwargs)
        assert resource == resources.PORT
        assert event in [events.BEFORE_CREATE, events.BEFORE_UPDATE]
        ml2_plugin = trigger
        context = kwargs['context']
        port = kwargs['port']
        host = nl_const.ATTR_NOT_SPECIFIED
        if port and portbindings.HOST_ID in port:
            host = port.get(portbindings.HOST_ID)
        # Nothing to do when no target host was specified.
        if host == nl_const.ATTR_NOT_SPECIFIED or not host:
            return
        agent_type = PseudoAgentDBBindingWorker.L2_TYPE
        if self._worker.known_agent(host, agent_type):
            return
        # Check whether a live agent row already exists in neutron DB.
        agents = ml2_plugin.get_agents(
            context, filters={'agent_type': [agent_type], 'host': [host]})
        if agents and all(agent['alive'] for agent in agents):
            self._worker.add_known_agents(agents)
            LOG.debug("agents %s", agents)
            return
        # This host may not be created/updated by worker.
        # try to populate it.
        urlpath = "hostconfig/{0}/{1}".format(
            host, PseudoAgentDBBindingWorker.L2_TYPE)
        try:
            response = self.odl_rest_client.get(urlpath)
            response.raise_for_status()
        except Exception:
            LOG.warning("REST/GET odl hostconfig/%s failed.", host,
                        exc_info=True)
            return
        LOG.debug("response %s", response.json())
        hostconfig = response.json().get('hostconfig', [])
        if hostconfig:
            self._worker.update_agents_db_row(hostconfig[0])
class PseudoAgentDBBindingPeriodicTask(PseudoAgentDBBindingTaskBase):
    """Poll ODL hostconfigs periodically and mirror them to agentdb."""
    def __init__(self, worker):
        super(PseudoAgentDBBindingPeriodicTask, self).__init__(worker)
        # Start polling ODL restconf using maintenance thread.
        # default: 30s (should be <= agent keep-alive poll interval)
        self._periodic = periodic_task.PeriodicTask(
            'hostconfig', cfg.CONF.ml2_odl.restconf_poll_interval)
        self._periodic.register_operation(self._get_and_update_hostconfigs)
        self._periodic.start()
class PseudoAgentDBBindingWebSocket(PseudoAgentDBBindingTaskBase):
    """Keep the agentdb in sync with ODL hostconfigs via websocket.

    On construction the hostconfigs are fetched once over restconf;
    afterwards a websocket subscription on the hostconfigs subtree
    pushes create/delete notifications that are applied through the
    agentdb worker.
    """
    def __init__(self, worker):
        super(PseudoAgentDBBindingWebSocket, self).__init__(worker)
        # Update hostconfig once for the configurations already present
        self._get_and_update_hostconfigs()
        odl_url = utils.get_odl_url()
        self._start_websocket(odl_url)
    def _start_websocket(self, odl_url):
        # OpenDaylight path to receive websocket notifications on
        neutron_hostconfigs_path = """/neutron:neutron/neutron:hostconfigs"""
        self.odl_websocket_client = (
            odl_ws_client.OpenDaylightWebsocketClient.odl_create_websocket(
                odl_url, neutron_hostconfigs_path,
                odl_ws_client.ODL_OPERATIONAL_DATASTORE,
                odl_ws_client.ODL_NOTIFICATION_SCOPE_SUBTREE,
                self._process_websocket_recv,
                self._process_websocket_reconnect
            ))
        if self.odl_websocket_client is None:
            LOG.error("Error starting websocket thread")
    def _process_websocket_recv(self, payload, reconnect):
        # Callback for websocket notification
        LOG.debug("Websocket notification for hostconfig update")
        for event in odl_ws_client.EventDataParser.get_item(payload):
            try:
                operation, path, data = event.get_fields()
                if operation == event.OPERATION_DELETE:
                    host_id = event.extract_field(path, "neutron:host-id")
                    host_type = event.extract_field(path, "neutron:host-type")
                    if not host_id or not host_type:
                        LOG.warning("Invalid delete notification")
                        continue
                    self._worker.delete_agents_db_row(
                        host_id.strip("'"), host_type.strip("'"))
                elif operation == event.OPERATION_CREATE:
                    if 'hostconfig' in data:
                        hostconfig = data['hostconfig']
                        # Bug fix: update_agents_db_row() is defined on
                        # the worker, not on this class; calling it on
                        # ``self`` raised AttributeError, which escaped
                        # the KeyError-only handler below.
                        self._worker.update_agents_db_row(hostconfig)
            except KeyError:
                LOG.warning("Invalid JSON for websocket notification",
                            exc_info=True)
                continue
    # TODO(rsood): Mixing restconf and websocket can cause race conditions
    def _process_websocket_reconnect(self, status):
        """On reconnect, resync all hostconfigs over restconf."""
        if status == odl_ws_client.ODL_WEBSOCKET_CONNECTED:
            # Get hostconfig data using restconf
            LOG.debug("Websocket notification on reconnection")
            self._get_and_update_hostconfigs()
class PseudoAgentDBBindingWorker(worker.BaseWorker):
    """Neutron Worker to update agentdb based on ODL hostconfig."""
    AGENTDB_BINARY = 'neutron-odlagent-portbinding'
    L2_TYPE = "ODL L2"
    # TODO(mzmalick): binary, topic and resource_versions to be provided
    # by ODL, Pending ODL NB patches.
    _AGENTDB_ROW = {
        'binary': AGENTDB_BINARY,
        'host': '',
        'topic': nl_const.L2_AGENT_TOPIC,
        'configurations': {},
        'resource_versions': '',
        'agent_type': L2_TYPE,
        'start_flag': True}
    def __init__(self):
        LOG.info("PseudoAgentDBBindingWorker init")
        # Sets of (host, agent_type) tuples: agents seen in the previous
        # refresh cycle vs. agents confirmed in the current one.
        self._old_agents = set()
        self._known_agents = set()
        # Core plugin reference, resolved lazily on first row update.
        self.agents_db = None
        super(PseudoAgentDBBindingWorker, self).__init__()
    def start(self):
        """Start the worker and kick off hostconfig synchronization."""
        LOG.info("PseudoAgentDBBindingWorker starting")
        super(PseudoAgentDBBindingWorker, self).start()
        self._start()
    def stop(self):
        pass
    def wait(self):
        pass
    def reset(self):
        pass
    def _start(self):
        """Initialization."""
        LOG.debug("Initializing ODL Port Binding Worker")
        if cfg.CONF.ml2_odl.enable_websocket_pseudo_agentdb:
            self._websocket = PseudoAgentDBBindingWebSocket(self)
        else:
            self._periodic_task = (PseudoAgentDBBindingPeriodicTask(self))
    def known_agent(self, host_id, agent_type):
        """Return True if the agent was seen in this or the last cycle."""
        agent = (host_id, agent_type)
        return agent in self._known_agents or agent in self._old_agents
    def add_known_agents(self, agents):
        # Mark agent rows (already present in neutron DB) as known.
        for agent in agents:
            self._known_agents.add((agent['host'], agent['agent_type']))
    def update_agents_db(self, hostconfigs):
        """Refresh the agentdb from a full hostconfig list."""
        LOG.debug("ODLPORTBINDING Updating agents DB with ODL hostconfigs")
        self._old_agents = self._known_agents
        self._known_agents = set()
        for host_config in hostconfigs:
            self._update_agents_db_row(host_config)
    def update_agents_db_row(self, host_config):
        """Refresh a single agentdb row from one hostconfig.

        NOTE(review): this aliases ``_old_agents`` to the same set
        object as ``_known_agents`` (no copy), so the add() inside
        _update_agents_db_row mutates both -- confirm whether
        ``set(self._known_agents)`` was intended.
        """
        self._old_agents = self._known_agents
        self._update_agents_db_row(host_config)
    def _update_agents_db_row(self, host_config):
        if self.agents_db is None:
            self.agents_db = directory.get_plugin()
        # Update one row in agent db
        host_id = host_config['host-id']
        host_type = host_config['host-type']
        config = host_config['config']
        try:
            agentdb_row = self._AGENTDB_ROW.copy()
            agentdb_row['host'] = host_id
            agentdb_row['agent_type'] = host_type
            agentdb_row['configurations'] = jsonutils.loads(config)
            # Only send start_flag the first time this agent is seen.
            if (host_id, host_type) in self._old_agents:
                agentdb_row.pop('start_flag', None)
            self.agents_db.create_or_update_agent(
                context.get_admin_context(), agentdb_row)
            self._known_agents.add((host_id, host_type))
        except Exception:
            LOG.exception("Unable to update agentdb.")
    def delete_agents_db_row(self, host_id, host_type):
        """Delete agent row."""
        # NOTE(review): self.agents_db may still be None if no update
        # ever ran; the resulting AttributeError would be swallowed by
        # the broad except below.
        try:
            filters = {'agent_type': [host_type],
                       'host': [host_id]}
            # TODO(rsood): get_agent can be used here
            agent = self.agents_db.get_agents_db(
                context.get_admin_context(), filters=filters)
            if not agent:
                return
            LOG.debug("Deleting Agent with Agent id: %s", agent[0]['id'])
            self.agents_db.delete_agent(
                context.get_admin_context(), agent[0]['id'])
            self._known_agents.remove((host_id, host_type))
        except Exception:
            LOG.exception("Unable to delete from agentdb.")
@registry.has_registry_receivers
class PseudoAgentDBBindingController(port_binding.PortBindingController):
"""Switch agnostic Port binding controller for OpenDayLight."""
    def __init__(self):
        """Create the agentdb-sync worker owned by this controller."""
        LOG.debug("Initializing ODL Port Binding Controller")
        super(PseudoAgentDBBindingController, self).__init__()
        # Worker that mirrors ODL hostconfigs into the neutron agent DB.
        self._worker = PseudoAgentDBBindingWorker()
@registry.receives(resources.PROCESS, [events.BEFORE_SPAWN])
def _before_spawn(self, resource, event, trigger, payload=None):
self._prepopulate = PseudoAgentDBBindingPrePopulate(self._worker)
def get_workers(self):
return [self._worker]
def _substitute_hconfig_tmpl(self, port_context, hconfig):
# TODO(mzmalick): Explore options for inlines string splicing of
# port-id to 14 bytes as required by vhostuser types
port_id = port_context.current['id']
conf = hconfig.get('configurations')
vnics = conf.get('supported_vnic_types')
if vnics is None:
return hconfig
for vnic in vnics:
if vnic.get('vif_type') == portbindings.VIF_TYPE_VHOST_USER:
details = vnic.get('vif_details')
if details is None:
continue
port_prefix = details.get('port_prefix')
port_prefix = port_prefix[:14]
subs_ids = {
# $IDENTIFER string substitution in hostconfigs JSON string
'PORT_ID': port_id[:(14 - len(port_prefix))],
}
# Substitute identifiers and Convert JSON string to dict
hconfig_conf_json = Template(jsonutils.dumps(details))
substituted_str = hconfig_conf_json.safe_substitute(subs_ids)
vnic['vif_details'] = jsonutils.loads(substituted_str)
return hconfig
def bind_port(self, port_context):
"""bind port using ODL host configuration."""
# Get all ODL hostconfigs for this host and type
agentdb = port_context.host_agents(PseudoAgentDBBindingWorker.L2_TYPE)
if not agentdb:
LOG.warning("No valid hostconfigs in agentsdb for host %s",
port_context.host)
return
for raw_hconfig in agentdb:
# do any $identifier substitution
hconfig = self._substitute_hconfig_tmpl(port_context, raw_hconfig)
# Found ODL hostconfig for this host in agentdb
LOG.debug("ODLPORTBINDING bind port with hostconfig: %s", hconfig)
if self._hconfig_bind_port(port_context, hconfig):
break # Port binding suceeded!
else: # Port binding failed!
LOG.warning(
"Failed to bind Port %(pid)s devid %(device_id)s "
"owner %(owner)s for host %(host)s "
"on network %(network)s.", {
'pid': port_context.current['id'],
'device_id': port_context.current['device_id'],
'owner': port_context.current['device_owner'],
'host': port_context.host,
'network': port_context.network.current['id']})
else: # No hostconfig found for host in agentdb.
LOG.warning("No ODL hostconfigs for host %s found in agentdb",
port_context.host)
def _hconfig_bind_port(self, port_context, hconfig):
"""bind port after validating odl host configuration."""
valid_segment = None
for segment in port_context.segments_to_bind:
if self._is_valid_segment(segment, hconfig['configurations']):
valid_segment = segment
break
else:
LOG.debug("No valid segments found!")
return False
confs = hconfig['configurations']['supported_vnic_types']
# nova provides vnic_type in port_context to neutron.
# neutron provides supported vif_type for binding based on vnic_type
# in this case ODL hostconfigs has the vif_type to bind for vnic_type
vnic_type = port_context.current.get(portbindings.VNIC_TYPE)
vif_details = None
for conf in confs:
if conf["vnic_type"] == vnic_type:
vif_type = conf.get('vif_type', portbindings.VIF_TYPE_OVS)
LOG.debug("Binding vnic:'%s' to vif:'%s'", vnic_type, vif_type)
vif_details = conf.get('vif_details', {})
break
else:
LOG.error(
"Binding failed: unsupported VNIC %(vnic_type)s on %(host)s",
{'vnic_type': vnic_type, 'host': port_context.host})
return False
if not vif_details: # empty vif_details could be trouble, warn.
LOG.warning("hostconfig:vif_details was empty!")
LOG.debug("Bind port %(port)s on network %(network)s with valid "
"segment %(segment)s and VIF type %(vif_type)r "
"VIF details %(vif_details)r.",
{'port': port_context.current['id'],
'network': port_context.network.current['id'],
'segment': valid_segment, 'vif_type': vif_type,
'vif_details': vif_details})
port_status = self._prepare_initial_port_status(port_context)
port_context.set_binding(valid_segment[api.ID], vif_type,
vif_details, status=port_status)
return True
def _prepare_initial_port_status(self, port_context):
port_status = nl_const.PORT_STATUS_ACTIVE
if odl_features.has(odl_features.OPERATIONAL_PORT_STATUS):
port_status = nl_const.PORT_STATUS_DOWN
provisioning_blocks.add_provisioning_component(
port_context._plugin_context, port_context.current['id'],
resources.PORT, provisioning_blocks.L2_AGENT_ENTITY)
return port_status
def _is_valid_segment(self, segment, conf):
"""Verify a segment is supported by ODL."""
network_type = segment[api.NETWORK_TYPE]
return network_type in conf['allowed_network_types']
networking-odl-16.0.0/networking_odl/ml2/port_binding.py0000664000175000017500000001220513656750541023346 0ustar zuulzuul00000000000000# Copyright (c) 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
import stevedore
from oslo_config import cfg
from oslo_log import log
from oslo_utils import excutils
LOG = log.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class PortBindingController(object):
    """Abstract interface for ODL port-binding strategies."""
    def get_workers(self):
        # Controllers needing background neutron workers override this;
        # the default contributes none.
        return []
    @abc.abstractmethod
    def bind_port(self, port_context):
        """Attempt to bind a port.
        :param context: PortContext instance describing the port
        This method is called outside any transaction to attempt to
        establish a port binding using calling mechanism driver. Bindings
        may be created at each of multiple levels of a hierarchical
        network, and are established from the top level downward. At
        each level, the mechanism driver determines whether it can
        bind to any of the network segments in the
        context.segments_to_bind property, based on the value of the
        context.host property, any relevant port or network
        attributes, and its own knowledge of the network topology. At
        the top level, context.segments_to_bind contains the static
        segments of the port's network. At each lower level of
        binding, it contains static or dynamic segments supplied by
        the driver that bound at the level above. If the driver is
        able to complete the binding of the port to any segment in
        context.segments_to_bind, it must call context.set_binding
        with the binding details. If it can partially bind the port,
        it must call context.continue_binding with the network
        segments to be used to bind at the next lower level.
        If the binding results are committed after bind_port returns,
        they will be seen by all mechanism drivers as
        update_port_precommit and update_port_postcommit calls. But if
        some other thread or process concurrently binds or updates the
        port, these binding results will not be committed, and
        update_port_precommit and update_port_postcommit will not be
        called on the mechanism drivers with these results. Because
        binding results can be discarded rather than committed,
        drivers should avoid making persistent state changes in
        bind_port, or else must ensure that such state changes are
        eventually cleaned up.
        Implementing this method explicitly declares the mechanism
        driver as having the intention to bind ports. This is inspected
        by the QoS service to identify the available QoS rules you
        can use with ports.
        """
class PortBindingManager(PortBindingController):
    # At this point, there is no requirement to have multiple
    # port binding controllers at the same time.
    # Stay with single controller until there is a real requirement
    def __init__(self, name, controller):
        """Wrap a single named PortBindingController implementation."""
        self.name = name
        self.controller = controller
    @classmethod
    def create(
            cls, namespace='networking_odl.ml2.port_binding_controllers',
            name=None):
        """Load the configured port binding controller via stevedore.

        :param namespace: stevedore entry-point namespace to search.
        :param name: controller name; defaults to the
            [ml2_odl] port_binding_controller config option.
        :returns: a PortBindingManager wrapping the loaded controller.
        :raises ValueError: if the loaded extension does not implement
            PortBindingController.
        """
        name = name or cfg.CONF.ml2_odl.port_binding_controller
        ext_mgr = stevedore.named.NamedExtensionManager(
            namespace, [name], invoke_on_load=True)
        assert len(ext_mgr.extensions) == 1, (
            "Wrong port binding controller is specified")
        extension = ext_mgr.extensions[0]
        if not isinstance(extension.obj, PortBindingController):
            # NOTE: the previous code passed an un-imported "_" translation
            # marker plus a separate substitution dict to ValueError, so this
            # path raised NameError and the message was never interpolated.
            raise ValueError(
                "Port binding controller '%(name)s (%(controller)r)' "
                "doesn't implement PortBindingController interface." %
                {'name': extension.name, 'controller': extension.obj})
        return cls(extension.name, extension.obj)
    def get_workers(self):
        """Delegate worker creation to the wrapped controller."""
        return self.controller.get_workers()
    def bind_port(self, port_context):
        """Delegate port binding to the controller, logging the outcome.

        Exceptions from the controller are logged and re-raised unchanged.
        """
        controller_details = {'name': self.name, 'controller': self.controller}
        try:
            self.controller.bind_port(port_context)
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.exception(
                    "Controller '%(name)s (%(controller)r)' had an error "
                    "when binding port.", controller_details)
        else:
            # NOTE(review): relies on PortContext's private
            # _new_bound_segment attribute to detect whether a binding was
            # established -- confirm against the ml2 driver API in use.
            if port_context._new_bound_segment:
                LOG.info(
                    "Controller '%(name)s (%(controller)r)' has bound port.",
                    controller_details)
            else:
                LOG.debug(
                    "Controller %(name)s (%(controller)r) hasn't bound "
                    "port.", controller_details)
networking-odl-16.0.0/networking_odl/sfc/0000775000175000017500000000000013656750617020403 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/sfc/__init__.py0000664000175000017500000000000013656750541022476 0ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/sfc/flowclassifier/0000775000175000017500000000000013656750617023417 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/sfc/flowclassifier/sfc_flowclassifier_v2.py0000664000175000017500000000677013656750541030255 0ustar zuulzuul00000000000000# Copyright (c) 2016 Brocade Communication Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
from networking_sfc.extensions import flowclassifier as fc_const
from networking_sfc.services.flowclassifier.drivers import base as fc_driver
from networking_odl.common import constants as odl_const
from networking_odl.common import postcommit
from networking_odl.journal import full_sync
from networking_odl.journal import journal
LOG = logging.getLogger(__name__)
SFC_FC_RESOURCES = {
odl_const.ODL_SFC_FLOW_CLASSIFIER:
odl_const.NETWORKING_SFC_FLOW_CLASSIFIERS,
}
@postcommit.add_postcommit('flow_classifier')
class OpenDaylightSFCFlowClassifierDriverV2(
        fc_driver.FlowClassifierDriverBase):
    """Journal-based OpenDaylight driver for networking-sfc flow classifiers.

    Each precommit hook records the operation in the networking-odl
    journal; the journal thread later replays it against the REST API
    exposed by OpenDaylight Neutron Northbound.
    """
    def initialize(self):
        """Start the journal thread and register for full sync."""
        LOG.debug("Initializing OpenDaylight Networking "
                  "SFC Flow Classifier driver Version 2")
        self.journal = journal.OpenDaylightJournalThread()
        full_sync.register(fc_const.FLOW_CLASSIFIER_EXT, SFC_FC_RESOURCES)
    @staticmethod
    def _record_in_journal(context, object_type, operation, data=None):
        # Default payload is the resource as seen in the current context.
        payload = context.current if data is None else data
        journal.record(context._plugin_context, object_type,
                       context.current['id'], operation, payload)
    @log_helpers.log_method_call
    def create_flow_classifier_precommit(self, context):
        self._record_in_journal(
            context, odl_const.ODL_SFC_FLOW_CLASSIFIER, odl_const.ODL_CREATE)
    @log_helpers.log_method_call
    def update_flow_classifier_precommit(self, context):
        self._record_in_journal(
            context, odl_const.ODL_SFC_FLOW_CLASSIFIER, odl_const.ODL_UPDATE)
    @log_helpers.log_method_call
    def delete_flow_classifier_precommit(self, context):
        # Deletes only need the resource id, so the payload is empty.
        self._record_in_journal(
            context, odl_const.ODL_SFC_FLOW_CLASSIFIER, odl_const.ODL_DELETE,
            data=[])
    # The hooks below simply delegate to the abstract base class; they exist
    # because driver loading fails without concrete implementations of every
    # abstract method.
    @log_helpers.log_method_call
    def create_flow_classifier(self, context):
        super(OpenDaylightSFCFlowClassifierDriverV2,
              self).create_flow_classifier(context)
    @log_helpers.log_method_call
    def update_flow_classifier(self, context):
        super(OpenDaylightSFCFlowClassifierDriverV2,
              self).update_flow_classifier(context)
    @log_helpers.log_method_call
    def delete_flow_classifier(self, context):
        super(OpenDaylightSFCFlowClassifierDriverV2,
              self).delete_flow_classifier(context)
networking-odl-16.0.0/networking_odl/sfc/flowclassifier/__init__.py0000664000175000017500000000000013656750541025512 0ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/sfc/sfc_driver_v2.py0000664000175000017500000001321613656750541023511 0ustar zuulzuul00000000000000# Copyright (c) 2017 Brocade Communication Systems
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
from networking_sfc.extensions import sfc as sfc_const
from networking_sfc.services.sfc.drivers import base as sfc_driver
from networking_odl.common import constants as odl_const
from networking_odl.common import postcommit
from networking_odl.journal import full_sync
from networking_odl.journal import journal
LOG = logging.getLogger(__name__)
SFC_RESOURCES = {
odl_const.ODL_SFC_PORT_PAIR: odl_const.NETWORKING_SFC_PORT_PAIRS,
odl_const.ODL_SFC_PORT_PAIR_GROUP:
odl_const.NETWORKING_SFC_PORT_PAIR_GROUPS,
odl_const.ODL_SFC_PORT_CHAIN: odl_const.NETWORKING_SFC_PORT_CHAINS
}
@postcommit.add_postcommit('port_pair', 'port_pair_group', 'port_chain')
class OpenDaylightSFCDriverV2(sfc_driver.SfcDriverBase):
    """Journal-based OpenDaylight driver for networking-sfc resources.

    Precommit hooks for Port Pair, Port Pair Group and Port Chain record
    the operation in the networking-odl journal; the journal thread later
    replays it against the OpenDaylight Neutron Northbound REST API, whose
    resource model mirrors the networking-sfc APIs.
    """
    def initialize(self):
        """Start the journal thread and register for full sync."""
        LOG.debug("Initializing OpenDaylight Networking SFC driver(Version 2)")
        self.journal = journal.OpenDaylightJournalThread()
        full_sync.register(sfc_const.SFC_EXT, SFC_RESOURCES)
    @staticmethod
    def _record_in_journal(context, object_type, operation, data=None):
        # Default payload is the resource as seen in the current context.
        payload = context.current if data is None else data
        journal.record(context._plugin_context, object_type,
                       context.current['id'], operation, payload)
    @log_helpers.log_method_call
    def create_port_pair_precommit(self, context):
        self._record_in_journal(
            context, odl_const.ODL_SFC_PORT_PAIR, odl_const.ODL_CREATE)
    @log_helpers.log_method_call
    def create_port_pair_group_precommit(self, context):
        self._record_in_journal(
            context, odl_const.ODL_SFC_PORT_PAIR_GROUP, odl_const.ODL_CREATE)
    @log_helpers.log_method_call
    def create_port_chain_precommit(self, context):
        self._record_in_journal(
            context, odl_const.ODL_SFC_PORT_CHAIN, odl_const.ODL_CREATE)
    @log_helpers.log_method_call
    def update_port_pair_precommit(self, context):
        self._record_in_journal(
            context, odl_const.ODL_SFC_PORT_PAIR, odl_const.ODL_UPDATE)
    @log_helpers.log_method_call
    def update_port_pair_group_precommit(self, context):
        self._record_in_journal(
            context, odl_const.ODL_SFC_PORT_PAIR_GROUP, odl_const.ODL_UPDATE)
    @log_helpers.log_method_call
    def update_port_chain_precommit(self, context):
        self._record_in_journal(
            context, odl_const.ODL_SFC_PORT_CHAIN, odl_const.ODL_UPDATE)
    # Deletes only need the resource id, so the payload is empty.
    @log_helpers.log_method_call
    def delete_port_pair_precommit(self, context):
        self._record_in_journal(
            context, odl_const.ODL_SFC_PORT_PAIR, odl_const.ODL_DELETE,
            data=[])
    @log_helpers.log_method_call
    def delete_port_pair_group_precommit(self, context):
        self._record_in_journal(
            context, odl_const.ODL_SFC_PORT_PAIR_GROUP, odl_const.ODL_DELETE,
            data=[])
    @log_helpers.log_method_call
    def delete_port_chain_precommit(self, context):
        self._record_in_journal(
            context, odl_const.ODL_SFC_PORT_CHAIN, odl_const.ODL_DELETE,
            data=[])
    # The hooks below simply delegate to the abstract base class; they exist
    # because driver loading fails without concrete implementations of every
    # abstract method.
    @log_helpers.log_method_call
    def create_port_pair(self, context):
        super(OpenDaylightSFCDriverV2, self).create_port_pair(context)
    @log_helpers.log_method_call
    def create_port_pair_group(self, context):
        super(OpenDaylightSFCDriverV2, self).create_port_pair_group(context)
    @log_helpers.log_method_call
    def create_port_chain(self, context):
        super(OpenDaylightSFCDriverV2, self).create_port_chain(context)
    @log_helpers.log_method_call
    def update_port_pair(self, context):
        super(OpenDaylightSFCDriverV2, self).update_port_pair(context)
    @log_helpers.log_method_call
    def update_port_pair_group(self, context):
        super(OpenDaylightSFCDriverV2, self).update_port_pair_group(context)
    @log_helpers.log_method_call
    def update_port_chain(self, context):
        super(OpenDaylightSFCDriverV2, self).update_port_chain(context)
    @log_helpers.log_method_call
    def delete_port_pair(self, context):
        super(OpenDaylightSFCDriverV2, self).delete_port_pair(context)
    @log_helpers.log_method_call
    def delete_port_pair_group(self, context):
        super(OpenDaylightSFCDriverV2, self).delete_port_pair_group(context)
    @log_helpers.log_method_call
    def delete_port_chain(self, context):
        super(OpenDaylightSFCDriverV2, self).delete_port_chain(context)
networking-odl-16.0.0/networking_odl/db/0000775000175000017500000000000013656750617020215 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/db/head.py0000664000175000017500000000162213656750541021465 0ustar zuulzuul00000000000000# Copyright 2016 Intel Corporation.
# Copyright 2016 Isaku Yamahata
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.db.migration.models import head
from networking_odl.db import models # noqa
def get_metadata():
    """Return the SQLAlchemy MetaData of neutron's declarative base.

    Used as alembic's target metadata for autogenerating migrations.
    """
    base = head.model_base.BASEV2
    return base.metadata
networking-odl-16.0.0/networking_odl/db/__init__.py0000664000175000017500000000000013656750541022310 0ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/db/db.py0000664000175000017500000002220313656750541021147 0ustar zuulzuul00000000000000# Copyright (c) 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from neutron_lib.db import api as db_api
from oslo_log import log as logging
from sqlalchemy import asc
from sqlalchemy import bindparam
from sqlalchemy.ext import baked
from sqlalchemy import func
from sqlalchemy import or_
from sqlalchemy.orm import aliased
from networking_odl.common import constants as odl_const
from networking_odl.db import models
LOG = logging.getLogger(__name__)
bakery = baked.bakery()
def get_pending_or_processing_ops(context, object_uuid, operation=None):
    """Return journal rows for object_uuid in PENDING or PROCESSING state.

    :param operation: optional operation, or list/tuple of operations, to
        additionally filter on; None means any operation.
    """
    # NOTE (sai): For performance reasons, we expect this method to use baked
    # query (http://docs.sqlalchemy.org/en/latest/orm/extensions/baked.html)
    baked_query = bakery(lambda s: s.query(
        models.OpenDaylightJournal))
    baked_query += lambda q: q.filter(
        or_(models.OpenDaylightJournal.state == odl_const.PENDING,
            models.OpenDaylightJournal.state == odl_const.PROCESSING),
        models.OpenDaylightJournal.object_uuid == bindparam('uuid'))
    if operation:
        if isinstance(operation, (list, tuple)):
            # 'expanding' lets one bind parameter carry a whole IN-list.
            baked_query += lambda q: q.filter(
                models.OpenDaylightJournal.operation.in_(bindparam('op',
                                                         expanding=True)))
        else:
            baked_query += lambda q: q.filter(
                models.OpenDaylightJournal.operation == bindparam('op'))
    # 'op' is always supplied; it is simply unused when no operation
    # filter was appended to the baked query.
    return baked_query(context.session).params(
        uuid=object_uuid, op=operation).all()
def get_pending_delete_ops_with_parent(context, object_type, parent_id):
    """Yield unfinished delete rows whose payload references parent_id.

    "Unfinished" means the row is still PENDING or PROCESSING.
    """
    candidates = context.session.query(models.OpenDaylightJournal).filter(
        or_(models.OpenDaylightJournal.state == odl_const.PENDING,
            models.OpenDaylightJournal.state == odl_const.PROCESSING),
        models.OpenDaylightJournal.object_type == object_type,
        models.OpenDaylightJournal.operation == odl_const.ODL_DELETE
    ).all()
    # The parent reference lives inside the serialized row data, so the
    # final filtering happens in Python rather than SQL.
    return (entry for entry in candidates if parent_id in entry.data)
def get_all_db_rows(context):
    """Return every journal row, regardless of state."""
    query = context.session.query(models.OpenDaylightJournal)
    return query.all()
def get_all_db_rows_by_state(context, state):
    """Return all journal rows currently in the given state."""
    query = context.session.query(models.OpenDaylightJournal)
    return query.filter_by(state=state).all()
# Retry deadlock exception for Galera DB.
# If two (or more) different threads call this method at the same time, they
# might both succeed in changing the same row to pending, but at least one
# of them will get a deadlock from Galera and will have to retry the operation.
@db_api.retry_if_session_inactive()
@db_api.CONTEXT_WRITER.savepoint
def get_oldest_pending_db_row_with_lock(context):
    """Claim the oldest runnable PENDING journal row.

    A row is runnable only when none of the rows it depends on are still
    PENDING or PROCESSING.  The claimed row is flipped to PROCESSING before
    being returned; returns None when no row is eligible.
    """
    # NOTE (sai): For performance reasons, we expect this method to use baked
    # query (http://docs.sqlalchemy.org/en/latest/orm/extensions/baked.html)
    journal_dep = aliased(models.OpenDaylightJournal)
    # Correlated subquery: does the candidate row still have a dependency
    # that is not yet finished (PENDING or PROCESSING)?
    dep_query = bakery(lambda s1: s1.query(journal_dep))
    dep_query += lambda q: q.filter(
        models.OpenDaylightJournal.seqnum == journal_dep.seqnum)
    dep_query += lambda q: q.outerjoin(
        journal_dep.depending_on, aliased=True)
    dep_query += lambda q: q.filter(
        or_(models.OpenDaylightJournal.state == odl_const.PENDING,
            models.OpenDaylightJournal.state == odl_const.PROCESSING))
    row = bakery(lambda s2: s2.query(models.OpenDaylightJournal))
    row += lambda q: q.filter(
        models.OpenDaylightJournal.state == odl_const.PENDING,
        ~ (dep_query._as_query(q.session)).exists())
    # Oldest last_retried first, so retried entries do not starve others.
    row += lambda q: q.order_by(
        asc(models.OpenDaylightJournal.last_retried))
    row = row(context.session).first()
    if row:
        update_db_row_state(context, row, odl_const.PROCESSING)
    return row
def delete_dependency(context, entry):
    """Delete dependency upon the given ID"""
    stmt = models.journal_dependencies.delete(
        models.journal_dependencies.c.depends_on == entry.seqnum)
    context.session.connection().execute(stmt)
    # The core-level DELETE bypasses the ORM, so cached rows must be expired.
    context.session.expire_all()
def update_db_row_state(context, row, state, flush=True):
    """Set row.state and merge the row into the session (flushed by default)."""
    row.state = state
    session = context.session
    session.merge(row)
    if flush:
        session.flush()
def update_pending_db_row_retry(context, row, retry_count):
    """Re-queue the row as PENDING, or mark it FAILED once retries run out."""
    if row.retry_count < retry_count:
        row.retry_count += 1
        update_db_row_state(context, row, odl_const.PENDING)
    else:
        update_db_row_state(context, row, odl_const.FAILED)
def delete_row(context, row=None, row_id=None, flush=True):
    """Delete one journal row, given either the object or its seqnum.

    When both are supplied, row_id takes precedence.
    """
    target = row
    if row_id:
        target = context.session.query(models.OpenDaylightJournal).filter_by(
            seqnum=row_id).one()
    if target:
        context.session.delete(target)
        if flush:
            context.session.flush()
def create_pending_row(context, object_type, object_uuid,
                       operation, data, depending_on=None):
    """Insert a new PENDING journal row and flush it to the database."""
    row = models.OpenDaylightJournal(
        object_type=object_type,
        object_uuid=object_uuid,
        operation=operation,
        data=data,
        state=odl_const.PENDING,
        depending_on=depending_on if depending_on is not None else [])
    context.session.add(row)
    # Keep session flush for unit tests. NOOP for L2/L3 events since calls are
    # made inside database session transaction with subtransactions=True.
    context.session.flush()
    return row
@db_api.CONTEXT_WRITER.savepoint
def delete_pending_rows(context, operations_to_delete):
    """Bulk-delete PENDING rows whose operation is in operations_to_delete."""
    (context.session.query(models.OpenDaylightJournal)
     .filter(models.OpenDaylightJournal.state == odl_const.PENDING,
             models.OpenDaylightJournal.operation.in_(operations_to_delete))
     .delete(synchronize_session=False))
    # The bulk DELETE bypasses the session's identity map, so expire it.
    context.session.expire_all()
def _update_periodic_task_state(context, expected_state, state, task):
    """Compare-and-set the task row's state; return True on success.

    SELECT ... FOR UPDATE guarantees only one caller wins the transition.
    """
    row = context.session.query(models.OpenDaylightPeriodicTask).filter_by(
        state=expected_state,
        task=task).with_for_update().one_or_none()
    if row is not None:
        row.state = state
        return True
    return False
def was_periodic_task_executed_recently(context, task, interval):
    """Report whether the named periodic task ran within `interval` seconds.

    Returns True when no row exists for `task` whose lock_updated timestamp
    is older than now - interval.  Note this is also True when the task row
    does not exist at all -- presumably seeded rows always exist (see the
    migration that bulk-inserts them); verify before relying on that.
    """
    now = context.session.execute(func.now()).scalar()
    delta = datetime.timedelta(seconds=interval)
    row = context.session.query(models.OpenDaylightPeriodicTask).filter(
        models.OpenDaylightPeriodicTask.task == task,
        (now - delta >= (models.OpenDaylightPeriodicTask.lock_updated))
    ).one_or_none()
    # `row is None` is already a bool; the previous bool() wrapper was
    # redundant.
    return row is None
def lock_periodic_task(context, task):
    """Atomically move the task row PENDING -> PROCESSING; True on success."""
    return _update_periodic_task_state(
        context, odl_const.PENDING, odl_const.PROCESSING, task)
def unlock_periodic_task(context, task):
    """Atomically move the task row PROCESSING -> PENDING; True on success."""
    return _update_periodic_task_state(
        context, odl_const.PROCESSING, odl_const.PENDING, task)
def update_periodic_task(context, task, operation=None):
    """Record which operation the periodic task is currently processing.

    The function assumes the lock is held, so it mustn't be run outside of a
    locked context.
    """
    row = context.session.query(models.OpenDaylightPeriodicTask).filter_by(
        task=task).one()
    row.processing_operation = operation.__name__ if operation else None
@db_api.CONTEXT_WRITER.savepoint
def delete_rows_by_state_and_time(context, state, time_delta):
    """Purge rows in `state` whose last retry predates now - time_delta.

    NOTE(mpeterson): rows are removed one at a time because InnoDB ignores
    the WHERE clause when issuing a LOCK for a DELETE; per-row operations
    minimize exposure to deadlocks.
    """
    now = context.session.execute(func.now()).scalar()
    cutoff = now - time_delta
    stale = context.session.query(models.OpenDaylightJournal).filter(
        models.OpenDaylightJournal.state == state,
        models.OpenDaylightJournal.last_retried < cutoff).all()
    for entry in stale:
        delete_row(context, entry, flush=False)
    context.session.expire_all()
@db_api.CONTEXT_WRITER.savepoint
def reset_processing_rows(context, max_timedelta):
    """Requeue stale PROCESSING rows as PENDING; return how many were reset.

    NOTE(mpeterson): rows are updated one at a time because InnoDB ignores
    the WHERE clause when issuing a LOCK for an UPDATE; per-row operations
    minimize exposure to deadlocks.
    """
    now = context.session.execute(func.now()).scalar()
    cutoff = now - datetime.timedelta(seconds=max_timedelta)
    stale = context.session.query(models.OpenDaylightJournal).filter(
        models.OpenDaylightJournal.last_retried < cutoff,
        models.OpenDaylightJournal.state == odl_const.PROCESSING).all()
    for entry in stale:
        update_db_row_state(context, entry, odl_const.PENDING, flush=False)
    return len(stale)
networking-odl-16.0.0/networking_odl/db/migration/0000775000175000017500000000000013656750617022206 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/db/migration/__init__.py0000664000175000017500000000000013656750541024301 0ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/db/migration/alembic_migrations/0000775000175000017500000000000013656750617026036 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/db/migration/alembic_migrations/__init__.py0000664000175000017500000000000013656750541030131 0ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/db/migration/alembic_migrations/versions/0000775000175000017500000000000013656750617027706 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/db/migration/alembic_migrations/versions/pike/0000775000175000017500000000000013656750617030636 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/db/migration/alembic_migrations/versions/pike/expand/0000775000175000017500000000000013656750617032115 5ustar zuulzuul00000000000000././@LongLink0000000000000000000000000000022200000000000011211 Lustar 00000000000000networking-odl-16.0.0/networking_odl/db/migration/alembic_migrations/versions/pike/expand/6f7dfb241354_create_opendaylight_preiodic_task_table.pynetworking-odl-16.0.0/networking_odl/db/migration/alembic_migrations/versions/pike/expand/6f7dfb24130000664000175000017500000000324413656750541033427 0ustar zuulzuul00000000000000# Copyright 2017 NEC Corp
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""create opendaylight_preiodic_task table
Revision ID: 6f7dfb241354
Revises: 0472f56ff2fb
Create Date: 2017-05-24 03:01:00.755796
"""
from alembic import op
import sqlalchemy as sa
from networking_odl.common import constants as odl_const
# revision identifiers, used by Alembic.
revision = '6f7dfb241354'
down_revision = '0472f56ff2fb'
def upgrade():
    """Create the opendaylight_periodic_task table and seed its task rows."""
    periodic_table = op.create_table(
        'opendaylight_periodic_task',
        sa.Column('state', sa.Enum(odl_const.PENDING, odl_const.PROCESSING,
                                   name='state'),
                  nullable=False),
        sa.Column('processing_operation', sa.String(70)),
        sa.Column('task', sa.String(70), primary_key=True),
        sa.Column('lock_updated', sa.TIMESTAMP, nullable=False,
                  server_default=sa.func.now(),
                  onupdate=sa.func.now())
    )
    # Seed one row per known periodic task, ready to be locked.
    initial_tasks = [
        {'task': 'maintenance', 'state': odl_const.PENDING},
        {'task': 'hostconfig', 'state': odl_const.PENDING},
    ]
    op.bulk_insert(periodic_table, initial_tasks)
././@LongLink0000000000000000000000000000022200000000000011211 Lustar 00000000000000networking-odl-16.0.0/networking_odl/db/migration/alembic_migrations/versions/pike/expand/43af357fd638_added_version_id_for_optimistic_locking.pynetworking-odl-16.0.0/networking_odl/db/migration/alembic_migrations/versions/pike/expand/43af357fd60000664000175000017500000000211713656750541033431 0ustar zuulzuul00000000000000# Copyright (C) 2017 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Added version_id for optimistic locking
Revision ID: 43af357fd638
Revises: 3d560427d776
Create Date: 2016-03-24 10:14:56.408413
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '43af357fd638'
down_revision = '3d560427d776'
depends_on = ('fa0c536252a5',)
def upgrade():
    """Add the version_id column used by the ORM for optimistic locking."""
    version_column = sa.Column('version_id', sa.Integer,
                               server_default='0', nullable=False)
    op.add_column('opendaylightjournal', version_column)
././@LongLink0000000000000000000000000000021100000000000011207 Lustar 00000000000000networking-odl-16.0.0/networking_odl/db/migration/alembic_migrations/versions/pike/expand/0472f56ff2fb_add_journal_dependencies_table.pynetworking-odl-16.0.0/networking_odl/db/migration/alembic_migrations/versions/pike/expand/0472f56ff20000664000175000017500000000251613656750541033353 0ustar zuulzuul00000000000000# Copyright 2017 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Add journal dependencies table
Revision ID: 0472f56ff2fb
Revises: 43af357fd638
Create Date: 2017-04-02 11:02:01.622548
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0472f56ff2fb'
down_revision = '43af357fd638'
def upgrade():
    """Create the journal dependency association table."""
    def _journal_ref(column_name):
        # Both columns are identical apart from their name: a BigInteger
        # foreign key into the journal, part of the composite primary key,
        # cascading away when the referenced journal row is deleted.
        return sa.Column(column_name, sa.BigInteger(),
                         sa.ForeignKey('opendaylightjournal.seqnum',
                                       ondelete='CASCADE'),
                         primary_key=True)

    op.create_table('opendaylight_journal_deps',
                    _journal_ref('depends_on'),
                    _journal_ref('dependent'))
networking-odl-16.0.0/networking_odl/db/migration/alembic_migrations/versions/pike/contract/0000775000175000017500000000000013656750617032453 5ustar zuulzuul00000000000000././@LongLink0000000000000000000000000000022000000000000011207 Lustar 00000000000000networking-odl-16.0.0/networking_odl/db/migration/alembic_migrations/versions/pike/contract/eccd865b7d3a_drop_opendaylight_maintenance_table.pynetworking-odl-16.0.0/networking_odl/db/migration/alembic_migrations/versions/pike/contract/eccd865b0000664000175000017500000000163213656750541033677 0ustar zuulzuul00000000000000# Copyright 2017 NEC Corp
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""drop opendaylight_maintenance table
Revision ID: eccd865b7d3a
Revises: fa0c536252a5
Create Date: 2017-05-24 03:00:40.194278
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = 'eccd865b7d3a'
down_revision = 'fa0c536252a5'
def upgrade():
    # Contract phase: the opendaylight_maintenance table is no longer used
    # (its role was taken over by the periodic-task table), so drop it.
    op.drop_table('opendaylight_maintenance')
././@LongLink0000000000000000000000000000020300000000000011210 Lustar 00000000000000networking-odl-16.0.0/networking_odl/db/migration/alembic_migrations/versions/pike/contract/7cbef5a56298_drop_created_at_column.pynetworking-odl-16.0.0/networking_odl/db/migration/alembic_migrations/versions/pike/contract/7cbef5a50000664000175000017500000000163113656750541033674 0ustar zuulzuul00000000000000# Copyright 2017, NEC Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Drop created_at column
Revision ID: 7cbef5a56298
Revises: eccd865b7d3a
Create Date: 2017-08-16 05:49:53.964988
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = '7cbef5a56298'
down_revision = 'eccd865b7d3a'
def upgrade():
    # Contract phase: remove the unused created_at column from the journal.
    op.drop_column('opendaylightjournal', 'created_at')
networking-odl-16.0.0/networking_odl/db/migration/alembic_migrations/versions/newton/0000775000175000017500000000000013656750617031220 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/db/migration/alembic_migrations/versions/newton/expand/0000775000175000017500000000000013656750617032477 5ustar zuulzuul00000000000000././@LongLink0000000000000000000000000000021200000000000011210 Lustar 00000000000000networking-odl-16.0.0/networking_odl/db/migration/alembic_migrations/versions/newton/expand/703dbf02afde_add_journal_maintenance_table.pynetworking-odl-16.0.0/networking_odl/db/migration/alembic_migrations/versions/newton/expand/703dbf020000664000175000017500000000332213656750541033545 0ustar zuulzuul00000000000000# Copyright 2016 Red Hat Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Add journal maintenance table
Revision ID: 703dbf02afde
Revises: 37e242787ae5
Create Date: 2016-04-12 10:49:31.802663
"""
from alembic import op
from oslo_utils import uuidutils
import sqlalchemy as sa
from networking_odl.common import constants as odl_const
# revision identifiers, used by Alembic.
revision = '703dbf02afde'
down_revision = '37e242787ae5'
def upgrade():
    """Create opendaylight_maintenance and seed its single lock row."""
    columns = [
        sa.Column('id', sa.String(36), primary_key=True),
        sa.Column('state', sa.Enum(odl_const.PENDING, odl_const.PROCESSING,
                                   name='state'),
                  nullable=False),
        sa.Column('processing_operation', sa.String(70)),
        sa.Column('lock_updated', sa.TIMESTAMP, nullable=False,
                  server_default=sa.func.now(),
                  onupdate=sa.func.now()),
    ]
    maint_table = op.create_table('opendaylight_maintenance', *columns)
    # Insert the only row here that is used to synchronize the lock between
    # different Neutron processes.
    op.bulk_insert(maint_table, [{'id': uuidutils.generate_uuid(),
                                  'state': odl_const.PENDING}])
././@LongLink0000000000000000000000000000021300000000000011211 Lustar 00000000000000networking-odl-16.0.0/networking_odl/db/migration/alembic_migrations/versions/newton/expand/3d560427d776_add_sequence_number_to_journal.pynetworking-odl-16.0.0/networking_odl/db/migration/alembic_migrations/versions/newton/expand/3d5604270000664000175000017500000000337413656750541033423 0ustar zuulzuul00000000000000# Copyright 2016 Isaku Yamahata
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""add sequence number to journal
Revision ID: 3d560427d776
Revises: 703dbf02afde
Create Date: 2016-08-05 15:50:22.151078
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '3d560427d776'
down_revision = '703dbf02afde'
def upgrade():
    """Create the seqnum-keyed replacement journal table.

    Rows are copied over and the table is renamed into place by the
    companion contract migration.
    """
    state_enum = sa.Enum('pending', 'processing', 'failed', 'completed',
                         name='state')
    op.create_table(
        'opendaylightjournal_new',
        # Auto-incrementing sequence number replaces the old UUID key.
        sa.Column('seqnum', sa.BigInteger(),
                  primary_key=True, autoincrement=True),
        sa.Column('object_type', sa.String(36), nullable=False),
        sa.Column('object_uuid', sa.String(36), nullable=False),
        sa.Column('operation', sa.String(36), nullable=False),
        sa.Column('data', sa.PickleType, nullable=True),
        sa.Column('state', state_enum, nullable=False, default='pending'),
        sa.Column('retry_count', sa.Integer, default=0),
        sa.Column('created_at', sa.DateTime, default=sa.func.now()),
        sa.Column('last_retried', sa.TIMESTAMP, server_default=sa.func.now(),
                  onupdate=sa.func.now()),
    )
networking-odl-16.0.0/networking_odl/db/migration/alembic_migrations/versions/newton/contract/0000775000175000017500000000000013656750617033035 5ustar zuulzuul00000000000000././@LongLink0000000000000000000000000000021200000000000011210 Lustar 00000000000000networking-odl-16.0.0/networking_odl/db/migration/alembic_migrations/versions/newton/contract/fa0c536252a5_update_opendayligut_journal.pynetworking-odl-16.0.0/networking_odl/db/migration/alembic_migrations/versions/newton/contract/fa0c530000664000175000017500000000316113656750541033736 0ustar zuulzuul00000000000000# Copyright 2016 Isaku Yamahata
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""update opendaylight journal
Revision ID: fa0c536252a5
Revises: 383acb0d38a0
Create Date: 2016-08-05 23:03:46.470595
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = 'fa0c536252a5'
down_revision = '383acb0d38a0'
depends_on = ('3d560427d776', )
def upgrade():
    # Since a new primary key is introduced and alembic doesn't allow to
    # add new primary key, create a new table with new primary key and
    # rename it.
    # Copy every row that still needs processing, preserving creation order
    # so the generated seqnums follow the original insertion order.
    op.execute("INSERT INTO opendaylightjournal_new "
               "(object_type, object_uuid, operation, data, "
               "state, retry_count, created_at, last_retried) "
               "SELECT object_type, object_uuid, operation, data, "
               "state, retry_count, created_at, last_retried "
               "FROM opendaylightjournal "
               "WHERE state != 'completed' "
               "ORDER BY created_at ASC")
    # Swap tables: drop the old journal, rename the new one into its place.
    op.drop_table('opendaylightjournal')
    op.rename_table('opendaylightjournal_new', 'opendaylightjournal')
networking-odl-16.0.0/networking_odl/db/migration/alembic_migrations/versions/CONTRACT_HEAD0000664000175000017500000000001513656750541031617 0ustar zuulzuul000000000000007cbef5a56298
././@LongLink0000000000000000000000000000016200000000000011214 Lustar 00000000000000networking-odl-16.0.0/networking_odl/db/migration/alembic_migrations/versions/b89a299e19f9_initial_branchpoint.pynetworking-odl-16.0.0/networking_odl/db/migration/alembic_migrations/versions/b89a299e19f9_initial_b0000664000175000017500000000144713656750541033432 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Initial odl db, branchpoint
Revision ID: b89a299e19f9
Revises: None
Create Date: 2015-09-03 22:22:22.222222
"""
# revision identifiers, used by Alembic.
revision = 'b89a299e19f9'
down_revision = None
def upgrade():
    # Branch point revision: no schema changes, real work starts in the
    # expand/contract branches.
    pass
networking-odl-16.0.0/networking_odl/db/migration/alembic_migrations/versions/EXPAND_HEAD0000664000175000017500000000001513656750541031361 0ustar zuulzuul000000000000006f7dfb241354
networking-odl-16.0.0/networking_odl/db/migration/alembic_migrations/versions/mitaka/0000775000175000017500000000000013656750617031154 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/db/migration/alembic_migrations/versions/mitaka/expand/0000775000175000017500000000000013656750617032433 5ustar zuulzuul00000000000000././@LongLink0000000000000000000000000000022300000000000011212 Lustar 00000000000000networking-odl-16.0.0/networking_odl/db/migration/alembic_migrations/versions/mitaka/expand/37e242787ae5_opendaylight_neutron_mechanism_driver_.pynetworking-odl-16.0.0/networking_odl/db/migration/alembic_migrations/versions/mitaka/expand/37e242780000664000175000017500000000350313656750541033360 0ustar zuulzuul00000000000000# Copyright (c) 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""OpenDaylight Neutron mechanism driver refactor
Revision ID: 37e242787ae5
Revises: 247501328046
Create Date: 2015-10-30 22:09:27.221767
"""
from neutron.db import migration
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '37e242787ae5'
down_revision = '247501328046'
# milestone identifier, used by neutron-db-manage
neutron_milestone = [migration.MITAKA]
def upgrade():
    """Create the original UUID-keyed opendaylightjournal table."""
    state_enum = sa.Enum('pending', 'processing', 'failed', 'completed',
                         name='state')
    op.create_table(
        'opendaylightjournal',
        sa.Column('id', sa.String(36), primary_key=True),
        sa.Column('object_type', sa.String(36), nullable=False),
        sa.Column('object_uuid', sa.String(36), nullable=False),
        sa.Column('operation', sa.String(36), nullable=False),
        sa.Column('data', sa.PickleType, nullable=True),
        sa.Column('state', state_enum, nullable=False, default='pending'),
        sa.Column('retry_count', sa.Integer, default=0),
        sa.Column('created_at', sa.DateTime, default=sa.func.now()),
        sa.Column('last_retried', sa.TIMESTAMP, server_default=sa.func.now(),
                  onupdate=sa.func.now())
    )
././@LongLink0000000000000000000000000000017300000000000011216 Lustar 00000000000000networking-odl-16.0.0/networking_odl/db/migration/alembic_migrations/versions/mitaka/expand/247501328046_initial_expand.pynetworking-odl-16.0.0/networking_odl/db/migration/alembic_migrations/versions/mitaka/expand/247501320000664000175000017500000000160413656750541033262 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Start of odl expand branch
Revision ID: 247501328046
Revises: b89a299e19f9
Create Date: 2015-09-03 22:27:49.292238
"""
from neutron.db.migration import cli
# revision identifiers, used by Alembic.
revision = '247501328046'
down_revision = 'b89a299e19f9'
branch_labels = (cli.EXPAND_BRANCH,)
def upgrade():
    # Marker revision labeling the start of the expand branch; no schema
    # changes here.
    pass
networking-odl-16.0.0/networking_odl/db/migration/alembic_migrations/versions/mitaka/contract/0000775000175000017500000000000013656750617032771 5ustar zuulzuul00000000000000././@LongLink0000000000000000000000000000017700000000000011222 Lustar 00000000000000networking-odl-16.0.0/networking_odl/db/migration/alembic_migrations/versions/mitaka/contract/383acb0d38a0_initial_contract.pynetworking-odl-16.0.0/networking_odl/db/migration/alembic_migrations/versions/mitaka/contract/383acb0000664000175000017500000000200313656750541033666 0ustar zuulzuul00000000000000# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Start of odl contract branch
Revision ID: 383acb0d38a0
Revises: b89a299e19f9
Create Date: 2015-09-03 22:27:49.306394
"""
from neutron.db import migration
from neutron.db.migration import cli
# revision identifiers, used by Alembic.
revision = '383acb0d38a0'
down_revision = 'b89a299e19f9'
branch_labels = (cli.CONTRACT_BRANCH,)
# milestone identifier, used by neutron-db-manage
neutron_milestone = [migration.MITAKA]
def upgrade():
    # Marker revision labeling the start of the contract branch; no schema
    # changes here.
    pass
networking-odl-16.0.0/networking_odl/db/migration/alembic_migrations/README0000664000175000017500000000011613656750541026710 0ustar zuulzuul00000000000000This directory contains the migration scripts for the networking_odl project.
networking-odl-16.0.0/networking_odl/db/migration/alembic_migrations/env.py0000664000175000017500000000542013656750541027175 0ustar zuulzuul00000000000000# Copyright 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from logging import config as logging_config
from alembic import context
from neutron_lib.db import model_base
from oslo_config import cfg
from oslo_db.sqlalchemy import session
import sqlalchemy as sa
from sqlalchemy import event
from neutron.db.migration.alembic_migrations import external
from neutron.db.migration.models import head # noqa
MYSQL_ENGINE = None
ODL_VERSION_TABLE = 'odl_alembic_version'
config = context.config
neutron_config = config.neutron_config
logging_config.fileConfig(config.config_file_name)
target_metadata = model_base.BASEV2.metadata
def set_mysql_engine():
    """Resolve the MySQL storage engine to use and cache it in MYSQL_ENGINE."""
    global MYSQL_ENGINE
    try:
        engine_opt = neutron_config.command.mysql_engine
    except cfg.NoSuchOptError:
        # The option isn't registered for every neutron-db-manage command.
        engine_opt = None
    # Fall back to the engine declared on neutron's base model when the
    # command line did not supply one.
    MYSQL_ENGINE = (engine_opt or
                    model_base.BASEV2.__table_args__['mysql_engine'])
def include_object(object, name, type_, reflected, compare_to):
    """Alembic filter callback: exclude tables managed by other projects.

    Tables listed in neutron's "external" set belong to other migration
    branches and must not be touched by this project's migrations.
    """
    return not (type_ == 'table' and name in external.TABLES)
def run_migrations_offline():
    """Run migrations in 'offline' mode, emitting SQL instead of executing.

    The context is configured with the database URL when one is set,
    otherwise with just the dialect name so SQL can still be generated.
    """
    set_mysql_engine()
    db_config = neutron_config.database
    if db_config.connection:
        kwargs = {'url': db_config.connection}
    else:
        kwargs = {'dialect_name': db_config.engine}
    kwargs['include_object'] = include_object
    kwargs['version_table'] = ODL_VERSION_TABLE
    context.configure(**kwargs)
    with context.begin_transaction():
        context.run_migrations()
@event.listens_for(sa.Table, 'after_parent_attach')
def set_storage_engine(target, parent):
    # Apply the storage engine resolved by set_mysql_engine() to every
    # table as it is attached to its metadata during the migration run.
    if MYSQL_ENGINE:
        target.kwargs['mysql_engine'] = MYSQL_ENGINE
def run_migrations_online():
    """Run migrations against a live database connection."""
    set_mysql_engine()
    engine = session.create_engine(neutron_config.database.connection)
    connection = engine.connect()
    context.configure(connection=connection,
                      target_metadata=target_metadata,
                      include_object=include_object,
                      version_table=ODL_VERSION_TABLE)
    try:
        with context.begin_transaction():
            context.run_migrations()
    finally:
        # Always release the connection and engine, even when a migration
        # fails mid-way.
        connection.close()
        engine.dispose()
# Module entry point: alembic imports this file, so the appropriate
# migration mode runs immediately at import time.
if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
networking-odl-16.0.0/networking_odl/db/migration/alembic_migrations/script.py.mako0000664000175000017500000000200613656750541030634 0ustar zuulzuul00000000000000# Copyright 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""${message}
Revision ID: ${up_revision}
Revises: ${down_revision}
Create Date: ${create_date}
"""
# revision identifiers, used by Alembic.
revision = ${repr(up_revision)}
down_revision = ${repr(down_revision)}
% if branch_labels:
branch_labels = ${repr(branch_labels)}
%endif
from alembic import op
import sqlalchemy as sa
${imports if imports else ""}
def upgrade():
${upgrades if upgrades else "pass"}
networking-odl-16.0.0/networking_odl/db/models.py0000664000175000017500000000564013656750541022053 0ustar zuulzuul00000000000000# Copyright (c) 2015 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
from neutron_lib.db import model_base
from networking_odl.common import constants as odl_const
# BigInteger id in production databases, downgraded to plain Integer on
# SQLite where only INTEGER primary keys auto-increment.
IdType = sa.BigInteger().with_variant(sa.Integer(), 'sqlite')

# Association table for the self-referential journal dependency graph:
# (depends_on, dependent) pairs of journal seqnums forming a composite
# primary key; rows are cascaded away when either journal entry is deleted.
journal_dependencies = sa.Table(
    'opendaylight_journal_deps', model_base.BASEV2.metadata,
    sa.Column('depends_on', IdType,
              sa.ForeignKey('opendaylightjournal.seqnum', ondelete='CASCADE'),
              primary_key=True),
    sa.Column('dependent', IdType,
              sa.ForeignKey('opendaylightjournal.seqnum', ondelete='CASCADE'),
              primary_key=True))
class OpenDaylightJournal(model_base.BASEV2):
    """Journal row recording one operation on a neutron resource.

    Each row holds the operation, the target object's type/uuid, its
    serialized data and the processing state.
    """
    __tablename__ = 'opendaylightjournal'
    # Monotonically increasing sequence number (primary key).
    seqnum = sa.Column(IdType, primary_key=True, autoincrement=True)
    object_type = sa.Column(sa.String(36), nullable=False)
    object_uuid = sa.Column(sa.String(36), nullable=False)
    operation = sa.Column(sa.String(36), nullable=False)
    # Pickled resource payload; may be None.
    data = sa.Column(sa.PickleType, nullable=True)
    state = sa.Column(sa.Enum(odl_const.PENDING, odl_const.PROCESSING,
                              odl_const.FAILED, odl_const.COMPLETED,
                              name='state'),
                      nullable=False, default=odl_const.PENDING)
    retry_count = sa.Column(sa.Integer, default=0)
    # Refreshed automatically on every row update (onupdate=now()).
    last_retried = sa.Column(sa.TIMESTAMP, server_default=sa.func.now(),
                             onupdate=sa.func.now())
    # Bumped automatically by SQLAlchemy (see __mapper_args__ below) to
    # implement optimistic locking on concurrent updates.
    version_id = sa.Column(sa.Integer, server_default='0', nullable=False)
    # Self-referential many-to-many through the journal_dependencies table,
    # joined on depends_on (this row) -> dependent (related rows); the
    # reverse direction is exposed via the "depending_on" backref.
    dependencies = sa.orm.relationship(
        "OpenDaylightJournal", secondary=journal_dependencies,
        primaryjoin=seqnum == journal_dependencies.c.depends_on,
        secondaryjoin=seqnum == journal_dependencies.c.dependent,
        backref="depending_on"
    )
    __mapper_args__ = {
        'version_id_col': version_id
    }
class OpenDaylightPeriodicTask(model_base.BASEV2):
    """State row per named periodic task (e.g. 'maintenance', 'hostconfig')."""
    __tablename__ = 'opendaylight_periodic_task'
    # Task lock state: PENDING or PROCESSING (values from odl_const).
    state = sa.Column(sa.Enum(odl_const.PENDING, odl_const.PROCESSING,
                              name='state'),
                      nullable=False)
    # Operation currently being processed, if any.
    processing_operation = sa.Column(sa.String(70))
    # Task name; primary key, one row per task.
    task = sa.Column(sa.String(70), primary_key=True)
    # Refreshed automatically on every row update (onupdate=now()).
    lock_updated = sa.Column(sa.TIMESTAMP, nullable=False,
                             server_default=sa.func.now(),
                             onupdate=sa.func.now())
networking-odl-16.0.0/networking_odl/cmd/0000775000175000017500000000000013656750617020373 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/cmd/__init__.py0000664000175000017500000000000013656750541022466 0ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/cmd/test_setup_hostconfigs.sh0000775000175000017500000000046613656750541025541 0ustar zuulzuul00000000000000#!/bin/sh
# Smoke-test set_ovs_hostconfigs.py: feed it a sample ODL L2/L3 host
# configuration JSON with debug logging enabled.
python set_ovs_hostconfigs.py --debug --ovs_hostconfigs='{"ODL L2": {"supported_vnic_types":[{"vnic_type":"normal", "vif_type":"ovs", "vif_details":{}}], "allowed_network_types":["local","vlan", "vxlan","gre"], "bridge_mappings":{"physnet1":"br-ex"}}, "ODL L3": {"some_details": "dummy_details"}}'
networking-odl-16.0.0/networking_odl/cmd/analyze_journal.py0000775000175000017500000001106013656750541024137 0ustar zuulzuul00000000000000#!/usr/bin/env python
# Copyright (c) 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Command line script to analyze journal entry processing time based on logs.
By default the input is read through pipe, unless a log file is specified.
Examples:
Analyzing devstack's Neutron log:
journalctl -u devstack@neutron-api | python analyze_journal.py
Analyzing an arbitrary log file:
python analyze_journal.py --file /path/to/file.log
"""
import collections
import re
import sys
import six
from oslo_config import cfg
from networking_odl._i18n import _
from networking_odl.journal import journal
# Command line options registered with oslo.config in setup_conf().
COMMAND_LINE_OPTIONS = [
    cfg.StrOpt('file', default=None,
               help=_("Log file to analyze.")),
    cfg.IntOpt('slowest', min=1, default=10,
               help=_("Prints the N slowest entries (10 by default).")),
]
# This regex will match any replacement key in the log message and extract
# the key name.
KEY_MATCHER = re.compile(r'\%\((\S+)\)s')
# Names of the replacement keys in the journal's log template, in the order
# they occur; used to label the groups captured by LOG_MATCHER.
LOG_KEYS = KEY_MATCHER.findall(journal.LOG_ENTRY_TEMPLATE)
# LOG_MATCHER is built from the template itself: each %(key)s placeholder
# is swapped for a temporary token, the remainder regex-escaped, then the
# tokens turned into (\S+) capture groups.
KEY_TEMP_PATTERN = 'KEYPATTERN'
LOG_MATCHER = re.compile(
    re.sub(KEY_TEMP_PATTERN, r'(\\S+)', re.escape(
        KEY_MATCHER.sub(KEY_TEMP_PATTERN, journal.LOG_ENTRY_TEMPLATE))))
# Output format for each slow entry printed by print_stats().
ENTRY_LOG_TEMPLATE = ' * Entry id: %s, processing time: %.3fs; %s %s %s'
EntryStats = collections.namedtuple(
    'EntryStats', 'entry_id time op obj_type obj_id')
def setup_conf(output, args):
    """setup cmdline options."""
    # Show this module's usage examples in addition to oslo's generated
    # help output.
    if '-h' in args or '--help' in args:
        six.print_(__doc__, file=output)
    conf = cfg.ConfigOpts()
    conf.register_cli_opts(COMMAND_LINE_OPTIONS)
    conf(args=args)
    return conf
def parse_log(content):
    """Collect per-entry log data from an iterable of log lines.

    Returns a dict mapping entry id to a dict of the parsed template
    fields plus one float timestamp per log type seen for that entry.
    """
    entries = {}
    for line in content:
        match = LOG_MATCHER.search(line)
        if not match:
            # Not a journal entry log line; ignore.
            continue
        parsed = dict(zip(LOG_KEYS, match.groups()))
        # Reuse the existing record for this entry id, or adopt the newly
        # parsed one for a first sighting.
        entry = entries.setdefault(parsed['entry_id'], parsed)
        entry[parsed['log_type']] = float(parsed['timestamp'])
    return entries
def analyze_entries(entries):
    """Build EntryStats for every entry seen both recorded and completed."""
    stats = []
    for entry_id, entry in entries.items():
        recorded = entry.get(journal.LOG_RECORDED)
        completed = entry.get(journal.LOG_COMPLETED)
        # Entries missing either timestamp were cut off by the log window.
        if recorded is None or completed is None:
            continue
        stats.append(EntryStats(entry_id=entry_id,
                                time=completed - recorded,
                                op=entry['op'],
                                obj_type=entry['obj_type'],
                                obj_id=entry['obj_id']))
    return stats
def _percentile(timings, percent):
location = int(len(timings) * (percent / 100.0))
return int(timings[location])
def print_stats(output, slowest, entries_stats):
    """Print aggregate timing statistics plus the N slowest entries."""
    ordered = sorted(entries_stats, key=lambda stats: stats.time)
    timings = [stats.time for stats in ordered]
    avg = sum(timings) / len(timings)
    six.print_('Average processing time: %ss' % avg, file=output)
    six.print_('90th percentile: %ss' % _percentile(timings, 90), file=output)
    six.print_('99th percentile: %ss' % _percentile(timings, 99), file=output)
    six.print_('99.9th percentile: %ss' % _percentile(timings, 99.9),
               file=output)
    six.print_('%s slowest entries:' % slowest, file=output)
    # The slowest N form the tail of the ascending sort; print descending.
    for stats in ordered[:-(slowest + 1):-1]:
        six.print_(ENTRY_LOG_TEMPLATE % stats, file=output)
def get_content(file_name):
    """Return an open handle for *file_name*, or stdin when none is given."""
    if file_name:
        return open(file_name)
    return sys.stdin
def main(output=sys.stdout):
    """Entry point: parse the log, analyze the entries, print statistics.

    Returns 0 on success, 1 when no entry statistics were found.
    """
    conf = setup_conf(output, sys.argv[1:])
    with get_content(conf.file) as content:
        entries = parse_log(content)
    stats = analyze_entries(entries)
    if not stats:
        six.print_('No entry statistics found.', file=output)
        return 1
    print_stats(output, conf.slowest, stats)
    return 0
exit(main())
networking-odl-16.0.0/networking_odl/cmd/set_ovs_hostconfigs.py0000775000175000017500000003767013656750541025051 0ustar zuulzuul00000000000000#!/usr/bin/env python
# Copyright (c) 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Command line script to set host OVS configurations (it requires ovsctl)
Examples:
NOTE: bash accepts new line characters between quotes
To give a full custom json
python set_ovs_hostconfigs.py --ovs_hostconfigs='{
"ODL L2": {
"allowed_network_types":
["local","vlan", "vxlan","gre"],
"bridge_mappings": {"physnet1":"br-ex"}
"supported_vnic_types": [
{
"vnic_type":"normal",
"vif_type":"ovs",
"vif_details":{}
}
],
},
"ODL L3": {}
}'
To make sure to use system data path (Kernel)
python set_ovs_hostconfigs.py --noovs_dpdk
To make sure to use user space data path (vhostuser)
python set_ovs_hostconfigs.py --ovs_dpdk
To give bridge mappings
python set_ovs_hostconfigs.py --bridge_mappings=physnet1:br-ex,physnet2:br-eth0
"""
import os
import socket
import subprocess # nosec
import sys
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
import six
from networking_odl._i18n import _
LOG = logging.getLogger(__name__)

# OVS datapath types that imply a userspace (vhost-user capable) datapath,
# as opposed to the kernel ('system') datapath.
USERSPACE_DATAPATH_TYPES = ['netdev', 'dpdkvhostuser']
COMMAND_LINE_OPTIONS = [
cfg.ListOpt(
'allowed_network_types',
default=['local', 'flat', 'vlan', 'vxlan', 'gre'],
help=_("""
Specifies allowed network types given as a Comma-separated list of
types.
Default: --allowed_network_types=local,vlan,vxlan,gre
""")),
cfg.DictOpt(
'bridge_mappings',
default={},
help=_("""
Comma-separated list of : tuples mapping
physical network names to the agent's node-specific Open vSwitch
bridge names to be used for flat and VLAN networks. The length of
bridge names should be no more than 11. Each bridge must exist, and
should have a physical network interface configured as a port. All
physical networks configured on the server should have mappings to
appropriate bridges on each agent.
Note: If you remove a bridge from this mapping, make sure to
disconnect it from the integration bridge as it won't be managed by
the agent anymore.
Default: --bridge_mappings=
""")),
cfg.StrOpt(
'datapath_type',
choices=['system', 'netdev', 'dpdkvhostuser'],
default=None,
help=_("""
It specifies the OVS data path to use.
If this value is given then --ovs_dpdk will be ignored.
If neither this option or --ovs_dpdk are given then it will use a
valid value for current host.
Choices: --datapath_type=
--datapath_type=system # kernel data path
--datapath_type=netdev # userspace data path
--datapath_type=dpdkvhostuser # userspace data path
Default: --datapath_type=netdev # if support is detected
--datapath_type=system # in all other cases
""")),
cfg.StrOpt(
'host',
default=socket.gethostname(), # pylint: disable=no-member
help=_("""
It specifies the host name of the target machine.
Default: --host=$HOSTNAME # running machine host name
""")),
cfg.IPOpt(
'local_ip',
help=_("""
IP address of local overlay (tunnel) network end-point.
It accepts either an IPv4 or IPv6 address that resides on one
of the host network interfaces. The IP version of this
value must match the value of the 'overlay_ip_version'
option in the ML2 plug-in configuration file on the Neutron
server node(s).
Default: local_ip=
""")),
cfg.BoolOpt(
'ovs_dpdk',
default=None,
help=_("""
It uses user-space type of virtual interface (vhostuser) instead of
the system based one (ovs).
If this option is not specified it tries to detect vhostuser
support on running host and in case of positive match it uses it.
NOTE: if --datapath_type is given then this option is ignored.
Default:
""")),
cfg.BoolOpt(
'ovs_sriov_offload',
default=None,
help=_("""
It adds SR-IOV virtual interface support to allow ovs hardware
offload.
NOTE: This feature should be used with ovs>=2.8.0 and SR-IOV NIC
which support switchdev mode and tc offload.
Default:
""")),
cfg.StrOpt(
'ovs_hostconfigs',
help=_("""
Fives pre-made host configuration for OpenDaylight as a JSON
string.
NOTE: when specified all other options are ignored!
An entry should look like:
--ovs_hostconfigs='{
"ODL L2": {
"allowed_network_types":
["local","vlan", "vxlan","gre"],
"bridge_mappings": {"physnet1":"br-ex"}
"supported_vnic_types": [
{
"vnic_type":"normal",
"vif_type":"ovs",
"vif_details":{}
}
],
},
"ODL L3": {}
}'
Default: --ovs_hostconfigs=
""")),
cfg.StrOpt(
'vhostuser_mode',
choices=['client', 'server'],
default='client',
help=_("""
It specifies the OVS VHostUser mode.
Choices: --vhostuser_mode=client
--vhostuser_mode=server
Default: --vhostuser_mode=client
""")),
cfg.BoolOpt(
'vhostuser_ovs_plug',
default=True,
help=_("""
Enable VHostUser OVS Plug.
Default: --vhostuser_ovs_plug
""")),
cfg.StrOpt(
'vhostuser_port_prefix',
choices=['vhu', 'socket'],
default='vhu',
help=_("""
VHostUser socket port prefix.
Choices: --vhostuser_socket_dir=vhu
--vhostuser_socket_dir=socket
Default: --vhostuser_socket_dir=vhu
""")),
cfg.StrOpt(
'vhostuser_socket_dir',
default='/var/run/openvswitch',
help=_("""
OVS VHostUser socket directory.
Default: --vhostuser_socket_dir=/var/run/openvswitch
""")),
]
def set_ovs_extid_hostconfigs(conf, ovs_vsctl):
    """Push ODL host configurations into the local OVS database.

    Uses the pre-made JSON from --ovs_hostconfigs when given; otherwise
    derives the configuration from the remaining CLI options plus what
    the local ovs-vsctl reports.

    :param conf: parsed command line options.
    :param ovs_vsctl: OvsVsctl wrapper used to write the settings.
    """
    if conf.ovs_hostconfigs:
        # Accept single-quoted pseudo-JSON from the shell by normalising
        # the quote characters before parsing.
        normalized = conf.ovs_hostconfigs.replace("'", '"')
        LOG.debug("SET-HOSTCONFIGS: JSON String %s", normalized)
        hostconfigs = jsonutils.loads(normalized)
    else:
        hostconfigs = _hostconfigs_from_conf(
            conf=conf,
            uuid=ovs_vsctl.uuid(),
            userspace_datapath_types=ovs_vsctl.userspace_datapath_types())

    ovs_vsctl.set_host_name(conf.host)
    for config_name in sorted(hostconfigs):
        ovs_vsctl.set_host_config(config_name, hostconfigs[config_name])

    # Additional settings consumed by the newer netvirt pipeline.
    if conf.local_ip:
        ovs_vsctl.set_local_ip(conf.local_ip)
    if conf.bridge_mappings:
        pairs = ("{}:{}".format(physnet, bridge)
                 for physnet, bridge in conf.bridge_mappings.items())
        ovs_vsctl.set_provider_mappings(",".join(pairs))
def _hostconfigs_from_conf(conf, uuid, userspace_datapath_types):
    """Build the "ODL L2"/"ODL L3" hostconfig dict from CLI options.

    :param conf: parsed command line options.
    :param uuid: Open_vSwitch table row UUID reported by ovs-vsctl.
    :param userspace_datapath_types: tuple of detected userspace datapath
        types (empty when only the kernel datapath is available).
    """
    vif_type = _vif_type_from_conf(
        conf=conf, userspace_datapath_types=userspace_datapath_types)

    # An explicit --datapath_type wins; otherwise derive it from vif_type.
    if conf.datapath_type:
        datapath_type = conf.datapath_type
    elif vif_type == 'ovs':
        datapath_type = 'system'
    else:
        datapath_type = userspace_datapath_types[0]

    vif_details = _vif_details_from_conf(
        conf=conf, uuid=uuid, vif_type=vif_type)

    vnic_types = [
        {
            "vif_details": vif_details,
            "vif_type": vif_type,
            "vnic_type": "normal",
        },
    ]
    # SR-IOV hardware offload adds a "direct" vnic flavour, only
    # meaningful with the kernel (ovs) vif type.
    if vif_type == 'ovs' and conf.ovs_sriov_offload:
        vnic_types.append({
            "vif_details": vif_details,
            "vif_type": vif_type,
            "vnic_type": "direct",
        })

    return {
        "ODL L2": {
            "allowed_network_types": conf.allowed_network_types,
            "bridge_mappings": conf.bridge_mappings,
            "datapath_type": datapath_type,
            "supported_vnic_types": vnic_types,
        }
    }
def _vif_type_from_conf(conf, userspace_datapath_types):
    """Pick the VIF type ('vhostuser' or 'ovs') for this host.

    Priority order: explicit --datapath_type, then the --ovs_dpdk
    tri-state flag, and finally what was detected on the running host.
    """
    # 1) An explicit datapath type always wins.
    if conf.datapath_type:
        if conf.datapath_type not in USERSPACE_DATAPATH_TYPES:
            return 'ovs'
        if conf.datapath_type not in userspace_datapath_types:
            # Honour the operator's choice even without detected support.
            LOG.warning(
                "Using user space data path type '%s' even if no "
                "support was detected.", conf.datapath_type)
        return 'vhostuser'

    # 2) Next, the --ovs_dpdk flag (None means "not specified").
    if conf.ovs_dpdk is False:
        return 'ovs'
    if conf.ovs_dpdk is True:
        if not userspace_datapath_types:
            raise ValueError(_(
                "--ovs_dpdk option was specified but the 'netdev' "
                "datapath_type was not enabled. "
                "To override use option --datapath_type=netdev"))
        return 'vhostuser'

    # 3) Fall back to host auto-detection.
    return 'vhostuser' if userspace_datapath_types else 'ovs'
def _vif_details_from_conf(conf, uuid, vif_type):
    """Assemble the vif_details dict advertised for *vif_type*.

    :param conf: parsed command line options.
    :param uuid: Open_vSwitch table row UUID.
    :param vif_type: either 'ovs' or 'vhostuser'.
    :raises ValueError: for any other *vif_type*.
    """
    # The tunnel endpoint is preferred; fall back to the host name.
    details = {
        "uuid": uuid,
        "host_addresses": [conf.local_ip or conf.host],
    }
    if vif_type == 'ovs':
        # OVS legacy mode: kernel datapath, no vhost-user support.
        details.update({
            "has_datapath_type_netdev": False,
            "support_vhost_user": False,
        })
        return details
    if vif_type == 'vhostuser':
        # Userspace datapath: advertise the vhost-user socket details.
        # '$PORT_ID' is substituted later by the consumer.
        socket_path = os.path.join(
            conf.vhostuser_socket_dir,
            conf.vhostuser_port_prefix + '$PORT_ID')
        details.update({
            "has_datapath_type_netdev": True,
            "support_vhost_user": True,
            "port_prefix": conf.vhostuser_port_prefix,
            "vhostuser_socket_dir": conf.vhostuser_socket_dir,
            "vhostuser_ovs_plug": conf.vhostuser_ovs_plug,
            "vhostuser_mode": conf.vhostuser_mode,
            "vhostuser_socket": socket_path,
        })
        return details
    raise ValueError(_("vif type: '%s' not supported") % vif_type)
def setup_conf(args):
    """Register CLI options and parse *args* into a fresh ConfigOpts."""
    conf = cfg.ConfigOpts()
    # Logging options must be registered before the command line is
    # parsed; see
    # https://docs.openstack.org/oslo.log/latest/user/usage.html#oslo-logging-setup-methods
    logging.register_options(conf)
    if any(flag in args for flag in ('-h', '--help')):
        # Show the module docstring in addition to oslo's generated help.
        print(__doc__)
    conf.register_cli_opts(COMMAND_LINE_OPTIONS)
    conf(args=args)
    return conf
class OvsVsctl(object):
    """Thin wrapper around the ovs-vsctl command line tool.

    Reads from and writes to the Open_vSwitch table of the local OVS
    database, caching values that cannot change during a run.
    """

    COMMAND = 'ovs-vsctl'
    TABLE = 'Open_vSwitch'

    # Lazily populated caches; assigned on the instance at first use.
    _uuid = None
    _datapath_types = None
    _userspace_datapath_types = None

    def uuid(self):
        """Return (and cache) the Open_vSwitch table row UUID."""
        if self._uuid is None:
            self._uuid = self._get('.', '_uuid')
        return self._uuid

    def datapath_types(self):
        """Return (and cache) the datapath types this OVS knows about."""
        if self._datapath_types is None:
            try:
                self._datapath_types = self._get('.', 'datapath_types')
            except subprocess.CalledProcessError:
                # Older OVS has no datapath_types column and supports
                # only the kernel datapath.
                self._datapath_types = 'system'
        return self._datapath_types

    def userspace_datapath_types(self):
        """Return the subset of userspace datapath types supported here."""
        if self._userspace_datapath_types is None:
            available = self.datapath_types()
            self._userspace_datapath_types = tuple(
                candidate for candidate in USERSPACE_DATAPATH_TYPES
                if candidate in available)
        return self._userspace_datapath_types

    def set_host_name(self, host_name):
        """Record this host's name for ODL host-config discovery."""
        self._set_external_ids('odl_os_hostconfig_hostid', host_name)

    def set_host_config(self, name, value):
        """Store one hostconfig entry (value serialized as JSON)."""
        self._set_external_ids(
            name='odl_os_hostconfig_config_' + name.lower().replace(' ', '_'),
            value=jsonutils.dumps(value))

    def set_local_ip(self, local_ip):
        """Record the local tunnel endpoint address."""
        self._set_other_config("local_ip", local_ip)

    def set_provider_mappings(self, provider_mappings):
        """Record the physnet:bridge provider mappings."""
        self._set_other_config("provider_mappings", provider_mappings)

    # --- implementation details ----------------------------------------------

    def _set_external_ids(self, name, value):
        # ext-id key naming follows https://review.opendev.org/#/c/309630/
        entry = 'external_ids:{}={}'.format(name, value)
        self._set(record=self.uuid(), value=entry)

    def _set_other_config(self, name, value):
        entry = 'other_config:{}={}'.format(name, value)
        self._set(record=self.uuid(), value=entry)

    def _get(self, record, name):
        return self._execute('get', self.TABLE, record, name)

    def _set(self, record, value):
        self._execute('set', self.TABLE, record, value)

    def _execute(self, *args):
        command_line = (self.COMMAND,) + args
        LOG.info(
            "SET-HOSTCONFIGS: Executing cmd: %s", ' '.join(command_line))
        output = subprocess.check_output(command_line).strip()  # nosec
        # Note(lajoskatona): on py3 subprocess.check_output returns bytes;
        # decode so callers always receive text.
        if isinstance(output, six.binary_type):
            return output.decode()
        return output
def setup_logging(conf):
    """Enable oslo.log logging for this script.

    :param conf: oslo ConfigOpts with logging options already registered.
    """
    # NOTE: Hacky way to enable logging. oslo.log normally needs more
    # parameters registered, but neutron.conf is not present on compute
    # nodes, so we cannot depend on it. nova-compute is present on compute
    # nodes, though, so the code can depend on oslo.log being available.
    # If the script lacks write permission in the log directory, the stack
    # trace appears on the console instead.
    logging.setup(conf, 'networking-odl')
    LOG.info("Logging enabled!")
def main(args=None):
    """Write ODL host configurations into the local OVS database.

    :param args: command line arguments; defaults to sys.argv[1:].
    :returns: process exit code -- 0 on success, 1 on error.
    """
    if args is None:
        args = sys.argv[1:]
    conf = setup_conf(args)
    setup_logging(conf)
    if os.geteuid() != 0:
        # ovs-vsctl writes to the OVS database, which requires root.
        LOG.error('Root permissions are required to configure ovsdb.')
        return 1
    try:
        set_ovs_extid_hostconfigs(conf=conf, ovs_vsctl=OvsVsctl())
    except Exception as ex:  # pylint: disable=broad-except
        # Show the traceback only when debug logging was requested.
        LOG.error("Fatal error: %s", ex, exc_info=conf.debug)
        return 1
    else:
        return 0


if __name__ == '__main__':
    # Use sys.exit: the bare 'exit' builtin is injected by the site module
    # and is not guaranteed to exist (e.g. when running with python -S).
    sys.exit(main())
networking-odl-16.0.0/networking_odl/locale/0000775000175000017500000000000013656750617021067 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/locale/en_GB/0000775000175000017500000000000013656750617022041 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/locale/en_GB/LC_MESSAGES/0000775000175000017500000000000013656750617023626 5ustar zuulzuul00000000000000networking-odl-16.0.0/networking_odl/locale/en_GB/LC_MESSAGES/networking_odl.po0000664000175000017500000003321213656750541027210 0ustar zuulzuul00000000000000# Andi Chandler , 2017. #zanata
msgid ""
msgstr ""
"Project-Id-Version: networking-odl VERSION\n"
"Report-Msgid-Bugs-To: https://bugs.launchpad.net/openstack-i18n/\n"
"POT-Creation-Date: 2018-03-07 20:20+0000\n"
"MIME-Version: 1.0\n"
"Content-Type: text/plain; charset=UTF-8\n"
"Content-Transfer-Encoding: 8bit\n"
"PO-Revision-Date: 2017-12-12 09:16+0000\n"
"Last-Translator: Andi Chandler \n"
"Language-Team: English (United Kingdom)\n"
"Language: en_GB\n"
"X-Generator: Zanata 4.3.3\n"
"Plural-Forms: nplurals=2; plural=(n != 1)\n"
msgid ""
"\n"
" Comma-separated list of : tuples "
"mapping\n"
" physical network names to the agent's node-specific Open "
"vSwitch\n"
" bridge names to be used for flat and VLAN networks. The length "
"of\n"
" bridge names should be no more than 11. Each bridge must exist, "
"and\n"
" should have a physical network interface configured as a port. "
"All\n"
" physical networks configured on the server should have mappings "
"to\n"
" appropriate bridges on each agent.\n"
"\n"
" Note: If you remove a bridge from this mapping, make sure to\n"
" disconnect it from the integration bridge as it won't be managed "
"by\n"
" the agent anymore.\n"
"\n"
" Default: --bridge_mappings=\n"
" "
msgstr ""
"\n"
" Comma-separated list of : tuples "
"mapping\n"
" physical network names to the agent's node-specific Open "
"vSwitch\n"
" bridge names to be used for flat and VLAN networks. The length "
"of\n"
" bridge names should be no more than 11. Each bridge must exist, "
"and\n"
" should have a physical network interface configured as a port. "
"All\n"
" physical networks configured on the server should have mappings "
"to\n"
" appropriate bridges on each agent.\n"
"\n"
" Note: If you remove a bridge from this mapping, make sure to\n"
" disconnect it from the integration bridge as it won't be managed "
"by\n"
" the agent any more.\n"
"\n"
" Default: --bridge_mappings=\n"
" "
msgid ""
"\n"
" Enable VHostUser OVS Plug.\n"
"\n"
" Default: --vhostuser_ovs_plug\n"
" "
msgstr ""
"\n"
" Enable VHostUser OVS Plug.\n"
"\n"
" Default: --vhostuser_ovs_plug\n"
" "
msgid ""
"\n"
" Fives pre-made host configuration for OpenDaylight as a JSON\n"
" string.\n"
"\n"
" NOTE: when specified all other options are ignored!\n"
"\n"
" An entry should look like:\n"
" --ovs_hostconfigs='{\n"
" \"ODL L2\": {\n"
" \"allowed_network_types\":\n"
" [\"local\",\"vlan\", \"vxlan\",\"gre\"],\n"
" \"bridge_mappings\": {\"physnet1\":\"br-ex\"}\n"
" \"supported_vnic_types\": [\n"
" {\n"
" \"vnic_type\":\"normal\",\n"
" \"vif_type\":\"ovs\",\n"
" \"vif_details\":{}\n"
" }\n"
" ],\n"
" },\n"
" \"ODL L3\": {}\n"
" }'\n"
"\n"
" Default: --ovs_hostconfigs=\n"
" "
msgstr ""
"\n"
"            Gives pre-made host configuration for OpenDaylight as a JSON\n"
" string.\n"
"\n"
" NOTE: when specified all other options are ignored!\n"
"\n"
" An entry should look like:\n"
" --ovs_hostconfigs='{\n"
" \"ODL L2\": {\n"
" \"allowed_network_types\":\n"
" [\"local\",\"vlan\", \"vxlan\",\"gre\"],\n"
" \"bridge_mappings\": {\"physnet1\":\"br-ex\"}\n"
" \"supported_vnic_types\": [\n"
" {\n"
" \"vnic_type\":\"normal\",\n"
" \"vif_type\":\"ovs\",\n"
" \"vif_details\":{}\n"
" }\n"
" ],\n"
" },\n"
" \"ODL L3\": {}\n"
" }'\n"
"\n"
" Default: --ovs_hostconfigs=\n"
" "
msgid ""
"\n"
" IP address of local overlay (tunnel) network end-point.\n"
" It accepts either an IPv4 or IPv6 address that resides on one\n"
" of the host network interfaces. The IP version of this\n"
" value must match the value of the 'overlay_ip_version'\n"
" option in the ML2 plug-in configuration file on the Neutron\n"
" server node(s).\n"
"\n"
" Default: local_ip=\n"
" "
msgstr ""
"\n"
" IP address of local overlay (tunnel) network end-point.\n"
" It accepts either an IPv4 or IPv6 address that resides on one\n"
" of the host network interfaces. The IP version of this\n"
" value must match the value of the 'overlay_ip_version'\n"
" option in the ML2 plug-in configuration file on the Neutron\n"
" server node(s).\n"
"\n"
" Default: local_ip=\n"
" "
msgid ""
"\n"
" It adds SR-IOV virtual interface support to allow ovs hardware\n"
" offload.\n"
"\n"
" NOTE: This feature should be used with ovs>=2.8.0 and SR-IOV "
"NIC\n"
" which support switchdev mode and tc offload.\n"
"\n"
" Default:\n"
" "
msgstr ""
"\n"
" It adds SR-IOV virtual interface support to allow OVS hardware\n"
" offload.\n"
"\n"
" NOTE: This feature should be used with ovs>=2.8.0 and SR-IOV "
"NIC\n"
" which support switchdev mode and tc offload.\n"
"\n"
" Default:\n"
" "
msgid ""
"\n"
" It specifies the OVS VHostUser mode.\n"
"\n"
" Choices: --vhostuser_mode=client\n"
" --vhostuser_mode=server\n"
"\n"
" Default: --vhostuser_mode=client\n"
" "
msgstr ""
"\n"
" It specifies the OVS VHostUser mode.\n"
"\n"
" Choices: --vhostuser_mode=client\n"
" --vhostuser_mode=server\n"
"\n"
" Default: --vhostuser_mode=client\n"
" "
msgid ""
"\n"
" It specifies the OVS data path to use.\n"
"\n"
" If this value is given then --ovs_dpdk will be ignored.\n"
" If neither this option or --ovs_dpdk are given then it will use "
"a\n"
" valid value for current host.\n"
"\n"
" Choices: --datapath_type=\n"
" --datapath_type=system # kernel data path\n"
" --datapath_type=netdev # userspace data path\n"
" --datapath_type=dpdkvhostuser # userspace data path\n"
"\n"
" Default: --datapath_type=netdev # if support is "
"detected\n"
" --datapath_type=system # in all other cases\n"
" "
msgstr ""
"\n"
" It specifies the OVS data path to use.\n"
"\n"
" If this value is given then --ovs_dpdk will be ignored.\n"
" If neither this option or --ovs_dpdk are given then it will use "
"a\n"
" valid value for current host.\n"
"\n"
" Choices: --datapath_type=\n"
" --datapath_type=system # kernel data path\n"
" --datapath_type=netdev # userspace data path\n"
" --datapath_type=dpdkvhostuser # userspace data path\n"
"\n"
" Default: --datapath_type=netdev # if support is "
"detected\n"
" --datapath_type=system # in all other cases\n"
" "
msgid ""
"\n"
" It specifies the host name of the target machine.\n"
"\n"
" Default: --host=$HOSTNAME # running machine host name\n"
" "
msgstr ""
"\n"
" It specifies the host name of the target machine.\n"
"\n"
" Default: --host=$HOSTNAME # running machine host name\n"
" "
msgid ""
"\n"
" It uses user-space type of virtual interface (vhostuser) instead "
"of\n"
" the system based one (ovs).\n"
"\n"
" If this option is not specified it tries to detect vhostuser\n"
" support on running host and in case of positive match it uses "
"it.\n"
"\n"
" NOTE: if --datapath_type is given then this option is ignored.\n"
"\n"
" Default:\n"
" "
msgstr ""
"\n"
" It uses user-space type of virtual interface (vhostuser) instead "
"of\n"
" the system based one (ovs).\n"
"\n"
" If this option is not specified it tries to detect vhostuser\n"
" support on running host and in case of positive match it uses "
"it.\n"
"\n"
" NOTE: if --datapath_type is given then this option is ignored.\n"
"\n"
" Default:\n"
" "
msgid ""
"\n"
" OVS VHostUser socket directory.\n"
"\n"
" Default: --vhostuser_socket_dir=/var/run/openvswitch\n"
" "
msgstr ""
"\n"
" OVS VHostUser socket directory.\n"
"\n"
" Default: --vhostuser_socket_dir=/var/run/openvswitch\n"
" "
msgid ""
"\n"
" Specifies allowed network types given as a Comma-separated list "
"of\n"
" types.\n"
"\n"
" Default: --allowed_network_types=local,vlan,vxlan,gre\n"
" "
msgstr ""
"\n"
" Specifies allowed network types given as a Comma-separated list "
"of\n"
" types.\n"
"\n"
" Default: --allowed_network_types=local,vlan,vxlan,gre\n"
" "
msgid ""
"\n"
" VHostUser socket port prefix.\n"
"\n"
" Choices: --vhostuser_socket_dir=vhu\n"
" --vhostuser_socket_dir=socket\n"
"\n"
" Default: --vhostuser_socket_dir=vhu\n"
" "
msgstr ""
"\n"
" VHostUser socket port prefix.\n"
"\n"
" Choices: --vhostuser_socket_dir=vhu\n"
" --vhostuser_socket_dir=socket\n"
"\n"
" Default: --vhostuser_socket_dir=vhu\n"
" "
msgid "(V2 driver) Journal maintenance operations interval in seconds."
msgstr "(V2 driver) Journal maintenance operations interval in seconds."
msgid "(V2 driver) Number of times to retry a row before failing."
msgstr "(V2 driver) Number of times to retry a row before failing."
msgid ""
"(V2 driver) Time in seconds to wait before a processing row is marked back "
"to pending."
msgstr ""
"(V2 driver) Time in seconds to wait before a processing row is marked back "
"to pending."
msgid ""
"(V2 driver) Time to keep completed rows (in seconds).For performance reasons "
"it's not recommended to change this from the default value (0) which "
"indicates completed rows aren't kept.This value will be checked every "
"maintenance_interval by the cleanup thread. To keep completed rows "
"indefinitely, set the value to -1"
msgstr ""
"(V2 driver) Time to keep completed rows (in seconds). For performance "
"reasons it's not recommended to change this from the default value (0) which "
"indicates completed rows aren't kept. This value will be checked every "
"maintenance_interval by the cleanup thread. To keep completed rows "
"indefinitely, set the value to -1"
msgid ""
"--ovs_dpdk option was specified but the 'netdev' datapath_type was not "
"enabled. To override use option --datapath_type=netdev"
msgstr ""
"--ovs_dpdk option was specified but the 'netdev' datapath_type was not "
"enabled. To override use option --datapath_type=netdev"
msgid "Enable websocket for pseudo-agent-port-binding."
msgstr "Enable websocket for pseudo-agent-port-binding."
msgid ""
"Enables the networking-odl driver to supply special neutron ports of \"dhcp"
"\" type to OpenDaylight Controller for its use in providing DHCP Service."
msgstr ""
"Enables the networking-odl driver to supply special neutron ports of \"dhcp"
"\" type to OpenDaylight Controller for its use in providing DHCP Service."
msgid "HTTP URL of OpenDaylight REST interface."
msgstr "HTTP URL of OpenDaylight REST interface."
msgid "HTTP password for authentication."
msgstr "HTTP password for authentication."
msgid "HTTP timeout in seconds."
msgstr "HTTP timeout in seconds."
msgid "HTTP username for authentication."
msgstr "HTTP username for authentication."
msgid "Invalid ODL URL"
msgstr "Invalid ODL URL"
msgid "Name of the controller to be used for port binding."
msgstr "Name of the controller to be used for port binding."
#, python-format
msgid "OpenDaylight API returned %(status)s %(reason)s"
msgstr "OpenDaylight API returned %(status)s %(reason)s"
msgid "Path for ODL host configuration REST interface"
msgstr "Path for ODL host configuration REST interface"
msgid "Poll interval in seconds for getting ODL hostconfig"
msgstr "Poll interval in seconds for getting ODL hostconfig"
msgid "Test without real ODL."
msgstr "Test without real ODL."
msgid "Tomcat session timeout in minutes."
msgstr "Tomcat session timeout in minutes."
msgid "Wait this many seconds before retrying the odl features fetch"
msgstr "Wait this many seconds before retrying the ODL features fetch"
msgid "bad_request (http400),check path"
msgstr "bad_request (http400),check path"
msgid "bad_request (http400),check path."
msgstr "bad_request (http400),check path."
msgid "resource_list can not be None"
msgstr "resource_list can not be None"
msgid "unsupported operation {}"
msgstr "unsupported operation {}"
msgid "websocket subscribe bad stream data"
msgstr "websocket subscribe bad stream data"
networking-odl-16.0.0/networking_odl/_i18n.py0000664000175000017500000000226413656750541021120 0ustar zuulzuul00000000000000# Copyright 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""oslo.i18n integration module.
See https://docs.openstack.org/oslo.i18n/latest/user/index.html .
"""
import oslo_i18n
# Translation domain; must match the name of the shipped message catalogs.
DOMAIN = "networking_odl"

_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN)

# The primary translation function using the well-known name "_"
_ = _translators.primary

# The contextual translation function using the name "_C"
# requires oslo.i18n >=2.1.0
_C = _translators.contextual_form

# The plural translation function using the name "_P"
# requires oslo.i18n >=2.1.0
_P = _translators.plural_form
def get_available_languages():
    """Return the languages for which this domain has translations."""
    return oslo_i18n.get_available_languages(DOMAIN)
networking-odl-16.0.0/lower-constraints.txt0000664000175000017500000001063213656750541021037 0ustar zuulzuul00000000000000alabaster==0.7.10
alembic==0.9.8
amqp==2.2.2
appdirs==1.4.3
asn1crypto==0.24.0
astroid==1.6.2
Babel==2.5.3
bandit==1.4.0
bashate==0.5.1
beautifulsoup4==4.6.0
blockdiag==1.5.3
cachetools==2.0.1
certifi==2018.1.18
ceilometer==11.0.0
cffi==1.11.5
chardet==3.0.4
cliff==2.11.0
cmd2==0.8.1
contextlib2==0.5.5
cotyledon==1.6.8
coverage==4.5.1
cryptography==2.1.4
debtcollector==1.19.0
decorator==4.2.1
deprecation==2.0
Django==2.2
django-appconf==1.0.2
django-babel==0.6.2
django-compressor==2.2
django-pyscss==2.0.2
doc8==0.8.0
docutils==0.14
dogpile.cache==0.6.5
dulwich==0.19.0
enum-compat==0.0.2
eventlet==0.20.0
exabgp==4.0.5
extras==1.0.0
fasteners==0.14.1
fixtures==3.0.0
flake8==2.6.2
flake8-import-order==0.17.1
funcparserlib==0.3.6
future==0.16.0
futurist==1.6.0
gitdb2==2.0.3
GitPython==2.1.8
greenlet==0.4.13
hacking==1.1.0
horizon==17.1.0
httplib2==0.10.3
idna==2.6
imagesize==1.0.0
iso8601==0.1.12
isort==4.3.4
Jinja2==2.10
jmespath==0.9.3
jsonpatch==1.21
jsonpath-rw==1.4.0
jsonpath-rw-ext==1.1.3
jsonpointer==2.0
jsonschema==2.6.0
kazoo==2.4.0
keystoneauth1==3.14.0
keystonemiddleware==4.21.0
kombu==4.1.0
lazy-object-proxy==1.3.1
linecache2==1.0.0
logutils==0.3.5
lxml==4.1.1
Mako==1.0.7
MarkupSafe==1.0
mccabe==0.2.1
mock==2.0.0
monotonic==1.4
mox3==0.25.0
msgpack==0.5.6
msgpack-python==0.5.6
munch==2.2.0
netaddr==0.7.19
netifaces==0.10.6
networking-bagpipe==8.0.0
networking-l2gw==12.0.0
networking-sfc==10.0.0.0b1
networking-bgpvpn==12.0.0b1
neutron==16.0.0.0b1
neutron-lib==2.0.0
openstackdocstheme==1.30.0
openstacksdk==0.31.2
os-client-config==1.29.0
os-service-types==1.7.0
os-xenapi==0.3.1
osc-lib==1.10.0
oslo.cache==1.29.0
oslo.concurrency==3.26.0
oslo.config==5.2.0
oslo.context==2.20.0
oslo.db==4.37.0
oslo.i18n==3.20.0
oslo.log==3.37.0
oslo.messaging==5.36.0
oslo.middleware==3.35.0
oslo.policy==1.34.0
oslo.privsep==1.32.0
oslo.reports==1.27.0
oslo.rootwrap==5.13.0
oslo.serialization==2.25.0
oslo.service==1.30.0
oslo.utils==3.36.0
oslo.versionedobjects==1.35.1
oslotest==3.3.0
osprofiler==2.3.0
ovs==2.8.1
ovsdbapp==1.0.0
packaging==17.1
Paste==2.0.3
PasteDeploy==1.5.2
pbr==4.0.0
pecan==1.3.2
pep8==1.5.7
pika==0.10.0
pika-pool==0.1.3
Pillow==5.0.0
Pint==0.8.1
ply==3.11
prettytable==0.7.2
psutil==5.4.3
pyasn1==0.4.2
pyasn1-modules==0.2.1
pycadf==2.7.0
pycodestyle==2.4.0
pycparser==2.18
pycryptodomex==3.5.1
pyflakes==0.8.1
Pygments==2.2.0
pyinotify==0.9.6
pylint==1.8.3
pymongo==3.6.1
pyOpenSSL==17.5.0
pyparsing==2.2.0
pyperclip==1.6.0
pyroute2==0.5.7
pyScss==1.3.4
pysmi==0.2.2
pysnmp==4.4.4
python-barbicanclient==4.6.0
python-cinderclient==5.0.0
python-dateutil==2.7.0
python-designateclient==2.9.0
python-editor==1.0.3
python-glanceclient==2.9.1
python-keystoneclient==3.22.0
python-mimeparse==1.6.0
python-neutronclient==6.7.0
python-novaclient==10.1.0
python-subunit==1.2.0
python-swiftclient==3.5.0
pytz==2018.3
PyYAML==3.12
rcssmin==1.0.6
reno==2.7.0
repoze.lru==0.7
requests==2.18.4
requestsexceptions==1.4.0
restructuredtext-lint==1.1.3
rfc3986==1.1.0
rjsmin==1.0.12
Routes==2.4.1
semantic-version==2.6.0
seqdiag==0.9.5
setproctitle==1.1.10
simplejson==3.13.2
six==1.11.0
smmap2==2.0.3
snowballstemmer==1.2.1
Sphinx==1.6.5
sphinxcontrib-blockdiag==1.5.5
sphinxcontrib-seqdiag==0.8.5
sphinxcontrib-websupport==1.0.1
SQLAlchemy==1.2.5
sqlalchemy-migrate==0.11.0
sqlparse==0.2.4
statsd==3.2.2
stestr==2.0.0
stevedore==1.28.0
Tempita==0.5.2
tenacity==4.9.0
testrepository==0.0.20
testresources==2.0.1
testscenarios==0.5.0
testtools==2.3.0
tinyrpc==0.8
tooz==1.61.0
traceback2==1.4.0
urllib3==1.22
vine==1.1.4
voluptuous==0.11.1
waitress==1.1.0
warlock==1.3.0
webcolors==1.8.1
WebOb==1.8.2
websocket-client==0.47.0
WebTest==2.0.29
wrapt==1.10.11
XStatic==1.0.1
XStatic-Angular==1.5.8.0
XStatic-Angular-Bootstrap==2.2.0.0
XStatic-Angular-FileUpload==12.0.4.0
XStatic-Angular-Gettext==2.3.8.0
XStatic-Angular-lrdragndrop==1.0.2.2
XStatic-Angular-Schema-Form==0.8.13.0
XStatic-Bootstrap-Datepicker==1.3.1.0
XStatic-Bootstrap-SCSS==3.3.7.1
XStatic-bootswatch==3.3.7.0
XStatic-D3==3.5.17.0
XStatic-Font-Awesome==4.7.0.0
XStatic-Hogan==2.0.0.2
XStatic-Jasmine==2.4.1.1
XStatic-jQuery==1.10.2.1
XStatic-JQuery-Migrate==1.2.1.1
XStatic-jquery-ui==1.12.0.1
XStatic-JQuery.quicksearch==2.0.3.1
XStatic-JQuery.TableSorter==2.14.5.1
XStatic-JSEncrypt==2.3.1.1
XStatic-mdi==1.4.57.0
XStatic-objectpath==1.2.1.0
XStatic-Rickshaw==1.5.0.0
XStatic-roboto-fontface==0.5.0.0
XStatic-smart-table==1.4.13.2
XStatic-Spin==1.2.5.2
XStatic-term.js==0.0.7.0
XStatic-tv4==1.2.7.0
zake==0.2.2
networking-odl-16.0.0/AUTHORS0000664000175000017500000002615413656750616015662 0ustar zuulzuul00000000000000AKamyshnikova
Aaron Rosen
Achuth Maniyedath
Achuth Maniyedath
Adam Harwell
Akihiro MOTOKI
Akihiro Motoki
Aleks Chirko
Alessandro Pilotti
Alessio Ababilov
Alessio Ababilov
Alon Kochba
Amir Sadoughi
Andre Pech
Andreas Jaeger
Andreas Jaeger
Angus Lees
Anh Tran
Anil Vishnoi
Ankur Gupta
Ann Kamyshnikova
Armando Migliaccio
Arvind Somy
Arvind Somya
Ashik Alias
Assaf Muller
Atsushi SAKAI
Barak Dabush
Bernard Cafarelli
Bhuvan Arumugam
Bob Kukura
Bob Melander
Boden R
Brad Hall
Brant Knudson
Brian Waldon
Cao Xuan Hoang
Carl Baldwin
Cedric Brandily
Chang Bo Guo
Christian Berendt
Chuck Short
Clark Boylan
Clint Byrum
Corey Bryant
Cédric Ollivier
Dan Prince
Dan Wendlandt
Davanum Srinivas
Dave Lapsley
Dave Tucker
Deepak N
Deepthi V V
Dirk Mueller
Dong Jun
Doug Hellmann
Doug Hellmann
Doug Wiegley
Ed Warnicke
Edan David
Edgar Magana
Elod Illes
Elod Illes
Emilien Macchi
Eugene Nikanorov
Federico
Federico Ressi
Flavio Fernandes
Flavio Percoco
Frederick F. Kautz IV
Gary Kotton
Gary Kotton
Gauvain Pocentek
Gordon Chung
Guilherme Salgado
Guo Ruijing
Guoshuai Li
Han Manjong
Hareesh Puthalath
He Jie Xu
Hemanth Ravi
Henry Gessau
Henry Gessau
Henry Gessau
HenryVIII
Hirofumi Ichihara
Ian Wienand
Ignacio Scopetta
Igor Duarte Cardoso
Ihar Hrachyshka
Ionuț Arțăriși
Irena Berezovsky
Isaku Yamahata
Isaku Yamahata
Isaku Yamahata
JJ Asghar
Jacek Swiderski
Jaime Caamaño Ruiz
Jakub Libosvar
James E. Blair
James E. Blair
James E. Blair
James Page
Jamo Luhrsen
Jason Kölker
Jay Pipes
Jeremy Liu
Jeremy Stanley
Jiajun Liu
Joe Gordon
Joe Heck
John Dunning
Jon Schlueter
Jordan Tardif
Josh
Juan Vidal
Juliano Martinez
Julien Danjou
Justin Lund
Keshava Bharadwaj
Kevin Benton
Kevin L. Mitchell
Koby Aizer
Kris Lindgren
Kun Huang
Kyle Mestery
Kyle Mestery
Lajos Katona
Luis Tomas Bolivar
Luke Gorrie
Luong Anh Tuan
Major Hayden
Manjeet Singh Bhatia
Manuel Buil
MaoyangLiu
Marcelo Amaral
Marcus G K Williams
Mark McClain
Mark McClain
Mark McLoughlin
Maru Newby
Maru Newby