==> networking_arista-2023.1.0/.coveragerc <==
[run]
branch = True
source = networking_arista
omit = networking_arista/tests/*,networking_arista/openstack/*
[report]
ignore_errors = True
==> networking_arista-2023.1.0/.mailmap <==
# Format is:
# <preferred e-mail> <other e-mail 1>
# <preferred e-mail> <other e-mail 2>
Sukhdev Kapur
Shashank Hegde
Andre Pech
==> networking_arista-2023.1.0/.stestr.conf <==
[DEFAULT]
test_path=./networking_arista/tests/unit
top_dir=./
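
# A hedged usage note (not part of the original file): with this
# configuration in place, the unit test suite can be run from the repo
# root with the stestr CLI, e.g. `stestr run`, which discovers tests
# under ./networking_arista/tests/unit.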
==> networking_arista-2023.1.0/.zuul.yaml <==
- project:
    name: x/networking-arista
    templates:
      - openstack-python3-antelope-jobs-neutron
      - check-requirements
==> networking_arista-2023.1.0/AUTHORS <==
Alex Reimers
Alin Iorga
Andreas Jaeger
Ann Kamyshnikova
Anuraag Mittal
Arnoud de Jonge
Aurelien Lourot
BenoƮt Knecht
Bertrand Lallau
Boden R
Corey Bryant
Doug Wiegley
Gary Kotton
Henry Gessau
Ihar Hrachyshka
Mark Goddard
Mark McClain
Mitchell Jameson
Monty Taylor
Nader Lahouti
Nguyen Hung Phuong
Paul Bourke
Shashank Hegde
Shashank Hegde
Sukhdev
Sukhdev Kapur
Vieri <15050873171@163.com>
Vu Cong Tuan
Zhao Lei
huang.zhiping
kangyufei
melissaml
shangxiaobj
sunyandi
wei wang
zhangyanxian
==> networking_arista-2023.1.0/CONTRIBUTING.rst <==
If you would like to contribute to the development of OpenStack,
you must follow the steps in this page:

https://docs.openstack.org/infra/manual/developers.html

Once those steps have been completed, changes to OpenStack
should be submitted for review via the Gerrit tool, following
the workflow documented at:

https://docs.openstack.org/infra/manual/developers.html#development-workflow

Pull requests submitted through GitHub will be ignored.

Bugs should be filed on Launchpad, not GitHub:

https://bugs.launchpad.net/networking-arista
==> networking_arista-2023.1.0/ChangeLog <==
CHANGES
=======
2023.1.0
--------
* Setup branch for 2023.1 Antelope
* Add support for provisioning L2 connectivity for L3 GW ports
* Dropping lower-constraints test
* Use context manager from neutron-lib accessing VlanAllocation
* Revert "Check for missing port bindings each sync period"
* Ensure queries are executed in reader sessions
* Don't send sync complete on sync failure after switchover
* Bump python versions to match neutron
* Don't use router\_interface fixed\_ip for MLAG peer SVI IPs
* Start running jobs against newer python versions
* Add connectivity property to MechanismDrivers
* Make unit tests backwards compatible with the latest pyPI Neutron
* Fix unit tests and lower-constraints
* Use new callback payloads for PORT AFTER\_UPDATE and AFTER\_DELETE
* Handle new payload format for SUBNETs, SG and SG\_RULES
* Temporary fix to make code work with SQLAlchemy 1.4.18
* Add support for specifying switch shortnames in physnets for HPB
* Fix L3 plugin to handle new payload format
* Fix arista\_trunk plugin to handle new payload format
* Add test coverage for managed\_physnets without HPB
* Fix handling of FQDN in port binding delete
* Check for missing port bindings each sync period
* Revert "Bind DVR ports when DOWN to workaround upstream issue"
* Bind DVR ports when DOWN to workaround upstream issue
* Fix security\_group unit tests
* Don't call get\_session in vlan type driver
* Try re-importing any deleted multiprocessing modules
* Fix AttributeError on select.poll()
* Encode special characters in CVX username and password
* Reduce info level logging output
* Fix handling of ERROR ports and '' device\_id
* Test routes are not deleted on sync
* Remove delete\_default\_gateway from create\_router
* Use show version to validate vrf support
* Use an interprocess lock for physnets
* Prevent recursive check for vrf supported commands
* Synchronize requirements with neutron
* Wrap L3 synchronize function in try catch
* Add enable\_clenup to devstack plugin
* Added missing format character
* Add option to create default route to router VRFs
* Update L3 plugin to handle new syntax for deprecated vrf forwarding
* Update L3 plugin to handle switches that support vrf instance
* Update create\_network test wrapper to set mtu as 1450 if does not exist. Change-Id: I3b80697173b133e0c6924c6b22b28cfae28ddc77
* Update README
* Add dockerfiles subdir
* Improve performance of sync under heavy load
* Declare that the arista plugin provides L2 connectivity
* Get next segment from NetworkSegment DB
* Removed creation of INTERNAL-TENANT-ID
* Create network when project\_id is an empty string
* Correct case of ERR\_CVX\_NOT\_LEADER
* OpenDev Migration Patch
* Filter segments for network\_id with empty project\_id
* Fix allocate\_tenant\_segment to support filters
* Return networks with non-empty project\_id
* use trunk constants from neutron-lib
* Add another test mech driver for scale simulation
* Use immediateload for loading rules attribute
* add python 3.7 unit test job
* remove noqa use of neutron.db.api
* update project for zuul v3
* use neutron-lib for rpc
* Ensure arista\_vlan type driver has correct network\_vlan\_range
* Add unit test for router\_gateway ports in the L3 plugin
* Only create SVIs for router\_interface ports in sync worker
* Make protected\_vlans valid range inclusive
* Allow arista\_vlan type driver to startup when CVX is not reachable
* Fix a number of HA bugs
* Add a test driver to simulate running Neutron in HA deployments
* Add ability to cleanup L3 resources
* Load listeners to expire stale objects from session
* Update min tox version to 2.0
* use common rpc and exceptions from neutron-lib
* add bug link in README
* opt in for neutron-lib consumption patches
* Add support for VRFs on MLAGs
* Remove the duplicated word
* Improve Security Group support
* Fixup the arista\_vlan type driver
* Make resource updates implicit
* update requirements for neutron-lib 1.18.0
* Another set of stability fixes for the driver rewrite
* Use original\_binding\_levels when current binding\_levels is not set
* Delete resources in reverse sync order
* use add\_worker() in plugin for consistency with other service plugins
* Remove remaining portion of NeutronNets in favor of calling core plugin
* Convert l3 sync thread to ServiceWorker
* Simplify base class to service worker
* Add \*-dev tox envs to install dependencies correctly for local testing
* Align baremetal EGRESS/INGRESS meaning with VM meaning
* Cleanup some unused methods from mechanism arista
* A number of fixes for the driver rewrite
* Clear security group rules before adding new ones
* Increase default command timeout
* Update resource id mapping and add additional logging
* fix tox python3 overrides
* use get reader/writer session from neutron-lib
* Switch to stestr to comply with OpenStack python guidelines[1]
* Cleanup Arista VLAN Type Driver
* Add support for trunks
* Cleanup Arista Security Group Support
* Pass resources from Arista Mech Driver to Sync Worker for provisioning
* Add ability to pass resources from mechanism driver to sync thread
* Cleanup port binding logic
* Cleanup synchronization
* Replace deprecated "auth\_uri" by "www\_authenticate\_uri"
* Unconditionally setting the authentication header
* use rpc Connection rather than create\_connection
* Update tox.ini
* Updated from global requirements
* Update requirements to pull in latest neutron package
* use common agent topics from neutron-lib
* modify keystone spelling errors
* Updated from global requirements
* Change http link to https according to OpenStack website
* Updated from global requirements
* Query PortBindingLevel table for segments bind to a port
* Updated from global requirements
* Updated from global requirements
* Updated from global requirements
* Remove use of the arista\_provisioned\_vms table
* Remove use of the arista\_provisioned\_nets table
* Remove use of the arista\_provisioned\_tenants table
* Updated from global requirements
* Updated from global requirements
* Updated from global requirements
* Remove the EAPI rpc implementation
* Updated from global requirements
* Fix support for security groups on Port-Channels
* Throw error in Arista JSON API calls on error status code
* Revert "Remove the EAPI rpc implementation"
* Updated from global requirements
* Remove the EAPI rpc implementation
* Drop MANIFEST.in - it's not needed by pbr
* Updated from global requirements
2017.2.0
--------
* Handle errors in GETs gracefully
* enable trunk service plugin in devstack when using networking-arista
* modify ml2 dirver to support neutron trunk port & subport
* Updated from global requirements
* Use DEVICE\_ID\_RESERVED\_DHCP\_PORT constant defined in neutron-lib
* fix text conversion for python2/3
* Split arista\_ml2 into separate files for sync and RPC wrappers
* Removed registration of keystone authentication with CVX
* Updated from global requirements
* Bind ports using physnet provided by higher level drivers
* Updated from global requirements
* Move Sync Thread to ML2 worker process
* use service type constants from neutron\_lib plugins
* Update get\_physical\_network with the latest change to topology
* Updated from global requirements
* oslo-incubator is not used anymore
* Make the JSON API the default CVX endpoint
* Updated from global requirements
* migrate neutron.plugins.ml2.driver\_api imports to neutron\_lib
* Migrate neutron.db.api imports to neutron\_lib.db.api
* Remove usage of Neutron's MechanismDriverError
* Migrate neutron plugin constant imports to neutron-lib
* migrate imports for neutron.callbacks.\* to neutron\_lib
* Change log level from info to debug
* Add port to database with supported device\_owner
* Treat MLAG pairs as a single physical network
* Use only 'binding:host\_id' to detect port migration
* Updated from global requirements
* Updated from global requirements
* Use vnic\_type to determine whether a port is baremetal
* Updated from global requirements
* Updated from global requirements
* Handle portbindings with no local\_link\_information gracefully
* Updated from global requirements
* Fix ml2\_conf\_arista.ini installation
* Implement method for RBAC policy changes
* Add call to the directory add\_plugin method
* Updated from global requirements
* Updated from global requirements
* Flush transactions to ensure data is present in the database
* Update classifiers for Python 3 support
* Sync from global requirements
* Enable Python3.5 support
* Capture to ensure CVX error is logged
* Capture and ensure LOG.exception output
* Add cfg.CONF.import\_group() to document option dependencies
* Use identity v3 api
* oslo.config will begin to enfore types in a future version
* Add unordered list wrapper to help with validating mocked calls
* remove H302 pep8 exception
* Update get\_session() calls to get\_(reader|writer)\_session
* Provide a return\_value for get\_region\_updated\_time mock
* Add explicit check for rpc is not None
* Clear out ml2\_vlan\_allocations if CVX has no VLAN pool configured
* Remove external dependency on jsonrpclib
* Fail VLAN allocations from outside the assigned range
2017.1.0
--------
* Only send managed segments to CVX as part of port update
* Add a manage\_fabric configuration option
* Use neutron-lib portbindings api-def
* Handling dhcp port migration
* Removed unneeded segments setting in non-HPB case
* Replace jsonrpclib with jsonrpc\_requests module
* Correction for importing context module
* Stop passing sessions from mech\_arista to db\_lib
* Bump minimum requirements to match Ocata
* Fix VlanAllocation import
* LOG marker mismatch in arista\_ml2.py
* Fix for segments info in delete\_network\_segments
* Copying the config file to ml2 plugin directory
* Fix setting dynamic flag for each segment in list
* neutron-lib: use L3 constant from neutron-lib
* Change passing session to context in segments db functions
* default ARISTA\_REGION\_NAME to REGION\_NAME
* Fixed Arista VLAN type driver initialization
* Don't include openstack/common in flake8 exclude list
* Adding all config options to devstack plugin
2016.2.0
--------
* Calling delete\_network\_segments instead
* Importing db from neutron\_lib
* Fixed a bug in segment creation and deletion
* Fix regression when flat networks were created
* Fixed a bug in unplugging baremetal ports
* Adding JSON support for HPB, DVR and Baremetal
* Migrated neutron constants to neutron-lib for l3 plugin
* Redact keystone password from logs in arista\_ml2
* Remove reference to neutron.db
* use LOG.warning instead of LOG.warn
* Remove reference to neutron.i18n
* Migrate Neutron constant and exception references to neutron-lib
* Add devstack plugin support
* Move neutron include to tox.ini file
* Support of HPB in Arista Mechanism Driver
* Add support constraining requirements to match upstream
* Revert "Redact keystone password from logs in arista\_ml2"
* Redact keystone password from logs in arista\_ml2
* Fixed incorrect cli command for sync heartbeat
* Do not fail initialization when EOS not available
* Fix typo in etc/ml2\_conf\_arista.ini
* Updating L3 plugin to accomodate upstream changes
* Make feature checks abstract methods
* Fix sending the sync name
* Obscure passwords in log files
* Allow empty fields for Keystone
* Fix current\_sync\_name
* Have the ML2 plugin use EOS' OpenStack JSON API
* TrivialFix: Clean imports in code
* Temporary workaround to project id migration
* Removed cli command check in sync to fix CVX publication issue
* Enable DeprecationWarning in test environments
* Adding Ironic Support
2016.1.0
--------
* Adding DVR support
* Do not delete a network with ports attached to it
* Fixing L3 Plugin to match with upstream change
* Added the arista\_vlan type driver
* Fix L3 plugin to match with upstream
* Check that shared networks are VLAN networks before in Arista ML2
* Fixed typos in arista\_l3\_driver.py
* Fix error handling to handle invalid URL, response
* Enabling creation of unattached ports
* Ensuring that the response contains 'errors' key
* Use requests library instead of jsonrpclib
2015.2
------
* Adding database migration scripts
* Use auth\_uri in when set
* Adding support for multiple EOS instances
* Supporting neutron HA
* Using 'INTERNAL-TENANT-ID' as the network owner
* Fixed HA router network cleanup
* Fix a spelling typo in error message
* Change ignore-errors to ignore\_errors
* Fix port creation on shared networks
* Removing unused dependency: discover
* Migration of Arista drivers from neutron to here
* Fixing for transition from stackforge to openstack
* Fix unit tests
* Updating pbr in requirements to reflect neutron's requirements
* Adding an API to update the host id of a VM
* Bumping the package version for Master to 2015.2
* Adding jsonrpclib to requirements.txt
* Added missing jsonrpclib dependency
2015.1.3
--------
* Moving Neutron dependency
* Arista L3 Service Plugin decomposition
* Migrate to oslo.log
* Fixes sync between Arista ML2 driver and EOS
* Updated the package version
* Moving Arista ML2 driver from neutron tree
* Fixing README.rst to point to correct repo for this project
* Initial Setup of the base project for networking-arista drivers
* Added .gitreview
==> networking_arista-2023.1.0/HACKING.rst <==
networking-arista Style Commandments
===============================================
Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/
==> networking_arista-2023.1.0/LICENSE <==
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
==> networking_arista-2023.1.0/PKG-INFO <==
Metadata-Version: 1.2
Name: networking_arista
Version: 2023.1.0
Summary: Arista Networking drivers
Home-page: https://opendev.org/x/networking-arista/
Author: Arista Networks
Author-email: openstack-dev@arista.com
License: UNKNOWN
Description: ===============================
networking-arista
===============================
Arista Networking drivers
* Free software: Apache license
* Source: https://opendev.org/x/networking-arista
* Bug: https://bugs.launchpad.net/networking-arista
Platform: UNKNOWN
Classifier: Environment :: OpenStack
Classifier: Intended Audience :: Information Technology
Classifier: Intended Audience :: System Administrators
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Operating System :: POSIX :: Linux
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Requires-Python: >=3.8
==> networking_arista-2023.1.0/README.rst <==
===============================
networking-arista
===============================
Arista Networking drivers
* Free software: Apache license
* Source: https://opendev.org/x/networking-arista
* Bug: https://bugs.launchpad.net/networking-arista
==> networking_arista-2023.1.0/babel.cfg <==
[python: **.py]
==> networking_arista-2023.1.0/devstack/plugin.sh <==
# -*- mode: shell-script -*-
function install_lldp() {
    echo_summary "Installing LLDP"
    install_package lldpd
    restart_service lldpd
}

function install_arista_driver() {
    echo_summary "Installing Arista Driver"
    setup_develop $ARISTA_DIR
}

function configure_arista() {
    echo_summary "Configuring Neutron for Arista Driver"
    cp $ARISTA_ML2_CONF_SAMPLE $ARISTA_ML2_CONF_FILE

    iniset $ARISTA_ML2_CONF_FILE ml2_arista eapi_host $ARISTA_EAPI_HOST
    iniset $ARISTA_ML2_CONF_FILE ml2_arista eapi_username $ARISTA_EAPI_USERNAME
    iniset $ARISTA_ML2_CONF_FILE ml2_arista eapi_password $ARISTA_EAPI_PASSWORD
    iniset $ARISTA_ML2_CONF_FILE ml2_arista region_name $ARISTA_REGION_NAME
    if [ -n "${ARISTA_USE_FQDN+x}" ]; then
        iniset $ARISTA_ML2_CONF_FILE ml2_arista use_fqdn $ARISTA_USE_FQDN
    fi
    if [ -n "${ARISTA_USE_FQDN_PHYSNET+x}" ]; then
        iniset $ARISTA_ML2_CONF_FILE ml2_arista use_fqdn_physnet $ARISTA_USE_FQDN_PHYSNET
    fi
    if [ -n "${ARISTA_ML2_SYNC_INTERVAL+x}" ]; then
        iniset $ARISTA_ML2_CONF_FILE ml2_arista sync_interval $ARISTA_ML2_SYNC_INTERVAL
    fi
    if [ -n "${ARISTA_SEC_GROUP_SUPPORT+x}" ]; then
        iniset $ARISTA_ML2_CONF_FILE ml2_arista sec_group_support $ARISTA_SEC_GROUP_SUPPORT
    fi
    if [ -n "${ARISTA_SWITCH_INFO+x}" ]; then
        iniset $ARISTA_ML2_CONF_FILE ml2_arista switch_info $ARISTA_SWITCH_INFO
    fi
    if [ -n "${ARISTA_PRIMARY_L3_HOST+x}" ]; then
        iniset $ARISTA_ML2_CONF_FILE l3_arista primary_l3_host $ARISTA_PRIMARY_L3_HOST
    fi
    if [ -n "${ARISTA_PRIMARY_L3_HOST_USERNAME+x}" ]; then
        iniset $ARISTA_ML2_CONF_FILE l3_arista primary_l3_host_username $ARISTA_PRIMARY_L3_HOST_USERNAME
    fi
    if [ -n "${ARISTA_PRIMARY_L3_HOST_PASSWORD+x}" ]; then
        iniset $ARISTA_ML2_CONF_FILE l3_arista primary_l3_host_password $ARISTA_PRIMARY_L3_HOST_PASSWORD
    fi
    if [ -n "${ARISTA_SECONDARY_L3_HOST+x}" ]; then
        iniset $ARISTA_ML2_CONF_FILE l3_arista secondary_l3_host $ARISTA_SECONDARY_L3_HOST
    fi
    if [ -n "${ARISTA_SECONDARY_L3_HOST_USERNAME+x}" ]; then
        iniset $ARISTA_ML2_CONF_FILE l3_arista secondary_l3_host_username $ARISTA_SECONDARY_L3_HOST_USERNAME
    fi
    if [ -n "${ARISTA_SECONDARY_L3_HOST_PASSWORD+x}" ]; then
        iniset $ARISTA_ML2_CONF_FILE l3_arista secondary_l3_host_password $ARISTA_SECONDARY_L3_HOST_PASSWORD
    fi
    if [ -n "${ARISTA_MLAG_CONFIG+x}" ]; then
        iniset $ARISTA_ML2_CONF_FILE l3_arista mlag_config $ARISTA_MLAG_CONFIG
    fi
    if [ -n "${ARISTA_USE_VRF+x}" ]; then
        iniset $ARISTA_ML2_CONF_FILE l3_arista use_vrf $ARISTA_USE_VRF
    fi
    if [ -n "${ARISTA_ENABLE_CLEANUP+x}" ]; then
        iniset $ARISTA_ML2_CONF_FILE l3_arista enable_cleanup $ARISTA_ENABLE_CLEANUP
    fi
    if [ -n "${ARISTA_VRF_DEFAULT_ROUTE+x}" ]; then
        iniset $ARISTA_ML2_CONF_FILE l3_arista vrf_default_route $ARISTA_VRF_DEFAULT_ROUTE
    fi
    if [ -n "${ARISTA_L3_SYNC_INTERVAL+x}" ]; then
        iniset $ARISTA_ML2_CONF_FILE l3_arista l3_sync_interval $ARISTA_L3_SYNC_INTERVAL
    fi
    if [ -n "${ARISTA_TYPE_DRIVER_SYNC_INTERVAL+x}" ]; then
        iniset $ARISTA_ML2_CONF_FILE arista_type_driver sync_interval $ARISTA_TYPE_DRIVER_SYNC_INTERVAL
    fi
    neutron_server_config_add $ARISTA_ML2_CONF_FILE
}

if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then
    neutron_service_plugin_class_add "trunk"
    if is_service_enabled "q-agt"; then
        install_lldp
    fi
elif [[ "$1" == "stack" && "$2" == "install" ]]; then
    install_arista_driver
elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then
    configure_arista
elif [[ "$1" == "stack" && "$2" == "extra" ]]; then
    # no-op
    :
fi

if [[ "$1" == "unstack" ]]; then
    # no-op
    :
fi

if [[ "$1" == "clean" ]]; then
    # no-op
    :
fi
==> networking_arista-2023.1.0/devstack/settings <==
if ! [[ "$Q_ML2_PLUGIN_MECHANISM_DRIVERS" =~ "arista" ]]; then
    Q_ML2_PLUGIN_MECHANISM_DRIVERS="$Q_ML2_PLUGIN_MECHANISM_DRIVERS,arista"
fi
ARISTA_DIR=${ARISTA_DIR:-$DEST/networking-arista}
ARISTA_ML2_CONF_SAMPLE=$ARISTA_DIR/etc/ml2_conf_arista.ini
ARISTA_ML2_CONF_FILE=${ARISTA_ML2_CONF_FILE:-"$NEUTRON_CONF_DIR/ml2_conf_arista.ini"}
ARISTA_REGION_NAME=${ARISTA_REGION_NAME:-"$REGION_NAME"}
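
# A hedged sketch (not part of the repo): a minimal devstack local.conf
# fragment that pulls in this plugin. The plugin URL matches the source
# URL in README.rst; the ARISTA_* variables are the ones consumed by
# devstack/plugin.sh and devstack/settings above, with placeholder values.
#
#   [[local|localrc]]
#   enable_plugin networking-arista https://opendev.org/x/networking-arista
#   ARISTA_EAPI_HOST=172.13.23.55
#   ARISTA_EAPI_USERNAME=admin
#   ARISTA_EAPI_PASSWORD=admin
#   ARISTA_REGION_NAME=RegionOne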
==> networking_arista-2023.1.0/doc/source/conf.py <==
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
sys.path.insert(0, os.path.abspath('../..'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
    'sphinx.ext.autodoc',
    # 'sphinx.ext.intersphinx',
    'oslosphinx'
]
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'networking-arista'
copyright = u'2013, OpenStack Foundation'
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']
# Output file base name for HTML help builder.
htmlhelp_basename = '%sdoc' % project
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
    ('index',
     '%s.tex' % project,
     u'%s Documentation' % project,
     u'OpenStack Foundation', 'manual'),
]
# Example configuration for intersphinx: refer to the Python standard library.
#intersphinx_mapping = {'http://docs.python.org/': None}
==> networking_arista-2023.1.0/doc/source/contributing.rst <==
============
Contributing
============

.. include:: ../../CONTRIBUTING.rst
==> networking_arista-2023.1.0/doc/source/index.rst <==
.. networking-arista documentation master file, created by
   sphinx-quickstart on Tue Jul 9 22:26:36 2013.
   You can adapt this file completely to your liking, but it should at least
   contain the root `toctree` directive.
Welcome to networking-arista's documentation!
========================================================
Contents:

.. toctree::
   :maxdepth: 2

   readme
   installation
   usage
   contributing

Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
==> networking_arista-2023.1.0/doc/source/installation.rst <==
============
Installation
============
At the command line::

    $ pip install networking-arista

Or, if you have virtualenvwrapper installed::

    $ mkvirtualenv networking-arista
    $ pip install networking-arista
==> networking_arista-2023.1.0/doc/source/readme.rst <==
.. include:: ../../README.rst
==> networking_arista-2023.1.0/doc/source/usage.rst <==
========
Usage
========
To use networking-arista in a project::

    import networking_arista
==> networking_arista-2023.1.0/dockerfiles/README <==
This directory contains Dockerfiles for building container images that
include neutron and networking-arista. The name of the OpenStack
distribution is added as a suffix (e.g. Dockerfile.RHOSP).
==> networking_arista-2023.1.0/etc/ml2_conf_arista.ini <==
[DEFAULT]
[arista_type_driver]
#
# From networking_arista
#
# VLAN Sync interval in seconds between Neutron plugin and EOS. This
# interval defines how often the VLAN synchronization is performed.
# This is an optional field. If not set, a value of 10 seconds is
# assumed. (integer value)
#sync_interval = 10
[l3_arista]
#
# From networking_arista
#
# Username for Arista EOS. This is a required field. If not set, all
# communications to Arista EOS will fail (string value)
#primary_l3_host_username =
# Password for Arista EOS. This is a required field. If not set, all
# communications to Arista EOS will fail (string value)
#primary_l3_host_password =
# Arista EOS IP address. This is a required field. If not set, all
# communications to Arista EOS will fail (string value)
#primary_l3_host =
# Arista EOS IP address for second Switch MLAGed with the first one.
# This is an optional field, however, if mlag_config flag is set, then
# this is required. If not set, all communications to Arista EOS will
# fail (string value)
#secondary_l3_host =
# Connection timeout interval in seconds. This interval defines how
# long an EAPI request from the driver to EOS waits before timing out.
# If not set, a value of 10 seconds is assumed. (integer value)
#conn_timeout = 10
# This flag is used to indicate if Arista Switches are configured in
# MLAG mode. If yes, all L3 config is pushed to both the switches
# automatically. If this flag is set to True, ensure to specify IP
# addresses of both switches. This is optional. If not set, a value of
# "False" is assumed. (boolean value)
#mlag_config = false
# A "True" value for this flag indicates to create a router in VRF. If
# not set, all routers are created in default VRF. This is optional.
# If not set, a value of "False" is assumed. (boolean value)
#use_vrf = false
# A "True" value for this flag indicates to create a default route in
# VRF. This setting is valid only when used with the use_vrf=True. If
# not set, all routers are created without default gateway. This is
# optional. If not set, a value of "False" is assumed.
#vrf_default_route = false
# Sync interval in seconds between L3 Service plugin and EOS. This
# interval defines how often the synchronization is performed. This is
# an optional field. If not set, a value of 180 seconds is assumed
# (integer value)
#l3_sync_interval = 180
# Toggle to enable cleanup of unused VLANs, VRFs and SVIs on EOS L3
# hosts in the sync worker. If enabled, ensure that all non-openstack
# VLANs are added to protected_vlans to ensure that they are not
# removed by the sync worker. If not set, a value of "False" is
# assumed. (boolean value)
#enable_cleanup = false
# List of vlans or <start>:<end> ranges that should never be
# cleaned up by the L3 sync worker. This applies to both VLANs and
# SVIs (list value)
#protected_vlans =
[ml2_arista]
#
# From networking_arista
#
# Username for Arista EOS. This is a required field. If not set, all
# communications to Arista EOS will fail. (string value)
#eapi_username =
# Password for Arista EOS. This is a required field. If not set, all
# communications to Arista EOS will fail. (string value)
#eapi_password =
# Arista EOS IP address. This is a required field. If not set, all
# communications to Arista EOS will fail.
# If CVX has been deployed in a highly available (HA) cluster, specify each
# instance IP separated by a comma. (string value)
#eapi_host =
# Defines if hostnames are sent to Arista EOS as FQDNs
# ("node1.domain.com") or as short names ("node1"). This is optional.
# If not set, a value of "True" is assumed. (boolean value)
#use_fqdn = true
# In HPB deployments, this should be set to False if short switch hostnames
# are used for physnets in network_vlan_ranges and bridge_mappings. If
# FQDNs are used in physnets, this should be set to True. (boolean value)
#use_fqdn_physnet = true
# Sync interval in seconds between Neutron plugin and EOS. This
# interval defines how often the synchronization is performed. This is
# an optional field. If not set, a value of 30 seconds is assumed.
# (integer value)
#sync_interval = 30
# Connection timeout interval in seconds. This interval defines how
# long an API request from the driver to CVX waits before timing out.
# If not set, a value of 60 seconds is assumed. (integer value)
#conn_timeout = 60
# Defines Region Name that is assigned to this OpenStack Controller.
# This is useful when multiple OpenStack/Neutron controllers are
# managing the same Arista HW clusters. Note that this name must match
# with the region name registered (or known) to keystone service.
# Authentication with Keystone is performed by EOS. This is optional.
# If not set, a value of "RegionOne" is assumed. (string value)
#region_name = RegionOne
# Specifies if Security Groups need to be deployed for baremetal
# deployments. If this flag is set to True, switch_info (see
# below) must be defined. If this flag is not defined, it is assumed
# to be False (boolean value)
#sec_group_support = false
# This is a comma separated list of Arista switches where security
# groups (i.e. ACLs) need to be applied. Each string has three values
# separated by : in the following format
# <switch_ip>:<username>:<password>, <switch_ip>:<username>:<password>, ...
# For Example: 172.13.23.55:admin:admin, 172.13.23.56:admin:admin, ...
# This is required if sec_group_support is set to "True" (list value)
#switch_info =
# This is a comma separated list of physical networks which are
# managed by Arista switches. This list will be used by the Arista ML2
# plugin to make the decision if it can participate in binding or
# updating a port.
# For Example: managed_physnets = arista_network (list value)
#managed_physnets =
# Specifies whether the Arista ML2 plugin should bind ports to vxlan
# fabric segments and dynamically allocate vlan segments based on the
# host to connect the port to the vxlan fabric (boolean value)
#manage_fabric = false
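
# A hedged, minimal example (not part of the shipped sample): the values
# below are placeholders showing how the required ml2_arista options fit
# together; only eapi_host, eapi_username and eapi_password are required.
#
# [ml2_arista]
# eapi_host = 172.13.23.55
# eapi_username = admin
# eapi_password = admin
# region_name = RegionOne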
==> networking_arista-2023.1.0/etc/policy.json <==
{
"context_is_admin": "role:admin",
"admin_or_owner": "rule:context_is_admin or tenant_id:%(tenant_id)s",
"context_is_advsvc": "role:advsvc",
"admin_or_network_owner": "rule:context_is_admin or tenant_id:%(network:tenant_id)s",
"admin_only": "rule:context_is_admin",
"regular_user": "",
"shared": "field:networks:shared=True",
"shared_firewalls": "field:firewalls:shared=True",
"external": "field:networks:router:external=True",
"default": "rule:admin_or_owner",
"create_subnet": "rule:admin_or_network_owner",
"get_subnet": "rule:admin_or_owner or rule:shared",
"update_subnet": "rule:admin_or_network_owner",
"delete_subnet": "rule:admin_or_network_owner",
"create_network": "",
"get_network": "rule:admin_or_owner or rule:shared or rule:external or rule:context_is_advsvc",
"get_network:router:external": "rule:regular_user",
"get_network:segments": "rule:admin_only",
"get_network:provider:network_type": "rule:admin_only",
"get_network:provider:physical_network": "rule:admin_only",
"get_network:provider:segmentation_id": "rule:admin_only",
"get_network:queue_id": "rule:admin_only",
"create_network:shared": "rule:admin_only",
"create_network:router:external": "rule:admin_only",
"create_network:segments": "rule:admin_only",
"create_network:provider:network_type": "rule:admin_only",
"create_network:provider:physical_network": "rule:admin_only",
"create_network:provider:segmentation_id": "rule:admin_only",
"update_network": "rule:admin_or_owner",
"update_network:segments": "rule:admin_only",
"update_network:shared": "rule:admin_only",
"update_network:provider:network_type": "rule:admin_only",
"update_network:provider:physical_network": "rule:admin_only",
"update_network:provider:segmentation_id": "rule:admin_only",
"update_network:router:external": "rule:admin_only",
"delete_network": "rule:admin_or_owner",
"create_port": "",
"create_port:mac_address": "rule:admin_or_network_owner or rule:context_is_advsvc",
"create_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc",
"create_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
"create_port:binding:host_id": "rule:admin_only",
"create_port:binding:profile": "rule:admin_only",
"create_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
"get_port": "rule:admin_or_owner or rule:context_is_advsvc",
"get_port:queue_id": "rule:admin_only",
"get_port:binding:vif_type": "rule:admin_only",
"get_port:binding:vif_details": "rule:admin_only",
"get_port:binding:host_id": "rule:admin_only",
"get_port:binding:profile": "rule:admin_only",
"update_port": "rule:admin_or_owner or rule:context_is_advsvc",
"update_port:fixed_ips": "rule:admin_or_network_owner or rule:context_is_advsvc",
"update_port:port_security_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
"update_port:binding:host_id": "rule:admin_only",
"update_port:binding:profile": "rule:admin_only",
"update_port:mac_learning_enabled": "rule:admin_or_network_owner or rule:context_is_advsvc",
"delete_port": "rule:admin_or_owner or rule:context_is_advsvc",
"get_router:ha": "rule:admin_only",
"create_router": "rule:regular_user",
"create_router:external_gateway_info:enable_snat": "rule:admin_only",
"create_router:distributed": "rule:admin_only",
"create_router:ha": "rule:admin_only",
"get_router": "rule:admin_or_owner",
"get_router:distributed": "rule:admin_only",
"update_router:external_gateway_info:enable_snat": "rule:admin_only",
"update_router:distributed": "rule:admin_only",
"update_router:ha": "rule:admin_only",
"delete_router": "rule:admin_or_owner",
"add_router_interface": "rule:admin_or_owner",
"remove_router_interface": "rule:admin_or_owner",
"create_router:external_gateway_info:external_fixed_ips": "rule:admin_only",
"update_router:external_gateway_info:external_fixed_ips": "rule:admin_only",
"create_firewall": "",
"get_firewall": "rule:admin_or_owner",
"create_firewall:shared": "rule:admin_only",
"get_firewall:shared": "rule:admin_only",
"update_firewall": "rule:admin_or_owner",
"update_firewall:shared": "rule:admin_only",
"delete_firewall": "rule:admin_or_owner",
"create_firewall_policy": "",
"get_firewall_policy": "rule:admin_or_owner or rule:shared_firewalls",
"create_firewall_policy:shared": "rule:admin_or_owner",
"update_firewall_policy": "rule:admin_or_owner",
"delete_firewall_policy": "rule:admin_or_owner",
"create_firewall_rule": "",
"get_firewall_rule": "rule:admin_or_owner or rule:shared_firewalls",
"update_firewall_rule": "rule:admin_or_owner",
"delete_firewall_rule": "rule:admin_or_owner",
"create_qos_queue": "rule:admin_only",
"get_qos_queue": "rule:admin_only",
"update_agent": "rule:admin_only",
"delete_agent": "rule:admin_only",
"get_agent": "rule:admin_only",
"create_dhcp-network": "rule:admin_only",
"delete_dhcp-network": "rule:admin_only",
"get_dhcp-networks": "rule:admin_only",
"create_l3-router": "rule:admin_only",
"delete_l3-router": "rule:admin_only",
"get_l3-routers": "rule:admin_only",
"get_dhcp-agents": "rule:admin_only",
"get_l3-agents": "rule:admin_only",
"get_loadbalancer-agent": "rule:admin_only",
"get_loadbalancer-pools": "rule:admin_only",
"create_floatingip": "rule:regular_user",
"create_floatingip:floating_ip_address": "rule:admin_only",
"update_floatingip": "rule:admin_or_owner",
"delete_floatingip": "rule:admin_or_owner",
"get_floatingip": "rule:admin_or_owner",
"create_network_profile": "rule:admin_only",
"update_network_profile": "rule:admin_only",
"delete_network_profile": "rule:admin_only",
"get_network_profiles": "",
"get_network_profile": "",
"update_policy_profiles": "rule:admin_only",
"get_policy_profiles": "",
"get_policy_profile": "",
"create_metering_label": "rule:admin_only",
"delete_metering_label": "rule:admin_only",
"get_metering_label": "rule:admin_only",
"create_metering_label_rule": "rule:admin_only",
"delete_metering_label_rule": "rule:admin_only",
"get_metering_label_rule": "rule:admin_only",
"get_service_provider": "rule:regular_user",
"get_lsn": "rule:admin_only",
"create_lsn": "rule:admin_only"
}
==> networking_arista-2023.1.0/networking_arista/__init__.py <==
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import gettext
import pbr.version
import six
__version__ = pbr.version.VersionInfo(
'networking_arista').version_string()
if six.PY2:
    gettext.install('networking_arista', unicode=1)
else:
    gettext.install('networking_arista')
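
# Illustrative note (not part of the original module): because the version
# string comes from pbr, consumers can read it directly, e.g.
#   import networking_arista
#   networking_arista.__version__   # '2023.1.0' for this release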
==> networking_arista-2023.1.0/networking_arista/_i18n.py <==
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import oslo_i18n
DOMAIN = "networking_arista"
_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN)
# The primary translation function using the well-known name "_"
_ = _translators.primary
# The contextual translation function using the name "_C"
_C = _translators.contextual_form
# The plural translation function using the name "_P"
_P = _translators.plural_form
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
def get_available_languages():
    return oslo_i18n.get_available_languages(DOMAIN)
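
# A hedged usage sketch (not part of the original module): these helpers
# wrap oslo_i18n, so other modules in the package mark strings like:
#   from networking_arista._i18n import _, _LW
#   raise ValueError(_('message shown to the user'))
#   LOG.warning(_LW('message written to the warning log'))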
==> networking_arista-2023.1.0/networking_arista/common/__init__.py <==

==> networking_arista-2023.1.0/networking_arista/common/api.py <==
# Copyright (c) 2017 Arista Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from oslo_log import log as logging
from oslo_utils import excutils
import requests
from requests import exceptions as requests_exc
from six.moves.urllib import parse
from networking_arista._i18n import _LI, _LW, _LC
from networking_arista.common import exceptions as arista_exc
LOG = logging.getLogger(__name__)
# EAPI error message
ERR_CVX_NOT_LEADER = 'only available on cluster leader'
ERR_INVALID_COMMAND = 'invalid command'
class EAPIClient(object):
def __init__(self, host, username=None, password=None, verify=False,
timeout=None):
self.host = host
self.timeout = timeout
self.url = self._make_url(host)
self.session = requests.Session()
self.session.headers['Content-Type'] = 'application/json'
self.session.headers['Accept'] = 'application/json'
self.session.verify = verify
self.session.auth = (username, password)
@staticmethod
def _make_url(host, scheme='https'):
return parse.urlunsplit(
(scheme, host, '/command-api', '', '')
)
def execute(self, commands, commands_to_log=None, keep_alive=True):
params = {
'timestamps': False,
'format': 'json',
'version': 1,
'cmds': commands
}
data = {
'id': 'Networking Arista Driver',
'method': 'runCmds',
'jsonrpc': '2.0',
'params': params
}
if commands_to_log:
log_data = dict(data)
log_data['params'] = dict(params)
log_data['params']['cmds'] = commands_to_log
else:
log_data = data
LOG.info(
_LI('EAPI request %(ip)s contains %(data)s'),
{'ip': self.host, 'data': json.dumps(log_data)}
)
# We can disable keep_alive if we call this from plugin init so we
# don't break the SSL session. Normally keep_alive=False should be used
# only for calls from init, all the rest should use keep_alive=True
self.session.headers['Connection'] = ('keep-alive' if keep_alive
else 'close')
# request handling
try:
error = None
response = self.session.post(
self.url,
data=json.dumps(data),
timeout=self.timeout
)
except requests_exc.ConnectionError:
error = _LW('Error while trying to connect to %(ip)s')
except requests_exc.ConnectTimeout:
error = _LW('Timed out while trying to connect to %(ip)s')
except requests_exc.Timeout:
error = _LW('Timed out during an EAPI request to %(ip)s')
except requests_exc.InvalidURL:
            error = _LW('Ignoring attempt to connect to invalid URL at %(ip)s')
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.warning(
                    _LW('Error while processing the EAPI request: %(error)s'),
{'error': e}
)
finally:
if error:
msg = error % {'ip': self.host}
                # stop processing since we've encountered a request error
LOG.warning(msg)
raise arista_exc.AristaRpcError(msg=msg)
if response.status_code != requests.status_codes.codes.OK:
msg = _LC(
'Error (%(code)s - %(reason)s) while executing the command')
LOG.error(msg, {
'code': response.status_code,
'reason': response.text})
# response handling
try:
resp_data = response.json()
return resp_data['result']
except ValueError:
LOG.info(_LI('Ignoring invalid JSON response'))
except KeyError:
if 'error' in resp_data:
for i, d in enumerate(resp_data['error']['data'], 1):
if not isinstance(d, dict):
continue
if 'messages' in d:
LOG.info(
_LI('Command %(cmd)s returned message %(msg)s'),
{'cmd': i, 'msg': d['messages']})
if 'errors' in d:
LOG.info(
_LI('Command %(cmd)s returned error %(err)s'),
{'cmd': i, 'err': d['errors']})
if ERR_CVX_NOT_LEADER in d['errors'][0]:
LOG.info(_LI('%(ip)s is not the CVX leader'),
{'ip': self.host})
return
msg = resp_data['error'].get('message', '')
if ERR_INVALID_COMMAND in msg:
raise arista_exc.AristaServicePluginInvalidCommand(msg=msg)
msg = ('Unexpected EAPI error: %s' %
resp_data.get('error', {}).get('message', 'Unknown Error'))
LOG.info(msg)
raise arista_exc.AristaRpcError(msg=msg)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.warning(
                    _LW('Error while processing the EAPI response: %(error)s'),
{'error': e}
)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/common/config.py0000664000175000017500000002547700000000000024374 0ustar00zuulzuul00000000000000# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from networking_arista._i18n import _
# Arista ML2 Mechanism driver specific configuration knobs.
#
# Following are user configurable options for Arista ML2 Mechanism
# driver. The eapi_username, eapi_password, and eapi_host are
# required options. Region Name must be the same that is used by
# Keystone service. This option is available to support multiple
# OpenStack/Neutron controllers.
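# Example ml2_conf.ini snippet (illustrative values):
#
#   [ml2_arista]
#   eapi_host = 192.0.2.10
#   eapi_username = admin
#   eapi_password = secret
#   region_name = RegionOne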
ARISTA_DRIVER_OPTS = [
cfg.StrOpt('eapi_username',
default='',
               help=_('Username for Arista EOS. This is a required field. '
'If not set, all communications to Arista EOS '
'will fail.')),
cfg.StrOpt('eapi_password',
default='',
secret=True, # do not expose value in the logs
               help=_('Password for Arista EOS. This is a required field. '
'If not set, all communications to Arista EOS '
'will fail.')),
cfg.StrOpt('eapi_host',
default='',
               help=_('Arista EOS IP address. This is a required field. '
                      'If not set, all communications to Arista EOS '
                      'will fail. '
'If CVX has been deployed in a highly available (HA) '
'cluster, specify each instance IP separated by '
'a comma.')),
cfg.BoolOpt('use_fqdn',
default=True,
help=_('Defines if hostnames are sent to Arista EOS as FQDNs '
'("node1.domain.com") or as short names ("node1"). '
'This is optional. If not set, a value of "True" '
'is assumed.')),
cfg.BoolOpt('use_fqdn_physnet',
default=True,
help=_('In HPB deployments, this should be set to False if '
'short switch hostnames are used for physnets in '
'network_vlan_ranges and bridge_mappings. If FQDNs '
'are used in physnets, this should be set to True.')),
cfg.IntOpt('sync_interval',
default=30,
help=_('Sync interval in seconds between Neutron plugin and '
'EOS. This interval defines how often the '
'synchronization is performed. This is an optional '
'field. If not set, a value of 30 seconds is '
'assumed.')),
cfg.IntOpt('conn_timeout',
default=60,
help=_('Connection timeout interval in seconds. This interval '
'defines how long an API request from the driver to '
'CVX waits before timing out. If not set, a value of 60 '
'seconds is assumed.')),
cfg.StrOpt('region_name',
default='RegionOne',
help=_('Defines Region Name that is assigned to this OpenStack '
'Controller. This is useful when multiple '
'OpenStack/Neutron controllers are managing the same '
'Arista HW clusters. Note that this name must match '
'with the region name registered (or known) to keystone '
'service. Authentication with Keystone is performed by '
'EOS. This is optional. If not set, a value of '
'"RegionOne" is assumed.')),
cfg.BoolOpt('sec_group_support',
default=False,
                help=_('Specifies whether Security Groups need to be '
                       'deployed for baremetal deployments. If this flag '
                       'is set to True, switch_info (see below) must be '
                       'defined. If this flag is not defined, it is '
                       'assumed to be False.')),
cfg.ListOpt('switch_info',
default=[],
help=_('This is a comma separated list of Arista switches '
'where security groups (i.e. ACLs) need to be '
'applied. Each string has three values separated '
                       'by : in the following format '
                       '<switch_ip>:<username>:<password>, ...\n'
'For Example: 172.13.23.55:admin:admin, '
'172.13.23.56:admin:admin, ...\n'
'This is required if sec_group_support is set to '
'"True"')),
cfg.ListOpt('managed_physnets',
default=[],
help=_('This is a comma separated list of physical networks '
'which are managed by Arista switches. '
'This list will be used by the Arista ML2 plugin '
'to make the decision if it can participate in binding '
'or updating a port.\n'
'For Example: '
'managed_physnets = arista_network')),
cfg.BoolOpt('manage_fabric',
default=False,
help=_('Specifies whether the Arista ML2 plugin should bind '
'ports to vxlan fabric segments and dynamically '
'allocate vlan segments based on the host to connect '
'the port to the vxlan fabric')),
]
""" Arista L3 Service Plugin specific configuration knobs.
Following are user configurable options for Arista L3 plugin
driver. The eapi_username, eapi_password, and eapi_host are
required options.
"""
ARISTA_L3_PLUGIN = [
cfg.StrOpt('primary_l3_host_username',
default='',
               help=_('Username for Arista EOS. This is a required field. '
'If not set, all communications to Arista EOS '
'will fail')),
cfg.StrOpt('primary_l3_host_password',
default='',
secret=True, # do not expose value in the logs
               help=_('Password for Arista EOS. This is a required field. '
'If not set, all communications to Arista EOS '
'will fail')),
cfg.StrOpt('primary_l3_host',
default='',
               help=_('Arista EOS IP address. This is a required field. '
'If not set, all communications to Arista EOS '
'will fail')),
cfg.StrOpt('secondary_l3_host',
default='',
help=_('Arista EOS IP address for second Switch MLAGed with '
                      'the first one. This is an optional field; however, '
                      'if the mlag_config flag is set, it is required. '
'If not set, all communications to Arista EOS '
'will fail')),
cfg.IntOpt('conn_timeout',
default=10,
help=_('Connection timeout interval in seconds. This interval '
'defines how long an EAPI request from the driver to '
'EOS waits before timing out. If not set, a value of 10 '
'seconds is assumed.')),
cfg.BoolOpt('mlag_config',
default=False,
help=_('This flag is used to indicate if Arista Switches are '
'configured in MLAG mode. If yes, all L3 config '
                        'is pushed to both switches automatically. '
'If this flag is set to True, ensure to specify IP '
'addresses of both switches. '
'This is optional. If not set, a value of "False" '
'is assumed.')),
cfg.BoolOpt('use_vrf',
default=False,
help=_('A "True" value for this flag indicates to create a '
'router in VRF. If not set, all routers are created '
'in default VRF. '
'This is optional. If not set, a value of "False" '
'is assumed.')),
cfg.BoolOpt('vrf_default_route',
default=False,
help=_('A "True" value for this flag indicates to create a '
'default route in VRF. This setting is valid only '
'when used with the use_vrf=True. If not set, '
'all routers are created without default gateway.'
'This is optional. If not set, a value of "False" '
'is assumed.')),
cfg.IntOpt('l3_sync_interval',
default=180,
help=_('Sync interval in seconds between L3 Service plugin '
'and EOS. This interval defines how often the '
'synchronization is performed. This is an optional '
'field. If not set, a value of 180 seconds is assumed')),
cfg.BoolOpt('enable_cleanup',
default=False,
help=_('Toggle to enable cleanup of unused VLANs, VRFs and '
'SVIs on EOS L3 hosts in the sync worker. If enabled, '
'ensure that all non-openstack VLANs are added to '
'protected_vlans to ensure that they are not removed '
'by the sync worker. If not set, a value of "False" '
'is assumed.')),
cfg.ListOpt('protected_vlans',
default=[],
                help=_('List of VLANs or <start_vlan>:<end_vlan> ranges that '
'should never be cleaned up by the L3 sync worker. '
'This applies to both VLANs and SVIs')),
]
ARISTA_TYPE_DRIVER_OPTS = [
cfg.IntOpt('sync_interval',
default=10,
help=_('VLAN Sync interval in seconds between Neutron plugin '
'and EOS. This interval defines how often the VLAN '
'synchronization is performed. This is an optional '
'field. If not set, a value of 10 seconds is '
'assumed.')),
]
cfg.CONF.register_opts(ARISTA_L3_PLUGIN, "l3_arista")
cfg.CONF.register_opts(ARISTA_DRIVER_OPTS, "ml2_arista")
cfg.CONF.register_opts(ARISTA_TYPE_DRIVER_OPTS, "arista_type_driver")
def list_opts():
return [
('ml2_arista',
ARISTA_DRIVER_OPTS),
('l3_arista',
ARISTA_L3_PLUGIN),
('arista_type_driver',
ARISTA_TYPE_DRIVER_OPTS)
]
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/common/constants.py0000664000175000017500000000426300000000000025131 0ustar00zuulzuul00000000000000# Copyright (c) 2017 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from networking_arista._i18n import _
# Resource actions
CREATE = 'create'
DELETE = 'delete'
FULL_SYNC = 'full_sync'
# Resource types
TENANT_RESOURCE = 'tenant'
NETWORK_RESOURCE = 'network'
SEGMENT_RESOURCE = 'segment'
DHCP_RESOURCE = 'dhcp'
ROUTER_RESOURCE = 'router'
VM_RESOURCE = 'vm'
BAREMETAL_RESOURCE = 'baremetal'
PORT_SUFFIX = '_port'
DHCP_PORT_RESOURCE = DHCP_RESOURCE + PORT_SUFFIX
ROUTER_PORT_RESOURCE = ROUTER_RESOURCE + PORT_SUFFIX
VM_PORT_RESOURCE = VM_RESOURCE + PORT_SUFFIX
BAREMETAL_PORT_RESOURCE = BAREMETAL_RESOURCE + PORT_SUFFIX
PORT_BINDING_RESOURCE = 'port_binding'
ALL_RESOURCE_TYPES = [TENANT_RESOURCE,
NETWORK_RESOURCE,
SEGMENT_RESOURCE,
DHCP_RESOURCE,
ROUTER_RESOURCE,
VM_RESOURCE,
BAREMETAL_RESOURCE,
DHCP_PORT_RESOURCE,
VM_PORT_RESOURCE,
BAREMETAL_PORT_RESOURCE,
PORT_BINDING_RESOURCE]
# Constants
INTERNAL_TENANT_ID = 'INTERNAL-TENANT-ID'
MECHANISM_DRV_NAME = 'arista'
# SG Constants
# When a SG is applied to a VM, ingress refers to traffic flowing
# into a VM and egress refers to traffic flowing out.
# In the baremetal case, traffic flowing out of a switchport is
# flowing into the baremetal. Therefore, INGRESS SG rules
# should be applied as 'out' ACLs and EGRESS rules as 'in' ACLs.
INGRESS_DIRECTION = 'out'
EGRESS_DIRECTION = 'in'
# EAPI error messages of interest
EOS_UNREACHABLE_MSG = _('Unable to reach EOS')
ERR_CVX_NOT_LEADER = _('only available on cluster leader')
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/common/db_lib.py0000664000175000017500000005633400000000000024336 0ustar00zuulzuul00000000000000# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from contextlib import suppress
from oslo_config import cfg
from oslo_log import log as logging
from sqlalchemy import and_, or_
from sqlalchemy import func
from sqlalchemy.orm import immediateload, Query, aliased
from sqlalchemy.sql import visitors
from neutron_lib.api.definitions import portbindings
from neutron_lib import constants as n_const
from neutron_lib.db import api as db
from neutron_lib.services.trunk import constants as t_const
from neutron.db.models import l3 as l3_models
from neutron.db.models import l3ha as l3ha_models
from neutron.db.models.plugins.ml2 import vlanallocation
from neutron.db.models import securitygroup as sg_models
from neutron.db.models import segment as segment_models
from neutron.db import models_v2
from neutron.plugins.ml2 import models as ml2_models
from neutron.services.trunk import models as trunk_models
from networking_arista.common import utils
LOG = logging.getLogger(__name__)
def has_table(query, table):
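    """Return True if the given table already participates in the query.
    Walks the compiled statement's visitor tree; used by the
    *_if_necessary helpers below to avoid duplicate joins.
    """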
for visitor in visitors.iterate(query.statement):
if visitor.__visit_name__ == 'table':
with suppress(TypeError):
if table == visitor.entity_namespace:
return True
return False
def join_if_necessary(query, *args, **kwargs):
table = args[0]
if has_table(query, table):
return query
return query.join(*args, **kwargs)
def outerjoin_if_necessary(query, *args, **kwargs):
table = args[0]
if has_table(query, table):
return query
return query.outerjoin(*args, **kwargs)
def filter_network_type(query):
"""Filter unsupported segment types"""
segment_model = segment_models.NetworkSegment
query = (query
.filter(
segment_model.network_type.in_(
utils.SUPPORTED_NETWORK_TYPES)))
return query
def filter_unbound_ports(query):
"""Filter ports not bound to a host or network"""
# hack for pep8 E711: comparison to None should be
# 'if cond is not None'
none = None
port_model = models_v2.Port
binding_level_model = ml2_models.PortBindingLevel
query = (query
.join_if_necessary(port_model)
.join_if_necessary(binding_level_model)
.filter(
binding_level_model.host != '',
port_model.device_id != none,
port_model.network_id != none))
return query
def filter_by_device_owner(query, device_owners=None):
"""Filter ports by device_owner
Either filter using specified device_owner or using the list of all
device_owners supported and unsupported by the arista ML2 plugin
"""
port_model = models_v2.Port
if not device_owners:
device_owners = utils.SUPPORTED_DEVICE_OWNERS
supported_device_owner_filter = [
port_model.device_owner.ilike('%s%%' % owner)
for owner in device_owners]
unsupported_device_owner_filter = [
port_model.device_owner.notilike('%s%%' % owner)
for owner in utils.UNSUPPORTED_DEVICE_OWNERS]
query = (query
.filter(
and_(*unsupported_device_owner_filter),
or_(*supported_device_owner_filter)))
return query
def filter_by_device_id(query):
"""Filter ports attached to devices we don't care about
Currently used to filter DHCP_RESERVED ports
"""
port_model = models_v2.Port
unsupported_device_id_filter = [
port_model.device_id.notilike('%s%%' % id)
for id in utils.UNSUPPORTED_DEVICE_IDS]
query = (query
.filter(and_(*unsupported_device_id_filter)))
return query
def filter_by_vnic_type(query, vnic_type):
"""Filter ports by vnic_type (currently only used for baremetals)"""
port_model = models_v2.Port
binding_model = ml2_models.PortBinding
dst_binding_model = ml2_models.DistributedPortBinding
query = (query
.outerjoin_if_necessary(
binding_model,
port_model.id == binding_model.port_id)
.outerjoin_if_necessary(
dst_binding_model,
port_model.id == dst_binding_model.port_id)
.filter(
(binding_model.vnic_type == vnic_type) |
(dst_binding_model.vnic_type == vnic_type)))
return query
def filter_unmanaged_physnets(query):
"""Filter ports managed by other ML2 plugins """
config = cfg.CONF.ml2_arista
managed_physnets = config['managed_physnets']
# Filter out ports bound to segments on physnets that we're not
# managing
segment_model = segment_models.NetworkSegment
if managed_physnets:
query = (query
.join_if_necessary(segment_model)
.filter(segment_model.physical_network.in_(
managed_physnets)))
return query
def filter_inactive_ports(query):
"""Filter ports that aren't in active status """
port_model = models_v2.Port
query = (query
.filter(port_model.status.in_([n_const.PORT_STATUS_ACTIVE,
n_const.PORT_STATUS_BUILD])))
return query
def filter_unnecessary_ports(query, device_owners=None, vnic_type=None,
active=True):
"""Filter out all ports are not needed on CVX """
query = (query
.filter_unbound_ports()
.filter_by_device_owner(device_owners)
.filter_by_device_id()
.filter_unmanaged_physnets())
if active:
query = query.filter_inactive_ports()
if vnic_type:
query = query.filter_by_vnic_type(vnic_type)
return query
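# Attach the filter helpers to Query so they can be chained fluently on
# any query, e.g. (illustrative):
#   session.query(models_v2.Port).filter_unbound_ports().filter_by_device_id()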
Query.join_if_necessary = join_if_necessary
Query.outerjoin_if_necessary = outerjoin_if_necessary
Query.filter_network_type = filter_network_type
Query.filter_unbound_ports = filter_unbound_ports
Query.filter_by_device_owner = filter_by_device_owner
Query.filter_by_device_id = filter_by_device_id
Query.filter_by_vnic_type = filter_by_vnic_type
Query.filter_unmanaged_physnets = filter_unmanaged_physnets
Query.filter_inactive_ports = filter_inactive_ports
Query.filter_unnecessary_ports = filter_unnecessary_ports
def get_tenants(tenant_id=None):
"""Returns list of all project/tenant ids that may be relevant on CVX"""
session = db.get_reader_session()
project_ids = set()
with session.begin():
for m in [models_v2.Network, models_v2.Port, l3_models.Router]:
q = session.query(m.project_id).filter(m.project_id != '')
if tenant_id is not None:
q = q.filter(m.project_id == tenant_id)
project_ids.update(pid[0] for pid in q.distinct())
return [{'project_id': project_id} for project_id in project_ids]
def get_networks(network_id=None):
"""Returns list of all networks that may be relevant on CVX"""
session = db.get_reader_session()
with session.begin():
model = models_v2.Network
l3ha_network = l3ha_models.L3HARouterNetwork
networks = (session.query(model, l3ha_network)
.outerjoin(l3ha_network,
l3ha_network.network_id == model.id))
if network_id is not None:
networks = networks.filter(model.id == network_id)
return networks.all()
def get_segments(segment_id=None):
"""Returns list of all network segments that may be relevant on CVX"""
session = db.get_reader_session()
with session.begin():
model = segment_models.NetworkSegment
segments = session.query(model).filter_network_type()
if segment_id is not None:
segments = segments.filter(model.id == segment_id)
return segments.all()
def get_instances(device_owners=None, vnic_type=None, instance_id=None):
"""Returns filtered list of all instances in the neutron db"""
session = db.get_reader_session()
with session.begin():
port_model = models_v2.Port
binding_model = ml2_models.PortBinding
router_model = l3_models.Router
instances = (session
.query(port_model,
binding_model,
router_model)
.outerjoin(
binding_model,
port_model.id == binding_model.port_id)
.outerjoin(
router_model,
port_model.device_id == router_model.id)
.distinct(port_model.device_id)
.group_by(port_model.device_id)
.filter_unnecessary_ports(device_owners, vnic_type))
if instance_id is not None:
instances = instances.filter(port_model.device_id == instance_id)
return instances.all()
def get_dhcp_instances(instance_id=None):
"""Returns filtered list of DHCP instances that may be relevant on CVX"""
return get_instances(device_owners=[n_const.DEVICE_OWNER_DHCP],
instance_id=instance_id)
def get_router_instances(instance_id=None):
"""Returns filtered list of routers that may be relevant on CVX"""
return get_instances(device_owners=[n_const.DEVICE_OWNER_DVR_INTERFACE,
n_const.DEVICE_OWNER_ROUTER_HA_INTF,
n_const.DEVICE_OWNER_ROUTER_INTF,
n_const.DEVICE_OWNER_ROUTER_GW],
instance_id=instance_id)
def get_vm_instances(instance_id=None):
"""Returns filtered list of vms that may be relevant on CVX"""
return get_instances(device_owners=[n_const.DEVICE_OWNER_COMPUTE_PREFIX],
vnic_type=portbindings.VNIC_NORMAL,
instance_id=instance_id)
def get_baremetal_instances(instance_id=None):
"""Returns filtered list of baremetals that may be relevant on CVX"""
return get_instances(vnic_type=portbindings.VNIC_BAREMETAL,
instance_id=instance_id)
def get_ports(device_owners=None, vnic_type=None, port_id=None, active=True):
"""Returns list of all ports in neutron the db"""
session = db.get_reader_session()
with session.begin():
port_model = models_v2.Port
router_model = l3_models.Router
ports = (session
.query(port_model, router_model)
.outerjoin(router_model,
router_model.id == port_model.device_id)
.filter_unnecessary_ports(device_owners, vnic_type, active))
if port_id is not None:
ports = ports.filter(port_model.id == port_id)
return ports.all()
def get_dhcp_ports(port_id=None):
"""Returns filtered list of DHCP instances that may be relevant on CVX"""
return get_ports(device_owners=[n_const.DEVICE_OWNER_DHCP],
port_id=port_id)
def get_router_ports(port_id=None):
"""Returns filtered list of routers that may be relevant on CVX"""
return get_ports(device_owners=[n_const.DEVICE_OWNER_DVR_INTERFACE,
n_const.DEVICE_OWNER_ROUTER_HA_INTF,
n_const.DEVICE_OWNER_ROUTER_INTF,
n_const.DEVICE_OWNER_ROUTER_GW],
port_id=port_id)
def get_vm_ports(port_id=None):
"""Returns filtered list of vms that may be relevant on CVX"""
return get_ports(device_owners=[n_const.DEVICE_OWNER_COMPUTE_PREFIX,
t_const.TRUNK_SUBPORT_OWNER],
vnic_type=portbindings.VNIC_NORMAL, port_id=port_id)
def get_baremetal_ports(port_id=None):
"""Returns filtered list of baremetals that may be relevant on CVX"""
return get_ports(vnic_type=portbindings.VNIC_BAREMETAL, port_id=port_id)
def get_port_bindings(binding_key=None):
"""Returns filtered list of port bindings that may be relevant on CVX
This query is a little complex as we need all binding levels for any
binding that has a single managed physnet, but we need to filter bindings
that have no managed physnets. In order to achieve this, we join to the
binding_level_model once to filter bindings with no managed levels,
then a second time to get all levels for the remaining bindings.
The loop at the end is a convenience to associate levels with bindings
as a list. This would ideally be done through the use of an orm.relation,
but due to some sqlalchemy limitations imposed to make OVO work, we can't
add relations to existing models.
"""
session = db.get_reader_session()
with session.begin():
binding_level_model = ml2_models.PortBindingLevel
aliased_blm = aliased(ml2_models.PortBindingLevel)
port_binding_model = ml2_models.PortBinding
dist_binding_model = ml2_models.DistributedPortBinding
bindings = (session.query(port_binding_model, aliased_blm)
.join(binding_level_model,
and_(
port_binding_model.port_id ==
binding_level_model.port_id,
port_binding_model.host ==
binding_level_model.host))
.filter_unnecessary_ports()
.join(aliased_blm,
and_(port_binding_model.port_id ==
aliased_blm.port_id,
port_binding_model.host ==
aliased_blm.host)))
dist_bindings = (session.query(dist_binding_model, aliased_blm)
.join(
binding_level_model,
and_(dist_binding_model.port_id ==
binding_level_model.port_id,
dist_binding_model.host ==
binding_level_model.host))
.filter_unnecessary_ports()
.filter(dist_binding_model.status.in_([
n_const.PORT_STATUS_BUILD,
n_const.PORT_STATUS_ACTIVE]))
.join(aliased_blm,
and_(dist_binding_model.port_id ==
aliased_blm.port_id,
dist_binding_model.host ==
aliased_blm.host)))
if binding_key:
port_id = binding_key[0]
            if isinstance(binding_key[1], tuple):
switch_id = binding_key[1][0]
switch_port = binding_key[1][1]
bindings = bindings.filter(and_(
port_binding_model.port_id == port_id,
port_binding_model.profile.ilike('%%%s%%' % switch_id),
port_binding_model.profile.ilike('%%%s%%' % switch_port)))
dist_bindings = dist_bindings.filter(and_(
dist_binding_model.port_id == port_id,
dist_binding_model.profile.ilike('%%%s%%' % switch_id),
dist_binding_model.profile.ilike('%%%s%%' % switch_port)))
else:
host_id = binding_key[1]
bindings = bindings.filter(and_(
port_binding_model.port_id == port_id,
port_binding_model.host == host_id))
dist_bindings = dist_bindings.filter(and_(
dist_binding_model.port_id == port_id,
dist_binding_model.host == host_id))
binding_levels = collections.defaultdict(list)
for binding, level in bindings.all() + dist_bindings.all():
binding_levels[binding].append(level)
bindings_with_levels = list()
for binding, levels in binding_levels.items():
binding.levels = levels
bindings_with_levels.append(binding)
return bindings_with_levels
def get_mlag_physnets():
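    """Map each MLAG peer hostname to the physnet shared by the pair.
    Assumes MLAG physnets are named '<peer1>_<peer2>'; e.g. a physnet
    'tor1_tor2' yields {'tor1': 'tor1_tor2', 'tor2': 'tor1_tor2'}.
    """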
mlag_pairs = dict()
session = db.get_reader_session()
with session.begin():
physnets = session.query(
vlanallocation.VlanAllocation.physical_network
).distinct().all()
for (physnet,) in physnets:
if '_' in physnet:
peers = physnet.split('_')
mlag_pairs[peers[0]] = physnet
mlag_pairs[peers[1]] = physnet
return mlag_pairs
def segment_is_dynamic(segment_id):
session = db.get_reader_session()
with session.begin():
segment_model = segment_models.NetworkSegment
res = bool(session
.query(segment_model)
.filter_by(id=segment_id)
.filter_by(is_dynamic=True).count())
return res
def segment_bound(segment_id):
session = db.get_reader_session()
with session.begin():
binding_level_model = ml2_models.PortBindingLevel
res = bool(session
.query(binding_level_model)
.filter_by(segment_id=segment_id).count())
return res
def tenant_provisioned(tenant_id):
"""Returns true if any networks or ports exist for a tenant."""
session = db.get_reader_session()
with session.begin():
res = any(
session.query(m).filter(m.tenant_id == tenant_id).count()
for m in [models_v2.Network, models_v2.Port]
)
return res
def instance_provisioned(device_id):
"""Returns true if any ports exist for an instance."""
session = db.get_reader_session()
with session.begin():
port_model = models_v2.Port
res = bool(session.query(port_model)
.filter(port_model.device_id == device_id)
.filter_inactive_ports().count())
return res
def port_provisioned(port_id):
"""Returns true if port still exists."""
session = db.get_reader_session()
with session.begin():
port_model = models_v2.Port
res = bool(session.query(port_model)
.filter(port_model.id == port_id)
.filter_inactive_ports().count())
return res
def get_parent(port_id):
"""Get trunk subport's parent port"""
session = db.get_reader_session()
res = dict()
with session.begin():
subport_model = trunk_models.SubPort
trunk_model = trunk_models.Trunk
subport = (session.query(subport_model).
filter(subport_model.port_id == port_id).first())
if subport:
trunk = (session.query(trunk_model).
filter(trunk_model.id == subport.trunk_id).first())
if trunk:
trunk_port_id = trunk.port.id
res = getattr(get_ports(port_id=trunk_port_id,
active=False)[0], 'Port')
return res
def get_port_binding_level(filters):
"""Returns entries from PortBindingLevel based on the specified filters."""
session = db.get_reader_session()
with session.begin():
return (session.query(ml2_models.PortBindingLevel).
filter_by(**filters).
order_by(ml2_models.PortBindingLevel.level).
all())
def get_security_groups():
session = db.get_reader_session()
with session.begin():
sg_model = sg_models.SecurityGroup
# We do an immediate load to prevent the need for the sync worker
# to issue subqueries
security_groups = (session.query(sg_model)
.options(immediateload(sg_model.rules)))
return security_groups
def get_baremetal_sg_bindings():
session = db.get_reader_session()
with session.begin():
sg_binding_model = sg_models.SecurityGroupPortBinding
binding_model = ml2_models.PortBinding
sg_bindings = (session
.query(sg_binding_model,
binding_model)
.outerjoin(
binding_model,
sg_binding_model.port_id == binding_model.port_id)
.filter_unnecessary_ports(
vnic_type=portbindings.VNIC_BAREMETAL)
.group_by(sg_binding_model.port_id)
.having(func.count(sg_binding_model.port_id) == 1))
return sg_bindings
def get_subnet_gateway_ipv4(subnet_id):
"""Returns the IPv4 gateway of the router if exists"""
session = db.get_reader_session()
with session.begin():
subnet_model = models_v2.Subnet
router_model = l3_models.Router
port_model = models_v2.Port
ip_allocation_model = models_v2.IPAllocation
result = (session
.query(router_model.name,
router_model.id,
subnet_model.gateway_ip.label('gip'),
subnet_model.cidr,
subnet_model.ip_version,
port_model.network_id,
ip_allocation_model.ip_address)
.filter(and_(router_model.gw_port_id == port_model.id,
port_model.network_id ==
subnet_model.network_id,
ip_allocation_model.network_id ==
subnet_model.network_id,
subnet_model.ip_version == 4,
subnet_model.id == subnet_id)).first())
return _format_gateway_result(result)
def get_network_gateway_ipv4(network_id):
"""Returns all the routers and IPv4 gateway that have network as gateway"""
session = db.get_reader_session()
with session.begin():
subnet_model = models_v2.Subnet
router_model = l3_models.Router
port_model = models_v2.Port
ip_allocation_model = models_v2.IPAllocation
result = (session
.query(router_model.name,
router_model.id,
subnet_model.gateway_ip.label('gip'),
subnet_model.cidr,
subnet_model.ip_version,
port_model.network_id,
ip_allocation_model.ip_address)
.filter(and_(router_model.gw_port_id == port_model.id,
port_model.network_id ==
subnet_model.network_id,
ip_allocation_model.network_id ==
subnet_model.network_id,
port_model.network_id == network_id,
subnet_model.ip_version == 4)).first())
return _format_gateway_result(result)
def _format_gateway_result(db_result):
"""This function formats result as needed by add_router_interface"""
if not db_result:
return None
result = {
k: db_result[i]
for i, k in enumerate(
('name', 'id', 'gip', 'cidr', 'ip_version', 'network_id',
'fixed_ip'))}
return result
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/common/exceptions.py0000664000175000017500000000326300000000000025275 0ustar00zuulzuul00000000000000# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Exceptions used by Arista ML2 Mechanism Driver."""
from neutron_lib import exceptions
from networking_arista._i18n import _
class AristaRpcError(exceptions.NeutronException):
message = _('%(msg)s')
class AristaConfigError(exceptions.NeutronException):
message = _('%(msg)s')
class AristaServicePluginRpcError(exceptions.NeutronException):
message = _('%(msg)s')
class AristaServicePluginConfigError(exceptions.NeutronException):
message = _('%(msg)s')
class AristaServicePluginInvalidCommand(exceptions.NeutronException):
message = _('%(msg)s')
class VlanUnavailable(exceptions.NeutronException):
"""An exception indicating VLAN creation failed because it's not available.
A specialization of the NeutronException indicating network creation failed
because a specified VLAN is unavailable on the physical network.
:param vlan_id: The VLAN ID.
:param physical_network: The physical network.
"""
message = _("Unable to create the network. "
"The VLAN %(vlan_id)s on physical network "
"%(physical_network)s is not available.")
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/common/utils.py0000664000175000017500000000423400000000000024253 0ustar00zuulzuul00000000000000# Copyright (c) 2017 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron_lib import constants as n_const
from neutron_lib.services.trunk import constants as t_const
from oslo_config import cfg
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
SUPPORTED_NETWORK_TYPES = [
n_const.TYPE_VLAN,
n_const.TYPE_VXLAN]
SUPPORTED_DEVICE_OWNERS = [
n_const.DEVICE_OWNER_COMPUTE_PREFIX,
n_const.DEVICE_OWNER_BAREMETAL_PREFIX,
n_const.DEVICE_OWNER_DHCP,
n_const.DEVICE_OWNER_DVR_INTERFACE,
n_const.DEVICE_OWNER_ROUTER_HA_INTF,
n_const.DEVICE_OWNER_ROUTER_INTF,
n_const.DEVICE_OWNER_ROUTER_GW,
t_const.TRUNK_SUBPORT_OWNER]
UNSUPPORTED_DEVICE_OWNERS = [
n_const.DEVICE_OWNER_COMPUTE_PREFIX + 'probe']
UNSUPPORTED_DEVICE_IDS = [
n_const.DEVICE_ID_RESERVED_DHCP_PORT]
SUPPORTED_SG_PROTOCOLS = [
None,
n_const.PROTO_NAME_TCP,
n_const.PROTO_NAME_UDP,
n_const.PROTO_NAME_ICMP]
def supported_device_owner(device_owner):
if (any([device_owner.startswith(supported_owner) for
supported_owner in SUPPORTED_DEVICE_OWNERS]) and
not any([device_owner.startswith(unsupported_owner) for
unsupported_owner in UNSUPPORTED_DEVICE_OWNERS])):
return True
LOG.debug('Unsupported device owner: %s', device_owner)
return False
def hostname(hostname, extra=None):
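    """Return the host name as it should be reported to EOS.
    Illustrative: with use_fqdn=False, 'node1.domain.com' becomes
    'node1'; with use_fqdn=True the FQDN is passed through unchanged.
    """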
fqdns_used = cfg.CONF.ml2_arista['use_fqdn']
return hostname if fqdns_used else hostname.split('.')[0]
def physnet(hostname):
fqdns_used = cfg.CONF.ml2_arista['use_fqdn_physnet']
return hostname if fqdns_used else hostname.split('.')[0]
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1693270226.6361027
networking_arista-2023.1.0/networking_arista/db/0000775000175000017500000000000000000000000021633 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/db/README0000664000175000017500000000010600000000000022510 0ustar00zuulzuul00000000000000Alembic database migration scripts for the networking-arista package.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/db/__init__.py0000664000175000017500000000000000000000000023732 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1693270226.6361027
networking_arista-2023.1.0/networking_arista/db/migration/0000775000175000017500000000000000000000000023624 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/db/migration/README0000664000175000017500000000010600000000000024501 0ustar00zuulzuul00000000000000Alembic database migration scripts for the networking-arista package.
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/db/migration/__init__.py0000664000175000017500000000000000000000000025723 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1693270226.6401024
networking_arista-2023.1.0/networking_arista/db/migration/alembic_migrations/0000775000175000017500000000000000000000000027454 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/db/migration/alembic_migrations/README0000664000175000017500000000004600000000000030334 0ustar00zuulzuul00000000000000Generic single-database configuration.././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/db/migration/alembic_migrations/__init__.py0000664000175000017500000000000000000000000031553 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/db/migration/alembic_migrations/env.py0000664000175000017500000000667600000000000030635 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Arista Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from logging.config import fileConfig
from alembic import context
from neutron_lib.db import model_base
from oslo_config import cfg
from oslo_db.sqlalchemy import session
import sqlalchemy as sa
from sqlalchemy import event
from neutron.db.migration.alembic_migrations import external
from neutron.db.migration.models import head # noqa
# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config
neutron_config = config.neutron_config
# Interpret the config file for Python logging.
# This line sets up loggers.
fileConfig(config.config_file_name)
# add your model's MetaData object here
# for 'autogenerate' support
# from myapp import mymodel
# target_metadata = mymodel.Base.metadata
target_metadata = model_base.BASEV2.metadata
MYSQL_ENGINE = None
ARISTA_VERSION_TABLE = 'arista_alembic_version'
def set_mysql_engine():
try:
mysql_engine = neutron_config.command.mysql_engine
except cfg.NoSuchOptError:
mysql_engine = None
global MYSQL_ENGINE
MYSQL_ENGINE = (mysql_engine or
model_base.BASEV2.__table_args__['mysql_engine'])
def include_object(object, name, type_, reflected, compare_to):
if type_ == 'table' and name in external.TABLES:
return False
else:
return True
def run_migrations_offline():
"""Run migrations in 'offline' mode.
This configures the context with just a URL or an Engine.
Calls to context.execute() here emit the given string to the
script output.
"""
set_mysql_engine()
kwargs = dict()
if neutron_config.database.connection:
kwargs['url'] = neutron_config.database.connection
else:
kwargs['dialect_name'] = neutron_config.database.engine
kwargs['include_object'] = include_object
kwargs['version_table'] = ARISTA_VERSION_TABLE
context.configure(**kwargs)
with context.begin_transaction():
context.run_migrations()
@event.listens_for(sa.Table, 'after_parent_attach')
def set_storage_engine(target, parent):
if MYSQL_ENGINE:
target.kwargs['mysql_engine'] = MYSQL_ENGINE
def run_migrations_online():
"""Run migrations in 'online' mode.
In this scenario we need to create an Engine
and associate a connection with the context.
"""
set_mysql_engine()
engine = session.create_engine(neutron_config.database.connection)
connection = engine.connect()
context.configure(
connection=connection,
target_metadata=target_metadata,
include_object=include_object,
version_table=ARISTA_VERSION_TABLE,
)
try:
with context.begin_transaction():
context.run_migrations()
finally:
connection.close()
engine.dispose()
if context.is_offline_mode():
run_migrations_offline()
else:
run_migrations_online()
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/db/migration/alembic_migrations/script.py.mako0000664000175000017500000000204700000000000032263 0ustar00zuulzuul00000000000000# Copyright ${create_date.year} Arista Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""${message}
Revision ID: ${up_revision}
Revises: ${down_revision | comma,n}
Create Date: ${create_date}
"""
# revision identifiers, used by Alembic.
revision = ${repr(up_revision)}
down_revision = ${repr(down_revision)}
branch_labels = ${repr(branch_labels)}
depends_on = ${repr(depends_on)}
from alembic import op
import sqlalchemy as sa
${imports if imports else ""}
def upgrade():
${upgrades if upgrades else "pass"}
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1693270226.6401024
networking_arista-2023.1.0/networking_arista/db/migration/alembic_migrations/versions/0000775000175000017500000000000000000000000031324 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000023000000000000011450 xustar0000000000000000130 path=networking_arista-2023.1.0/networking_arista/db/migration/alembic_migrations/versions/296b4e0236e0_initial_db_version.py
22 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/db/migration/alembic_migrations/versions/296b4e0236e0_i0000664000175000017500000000147500000000000033241 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Arista Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Initial db version
Revision ID: 296b4e0236e0
Create Date: 2015-10-23 14:37:49.594974
"""
# revision identifiers, used by Alembic.
revision = '296b4e0236e0'
down_revision = None
def upgrade():
pass
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/db/migration/alembic_migrations/versions/CONTRACT_HEAD0000664000175000017500000000001500000000000033241 0ustar00zuulzuul00000000000000941bad5630c1
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/db/migration/alembic_migrations/versions/EXPAND_HEAD0000664000175000017500000000001500000000000033003 0ustar00zuulzuul000000000000001c6993ce7db0
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/db/migration/alembic_migrations/versions/__init__.py0000664000175000017500000000000000000000000033423 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1693270226.6281033
networking_arista-2023.1.0/networking_arista/db/migration/alembic_migrations/versions/liberty/0000775000175000017500000000000000000000000032776 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000021500000000000011453 xustar0000000000000000113 path=networking_arista-2023.1.0/networking_arista/db/migration/alembic_migrations/versions/liberty/contract/
28 mtime=1693270226.6401024
networking_arista-2023.1.0/networking_arista/db/migration/alembic_migrations/versions/liberty/contra0000775000175000017500000000000000000000000034205 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000026200000000000011455 xustar0000000000000000156 path=networking_arista-2023.1.0/networking_arista/db/migration/alembic_migrations/versions/liberty/contract/47036dc8697a_initial_db_version_contract.py
22 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/db/migration/alembic_migrations/versions/liberty/contra0000664000175000017500000000162400000000000034212 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Arista Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Initial db version
Revision ID: 47036dc8697a
Create Date: 2015-10-23 14:37:49.594974
"""
from neutron.db.migration import cli
# revision identifiers, used by Alembic.
revision = '47036dc8697a'
down_revision = '296b4e0236e0'
branch_labels = (cli.CONTRACT_BRANCH,)
def upgrade():
pass
././@PaxHeader0000000000000000000000000000021300000000000011451 xustar0000000000000000111 path=networking_arista-2023.1.0/networking_arista/db/migration/alembic_migrations/versions/liberty/expand/
28 mtime=1693270226.6401024
networking_arista-2023.1.0/networking_arista/db/migration/alembic_migrations/versions/liberty/expand0000775000175000017500000000000000000000000034176 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000025600000000000011460 xustar0000000000000000152 path=networking_arista-2023.1.0/networking_arista/db/migration/alembic_migrations/versions/liberty/expand/1c6993ce7db0_initial_db_version_expand.py
22 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/db/migration/alembic_migrations/versions/liberty/expand0000664000175000017500000000162200000000000034201 0ustar00zuulzuul00000000000000# Copyright (c) 2015 Arista Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Initial db version
Revision ID: 1c6993ce7db0
Create Date: 2015-10-23 14:37:49.594974
"""
from neutron.db.migration import cli
# revision identifiers, used by Alembic.
revision = '1c6993ce7db0'
down_revision = '296b4e0236e0'
branch_labels = (cli.EXPAND_BRANCH,)
def upgrade():
pass
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1693270226.6281033
networking_arista-2023.1.0/networking_arista/db/migration/alembic_migrations/versions/queens/0000775000175000017500000000000000000000000032624 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000021400000000000011452 xustar0000000000000000112 path=networking_arista-2023.1.0/networking_arista/db/migration/alembic_migrations/versions/queens/contract/
28 mtime=1693270226.6401024
networking_arista-2023.1.0/networking_arista/db/migration/alembic_migrations/versions/queens/contrac0000775000175000017500000000000000000000000034176 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000026000000000000011453 xustar0000000000000000154 path=networking_arista-2023.1.0/networking_arista/db/migration/alembic_migrations/versions/queens/contract/39c2eeb67116_drop_aristaprovisionednets.py
22 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/db/migration/alembic_migrations/versions/queens/contrac0000664000175000017500000000170300000000000034201 0ustar00zuulzuul00000000000000# Copyright 2017 Arista Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from alembic import op
"""Drop AristaProvisionedNets
Revision ID: 39c2eeb67116
Revises: dc7bf9c1ab4d
Create Date: 2017-08-25 16:42:31.814580
"""
# revision identifiers, used by Alembic.
revision = '39c2eeb67116'
down_revision = 'dc7bf9c1ab4d'
branch_labels = None
depends_on = None
def upgrade():
op.drop_table('arista_provisioned_nets')
././@PaxHeader0000000000000000000000000000025700000000000011461 xustar0000000000000000153 path=networking_arista-2023.1.0/networking_arista/db/migration/alembic_migrations/versions/queens/contract/941bad5630c1_drop_aristaprovisionedvms.py
22 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/db/migration/alembic_migrations/versions/queens/contrac0000664000175000017500000000170100000000000034177 0ustar00zuulzuul00000000000000# Copyright 2017 Arista Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from alembic import op
"""Drop AristaProvisionedVms
Revision ID: 941bad5630c1
Revises: 39c2eeb67116
Create Date: 2017-08-31 17:44:21.334780
"""
# revision identifiers, used by Alembic.
revision = '941bad5630c1'
down_revision = '39c2eeb67116'
branch_labels = None
depends_on = None
def upgrade():
op.drop_table('arista_provisioned_vms')
././@PaxHeader0000000000000000000000000000026300000000000011456 xustar0000000000000000157 path=networking_arista-2023.1.0/networking_arista/db/migration/alembic_migrations/versions/queens/contract/dc7bf9c1ab4d_drop_aristaprovisionedtenants.py
22 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/db/migration/alembic_migrations/versions/queens/contrac0000664000175000017500000000171100000000000034200 0ustar00zuulzuul00000000000000# Copyright 2017 Arista Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from alembic import op
"""Drop AristaProvisionedTenants
Revision ID: dc7bf9c1ab4d
Revises: 47036dc8697a
Create Date: 2017-08-23 17:10:36.000671
"""
# revision identifiers, used by Alembic.
revision = 'dc7bf9c1ab4d'
down_revision = '47036dc8697a'
branch_labels = None
depends_on = None
def upgrade():
op.drop_table('arista_provisioned_tenants')
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1693270226.6401024
networking_arista-2023.1.0/networking_arista/l3Plugin/0000775000175000017500000000000000000000000022743 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/l3Plugin/__init__.py0000664000175000017500000000000000000000000025042 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/l3Plugin/arista_l3_driver.py0000664000175000017500000007572400000000000026570 0ustar00zuulzuul00000000000000# Copyright 2014 Arista Networks, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import hashlib
import socket
import struct
from neutron.plugins.ml2.driver_context import NetworkContext # noqa
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants as const
from neutron_lib import context as nctx
from oslo_config import cfg
from oslo_log import log as logging
from networking_arista._i18n import _, _LI
from networking_arista.common import api
from networking_arista.common import db_lib
from networking_arista.common import exceptions as arista_exc
LOG = logging.getLogger(__name__)
cfg.CONF.import_group('l3_arista', 'networking_arista.common.config')
EOS_UNREACHABLE_MSG = _('Unable to reach EOS')
DEFAULT_VLAN = 1
MLAG_SWITCHES = 2
VIRTUAL_ROUTER_MAC = '00:11:22:33:44:55'
IPV4_BITS = 32
IPV6_BITS = 128
# This string-format-at-a-distance confuses pylint :(
# pylint: disable=too-many-format-args
router_in_vrf_v1 = {
'router': {'create': ['vrf definition {0}',
'rd {1}',
'exit',
'ip routing vrf {0}'],
'delete': ['no vrf definition {0}']},
'interface': {'add': ['vlan {0}',
'exit',
'interface vlan {0}',
'vrf forwarding {1}',
'ip address {2}'],
'remove': ['no interface vlan {0}']}}
router_in_vrf_v2 = {
'router': {'create': ['vrf instance {0}',
'rd {1}',
'exit',
'ip routing vrf {0}'],
'delete': ['no vrf instance {0}']},
'interface': {'add': ['vlan {0}',
'exit',
'interface vlan {0}',
'vrf {1}',
'ip address {2}'],
'remove': ['no interface vlan {0}']}}
router_in_default_vrf = {
'router': {'create': [], # Place holder for now.
'delete': []}, # Place holder for now.
'interface': {'add': ['ip routing',
'vlan {0}',
'exit',
'interface vlan {0}',
'ip address {2}'],
'remove': ['no interface vlan {0}']}}
router_in_default_vrf_v6 = {
'router': {'create': [],
'delete': []},
'interface': {'add': ['ipv6 unicast-routing',
'vlan {0}',
'exit',
'interface vlan {0}',
'ipv6 enable',
'ipv6 address {2}'],
'remove': ['no interface vlan {0}']}}
additional_cmds_for_mlag = {
'router': {'create': ['ip virtual-router mac-address {0}'],
'delete': []},
'interface': {'add': ['ip virtual-router address {0}'],
'remove': []}}
additional_cmds_for_mlag_v6 = {
'router': {'create': [],
'delete': []},
'interface': {'add': ['ipv6 virtual-router address {0}'],
'remove': []}}
additional_cmds_for_default_route = {
'add': ['ip route vrf {0} 0.0.0.0/0 {1}'],
    # 'remove' is used when updating a network to remove the default gateway;
    # when deleting an interface we don't need to delete the route
'remove': ['no ip route vrf {0} 0.0.0.0/0']}
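# Illustrative expansion (not part of the original module): for a VRF named
# '__OpenStack__<id>-r1' with gateway 10.0.0.1, the 'add' template above
# renders to 'ip route vrf __OpenStack__<id>-r1 0.0.0.0/0 10.0.0.1' and
# 'remove' renders to 'no ip route vrf __OpenStack__<id>-r1 0.0.0.0/0'.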
class AristaL3Driver(object):
"""Wraps Arista JSON RPC.
All communications between Neutron and EOS are over JSON RPC.
EOS - operating system used on Arista hardware
Command API - JSON RPC API provided by Arista EOS
"""
def __init__(self):
self._servers = []
self._hosts = []
self._interfaceDict = None
self._validate_config()
host = cfg.CONF.l3_arista.primary_l3_host
self._hosts.append(host)
self._servers.append(self._make_eapi_client(host))
self._mlag_configured = cfg.CONF.l3_arista.mlag_config
self._vrf_syntax_v2_supported = None
self._router_in_vrf = router_in_vrf_v2
self._use_vrf = cfg.CONF.l3_arista.use_vrf
self._vrf_default_route = False
if self._use_vrf:
self._vrf_default_route = cfg.CONF.l3_arista.vrf_default_route
if self._vrf_default_route:
# only subscribe for events if vrf default route is enabled
self.subscribe()
if self._mlag_configured:
host = cfg.CONF.l3_arista.secondary_l3_host
self._hosts.append(host)
self._servers.append(self._make_eapi_client(host))
self._additionalRouterCmdsDict = additional_cmds_for_mlag['router']
self._additionalInterfaceCmdsDict = (
additional_cmds_for_mlag['interface'])
if self._use_vrf:
self.routerDict = self._router_in_vrf['router']
self._update_vrf_commands(keep_alive=False)
self._interfaceDict = self._router_in_vrf['interface']
else:
self.routerDict = router_in_default_vrf['router']
self._interfaceDict = router_in_default_vrf['interface']
self._enable_cleanup = cfg.CONF.l3_arista.enable_cleanup
self._protected_vlans = self._parse_protected_vlans(
cfg.CONF.l3_arista.protected_vlans)
def subscribe(self):
# Subscribe to the events related to networks and subnets
registry.subscribe(self.update_subnet, resources.SUBNET,
events.AFTER_UPDATE)
registry.subscribe(self.update_network, resources.NETWORK,
events.AFTER_UPDATE)
def update_subnet(self, resource, event, trigger, payload):
subnet_info = payload.latest_state
if subnet_info['ip_version'] == 6:
LOG.info('IPv6 networks not supported with L3 plugin')
return
ctx = nctx.get_admin_context()
ml2_db = NetworkContext(self, ctx, {'id': subnet_info['network_id']})
seg_id = ml2_db.network_segments[0]['segmentation_id']
router_info = db_lib.get_subnet_gateway_ipv4(subnet_info['id'])
if router_info:
router_info['seg_id'] = seg_id
router_name = self._arista_router_name(router_info['id'],
router_info['name'])
self._delete_default_gateway(router_name)
self.add_router_interface(ctx, router_info)
self._setup_default_gateway(router_info)
def _reset_network_default_route(self, network_id):
router_info = self._prepare_network_default_gateway(network_id)
if router_info:
router_name = self._arista_router_name(router_info['id'],
router_info['name'])
self._delete_default_gateway(router_name)
self._add_network_default_gateway(network_id)
def update_network(self, resource, event, trigger, payload):
self._reset_network_default_route(payload.resource_id)
def _prepare_network_default_gateway(self, network_id):
router_info = db_lib.get_network_gateway_ipv4(network_id)
if not router_info:
return
ip_version = router_info['ip_version']
if ip_version == 6:
LOG.info('IPv6 networks not supported with L3 plugin')
return
ctx = nctx.get_admin_context()
ml2_db = NetworkContext(self, ctx, {'id': network_id})
seg_id = ml2_db.network_segments[0]['segmentation_id']
router_info['seg_id'] = seg_id
return router_info
def _add_network_default_gateway(self, network_id):
router_info = self._prepare_network_default_gateway(network_id)
if router_info:
ctx = nctx.get_admin_context()
self.add_router_interface(ctx, router_info)
self._setup_default_gateway(router_info)
@staticmethod
def _raise_invalid_protected_vlans(vlan_string):
msg = '%s is not a valid vlan or vlan range' % vlan_string
LOG.error(msg)
raise arista_exc.AristaServicePluginConfigError(msg=msg)
def _parse_protected_vlans(self, vlan_strings):
# VLAN 1 is always protected as it exists by default on EOS
vlans = set([1])
for vlan_string in vlan_strings:
vlan_parsed = vlan_string.split(':', 2)
if len(vlan_parsed) > 2:
self._raise_invalid_protected_vlans(vlan_string)
try:
min_vlan = int(vlan_parsed[0])
except ValueError:
self._raise_invalid_protected_vlans(vlan_string)
try:
max_vlan = int(vlan_parsed[-1])
except ValueError:
self._raise_invalid_protected_vlans(vlan_string)
if not (const.MIN_VLAN_TAG <= min_vlan <=
max_vlan <= const.MAX_VLAN_TAG):
self._raise_invalid_protected_vlans(vlan_string)
vlans.update(range(min_vlan, max_vlan + 1))
return vlans
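    # Example (a sketch with hypothetical config values): with
    # protected_vlans = ['100', '200:300'] this returns
    # {1, 100, 200, 201, ..., 300}; entries such as 'foo' or '300:200'
    # raise AristaServicePluginConfigError via
    # _raise_invalid_protected_vlans.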
@staticmethod
def _make_eapi_client(host):
return api.EAPIClient(
host,
username=cfg.CONF.l3_arista.primary_l3_host_username,
password=cfg.CONF.l3_arista.primary_l3_host_password,
verify=False,
timeout=cfg.CONF.l3_arista.conn_timeout
)
def _supports_vrf_instance(self, version):
ver_tokens = version.split('.')
if len(ver_tokens) < 2 or int(ver_tokens[0]) < 4:
return False
if int(ver_tokens[0]) == 4 and int(ver_tokens[1]) < 22:
return False
return True
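    # For reference (illustrative version strings): '4.21.3F' -> False
    # (4.21 < 4.22), '4.22.0F' -> True, '5.0' -> True, '4' -> False
    # (fewer than two tokens).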
def _check_vrf_syntax_v2_support(self, host, keep_alive=True):
cmds = ['show version']
result = None
try:
result = self._run_eos_cmds(cmds, host, log_exception=False,
keep_alive=keep_alive,
update_vrf_commands=False)
LOG.info(_LI('show version result %s'), result)
except Exception:
            # We don't know what exception we got, so return None: at this
            # point we don't know which VRF creation command is supported
return None
return result and self._supports_vrf_instance(result[0].get('version',
''))
def _update_vrf_commands(self, keep_alive=True):
        # This assumes all switches run the same version. This will need to
        # be updated if we ever support distributed routing
new_vrf_support = self._check_vrf_syntax_v2_support(
self._servers[0], keep_alive=keep_alive)
if new_vrf_support == self._vrf_syntax_v2_supported:
return
LOG.info(_LI('Updating VRF command supported: %s'),
'vrf instance' if new_vrf_support else 'vrf definition')
self._vrf_syntax_v2_supported = new_vrf_support
if self._vrf_syntax_v2_supported is False:
self._router_in_vrf = router_in_vrf_v1
else:
self._router_in_vrf = router_in_vrf_v2
        # we don't need to update self._interfaceDict as it is updated by
        # the _select_dicts method before it is used
self.routerDict = self._router_in_vrf['router']
def _validate_config(self):
if cfg.CONF.l3_arista.get('primary_l3_host') == '':
msg = _('Required option primary_l3_host is not set')
LOG.error(msg)
raise arista_exc.AristaServicePluginConfigError(msg=msg)
if cfg.CONF.l3_arista.get('mlag_config'):
if cfg.CONF.l3_arista.get('secondary_l3_host') == '':
msg = _('Required option secondary_l3_host is not set')
LOG.error(msg)
raise arista_exc.AristaServicePluginConfigError(msg=msg)
if cfg.CONF.l3_arista.get('primary_l3_host_username') == '':
msg = _('Required option primary_l3_host_username is not set')
LOG.error(msg)
raise arista_exc.AristaServicePluginConfigError(msg=msg)
def create_router_on_eos(self, router_name, rdm, server):
"""Creates a router on Arista HW Device.
:param router_name: globally unique identifier for router/VRF
:param rdm: A value generated by hashing router name
:param server: Server endpoint on the Arista switch to be configured
"""
cmds = []
rd = "%s:%s" % (rdm, rdm)
for c in self.routerDict['create']:
cmds.append(c.format(router_name, rd))
if self._mlag_configured:
mac = VIRTUAL_ROUTER_MAC
for c in self._additionalRouterCmdsDict['create']:
cmds.append(c.format(mac))
self._run_config_cmds(cmds, server)
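    # Example of the generated config (a sketch assuming the v2 VRF syntax
    # and hypothetical values router_name='__OpenStack__<id>-r1',
    # rdm='4660'): ['vrf instance __OpenStack__<id>-r1', 'rd 4660:4660',
    # 'exit', 'ip routing vrf __OpenStack__<id>-r1'], plus
    # 'ip virtual-router mac-address 00:11:22:33:44:55' when MLAG is
    # configured.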
def delete_router_from_eos(self, router_name, server):
"""Deletes a router from Arista HW Device.
:param router_name: globally unique identifier for router/VRF
:param server: Server endpoint on the Arista switch to be configured
"""
cmds = []
for c in self.routerDict['delete']:
cmds.append(c.format(router_name))
if self._mlag_configured:
for c in self._additionalRouterCmdsDict['delete']:
cmds.append(c)
self._run_config_cmds(cmds, server)
def _select_dicts(self, ipv):
if self._use_vrf:
if ipv == 6:
msg = (_('IPv6 subnets are not supported with VRFs'))
LOG.info(msg)
self._interfaceDict = self._router_in_vrf['interface']
else:
if ipv == 6:
# for IPv6 use IPv6 commands
self._interfaceDict = router_in_default_vrf_v6['interface']
self._additionalInterfaceCmdsDict = (
additional_cmds_for_mlag_v6['interface'])
else:
self._interfaceDict = router_in_default_vrf['interface']
self._additionalInterfaceCmdsDict = (
additional_cmds_for_mlag['interface'])
def add_interface_to_router(self, segment_id,
router_name, fixed_ip, router_ip, mask,
server):
"""Adds an interface to existing HW router on Arista HW device.
:param segment_id: VLAN Id associated with interface that is added
:param router_name: globally unique identifier for router/VRF
:param fixed_ip: Fixed IP associated with the port
:param router_ip: IP address of the router
:param mask: subnet mask to be used
:param server: Server endpoint on the Arista switch to be configured
"""
if not segment_id:
segment_id = DEFAULT_VLAN
cmds = []
for c in self._interfaceDict['add']:
if self._mlag_configured:
                # In VARP config, use the router IP; else, use the fixed IP.
                # If the fixed IP was not set, this will be the gateway IP
                # address.
ip = router_ip
else:
ip = fixed_ip + '/' + mask
cmds.append(c.format(segment_id, router_name, ip))
if self._mlag_configured:
for c in self._additionalInterfaceCmdsDict['add']:
cmds.append(c.format(fixed_ip))
self._run_config_cmds(cmds, server)
def delete_interface_from_router(self, segment_id, router_name, server):
"""Deletes an interface from existing HW router on Arista HW device.
:param segment_id: VLAN Id associated with interface that is added
:param router_name: globally unique identifier for router/VRF
:param server: Server endpoint on the Arista switch to be configured
"""
if not segment_id:
segment_id = DEFAULT_VLAN
cmds = []
for c in self._interfaceDict['remove']:
cmds.append(c.format(segment_id))
self._run_config_cmds(cmds, server)
def create_router(self, context, router):
"""Creates a router on Arista Switch.
Deals with multiple configurations - such as Router per VRF,
a router in default VRF, Virtual Router in MLAG configurations
"""
if router:
router_name = self._arista_router_name(router['id'],
router['name'])
hashed = hashlib.sha256(router_name.encode('utf-8'))
rdm = str(int(hashed.hexdigest(), 16) % 65536)
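            # rdm is a stable 16-bit value derived from the router name and
            # is used for both halves of the route distinguisher ('rdm:rdm')
            # by create_router_on_eos.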
mlag_peer_failed = False
for s in self._servers:
try:
self.create_router_on_eos(router_name, rdm, s)
mlag_peer_failed = False
except Exception:
if self._mlag_configured and not mlag_peer_failed:
# In paired switch, it is OK to fail on one switch
mlag_peer_failed = True
else:
msg = (_('Failed to create router %s on EOS') %
router_name)
LOG.exception(msg)
raise arista_exc.AristaServicePluginRpcError(msg=msg)
if self._vrf_default_route:
ext_gateway = router.get('external_gateway_info')
if ext_gateway:
network_id = ext_gateway.get('network_id')
if network_id:
self._add_network_default_gateway(network_id)
def delete_router(self, context, router_id, router):
"""Deletes a router from Arista Switch."""
if router:
router_name = self._arista_router_name(router_id, router['name'])
mlag_peer_failed = False
for s in self._servers:
try:
self.delete_router_from_eos(router_name, s)
mlag_peer_failed = False
except Exception:
if self._mlag_configured and not mlag_peer_failed:
# In paired switch, it is OK to fail on one switch
mlag_peer_failed = True
else:
                        msg = (_('Failed to delete router %s on EOS') %
router_name)
LOG.exception(msg)
raise arista_exc.AristaServicePluginRpcError(msg=msg)
def update_router(self, context, router_id, original_router, new_router):
"""Updates a router which is already created on Arista Switch.
"""
if not self._vrf_default_route or not new_router:
return
ext_gateway = new_router.get('external_gateway_info')
if ext_gateway is None:
# Remove default gateway if it exists
orig_ext_gateway = original_router.get('external_gateway_info')
if orig_ext_gateway is None:
# External gateway did not change
return
network_id = orig_ext_gateway['network_id']
ml2_db = NetworkContext(self, context, {'id': network_id})
seg_id = ml2_db.network_segments[0]['segmentation_id']
new_router['seg_id'] = seg_id
new_router['ip_version'] = 4
self.remove_router_interface(context, new_router,
delete_gateway=True)
return
network_id = ext_gateway.get('network_id')
if network_id:
self._reset_network_default_route(network_id)
def _setup_default_gateway(self, router_info):
mlag_peer_failed = False
gip = router_info['gip']
router_name = self._arista_router_name(router_info['id'],
router_info['name'])
for s in self._servers:
try:
self._setup_switch_default_gateway(router_name, gip, s)
mlag_peer_failed = False
except Exception:
if self._mlag_configured and not mlag_peer_failed:
# In paired switch, it is OK to fail on one switch
mlag_peer_failed = True
else:
                    msg = (_('Failed to set up router gateway %s on EOS') %
router_name)
LOG.exception(msg)
raise arista_exc.AristaServicePluginRpcError(msg=msg)
def _setup_switch_default_gateway(self, router_name, gip, server):
cmds = [
c.format(router_name, gip)
for c in additional_cmds_for_default_route['add']
]
self._run_config_cmds(cmds, server)
def _delete_default_gateway(self, router_name):
mlag_peer_failed = False
for s in self._servers:
try:
self._delete_switch_default_gateway(router_name, s)
mlag_peer_failed = False
except Exception:
if self._mlag_configured and not mlag_peer_failed:
# In paired switch, it is OK to fail on one switch
mlag_peer_failed = True
else:
msg = (_('Failed to delete router gateway %s on EOS') %
router_name)
LOG.exception(msg)
raise arista_exc.AristaServicePluginRpcError(msg=msg)
def _delete_switch_default_gateway(self, router_name, server):
cmds = [
c.format(router_name)
for c in additional_cmds_for_default_route['remove']
]
self._run_config_cmds(cmds, server)
def add_router_interface(self, context, router_info):
"""Adds an interface to a router created on Arista HW router.
This deals with both IPv6 and IPv4 configurations.
"""
if router_info:
if router_info['ip_version'] == 6 and self._use_vrf:
# For the moment we ignore the interfaces to be added
# on IPv6 subnets.
LOG.info('Using VRFs. Ignoring IPv6 interface')
return
self._select_dicts(router_info['ip_version'])
cidr = router_info['cidr']
subnet_mask = cidr.split('/')[1]
router_name = self._arista_router_name(router_info['id'],
router_info['name'])
if self._mlag_configured:
# For MLAG, we send a specific IP address as opposed to cidr
# For now, we are using x.x.x.253 and x.x.x.254 as virtual IP
# unless either collides with the router interface fixed_ip,
# in which case we use x.x.x.252
mlag_peer_failed = False
router_ips = self._get_router_ips(cidr, len(self._servers),
router_info['ip_version'],
router_info['fixed_ip'])
for i, server in enumerate(self._servers):
# Get appropriate virtual IP address for this router
router_ip = router_ips[i]
try:
self.add_interface_to_router(router_info['seg_id'],
router_name,
router_info['fixed_ip'],
router_ip, subnet_mask,
server)
mlag_peer_failed = False
except Exception:
if not mlag_peer_failed:
mlag_peer_failed = True
else:
msg = (_('Failed to add interface to router '
'%s on EOS') % router_name)
LOG.exception(msg)
raise arista_exc.AristaServicePluginRpcError(
msg=msg)
else:
for s in self._servers:
self.add_interface_to_router(router_info['seg_id'],
router_name,
router_info['fixed_ip'],
None, subnet_mask, s)
def remove_router_interface(self, context, router_info,
delete_gateway=False):
"""Removes previously configured interface from router on Arista HW.
This deals with both IPv6 and IPv4 configurations.
"""
if router_info:
if router_info['ip_version'] == 6 and self._use_vrf:
                # For the moment we ignore the interfaces to be removed
                # on IPv6 subnets.
LOG.info('Using VRFs. Ignoring IPv6 interface')
return
router_name = self._arista_router_name(router_info['id'],
router_info['name'])
mlag_peer_failed = False
for s in self._servers:
try:
if delete_gateway:
self._delete_switch_default_gateway(router_name, s)
self.delete_interface_from_router(router_info['seg_id'],
router_name, s)
if self._mlag_configured:
mlag_peer_failed = False
except Exception:
if self._mlag_configured and not mlag_peer_failed:
mlag_peer_failed = True
else:
                        msg = (_('Failed to remove interface from router '
'%s on EOS') % router_name)
LOG.exception(msg)
raise arista_exc.AristaServicePluginRpcError(msg=msg)
def _run_config_cmds(self, commands, server, log_exception=True,
keep_alive=True, update_vrf_commands=True):
"""Execute/sends a CAPI (Command API) command to EOS.
In this method, list of commands is appended with prefix and
postfix commands - to make is understandble by EOS.
:param commands : List of command to be executed on EOS.
:param server: Server endpoint on the Arista switch to be configured
"""
command_start = ['enable', 'configure']
command_end = ['exit']
full_command = command_start + commands + command_end
self._run_eos_cmds(full_command, server, log_exception, keep_alive,
update_vrf_commands)
def _run_eos_cmds(self, commands, server, log_exception=True,
keep_alive=True, update_vrf_commands=True):
LOG.info(_LI('Executing command on Arista EOS: %s'), commands)
try:
# this returns array of return values for every command in
# full_command list
ret = server.execute(commands, keep_alive=keep_alive)
LOG.info(_LI('Results of execution on Arista EOS: %s'), ret)
return ret
except arista_exc.AristaServicePluginInvalidCommand:
msg = (_('VRF creation command unsupported. This request should '
'work on next retry.'))
if log_exception:
LOG.exception(msg)
if self._use_vrf and update_vrf_commands:
# For now we assume that the only command that raises this
# exception is vrf instance/definition and we need to update
# the current support
self._update_vrf_commands()
raise
except Exception:
msg = (_('Error occurred while trying to execute '
'commands %(cmd)s on EOS %(host)s') %
{'cmd': commands, 'host': server})
if log_exception:
LOG.exception(msg)
raise arista_exc.AristaServicePluginRpcError(msg=msg)
def _arista_router_name(self, router_id, name):
"""Generate an arista specific name for this router.
        Use a unique name so that OpenStack-created routers/SVIs
        can be distinguished from the user-created routers/SVIs
        on Arista HW. Replace spaces with underscores for CLI compatibility.
"""
return '__OpenStack__' + router_id + '-' + name.replace(' ', '_')
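    # e.g. (hypothetical values) _arista_router_name('a1b2', 'my router')
    # returns '__OpenStack__a1b2-my_router'.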
def _get_binary_from_ipv4(self, ip_addr):
"""Converts IPv4 address to binary form."""
return struct.unpack("!L", socket.inet_pton(socket.AF_INET,
ip_addr))[0]
def _get_binary_from_ipv6(self, ip_addr):
"""Converts IPv6 address to binary form."""
hi, lo = struct.unpack("!QQ", socket.inet_pton(socket.AF_INET6,
ip_addr))
return (hi << 64) | lo
def _get_ipv4_from_binary(self, bin_addr):
"""Converts binary address to Ipv4 format."""
return socket.inet_ntop(socket.AF_INET, struct.pack("!L", bin_addr))
def _get_ipv6_from_binary(self, bin_addr):
"""Converts binary address to Ipv6 format."""
hi = bin_addr >> 64
        lo = bin_addr & 0xFFFFFFFFFFFFFFFF  # mask the low 64 bits, not 32
return socket.inet_ntop(socket.AF_INET6, struct.pack("!QQ", hi, lo))
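    # Round-trip sketch (illustrative): _get_binary_from_ipv4('10.0.0.1')
    # yields 0x0A000001 and _get_ipv4_from_binary(0x0A000001) returns
    # '10.0.0.1'; the IPv6 helpers behave the same way on a 128-bit integer
    # split into two 64-bit halves.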
def _get_router_ips(self, cidr, ip_count, ip_ver, fixed_ip):
"""For a given IP subnet and IP version type, generate IP for router.
        This method takes the network address (cidr) and selects a set of
        IP addresses that should be assigned to the virtual router running
        on multiple switches. It uses the upper addresses in the subnet
        as IPs for the router. Each instance of the router, on each switch,
        requires a unique IP address. For example, in the IPv4 case on a
        /24 subnet, it will pick X.X.X.254 as the first address, X.X.X.253
        for the next, and so on, skipping the address passed as fixed_ip.
"""
network_addr, prefix = cidr.split('/')
if ip_ver == 4:
bits = IPV4_BITS
ip = self._get_binary_from_ipv4(network_addr)
elif ip_ver == 6:
bits = IPV6_BITS
ip = self._get_binary_from_ipv6(network_addr)
mask = (pow(2, bits) - 1) << (bits - int(prefix))
network_addr = ip & mask
router_ips = list()
ip_idx = 0
while len(router_ips) < ip_count:
start_ip = MLAG_SWITCHES + ip_idx
router_ip = pow(2, bits - int(prefix)) - start_ip
router_ip = network_addr | router_ip
if ip_ver == 4:
router_ip = self._get_ipv4_from_binary(router_ip)
else:
router_ip = self._get_ipv6_from_binary(router_ip)
ip_idx += 1
if router_ip != fixed_ip.lower():
router_ips.append(router_ip + '/' + prefix)
return router_ips
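    # Worked example (illustrative): _get_router_ips('10.0.0.0/24', 2, 4,
    # '10.0.0.1') returns ['10.0.0.254/24', '10.0.0.253/24']; if fixed_ip
    # were '10.0.0.254' the collision would be skipped and the result would
    # be ['10.0.0.253/24', '10.0.0.252/24'].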
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/l3Plugin/l3_arista.py0000664000175000017500000004341500000000000025205 0ustar00zuulzuul00000000000000# Copyright 2014 Arista Networks, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import traceback
from neutron_lib.agent import topics
from neutron_lib import constants as n_const
from neutron_lib import context as nctx
from neutron_lib.plugins import constants as plugin_constants
from neutron_lib.plugins import directory
from neutron_lib import rpc as n_rpc
from neutron_lib.services import base as service_base
from neutron_lib import worker
from oslo_config import cfg
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import excutils
from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
from neutron.api.rpc.handlers import l3_rpc
from neutron.db import extraroute_db
from neutron.db import l3_agentschedulers_db
from neutron.db import l3_gwmode_db
from neutron.plugins.ml2.driver_context import NetworkContext # noqa
from networking_arista._i18n import _LE, _LI
from networking_arista.l3Plugin import arista_l3_driver
LOG = logging.getLogger(__name__)
class AristaL3SyncWorker(worker.BaseWorker):
def __init__(self, driver):
self.driver = driver
self._enable_cleanup = driver._enable_cleanup
self._protected_vlans = driver._protected_vlans
self._servers = driver._servers
self._use_vrf = driver._use_vrf
self._vrf_default_route = driver._vrf_default_route
self._loop = None
super(AristaL3SyncWorker, self).__init__(worker_process_count=0)
def start(self):
super(AristaL3SyncWorker, self).start()
if self._loop is None:
self._loop = loopingcall.FixedIntervalLoopingCall(
self.synchronize
)
self._loop.start(interval=cfg.CONF.l3_arista.l3_sync_interval)
def stop(self):
if self._loop is not None:
self._loop.stop()
def wait(self):
if self._loop is not None:
self._loop.wait()
self._loop = None
def reset(self):
self.stop()
self.wait()
self.start()
def get_subnet_info(self, subnet_id):
return self.get_subnet(subnet_id)
def get_routers_and_interfaces(self):
core = directory.get_plugin()
ctx = nctx.get_admin_context()
routers = directory.get_plugin(plugin_constants.L3).get_routers(ctx)
router_interfaces = list()
for r in routers:
ports = core.get_ports(
ctx,
filters={
'device_id': [r['id']],
'device_owner': [n_const.DEVICE_OWNER_ROUTER_INTF,
n_const.DEVICE_OWNER_ROUTER_GW]}) or []
for p in ports:
router_interface = r.copy()
net_id = p['network_id']
subnet_id = p['fixed_ips'][0]['subnet_id']
subnet = core.get_subnet(ctx, subnet_id)
ml2_db = NetworkContext(self, ctx, {'id': net_id})
seg_id = ml2_db.network_segments[0]['segmentation_id']
router_interface['seg_id'] = seg_id
router_interface['cidr'] = subnet['cidr']
router_interface['gip'] = subnet['gateway_ip']
if p['id'] == r['gw_port_id']:
r['gip'] = subnet['gateway_ip']
router_interface['fixed_ip'] = p['fixed_ips'][0]['ip_address']
router_interface['ip_version'] = subnet['ip_version']
router_interface['subnet_id'] = subnet_id
router_interfaces.append(router_interface)
return routers, router_interfaces
def _get_ext_gateway_ip(self, router):
if ('external_gateway_info' not in router or
router['external_gateway_info'] is None):
return None
return router.get('gip')
def synchronize(self):
"""Synchronizes Router DB from Neturon DB with EOS.
Walks through the Neturon Db and ensures that all the routers
created in Netuton DB match with EOS. After creating appropriate
routers, it ensures to add interfaces as well.
Uses idempotent properties of EOS configuration, which means
same commands can be repeated.
"""
LOG.info(_LI('Syncing Neutron Router DB <-> EOS'))
try:
if self._use_vrf:
self.driver._update_vrf_commands()
routers, router_interfaces = self.get_routers_and_interfaces()
if self._enable_cleanup:
expected_vrfs = set()
expected_vrf_default_routes = set()
if self._use_vrf:
expected_vrfs.update(self.driver._arista_router_name(
r['id'], r['name']) for r in routers)
if self._vrf_default_route:
expected_vrf_default_routes.update(
(self.driver._arista_router_name(router['id'],
router['name']),
self._get_ext_gateway_ip(router))
for router in routers)
expected_vlans = set(r['seg_id'] for r in router_interfaces)
LOG.info(_LI('Syncing Neutron Router DB - cleanup'))
self.do_cleanup(expected_vrfs, expected_vlans,
expected_vrf_default_routes)
LOG.info(_LI('Syncing Neutron Router DB - creating routers'))
self.create_routers(routers)
LOG.info(_LI('Syncing Neutron Router DB - creating interfaces'))
self.create_router_interfaces(router_interfaces)
LOG.info(_LI('Syncing Neutron Router DB finished'))
except Exception:
exc_str = traceback.format_exc()
LOG.error(_LE("Error during synchronize processing %s"), exc_str)
def get_vrfs(self, server):
ret = self.driver._run_eos_cmds(['show vrf'], server)
if len(ret or []) != 1 or 'vrfs' not in ret[0].keys():
return set()
eos_vrfs = set(vrf for vrf in ret[0]['vrfs'].keys()
if vrf.startswith('__OpenStack__'))
return eos_vrfs
def get_svis(self, server):
ret = self.driver._run_eos_cmds(['show ip interface'], server)
if len(ret or []) != 1 or 'interfaces' not in ret[0].keys():
return set()
eos_svis = set(
int(vlan.strip('Vlan'))
for vlan in ret[0]['interfaces'].keys() if 'Vlan' in vlan)
return eos_svis
def get_vlans(self, server):
ret = self.driver._run_eos_cmds(['show vlan'], server)
if len(ret or []) != 1 or 'vlans' not in ret[0].keys():
return set()
eos_vlans = set(int(vlan) for vlan, info in ret[0]['vlans'].items()
if not info['dynamic'])
return eos_vlans
def _get_vrf_default_route(self, vrf, vrf_routes):
if (not vrf_routes or
'routes' not in vrf_routes or
'0.0.0.0/0' not in vrf_routes['routes'] or
'vias' not in vrf_routes['routes']['0.0.0.0/0']):
return set()
result = set()
for via in vrf_routes['routes']['0.0.0.0/0']['vias']:
result.add((vrf, via.get('nexthopAddr')))
return result
def get_vrf_default_routes(self, server):
ret = self.driver._run_eos_cmds(['show ip route vrf all'], server)
if len(ret or []) != 1 or 'vrfs' not in ret[0].keys():
return set()
eos_vrf_gateway = set()
for vrf, vrf_route in ret[0]['vrfs'].items():
if '__OpenStack__' in vrf:
eos_vrf_gateway.update(self._get_vrf_default_route(vrf,
vrf_route))
return eos_vrf_gateway
def do_cleanup(self, expected_vrfs, expected_vlans,
expected_vrf_default_routes):
for server in self._servers:
eos_svis = self.get_svis(server)
eos_vlans = self.get_vlans(server)
svis_to_delete = (eos_svis - self._protected_vlans -
expected_vlans)
vlans_to_delete = (eos_vlans - self._protected_vlans -
expected_vlans)
delete_cmds = []
delete_cmds.extend('no interface vlan %s' % svi
for svi in svis_to_delete)
delete_cmds.extend('no vlan %s' % vlan
for vlan in vlans_to_delete)
if self._use_vrf:
eos_vrfs = self.get_vrfs(server)
vrfs_to_delete = eos_vrfs - expected_vrfs
delete_cmds.extend([c.format(vrf)
for c in self.driver.routerDict['delete']
for vrf in vrfs_to_delete])
if self._vrf_default_route:
eos_vrf_gateway = self.get_vrf_default_routes(server)
vrf_gw_to_delete = (eos_vrf_gateway -
expected_vrf_default_routes)
delete_cmds.extend(
'no ip route vrf %s 0.0.0.0/0' % vrf
for (vrf, _) in vrf_gw_to_delete)
if delete_cmds:
self.driver._run_config_cmds(delete_cmds, server)
def create_routers(self, routers):
for r in routers:
try:
self.driver.create_router(self, r)
except Exception:
LOG.error(_LE("Error Adding router %(router_id)s "
"on Arista HW"), {'router_id': r})
def create_router_interfaces(self, router_interfaces):
for r in router_interfaces:
try:
self.driver.add_router_interface(self, r)
except Exception:
LOG.error(_LE("Error Adding interface %(subnet_id)s "
"to router %(router_id)s on Arista HW"),
{'subnet_id': r['subnet_id'], 'router_id': r['id']})
class AristaL3ServicePlugin(service_base.ServicePluginBase,
extraroute_db.ExtraRoute_db_mixin,
l3_gwmode_db.L3_NAT_db_mixin,
l3_agentschedulers_db.L3AgentSchedulerDbMixin):
"""Implements L3 Router service plugin for Arista hardware.
    Creates routers in Arista hardware, manages them, and adds/deletes
    interfaces to/from the routers.
"""
supported_extension_aliases = ["router", "ext-gw-mode",
"extraroute"]
def __init__(self, driver=None):
super(AristaL3ServicePlugin, self).__init__()
self.driver = driver or arista_l3_driver.AristaL3Driver()
self.setup_rpc()
self.add_worker(AristaL3SyncWorker(self.driver))
def setup_rpc(self):
# RPC support
self.topic = topics.L3PLUGIN
self.conn = n_rpc.Connection()
self.agent_notifiers.update(
{n_const.AGENT_TYPE_L3: l3_rpc_agent_api.L3AgentNotifyAPI()})
self.endpoints = [l3_rpc.L3RpcCallback()]
self.conn.create_consumer(self.topic, self.endpoints,
fanout=False)
self.conn.consume_in_threads()
def get_plugin_type(self):
return plugin_constants.L3
def get_plugin_description(self):
"""Returns string description of the plugin."""
return ("Arista L3 Router Service Plugin for Arista Hardware "
"based routing")
@log_helpers.log_method_call
def create_router(self, context, router):
"""Create a new router entry in DB, and create it Arista HW."""
# Add router to the DB
new_router = super(AristaL3ServicePlugin, self).create_router(
context,
router)
# create router on the Arista Hw
try:
self.driver.create_router(context, new_router)
return new_router
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error creating router on Arista HW router=%s "),
new_router)
super(AristaL3ServicePlugin, self).delete_router(
context,
new_router['id']
)
@log_helpers.log_method_call
def update_router(self, context, router_id, router):
"""Update an existing router in DB, and update it in Arista HW."""
# Read existing router record from DB
original_router = self.get_router(context, router_id)
# Update router DB
new_router = super(AristaL3ServicePlugin, self).update_router(
context, router_id, router)
# Modify router on the Arista Hw
try:
self.driver.update_router(context, router_id,
original_router, new_router)
return new_router
except Exception:
LOG.error(_LE("Error updating router on Arista HW router=%s "),
new_router)
@log_helpers.log_method_call
def delete_router(self, context, router_id):
"""Delete an existing router from Arista HW as well as from the DB."""
router = self.get_router(context, router_id)
# Delete router on the Arista Hw
try:
self.driver.delete_router(context, router_id, router)
except Exception as e:
LOG.error(_LE("Error deleting router on Arista HW "
"router %(r)s exception=%(e)s"),
{'r': router, 'e': e})
super(AristaL3ServicePlugin, self).delete_router(context, router_id)
@log_helpers.log_method_call
def add_router_interface(self, context, router_id, interface_info):
"""Add a subnet of a network to an existing router."""
new_router = super(AristaL3ServicePlugin, self).add_router_interface(
context, router_id, interface_info)
core = directory.get_plugin()
# Get network info for the subnet that is being added to the router.
# Check if the interface information is by port-id or subnet-id
add_by_port, add_by_sub = self._validate_interface_info(interface_info)
if add_by_sub:
subnet = core.get_subnet(context, interface_info['subnet_id'])
            # If we add by subnet and we have no port allocated, assign the
            # gateway IP to the interface
fixed_ip = subnet['gateway_ip']
elif add_by_port:
port = core.get_port(context, interface_info['port_id'])
subnet_id = port['fixed_ips'][0]['subnet_id']
fixed_ip = port['fixed_ips'][0]['ip_address']
subnet = core.get_subnet(context, subnet_id)
network_id = subnet['network_id']
        # To create SVIs in Arista HW, the segmentation ID is required
        # for this network.
ml2_db = NetworkContext(self, context, {'id': network_id})
seg_id = ml2_db.network_segments[0]['segmentation_id']
# Package all the info needed for Hw programming
router = self.get_router(context, router_id)
router_info = copy.deepcopy(new_router)
router_info['seg_id'] = seg_id
router_info['name'] = router['name']
router_info['cidr'] = subnet['cidr']
router_info['gip'] = subnet['gateway_ip']
router_info['fixed_ip'] = fixed_ip
router_info['ip_version'] = subnet['ip_version']
try:
self.driver.add_router_interface(context, router_info)
return new_router
except Exception:
with excutils.save_and_reraise_exception():
LOG.error(_LE("Error Adding subnet %(subnet)s to "
"router %(router_id)s on Arista HW"),
{'subnet': subnet, 'router_id': router_id})
super(AristaL3ServicePlugin, self).remove_router_interface(
context,
router_id,
interface_info)
@log_helpers.log_method_call
def remove_router_interface(self, context, router_id, interface_info):
"""Remove a subnet of a network from an existing router."""
router_to_del = (
super(AristaL3ServicePlugin, self).remove_router_interface(
context,
router_id,
interface_info))
# Get network information of the subnet that is being removed
core = directory.get_plugin()
subnet = core.get_subnet(context, router_to_del['subnet_id'])
network_id = subnet['network_id']
# For SVI removal from Arista HW, segmentation ID is needed
ml2_db = NetworkContext(self, context, {'id': network_id})
seg_id = ml2_db.network_segments[0]['segmentation_id']
router = self.get_router(context, router_id)
router_info = copy.deepcopy(router_to_del)
router_info['seg_id'] = seg_id
router_info['name'] = router['name']
router_info['ip_version'] = subnet['ip_version']
try:
self.driver.remove_router_interface(context, router_info)
return router_to_del
except Exception as exc:
LOG.error(_LE("Error removing interface %(interface)s from "
"router %(router_id)s on Arista HW"
"Exception =(exc)s"),
{'interface': interface_info, 'router_id': router_id,
'exc': exc})
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1693270226.6401024
networking_arista-2023.1.0/networking_arista/ml2/0000775000175000017500000000000000000000000021740 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/ml2/__init__.py0000664000175000017500000000000000000000000024037 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/ml2/arista_resources.py0000664000175000017500000005664300000000000025705 0ustar00zuulzuul00000000000000# Copyright (c) 2017 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from neutron_lib import constants as n_const
from neutron_lib.services.trunk import constants as t_const
from oslo_log import log as logging
from networking_arista.common import constants as a_const
from networking_arista.common import db_lib
from networking_arista.common import exceptions as arista_exc
from networking_arista.common import utils
LOG = logging.getLogger(__name__)
class AttributeFormatter(object):
"""Formats a single attribute of the CVX model based on the neutron model
There are 4 elements to an AttributeFormatter:
1. neutron_key - name of the key in the neutron model
2. cvx_key - name of the key in the cvx model
3. format(optional) - function to alter the value to be CVX compatible
4. submodel(optional) - If the get_db_resources function queries multiple
models, the name of the model that contains the neutron_key must be
specified
"""
def __init__(self, neutron_key, cvx_key, format=None, submodel=None):
self.neutron_key = neutron_key
self.cvx_key = cvx_key
self.format = format or (
lambda neutron_val, base_resource: neutron_val)
self.submodel = submodel
def transform(self, resource):
base_resource = resource
if self.submodel:
resource = getattr(resource, self.submodel)
return (self.cvx_key, self.format(resource[self.neutron_key],
base_resource))
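    # Usage sketch (hypothetical values):
    # AttributeFormatter('project_id', 'tenantId').transform(
    #     {'project_id': 'abc'}) returns ('tenantId', 'abc'). When a
    # submodel is given, the attribute is read from
    # getattr(resource, submodel) while format() still receives the base
    # resource.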
class AristaResourcesBase(object):
"""Tracks state of resources of one resource type on neutron and CVX
An AristaResources class is responsible for:
- tracking resources that have been provisioned in neutron
- tracking resources that have been provisioned on CVX
- creating and deleting resources on CVX to bring it in line with neutron
- formatting neutron resources to be compatible with CVX's API
- tracking the correct endpoint for CVX API calls
In order to facilitate this each resource type should define:
1. formatter - a list of AttributeFormatters to convert neutron attributes
to models compatible with CVX's API
2. id_key - the key in the CVX model that uniquely identifies the resource
3. endpoint - format string for region resource endpoint
4. get_db_resources - function that queries the neutron db for all
resources of the resource type in question
"""
formatter = [AttributeFormatter('id', 'id')]
id_key = 'id'
endpoint = 'region/%(region)s'
def __init__(self, rpc):
self.region = rpc.region
self.rpc = rpc
self.cvx_data_stale = True
self.neutron_data_stale = True
self.cvx_ids = set()
self.neutron_resources = dict()
def clear_cvx_data(self):
self.cvx_data_stale = True
self.cvx_ids = set()
def clear_neutron_data(self):
self.neutron_data_stale = True
self.neutron_resources = dict()
def clear_all_data(self):
self.clear_cvx_data()
self.clear_neutron_data()
def update_neutron_resource(self, id, action):
LOG.debug("%(pid)s Requesting %(action)s %(class)s resource %(id)s",
{'action': action, 'class': self.__class__.__name__,
'id': id, 'pid': os.getpid()})
resource = self.get_db_resources(id)
if action == a_const.FULL_SYNC:
self.clear_neutron_data()
LOG.info("%(pid)s Full sync resource %(class)s",
{'class': self.__class__.__name__,
'pid': os.getpid()})
return
        assert len(resource) <= 1
if resource:
# Until we start using etcd, we need to unconditionally send the
            # create request because it might have been deleted by another
            # worker. We force this by removing the resource from our 'view'
            # of cvx resources
self.force_resource_update(id)
LOG.debug("%(pid)s Resource %(class)s %(id)s found, creating",
{'class': self.__class__.__name__, 'id': id,
'pid': os.getpid()})
self._add_neutron_resource(resource[0])
else:
LOG.debug("%(pid)s Resource %(class)s %(id)s not found, deleting",
{'class': self.__class__.__name__, 'id': id,
'pid': os.getpid()})
self._delete_neutron_resource(id)
def _add_neutron_resource(self, resource):
formatted_resource = self.format_for_create(resource)
resource_id = list(formatted_resource.keys())[0]
LOG.debug("%(pid)s %(class)s resource %(id)s added locally",
{'class': self.__class__.__name__,
'id': resource_id,
'pid': os.getpid()})
# If the resource has changed, force a POST to CVX
old_resource = self.neutron_resources.get(resource_id)
if old_resource and old_resource != formatted_resource[resource_id]:
LOG.debug("%(pid)s %(class)s resource %(id)s requires update",
{'class': self.__class__.__name__,
'id': resource_id,
'pid': os.getpid()})
self.force_resource_update(resource_id)
self.neutron_resources.update(formatted_resource)
def force_resource_update(self, id):
self.cvx_ids.discard(id)
def _delete_neutron_resource(self, id):
# Until we start using etcd, we need to unconditionally send the
# delete request because it might have been created by another worker.
# We force this by adding the resource to our 'view' of cvx resources
self.cvx_ids.add(id)
try:
del self.neutron_resources[id]
LOG.debug("%(pid)s %(class)s resource %(id)s removed locally",
{'class': self.__class__.__name__, 'id': id,
'pid': os.getpid()})
except KeyError:
LOG.debug("Resource ID %(id)s already deleted locally", {'id': id})
def get_endpoint(self):
return self.endpoint % {'region': self.region}
@classmethod
def get_resource_ids(cls, resource):
return set([resource[cls.id_key]])
def get_cvx_ids(self):
if self.cvx_data_stale:
LOG.info("%(pid)s Getting %(class)s from CVX",
{'class': self.__class__.__name__,
'pid': os.getpid()})
cvx_data = self.rpc.send_api_request(self.get_endpoint(), 'GET')
for resource in cvx_data:
self.cvx_ids |= self.get_resource_ids(resource)
self.cvx_data_stale = False
return self.cvx_ids
@staticmethod
def get_db_resources(key=None):
raise NotImplementedError
def get_neutron_ids(self):
if self.neutron_data_stale:
self.get_neutron_resources()
return set(self.neutron_resources.keys())
def get_neutron_resources(self):
if self.neutron_data_stale:
LOG.info("%(pid)s Getting %(class)s from neutron",
{'class': self.__class__.__name__,
'pid': os.getpid()})
for resource in self.get_db_resources():
self._add_neutron_resource(resource)
self.neutron_data_stale = False
return self.neutron_resources
def resource_ids_to_delete(self):
cvx_resource_ids = self.get_cvx_ids()
neutron_resource_ids = self.get_neutron_ids()
return (cvx_resource_ids - neutron_resource_ids)
def resource_ids_to_create(self):
cvx_resource_ids = self.get_cvx_ids()
neutron_resource_ids = self.get_neutron_ids()
return (neutron_resource_ids - cvx_resource_ids)
@classmethod
def format_for_create(cls, neutron_resource):
cvx_resource = dict(
attr.transform(neutron_resource) for attr in cls.formatter
)
return {cvx_resource[cls.id_key]: cvx_resource}
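    # e.g. (sketch) with the base class defaults (formatter
    # [AttributeFormatter('id', 'id')] and id_key 'id'),
    # format_for_create({'id': 'n1'}) returns {'n1': {'id': 'n1'}}.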
@classmethod
def format_for_delete(cls, id):
return {cls.id_key: id}
def create_cvx_resources(self):
resource_ids_to_create = self.resource_ids_to_create()
neutron_resources = self.get_neutron_resources()
resources_to_create = list(neutron_resources[resource_id] for
resource_id in resource_ids_to_create)
if resources_to_create:
LOG.info("%(pid)s Creating %(class)s resources with ids %(ids)s "
"on CVX",
{'class': self.__class__.__name__,
'ids': ', '.join(str(r) for r in resource_ids_to_create),
'pid': os.getpid()})
self.rpc.send_api_request(self.get_endpoint(), 'POST',
resources_to_create)
self.cvx_ids.update(resource_ids_to_create)
LOG.info("%(pid)s %(class)s resources with ids %(ids)s created "
"on CVX",
{'class': self.__class__.__name__,
'ids': ', '.join(str(r) for r in resource_ids_to_create),
'pid': os.getpid()})
else:
LOG.debug("%(pid)s No %(class)s resources to create",
{'class': self.__class__.__name__,
'pid': os.getpid()})
return resources_to_create
def delete_cvx_resources(self):
resource_ids_to_delete = self.resource_ids_to_delete()
resources_to_delete = list(self.format_for_delete(id) for id in
resource_ids_to_delete)
if resources_to_delete:
LOG.info("%(pid)s Deleting %(class)s resources with ids %(ids)s "
"from CVX",
{'class': self.__class__.__name__,
'ids': ', '.join(str(r) for r in resource_ids_to_delete),
'pid': os.getpid()})
try:
self.rpc.send_api_request(self.get_endpoint(), 'DELETE',
resources_to_delete)
except arista_exc.AristaRpcError as err:
if not err.msg.startswith('Unknown port id'):
raise
self.cvx_ids -= resource_ids_to_delete
LOG.info("%(pid)s %(class)s resources with ids %(ids)s deleted "
"from CVX",
{'class': self.__class__.__name__,
'ids': ', '.join(str(r) for r in resource_ids_to_delete),
'pid': os.getpid()})
else:
LOG.debug("%(pid)s No %(class)s resources to delete",
{'class': self.__class__.__name__,
'pid': os.getpid()})
return resources_to_delete
class Tenants(AristaResourcesBase):
endpoint = 'region/%(region)s/tenant'
formatter = [AttributeFormatter('project_id', 'id')]
get_db_resources = staticmethod(db_lib.get_tenants)
class Networks(AristaResourcesBase):
def _is_shared(rbac_entries, resource):
for entry in rbac_entries:
if (entry.action == 'access_as_shared' and
entry.target_tenant == '*'):
return True
return False
def modify_blank_project_id(project_id, resource):
if len(project_id) == 0:
l3ha_network = getattr(resource, 'L3HARouterNetwork')
if l3ha_network:
project_id = l3ha_network.project_id
return project_id
formatter = [AttributeFormatter('id', 'id',
submodel='Network'),
AttributeFormatter('project_id', 'tenantId',
modify_blank_project_id,
submodel='Network'),
AttributeFormatter('name', 'name',
submodel='Network'),
AttributeFormatter('rbac_entries', 'shared', _is_shared,
submodel='Network')]
endpoint = 'region/%(region)s/network'
get_db_resources = staticmethod(db_lib.get_networks)
class Segments(AristaResourcesBase):
formatter = [AttributeFormatter('id', 'id'),
AttributeFormatter('network_type', 'type'),
AttributeFormatter('segmentation_id', 'segmentationId'),
AttributeFormatter('network_id', 'networkId'),
AttributeFormatter('is_dynamic', 'segmentType',
lambda n, r: 'dynamic' if n else 'static')]
endpoint = 'region/%(region)s/segment'
get_db_resources = staticmethod(db_lib.get_segments)
class Dhcps(AristaResourcesBase):
formatter = [AttributeFormatter('device_id', 'id',
submodel='Port'),
AttributeFormatter('host', 'hostId',
utils.hostname,
submodel='PortBinding'),
AttributeFormatter('project_id', 'tenantId',
submodel='Port')]
endpoint = 'region/%(region)s/dhcp'
get_db_resources = staticmethod(db_lib.get_dhcp_instances)
class Routers(AristaResourcesBase):
def modify_blank_project_id(project_id, resource):
if len(project_id) == 0:
l3ha_router = getattr(resource, 'Router')
if l3ha_router:
project_id = l3ha_router.project_id
return project_id
formatter = [AttributeFormatter('device_id', 'id',
submodel='Port'),
AttributeFormatter('device_owner', 'hostId',
lambda *args: '(see router ports)',
submodel='Port'),
AttributeFormatter('project_id', 'tenantId',
modify_blank_project_id,
submodel='Port')]
endpoint = 'region/%(region)s/router'
get_db_resources = staticmethod(db_lib.get_router_instances)
class Vms(AristaResourcesBase):
formatter = [AttributeFormatter('device_id', 'id',
submodel='Port'),
AttributeFormatter('host', 'hostId',
utils.hostname,
submodel='PortBinding'),
AttributeFormatter('project_id', 'tenantId',
submodel='Port')]
endpoint = 'region/%(region)s/vm'
get_db_resources = staticmethod(db_lib.get_vm_instances)
class Baremetals(AristaResourcesBase):
formatter = [AttributeFormatter('device_id', 'id',
submodel='Port'),
AttributeFormatter('host', 'hostId',
submodel='PortBinding'),
AttributeFormatter('project_id', 'tenantId',
submodel='Port')]
endpoint = 'region/%(region)s/baremetal'
get_db_resources = staticmethod(db_lib.get_baremetal_instances)
class DhcpPorts(AristaResourcesBase):
endpoint = 'region/%(region)s/port?type=dhcp'
formatter = [AttributeFormatter('id', 'id', submodel='Port'),
AttributeFormatter('name', 'portName', submodel='Port'),
AttributeFormatter('device_owner', 'vlanType',
lambda *args: 'allowed',
submodel='Port'),
AttributeFormatter('network_id', 'networkId',
submodel='Port'),
AttributeFormatter('device_id', 'instanceId',
submodel='Port'),
AttributeFormatter('device_owner', 'instanceType',
lambda *args: 'dhcp',
submodel='Port'),
AttributeFormatter('project_id', 'tenantId',
submodel='Port')]
get_db_resources = staticmethod(db_lib.get_dhcp_ports)
class RouterPorts(AristaResourcesBase):
def modify_blank_project_id(project_id, resource):
if len(project_id) == 0:
l3ha_router = getattr(resource, 'Router')
if l3ha_router:
project_id = l3ha_router.project_id
return project_id
endpoint = 'region/%(region)s/port?type=router'
formatter = [AttributeFormatter('id', 'id', submodel='Port'),
AttributeFormatter('name', 'portName', submodel='Port'),
AttributeFormatter('device_owner', 'vlanType',
lambda *args: 'allowed',
submodel='Port'),
AttributeFormatter('network_id', 'networkId',
submodel='Port'),
AttributeFormatter('device_id', 'instanceId',
submodel='Port'),
AttributeFormatter('device_owner', 'instanceType',
lambda *args: 'router',
submodel='Port'),
AttributeFormatter('project_id', 'tenantId',
modify_blank_project_id, submodel='Port')]
get_db_resources = staticmethod(db_lib.get_router_ports)
class VmPorts(AristaResourcesBase):
endpoint = 'region/%(region)s/port?type=vm'
formatter = [AttributeFormatter('id', 'id', submodel='Port'),
AttributeFormatter('name', 'portName', submodel='Port'),
AttributeFormatter('device_owner', 'vlanType',
lambda *args: 'allowed', submodel='Port'),
AttributeFormatter('network_id', 'networkId',
submodel='Port'),
AttributeFormatter('device_id', 'instanceId',
submodel='Port'),
AttributeFormatter('device_owner', 'instanceType',
lambda *args: 'vm', submodel='Port'),
AttributeFormatter('project_id', 'tenantId',
submodel='Port')]
get_db_resources = staticmethod(db_lib.get_vm_ports)
@classmethod
def format_for_create(cls, resource):
# This is needed until we can update the upstream trunk port
# handling to add device_id to subports
port = getattr(resource, 'Port')
if port['device_owner'] == t_const.TRUNK_SUBPORT_OWNER:
parent_port = db_lib.get_parent(port['id'])
port['device_id'] = parent_port.get('device_id')
return super(VmPorts, cls).format_for_create(resource)
class BaremetalPorts(AristaResourcesBase):
def _get_vlan_type(device_owner, resource):
if (device_owner.startswith(n_const.DEVICE_OWNER_BAREMETAL_PREFIX) or
device_owner.startswith(n_const.DEVICE_OWNER_COMPUTE_PREFIX)):
return 'native'
else:
return 'allowed'
endpoint = 'region/%(region)s/port?type=baremetal'
formatter = [AttributeFormatter('id', 'id', submodel='Port'),
AttributeFormatter('name', 'portName', submodel='Port'),
AttributeFormatter('device_owner', 'vlanType',
_get_vlan_type, submodel='Port'),
AttributeFormatter('network_id', 'networkId',
submodel='Port'),
AttributeFormatter('device_id', 'instanceId',
submodel='Port'),
AttributeFormatter('device_owner', 'instanceType',
lambda *args: 'baremetal',
submodel='Port'),
AttributeFormatter('project_id', 'tenantId', submodel='Port')]
get_db_resources = staticmethod(db_lib.get_baremetal_ports)
class PortBindings(AristaResourcesBase):
endpoint = 'region/%(region)s/portbinding'
get_db_resources = staticmethod(db_lib.get_port_bindings)
@staticmethod
def maybe_strip_fqdn(id):
port_id, binding = id
        if not isinstance(binding, tuple):
binding = utils.hostname(binding)
return (port_id, binding)
def force_resource_update(self, id):
id = self.maybe_strip_fqdn(id)
super(PortBindings, self).force_resource_update(id)
def _delete_neutron_resource(self, id):
id = self.maybe_strip_fqdn(id)
super(PortBindings, self)._delete_neutron_resource(id)
@classmethod
def get_resource_ids(cls, resource):
resource_ids = set()
port_id = resource['portId']
for host_binding in resource.get('hostBinding', []):
resource_ids.add((port_id, host_binding['host']))
for switch_binding in resource.get('switchBinding', []):
resource_ids.add((port_id, (switch_binding['switch'],
switch_binding['interface'])))
return resource_ids
@classmethod
def format_for_delete(cls, id):
model = dict()
port_id, binding = id
model['portId'] = port_id
        if isinstance(binding, tuple):
switch, interface = binding
model['switchBinding'] = [{'switch': switch,
'interface': interface}]
else:
host = binding
model['hostBinding'] = [{'host': host}]
return model
@classmethod
def format_for_create(cls, binding):
cvx_resources = {}
# First build the list of segments to which the port is bound
# binding levels are in order from 0 -> highest
# which is typically vxlan -> vlan
# The Arista JSON API depends on this ordering being present in
# the segments list
segments = []
for binding_level in (sorted(binding['levels'],
key=lambda bl: bl.level)):
segments.append({'id': binding_level.segment_id})
# Determine if this is a switch or host bindings and populate
# the appropriate model attribute accordingly
host = utils.hostname(binding['host'])
port_id = binding['port_id']
# If the binding profile isn't valid json, this is a host binding
try:
profile = json.loads(binding.profile)
except ValueError:
profile = {}
if profile.get('local_link_information'):
for link in profile['local_link_information']:
switch_binding = {'host': host,
'switch': link['switch_id'],
'interface': link['port_id'],
'segment': segments}
binding_key = (link['switch_id'], link['port_id'])
cvx_resources[(port_id, binding_key)] = {
'portId': port_id,
'hostBinding': [],
'switchBinding': [switch_binding]}
else:
cvx_resources[(port_id, host)] = {
'portId': port_id,
'hostBinding': [{'host': host, 'segment': segments}],
'switchBinding': []}
return cvx_resources
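    # Sketch of the resulting models (hypothetical values): a host binding
    # yields {(port_id, 'compute-1'): {'portId': port_id,
    # 'hostBinding': [{'host': 'compute-1', 'segment': [...]}],
    # 'switchBinding': []}}, while a binding profile carrying
    # local_link_information yields one entry per (switch, interface) pair
    # under 'switchBinding'.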
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/ml2/arista_sync.py0000664000175000017500000002533700000000000024643 0ustar00zuulzuul00000000000000# Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import random
import time
from eventlet import event
from eventlet import greenthread
from six.moves.queue import Empty
from neutron_lib import worker
from oslo_config import cfg
from oslo_log import log as logging
from networking_arista.common import constants as a_const
from networking_arista.ml2 import arista_resources as resources
from networking_arista.ml2.rpc.arista_json import AristaRPCWrapperJSON
LOG = logging.getLogger(__name__)
class AristaSyncWorker(worker.BaseWorker):
def __init__(self, provision_queue):
super(AristaSyncWorker, self).__init__(worker_process_count=1)
self._rpc = AristaRPCWrapperJSON()
self.provision_queue = provision_queue
self._thread = None
self._running = False
self.done = event.Event()
self._sync_interval = cfg.CONF.ml2_arista.sync_interval
def initialize(self):
self._last_sync_time = 0
self._cvx_uuid = None
self._synchronizing_uuid = None
self._in_full_sync = False
self._resources_to_update = list()
self.tenants = resources.Tenants(self._rpc)
self.networks = resources.Networks(self._rpc)
self.segments = resources.Segments(self._rpc)
self.dhcps = resources.Dhcps(self._rpc)
self.routers = resources.Routers(self._rpc)
self.vms = resources.Vms(self._rpc)
self.baremetals = resources.Baremetals(self._rpc)
self.dhcp_ports = resources.DhcpPorts(self._rpc)
self.router_ports = resources.RouterPorts(self._rpc)
self.vm_ports = resources.VmPorts(self._rpc)
self.baremetal_ports = resources.BaremetalPorts(self._rpc)
self.port_bindings = resources.PortBindings(self._rpc)
# Sync order is important because of entity dependencies:
# PortBinding -> Port -> Instance -> Tenant
# -> Segment -> Network -> Tenant
self.sync_order = [self.tenants,
self.networks,
self.segments,
self.dhcps,
self.routers,
self.vms,
self.baremetals,
self.dhcp_ports,
self.router_ports,
self.vm_ports,
self.baremetal_ports,
self.port_bindings]
def _on_done(self, gt, *args, **kwargs):
self._thread = None
self._running = False
def start(self):
if self._thread is not None:
LOG.warning('Arista sync loop has already been started')
return
LOG.info("Arista sync worker started")
super(AristaSyncWorker, self).start()
self.initialize()
self._running = True
LOG.info("Spawning Arista sync loop")
self._thread = greenthread.spawn(self.sync_loop)
self._thread.link(self._on_done)
def stop(self, graceful=True):
if graceful:
self._running = False
else:
self._thread.kill()
def wait(self):
return self.done.wait()
def reset(self):
self.stop()
self.wait()
self.start()
def get_resource_class(self, resource_type):
class_map = {a_const.TENANT_RESOURCE: self.tenants,
a_const.NETWORK_RESOURCE: self.networks,
a_const.SEGMENT_RESOURCE: self.segments,
a_const.DHCP_RESOURCE: self.dhcps,
a_const.ROUTER_RESOURCE: self.routers,
a_const.VM_RESOURCE: self.vms,
a_const.BAREMETAL_RESOURCE: self.baremetals,
a_const.DHCP_PORT_RESOURCE: self.dhcp_ports,
a_const.ROUTER_PORT_RESOURCE: self.router_ports,
a_const.VM_PORT_RESOURCE: self.vm_ports,
a_const.BAREMETAL_PORT_RESOURCE: self.baremetal_ports,
a_const.PORT_BINDING_RESOURCE: self.port_bindings}
return class_map[resource_type]
def update_neutron_resource(self, resource):
LOG.debug("%(pid)s %(action)s %(rtype)s with id %(id)s",
{'action': resource.action,
'rtype': resource.resource_type,
'id': resource.id,
'pid': os.getpid()})
resource_class = self.get_resource_class(resource.resource_type)
resource_class.update_neutron_resource(resource.id, resource.action)
for resource_type, resource_id in resource.related_resources:
LOG.debug("%(pid)s %(action)s requisite %(rtype)s with id %(id)s",
{'action': resource.action,
'rtype': resource_type,
'id': resource_id,
'pid': os.getpid()})
resource_class = self.get_resource_class(resource_type)
resource_class.update_neutron_resource(resource_id,
resource.action)
def force_full_sync(self):
self._in_full_sync = True
for resource_type in reversed(self.sync_order):
resource_type.clear_all_data()
def check_if_out_of_sync(self):
cvx_uuid = self._rpc.get_cvx_uuid()
out_of_sync = False
if self._cvx_uuid != cvx_uuid:
LOG.info("%(pid)s Initiating full sync - local uuid %(l_uuid)s"
" - cvx uuid %(c_uuid)s",
{'l_uuid': self._cvx_uuid,
'c_uuid': cvx_uuid,
'pid': os.getpid()})
self.force_full_sync()
self._synchronizing_uuid = cvx_uuid
out_of_sync = True
self._last_sync_time = time.time()
return out_of_sync
def wait_for_mech_driver_update(self, timeout):
try:
resource = self.provision_queue.get(timeout=timeout)
LOG.info("%(pid)s Processing %(res)s", {'res': resource,
'pid': os.getpid()})
self._resources_to_update.append(resource)
except Empty:
pass
return len(self._resources_to_update) > 0
def wait_for_sync_required(self):
timeout = (self._sync_interval -
(time.time() - self._last_sync_time))
LOG.info("%(pid)s Arista Sync time %(time)s last sync %(last_sync)s "
"timeout %(timeout)s", {'time': time.time(),
'last_sync': self._last_sync_time,
'timeout': timeout,
'pid': os.getpid()})
if timeout < 0:
return self.check_if_out_of_sync()
else:
return self.wait_for_mech_driver_update(timeout)
def synchronize_resources(self):
"""Synchronize worker with CVX
All database queries must occur while the sync lock is held. This
tightly couples reads with writes and ensures that an older read
does not result in the last write. E.g.:
Worker 1 reads (P1 created)
Worker 2 reads (P1 deleted)
Worker 2 writes (Delete P1 from CVX)
Worker 1 writes (Create P1 on CVX)
By ensuring that all reads occur with the sync lock held, we ensure
that Worker 1 completes its writes before Worker2 is allowed to read.
A failure to write results in a full resync and purges all reads from
memory.
It is also important that we compute resources to sync in reverse sync
order to avoid missing dependencies on creation. E.g.:
If we query in sync order
1. Query Instances -> I1 isn't there
2. Query Port table -> Port P1 is there, connected to I1
3. We send P1 to CVX without sending I1 -> Error raised
But if we query P1 first:
1. Query Ports P1 -> P1 is not there
2. Query Instances -> find I1
3. We create I1, not P1 -> harmless, mech driver creates P1
Missing dependencies on deletion will helpfully result in the
dependent resource not being created:
1. Query Ports -> P1 is found
2. Query Instances -> I1 not found
3. Creating P1 fails on CVX
"""
# Grab the sync lock
if not self._rpc.sync_start():
LOG.info("%(pid)s Failed to grab the sync lock",
{'pid': os.getpid()})
return
for resource in self._resources_to_update:
self.update_neutron_resource(resource)
self._resources_to_update = list()
# Sync any necessary resources.
# We delete in reverse order and create in order to ensure that
# dependent resources are deleted before the resources they depend
# on and created after them
for resource_type in reversed(self.sync_order):
resource_type.delete_cvx_resources()
for resource_type in self.sync_order:
resource_type.create_cvx_resources()
# Release the sync lock
self._rpc.sync_end()
# Update local uuid if this was a full sync
if self._in_full_sync:
LOG.info("%(pid)s Full sync for cvx uuid %(uuid)s complete",
{'uuid': self._synchronizing_uuid,
'pid': os.getpid()})
self._cvx_uuid = self._synchronizing_uuid
def sync_loop(self):
while self._running:
try:
sync_required = self.wait_for_sync_required()
if sync_required:
self.synchronize_resources()
except Exception:
LOG.exception("%(pid)s Arista Sync failed",
{'pid': os.getpid()})
# Release sync lock if held and at least one full sync was
# successful with the current cvx uuid
if (self._rpc.current_sync_name is not None and
self._cvx_uuid is not None and
self._cvx_uuid == self._synchronizing_uuid):
self._rpc.sync_end()
self._synchronizing_uuid = None
self._in_full_sync = False
# Yield to avoid starvation
greenthread.sleep(random.random())
self.done.send(True)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/ml2/arista_trunk.py0000664000175000017500000001601200000000000025020 0ustar00zuulzuul00000000000000# Copyright (c) 2018 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
from oslo_log import log
from neutron_lib.api.definitions import port as p_api
from neutron_lib.api.definitions import portbindings
from neutron_lib.api.definitions import trunk as t_api
from neutron_lib.api.definitions import trunk_details as td_api
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants as n_const
from neutron_lib import context
from neutron_lib.plugins import directory
from neutron_lib.services.trunk import constants as t_const
from neutron.services.trunk.drivers import base
LOG = log.getLogger(__name__)
NAME = 'arista'
SUPPORTED_INTERFACES = (
portbindings.VIF_TYPE_OTHER,
)
SUPPORTED_SEGMENTATION_TYPES = (
t_const.SEGMENTATION_TYPE_VLAN,
)
class AristaTrunkDriver(base.DriverBase):
@property
def is_loaded(self):
try:
return NAME in cfg.CONF.ml2.mechanism_drivers
except cfg.NoSuchOptError:
return False
@registry.receives(resources.TRUNK_PLUGIN, [events.AFTER_INIT])
def register(self, resource, event, trigger, **kwargs):
"""Called in trunk plugin's AFTER_INIT"""
super(AristaTrunkDriver, self).register(resource, event,
trigger, **kwargs)
registry.subscribe(self.subport_create,
resources.SUBPORTS, events.AFTER_CREATE)
registry.subscribe(self.subport_delete,
resources.SUBPORTS, events.AFTER_DELETE)
registry.subscribe(self.trunk_create,
resources.TRUNK, events.AFTER_CREATE)
registry.subscribe(self.trunk_update,
resources.TRUNK, events.AFTER_UPDATE)
registry.subscribe(self.trunk_delete,
resources.TRUNK, events.AFTER_DELETE)
self.core_plugin = directory.get_plugin()
LOG.debug("Arista trunk driver initialized.")
@classmethod
def create(cls):
return cls(NAME, SUPPORTED_INTERFACES, SUPPORTED_SEGMENTATION_TYPES,
can_trunk_bound_port=True)
def bind_port(self, parent):
ctx = context.get_admin_context()
trunk = parent.get(td_api.TRUNK_DETAILS, {})
subports = trunk.get(t_api.SUB_PORTS, [])
subport_ids = [subport['port_id'] for subport in subports]
self._bind_subports(ctx, subport_ids, parent)
trunk_plugin = directory.get_plugin(t_api.ALIAS)
trunk_plugin.update_trunk(ctx, trunk.get('trunk_id'),
{t_api.TRUNK:
{'status': t_const.TRUNK_ACTIVE_STATUS}})
def _bind_subports(self, ctx, subport_ids, parent):
host_id = parent.get(portbindings.HOST_ID)
vnic_type = parent.get(portbindings.VNIC_TYPE)
profile = parent.get(portbindings.PROFILE)
device_id = parent.get('device_id')
for subport_id in subport_ids:
self.core_plugin.update_port(
ctx, subport_id,
{p_api.RESOURCE_NAME:
{portbindings.HOST_ID: host_id,
portbindings.VNIC_TYPE: vnic_type,
portbindings.PROFILE: profile,
'device_owner': t_const.TRUNK_SUBPORT_OWNER,
'device_id': device_id,
'status': n_const.PORT_STATUS_ACTIVE}})
def _unbind_subports(self, ctx, subport_ids, parent):
for subport_id in subport_ids:
self.core_plugin.update_port(
ctx, subport_id,
{p_api.RESOURCE_NAME:
{portbindings.HOST_ID: None,
portbindings.VNIC_TYPE: None,
portbindings.PROFILE: None,
'device_owner': '',
'device_id': '',
'status': n_const.PORT_STATUS_DOWN}})
def _delete_trunk(self, trunk):
ctx = context.get_admin_context()
parent_id = trunk.port_id
parent = self.core_plugin.get_port(ctx, parent_id)
if parent.get(portbindings.VNIC_TYPE) != portbindings.VNIC_BAREMETAL:
return
subport_ids = [subport.port_id
for subport in trunk.sub_ports]
self._unbind_subports(ctx, subport_ids, parent)
def trunk_create(self, resource, event, trunk_plugin, payload):
ctx = context.get_admin_context()
parent_id = payload.states[0].port_id
parent = self.core_plugin.get_port(ctx, parent_id)
if parent.get(portbindings.VNIC_TYPE) != portbindings.VNIC_BAREMETAL:
return
subport_ids = [subport.port_id
for subport in payload.states[0].sub_ports]
self._bind_subports(ctx, subport_ids, parent)
trunk_plugin.update_trunk(ctx, payload.resource_id,
{t_api.TRUNK:
{'status': parent['status']}})
def trunk_update(self, resource, event, trunk_plugin, payload):
if payload.states[1].status != t_const.TRUNK_ACTIVE_STATUS:
self._delete_trunk(payload.states[1])
def trunk_delete(self, resource, event, trunk_plugin, payload):
self._delete_trunk(payload.states[0])
def subport_create(self, resource, event, trunk_plugin, payload):
ctx = context.get_admin_context()
parent_id = payload.states[1].port_id
parent = self.core_plugin.get_port(ctx, parent_id)
if parent.get(portbindings.VNIC_TYPE) != portbindings.VNIC_BAREMETAL:
return
subport_ids = [subport.port_id for subport in
payload.metadata['subports']]
self._bind_subports(ctx, subport_ids, parent)
trunk_plugin.update_trunk(ctx, payload.resource_id,
{t_api.TRUNK:
{'status': parent['status']}})
def subport_delete(self, resource, event, trunk_plugin, payload):
ctx = context.get_admin_context()
parent_id = payload.states[1].port_id
parent = self.core_plugin.get_port(ctx, parent_id)
if parent.get(portbindings.VNIC_TYPE) != portbindings.VNIC_BAREMETAL:
return
subport_ids = [subport.port_id for subport in
payload.metadata['subports']]
self._unbind_subports(ctx, subport_ids, parent)
trunk_plugin.update_trunk(ctx, payload.resource_id,
{t_api.TRUNK:
{'status': parent['status']}})
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/ml2/mechanism_arista.py0000664000175000017500000004274200000000000025632 0ustar00zuulzuul00000000000000# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from neutron_lib.api.definitions import portbindings
from neutron_lib import constants as n_const
from neutron_lib.plugins.ml2 import api as driver_api
from neutron_lib.services.trunk import constants as trunk_consts
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_log import log as logging
from neutron.db import segments_db
from networking_arista.common import constants as a_const
from networking_arista.common import db_lib
from networking_arista.ml2 import arista_trunk
from networking_arista.ml2.rpc.arista_eapi import AristaRPCWrapperEapi
# When used as a Neutron plugin, neutron-lib imports this code. However earlier
# neutron-lib has already imported 'multiprocessing'. This means the python
# module cache (sys.modules) contains a version of 'multiprocessing' where
# select.poll() exists.
#
# Further down we import arista_sync, which spawns a greenthread. This
# greenthread then uses a green version of 'multiprocessing' where
# select.poll() has been removed.
#
# Calling multiprocessing.Queue.put() here and, in the greenthread,
# multiprocessing.Queue.get(timeout=...) leads to:
# AttributeError: module 'select' has no attribute 'poll'
#
# We can't do eventlet.monkey_patch() early enough (before the first
# 'multiprocessing' import) as we would have to do it in neutron-lib and it's
# forbidden, see https://review.opendev.org/#/c/333017/
#
# The solution is to let the python module cache here forget the already
# imported 'multiprocessing' and re-import a green one. Here again
# eventlet.monkey_patch() doesn't seem to help as it doesn't seem to touch
# 'multiprocessing'. Thus we use eventlet.import_patched() instead:
import eventlet
import sys
modules_to_forget = []
for imported_module_name in sys.modules:
if imported_module_name.startswith('multiprocessing'):
modules_to_forget.append(imported_module_name)
for module_to_forget in modules_to_forget:
del sys.modules[module_to_forget]
for module_to_forget in modules_to_forget:
try:
eventlet.import_patched(module_to_forget)
except ImportError:
pass
# import a green 'multiprocessing':
multiprocessing = eventlet.import_patched('multiprocessing')
from networking_arista.ml2 import arista_sync # noqa: E402
LOG = logging.getLogger(__name__)
cfg.CONF.import_group('ml2_arista', 'networking_arista.common.config')
def log_context(function, context):
pretty_context = json.dumps(context, sort_keys=True, indent=4)
LOG.debug(function)
LOG.debug(pretty_context)
class MechResource(object):
"""Container class for passing data to sync worker"""
def __init__(self, id, resource_type, action, related_resources=None):
self.id = id
self.resource_type = resource_type
self.action = action
self.related_resources = related_resources or list()
def __str__(self):
return "%s %s ID: %s" % (self.action, self.resource_type, self.id)
class AristaDriver(driver_api.MechanismDriver):
"""Ml2 Mechanism driver for Arista networking hardware.
Remembers all networks and VMs that are provisioned on Arista Hardware.
Does not resend a network provisioning request if the network has
already been provisioned for the given port.
"""
def __init__(self):
conf = cfg.CONF.ml2_arista
self.managed_physnets = conf['managed_physnets']
self.manage_fabric = conf['manage_fabric']
self.eapi = AristaRPCWrapperEapi()
self.mlag_pairs = dict()
self.provision_queue = multiprocessing.Queue()
self.trunk_driver = None
self.vif_details = {portbindings.VIF_DETAILS_CONNECTIVITY:
self.connectivity}
@property
def connectivity(self):
return portbindings.CONNECTIVITY_L2
def initialize(self):
self.mlag_pairs = db_lib.get_mlag_physnets()
self.trunk_driver = arista_trunk.AristaTrunkDriver.create()
def get_workers(self):
return [arista_sync.AristaSyncWorker(self.provision_queue)]
def create_network(self, network, segments):
"""Enqueue network create"""
tenant_id = network['project_id']
action = a_const.CREATE if tenant_id else a_const.FULL_SYNC
n_res = MechResource(network['id'], a_const.NETWORK_RESOURCE, action)
n_res.related_resources.append((a_const.TENANT_RESOURCE, tenant_id))
for segment in segments:
n_res.related_resources.append(
(a_const.SEGMENT_RESOURCE, segment['id']))
self.provision_queue.put(n_res)
def delete_network(self, network, segments):
"""Enqueue network delete"""
tenant_id = network['project_id']
action = a_const.DELETE if tenant_id else a_const.FULL_SYNC
n_res = MechResource(network['id'], a_const.NETWORK_RESOURCE, action)
# Delete tenant if this was the last tenant resource
if not db_lib.tenant_provisioned(tenant_id):
n_res.related_resources.append(
(a_const.TENANT_RESOURCE, tenant_id))
for segment in segments:
n_res.related_resources.append(
(a_const.SEGMENT_RESOURCE, segment['id']))
self.provision_queue.put(n_res)
def delete_segment(self, segment):
"""Enqueue segment delete"""
s_res = MechResource(segment['id'], a_const.SEGMENT_RESOURCE,
a_const.DELETE)
self.provision_queue.put(s_res)
def get_instance_type(self, port):
"""Determine the port type based on device owner and vnic type"""
if port[portbindings.VNIC_TYPE] == portbindings.VNIC_BAREMETAL:
return a_const.BAREMETAL_RESOURCE
owner_to_type = {
n_const.DEVICE_OWNER_DHCP: a_const.DHCP_RESOURCE,
n_const.DEVICE_OWNER_DVR_INTERFACE: a_const.ROUTER_RESOURCE,
n_const.DEVICE_OWNER_ROUTER_INTF: a_const.ROUTER_RESOURCE,
n_const.DEVICE_OWNER_ROUTER_HA_INTF: a_const.ROUTER_RESOURCE,
n_const.DEVICE_OWNER_ROUTER_GW: a_const.ROUTER_RESOURCE,
trunk_consts.TRUNK_SUBPORT_OWNER: a_const.VM_RESOURCE}
if port['device_owner'] in owner_to_type:
return owner_to_type[port['device_owner']]
elif port['device_owner'].startswith(
n_const.DEVICE_OWNER_COMPUTE_PREFIX):
return a_const.VM_RESOURCE
return None
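# Example (constant values from neutron_lib): a port whose device_owner
# is 'network:dhcp' (DEVICE_OWNER_DHCP) maps to DHCP_RESOURCE, while
# 'compute:nova' matches DEVICE_OWNER_COMPUTE_PREFIX and maps to
# VM_RESOURCE.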
def _get_binding_keys(self, port, host):
"""Get binding keys from the port binding"""
binding_keys = list()
switch_binding = port[portbindings.PROFILE].get(
'local_link_information', None)
if switch_binding:
for binding in switch_binding:
switch_id = binding.get('switch_id')
port_id = binding.get('port_id')
binding_keys.append((port['id'], (switch_id, port_id)))
else:
binding_keys.append((port['id'], host))
return binding_keys
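# Example (assuming the standard Ironic local_link_information format):
# a baremetal port whose binding profile contains
#   [{'switch_id': '00:11:22:33:44:55', 'port_id': 'Ethernet1'}]
# yields [(port['id'], ('00:11:22:33:44:55', 'Ethernet1'))]; a port
# with no switch bindings falls back to [(port['id'], host)].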
def create_port_binding(self, port, host):
"""Enqueue port binding create"""
tenant_id = port['project_id']
instance_type = self.get_instance_type(port)
if not instance_type:
return
port_type = instance_type + a_const.PORT_SUFFIX
action = a_const.CREATE if tenant_id else a_const.FULL_SYNC
related_resources = list()
related_resources.append((a_const.TENANT_RESOURCE, tenant_id))
related_resources.append((instance_type, port['device_id']))
related_resources.append((port_type, port['id']))
for pb_key in self._get_binding_keys(port, host):
pb_res = MechResource(pb_key, a_const.PORT_BINDING_RESOURCE,
action, related_resources=related_resources)
self.provision_queue.put(pb_res)
def delete_port_binding(self, port, host):
"""Enqueue port binding delete"""
tenant_id = port['project_id']
instance_type = self.get_instance_type(port)
if not instance_type:
return
port_type = instance_type + a_const.PORT_SUFFIX
action = a_const.DELETE if tenant_id else a_const.FULL_SYNC
related_resources = list()
# Delete tenant if this was the last tenant resource
if not db_lib.tenant_provisioned(tenant_id):
related_resources.append((a_const.TENANT_RESOURCE, tenant_id))
# Delete instance if this was the last instance port
if not db_lib.instance_provisioned(port['device_id']):
related_resources.append((instance_type, port['device_id']))
# Delete port if this was the last port binding
if not db_lib.port_provisioned(port['id']):
related_resources.append((port_type, port['id']))
for pb_key in self._get_binding_keys(port, host):
pb_res = MechResource(pb_key, a_const.PORT_BINDING_RESOURCE,
action, related_resources=related_resources)
self.provision_queue.put(pb_res)
def create_network_postcommit(self, context):
"""Provision the network on CVX"""
network = context.current
log_context("create_network_postcommit: network", network)
segments = context.network_segments
self.create_network(network, segments)
def update_network_postcommit(self, context):
"""Send network updates to CVX:
- Update the network name
- Add new segments
"""
network = context.current
orig_network = context.original
log_context("update_network_postcommit: network", network)
log_context("update_network_postcommit: orig", orig_network)
segments = context.network_segments
# New segments may have been added
self.create_network(network, segments)
def delete_network_postcommit(self, context):
"""Delete the network from CVX"""
network = context.current
log_context("delete_network_postcommit: network", network)
segments = context.network_segments
self.delete_network(network, segments)
def update_port_postcommit(self, context):
"""Send port updates to CVX
This method is also responsible for the initial creation of ports
as we wait until after a port is bound to send the port data to CVX
"""
port = context.current
orig_port = context.original
network = context.network.current
log_context("update_port_postcommit: port", port)
log_context("update_port_postcommit: orig", orig_port)
# Device id can change without a port going DOWN, but the new device
# id may not be supported
if orig_port and port['device_id'] != orig_port['device_id']:
self.delete_port_binding(orig_port, context.original_host)
if context.status in [n_const.PORT_STATUS_ACTIVE,
n_const.PORT_STATUS_BUILD]:
if context.binding_levels:
segments = [
level['bound_segment'] for level in context.binding_levels]
self.create_network(network, segments)
self.create_port_binding(port, context.host)
else:
if (context.original_host and
context.status != context.original_status):
self.delete_port_binding(orig_port, context.original_host)
self._try_to_release_dynamic_segment(context, migration=True)
def delete_port_postcommit(self, context):
"""Delete the port from CVX"""
port = context.current
log_context("delete_port_postcommit: port", port)
self.delete_port_binding(port, context.host)
self._try_to_release_dynamic_segment(context)
def _bind_baremetal_port(self, context, segment):
"""Bind the baremetal port to the segment"""
port = context.current
vif_details = {
portbindings.VIF_DETAILS_VLAN: str(
segment[driver_api.SEGMENTATION_ID])
}
context.set_binding(segment[driver_api.ID],
portbindings.VIF_TYPE_OTHER,
vif_details,
n_const.ACTIVE)
LOG.debug("AristaDriver: bound port info- port ID %(id)s "
"on network %(network)s",
{'id': port['id'],
'network': context.network.current['id']})
if port.get('trunk_details'):
self.trunk_driver.bind_port(port)
return True
def _get_physnet(self, context):
"""Find the appropriate physnet for the host
- Baremetal ports' physnet is determined by looking at the
local_link_information contained in the binding profile
- Other ports' physnet is determined by looking for the host in the
topology
"""
port = context.current
physnet = None
if (port.get(portbindings.VNIC_TYPE) == portbindings.VNIC_BAREMETAL):
physnet = self.eapi.get_baremetal_physnet(context)
else:
physnet = self.eapi.get_host_physnet(context)
# If the switch is part of an mlag pair, the physnet is called
# peer1_peer2
physnet = self.mlag_pairs.get(physnet, physnet)
return physnet
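# Illustrative example (hypothetical switch names): with
# self.mlag_pairs = {'switch1': 'switch1_switch2'}, a host wired to
# 'switch1' is placed on the shared physnet 'switch1_switch2'.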
def _bind_fabric(self, context, segment):
"""Allocate dynamic segments for the port
Segment physnets are based on the switch to which the host is
connected.
"""
port_id = context.current['id']
physnet = self._get_physnet(context)
if not physnet:
LOG.debug("bind_port for port %(port)s: no physical_network "
"found", {'port': port_id})
return False
with lockutils.lock(physnet, external=True):
context.allocate_dynamic_segment(
{'network_id': context.network.current['id'],
'network_type': n_const.TYPE_VLAN,
'physical_network': physnet})
next_segment = segments_db.get_dynamic_segment(
context._plugin_context, context.network.current['id'],
physical_network=physnet)
LOG.debug("bind_port for port %(port)s: "
"current_segment=%(current_seg)s, "
"next_segment=%(next_seg)s",
{'port': port_id, 'current_seg': segment,
'next_seg': next_segment})
context.continue_binding(segment['id'], [next_segment])
return True
def bind_port(self, context):
"""Bind port to a network segment.
Provisioning request to Arista Hardware to plug a host
into appropriate network is done when the port is created
this simply tells the ML2 Plugin that we are binding the port
"""
port = context.current
log_context("bind_port: port", port)
for segment in context.segments_to_bind:
physnet = segment.get(driver_api.PHYSICAL_NETWORK)
segment_type = segment[driver_api.NETWORK_TYPE]
if not physnet:
if (segment_type == n_const.TYPE_VXLAN and self.manage_fabric):
if self._bind_fabric(context, segment):
continue
elif (port.get(portbindings.VNIC_TYPE) ==
portbindings.VNIC_BAREMETAL):
if (not self.managed_physnets or
physnet in self.managed_physnets):
if self._bind_baremetal_port(context, segment):
continue
LOG.debug("Arista mech driver unable to bind port %(port)s to "
"%(seg_type)s segment on physical_network %(physnet)s",
{'port': port.get('id'), 'seg_type': segment_type,
'physnet': physnet})
def _try_to_release_dynamic_segment(self, context, migration=False):
"""Release dynamic segment if necessary
If this port was the last port using a segment and the segment was
allocated by this driver, it should be released
"""
if migration:
binding_levels = context.original_binding_levels
else:
binding_levels = context.binding_levels
LOG.debug("_try_to_release_dynamic_segment: "
"binding_levels=%(bl)s", {'bl': binding_levels})
if not binding_levels:
return
for prior_level, binding in enumerate(binding_levels[1:]):
allocating_driver = binding_levels[prior_level].get(
driver_api.BOUND_DRIVER)
if allocating_driver != a_const.MECHANISM_DRV_NAME:
continue
bound_segment = binding.get(driver_api.BOUND_SEGMENT, {})
segment_id = bound_segment.get('id')
if not db_lib.segment_is_dynamic(segment_id):
continue
if not db_lib.segment_bound(segment_id):
context.release_dynamic_segment(segment_id)
self.delete_segment(bound_segment)
LOG.debug("Released dynamic segment %(seg)s allocated "
"by %(drv)s", {'seg': segment_id,
'drv': allocating_driver})
././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1693270226.644102
networking_arista-2023.1.0/networking_arista/ml2/rpc/0000775000175000017500000000000000000000000022524 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/ml2/rpc/__init__.py0000664000175000017500000000000000000000000024623 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/ml2/rpc/arista_eapi.py0000664000175000017500000003245700000000000025372 0ustar00zuulzuul00000000000000# Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from oslo_log import log as logging
import requests
import six
from six.moves.urllib.parse import quote
from neutron_lib.api.definitions import portbindings
from networking_arista._i18n import _, _LI, _LW, _LE
from networking_arista.common import constants as const
from networking_arista.common import exceptions as arista_exc
from networking_arista.common import utils
from networking_arista.ml2.rpc.base import AristaRPCWrapperBase
LOG = logging.getLogger(__name__)
class AristaRPCWrapperEapi(AristaRPCWrapperBase):
def __init__(self):
super(AristaRPCWrapperEapi, self).__init__()
# The cli_commands dict stores the mapping between the CLI command key
# and the actual CLI command.
self.cli_commands = {
'resource-pool': [],
'features': {},
}
def _send_eapi_req(self, cmds, commands_to_log=None):
# This method handles all EAPI requests (using the requests library)
# and returns either None or response.json()['result'] from the EAPI
# request.
#
# Exceptions related to failures in connecting/ timeouts are caught
# here and logged. Other unexpected exceptions are logged and raised
request_headers = {}
request_headers['Content-Type'] = 'application/json'
request_headers['Accept'] = 'application/json'
url = self._api_host_url(host=self._server_ip)
params = {}
params['timestamps'] = "false"
params['format'] = "json"
params['version'] = 1
params['cmds'] = cmds
data = {}
data['id'] = "Arista ML2 driver"
data['method'] = "runCmds"
data['jsonrpc'] = "2.0"
data['params'] = params
response = None
try:
# NOTE(pbourke): shallow copy data and params to remove sensitive
# information before logging
log_data = dict(data)
log_data['params'] = dict(params)
log_data['params']['cmds'] = commands_to_log or cmds
msg = (_('EAPI request to %(ip)s contains %(cmd)s') %
{'ip': self._server_ip, 'cmd': json.dumps(log_data)})
LOG.info(msg)
response = requests.post(url, timeout=self.conn_timeout,
verify=False, data=json.dumps(data))
LOG.info(_LI('EAPI response contains: %s'), response.json())
try:
return response.json()['result']
except KeyError:
if response.json()['error']['code'] == 1002:
for data in response.json()['error']['data']:
if isinstance(data, dict) and 'errors' in data:
if const.ERR_CVX_NOT_LEADER in data['errors'][0]:
msg = six.text_type("%s is not the master" % (
self._server_ip))
LOG.info(msg)
return None
msg = "Unexpected EAPI error"
LOG.info(msg)
raise arista_exc.AristaRpcError(msg=msg)
except requests.exceptions.ConnectionError:
msg = (_('Error while trying to connect to %(ip)s') %
{'ip': self._server_ip})
LOG.warning(msg)
return None
except requests.exceptions.ConnectTimeout:
msg = (_('Timed out while trying to connect to %(ip)s') %
{'ip': self._server_ip})
LOG.warning(msg)
return None
except requests.exceptions.Timeout:
msg = (_('Timed out during an EAPI request to %(ip)s') %
{'ip': self._server_ip})
LOG.warning(msg)
return None
except requests.exceptions.InvalidURL:
msg = (_('Ignore attempt to connect to invalid URL %(ip)s') %
{'ip': self._server_ip})
LOG.warning(msg)
return None
except ValueError:
LOG.info("Ignoring invalid JSON response")
return None
except Exception as error:
msg = six.text_type(error)
LOG.warning(msg)
raise
def check_vlan_type_driver_commands(self):
"""Checks the validity of CLI commands for Arista's VLAN type driver.
This method tries to execute the commands used exclusively by the
arista_vlan type driver and stores the commands if they succeed.
"""
cmd = ['show openstack resource-pool vlan region %s uuid'
% self.region]
try:
self._run_eos_cmds(cmd)
self.cli_commands['resource-pool'] = cmd
except arista_exc.AristaRpcError:
self.cli_commands['resource-pool'] = []
LOG.warning(
_LW("'resource-pool' command '%s' is not available on EOS"),
cmd)
def get_vlan_assignment_uuid(self):
"""Returns the UUID for the region's vlan assignment on CVX
:returns: string containing the region's vlan assignment UUID
"""
vlan_uuid_cmd = self.cli_commands['resource-pool']
if vlan_uuid_cmd:
return self._run_eos_cmds(commands=vlan_uuid_cmd)[0]
return None
def get_vlan_allocation(self):
"""Returns the status of the region's VLAN pool in CVX
:returns: dictionary containing the assigned, allocated and available
VLANs for the region
"""
if not self.cli_commands['resource-pool']:
LOG.warning(_('The version of CVX you are using does not support '
'arista VLAN type driver.'))
else:
cmd = ['show openstack resource-pools region %s' % self.region]
command_output = self._run_eos_cmds(cmd)
if command_output:
regions = command_output[0]['physicalNetwork']
if self.region in regions.keys():
return regions[self.region]['vlanPool']['default']
return {'assignedVlans': '',
'availableVlans': '',
'allocatedVlans': ''}
def _run_eos_cmds(self, commands, commands_to_log=None):
"""Execute/sends a CAPI (Command API) command to EOS.
In this method, list of commands is appended with prefix and
postfix commands - to make is understandble by EOS.
:param commands : List of command to be executed on EOS.
:param commands_to_log : This should be set to the command that is
logged. If it is None, then the commands
param is logged.
"""
# Always figure out who is master (starting with the last known val)
try:
if self._get_eos_master() is None:
msg = "Failed to identify CVX master"
self.set_cvx_unavailable()
raise arista_exc.AristaRpcError(msg=msg)
except Exception:
self.set_cvx_unavailable()
raise
self.set_cvx_available()
log_cmds = commands
if commands_to_log:
log_cmds = commands_to_log
LOG.info(_LI('Executing command on Arista EOS: %s'), log_cmds)
# this returns array of return values for every command in
# full_command list
try:
response = self._send_eapi_req(cmds=commands,
commands_to_log=log_cmds)
if response is None:
# Reset the server as we failed communicating with it
self._server_ip = None
self.set_cvx_unavailable()
msg = "Failed to communicate with CVX master"
raise arista_exc.AristaRpcError(msg=msg)
return response
except arista_exc.AristaRpcError:
raise
def _build_command(self, cmds, sync=False):
"""Build full EOS's openstack CLI command.
Helper method to add commands to enter and exit from openstack
CLI modes.
:param cmds: The openstack CLI commands that need to be executed
in the openstack config mode.
:param sync: This flag indicates that the region is being synced.
"""
region_cmd = 'region %s' % self.region
if sync:
region_cmd = self.cli_commands[const.CMD_REGION_SYNC]
full_command = [
'enable',
'configure',
'cvx',
'service openstack',
region_cmd,
]
full_command.extend(cmds)
return full_command
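# Illustrative result (hypothetical command; assumes the configured
# region is 'RegionOne' and sync=False):
#   _build_command(['tenant project-1'])
#   -> ['enable', 'configure', 'cvx', 'service openstack',
#       'region RegionOne', 'tenant project-1']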
def _run_openstack_cmds(self, commands, commands_to_log=None, sync=False):
"""Execute/sends a CAPI (Command API) command to EOS.
In this method, list of commands is appended with prefix and
postfix commands - to make is understandble by EOS.
:param commands : List of command to be executed on EOS.
:param commands_to_logs : This should be set to the command that is
logged. If it is None, then the commands
param is logged.
:param sync: This flags indicates that the region is being synced.
"""
full_command = self._build_command(commands, sync=sync)
if commands_to_log:
full_log_command = self._build_command(commands_to_log, sync=sync)
else:
full_log_command = None
return self._run_eos_cmds(full_command, full_log_command)
def _get_eos_master(self):
# Use guarded command to figure out if this is the master
cmd = ['show openstack agent uuid']
cvx = self._get_cvx_hosts()
# Identify which EOS instance is currently the master
for self._server_ip in cvx:
try:
response = self._send_eapi_req(cmds=cmd, commands_to_log=cmd)
if response is not None:
return self._server_ip
else:
continue # Try another EOS instance
except Exception:
raise
# Couldn't find an instance that is the leader and returning none
self._server_ip = None
msg = "Failed to reach the CVX master"
LOG.error(msg)
return None
def _api_host_url(self, host=""):
return ('https://%s:%s@%s/command-api' %
(quote(self._api_username()),
quote(self._api_password()),
host))
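# Example (hypothetical credentials): with user 'admin', password
# 's3cret' and host '192.0.2.1', this yields
# 'https://admin:s3cret@192.0.2.1/command-api', with username and
# password URL-quoted.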
def get_baremetal_physnet(self, context):
"""Returns dictionary which contains mac to hostname mapping"""
port = context.current
host_id = context.host
cmd = ['show network physical-topology hosts']
try:
response = self._run_eos_cmds(cmd)
binding_profile = port.get(portbindings.PROFILE, {})
link_info = binding_profile.get('local_link_information', [])
for link in link_info:
switch_id = link.get('switch_id')
for host in response[0]['hosts'].values():
if switch_id == host['name']:
physnet = utils.physnet(host['hostname'])
LOG.debug("get_physical_network: Physical Network for "
"%(host)s is %(physnet)s",
{'host': host_id, 'physnet': physnet})
return physnet
LOG.debug("Physical network not found for %(host)s",
{'host': host_id})
except Exception as exc:
LOG.error(_LE('command %(cmd)s failed with '
'%(exc)s'), {'cmd': cmd, 'exc': exc})
return None
def get_host_physnet(self, context):
"""Returns dictionary which contains physical topology information
for a given host_id
"""
host_id = utils.hostname(context.host)
cmd = ['show network physical-topology neighbors']
try:
response = self._run_eos_cmds(cmd)
# Get response for 'show network physical-topology neighbors'
# command
neighbors = response[0]['neighbors']
for neighbor in neighbors:
if host_id in neighbor:
physnet = utils.physnet(
neighbors[neighbor]['toPort'][0]['hostname'])
LOG.debug("get_physical_network: Physical Network for "
"%(host)s is %(physnet)s", {'host': host_id,
'physnet': physnet})
return physnet
LOG.debug("Physical network not found for %(host)s",
{'host': host_id})
except Exception as exc:
LOG.error(_LE('command %(cmd)s failed with '
'%(exc)s'), {'cmd': cmd, 'exc': exc})
return None
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/ml2/rpc/arista_json.py0000664000175000017500000001717000000000000025420 0ustar00zuulzuul00000000000000# Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import socket
from oslo_log import log as logging
from oslo_utils import excutils
import requests
import six
from six.moves.urllib.parse import quote
from networking_arista._i18n import _, _LI, _LW
from networking_arista.common import exceptions as arista_exc
from networking_arista.ml2.rpc.base import AristaRPCWrapperBase
LOG = logging.getLogger(__name__)
class AristaRPCWrapperJSON(AristaRPCWrapperBase):
def __init__(self):
super(AristaRPCWrapperJSON, self).__init__()
self.current_sync_name = None
def _get_url(self, host="", user="", password=""):
return ('https://%s:%s@%s/openstack/api/' %
(user, password, host))
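# Example (hypothetical values): _get_url('192.0.2.1', 'admin', 's3cret')
# returns 'https://admin:s3cret@192.0.2.1/openstack/api/'.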
def _api_host_url(self, host=""):
return self._get_url(host, quote(self._api_username()),
quote(self._api_password()))
def _send_request(self, host, path, method, data=None,
sanitized_data=None):
request_headers = {
'Content-Type': 'application/json',
'Accept': 'application/json',
'X-Sync-ID': self.current_sync_name
}
url = self._api_host_url(host=host) + path
# Don't log the password
log_url = self._get_url(host=host, user=self._api_username(),
password="*****") + path
resp = None
data = json.dumps(data)
try:
msg = (_('JSON request type: %(type)s url %(url)s data: '
'%(data)s sync_id: %(sync)s') %
{'type': method, 'url': log_url,
'data': sanitized_data or data,
'sync': self.current_sync_name})
LOG.info(msg)
func_lookup = {
'GET': requests.get,
'POST': requests.post,
'PUT': requests.put,
'PATCH': requests.patch,
'DELETE': requests.delete
}
func = func_lookup.get(method)
if not func:
LOG.warning(_LW('Unrecognized HTTP method %s'), method)
return None
resp = func(url, timeout=self.conn_timeout, verify=False,
data=data, headers=request_headers)
msg = (_LI('JSON response contains: %(code)s %(resp)s') %
{'code': resp.status_code,
'resp': resp.json()})
LOG.info(msg)
if resp.ok:
return resp.json()
else:
raise arista_exc.AristaRpcError(msg=resp.json().get('error'))
except requests.exceptions.ConnectionError:
msg = (_('Error connecting to %(url)s') % {'url': url})
LOG.warning(msg)
except requests.exceptions.ConnectTimeout:
msg = (_('Timed out connecting to API request to %(url)s') %
{'url': url})
LOG.warning(msg)
except requests.exceptions.Timeout:
msg = (_('Timed out during API request to %(url)s') %
{'url': url})
LOG.warning(msg)
except requests.exceptions.InvalidURL:
msg = (_('Ignore attempt to connect to invalid URL %(url)s') %
{'url': self._server_ip})
LOG.warning(msg)
except ValueError:
LOG.warning(_LW("Ignoring invalid JSON response: %s"), resp.text)
except Exception as error:
msg = six.text_type(error)
LOG.warning(msg)
# reraise the exception
with excutils.save_and_reraise_exception() as ctxt:
ctxt.reraise = True
return {} if method == 'GET' else None
def _check_if_cvx_leader(self, host):
url = 'agent/'
data = self._send_request(host, url, 'GET')
return False if not data else data.get('isLeader', False)
def _get_eos_master(self):
cvx = self._get_cvx_hosts()
for self._server_ip in cvx:
if self._check_if_cvx_leader(self._server_ip):
return self._server_ip
return None
def send_api_request(self, path, method, data=None, sanitized_data=None):
host = self._get_eos_master()
if not host:
msg = six.text_type("Could not find CVX leader")
LOG.info(msg)
self.set_cvx_unavailable()
raise arista_exc.AristaRpcError(msg=msg)
self.set_cvx_available()
return self._send_request(host, path, method, data, sanitized_data)
def _set_region_update_interval(self):
path = 'region/%s' % self.region
data = {
'name': self.region,
'syncInterval': self.sync_interval
}
self.send_api_request(path, 'PUT', [data])
def register_with_eos(self, sync=False):
self.create_region(self.region)
self._set_region_update_interval()
def get_cvx_uuid(self):
path = 'agent/'
try:
data = self.send_api_request(path, 'GET')
return data.get('uuid', None)
except arista_exc.AristaRpcError:
return None
def create_region(self, region):
path = 'region/'
data = {'name': region}
return self.send_api_request(path, 'POST', [data])
def get_region(self, name):
path = 'region/%s' % name
try:
regions = self.send_api_request(path, 'GET')
for region in regions:
if region['name'] == name:
return region
except arista_exc.AristaRpcError:
pass
return None
def sync_start(self):
LOG.info('Attempt to start sync')
self.current_sync_name = None
try:
region = self.get_region(self.region)
# If the region doesn't exist, we may need to create
# it in order for POSTs to the sync endpoint to succeed
if not region:
self.register_with_eos()
return False
if region.get('syncInterval') != self.sync_interval:
self._set_region_update_interval()
if region and region['syncStatus'] == 'syncInProgress':
LOG.info('Sync in progress, not syncing')
return False
req_id = self._get_random_name()
data = {
'requester': socket.gethostname().split('.')[0],
'requestId': req_id
}
path = 'region/' + self.region + '/sync'
self.send_api_request(path, 'POST', data)
self.current_sync_name = req_id
return True
except (KeyError, arista_exc.AristaRpcError):
LOG.info('Not syncing due to RPC error')
return False
def sync_end(self):
LOG.info('Attempting to end sync')
try:
path = 'region/' + self.region + '/sync'
self.send_api_request(path, 'DELETE')
self.current_sync_name = None
return True
except arista_exc.AristaRpcError:
LOG.info('Not ending sync due to RPC error')
return False
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/ml2/rpc/base.py0000664000175000017500000000675400000000000024024 0ustar00zuulzuul00000000000000# Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import base64
import os
from oslo_config import cfg
from oslo_log import log as logging
import six
from networking_arista._i18n import _, _LW
from networking_arista.common import exceptions as arista_exc
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class AristaRPCWrapperBase(object):
"""Wraps Arista JSON RPC.
All communications between Neutron and EOS are over JSON RPC.
EOS - operating system used on Arista hardware
Command API - JSON RPC API provided by Arista EOS
"""
def __init__(self):
self._validate_config()
self._server_ip = None
self.region = cfg.CONF.ml2_arista.region_name
self.sync_interval = cfg.CONF.ml2_arista.sync_interval
self.conn_timeout = cfg.CONF.ml2_arista.conn_timeout
self.eapi_hosts = cfg.CONF.ml2_arista.eapi_host.split(',')
# Indication of CVX availability in the driver.
self._cvx_available = True
# Reference to SyncService object which is set in AristaDriver
self.sync_service = None
def _validate_config(self):
if cfg.CONF.ml2_arista.get('eapi_host') == '':
msg = _('Required option eapi_host is not set')
LOG.error(msg)
raise arista_exc.AristaConfigError(msg=msg)
if cfg.CONF.ml2_arista.get('eapi_username') == '':
msg = _('Required option eapi_username is not set')
LOG.error(msg)
raise arista_exc.AristaConfigError(msg=msg)
def _api_username(self):
return cfg.CONF.ml2_arista.eapi_username
def _api_password(self):
return cfg.CONF.ml2_arista.eapi_password
def _get_random_name(self, length=10):
"""Returns a base64 encoded name."""
result = base64.b64encode(os.urandom(length)).translate(None, b'=+/')
return result if six.PY2 else result.decode('utf-8')
def _get_cvx_hosts(self):
cvx = []
if self._server_ip:
# If we know the master's IP, let's start with that
cvx.append(self._server_ip)
for h in self.eapi_hosts:
if h.strip() not in cvx:
cvx.append(h.strip())
return cvx
def set_cvx_unavailable(self):
self._cvx_available = False
if self.sync_service:
self.sync_service.force_sync()
def set_cvx_available(self):
self._cvx_available = True
def cvx_available(self):
return self._cvx_available
def check_cvx_availability(self):
try:
if self._get_eos_master():
self.set_cvx_available()
return True
except Exception as exc:
LOG.warning(_LW('%s when getting CVX master'), exc)
LOG.warning("Failed to initialize connection with CVX. Please "
"ensure CVX is reachable and running EOS 4.18.1 "
"or greater.")
self.set_cvx_unavailable()
return False
././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1693270226.644102
networking_arista-2023.1.0/networking_arista/ml2/security_groups/0000775000175000017500000000000000000000000025206 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/ml2/security_groups/__init__.py0000664000175000017500000000000000000000000027305 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/ml2/security_groups/arista_security_groups.py0000664000175000017500000001363000000000000032374 0ustar00zuulzuul00000000000000# Copyright (c) 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron_lib.api.definitions import portbindings
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib.plugins import directory
from neutron_lib.services import base as service_base
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
from networking_arista.ml2.security_groups import security_group_sync
from networking_arista.ml2.security_groups import switch_helper
LOG = logging.getLogger(__name__)
class AristaSecurityGroupHandler(
service_base.ServicePluginBase,
switch_helper.AristaSecurityGroupCallbackHelper):
"""Security Group Handler for Arista networking hardware.
Registers for notifications of security group updates.
Once a notification is received, it updates the Arista hardware
accordingly.
"""
def __init__(self):
super(AristaSecurityGroupHandler, self).__init__()
self.initialize_switch_endpoints()
self.subscribe()
self.add_worker(security_group_sync.AristaSecurityGroupSyncWorker())
def get_plugin_description(self):
return "Arista baremetal security group service plugin"
@classmethod
def get_plugin_type(cls):
return "arista_security_group"
@log_helpers.log_method_call
def create_security_group(self, resource, event, trigger, payload):
sg = payload.latest_state
rules = sg['security_group_rules']
sg_id = sg['id']
cmds = self.get_create_security_group_commands(sg_id, rules)
self.run_cmds_on_all_switches(cmds)
@log_helpers.log_method_call
def delete_security_group(self, resource, event, trigger, payload):
sg_id = payload.resource_id
cmds = self.get_delete_security_group_commands(sg_id)
self.run_cmds_on_all_switches(cmds)
@log_helpers.log_method_call
def create_security_group_rule(self, resource, event, trigger, payload):
sgr = payload.latest_state
sg_id = sgr['security_group_id']
cmds = self.get_create_security_group_rule_commands(sg_id, sgr)
self.run_cmds_on_all_switches(cmds)
@log_helpers.log_method_call
def delete_security_group_rule(self, resource, event, trigger, payload):
sgr_id = payload.resource_id
context = payload.context
plugin = directory.get_plugin()
sgr = plugin.get_security_group_rule(context, sgr_id)
sg_id = sgr['security_group_id']
cmds = self.get_delete_security_group_rule_commands(sg_id, sgr)
self.run_cmds_on_all_switches(cmds)
@staticmethod
def _valid_baremetal_port(port):
"""Check if port is a baremetal port with exactly one security group"""
if port.get(portbindings.VNIC_TYPE) != portbindings.VNIC_BAREMETAL:
return False
sgs = port.get('security_groups', [])
if len(sgs) == 0:
# Nothing to do
return False
if len(sgs) > 1:
LOG.warning('SG provisioning failed for %(port)s. Only one '
'SG may be applied per port.',
{'port': port['id']})
return False
return True
@log_helpers.log_method_call
def apply_security_group(self, resource, event, trigger, payload):
port = payload.latest_state
if not self._valid_baremetal_port(port):
return
# _valid_baremetal_port guarantees we have exactly one SG
sg_id = port.get('security_groups')[0]
profile = port.get(portbindings.PROFILE, {})
self._update_port_group_info(switches=self._get_switches(profile))
switch_cmds = self.get_apply_security_group_commands(sg_id, profile)
self.run_per_switch_cmds(switch_cmds)
@log_helpers.log_method_call
def remove_security_group(self, resource, event, trigger, payload):
port = payload.latest_state
if not self._valid_baremetal_port(port):
return
# _valid_baremetal_port guarantees we have exactly one SG
sg_id = port.get('security_groups')[0]
profile = port.get(portbindings.PROFILE, {})
self._update_port_group_info(switches=self._get_switches(profile))
switch_cmds = self.get_remove_security_group_commands(sg_id, profile)
self.run_per_switch_cmds(switch_cmds)
def subscribe(self):
# Subscribe to the events related to security groups and rules
registry.subscribe(
self.create_security_group, resources.SECURITY_GROUP,
events.AFTER_CREATE)
registry.subscribe(
self.delete_security_group, resources.SECURITY_GROUP,
events.AFTER_DELETE)
registry.subscribe(
self.create_security_group_rule, resources.SECURITY_GROUP_RULE,
events.AFTER_CREATE)
# We need to handle SG rules in before delete to be able to query
# the db for the rule details
registry.subscribe(
self.delete_security_group_rule, resources.SECURITY_GROUP_RULE,
events.BEFORE_DELETE)
# Apply SG rules to intfs on AFTER_UPDATE, remove them on AFTER_DELETE
registry.subscribe(
self.apply_security_group, resources.PORT, events.AFTER_UPDATE)
registry.subscribe(
self.remove_security_group, resources.PORT, events.AFTER_DELETE)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/ml2/security_groups/security_group_sync.py0000664000175000017500000000744500000000000031711 0ustar00zuulzuul00000000000000# Copyright (c) 2018 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron_lib import worker
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
from networking_arista.ml2.security_groups import switch_helper
LOG = logging.getLogger(__name__)
class AristaSecurityGroupSyncWorker(
worker.BaseWorker,
switch_helper.AristaSecurityGroupSyncHelper):
"""Worker that handles synchronizing Security Group ACLs on Arista switches
The worker periodically queries the neutron db and sends all security
groups, security group rules and security group port bindings to
registered switches.
"""
def __init__(self):
super(AristaSecurityGroupSyncWorker, self).__init__()
self.initialize_switch_endpoints()
self._loop = None
def start(self):
super(AristaSecurityGroupSyncWorker, self).start()
if self._loop is None:
self._loop = loopingcall.FixedIntervalLoopingCall(
self.synchronize
)
self._loop.start(interval=cfg.CONF.ml2_arista.sync_interval)
def stop(self):
if self._loop is not None:
self._loop.stop()
def wait(self):
if self._loop is not None:
self._loop.wait()
self._loop = None
def reset(self):
self.stop()
self.wait()
self.start()
def synchronize_switch(self, switch_ip, expected_acls, expected_bindings):
"""Update ACL config on a switch to match expected config
This is done as follows:
1. Get switch ACL config using show commands
2. Update expected bindings based on switch LAGs
3. Get commands to synchronize switch ACLs
4. Get commands to synchronize switch ACL bindings
5. Run sync commands on switch
"""
# Get ACL rules and interface mappings from the switch
switch_acls, switch_bindings = self._get_dynamic_acl_info(switch_ip)
# Adjust expected bindings for switch LAG config
expected_bindings = self.adjust_bindings_for_lag(switch_ip,
expected_bindings)
# Get synchronization commands
switch_cmds = list()
switch_cmds.extend(
self.get_sync_acl_cmds(switch_acls, expected_acls))
switch_cmds.extend(
self.get_sync_binding_cmds(switch_bindings, expected_bindings))
# Update switch config
self.run_openstack_sg_cmds(switch_cmds, self._switches.get(switch_ip))
def synchronize(self):
"""Perform sync of the security groups between ML2 and EOS."""
# Get expected ACLs and rules
expected_acls = self.get_expected_acls()
# Get expected interface to ACL mappings
all_expected_bindings = self.get_expected_bindings()
# Check that config is correct on every registered switch
for switch_ip in self._switches.keys():
expected_bindings = all_expected_bindings.get(switch_ip, [])
try:
self.synchronize_switch(switch_ip, expected_acls,
expected_bindings)
except Exception:
LOG.exception("Failed to sync SGs for %(switch)s",
{'switch': switch_ip})
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/ml2/security_groups/switch_helper.py0000664000175000017500000005104100000000000030421 0ustar00zuulzuul00000000000000# Copyright (c) 2018 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import json
import re
from neutron_lib import constants as n_const
from oslo_config import cfg
from oslo_log import log as logging
from networking_arista._i18n import _LI
from networking_arista.common import api
from networking_arista.common import constants as a_const
from networking_arista.common import db_lib
from networking_arista.common import exceptions as arista_exc
from networking_arista.common import utils
LOG = logging.getLogger(__name__)
class AristaSecurityGroupSwitchHelper(object):
"""Helper class for applying baremetal security groups on Arista switches
This helper class contains methods for adding and removing security
groups, security group rules and security group port bindings to and from
Arista switches.
"""
def initialize_switch_endpoints(self):
"""Initialize endpoints for switch communication"""
self._switches = {}
self._port_group_info = {}
self._validate_config()
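        # Each switch_info entry is assumed to be of the form
        # "<switch_ip>:<username>:<password>" (values here are
        # illustrative), e.g. "192.0.2.1:admin:secret"; a password
        # configured as '' is treated as an empty password.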
for s in cfg.CONF.ml2_arista.switch_info:
switch_ip, switch_user, switch_pass = s.split(":")
if switch_pass == "''":
switch_pass = ''
self._switches[switch_ip] = api.EAPIClient(
switch_ip,
switch_user,
switch_pass,
verify=False,
timeout=cfg.CONF.ml2_arista.conn_timeout)
self._check_dynamic_acl_support()
def _check_dynamic_acl_support(self):
"""Log an error if any switches don't support dynamic ACLs"""
cmds = ['ip access-list openstack-test dynamic',
'no ip access-list openstack-test']
for switch_ip, switch_client in self._switches.items():
try:
                self.run_openstack_sg_cmds(cmds, switch_client)
except Exception:
LOG.error("Switch %s does not support dynamic ACLs. SG "
"support will not be enabled on this switch.",
switch_ip)
def _validate_config(self):
"""Ensure at least one switch is configured"""
if len(cfg.CONF.ml2_arista.get('switch_info')) < 1:
            msg = _('Required option: when "sec_group_support" is enabled, '
                    'at least one switch must be specified')
LOG.exception(msg)
raise arista_exc.AristaConfigError(msg=msg)
def run_openstack_sg_cmds(self, commands, switch):
"""Execute/sends a CAPI (Command API) command to EOS.
In this method, list of commands is appended with prefix and
postfix commands - to make is understandble by EOS.
:param commands : List of command to be executed on EOS.
:param switch: Endpoint on the Arista switch to be configured
"""
if not switch:
LOG.exception("No client found for switch")
return []
if len(commands) == 0:
return []
command_start = ['enable', 'configure']
command_end = ['exit']
full_command = command_start + commands + command_end
return self._run_eos_cmds(full_command, switch)
def _run_eos_cmds(self, commands, switch):
"""Execute/sends a CAPI (Command API) command to EOS.
This method is useful for running show commands that require no
prefix or postfix commands.
:param commands : List of commands to be executed on EOS.
:param switch: Endpoint on the Arista switch to be configured
"""
LOG.info(_LI('Executing command on Arista EOS: %s'), commands)
try:
# this returns array of return values for every command in
# commands list
ret = switch.execute(commands)
LOG.info(_LI('Results of execution on Arista EOS: %s'), ret)
return ret
except Exception:
msg = (_('Error occurred while trying to execute '
'commands %(cmd)s on EOS %(host)s') %
{'cmd': commands, 'host': switch})
LOG.exception(msg)
@staticmethod
def _acl_name(name, direction):
"""Generate an arista specific name for this ACL.
Use a unique name so that OpenStack created ACLs
can be distinguishged from the user created ACLs
on Arista HW.
"""
direction = direction.upper()
return 'SG' + '-' + direction + '-' + name
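    # Illustrative example (sg_id below is assumed):
    #   _acl_name('0b0b04bc', n_const.INGRESS_DIRECTION)
    #       -> 'SG-INGRESS-0b0b04bc'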
@staticmethod
def _get_switchports(profile):
"""Return list of (switch_ip, interface) tuples from local_link_info"""
switchports = []
if profile.get('local_link_information'):
for link in profile['local_link_information']:
if 'switch_info' in link and 'port_id' in link:
switch = link['switch_info']
interface = link['port_id']
switchports.append((switch, interface))
else:
LOG.warning("Incomplete link information: %s", link)
return switchports
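    # Illustrative binding profile fragment (names assumed) consumed above:
    #   {'local_link_information': [
    #        {'switch_info': 'leaf1', 'port_id': 'Ethernet1'}]}
    #   -> [('leaf1', 'Ethernet1')]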
def _update_port_group_info(self, switches=None):
"""Refresh data on switch interfaces' port group membership"""
if switches is None:
switches = self._switches.keys()
for switch_ip in switches:
client = self._switches.get(switch_ip)
ret = self._run_eos_cmds(['show interfaces'], client)
if not ret or len(ret) == 0:
LOG.warning("Unable to retrieve interface info for %s",
switch_ip)
continue
intf_info = ret[0]
self._port_group_info[switch_ip] = intf_info.get('interfaces', {})
def _get_port_for_acl(self, port_id, switch):
"""Gets interface name for ACLs
Finds the Port-Channel name if port_id is in a Port-Channel, otherwise
ACLs are applied to Ethernet interface.
:param port_id: Name of port from ironic db
        :param switch: IP address of the switch to query
"""
all_intf_info = self._port_group_info.get(switch, {})
intf_info = all_intf_info.get(port_id, {})
member_info = intf_info.get('interfaceMembership', '')
        port_group_info = re.search(r'Member of (?P<port_group>\S+)',
                                    member_info)
if port_group_info:
port_id = port_group_info.group('port_group')
return port_id
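    # Illustrative mapping (interface names assumed): an interface whose
    # 'interfaceMembership' reads 'Member of Port-Channel10' is mapped to
    # 'Port-Channel10'; an interface with no membership is returned
    # unchanged.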
@staticmethod
def _supported_rule(protocol, ethertype):
"""Checks that the rule is an IPv4 rule of a supported protocol"""
if not protocol or protocol not in utils.SUPPORTED_SG_PROTOCOLS:
return False
if ethertype != n_const.IPv4:
return False
return True
def _format_rule(self, protocol, cidr, min_port, max_port, direction):
"""Get EOS formatted rule"""
if cidr is None:
cidr = 'any'
if direction == n_const.INGRESS_DIRECTION:
dst_ip = 'any'
src_ip = cidr
elif direction == n_const.EGRESS_DIRECTION:
dst_ip = cidr
src_ip = 'any'
if protocol == n_const.PROTO_NAME_ICMP:
rule = "permit icmp %s %s" % (src_ip, dst_ip)
if min_port:
rule += " %s" % (min_port)
if max_port:
rule += " %s" % (max_port)
else:
rule = "permit %s %s %s" % (protocol, src_ip, dst_ip)
if min_port and max_port:
rule += " range %s %s" % (min_port, max_port)
elif min_port and not max_port:
rule += " eq %s" % min_port
return rule
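    # Illustrative examples (values assumed):
    #   _format_rule('tcp', '10.0.0.0/24', 22, 22, 'ingress')
    #       -> 'permit tcp 10.0.0.0/24 any range 22 22'
    #   _format_rule('udp', None, 53, None, 'egress')
    #       -> 'permit udp any any eq 53'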
def _format_rules_for_eos(self, rules):
"""Format list of rules for EOS and sort into ingress/egress rules"""
in_rules = []
eg_rules = []
for rule in rules:
protocol = rule.get('protocol')
cidr = rule.get('remote_ip_prefix', 'any')
min_port = rule.get('port_range_min')
max_port = rule.get('port_range_max')
direction = rule.get('direction')
ethertype = rule.get('ethertype')
if not self._supported_rule(protocol, ethertype):
continue
formatted_rule = self._format_rule(protocol, cidr, min_port,
max_port, direction)
if rule['direction'] == n_const.INGRESS_DIRECTION:
in_rules.append(formatted_rule)
elif rule['direction'] == n_const.EGRESS_DIRECTION:
eg_rules.append(formatted_rule)
return in_rules, eg_rules
class AristaSecurityGroupCallbackHelper(AristaSecurityGroupSwitchHelper):
def run_cmds_on_all_switches(self, cmds):
"""Runs all cmds on all configured switches
This helper is used for ACL and rule creation/deletion as ACLs
and rules must exist on all switches.
"""
for switch in self._switches.values():
self.run_openstack_sg_cmds(cmds, switch)
def run_per_switch_cmds(self, switch_cmds):
"""Applies cmds to appropriate switches
This takes in a switch->cmds mapping and runs only the set of cmds
specified for a switch on that switch. This helper is used for
applying/removing ACLs to/from interfaces as this config will vary
from switch to switch.
"""
for switch_ip, cmds in switch_cmds.items():
switch = self._switches.get(switch_ip)
self.run_openstack_sg_cmds(cmds, switch)
def _get_switches(self, profile):
"""Get set of switches referenced in a port binding profile"""
switchports = self._get_switchports(profile)
switches = set([switchport[0] for switchport in switchports])
return switches
def get_create_security_group_commands(self, sg_id, sg_rules):
"""Commands for creating ACL"""
cmds = []
in_rules, eg_rules = self._format_rules_for_eos(sg_rules)
cmds.append("ip access-list %s dynamic" %
self._acl_name(sg_id, n_const.INGRESS_DIRECTION))
for in_rule in in_rules:
cmds.append(in_rule)
cmds.append("exit")
cmds.append("ip access-list %s dynamic" %
self._acl_name(sg_id, n_const.EGRESS_DIRECTION))
for eg_rule in eg_rules:
cmds.append(eg_rule)
cmds.append("exit")
return cmds
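    # Illustrative result (sg_id 'abc' and rule assumed) for a single
    # ingress TCP rule permitting 10.0.0.0/24 on port 22:
    #   ['ip access-list SG-INGRESS-abc dynamic',
    #    'permit tcp 10.0.0.0/24 any range 22 22',
    #    'exit',
    #    'ip access-list SG-EGRESS-abc dynamic',
    #    'exit']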
def get_delete_security_group_commands(self, sg_id):
"""Commands for deleting ACL"""
cmds = []
cmds.append("no ip access-list %s" %
self._acl_name(sg_id, n_const.INGRESS_DIRECTION))
cmds.append("no ip access-list %s" %
self._acl_name(sg_id, n_const.EGRESS_DIRECTION))
return cmds
def _get_rule_cmds(self, sg_id, sg_rule, delete=False):
"""Helper for getting add/delete ACL rule commands"""
rule_prefix = ""
if delete:
rule_prefix = "no "
in_rules, eg_rules = self._format_rules_for_eos([sg_rule])
cmds = []
if in_rules:
cmds.append("ip access-list %s dynamic" %
self._acl_name(sg_id, n_const.INGRESS_DIRECTION))
for in_rule in in_rules:
cmds.append(rule_prefix + in_rule)
cmds.append("exit")
if eg_rules:
cmds.append("ip access-list %s dynamic" %
self._acl_name(sg_id, n_const.EGRESS_DIRECTION))
for eg_rule in eg_rules:
cmds.append(rule_prefix + eg_rule)
cmds.append("exit")
return cmds
def get_create_security_group_rule_commands(self, sg_id, sg_rule):
"""Commands for adding rule to ACL"""
return self._get_rule_cmds(sg_id, sg_rule)
def get_delete_security_group_rule_commands(self, sg_id, sg_rule):
"""Commands for removing rule from ACLS"""
return self._get_rule_cmds(sg_id, sg_rule, delete=True)
def _get_interface_commands(self, sg_id, profile, delete=False):
"""Helper for getting interface ACL apply/remove commands"""
rule_prefix = ""
if delete:
rule_prefix = "no "
switch_cmds = {}
switchports = self._get_switchports(profile)
for switch_ip, intf in switchports:
cmds = []
intf_id = self._get_port_for_acl(intf, switch_ip)
cmds.append("interface %s" % intf_id)
name = self._acl_name(sg_id, n_const.INGRESS_DIRECTION)
cmds.append(rule_prefix + "ip access-group %s %s" %
(name, a_const.INGRESS_DIRECTION))
name = self._acl_name(sg_id, n_const.EGRESS_DIRECTION)
cmds.append(rule_prefix + "ip access-group %s %s" %
(name, a_const.EGRESS_DIRECTION))
cmds.append("exit")
if switch_ip not in switch_cmds.keys():
switch_cmds[switch_ip] = []
switch_cmds[switch_ip].extend(cmds)
return switch_cmds
def get_apply_security_group_commands(self, sg_id, profile):
"""Commands for applying ACL to interface"""
return self._get_interface_commands(sg_id, profile)
def get_remove_security_group_commands(self, sg_id, profile):
"""Commands for removing ACL from interface"""
return self._get_interface_commands(sg_id, profile, delete=True)
class AristaSecurityGroupSyncHelper(AristaSecurityGroupSwitchHelper):
def _parse_acl_config(self, acl_config):
"""Parse configured ACLs and rules
ACLs are returned as a dict of rule sets:
        {<acl_name>: set([<rule>, ...]),
         <acl_name>: set([<rule>, ...]),
         ...,
        }
"""
parsed_acls = dict()
for acl in acl_config['aclList']:
parsed_acls[acl['name']] = set()
for rule in acl['sequence']:
parsed_acls[acl['name']].add(rule['text'])
return parsed_acls
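    # Illustrative (assumed) 'show ip access-lists dynamic' fragment
    # consumed above:
    #   {'aclList': [{'name': 'SG-INGRESS-abc',
    #                 'sequence': [{'text': 'permit icmp any any'}]}]}
    #   -> {'SG-INGRESS-abc': {'permit icmp any any'}}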
def _parse_binding_config(self, binding_config):
"""Parse configured interface -> ACL bindings
Bindings are returned as a set of (intf, name, direction) tuples:
set([(intf1, acl_name, direction),
(intf2, acl_name, direction),
...,
])
"""
parsed_bindings = set()
for acl in binding_config['aclList']:
for intf in acl['configuredIngressIntfs']:
parsed_bindings.add((intf['name'], acl['name'],
a_const.INGRESS_DIRECTION))
for intf in acl['configuredEgressIntfs']:
parsed_bindings.add((intf['name'], acl['name'],
a_const.EGRESS_DIRECTION))
return parsed_bindings
def _get_dynamic_acl_info(self, switch_ip):
"""Retrieve ACLs, ACLs rules and interface bindings from switch"""
cmds = ["enable",
"show ip access-lists dynamic",
"show ip access-lists summary dynamic"]
switch = self._switches.get(switch_ip)
_, acls, bindings = self._run_eos_cmds(cmds, switch)
parsed_acls = self._parse_acl_config(acls)
parsed_bindings = self._parse_binding_config(bindings)
return parsed_acls, parsed_bindings
def get_expected_acls(self):
"""Query the neutron DB for Security Groups and Rules
Groups and rules are returned as a dict of rule sets:
        {<acl_name>: set([<rule>, ...]),
         <acl_name>: set([<rule>, ...]),
         ...,
        }
"""
security_groups = db_lib.get_security_groups()
expected_acls = collections.defaultdict(set)
for sg in security_groups:
in_rules, out_rules = self._format_rules_for_eos(sg['rules'])
ingress_acl_name = self._acl_name(sg['id'],
n_const.INGRESS_DIRECTION)
egress_acl_name = self._acl_name(sg['id'],
n_const.EGRESS_DIRECTION)
expected_acls[ingress_acl_name].update(in_rules)
expected_acls[egress_acl_name].update(out_rules)
return expected_acls
def get_expected_bindings(self):
"""Query the neutron DB for SG->switch interface bindings
Bindings are returned as a dict of bindings for each switch:
        {<switch_ip>: set([(intf1, acl_name, direction),
                           (intf2, acl_name, direction)]),
         <switch_ip>: set([(intf1, acl_name, direction)]),
         ...,
        }
"""
sg_bindings = db_lib.get_baremetal_sg_bindings()
all_expected_bindings = collections.defaultdict(set)
for sg_binding, port_binding in sg_bindings:
sg_id = sg_binding['security_group_id']
try:
binding_profile = json.loads(port_binding.profile)
except ValueError:
binding_profile = {}
switchports = self._get_switchports(binding_profile)
for switch, intf in switchports:
ingress_name = self._acl_name(sg_id, n_const.INGRESS_DIRECTION)
egress_name = self._acl_name(sg_id, n_const.EGRESS_DIRECTION)
all_expected_bindings[switch].add(
(intf, ingress_name, a_const.INGRESS_DIRECTION))
all_expected_bindings[switch].add(
(intf, egress_name, a_const.EGRESS_DIRECTION))
return all_expected_bindings
def adjust_bindings_for_lag(self, switch_ip, bindings):
"""Adjusting interface names for expected bindings where LAGs exist"""
# Get latest LAG info for switch
self._update_port_group_info([switch_ip])
# Update bindings to account for LAG info
adjusted_bindings = set()
for binding in bindings:
adjusted_bindings.add(
(self._get_port_for_acl(binding[0], switch_ip),) + binding[1:])
return adjusted_bindings
def get_sync_acl_cmds(self, switch_acls, expected_acls):
"""Returns the list of commands required synchronize switch ACLs
1. Identify unexpected ACLs and delete them
2. Iterate over expected ACLs
a. Add missing ACLs + all rules
b. Delete unexpected rules
c. Add missing rules
"""
switch_cmds = list()
# Delete any stale ACLs
acls_to_delete = (set(switch_acls.keys()) - set(expected_acls.keys()))
for acl in acls_to_delete:
switch_cmds.append('no ip access-list %s' % acl)
# Update or create ACLs and rules
for acl, expected_rules in expected_acls.items():
switch_rules = switch_acls.get(acl, set())
rules_to_delete = switch_rules - expected_rules
rules_to_add = expected_rules - switch_rules
# Check if ACL requires create or rule changes
if (acl in switch_acls and
len(rules_to_add | rules_to_delete) == 0):
continue
switch_cmds.append('ip access-list %s dynamic' % acl)
# Delete any stale rules
for rule in rules_to_delete:
switch_cmds.append('no ' + rule)
# Add any missing rules
for rule in rules_to_add:
switch_cmds.append(rule)
switch_cmds.append('exit')
return switch_cmds
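    # Illustrative diff (ACL names assumed): with
    #   switch_acls = {'SG-INGRESS-old': {'permit icmp any any'}}
    #   expected_acls = {'SG-INGRESS-abc': {'permit icmp any any'}}
    # the returned commands are:
    #   ['no ip access-list SG-INGRESS-old',
    #    'ip access-list SG-INGRESS-abc dynamic',
    #    'permit icmp any any',
    #    'exit']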
def get_sync_binding_cmds(self, switch_bindings, expected_bindings):
"""Returns the list of commands required to synchronize ACL bindings
1. Delete any unexpected bindings
2. Add any missing bindings
"""
switch_cmds = list()
# Update any necessary switch interface ACLs
bindings_to_delete = switch_bindings - expected_bindings
bindings_to_add = expected_bindings - switch_bindings
for intf, acl, direction in bindings_to_delete:
switch_cmds.extend(['interface %s' % intf,
'no ip access-group %s %s' %
(acl, direction),
'exit'])
for intf, acl, direction in bindings_to_add:
switch_cmds.extend(['interface %s' % intf,
'ip access-group %s %s' % (acl, direction),
'exit'])
return switch_cmds
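    # Illustrative diff (interface and ACL names assumed, and assuming
    # a_const.INGRESS_DIRECTION == 'in'): replacing binding
    #   ('Ethernet1', 'SG-INGRESS-old', 'in')
    # with
    #   ('Ethernet1', 'SG-INGRESS-abc', 'in')
    # yields:
    #   ['interface Ethernet1', 'no ip access-group SG-INGRESS-old in',
    #    'exit',
    #    'interface Ethernet1', 'ip access-group SG-INGRESS-abc in',
    #    'exit']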
././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1693270226.644102
networking_arista-2023.1.0/networking_arista/ml2/type_drivers/0000775000175000017500000000000000000000000024457 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/ml2/type_drivers/__init__.py0000664000175000017500000000000000000000000026556 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/ml2/type_drivers/driver_helpers.py0000664000175000017500000001574300000000000030060 0ustar00zuulzuul00000000000000# Copyright (c) 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from neutron_lib import constants as n_const
from neutron_lib import context
from neutron_lib.db import api as db_api
from oslo_log import log
from six import moves
from neutron.objects import network as network_obj
from neutron.objects.plugins.ml2 import vlanallocation as vlanalloc
from networking_arista._i18n import _LI
from networking_arista.common.constants import EOS_UNREACHABLE_MSG
from networking_arista.common import exceptions as arista_exc
LOG = log.getLogger(__name__)
class VlanSyncService(object):
"""Sync vlan assignment from CVX into the OpenStack db."""
def __init__(self, rpc_wrapper):
self._rpc = rpc_wrapper
self._force_sync = True
self._vlan_assignment_uuid = None
self._assigned_vlans = dict()
def force_sync(self):
self._force_sync = True
def _parse_vlan_ranges(self, vlan_pool, return_as_ranges=False):
vlan_ids = set()
if return_as_ranges:
vlan_ids = list()
if not vlan_pool:
return vlan_ids
vlan_ranges = vlan_pool.split(',')
for vlan_range in vlan_ranges:
endpoints = vlan_range.split('-')
if len(endpoints) == 2:
vlan_min = int(endpoints[0])
vlan_max = int(endpoints[1])
if return_as_ranges:
vlan_ids.append((vlan_min, vlan_max))
else:
vlan_ids |= set(moves.range(vlan_min, vlan_max + 1))
elif len(endpoints) == 1:
single_vlan = int(endpoints[0])
if return_as_ranges:
vlan_ids.append((single_vlan, single_vlan))
else:
vlan_ids.add(single_vlan)
return vlan_ids
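    # Illustrative examples (pool string assumed):
    #   _parse_vlan_ranges('100-102,200') -> {100, 101, 102, 200}
    #   _parse_vlan_ranges('100-102,200', return_as_ranges=True)
    #       -> [(100, 102), (200, 200)]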
def get_network_vlan_ranges(self):
return self._assigned_vlans
def _sync_required(self):
try:
if not self._force_sync and self._region_in_sync():
LOG.info(_LI('VLANs are in sync!'))
return False
except arista_exc.AristaRpcError:
LOG.warning(EOS_UNREACHABLE_MSG)
self._force_sync = True
return True
def _region_in_sync(self):
eos_vlan_assignment_uuid = self._rpc.get_vlan_assignment_uuid()
return (self._vlan_assignment_uuid and
(self._vlan_assignment_uuid['uuid'] ==
eos_vlan_assignment_uuid['uuid']))
def _set_vlan_assignment_uuid(self):
try:
self._vlan_assignment_uuid = self._rpc.get_vlan_assignment_uuid()
except arista_exc.AristaRpcError:
self._force_sync = True
def do_synchronize(self):
if not self._sync_required():
return
self.synchronize()
self._set_vlan_assignment_uuid()
def synchronize(self):
LOG.info(_LI('Syncing VLANs with EOS'))
try:
self._rpc.check_vlan_type_driver_commands()
vlan_pool = self._rpc.get_vlan_allocation()
except arista_exc.AristaRpcError:
LOG.warning(EOS_UNREACHABLE_MSG)
self._force_sync = True
return
LOG.debug('vlan_pool %(vlan)s', {'vlan': vlan_pool})
self._assigned_vlans = {
'default': self._parse_vlan_ranges(vlan_pool['assignedVlans']),
}
cvx_available_vlans = frozenset(
self._parse_vlan_ranges(vlan_pool['availableVlans']))
cvx_used_vlans = frozenset(
self._parse_vlan_ranges(vlan_pool['allocatedVlans']))
# Force vlan sync if assignedVlans is empty or availableVlans and
# allocatedVlans both are empty in the vlan_pool
        if not (self._assigned_vlans['default'] and
                (cvx_available_vlans or cvx_used_vlans)):
LOG.info(_LI('Force sync, vlan pool is empty'))
self.force_sync()
else:
self._force_sync = False
allocated_vlans = {}
ctx = context.get_admin_context()
with db_api.CONTEXT_READER.using(ctx):
for physical_network in self._assigned_vlans:
filter = {
'network_type': n_const.TYPE_VLAN,
'physical_network': physical_network,
}
objs = network_obj.NetworkSegment.get_objects(ctx, **filter)
allocated_vlans.update(
{physical_network: [obj.segmentation_id for obj in objs]})
LOG.debug('allocated vlans %(vlan)s', {'vlan': allocated_vlans})
with db_api.CONTEXT_WRITER.using(ctx):
physnets = vlanalloc.VlanAllocation.get_physical_networks(ctx)
physnets_unconfigured = physnets - set(self._assigned_vlans)
if physnets_unconfigured:
vlanalloc.VlanAllocation.delete_physical_networks(
ctx, physnets_unconfigured)
allocations = collections.defaultdict(list)
for alloc in vlanalloc.VlanAllocation.get_objects(ctx):
allocations[alloc.physical_network].append(alloc)
for physical_network, vlan_ranges in self._assigned_vlans.items():
if physical_network in allocations:
for alloc in allocations[physical_network]:
try:
vlan_ranges.remove(alloc.vlan_id)
except KeyError:
alloc.delete()
vlanalloc.VlanAllocation.bulk_create(ctx, physical_network,
vlan_ranges)
LOG.debug('vlan_ranges: %(vlan)s', {'vlan': vlan_ranges})
for vlan_id in vlan_ranges:
allocated = (vlan_id not in cvx_available_vlans and
(vlan_id in cvx_used_vlans or vlan_id in
allocated_vlans[physical_network]))
LOG.debug('Updating %(phys)s %(vlan)s %(alloc)s',
{'phys': physical_network, 'vlan': vlan_id,
'alloc': allocated})
vlanalloc.VlanAllocation.update_objects(ctx,
values={'allocated': allocated,
'vlan_id': vlan_id,
'physical_network': physical_network},
physical_network=physical_network,
vlan_id=vlan_id)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/ml2/type_drivers/type_arista_vlan.py0000664000175000017500000000745200000000000030405 0ustar00zuulzuul00000000000000# Copyright (c) 2016 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import threading
from oslo_config import cfg
from oslo_log import log
from neutron.db.models.plugins.ml2 import vlanallocation
from neutron.plugins.ml2.drivers import type_vlan
from neutron_lib.db import api as db_api
from networking_arista._i18n import _LI
from networking_arista.common import exceptions as exc
from networking_arista.ml2.rpc.arista_eapi import AristaRPCWrapperEapi
from networking_arista.ml2.type_drivers import driver_helpers
LOG = log.getLogger(__name__)
cfg.CONF.import_group('arista_type_driver', 'networking_arista.common.config')
class AristaVlanTypeDriver(type_vlan.VlanTypeDriver):
"""Manage state for VLAN networks with ML2.
The VlanTypeDriver implements the 'vlan' network_type. VLAN
network segments provide connectivity between VMs and other
devices using any connected IEEE 802.1Q conformant
physical_network segmented into virtual networks via IEEE 802.1Q
headers. Up to 4094 VLAN network segments can exist on each
available physical_network.
"""
def __init__(self):
super(AristaVlanTypeDriver, self).__init__()
self.rpc = AristaRPCWrapperEapi()
self.sync_service = driver_helpers.VlanSyncService(self.rpc)
self.network_vlan_ranges = dict()
self.sync_timeout = cfg.CONF.arista_type_driver['sync_interval']
def initialize(self):
self.rpc.check_vlan_type_driver_commands()
self._synchronization_thread()
LOG.info(_LI("AristaVlanTypeDriver initialization complete"))
def _synchronization_thread(self):
self.sync_service.do_synchronize()
self.timer = threading.Timer(self.sync_timeout,
self._synchronization_thread)
self.timer.start()
def _update_network_vlan_ranges(self):
session = db_api.get_reader_session()
va = vlanallocation.VlanAllocation
with session.begin(subtransactions=True):
vlans = session.query(va).filter(va.physical_network == 'default')
self.network_vlan_ranges = {
'default': set((vlan.vlan_id, vlan.vlan_id)
for vlan in vlans.all())}
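    # Note: network_vlan_ranges maps each physical network to a set of
    # (min, max) tuples; each allocation above is represented as a
    # degenerate single-VLAN range such as (100, 100).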
def validate_provider_segment(self, segment):
self._update_network_vlan_ranges()
super(AristaVlanTypeDriver, self).validate_provider_segment(segment)
def allocate_tenant_segment(self, context, filters=None):
self._update_network_vlan_ranges()
return super(AristaVlanTypeDriver,
self).allocate_tenant_segment(context, filters=filters)
def release_segment(self, context, segment):
self._update_network_vlan_ranges()
return super(AristaVlanTypeDriver,
self).release_segment(context, segment)
def allocate_fully_specified_segment(self, context, **raw_segment):
with db_api.CONTEXT_READER.using(context):
alloc = (
context.session.query(self.model).filter_by(**raw_segment).
first())
if not alloc:
raise exc.VlanUnavailable(**raw_segment)
return super(AristaVlanTypeDriver,
self).allocate_fully_specified_segment(
context, **raw_segment)
././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1693270226.644102
networking_arista-2023.1.0/networking_arista/tests/0000775000175000017500000000000000000000000022410 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/tests/__init__.py0000664000175000017500000000000000000000000024507 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/tests/base.py0000664000175000017500000000143200000000000023674 0ustar00zuulzuul00000000000000# -*- coding: utf-8 -*-
# Copyright 2010-2011 OpenStack Foundation
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslotest import base
class TestCase(base.BaseTestCase):
"""Test case base class for all unit tests."""
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/tests/test_networking_arista.py0000664000175000017500000000145000000000000027553 0ustar00zuulzuul00000000000000# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
test_networking_arista
----------------------------------
Tests for `networking_arista` module.
"""
from networking_arista.tests import base
class TestNetworking_arista(base.TestCase):
def test_something(self):
pass
././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1693270226.644102
networking_arista-2023.1.0/networking_arista/tests/unit/0000775000175000017500000000000000000000000023367 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/tests/unit/__init__.py0000664000175000017500000000126700000000000025506 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
cfg.CONF.use_stderr = False
././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1693270226.644102
networking_arista-2023.1.0/networking_arista/tests/unit/common/0000775000175000017500000000000000000000000024657 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/tests/unit/common/__init__.py0000664000175000017500000000000000000000000026756 0ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/tests/unit/common/test_api.py0000664000175000017500000002161300000000000027044 0ustar00zuulzuul00000000000000# Copyright (c) 2017 Arista Networks, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Importing neutron.tests.common triggers neutron's eventlet monkey_patch util.
# Without this, the import of requests below will result in an unpatched
# requests module running in all tests, which will cause strange failures such
# as maximum recursion depth exceeded in SSL
import neutron.tests.common # noqa: F401
import mock
import requests
from requests import exceptions as requests_exc
import testtools
from networking_arista.common import api
class TestEAPIClientInit(testtools.TestCase):
def test_basic_init(self):
host_ip = '10.20.30.40'
client = api.EAPIClient(host_ip)
self.assertEqual(client.host, host_ip)
self.assertEqual(client.url, 'https://10.20.30.40/command-api')
self.assertDictContainsSubset(
{'Content-Type': 'application/json', 'Accept': 'application/json'},
client.session.headers
)
def test_init_enable_verify(self):
client = api.EAPIClient('10.0.0.1', verify=True)
self.assertTrue(client.session.verify)
def test_init_auth(self):
client = api.EAPIClient('10.0.0.1', username='user', password='pass')
self.assertEqual(client.session.auth, ('user', 'pass'))
def test_init_timeout(self):
client = api.EAPIClient('10.0.0.1', timeout=99)
self.assertEqual(client.timeout, 99)
def test_make_url(self):
url = api.EAPIClient._make_url('1.2.3.4')
self.assertEqual(url, 'https://1.2.3.4/command-api')
def test_make_url_http(self):
url = api.EAPIClient._make_url('5.6.7.8', 'http')
self.assertEqual(url, 'http://5.6.7.8/command-api')
class TestEAPIClientExecute(testtools.TestCase):
def setUp(self):
super(TestEAPIClientExecute, self).setUp()
mock.patch('requests.Session.post').start()
self.mock_log = mock.patch.object(api, 'LOG').start()
self.mock_json_dumps = mock.patch.object(api.json, 'dumps').start()
self.addCleanup(mock.patch.stopall)
self.client = api.EAPIClient('10.0.0.1', timeout=99)
def _test_execute_helper(self, commands, commands_to_log=None):
expected_data = {
'id': 'Networking Arista Driver',
'method': 'runCmds',
'jsonrpc': '2.0',
'params': {
'timestamps': False,
'format': 'json',
'version': 1,
'cmds': commands
}
}
self.client.session.post.assert_called_once_with(
'https://10.0.0.1/command-api',
data=self.mock_json_dumps.return_value,
timeout=99
)
self.mock_log.info.assert_has_calls(
[
mock.call(
mock.ANY,
{
'ip': '10.0.0.1',
'data': self.mock_json_dumps.return_value
}
)
]
)
log_data = dict(expected_data)
log_data['params'] = dict(expected_data['params'])
log_data['params']['cmds'] = commands_to_log or commands
self.mock_json_dumps.assert_has_calls(
[
mock.call(log_data),
mock.call(expected_data)
]
)
def test_command_prep(self):
commands = ['enable']
self.client.execute(commands)
self._test_execute_helper(commands)
def test_commands_to_log(self):
commands = ['config', 'secret']
commands_to_log = ['config', '******']
self.client.execute(commands, commands_to_log)
self._test_execute_helper(commands, commands_to_log)
def _test_execute_error_helper(self, raise_exception, expected_exception,
warning_has_params=False):
commands = ['config']
self.client.session.post.side_effect = raise_exception
self.assertRaises(
expected_exception,
self.client.execute,
commands
)
self._test_execute_helper(commands)
if warning_has_params:
args = (mock.ANY, mock.ANY)
else:
args = (mock.ANY,)
self.mock_log.warning.assert_called_once_with(*args)
def test_request_connection_error(self):
self._test_execute_error_helper(
requests_exc.ConnectionError,
api.arista_exc.AristaRpcError
)
def test_request_connect_timeout(self):
self._test_execute_error_helper(
requests_exc.ConnectTimeout,
api.arista_exc.AristaRpcError
)
def test_request_timeout(self):
self._test_execute_error_helper(
requests_exc.Timeout,
api.arista_exc.AristaRpcError
)
def test_request_connect_InvalidURL(self):
self._test_execute_error_helper(
requests_exc.InvalidURL,
api.arista_exc.AristaRpcError
)
def test_request_other_exception(self):
class OtherException(Exception):
pass
self._test_execute_error_helper(
OtherException,
OtherException,
warning_has_params=True
)
def _test_response_helper(self, response_data):
mock_response = mock.MagicMock(requests.Response)
mock_response.status_code = requests.status_codes.codes.OK
mock_response.json.return_value = response_data
self.client.session.post.return_value = mock_response
def test_response_success(self):
mock_response = mock.MagicMock(requests.Response)
mock_response.status_code = requests.status_codes.codes.OK
mock_response.json.return_value = {'result': mock.sentinel}
self.client.session.post.return_value = mock_response
retval = self.client.execute(['enable'])
self.assertEqual(retval, mock.sentinel)
def test_response_json_error(self):
mock_response = mock.MagicMock(requests.Response)
mock_response.status_code = requests.status_codes.codes.OK
mock_response.json.side_effect = ValueError
self.client.session.post.return_value = mock_response
retval = self.client.execute(['enable'])
self.assertIsNone(retval)
self.mock_log.info.assert_has_calls([mock.call(mock.ANY)])
def _test_response_format_error_helper(self, bad_response):
mock_response = mock.MagicMock(requests.Response)
mock_response.status_code = requests.status_codes.codes.OK
mock_response.json.return_value = bad_response
self.client.session.post.return_value = mock_response
self.assertRaises(
api.arista_exc.AristaRpcError,
self.client.execute,
['enable']
)
self.mock_log.info.assert_has_calls([mock.call(mock.ANY)])
def test_response_format_error(self):
self._test_response_format_error_helper({})
def test_response_unknown_error_code(self):
self._test_response_format_error_helper(
{'error': {'code': 999, 'data': []}}
)
def test_response_known_error_code(self):
self._test_response_format_error_helper(
{'error': {'code': 1002, 'data': []}}
)
def test_response_known_error_code_data_is_not_dict(self):
self._test_response_format_error_helper(
{'error': {'code': 1002, 'data': ['some text']}}
)
def test_response_not_cvx_leader(self):
mock_response = mock.MagicMock(requests.Response)
mock_response.status_code = requests.status_codes.codes.OK
mock_response.json.return_value = {
'error': {
'code': 1002,
'data': [{'errors': [api.ERR_CVX_NOT_LEADER]}]
}
}
self.client.session.post.return_value = mock_response
retval = self.client.execute(['enable'])
self.assertIsNone(retval)
def test_response_other_exception(self):
class OtherException(Exception):
pass
mock_response = mock.MagicMock(requests.Response)
mock_response.status_code = requests.status_codes.codes.OK
mock_response.json.return_value = 'text'
self.client.session.post.return_value = mock_response
self.assertRaises(
TypeError,
self.client.execute,
['enable']
)
self.mock_log.warning.assert_has_calls(
[
mock.call(mock.ANY, {'error': mock.ANY})
]
)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/tests/unit/common/test_db_lib.py0000664000175000017500000003443400000000000027513 0ustar00zuulzuul00000000000000# Copyright (c) 2017 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron_lib.plugins import constants as plugin_constants
from neutron_lib.plugins import directory
from oslo_utils import importutils
from neutron.tests.unit import testlib_api
from networking_arista.common import db_lib
from networking_arista.tests.unit import utils
class DbLibTest(testlib_api.SqlTestCase):
"""Test cases for database helper functions."""
def setUp(self):
super(DbLibTest, self).setUp()
plugin_klass = importutils.import_class(
"neutron.db.db_base_plugin_v2.NeutronDbPluginV2")
directory.add_plugin(plugin_constants.CORE, plugin_klass())
def test_get_tenants_empty(self):
tenants = db_lib.get_tenants()
self.assertEqual(tenants, [])
def test_get_tenants_from_networks(self):
tenant_1_id = 't1'
tenant_2_id = 't2'
utils.create_networks([{'id': 'n1',
'project_id': tenant_1_id},
{'id': 'n2',
'project_id': tenant_2_id}])
tenants = db_lib.get_tenants()
expected_tenants = [{'project_id': tenant_1_id},
{'project_id': tenant_2_id}]
self.assertItemsEqual(tenants, expected_tenants)
def test_get_network_with_no_tenant(self):
tenant_1_id = 't1'
tenant_2_id = ''
utils.create_networks([{'id': 'n1',
'project_id': tenant_1_id},
{'id': 'n2',
'project_id': tenant_2_id}])
networks = [getattr(net, 'Network') for net in db_lib.get_networks()]
project_list = [net.project_id for net in networks]
self.assertIn(tenant_1_id, project_list)
self.assertIn(tenant_2_id, project_list)
def test_get_tenants_with_shared_network_ports(self):
tenant_1_id = 't1'
tenant_2_id = 't2'
utils.create_networks([{'id': 'n1',
'project_id': tenant_1_id}])
utils.create_ports([{'admin_state_up': True,
'status': 'ACTIVE',
'device_id': 'vm1',
'device_owner': 'compute:None',
'tenant_id': tenant_2_id,
'id': 'p1',
'network_id': 'n1',
'mac_address': '00:00:00:00:00:01'}])
tenants = db_lib.get_tenants()
expected_tenants = [{'project_id': tenant_1_id},
{'project_id': tenant_2_id}]
self.assertItemsEqual(tenants, expected_tenants)
def test_get_tenants_uniqueness(self):
tenant_1_id = 't1'
tenant_2_id = 't2'
utils.create_networks([{'id': 'n1',
'project_id': tenant_1_id},
{'id': 'n2',
'project_id': tenant_2_id}])
utils.create_ports([{'admin_state_up': True,
'status': 'ACTIVE',
'device_id': 'vm1',
'device_owner': 'compute:None',
'tenant_id': tenant_1_id,
'id': 'p1',
'network_id': 'n1',
'mac_address': '00:00:00:00:00:01'},
{'admin_state_up': True,
'status': 'ACTIVE',
'device_id': 'vm2',
'device_owner': 'compute:None',
'tenant_id': tenant_2_id,
'id': 'p2',
'network_id': 'n2',
'mac_address': '00:00:00:00:00:02'}])
tenants = db_lib.get_tenants()
expected_tenants = [{'project_id': tenant_1_id},
{'project_id': tenant_2_id}]
self.assertItemsEqual(tenants, expected_tenants)
def test_get_tenants_port_network_union(self):
tenant_1_id = 't1'
tenant_2_id = 't2'
tenant_3_id = 't3'
tenant_4_id = 't4'
utils.create_networks([{'id': 'n1',
'project_id': tenant_1_id},
{'id': 'n2',
'project_id': tenant_2_id}])
utils.create_ports([{'admin_state_up': True,
'status': 'ACTIVE',
'device_id': 'vm1',
'device_owner': 'compute:None',
'tenant_id': tenant_3_id,
'id': 'p1',
'network_id': 'n1',
'mac_address': '00:00:00:00:00:01'},
{'admin_state_up': True,
'status': 'ACTIVE',
'device_id': 'vm2',
'device_owner': 'compute:None',
'tenant_id': tenant_4_id,
'id': 'p2',
'network_id': 'n2',
'mac_address': '00:00:00:00:00:02'}])
tenants = db_lib.get_tenants()
expected_tenants = [{'project_id': tenant_1_id},
{'project_id': tenant_2_id},
{'project_id': tenant_3_id},
{'project_id': tenant_4_id}]
self.assertItemsEqual(tenants, expected_tenants)
def test_tenant_provisioned(self):
tenant_1_id = 't1'
port_1_id = 'p1'
tenant_2_id = 't2'
port_2_id = 'p2'
network_id = 'network-id'
self.assertFalse(db_lib.tenant_provisioned(tenant_1_id))
self.assertFalse(db_lib.tenant_provisioned(tenant_2_id))
utils.create_networks([{'id': network_id,
'project_id': tenant_1_id}])
self.assertTrue(db_lib.tenant_provisioned(tenant_1_id))
self.assertFalse(db_lib.tenant_provisioned(tenant_2_id))
utils.create_ports([{'admin_state_up': True,
'status': 'ACTIVE',
'device_id': 'vm1',
'device_owner': 'compute:None',
'tenant_id': tenant_1_id,
'id': port_1_id,
'network_id': network_id,
'mac_address': '00:00:00:00:00:01'}])
self.assertTrue(db_lib.tenant_provisioned(tenant_1_id))
self.assertFalse(db_lib.tenant_provisioned(tenant_2_id))
utils.create_ports([{'admin_state_up': True,
'status': 'ACTIVE',
'device_id': 'vm2',
'device_owner': 'compute:None',
'tenant_id': tenant_2_id,
'id': port_2_id,
'network_id': network_id,
'mac_address': '00:00:00:00:00:02'}])
self.assertTrue(db_lib.tenant_provisioned(tenant_1_id))
self.assertTrue(db_lib.tenant_provisioned(tenant_2_id))
utils.delete_port(port_1_id)
utils.delete_port(port_2_id)
utils.delete_network(network_id)
self.assertFalse(db_lib.tenant_provisioned(tenant_1_id))
self.assertFalse(db_lib.tenant_provisioned(tenant_2_id))
def test_get_segments(self):
net_id1 = 'n1'
net_id2 = 'n2'
utils.create_networks([{'id': net_id1,
'project_id': 'p1'},
{'id': net_id2,
'project_id': ''}])
utils.create_segments([{'id': 's1',
'network_id': net_id1,
'network_type': 'vlan',
'segmentation_id': 100,
'is_dynamic': False},
{'id': 's2',
'network_id': net_id2,
'network_type': 'vlan',
'segmentation_id': 200,
'is_dynamic': True}])
segments = db_lib.get_segments()
netid_list = [seg.network_id for seg in segments]
self.assertIn(net_id1, netid_list)
self.assertIn(net_id2, netid_list)
def test_segment_is_dynamic(self):
static_segment_id = 's1'
dynamic_segment_id = 's2'
utils.create_networks([{'id': 'n1',
'project_id': 't1'}])
utils.create_segments([{'id': static_segment_id,
'network_id': 'n1',
'network_type': 'vlan',
'segmentation_id': 100,
'is_dynamic': False},
{'id': dynamic_segment_id,
'network_id': 'n1',
'network_type': 'vlan',
'segmentation_id': 200,
'is_dynamic': True}])
self.assertFalse(db_lib.segment_is_dynamic(static_segment_id))
self.assertTrue(db_lib.segment_is_dynamic(dynamic_segment_id))
def test_segment_bound(self):
bound_segment_id = 's1'
unbound_segment_id = 's2'
utils.create_networks([{'id': 'n1',
'project_id': 't1'}])
utils.create_segments([{'id': bound_segment_id,
'network_id': 'n1',
'network_type': 'vlan',
'segmentation_id': 100,
'is_dynamic': True},
{'id': unbound_segment_id,
'network_id': 'n1',
'network_type': 'vlan',
'segmentation_id': 200,
'is_dynamic': True}])
utils.create_ports([{'admin_state_up': True,
'status': 'ACTIVE',
'device_id': 'vm1',
'device_owner': 'compute:None',
'binding': {'host': 'host',
'vif_type': 'ovs',
'vnic_type': 'normal'},
'tenant_id': 't1',
'id': 'p1',
'network_id': 'n1',
'mac_address': '01:02:03:04:05:06',
'binding_levels': [
{'host': 'host',
'segment_id': bound_segment_id,
'level': 0,
'driver': 'arista'}]}])
self.assertTrue(db_lib.segment_bound(bound_segment_id))
self.assertFalse(db_lib.segment_bound(unbound_segment_id))
def test_empty_device_id(self):
utils.create_networks([{'id': 'n1',
'project_id': 't1'}])
utils.create_segments([{'id': 's1',
'network_id': 'n1',
'network_type': 'vlan',
'segmentation_id': 100,
'is_dynamic': True}])
utils.create_ports([{'admin_state_up': True,
'status': 'ERROR',
'device_id': '',
'device_owner': 'compute:None',
'binding': {'host': 'host',
'vif_type': 'other',
'vnic_type': 'baremetal'},
'tenant_id': 't1',
'id': 'p1',
'network_id': 'n1',
'mac_address': '01:02:03:04:05:06',
'binding_levels': []},
{'admin_state_up': True,
'status': 'ACTIVE',
'device_id': 'bm1',
'device_owner': 'compute:None',
'binding': {'host': 'host',
'vif_type': 'other',
'vnic_type': 'baremetal'},
'tenant_id': 't1',
'id': 'p2',
'network_id': 'n1',
'mac_address': '01:02:03:04:05:07',
'binding_levels': [
{'host': 'host',
'segment_id': 's1',
'level': 0,
'driver': 'arista'}]},
{'admin_state_up': True,
'status': 'ACTIVE',
'device_id': 'bm2',
'device_owner': 'compute:None',
'binding': {'host': 'host',
'vif_type': 'other',
'vnic_type': 'baremetal'},
'tenant_id': 't1',
'id': 'p3',
'network_id': 'n1',
'mac_address': '01:02:03:04:05:08',
'binding_levels': [
{'host': 'host',
'segment_id': 's1',
'level': 0,
'driver': 'arista'}]}])
expected_instances = []
instances = db_lib.get_baremetal_instances(instance_id='')
self.assertItemsEqual(instances, expected_instances)
././@PaxHeader0000000000000000000000000000003300000000000011451 xustar000000000000000027 mtime=1693270226.644102
networking_arista-2023.1.0/networking_arista/tests/unit/l3Plugin/0000775000175000017500000000000000000000000025064 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/tests/unit/l3Plugin/__init__.py0000664000175000017500000000126700000000000027203 0ustar00zuulzuul00000000000000# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
cfg.CONF.use_stderr = False
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/networking_arista/tests/unit/l3Plugin/test_arista_l3_driver.py0000664000175000017500000016577600000000000031757 0ustar00zuulzuul00000000000000# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import itertools
import mock
from oslo_config import cfg
from neutron.tests import base
from neutron.tests.unit.db import test_db_base_plugin_v2
from neutron_lib import context
from neutron_lib.plugins import constants
from neutron_lib.plugins import directory
from networking_arista.common import exceptions as arista_exc
from networking_arista.l3Plugin import arista_l3_driver as arista
from networking_arista.l3Plugin import l3_arista
from networking_arista.tests.unit import utils
def setup_arista_config(value='', vrf=False, mlag=False, vrf_gateway=False):
cfg.CONF.set_override('primary_l3_host', value, "l3_arista")
cfg.CONF.set_override('primary_l3_host_username', value, "l3_arista")
if vrf:
cfg.CONF.set_override('use_vrf', vrf, "l3_arista")
if vrf_gateway:
cfg.CONF.set_override('vrf_default_route', vrf_gateway,
"l3_arista")
if mlag:
cfg.CONF.set_override('secondary_l3_host', value, "l3_arista")
cfg.CONF.set_override('mlag_config', mlag, "l3_arista")
class AristaL3DriverTestCasesDefaultVrf(base.BaseTestCase):
"""Test cases to test the RPC between Arista Driver and EOS.
Tests all methods used to send commands between Arista L3 Driver and EOS
to program routing functions in Default VRF.
"""
def setUp(self):
super(AristaL3DriverTestCasesDefaultVrf, self).setUp()
setup_arista_config('value')
self.drv = arista.AristaL3Driver()
self.drv._servers = []
self.drv._servers.append(mock.MagicMock())
def test_no_exception_on_correct_configuration(self):
self.assertIsNotNone(self.drv)
def test_create_router_on_eos(self):
router_name = 'test-router-1'
route_domain = '123:123'
self.drv.create_router_on_eos(router_name, route_domain,
self.drv._servers[0])
cmds = ['enable', 'configure', 'exit']
self.drv._servers[0].execute.assert_called_once_with(cmds,
keep_alive=True)
def test_delete_router_from_eos(self):
router_name = 'test-router-1'
self.drv.delete_router_from_eos(router_name, self.drv._servers[0])
cmds = ['enable', 'configure', 'exit']
self.drv._servers[0].execute.assert_called_once_with(cmds,
keep_alive=True)
def test_add_interface_to_router_on_eos(self):
router_name = 'test-router-1'
segment_id = '123'
router_ip = '10.10.10.10'
mask = '255.255.255.0'
fixed_ip = '10.10.10.15'
self.drv.add_interface_to_router(segment_id, router_name, fixed_ip,
router_ip, mask,
self.drv._servers[0])
cmds = ['enable', 'configure', 'ip routing',
'vlan %s' % segment_id, 'exit',
'interface vlan %s' % segment_id,
'ip address %s/%s' % (fixed_ip, mask), 'exit']
self.drv._servers[0].execute.assert_called_once_with(cmds,
keep_alive=True)
def test_delete_interface_from_router_on_eos(self):
router_name = 'test-router-1'
segment_id = '123'
self.drv.delete_interface_from_router(segment_id, router_name,
self.drv._servers[0])
cmds = ['enable', 'configure', 'no interface vlan %s' % segment_id,
'exit']
self.drv._servers[0].execute.assert_called_once_with(cmds,
keep_alive=True)
class AristaL3DriverTestCasesUsingVRFs(base.BaseTestCase):
"""Test cases to test the RPC between Arista Driver and EOS.
Tests all methods used to send commands between Arista L3 Driver and EOS
to program routing functions using multiple VRFs.
Note that the configuration commands are different when VRFs are used.
"""
def setUp(self):
super(AristaL3DriverTestCasesUsingVRFs, self).setUp()
setup_arista_config('value', vrf=True)
self.drv = arista.AristaL3Driver()
self.drv._servers = []
self.drv._servers.append(mock.MagicMock())
def test_no_exception_on_correct_configuration(self):
self.assertIsNotNone(self.drv)
def test_create_router_on_eos_v2_syntax(self):
max_vrfs = 5
routers = ['testRouterV2-%s' % n for n in range(max_vrfs)]
domains = ['20%s' % n for n in range(max_vrfs)]
with mock.patch.object(self.drv, '_check_vrf_syntax_v2_support') as c:
c.return_value = True
self.drv._update_vrf_commands()
c.assert_called_once_with(self.drv._servers[0], keep_alive=True)
for (r, d) in zip(routers, domains):
self.drv.create_router_on_eos(r, d, self.drv._servers[0])
cmds = ['enable', 'configure',
'vrf instance %s' % r,
'rd %(rd)s:%(rd)s' % {'rd': d}, 'exit',
'ip routing vrf %s' % r, 'exit']
self.drv._servers[0].execute.assert_called_with(cmds,
keep_alive=True)
def test_create_router_on_eos_v1_syntax(self):
max_vrfs = 5
routers = ['testRouterV1-%s' % n for n in range(max_vrfs)]
domains = ['10%s' % n for n in range(max_vrfs)]
with mock.patch.object(self.drv, '_check_vrf_syntax_v2_support') as c:
c.return_value = False
self.drv._update_vrf_commands()
c.assert_called_once_with(self.drv._servers[0], keep_alive=True)
for (r, d) in zip(routers, domains):
self.drv.create_router_on_eos(r, d, self.drv._servers[0])
cmds = ['enable', 'configure',
'vrf definition %s' % r,
'rd %(rd)s:%(rd)s' % {'rd': d}, 'exit',
'ip routing vrf %s' % r, 'exit']
self.drv._servers[0].execute.assert_called_with(cmds,
keep_alive=True)
def test_delete_router_from_eos_v1_syntax(self):
max_vrfs = 5
routers = ['testRouter-%s' % n for n in range(max_vrfs)]
with mock.patch.object(self.drv, '_check_vrf_syntax_v2_support') as c:
c.return_value = False
self.drv._update_vrf_commands()
c.assert_called_once_with(self.drv._servers[0], keep_alive=True)
for r in routers:
self.drv.delete_router_from_eos(r, self.drv._servers[0])
cmds = ['enable', 'configure', 'no vrf definition %s' % r, 'exit']
self.drv._servers[0].execute.assert_called_with(cmds,
keep_alive=True)
def test_delete_router_from_eos_v2_syntax(self):
max_vrfs = 5
routers = ['testRouter-%s' % n for n in range(max_vrfs)]
with mock.patch.object(self.drv, '_check_vrf_syntax_v2_support') as c:
c.return_value = True
self.drv._update_vrf_commands()
c.assert_called_once_with(self.drv._servers[0], keep_alive=True)
for r in routers:
self.drv.delete_router_from_eos(r, self.drv._servers[0])
cmds = ['enable', 'configure', 'no vrf instance %s' % r, 'exit']
self.drv._servers[0].execute.assert_called_with(cmds,
keep_alive=True)
def test_add_interface_to_router_on_eos_v1(self):
router_name = 'test-router-1'
segment_id = '123'
router_ip = '10.10.10.10'
mask = '255.255.255.0'
fixed_ip = '10.10.10.15'
with mock.patch.object(self.drv, '_check_vrf_syntax_v2_support') as c:
c.return_value = False
self.drv._update_vrf_commands()
self.drv._select_dicts(4)
c.assert_called_once_with(self.drv._servers[0], keep_alive=True)
self.drv.add_interface_to_router(segment_id, router_name, fixed_ip,
router_ip, mask,
self.drv._servers[0])
cmds = ['enable', 'configure',
'vlan %s' % segment_id, 'exit',
'interface vlan %s' % segment_id,
'vrf forwarding %s' % router_name,
'ip address %s/%s' % (fixed_ip, mask), 'exit']
self.drv._servers[0].execute.assert_called_once_with(cmds,
keep_alive=True)
def test_add_interface_to_router_on_eos_v2(self):
router_name = 'test-router-1'
segment_id = '123'
router_ip = '10.10.10.10'
mask = '255.255.255.0'
fixed_ip = '10.10.10.15'
with mock.patch.object(self.drv, '_check_vrf_syntax_v2_support') as c:
c.return_value = True
self.drv._update_vrf_commands()
self.drv._select_dicts(4)
c.assert_called_once_with(self.drv._servers[0], keep_alive=True)
self.drv.add_interface_to_router(segment_id, router_name, fixed_ip,
router_ip, mask,
self.drv._servers[0])
cmds = ['enable', 'configure',
'vlan %s' % segment_id, 'exit',
'interface vlan %s' % segment_id,
'vrf %s' % router_name,
'ip address %s/%s' % (fixed_ip, mask), 'exit']
self.drv._servers[0].execute.assert_called_once_with(cmds,
keep_alive=True)
def test_delete_interface_from_router_on_eos(self):
router_name = 'test-router-1'
segment_id = '123'
self.drv.delete_interface_from_router(segment_id, router_name,
self.drv._servers[0])
cmds = ['enable', 'configure', 'no interface vlan %s' % segment_id,
'exit']
self.drv._servers[0].execute.assert_called_once_with(cmds,
keep_alive=True)
class AristaL3DriverTestCasesVRFDefaultGateway(base.BaseTestCase):
"""Test cases to test the RPC between Arista Driver and EOS.
Tests all methods used to send commands between Arista L3 Driver and EOS
to program routing functions using multiple VRFs.
Note that the configuration commands are different when VRFs are used.
"""
def setUp(self):
super(AristaL3DriverTestCasesVRFDefaultGateway, self).setUp()
setup_arista_config('value', vrf=True, vrf_gateway=True)
self.drv = arista.AristaL3Driver()
self.drv._servers = []
self.drv._servers.append(mock.MagicMock())
def test_no_exception_on_correct_configuration(self):
self.assertIsNotNone(self.drv)
def test_create_router_on_eos_v2_syntax(self):
max_vrfs = 5
routers = ['testRouterV2-%s' % n for n in range(max_vrfs)]
domains = ['20%s' % n for n in range(max_vrfs)]
with mock.patch.object(self.drv, '_check_vrf_syntax_v2_support') as c:
c.return_value = True
self.drv._update_vrf_commands()
c.assert_called_once_with(self.drv._servers[0], keep_alive=True)
for (r, d) in zip(routers, domains):
self.drv.create_router_on_eos(r, d, self.drv._servers[0])
cmds = ['enable', 'configure',
'vrf instance %s' % r,
'rd %(rd)s:%(rd)s' % {'rd': d}, 'exit',
'ip routing vrf %s' % r, 'exit']
self.drv._servers[0].execute.assert_called_with(cmds,
keep_alive=True)
def test_create_router_on_eos_v1_syntax(self):
max_vrfs = 5
routers = ['testRouterV1-%s' % n for n in range(max_vrfs)]
domains = ['10%s' % n for n in range(max_vrfs)]
with mock.patch.object(self.drv, '_check_vrf_syntax_v2_support') as c:
c.return_value = False
self.drv._update_vrf_commands()
c.assert_called_once_with(self.drv._servers[0], keep_alive=True)
for (r, d) in zip(routers, domains):
self.drv.create_router_on_eos(r, d, self.drv._servers[0])
cmds = ['enable', 'configure',
'vrf definition %s' % r,
'rd %(rd)s:%(rd)s' % {'rd': d}, 'exit',
'ip routing vrf %s' % r, 'exit']
self.drv._servers[0].execute.assert_called_with(cmds,
keep_alive=True)
def test_delete_router_from_eos_v1_syntax(self):
max_vrfs = 5
routers = ['testRouter-%s' % n for n in range(max_vrfs)]
with mock.patch.object(self.drv, '_check_vrf_syntax_v2_support') as c:
c.return_value = False
self.drv._update_vrf_commands()
c.assert_called_once_with(self.drv._servers[0], keep_alive=True)
for r in routers:
self.drv.delete_router_from_eos(r, self.drv._servers[0])
cmds = ['enable', 'configure', 'no vrf definition %s' % r, 'exit']
self.drv._servers[0].execute.assert_called_with(cmds,
keep_alive=True)
def test_delete_router_from_eos_v2_syntax(self):
max_vrfs = 5
routers = ['testRouter-%s' % n for n in range(max_vrfs)]
with mock.patch.object(self.drv, '_check_vrf_syntax_v2_support') as c:
c.return_value = True
self.drv._update_vrf_commands()
c.assert_called_once_with(self.drv._servers[0], keep_alive=True)
for r in routers:
self.drv.delete_router_from_eos(r, self.drv._servers[0])
cmds = ['enable', 'configure', 'no vrf instance %s' % r, 'exit']
self.drv._servers[0].execute.assert_called_with(cmds,
keep_alive=True)
def test_add_interface_to_router_on_eos_v1(self):
router_name = 'test-router-1'
segment_id = '123'
router_ip = '10.10.10.10'
mask = '255.255.255.0'
fixed_ip = '10.10.10.15'
with mock.patch.object(self.drv, '_check_vrf_syntax_v2_support') as c:
c.return_value = False
self.drv._update_vrf_commands()
self.drv._select_dicts(4)
c.assert_called_once_with(self.drv._servers[0], keep_alive=True)
self.drv.add_interface_to_router(segment_id, router_name, fixed_ip,
router_ip, mask,
self.drv._servers[0])
cmds = ['enable', 'configure',
'vlan %s' % segment_id, 'exit',
'interface vlan %s' % segment_id,
'vrf forwarding %s' % router_name,
'ip address %s/%s' % (fixed_ip, mask),
'exit']
self.drv._servers[0].execute.assert_called_once_with(cmds,
keep_alive=True)
def test_add_interface_to_router_on_eos_v2(self):
router_name = 'test-router-1'
segment_id = '123'
router_ip = '10.10.10.10'
mask = '255.255.255.0'
fixed_ip = '10.10.10.15'
with mock.patch.object(self.drv, '_check_vrf_syntax_v2_support') as c:
c.return_value = True
self.drv._update_vrf_commands()
self.drv._select_dicts(4)
c.assert_called_once_with(self.drv._servers[0], keep_alive=True)
self.drv.add_interface_to_router(segment_id, router_name, fixed_ip,
router_ip, mask,
self.drv._servers[0])
cmds = ['enable', 'configure',
'vlan %s' % segment_id, 'exit',
'interface vlan %s' % segment_id,
'vrf %s' % router_name,
'ip address %s/%s' % (fixed_ip, mask),
'exit']
self.drv._servers[0].execute.assert_called_once_with(cmds,
keep_alive=True)
def test_delete_interface_from_router_on_eos(self):
router_name = 'test-router-1'
segment_id = '123'
self.drv.delete_interface_from_router(segment_id, router_name,
self.drv._servers[0])
cmds = ['enable', 'configure', 'no interface vlan %s' % segment_id,
'exit']
self.drv._servers[0].execute.assert_called_once_with(cmds,
keep_alive=True)
class AristaL3DriverTestCasesMlagConfig(base.BaseTestCase):
"""Test cases to test the RPC between Arista Driver and EOS.
Tests all methods used to send commands between Arista L3 Driver and EOS
to program routing functions in Default VRF using MLAG configuration.
MLAG configuration means that the commands will be sent to both
primary and secondary Arista Switches.
"""
def setUp(self):
super(AristaL3DriverTestCasesMlagConfig, self).setUp()
setup_arista_config('value', mlag=True)
self.drv = arista.AristaL3Driver()
self.drv._servers = []
self.drv._servers.append(mock.MagicMock())
self.drv._servers.append(mock.MagicMock())
def test_no_exception_on_correct_configuration(self):
self.assertIsNotNone(self.drv)
def test_create_router_on_eos(self):
router_name = 'test-router-1'
route_domain = '123:123'
router_mac = '00:11:22:33:44:55'
for s in self.drv._servers:
self.drv.create_router_on_eos(router_name, route_domain, s)
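            # In default-VRF MLAG mode, creating a router is expected to
            # program only the shared virtual-router MAC on each peer.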
cmds = ['enable', 'configure',
'ip virtual-router mac-address %s' % router_mac, 'exit']
s.execute.assert_called_with(cmds, keep_alive=True)
def test_delete_router_from_eos(self):
router_name = 'test-router-1'
for s in self.drv._servers:
self.drv.delete_router_from_eos(router_name, s)
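            # With no VRF in use there is no per-router state to remove,
            # so only the enable/configure/exit wrapper is expected.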
cmds = ['enable', 'configure', 'exit']
s.execute.assert_called_once_with(cmds, keep_alive=True)
def test_add_interface_to_router_on_eos(self):
router_name = 'test-router-1'
segment_id = '123'
router_ip = '10.10.10.10'
mask = '255.255.255.0'
fixed_ip = '10.10.10.15'
for s in self.drv._servers:
self.drv.add_interface_to_router(segment_id, router_name, fixed_ip,
router_ip, mask, s)
cmds = ['enable', 'configure', 'ip routing',
'vlan %s' % segment_id, 'exit',
'interface vlan %s' % segment_id,
'ip address %s' % router_ip,
'ip virtual-router address %s' % fixed_ip, 'exit']
s.execute.assert_called_once_with(cmds, keep_alive=True)
def test_delete_interface_from_router_on_eos(self):
router_name = 'test-router-1'
segment_id = '123'
for s in self.drv._servers:
self.drv.delete_interface_from_router(segment_id, router_name, s)
cmds = ['enable', 'configure', 'no interface vlan %s' % segment_id,
'exit']
s.execute.assert_called_once_with(cmds, keep_alive=True)
class AristaL3DriverTestCasesMlagVRFConfig(base.BaseTestCase):
"""Test cases to test the RPC between Arista Driver and EOS.
Tests all methods used to send commands between Arista L3 Driver and EOS
to program routing functions in VRFs using MLAG configuration.
"""
def setUp(self):
super(AristaL3DriverTestCasesMlagVRFConfig, self).setUp()
setup_arista_config('value', mlag=True, vrf=True)
self.drv = arista.AristaL3Driver()
self.drv._servers = []
self.drv._servers.append(mock.MagicMock())
self.drv._servers.append(mock.MagicMock())
def test_no_exception_on_correct_configuration(self):
self.assertIsNotNone(self.drv)
def test_create_router_on_eos(self):
max_vrfs = 5
routers = ['testRouter-%s' % n for n in range(max_vrfs)]
domains = ['10%s' % n for n in range(max_vrfs)]
router_mac = '00:11:22:33:44:55'
for s in self.drv._servers:
for (r, d) in zip(routers, domains):
self.drv.create_router_on_eos(r, d, s)
cmds = ['enable', 'configure',
'vrf instance %s' % r,
'rd %(rd)s:%(rd)s' % {'rd': d},
'exit',
'ip routing vrf %s' % r,
'ip virtual-router mac-address %s' % router_mac,
'exit']
s.execute.assert_called_with(cmds, keep_alive=True)
def test_delete_router_from_eos(self):
max_vrfs = 5
routers = ['testRouter-%s' % n for n in range(max_vrfs)]
for s in self.drv._servers:
for r in routers:
self.drv.delete_router_from_eos(r, s)
cmds = ['enable', 'configure', 'no vrf instance %s' % r,
'exit']
s.execute.assert_called_with(cmds, keep_alive=True)
def test_add_interface_to_router_on_eos_v1(self):
router_name = 'test-router-1'
segment_id = '123'
fixed_ip = '10.10.10.15'
router_ip = '10.10.10.10'
mask = '255.255.255.0'
with mock.patch.object(self.drv, '_check_vrf_syntax_v2_support') as c:
c.return_value = False
self.drv._update_vrf_commands()
self.drv._select_dicts(4)
c.assert_called_once_with(self.drv._servers[0], keep_alive=True)
for s in self.drv._servers:
self.drv.add_interface_to_router(segment_id, router_name, fixed_ip,
router_ip, mask, s)
cmds = ['enable', 'configure',
'vlan %s' % segment_id, 'exit',
'interface vlan %s' % segment_id,
'vrf forwarding %s' % router_name,
'ip address %s' % router_ip,
'ip virtual-router address %s' % fixed_ip,
'exit']
s.execute.assert_called_once_with(cmds, keep_alive=True)
def test_add_interface_to_router_on_eos_v2(self):
router_name = 'test-router-1'
segment_id = '123'
router_ip = '10.10.10.10'
mask = '255.255.255.0'
fixed_ip = '10.10.10.15'
with mock.patch.object(self.drv, '_check_vrf_syntax_v2_support') as c:
c.return_value = True
self.drv._update_vrf_commands()
self.drv._select_dicts(4)
c.assert_called_once_with(self.drv._servers[0], keep_alive=True)
for s in self.drv._servers:
self.drv.add_interface_to_router(segment_id, router_name, fixed_ip,
router_ip, mask, s)
cmds = ['enable', 'configure',
'vlan %s' % segment_id, 'exit',
'interface vlan %s' % segment_id,
'vrf %s' % router_name,
'ip address %s' % router_ip,
'ip virtual-router address %s' % fixed_ip,
'exit']
s.execute.assert_called_once_with(cmds, keep_alive=True)
def test_delete_interface_from_router_on_eos(self):
router_name = 'test-router-1'
segment_id = '123'
for s in self.drv._servers:
self.drv.delete_interface_from_router(segment_id, router_name, s)
cmds = ['enable', 'configure', 'no interface vlan %s' % segment_id,
'exit']
s.execute.assert_called_once_with(cmds, keep_alive=True)
class AristaL3DriverTestCases_v4(base.BaseTestCase):
"""Test cases to test the RPC between Arista Driver and EOS.
Tests all methods used to send commands between Arista L3 Driver and EOS
to program routing functions in Default VRF using IPv4.
"""
def setUp(self):
super(AristaL3DriverTestCases_v4, self).setUp()
setup_arista_config('value')
self.drv = arista.AristaL3Driver()
self.drv._servers = []
self.drv._servers.append(mock.MagicMock())
def test_no_exception_on_correct_configuration(self):
self.assertIsNotNone(self.drv)
def test_add_v4_interface_to_router(self):
gateway_ip = '10.10.10.1'
cidrs = ['10.10.10.0/24', '10.11.11.0/24']
fixed_ip = '10.10.10.15'
segment_id = 123
        # Add a couple of IPv4 subnets to the router
for cidr in cidrs:
router = {'id': 'r1',
'name': 'test-router-1',
'tenant_id': 'ten-a',
'seg_id': '%s' % segment_id,
'cidr': "%s" % cidr,
'gip': "%s" % gateway_ip,
'ip_version': 4,
'fixed_ip': fixed_ip}
self.assertFalse(self.drv.add_router_interface(None, router))
for s in self.drv._servers:
cmds = ['enable', 'configure', 'ip routing',
'vlan %s' % segment_id, 'exit',
'interface vlan %s' % segment_id,
'ip address %s/%s' % (fixed_ip, cidr.split('/')[1]),
'exit']
s.execute.assert_called_once_with(cmds, keep_alive=True)
s.reset_mock()
def test_delete_v4_interface_from_router(self):
gateway_ip = '10.10.10.1'
cidrs = ['10.10.10.0/24', '10.11.11.0/24']
fixed_ip = '10.10.10.15'
segment_id = 123
        # remove a couple of IPv4 subnets from the router
for cidr in cidrs:
router = {'id': 'r1',
'name': 'test-router-1',
'tenant_id': 'ten-a',
'seg_id': '%s' % segment_id,
'cidr': "%s" % cidr,
'gip': "%s" % gateway_ip,
'ip_version': 4,
                  'fixed_ip': fixed_ip}
self.assertFalse(self.drv.remove_router_interface(None, router))
for s in self.drv._servers:
cmds = ['enable', 'configure',
'no interface vlan %s' % segment_id,
'exit']
s.execute.assert_called_once_with(cmds, keep_alive=True)
s.reset_mock()
class AristaL3DriverTestCases_v6(base.BaseTestCase):
"""Test cases to test the RPC between Arista Driver and EOS.
Tests all methods used to send commands between Arista L3 Driver and EOS
to program routing functions in Default VRF using IPv6.
"""
def setUp(self):
super(AristaL3DriverTestCases_v6, self).setUp()
setup_arista_config('value')
self.drv = arista.AristaL3Driver()
self.drv._servers = []
self.drv._servers.append(mock.MagicMock())
def test_no_exception_on_correct_configuration(self):
self.assertIsNotNone(self.drv)
def test_add_v6_interface_to_router(self):
gateway_ip = '3FFE::1'
cidrs = ['3FFE::/16', '2001::/16']
fixed_ip = '3FFE::5'
segment_id = 123
        # Add a couple of IPv6 subnets to the router
for cidr in cidrs:
router = {'id': 'r1',
'name': 'test-router-1',
'tenant_id': 'ten-a',
'seg_id': '%s' % segment_id,
'cidr': "%s" % cidr,
'gip': "%s" % gateway_ip,
'ip_version': 6,
'fixed_ip': fixed_ip}
self.assertFalse(self.drv.add_router_interface(None, router))
for s in self.drv._servers:
cmds = ['enable', 'configure', 'ipv6 unicast-routing',
'vlan %s' % segment_id, 'exit',
'interface vlan %s' % segment_id, 'ipv6 enable',
'ipv6 address %s/%s' % (fixed_ip, cidr.split('/')[1]),
'exit']
s.execute.assert_called_once_with(cmds, keep_alive=True)
s.reset_mock()
def test_delete_v6_interface_from_router(self):
gateway_ip = '3FFE::1'
cidrs = ['3FFE::/16', '2001::/16']
fixed_ip = '3FFE::5'
segment_id = 123
        # remove a couple of IPv6 subnets from the router
for cidr in cidrs:
router = {'id': 'r1',
'name': 'test-router-1',
'tenant_id': 'ten-a',
'seg_id': '%s' % segment_id,
'cidr': "%s" % cidr,
'gip': "%s" % gateway_ip,
'ip_version': 6,
'fixed_ip': fixed_ip}
self.assertFalse(self.drv.remove_router_interface(None, router))
for s in self.drv._servers:
cmds = ['enable', 'configure',
'no interface vlan %s' % segment_id,
'exit']
s.execute.assert_called_once_with(cmds, keep_alive=True)
s.reset_mock()
class AristaL3DriverTestCases_MLAG_v4(base.BaseTestCase):
"""Test cases to test the RPC between Arista Driver and EOS.
Tests all methods used to send commands between Arista L3 Driver and EOS
to program routing functions in Default VRF on MLAG'ed switches using IPv4.
"""
def setUp(self):
super(AristaL3DriverTestCases_MLAG_v4, self).setUp()
setup_arista_config('value', mlag=True)
self.drv = arista.AristaL3Driver()
self.drv._servers = []
self.drv._servers.append(mock.MagicMock())
self.drv._servers.append(mock.MagicMock())
def test_no_exception_on_correct_configuration(self):
self.assertIsNotNone(self.drv)
def test_add_v4_interface_to_router(self):
gateway_ip = '10.10.10.1'
cidrs = ['10.10.10.0/24', '10.11.11.0/24']
fixed_ip = '10.10.10.15'
segment_id = 123
svi_ips = [['10.10.10.254', '10.10.10.253'],
['10.11.11.254', '10.11.11.253']]
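        # Each MLAG peer needs a distinct SVI address in addition to the
        # shared virtual IP; the expectation here is that they are
        # allocated from the top of each subnet (.254, .253, ...).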
        # Add a couple of IPv4 subnets to the router
for i, cidr in enumerate(cidrs):
router = {'id': 'r1',
'name': 'test-router-1',
'tenant_id': 'ten-a',
'seg_id': '%s' % segment_id,
'cidr': "%s" % cidr,
'gip': "%s" % gateway_ip,
'ip_version': 4,
'fixed_ip': fixed_ip}
self.assertFalse(self.drv.add_router_interface(None, router))
for j, s in enumerate(self.drv._servers):
svi_ip = svi_ips[i][j]
cmds = ['enable', 'configure', 'ip routing',
'vlan %s' % segment_id, 'exit',
'interface vlan %s' % segment_id,
'ip address %s/%s' % (svi_ip, cidr.split('/')[1]),
'ip virtual-router address %s' % fixed_ip, 'exit']
s.execute.assert_called_once_with(cmds, keep_alive=True)
s.reset_mock()
def test_add_v4_interface_to_router_high_fixed_ip(self):
gateway_ip = '10.10.10.1'
cidrs = ['10.10.10.0/24', '10.11.11.0/24']
fixed_ips = ['10.10.10.254', '10.11.11.253']
segment_id = 123
svi_ips = [['10.10.10.253', '10.10.10.252'],
['10.11.11.254', '10.11.11.252']]
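        # When the router's fixed IP occupies a top-of-subnet address, the
        # expected SVI allocation skips over it (.254 is taken in the
        # first subnet, so the peers get .253 and .252).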
        # Add a couple of IPv4 subnets to the router
for i, cidr in enumerate(cidrs):
fixed_ip = fixed_ips[i]
router = {'id': 'r1',
'name': 'test-router-1',
'tenant_id': 'ten-a',
'seg_id': '%s' % segment_id,
'cidr': "%s" % cidr,
'gip': "%s" % gateway_ip,
'ip_version': 4,
'fixed_ip': fixed_ip}
self.assertFalse(self.drv.add_router_interface(None, router))
for j, s in enumerate(self.drv._servers):
svi_ip = svi_ips[i][j]
cmds = ['enable', 'configure', 'ip routing',
'vlan %s' % segment_id, 'exit',
'interface vlan %s' % segment_id,
'ip address %s/%s' % (svi_ip, cidr.split('/')[1]),
'ip virtual-router address %s' % fixed_ip, 'exit']
s.execute.assert_called_once_with(cmds, keep_alive=True)
s.reset_mock()
def test_delete_v4_interface_from_router(self):
gateway_ip = '10.10.10.1'
cidrs = ['10.10.10.0/24', '10.11.11.0/24']
segment_id = 123
        # remove a couple of IPv4 subnets from the router
for cidr in cidrs:
router = {'id': 'r1',
'name': 'test-router-1',
'tenant_id': 'ten-a',
'seg_id': '%s' % segment_id,
'cidr': "%s" % cidr,
'gip': "%s" % gateway_ip,
'ip_version': 4}
self.assertFalse(self.drv.remove_router_interface(None, router))
for s in self.drv._servers:
cmds = ['enable', 'configure',
'no interface vlan %s' % segment_id,
'exit']
s.execute.assert_called_once_with(cmds, keep_alive=True)
s.reset_mock()
class AristaL3DriverTestCases_MLAG_v6(base.BaseTestCase):
"""Test cases to test the RPC between Arista Driver and EOS.
Tests all methods used to send commands between Arista L3 Driver and EOS
to program routing functions in Default VRF on MLAG'ed switches using IPv6.
"""
def setUp(self):
super(AristaL3DriverTestCases_MLAG_v6, self).setUp()
setup_arista_config('value', mlag=True)
self.drv = arista.AristaL3Driver()
self.drv._servers = []
self.drv._servers.append(mock.MagicMock())
self.drv._servers.append(mock.MagicMock())
def test_no_exception_on_correct_configuration(self):
self.assertIsNotNone(self.drv)
def test_add_v6_interface_to_router(self):
gateway_ip = '3FFE::1'
cidrs = ['3FFE::/112', '2001::/112']
fixed_ip = '3FFE::5'
segment_id = 123
svi_ips = [['3ffe::fffe', '3ffe::fffd'],
['2001::fffe', '2001::fffd']]
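        # As in the IPv4 MLAG tests, each peer gets its own SVI address
        # from the top of the subnet, with the router's fixed IP serving
        # as the shared virtual-router address.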
        # Add a couple of IPv6 subnets to the router
for i, cidr in enumerate(cidrs):
router = {'id': 'r1',
'name': 'test-router-1',
'tenant_id': 'ten-a',
'seg_id': '%s' % segment_id,
'cidr': "%s" % cidr,
'gip': "%s" % gateway_ip,
'ip_version': 6,
'fixed_ip': fixed_ip}
self.assertFalse(self.drv.add_router_interface(None, router))
for j, s in enumerate(self.drv._servers):
svi_ip = svi_ips[i][j]
cmds = ['enable', 'configure', 'ipv6 unicast-routing',
'vlan %s' % segment_id, 'exit',
'interface vlan %s' % segment_id, 'ipv6 enable',
'ipv6 address %s/%s' % (svi_ip, cidr.split('/')[1]),
'ipv6 virtual-router address %s' % fixed_ip, 'exit']
s.execute.assert_called_once_with(cmds, keep_alive=True)
s.reset_mock()
def test_add_v6_interface_to_router_high_fixed_ip(self):
gateway_ip = '3FFE::1'
cidrs = ['3FFE::/112', '2001::/112']
fixed_ips = ['3FFE::FFFE', '2001::FFFD']
segment_id = 123
svi_ips = [['3ffe::fffd', '3ffe::fffc'],
['2001::fffe', '2001::fffc']]
        # Add a couple of IPv6 subnets to the router
for i, cidr in enumerate(cidrs):
fixed_ip = fixed_ips[i]
router = {'id': 'r1',
'name': 'test-router-1',
'tenant_id': 'ten-a',
'seg_id': '%s' % segment_id,
'cidr': "%s" % cidr,
'gip': "%s" % gateway_ip,
'ip_version': 6,
'fixed_ip': fixed_ip}
self.assertFalse(self.drv.add_router_interface(None, router))
for j, s in enumerate(self.drv._servers):
svi_ip = svi_ips[i][j]
cmds = ['enable', 'configure', 'ipv6 unicast-routing',
'vlan %s' % segment_id, 'exit',
'interface vlan %s' % segment_id, 'ipv6 enable',
'ipv6 address %s/%s' % (svi_ip, cidr.split('/')[1]),
'ipv6 virtual-router address %s' % fixed_ip, 'exit']
s.execute.assert_called_once_with(cmds, keep_alive=True)
s.reset_mock()
def test_delete_v6_interface_from_router(self):
gateway_ip = '3FFE::1'
cidrs = ['3FFE::/16', '2001::/16']
segment_id = 123
        # remove a couple of IPv6 subnets from the router
for cidr in cidrs:
router = {'id': 'r1',
'name': 'test-router-1',
'tenant_id': 'ten-a',
'seg_id': '%s' % segment_id,
'cidr': "%s" % cidr,
'gip': "%s" % gateway_ip,
'ip_version': 6}
self.assertFalse(self.drv.remove_router_interface(None, router))
for s in self.drv._servers:
cmds = ['enable', 'configure',
'no interface vlan %s' % segment_id,
'exit']
s.execute.assert_called_once_with(cmds, keep_alive=True)
s.reset_mock()
class AristaL3DriverTestCasesMlag_one_switch_failed(base.BaseTestCase):
"""Test cases to test with non redundant hardare in redundancy mode.
In the following test cases, the driver is configured in MLAG (redundancy
mode) but, one of the switches is mocked to throw exceptoin to mimic
failure of the switch. Ensure that the operation does not fail when
one of the switches fails.
"""
def setUp(self):
super(AristaL3DriverTestCasesMlag_one_switch_failed, self).setUp()
setup_arista_config('value', mlag=True)
self.drv = arista.AristaL3Driver()
self.drv._servers = []
self.drv._servers.append(mock.MagicMock())
self.drv._servers.append(mock.MagicMock())
def test_create_router_when_one_switch_fails(self):
router = {}
router['id'] = 'r1'
router['name'] = 'test-router-1'
# Make one of the switches throw an exception - i.e. fail
self.drv._servers[0].execute = mock.Mock(side_effect=Exception)
with mock.patch.object(arista.LOG, 'exception') as log_exception:
self.drv.create_router(None, router)
log_exception.assert_called_once_with(mock.ANY)
def test_delete_router_when_one_switch_fails(self):
router = {}
router['id'] = 'r1'
router['name'] = 'test-router-1'
router_id = '345'
# Make one of the switches throw an exception - i.e. fail
self.drv._servers[1].execute = mock.Mock(side_effect=Exception)
with mock.patch.object(arista.LOG, 'exception') as log_exception:
self.drv.delete_router(None, router_id, router)
log_exception.assert_called_once_with(mock.ANY)
def test_add_router_interface_when_one_switch_fails(self):
router = {}
router['id'] = 'r1'
router['name'] = 'test-router-1'
router['tenant_id'] = 'ten-1'
router['seg_id'] = '100'
router['ip_version'] = 4
router['cidr'] = '10.10.10.0/24'
router['gip'] = '10.10.10.1'
router['fixed_ip'] = '10.10.10.15'
# Make one of the switches throw an exception - i.e. fail
self.drv._servers[1].execute = mock.Mock(side_effect=Exception)
with mock.patch.object(arista.LOG, 'exception') as log_exception:
self.drv.add_router_interface(None, router)
log_exception.assert_called_once_with(mock.ANY)
def test_remove_router_interface_when_one_switch_fails(self):
router = {}
router['id'] = 'r1'
router['name'] = 'test-router-1'
router['tenant_id'] = 'ten-1'
router['seg_id'] = '100'
router['ip_version'] = 4
router['cidr'] = '10.10.10.0/24'
router['gip'] = '10.10.10.1'
# Make one of the switches throw an exception - i.e. fail
self.drv._servers[0].execute = mock.Mock(side_effect=Exception)
with mock.patch.object(arista.LOG, 'exception') as log_exception:
self.drv.remove_router_interface(None, router)
log_exception.assert_called_once_with(mock.ANY)
class AristaL3ProtectedVlanParserTestCases(base.BaseTestCase):
"""Test cases to test the parsing of protected_vlans config
1. Empty string
2. Single VLAN
3. Single VLAN range
4. Multiple VLANs
5. Multiple VLAN ranges
6. Hybrid VLANs + ranges
7. Invalid VLAN
8. Range with invalid min
9. Range with invalid max
10. Range with min > max
11. Non-int VLAN
12. Non-int min
13. Non-int max
"""
def setUp(self):
super(AristaL3ProtectedVlanParserTestCases, self).setUp()
setup_arista_config('value')
def test_empty_string(self):
cfg.CONF.set_override('protected_vlans', '', 'l3_arista')
self.drv = arista.AristaL3Driver()
self.assertEqual(self.drv._protected_vlans, set([1]))
def test_single_vlan(self):
cfg.CONF.set_override('protected_vlans', '100', 'l3_arista')
self.drv = arista.AristaL3Driver()
self.assertEqual(self.drv._protected_vlans, set([1, 100]))
def test_single_range(self):
cfg.CONF.set_override('protected_vlans', '100:105', 'l3_arista')
self.drv = arista.AristaL3Driver()
self.assertEqual(self.drv._protected_vlans,
set([1] + [i for i in range(100, 106)]))
def test_multiple_vlans(self):
cfg.CONF.set_override('protected_vlans', '100,105', 'l3_arista')
self.drv = arista.AristaL3Driver()
self.assertEqual(self.drv._protected_vlans, set([1, 100, 105]))
def test_multiple_ranges(self):
cfg.CONF.set_override('protected_vlans', '100:105,110:115',
'l3_arista')
self.drv = arista.AristaL3Driver()
self.assertEqual(self.drv._protected_vlans,
set(itertools.chain([1], range(100, 106),
range(110, 116))))
def test_hybrid_vlan_and_range(self):
cfg.CONF.set_override('protected_vlans', '100,110:115', 'l3_arista')
self.drv = arista.AristaL3Driver()
self.assertEqual(self.drv._protected_vlans,
set([1, 100] + list(range(110, 116))))
def test_vlan_range_inclusive(self):
cfg.CONF.set_override('protected_vlans', '1,4094', 'l3_arista')
self.drv = arista.AristaL3Driver()
self.assertEqual(self.drv._protected_vlans, set([1, 4094]))
def test_invalid_vlan(self):
cfg.CONF.set_override('protected_vlans', '5000', 'l3_arista')
self.assertRaises(arista_exc.AristaServicePluginConfigError,
arista.AristaL3Driver)
def test_invalid_max(self):
cfg.CONF.set_override('protected_vlans', '100:5000', 'l3_arista')
self.assertRaises(arista_exc.AristaServicePluginConfigError,
arista.AristaL3Driver)
def test_invalid_min(self):
cfg.CONF.set_override('protected_vlans', '-100:100', 'l3_arista')
self.assertRaises(arista_exc.AristaServicePluginConfigError,
arista.AristaL3Driver)
def test_bad_range_bounds(self):
cfg.CONF.set_override('protected_vlans', '200:100', 'l3_arista')
self.assertRaises(arista_exc.AristaServicePluginConfigError,
arista.AristaL3Driver)
def test_non_int_vlan(self):
cfg.CONF.set_override('protected_vlans', 'string', 'l3_arista')
self.assertRaises(arista_exc.AristaServicePluginConfigError,
arista.AristaL3Driver)
def test_non_int_min(self):
cfg.CONF.set_override('protected_vlans', 'string:100', 'l3_arista')
self.assertRaises(arista_exc.AristaServicePluginConfigError,
arista.AristaL3Driver)
def test_non_int_max(self):
cfg.CONF.set_override('protected_vlans', '100:string', 'l3_arista')
self.assertRaises(arista_exc.AristaServicePluginConfigError,
arista.AristaL3Driver)
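# The parsing behaviour exercised by the test cases above can be summarised
# by the following minimal sketch (an illustration only, not the driver's
# actual parser, which raises AristaServicePluginConfigError rather than
# ValueError):
def _example_parse_protected_vlans(value):
    """Parse protected_vlans strings such as '', '100' or '100:105,110'."""
    vlans = set([1])  # VLAN 1 is always treated as protected
    if not value:
        return vlans
    for item in value.split(','):
        bounds = [int(b) for b in item.split(':')]  # ValueError if non-int
        if len(bounds) == 1:
            bounds = bounds * 2  # a single VLAN is a one-element range
        low, high = bounds
        if not 1 <= low <= high <= 4094:
            raise ValueError('invalid VLAN range: %s' % item)
        vlans.update(range(low, high + 1))  # ranges are inclusive
    return vlans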
class AristaL3SyncWorkerTestBase(
test_db_base_plugin_v2.NeutronDbPluginV2TestCase):
"""Base test class for L3 Sync Worker test cases"""
def setUp(self, cleanup=True, vrfDefaultRoute=False):
cfg.CONF.import_opt('network_vlan_ranges',
'neutron.plugins.ml2.drivers.type_vlan',
group='ml2_type_vlan')
cfg.CONF.set_override('network_vlan_ranges', 'default',
'ml2_type_vlan')
cfg.CONF.set_override('enable_cleanup', 'True' if cleanup else 'False',
'l3_arista')
cfg.CONF.set_override('vrf_default_route', str(vrfDefaultRoute),
'l3_arista')
setup_arista_config('value', mlag=True, vrf=True)
service_plugins = {'arista_l3': 'arista_l3'}
super(AristaL3SyncWorkerTestBase, self).setUp(
plugin='ml2',
service_plugins=service_plugins)
self.driver = directory.get_plugin(constants.L3)
self.context = context.get_admin_context()
self.drv = self.driver.driver
self.switch1 = utils.MockSwitch()
self.switch2 = utils.MockSwitch()
self.switches = [self.switch1, self.switch2]
self.drv._servers = self.switches
for worker in self.driver._workers:
if isinstance(worker, l3_arista.AristaL3SyncWorker):
self.sync_worker = worker
self.sync_worker._servers = self.switches
@staticmethod
def _get_rd(name):
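        # Derive a symmetric route distinguisher from the VRF name: the
        # sha256 digest reduced mod 65536 yields a stable 16-bit value X,
        # rendered as 'X:X' to match what the driver programs on EOS.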
hashed = hashlib.sha256(name.encode('utf-8'))
rdm = str(int(hashed.hexdigest(), 16) % 65536)
return '%s:%s' % (rdm, rdm)
class AristaL3SyncWorkerCleanupTestCases(AristaL3SyncWorkerTestBase):
"""Test cases to test the L3 Sync Worker with enable_cleanup=True.
1. Test that VRFs are not cleaned up if router exists
2. Test that SVIs and VLANs are not cleaned up if router interface exists
3. Test that stale VRFs are cleaned up if enable_cleanup=True
4. Test that stale SVIs and VLANs are cleaned up if enable_cleanup=True
    5. Test that stale VRFs are not cleaned up if not named __OpenStack__<...>
6. Test that stale SVIs and VLANs are not cleaned up if protected
7. Test that we don't create SVIs or VLANs for router_gateway interfaces
"""
def setUp(self):
super(AristaL3SyncWorkerCleanupTestCases, self).setUp()
def test_router_exists(self):
router = {'router': {'id': 'r1',
'name': 'router1',
'tenant_id': 't1',
'admin_state_up': True}}
self.driver.create_router(self.context, router)
self.sync_worker.synchronize()
eos_vrf_name = '__OpenStack__r1-router1'
expected_vrfs = {eos_vrf_name:
{'rd': self._get_rd(eos_vrf_name),
'routes': {},
'svis': []}}
self.assertEqual(self.switch1._vrfs, expected_vrfs)
self.assertEqual(self.switch2._vrfs, expected_vrfs)
def test_router_interface_exists(self):
router_dict = {'router': {'name': 'router1',
'tenant_id': 't1',
'admin_state_up': True}}
router = self.driver.create_router(self.context, router_dict)
net_dict = {'network': {'name': 'n1',
'tenant_id': 't1',
'admin_state_up': True,
'shared': False,
'provider:physical_network': 'default',
'provider:network_type': 'vlan',
'provider:segmentation_id': 100}}
net = self.plugin.create_network(self.context, net_dict)
subnet_dict = {'subnet':
{'tenant_id': net['tenant_id'],
'name': net['name'],
'network_id': net['id'],
'ip_version': 4,
'cidr': '10.0.0.0/24',
'gateway_ip': '10.0.0.1',
'allocation_pools': None,
'enable_dhcp': False,
'dns_nameservers': None,
'host_routes': None}}
subnet = self.plugin.create_subnet(self.context, subnet_dict)
router_interface = {'subnet_id': subnet['id']}
self.driver.add_router_interface(self.context, router['id'],
router_interface)
self.sync_worker.synchronize()
expected_svis_s1 = {'vlan 100': {'ip': '10.0.0.254',
'mask': '24',
'vip': '10.0.0.1'}}
expected_svis_s2 = {'vlan 100': {'ip': '10.0.0.253',
'mask': '24',
'vip': '10.0.0.1'}}
expected_vlans = {'100': {'dynamic': False}}
self.assertEqual(self.switch1._svis, expected_svis_s1)
self.assertEqual(self.switch2._svis, expected_svis_s2)
self.assertEqual(self.switch1._vlans, expected_vlans)
self.assertEqual(self.switch2._vlans, expected_vlans)
def test_stale_vrf(self):
eos_vrf_name = '__OpenStack__r1-router1'
self.switch1._vrfs = {eos_vrf_name:
{'rd': self._get_rd(eos_vrf_name),
'svis': []}}
self.switch2._vrfs = {eos_vrf_name:
{'rd': self._get_rd(eos_vrf_name),
'svis': []}}
self.sync_worker.synchronize()
self.assertEqual(self.switch1._vrfs, {})
self.assertEqual(self.switch2._vrfs, {})
def test_stale_svi_and_vlan(self):
self.switch1._svis = {'vlan 100': {'ip': '10.0.0.254',
'mask': '24',
'vip': '10.0.0.1'}}
self.switch1._vlans = {'100': {'dynamic': False}}
self.switch2._svis = {'vlan 100': {'ip': '10.0.0.253',
'mask': '24',
'vip': '10.0.0.1'}}
self.switch2._vlans = {'100': {'dynamic': False}}
self.sync_worker.synchronize()
self.assertEqual(self.switch1._svis, {})
self.assertEqual(self.switch2._svis, {})
self.assertEqual(self.switch1._vlans, {})
self.assertEqual(self.switch2._vlans, {})
def test_non_openstack_vrf(self):
eos_vrf_name = 'other-vrf'
expected_vrfs = {eos_vrf_name:
{'rd': self._get_rd(eos_vrf_name),
'svis': []}}
self.switch1._vrfs = expected_vrfs
self.switch2._vrfs = expected_vrfs
self.sync_worker.synchronize()
self.assertEqual(self.switch1._vrfs, expected_vrfs)
self.assertEqual(self.switch2._vrfs, expected_vrfs)
def test_protected_svi_and_vlan(self):
self.sync_worker._protected_vlans = set([100])
protected_svis = {'vlan 100': {'ip': '10.0.0.254',
'mask': '24',
'vip': '10.0.0.1'}}
protected_vlans = {'100': {'dynamic': False}}
self.switch1._svis = protected_svis
self.switch1._vlans = protected_vlans
self.switch2._svis = protected_svis
self.switch2._vlans = protected_vlans
self.sync_worker.synchronize()
self.assertEqual(self.switch1._svis, protected_svis)
self.assertEqual(self.switch1._vlans, protected_vlans)
self.assertEqual(self.switch2._svis, protected_svis)
self.assertEqual(self.switch2._vlans, protected_vlans)
def test_router_gw_interface(self):
router_dict = {'router': {'name': 'router1',
'tenant_id': 't1',
'admin_state_up': True}}
router = self.driver.create_router(self.context, router_dict)
router_model = self.driver._get_router(self.context, router['id'])
net_dict = {'network': {'name': 'n1',
'tenant_id': 't1',
'admin_state_up': True,
'shared': False,
'provider:physical_network': 'default',
'provider:network_type': 'vlan',
'provider:segmentation_id': 100}}
net = self.plugin.create_network(self.context, net_dict)
subnet_dict = {'subnet':
{'tenant_id': net['tenant_id'],
'name': net['name'],
'network_id': net['id'],
'ip_version': 4,
'cidr': '10.0.0.0/24',
'gateway_ip': '10.0.0.1',
'allocation_pools': None,
'enable_dhcp': False,
'dns_nameservers': None,
'host_routes': None}}
subnet = self.plugin.create_subnet(self.context, subnet_dict)
ext_ips = [{'subnet_id': subnet['id'], 'ip_address': '10.0.0.2'}]
self.driver._create_gw_port(self.context, router['id'], router_model,
net['id'], ext_ips)
self.sync_worker.synchronize()
switch1_svi = {'vlan 100':
{'ip': '10.0.0.254',
'mask': '24',
'vip': '10.0.0.2'}
}
switch2_svi = {'vlan 100':
{'ip': '10.0.0.253',
'mask': '24',
'vip': '10.0.0.2'}
}
switch_vlan = {'100': {'dynamic': False}}
self.assertEqual(self.switch1._svis, switch1_svi)
self.assertEqual(self.switch2._svis, switch2_svi)
self.assertEqual(self.switch1._vlans, switch_vlan)
self.assertEqual(self.switch2._vlans, switch_vlan)
class AristaL3SyncWorkerNoCleanupTestCases(AristaL3SyncWorkerTestBase):
"""Test cases for the L3 Sync Worker with enable_cleanup=False
1. Test that stale VRFs are not cleaned up if enable_cleanup=False
2. Test that stale SVIs and VLANs aren't cleaned up if enable_cleanup=False
"""
def setUp(self):
super(AristaL3SyncWorkerNoCleanupTestCases, self).setUp(cleanup=False)
def test_stale_vrf(self):
eos_vrf_name = '__OpenStack__r1-router1'
expected_vrfs = {eos_vrf_name:
{'rd': self._get_rd(eos_vrf_name),
'svis': []}}
self.switch1._vrfs = expected_vrfs
self.switch2._vrfs = expected_vrfs
self.sync_worker.synchronize()
self.assertEqual(self.switch1._vrfs, expected_vrfs)
self.assertEqual(self.switch2._vrfs, expected_vrfs)
def test_stale_svi_and_vlan(self):
expected_svis = {'vlan 100': {'ip': '10.0.0.254',
'mask': '24',
'vip': '10.0.0.1'}}
expected_vlans = {'100': {'dynamic': False}}
self.switch1._svis = expected_svis
self.switch1._vlans = expected_vlans
self.switch2._svis = expected_svis
self.switch2._vlans = expected_vlans
self.sync_worker.synchronize()
self.assertEqual(self.switch1._svis, expected_svis)
self.assertEqual(self.switch1._vlans, expected_vlans)
self.assertEqual(self.switch2._svis, expected_svis)
self.assertEqual(self.switch2._vlans, expected_vlans)
class AristaL3SyncWorkerVrfDefaultRouteCleanupTestCases(
AristaL3SyncWorkerTestBase):
"""Test cases for the L3 Sync Worker vrfDefaultRoute=True
This test also enabled cleanup=True
1. Test that default routes are not reset on sync
"""
def setUp(self):
super(AristaL3SyncWorkerVrfDefaultRouteCleanupTestCases,
self).setUp(vrfDefaultRoute=True)
def test_vrf_default_route_sync(self):
router_dict = {'router': {'name': 'router1',
'tenant_id': 't1',
'admin_state_up': True}}
router = self.driver.create_router(self.context, router_dict)
router_model = self.driver._get_router(self.context, router['id'])
net_dict = {'network': {'name': 'n1',
'tenant_id': 't1',
'admin_state_up': True,
'shared': False,
'provider:physical_network': 'default',
'provider:network_type': 'vlan',
'provider:segmentation_id': 100}}
net = self.plugin.create_network(self.context, net_dict)
subnet_dict = {'subnet':
{'tenant_id': net['tenant_id'],
'name': net['name'],
'network_id': net['id'],
'ip_version': 4,
'cidr': '10.0.0.0/24',
'gateway_ip': '10.0.0.1',
'allocation_pools': None,
'enable_dhcp': False,
'dns_nameservers': None,
'host_routes': None}}
subnet = self.plugin.create_subnet(self.context, subnet_dict)
ext_ips = [{'subnet_id': subnet['id'], 'ip_address': '10.0.0.2'}]
self.driver._create_gw_port(self.context, router['id'], router_model,
net['id'], ext_ips)
self.sync_worker.synchronize()
switch1_svi = {'vlan 100':
{'ip': '10.0.0.254',
'mask': '24',
'vip': '10.0.0.2'}
}
switch2_svi = {'vlan 100':
{'ip': '10.0.0.253',
'mask': '24',
'vip': '10.0.0.2'}
}
switch_vlan = {'100': {'dynamic': False}}
self.assertEqual(self.switch1._svis, switch1_svi)
self.assertEqual(self.switch2._svis, switch2_svi)
self.assertEqual(self.switch1._vlans, switch_vlan)
self.assertEqual(self.switch2._vlans, switch_vlan)
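        # A second sync must be a no-op for the VRF default route: no
        # 'no ip route vrf ...' teardown command may be issued.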
self.sync_worker.synchronize()
self.switch1.assert_command_not_received('no ip route vrf')
self.switch2.assert_command_not_received('no ip route vrf')
networking_arista-2023.1.0/networking_arista/tests/unit/ml2/__init__.py
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
cfg.CONF.use_stderr = False
networking_arista-2023.1.0/networking_arista/tests/unit/ml2/mechanism_fabric.py
# Copyright (c) 2018 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron_lib.api.definitions import portbindings
from neutron_lib import constants
from neutron_lib.plugins.ml2 import api as driver_api
class TestFabricDriver(driver_api.MechanismDriver):
def __init__(self):
self.vif_details = {portbindings.VIF_DETAILS_CONNECTIVITY:
self.connectivity}
@property
def connectivity(self):
return portbindings.CONNECTIVITY_L2
def initialize(self):
pass
def bind_port(self, context):
"""Bind port to a network segment."""
port = context.current
for segment in context.segments_to_bind:
physnet = segment.get(driver_api.PHYSICAL_NETWORK)
if (not physnet and
segment[driver_api.NETWORK_TYPE] == constants.TYPE_VXLAN):
physnet_map = {'host1': 'physnet1',
'host2': 'physnet2'}
physnet = physnet_map.get(port[portbindings.HOST_ID],
'other_physnet')
next_segment = context.allocate_dynamic_segment(
{'network_id': context.network.current['id'],
'network_type': constants.TYPE_VLAN,
'physical_network': physnet})
context.continue_binding(segment['id'], [next_segment])
return True
networking_arista-2023.1.0/networking_arista/tests/unit/ml2/mechanism_ha_simulator.py
# Copyright (c) 2018 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from multiprocessing import Queue
import random
from neutron.agent import rpc as agent_rpc
from neutron_lib.agent import topics
from neutron_lib.api.definitions import portbindings
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import context
from networking_arista.ml2 import arista_sync
from networking_arista.ml2.mechanism_arista import AristaDriver
class AristaHASimulationDriver(AristaDriver):
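    # Simulate an HA deployment by running several sync workers and
    # dispatching each postcommit operation to a randomly chosen worker's
    # provision queue.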
def __init__(self):
super(AristaHASimulationDriver, self).__init__()
self.provision_queue1 = Queue()
self.provision_queue2 = Queue()
self.provision_queue3 = Queue()
self.provision_queues = [self.provision_queue1,
self.provision_queue2,
self.provision_queue3]
def get_workers(self):
return [arista_sync.AristaSyncWorker(self.provision_queue1),
arista_sync.AristaSyncWorker(self.provision_queue2),
arista_sync.AristaSyncWorker(self.provision_queue3)]
def create_network_postcommit(self, context):
self.provision_queue = random.choice(self.provision_queues)
super(AristaHASimulationDriver, self).create_network_postcommit(
context)
def update_network_postcommit(self, context):
self.provision_queue = random.choice(self.provision_queues)
super(AristaHASimulationDriver, self).update_network_postcommit(
context)
def delete_network_postcommit(self, context):
self.provision_queue = random.choice(self.provision_queues)
super(AristaHASimulationDriver, self).delete_network_postcommit(
context)
def update_port_postcommit(self, context):
self.provision_queue = random.choice(self.provision_queues)
super(AristaHASimulationDriver, self).update_port_postcommit(context)
def delete_port_postcommit(self, context):
self.provision_queue = random.choice(self.provision_queues)
super(AristaHASimulationDriver, self).delete_port_postcommit(context)
class AristaHAScaleSimulationDriver(AristaHASimulationDriver):
def __init__(self):
super(AristaHAScaleSimulationDriver, self).__init__()
self.context = None
self.plugin_rpc = None
def initialize(self):
super(AristaHAScaleSimulationDriver, self).initialize()
self.context = context.get_admin_context_without_session()
        # Subscribe to port updates to force ports to ACTIVE after
        # binding. Since a fake virt driver is being used, OVS will never
        # see the libvirt interfaces come up, which would normally trigger
        # the OVS provisioning.
self.plugin_rpc = agent_rpc.PluginApi(topics.PLUGIN)
registry.subscribe(self._port_update_callback,
resources.PORT, events.AFTER_UPDATE)
def _port_update_callback(self, resource, event, trigger, payload):
port = payload.latest_state
host = port.get(portbindings.HOST_ID)
vif_type = port.get(portbindings.VIF_TYPE)
device_dict = {'device': port['id'],
'agent_id': 'ovs-agent-%s' % host,
'host': host}
if vif_type == 'ovs':
self.plugin_rpc.update_device_up(self.context, **device_dict)
elif (port.get(portbindings.VNIC_TYPE) == 'normal' and
vif_type == 'unbound'):
self.plugin_rpc.update_device_down(self.context, **device_dict)
networking_arista-2023.1.0/networking_arista/tests/unit/ml2/ml2_test_base.py
# Copyright (c) 2018 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cProfile
import logging
from pstats import Stats
from eventlet import queue
import mock
from oslo_log import fixture as log_fixture
from neutron_lib.api.definitions import l3 as l3_const
from neutron_lib.api.definitions import portbindings
from neutron_lib import constants as n_const
import neutron_lib.context
from neutron_lib.plugins import constants as p_const
from neutron_lib.plugins import directory
from neutron_lib.services.trunk import constants as trunk_const
from oslo_config import cfg
from neutron.api.rpc.handlers import l3_rpc
from neutron.common import utils as common_utils
from neutron.plugins.ml2.drivers import type_vxlan # noqa
from neutron.tests.common import helpers
from neutron.tests.unit.plugins.ml2 import test_plugin
from networking_arista.common import db_lib
from networking_arista.ml2 import arista_sync
from networking_arista.tests.unit import utils
ENABLE_PROFILER = False
ENABLE_LOGGING = False
class MechTestBase(test_plugin.Ml2PluginV2TestCase):
"""Main test cases for Arista Mechanism driver.
Tests all mechanism driver APIs supported by Arista Driver. It invokes
    all the APIs as they would be invoked in real-world scenarios and
verifies the functionality.
"""
_mechanism_drivers = ['arista', 'openvswitch']
def get_additional_service_plugins(self):
p = super(MechTestBase, self).get_additional_service_plugins()
p.update({'trunk_plugin_name': 'trunk'})
return p
def setUp(self):
if ENABLE_LOGGING:
log_fixture.setlevel.logging.root.setLevel(logging.DEBUG)
self.useFixture(log_fixture.SetLogLevel(['neutron'],
logging.DEBUG))
if ENABLE_PROFILER:
self.pr = cProfile.Profile()
self.pr.enable()
utils.setup_arista_wrapper_config(cfg)
cfg.CONF.set_override('vni_ranges',
['10000:11000'],
group='ml2_type_vxlan')
super(MechTestBase, self).setUp()
self.plugin.notifier.port_update = self._mock_port_update
self.plugin.start_rpc_listeners()
self.host1 = 'host1'
self.host2 = 'host2'
self.host3 = 'host3'
# Hack to ensure agents report being alive
cfg.CONF.set_override('agent_down_time', 1000)
helpers.register_ovs_agent(
host=self.host1, bridge_mappings={self.physnet: 'br-eth1'})
helpers.register_ovs_agent(
host=self.host2, bridge_mappings={self.physnet: 'br-eth1'})
helpers.register_ovs_agent(
host=self.host3, bridge_mappings={self.physnet2: 'br-eth1'})
self.region = 'region'
self.cvx = utils.MockCvx(self.region)
self.drv = self.driver.mechanism_manager.mech_drivers['arista'].obj
        # multiprocessing.Queue's get() fails to wake up a thread, so swap
        # it out for a LightQueue for testing purposes
self.drv.provision_queue = queue.LightQueue()
for worker in self.driver._workers:
if isinstance(worker, arista_sync.AristaSyncWorker):
self.ar_sync = worker
self.ar_sync._rpc = self.cvx
self.ar_sync.provision_queue = self.drv.provision_queue
worker.start()
self.trunk_plugin = directory.get_plugin('trunk')
self.net_count = 0
def tearDown(self):
for worker in self.driver._workers:
worker.stop()
worker.wait()
self.cvx.endpoint_data.clear()
super(MechTestBase, self).tearDown()
if ENABLE_PROFILER:
p = Stats(self.pr)
p.strip_dirs()
p.sort_stats('cumtime')
p.print_stats()
def _mock_port_update(self, context, port, network_type, segmentation_id,
physical_network):
'''Simulates an L2 agent's response to a port_update notification
After binding a port, the ML2 plugin notifies all L2 agents of the
binding so that they can configure the datapath. Once they have
        done so, they call either update_device_up or update_device_list,
        which brings the port status to ACTIVE. This function simply calls
        update_device_list in response to a binding without actually doing
any datapath manipulation.
'''
plugin = directory.get_plugin()
rpc_plugin = plugin.endpoints[0]
host = port.get(portbindings.HOST_ID)
agent_id = 'ovs-agent-%s' % host
vif_type = port.get(portbindings.VIF_TYPE)
if port.get('device_owner') == trunk_const.TRUNK_SUBPORT_OWNER:
return
if vif_type == 'ovs':
device_list = []
device_list.append(port['id'])
if port.get('trunk_details'):
trunk_rpc = self.trunk_plugin._rpc_backend._skeleton
for s in port['trunk_details']['sub_ports']:
s['trunk_id'] = port['trunk_details']['trunk_id']
trunk_rpc.update_subport_bindings(
self.context, port['trunk_details']['sub_ports'])
device_list.append(s['port_id'])
devices_dict = {'devices_up': device_list,
'agent_id': agent_id,
'host': port.get(portbindings.HOST_ID)}
# This is a hack. When calling update_port_status from the rpc
# handler, the trunk_details db extension gets a new admin context
# in order to query the parent port's subports' mac address.
# Querying within the new context's session seems to somehow
# invalidate the transaction in update_port_status, which causes
# the status in the db to remain 'DOWN' in spite of an
# update_port_[pre/post]commit being sent indicating that the
# status is 'ACTIVE'. For now, I've found that using the same admin
# context in all queries resolves the issue. In my testing, this
# doesn't affect real environments using mysql and seems to be
# limited to sqlite
#
# Additional notes: If there is no transaction in progress when a
# query in the new context is issued, the update_port_status
            # commit succeeds (i.e. comment out the context.session.flush()
            # in update_individual_port_db_status).
with mock.patch.object(neutron_lib.context, 'get_admin_context',
return_value=self.context):
rpc_plugin.update_device_list(context, **devices_dict)
if port.get('trunk_details'):
trunk_rpc = self.trunk_plugin._rpc_backend._skeleton
trunk_rpc.update_trunk_status(
self.context, port['trunk_details']['trunk_id'], 'ACTIVE')
elif (port.get(portbindings.VNIC_TYPE) == 'normal' and
vif_type == 'unbound'):
device_dict = {'agent_id': agent_id,
'device': port['id'],
'host': port.get(portbindings.HOST_ID)}
rpc_plugin.update_device_down(context, **device_dict)
def create_network(self, net_dict):
network = net_dict.get('network', {})
network['mtu'] = network.get('mtu', 1450)
net = self.plugin.create_network(self.context, net_dict)
self.plugin.create_subnet(self.context,
{'subnet':
{'tenant_id': net['tenant_id'],
'name': net['name'],
'network_id': net['id'],
'ip_version': 4,
'cidr': '10.0.%d.0/24' % self.net_count,
'gateway_ip': '10.0.%d.1' % self.net_count,
'allocation_pools': None,
'enable_dhcp': False,
'dns_nameservers': None,
'host_routes': None}})
self.net_count += 1
n_ctxs = self.plugin.get_network_contexts(self.context, [net['id']])
return net, n_ctxs[net['id']]
def delete_network(self, net_id):
self.plugin.delete_network(self.context, net_id)
def create_port(self, port_dict):
minimal_port = {'port':
{'name': port_dict.get('name'),
'tenant_id': port_dict.get('tenant_id'),
'device_id': port_dict.get('device_id'),
'fixed_ips': port_dict.get('fixed_ips'),
'network_id': port_dict.get('network_id'),
'device_owner': '',
'admin_state_up': True}}
port = self.plugin.create_port(self.context, minimal_port)
full_port = {'port': port_dict}
port = self.plugin.update_port(self.context, port['id'], full_port)
p_ctx = self.plugin.get_bound_port_context(self.context, port['id'])
return port, p_ctx
def migrate_port(self, port_id, new_host):
port_dict = {'port': {'binding:host_id': new_host}}
port = self.plugin.update_port(self.context, port_id, port_dict)
p_ctx = self.plugin.get_bound_port_context(self.context, port_id)
return port, p_ctx
def migrate_dhcp_device(self, port_id, new_device):
port_dict = {'port':
{'device_id': n_const.DEVICE_ID_RESERVED_DHCP_PORT}}
port = self.plugin.update_port(self.context, port_id, port_dict)
port_dict = {'port': {'device_id': new_device}}
port = self.plugin.update_port(self.context, port_id, port_dict)
p_ctx = self.plugin.get_bound_port_context(self.context, port_id)
return port, p_ctx
def bind_trunk_to_host(self, port, device_id, host):
p_dict = {'port':
{'device_owner': n_const.DEVICE_OWNER_COMPUTE_PREFIX,
portbindings.HOST_ID: host,
'device_id': device_id}}
port = self.plugin.update_port(self.context, port['id'], p_dict)
p_ctx = self.plugin.get_bound_port_context(self.context, port['id'])
return port, p_ctx
def bind_subport_to_trunk(self, port, trunk):
parent = self.plugin.get_port(self.context, trunk['port_id'])
p_dict = {'port':
{portbindings.HOST_ID: parent.get(portbindings.HOST_ID),
'device_owner': trunk_const.TRUNK_SUBPORT_OWNER}}
port = self.plugin.update_port(self.context, port['id'], p_dict)
self.plugin.update_port_status(self.context, port['id'],
n_const.PORT_STATUS_ACTIVE)
def unbind_port_from_host(self, port_id):
p_dict = {'port':
{portbindings.HOST_ID: None,
'device_id': ''}}
port = self.plugin.update_port(self.context, port_id, p_dict)
return port
def bind_trunk_to_baremetal(self, port_id, device_id, host,
switch_id, switch_port):
p_dict = {'port':
{'device_id': device_id,
'device_owner': n_const.DEVICE_OWNER_BAREMETAL_PREFIX,
'binding:host_id': host,
'binding:profile': {'local_link_information': [
{'switch_id': switch_id,
'port_id': switch_port}]},
'binding:vnic_type': 'baremetal'}}
port = self.plugin.update_port(self.context, port_id, p_dict)
return port
def unbind_trunk_from_baremetal(self, port_id):
p_dict = {'port':
{'device_id': '',
'device_owner': '',
'binding:host_id': None,
'binding:profile': None,
'binding:vnic_type': None,
'status': n_const.PORT_STATUS_DOWN,
portbindings.VIF_TYPE: portbindings.VIF_TYPE_UNBOUND}}
self.plugin.update_port(self.context, port_id, p_dict)
def bind_dvr_to_host(self, port, host):
p_dict = {'port':
{'device_id': port['device_id'],
'device_owner': port['device_owner'],
portbindings.HOST_ID: host}}
self.plugin.update_distributed_port_binding(self.context,
port['id'], p_dict)
self.plugin.update_port_status(self.context, port['id'],
n_const.PORT_STATUS_ACTIVE,
host)
p_ctx = self.plugin.get_bound_port_context(self.context, port['id'],
host)
return port, p_ctx
def unbind_dvr_from_host(self, port, host):
self.plugin.update_port_status(
self.context, port['id'], n_const.PORT_STATUS_DOWN, host)
def set_port_to_error_state(self, port):
self.plugin.update_port_status(
self.context, port['id'], n_const.PORT_STATUS_ERROR)
def delete_port(self, port_id):
self.plugin.delete_port(self.context, port_id)
def _get_endpoint(self, resource_type):
endpoint_map = {
'tenant': 'region/%s/tenant' % self.region,
'network': 'region/%s/network' % self.region,
'segment': 'region/%s/segment' % self.region,
'dhcp': 'region/%s/dhcp' % self.region,
'router': 'region/%s/router' % self.region,
'vm': 'region/%s/vm' % self.region,
'baremetal': 'region/%s/baremetal' % self.region,
'dhcp_port': 'region/%s/port?type=dhcp' % self.region,
'router_port': 'region/%s/port?type=router' % self.region,
'vm_port': 'region/%s/port?type=vm' % self.region,
'baremetal_port': 'region/%s/port?type=baremetal' % self.region,
'port_binding': 'region/%s/portbinding' % self.region}
return endpoint_map[resource_type]
def _assertResourceCreated(self, resource_type, resource_id):
endpoint = self._get_endpoint(resource_type)
def resource_created():
return resource_id in self.cvx.endpoint_data[endpoint].keys()
common_utils.wait_until_true(resource_created)
def _assertResourceDeleted(self, resource_type, resource_id):
endpoint = self._get_endpoint(resource_type)
def resource_deleted():
return resource_id not in self.cvx.endpoint_data[endpoint].keys()
common_utils.wait_until_true(resource_deleted)
def assertTenantCreated(self, tenant_id):
self._assertResourceCreated('tenant', tenant_id)
def assertTenantDeleted(self, tenant_id):
self._assertResourceDeleted('tenant', tenant_id)
def assertNetworkCreated(self, network_id):
self._assertResourceCreated('network', network_id)
def assertNetworkDeleted(self, network_id):
self._assertResourceDeleted('network', network_id)
def assertSegmentCreated(self, segment_id):
self._assertResourceCreated('segment', segment_id)
def assertSegmentDeleted(self, segment_id):
self._assertResourceDeleted('segment', segment_id)
def assertDhcpCreated(self, instance_id):
self._assertResourceCreated('dhcp', instance_id)
def assertDhcpDeleted(self, instance_id):
self._assertResourceDeleted('dhcp', instance_id)
def assertRouterCreated(self, instance_id):
self._assertResourceCreated('router', instance_id)
def assertRouterDeleted(self, instance_id):
self._assertResourceDeleted('router', instance_id)
def assertVmCreated(self, instance_id):
self._assertResourceCreated('vm', instance_id)
def assertVmDeleted(self, instance_id):
self._assertResourceDeleted('vm', instance_id)
def assertBaremetalCreated(self, instance_id):
self._assertResourceCreated('baremetal', instance_id)
def assertBaremetalDeleted(self, instance_id):
self._assertResourceDeleted('baremetal', instance_id)
def assertDhcpPortCreated(self, port_id):
self._assertResourceCreated('dhcp_port', port_id)
def assertDhcpPortDeleted(self, port_id):
self._assertResourceDeleted('dhcp_port', port_id)
def assertRouterPortCreated(self, port_id):
self._assertResourceCreated('router_port', port_id)
def assertRouterPortDeleted(self, port_id):
self._assertResourceDeleted('router_port', port_id)
def assertVmPortCreated(self, port_id):
self._assertResourceCreated('vm_port', port_id)
def assertVmPortDeleted(self, port_id):
self._assertResourceDeleted('vm_port', port_id)
def assertBaremetalPortCreated(self, port_id):
self._assertResourceCreated('baremetal_port', port_id)
def assertBaremetalPortDeleted(self, port_id):
self._assertResourceDeleted('baremetal_port', port_id)
def assertPortBindingCreated(self, pb_key):
self._assertResourceCreated('port_binding', pb_key)
def assertPortBindingDeleted(self, pb_key):
self._assertResourceDeleted('port_binding', pb_key)
class L3HARouterTestFramework(MechTestBase):
L3Plugin = ('neutron.services.l3_router.l3_router_plugin.'
'L3RouterPlugin')
def get_additional_service_plugins(self):
p = super(L3HARouterTestFramework,
self).get_additional_service_plugins()
p.update({'flavors_plugin_name': 'neutron.services.flavors.'
'flavors_plugin.FlavorsPlugin'})
p.update({'l3_plugin_name': self.L3Plugin})
return p
def _register_l3_agent(self, host):
return helpers.register_l3_agent(host=host)
def setUp(self):
super(L3HARouterTestFramework, self).setUp()
self.l3_plugin = directory.get_plugin(p_const.L3)
self.l3_rpc_cb = l3_rpc.L3RpcCallback()
self.ext_net = None
def create_router(self, ha=False, ext_net=None):
router_dict = {'router': {'name': 'router',
'admin_state_up': True,
'tenant_id': self._tenant_id,
'ha': ha,
'distributed': False}}
if ext_net:
router_dict['router'][l3_const.EXTERNAL_GW_INFO] = {'network_id':
ext_net['id']}
router = self.l3_plugin.create_router(self.context, router_dict)
self.l3_plugin.schedule_router(self.context, router['id'])
for host in self.l3_agents:
self.sync_routers(router['id'], host['host'])
return router
def sync_routers(self, router_id, host):
"""Call to L3 Agent plugin sync_routers
Since only l3 agent plugin is enabled in the unit test, to get the
sync data for routers (which causes port update), we need to call the
l3 agent plugin sync_routers method directly.
This call is normally done by a process running periodically on
l3 agent server (neutron-l3-agent).
"""
self.l3_rpc_cb.sync_routers(self.context, host=host,
router_ids=[router_id])
def add_router_interface(self, router, interface_info):
return self.l3_plugin.add_router_interface(self.context, router['id'],
interface_info)
def remove_router_interface(self, router, interface_info):
return self.l3_plugin.remove_router_interface(self.context,
router['id'],
interface_info)
def delete_router(self, router_id):
return self.l3_plugin.delete_router(self.context, router_id)
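    # The L3 plugin creates the HA network implicitly, so it can only be
    # looked up by its generated name (n_const.HA_NETWORK_NAME filled in
    # with the router's project id).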
def get_ha_network(self, router):
networks = [getattr(net, 'Network') for net in db_lib.get_networks()]
HA_network_name = n_const.HA_NETWORK_NAME % router['project_id']
net_id = [net.id for net in networks if net.name == HA_network_name]
return net_id[0]
def get_network_segments(self, network_id):
segments = db_lib.get_segments()
net_segs = [seg.id for seg in segments if seg.network_id == network_id]
return net_segs
def update_routers_states(self, router_id, l3_agent):
binding_l3_agent_host = l3_agent['host']
binding_l3_agent_id = l3_agent['id']
self.l3_plugin.update_routers_states(self.context,
{router_id: 'active'},
binding_l3_agent_host)
bindings = self.l3_plugin.get_ha_router_port_bindings(self.context,
[router_id])
binding = [binding for binding in bindings
if binding.l3_agent_id == binding_l3_agent_id][0]
return self.plugin.get_port(self.context, binding.port_id)
def get_legacy_router_port(self, port_id):
return self.plugin.get_port(self.context, port_id)
def assertL3HANetworkCreated(self, router, net_id):
endpoint = self._get_endpoint('network')
HA_network_name = n_const.HA_NETWORK_NAME % router['project_id']
def resource_created():
cvx_data = self.cvx.endpoint_data[endpoint]
return (cvx_data and net_id in cvx_data and
HA_network_name == cvx_data[net_id]['name'])
common_utils.wait_until_true(resource_created)
def assertLegacyRouterCreated(self, router, host):
endpoint = self._get_endpoint('router')
def resource_created():
cvx_data = list(self.cvx.endpoint_data[endpoint].values())
expected_data = {'hostId': '(see router ports)',
'tenantId': router['project_id'],
'id': router['id']}
return cvx_data and cvx_data[0] == expected_data
common_utils.wait_until_true(resource_created)
def assertL3HARouterCreated(self, router):
endpoint = self._get_endpoint('router')
def resource_created():
cvx_data = list(self.cvx.endpoint_data[endpoint].values())
expected_data = {'hostId': '(see router ports)',
'tenantId': router['project_id'],
'id': router['id']}
return cvx_data and cvx_data[0] == expected_data
common_utils.wait_until_true(resource_created)
def assertL3HAPortCreated(self, router, port_id):
endpoint = self._get_endpoint('router_port')
port_name = n_const.HA_PORT_NAME % router['project_id']
def resource_created():
cvx_data = self.cvx.endpoint_data[endpoint]
return (cvx_data and port_id in cvx_data and all(
[cvx_data[port_id]['tenantId'] == router['project_id'],
cvx_data[port_id]['portName'] == port_name,
cvx_data[port_id]['instanceId'] == router['id']]))
common_utils.wait_until_true(resource_created)
def assertSegmentsCreated(self, segments):
segment_endpoint = self._get_endpoint('segment')
def resource_created():
cvx_segments = self.cvx.endpoint_data[segment_endpoint].keys()
return cvx_segments and set(cvx_segments) == set(segments)
common_utils.wait_until_true(resource_created)
def assertSegmentsDeleted(self, segments):
segment_endpoint = self._get_endpoint('segment')
def resource_deleted():
cvx_data = self.cvx.endpoint_data[segment_endpoint]
cvx_segments = cvx_data.keys()
return all(seg not in cvx_segments for seg in segments)
common_utils.wait_until_true(resource_deleted)
def assertRouterPortsDeleted(self, ports):
endpoint = self._get_endpoint('router_port')
def resource_deleted():
cvx_data = self.cvx.endpoint_data[endpoint]
return all(port not in cvx_data for port in ports)
common_utils.wait_until_true(resource_deleted)
networking_arista-2023.1.0/networking_arista/tests/unit/ml2/rpc/
networking_arista-2023.1.0/networking_arista/tests/unit/ml2/rpc/__init__.py
networking_arista-2023.1.0/networking_arista/tests/unit/ml2/rpc/test_arista_eapi_rpc_wrapper.py
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from oslo_config import cfg
from neutron.tests import base
from neutron.tests.unit import testlib_api
from neutron_lib.api.definitions import portbindings
from networking_arista.common import exceptions as arista_exc
from networking_arista.ml2.rpc import arista_eapi
from networking_arista.tests.unit import utils
def setup_valid_config():
utils.setup_arista_wrapper_config(cfg)
class AristaRPCWrapperInvalidConfigTestCase(base.BaseTestCase):
"""Negative test cases to test the Arista Driver configuration."""
def setUp(self):
super(AristaRPCWrapperInvalidConfigTestCase, self).setUp()
self.setup_invalid_config() # Invalid config, required options not set
def setup_invalid_config(self):
utils.setup_arista_wrapper_config(cfg, host='', user='')
def test_raises_exception_on_wrong_configuration(self):
self.assertRaises(arista_exc.AristaConfigError,
arista_eapi.AristaRPCWrapperEapi)
class NegativeRPCWrapperTestCase(testlib_api.SqlTestCase):
"""Negative test cases to test the RPC between Arista Driver and EOS."""
def setUp(self):
super(NegativeRPCWrapperTestCase, self).setUp()
setup_valid_config()
def test_exception_is_raised_on_json_server_error(self):
drv = arista_eapi.AristaRPCWrapperEapi()
drv.api_request = mock.MagicMock(
side_effect=Exception('server error')
)
with mock.patch.object(arista_eapi.LOG, 'error') as log_err:
self.assertRaises(arista_exc.AristaRpcError,
drv._run_openstack_cmds, [])
log_err.assert_called_once_with(mock.ANY)
class GetPhysnetTestCase(base.BaseTestCase):
"""Test cases to validate parsing of topology output to find physnets"""
def setUp(self):
super(GetPhysnetTestCase, self).setUp()
setup_valid_config()
def _test_get_host_physnet(self, nova_fqdn, topo_host_fqdn,
topo_switch_fqdn, bridge_map_fqdn, use_fqdn,
use_fqdn_physnet):
cfg.CONF.set_override('use_fqdn', use_fqdn, "ml2_arista")
cfg.CONF.set_override('use_fqdn_physnet', use_fqdn_physnet,
"ml2_arista")
context = mock.MagicMock()
nova_host1 = 'host1.full.name' if nova_fqdn else 'host1'
nova_host2 = 'host2.full.name' if nova_fqdn else 'host2'
topo_host1 = 'host1.full.name' if topo_host_fqdn else 'host1'
topo_host2 = 'host2.full.name' if topo_host_fqdn else 'host2'
topo_switch1 = 'switch1.full.name' if topo_switch_fqdn else 'switch1'
topo_switch2 = 'switch2.full.name' if topo_switch_fqdn else 'switch2'
bridge_map_switch1 = ('switch1.full.name' if bridge_map_fqdn
else 'switch1')
bridge_map_switch2 = ('switch2.full.name' if bridge_map_fqdn
else 'switch2')
context.host = nova_host1
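        # Neighbor topology as the mocked _run_eos_cmds returns it below:
        # each host's et1 uplinks to Ethernet1 on its corresponding switch.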
topology = [{'neighbors':
{'%s-et1' % topo_host1:
{'fromPort':
{'name': 'et1',
'hostname': topo_host1,
'hostid': '00:00:00:00:00:00'},
'toPort': [
{'name': 'Ethernet1',
'hostname': topo_switch1,
'hostid': '00:00:00:00:00:01'}]},
'%s-et1' % topo_host2:
{'fromPort':
{'name': 'et1',
'hostname': topo_host2,
'hostid': '00:00:00:00:00:02'},
'toPort': [
{'name': 'Ethernet1',
'hostname': topo_switch2,
'hostid': '00:00:00:00:00:03'}]}}}]
drv = arista_eapi.AristaRPCWrapperEapi()
drv._run_eos_cmds = mock.MagicMock()
drv._run_eos_cmds.return_value = topology
self.assertEqual(drv.get_host_physnet(context), bridge_map_switch1)
context.host = nova_host2
self.assertEqual(drv.get_host_physnet(context), bridge_map_switch2)
def _test_get_baremetal_physnet(self, topo_switch_fqdn, bridge_map_fqdn,
use_fqdn_physnet):
cfg.CONF.set_override('use_fqdn_physnet', use_fqdn_physnet,
"ml2_arista")
context = mock.MagicMock()
topo_switch1 = 'switch1.full.name' if topo_switch_fqdn else 'switch1'
topo_switch2 = 'switch2.full.name' if topo_switch_fqdn else 'switch2'
bridge_map_switch1 = ('switch1.full.name' if bridge_map_fqdn
else 'switch1')
bridge_map_switch2 = ('switch2.full.name' if bridge_map_fqdn
else 'switch2')
context.host = 'host1'
context.current = {portbindings.PROFILE: {
'local_link_information': [{'switch_id': '00:00:00:00:00:00'}]}}
topology = [{'hosts':
{'00:00:00:00:00:00': {'name': '00:00:00:00:00:00',
'hostname': topo_switch1},
'00:00:00:00:00:01': {'name': '00:00:00:00:00:01',
'hostname': topo_switch2}}}]
drv = arista_eapi.AristaRPCWrapperEapi()
drv._run_eos_cmds = mock.MagicMock()
drv._run_eos_cmds.return_value = topology
self.assertEqual(drv.get_baremetal_physnet(context),
bridge_map_switch1)
context.host = 'host2'
context.current = {portbindings.PROFILE: {
'local_link_information': [{'switch_id': '00:00:00:00:00:01'}]}}
self.assertEqual(drv.get_baremetal_physnet(context),
bridge_map_switch2)
def test_get_host_physnet(self):
for nova_fqdn in (True, False):
for topo_host_fqdn in (True, False):
for topo_switch_fqdn in (True, False):
for bridge_map_fqdn in (True, False):
if bridge_map_fqdn and not topo_switch_fqdn:
# Topology has less info than bridge map.
# This isn't supported
continue
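                        # If nova reports FQDNs but the topology only has
                        # short hostnames, the driver must run with
                        # use_fqdn=False.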
use_fqdn = True
if nova_fqdn and not topo_host_fqdn:
use_fqdn = False
use_fqdn_physnet = True
if topo_switch_fqdn and not bridge_map_fqdn:
use_fqdn_physnet = False
self._test_get_host_physnet(nova_fqdn,
topo_host_fqdn,
topo_switch_fqdn,
bridge_map_fqdn,
use_fqdn,
use_fqdn_physnet)
def test_get_baremetal_physnet(self):
for topo_switch_fqdn in (True, False):
for bridge_map_fqdn in (True, False):
if bridge_map_fqdn and not topo_switch_fqdn:
# Topology has less info than bridge map.
# This isn't supported.
continue
use_fqdn_physnet = True
if topo_switch_fqdn and not bridge_map_fqdn:
use_fqdn_physnet = False
self._test_get_baremetal_physnet(topo_switch_fqdn,
bridge_map_fqdn,
use_fqdn_physnet)
networking_arista-2023.1.0/networking_arista/tests/unit/ml2/rpc/test_arista_json_rpc_wrapper.py
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import functools
import operator
import socket
import mock
from mock import patch
from neutron_lib.plugins import constants as plugin_constants
from neutron_lib.plugins import directory
from oslo_config import cfg
from oslo_utils import importutils
from neutron.tests.unit import testlib_api
from networking_arista.ml2.rpc import arista_json
from networking_arista.tests.unit import utils
BASE_RPC = "networking_arista.ml2.rpc.arista_json.AristaRPCWrapperJSON."
JSON_SEND_FUNC = BASE_RPC + "send_api_request"
RAND_FUNC = BASE_RPC + "_get_random_name"
DB_LIB_MODULE = 'networking_arista.ml2.rpc.arista_json.db_lib'
def setup_valid_config():
utils.setup_arista_wrapper_config(cfg)
class _UnorderedDictList(list):
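    """A list that compares equal to another list regardless of order.
    If the elements are dicts containing sort_key, equality is checked
    after sorting both sides on that key; otherwise normal list equality
    applies.
    """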
def __init__(self, iterable='', sort_key=None):
super(_UnorderedDictList, self).__init__(iterable)
try:
(self[0] or {})[sort_key]
self.sort_key = sort_key
except (IndexError, KeyError):
self.sort_key = None
def __eq__(self, other):
if isinstance(other, list) and self.sort_key:
key = operator.itemgetter(self.sort_key)
return sorted(self, key=key) == sorted(other, key=key)
else:
return super(_UnorderedDictList, self).__eq__(other)
class TestAristaJSONRPCWrapper(testlib_api.SqlTestCase):
def setUp(self):
super(TestAristaJSONRPCWrapper, self).setUp()
plugin_klass = importutils.import_class(
"neutron.db.db_base_plugin_v2.NeutronDbPluginV2")
directory.add_plugin(plugin_constants.CORE, plugin_klass())
setup_valid_config()
self.drv = arista_json.AristaRPCWrapperJSON()
self.drv._server_ip = "10.11.12.13"
self.region = 'RegionOne'
def _verify_send_api_request_call(self, mock_send_api_req, calls,
unordered_dict_list=False):
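        # Optionally wrap the expected payloads so that lists of dicts
        # compare equal regardless of element order.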
if unordered_dict_list:
wrapper = functools.partial(_UnorderedDictList, sort_key='id')
else:
def wrapper(x):
return x
expected_calls = [
mock.call(c[0], c[1], *(wrapper(d) for d in c[2:])) for c in calls
]
mock_send_api_req.assert_has_calls(expected_calls, any_order=True)
@patch(JSON_SEND_FUNC)
def test_register_with_eos(self, mock_send_api_req):
self.drv.register_with_eos()
calls = [
('region/RegionOne', 'PUT',
[{'name': 'RegionOne', 'syncInterval': 1}])
]
self._verify_send_api_request_call(mock_send_api_req, calls)
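    # Stands in for AristaRPCWrapperJSON._get_random_name (via the RAND_FUNC
    # patches) so that sync request IDs are deterministic in tests.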
def _get_random_name(self):
return 'thisWillBeRandomInProd'
@patch(JSON_SEND_FUNC)
@patch(RAND_FUNC, _get_random_name)
def test_sync_start(self, mock_send_api_req):
mock_send_api_req.side_effect = [
[{'name': 'RegionOne', 'syncStatus': '',
'syncInterval': self.drv.sync_interval}],
[{}]
]
assert self.drv.sync_start()
calls = [
('region/RegionOne/sync', 'POST',
{'requester': socket.gethostname().split('.')[0],
'requestId': self._get_random_name()})
]
self._verify_send_api_request_call(mock_send_api_req, calls)
@patch('requests.Response')
def test_sync_start_exception(self, mock_response):
mock_response.ok.return_value = False
self.assertFalse(self.drv.sync_start())
@patch(JSON_SEND_FUNC)
def test_sync_start_no_region(self, mock_send_api_req):
mock_send_api_req.return_value = {}
self.assertFalse(self.drv.sync_start())
calls = [
('region/RegionOne', 'GET'),
('region/', 'POST', [{'name': 'RegionOne'}])
]
self._verify_send_api_request_call(mock_send_api_req, calls)
def _get_region(self, region):
return {'name': region, 'syncStatus': 'syncTimedout',
'syncInterval': self.sync_interval}
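    # A stale sync name is recorded below, so the new sync start must go out
    # with an empty X-Sync-ID header; assert on the raw requests.post call.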
@patch('requests.post')
@patch(BASE_RPC + 'get_region', _get_region)
@patch(BASE_RPC + '_get_eos_master', lambda _: 'cvx')
@patch(RAND_FUNC, _get_random_name)
def test_sync_start_after_failure(self, mock_post):
self.drv.current_sync_name = 'bad-sync-id'
self.assertTrue(self.drv.sync_start())
expected_header = {'Content-Type': 'application/json',
'Accept': 'application/json',
'X-Sync-ID': None}
mock_post.assert_called_once_with(mock.ANY,
data=mock.ANY,
timeout=mock.ANY,
verify=mock.ANY,
headers=expected_header)
@patch(JSON_SEND_FUNC)
@patch(RAND_FUNC, _get_random_name)
def test_sync_start_incorrect_interval(self, mock_send_api_req):
mock_send_api_req.side_effect = [
[{'name': 'RegionOne', 'syncStatus': '',
'syncInterval': 0.0}],
[{}],
[{'syncStatus': 'syncInProgress',
'requestId': self._get_random_name()}]
]
assert self.drv.sync_start()
calls = [
('region/RegionOne', 'PUT',
[{'name': 'RegionOne',
'syncInterval': self.drv.sync_interval}]),
('region/RegionOne/sync', 'POST',
{'requester': socket.gethostname().split('.')[0],
'requestId': self._get_random_name()})
]
self._verify_send_api_request_call(mock_send_api_req, calls)
@patch(JSON_SEND_FUNC)
@patch(RAND_FUNC, _get_random_name)
def test_sync_end(self, mock_send_api_req):
mock_send_api_req.return_value = [{'requester':
self._get_random_name()}]
self.drv.current_sync_name = self._get_random_name()
self.assertTrue(self.drv.sync_end())
calls = [
('region/RegionOne/sync', 'DELETE')
]
self._verify_send_api_request_call(mock_send_api_req, calls)
@patch(JSON_SEND_FUNC)
def test_create_region(self, mock_send_api_req):
self.drv.create_region('foo')
calls = [('region/', 'POST', [{'name': 'foo'}])]
self._verify_send_api_request_call(mock_send_api_req, calls)
@patch('requests.Response')
def test_get_region_exception(self, mock_response):
mock_response.ok.return_value = False
self.assertIsNone(self.drv.get_region('foo'))
@patch('requests.Response')
def test_get_cvx_uuid_exception(self, mock_response):
mock_response.ok.return_value = False
self.assertIsNone(self.drv.get_cvx_uuid())
networking_arista-2023.1.0/networking_arista/tests/unit/ml2/security_groups/
networking_arista-2023.1.0/networking_arista/tests/unit/ml2/security_groups/__init__.py
networking_arista-2023.1.0/networking_arista/tests/unit/ml2/security_groups/sg_test_base.py
# Copyright (c) 2018 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron_lib.plugins import directory
from networking_arista.tests.unit.ml2 import ml2_test_base
from networking_arista.tests.unit import utils
class SecurityGroupTestBase(ml2_test_base.MechTestBase):
def get_additional_service_plugins(self):
p = super(SecurityGroupTestBase, self).get_additional_service_plugins()
p.update({'arista_security_group_plugin': 'arista_security_group'})
return p
def setUp(self):
super(SecurityGroupTestBase, self).setUp()
self.arista_sec_gp_plugin = directory.get_plugin(
'arista_security_group')
self.switch1 = utils.MockSwitch()
self.switch2 = utils.MockSwitch()
self.switches = {'TOR1': self.switch1,
'TOR2': self.switch2}
self.arista_sec_gp_plugin._switches = self.switches
self.arista_sec_gp_plugin._port_group_info['TOR1'] = {
'Ethernet1': {'interfaceMembership': ''},
'Ethernet2': {'interfaceMembership': ''}}
self.arista_sec_gp_plugin._port_group_info['TOR2'] = {
'Ethernet1': {'interfaceMembership': ''},
'Ethernet2': {'interfaceMembership': ''}}
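    # Simulate LAG membership by tagging the interface in the cached port
    # group info; ACLs are then applied to the port channel rather than the
    # physical interface.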
def create_port_channel(self, switch, interface, pc_name):
intf_info = self.arista_sec_gp_plugin._port_group_info[switch]
intf_info[interface]['interfaceMembership'] = 'Member of %s' % pc_name
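    # Note: for ICMP rules, range_min and range_max carry the ICMP message
    # type and code respectively (see the ICMP test cases).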
def create_sg_rule(self, direction, proto, cidr, range_min=None,
range_max=None, ethertype='IPv4', default=True,
sg_id=None):
if sg_id is None:
sec_group = {'security_group':
{'name': 'sg1',
'tenant_id': 't1',
'description': ''}}
grp = self.plugin.create_security_group(self.context, sec_group,
default_sg=default)
sg_id = grp['id']
for switch in self.switches.values():
switch.clear_received_commands()
rule = {'security_group_rule':
{'direction': direction,
'ethertype': ethertype,
'protocol': proto,
'remote_ip_prefix': cidr,
'port_range_min': range_min,
'port_range_max': range_max,
'security_group_id': sg_id,
'remote_group_id': None,
'tenant_id': 't1',
'remote_address_group_id': None}}
rule = self.plugin.create_security_group_rule(self.context, rule)
return sg_id, rule
networking_arista-2023.1.0/networking_arista/tests/unit/ml2/security_groups/test_arista_security_groups.py
# Copyright (c) 2018 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron_lib import constants as n_const
from networking_arista.tests.unit.ml2.security_groups import sg_test_base
class SecurityGroupCallbacksTestCase(sg_test_base.SecurityGroupTestBase):
def test_create_security_group(self):
sec_group = {'security_group':
{'name': 'sg1',
'tenant_id': 't1',
'description': ''}}
grp = self.plugin.create_security_group(self.context, sec_group,
default_sg=True)
expected_eapi_commands = [
'enable',
'configure',
'ip access-list SG-INGRESS-%s dynamic' % grp['id'],
'exit',
'ip access-list SG-EGRESS-%s dynamic' % grp['id'],
'exit',
'exit']
for switch in self.switches.values():
self.assertEqual(expected_eapi_commands,
switch.received_commands)
def test_delete_security_group(self):
sec_group = {'security_group':
{'name': 'sg1',
'tenant_id': 't1',
'description': ''}}
grp = self.plugin.create_security_group(self.context, sec_group,
default_sg=True)
for switch in self.switches.values():
switch.clear_received_commands()
self.plugin.delete_security_group(self.context, grp['id'])
expected_eapi_commands = [
'enable',
'configure',
'no ip access-list SG-INGRESS-%s' % grp['id'],
'no ip access-list SG-EGRESS-%s' % grp['id'],
'exit']
for switch in self.switches.values():
self.assertEqual(expected_eapi_commands,
switch.received_commands)
def test_create_security_group_rule_ingress(self):
direction = 'ingress'
proto = 'tcp'
cidr = '10.0.0.0/24'
grp_id, _ = self.create_sg_rule(direction, proto, cidr)
expected_eapi_commands = [
'enable',
'configure',
'ip access-list SG-INGRESS-%s dynamic' % grp_id,
'permit %s %s any' % (proto, cidr),
'exit',
'exit']
for switch in self.switches.values():
self.assertEqual(expected_eapi_commands,
switch.received_commands)
def test_create_security_group_rule_egress(self):
direction = 'egress'
proto = 'tcp'
cidr = '10.0.0.0/24'
grp_id, _ = self.create_sg_rule(direction, proto, cidr)
expected_eapi_commands = [
'enable',
'configure',
'ip access-list SG-EGRESS-%s dynamic' % grp_id,
'permit %s any %s' % (proto, cidr),
'exit',
'exit']
for switch in self.switches.values():
self.assertEqual(expected_eapi_commands,
switch.received_commands)
def test_create_security_group_rule_tcp(self):
direction = 'egress'
proto = 'tcp'
cidr = '10.0.0.0/24'
grp_id, _ = self.create_sg_rule(direction, proto, cidr)
expected_eapi_commands = [
'enable',
'configure',
'ip access-list SG-EGRESS-%s dynamic' % grp_id,
'permit %s any %s' % (proto, cidr),
'exit',
'exit']
for switch in self.switches.values():
self.assertEqual(expected_eapi_commands,
switch.received_commands)
def test_create_security_group_rule_udp(self):
direction = 'egress'
proto = 'udp'
cidr = '10.0.0.0/24'
grp_id, _ = self.create_sg_rule(direction, proto, cidr)
expected_eapi_commands = [
'enable',
'configure',
'ip access-list SG-EGRESS-%s dynamic' % grp_id,
'permit %s any %s' % (proto, cidr),
'exit',
'exit']
for switch in self.switches.values():
self.assertEqual(expected_eapi_commands,
switch.received_commands)
def test_create_security_group_rule_port_range(self):
direction = 'egress'
proto = 'tcp'
cidr = '10.0.0.0/24'
range_min = 100
range_max = 200
grp_id, _ = self.create_sg_rule(direction, proto, cidr,
range_min, range_max)
expected_eapi_commands = [
'enable',
'configure',
'ip access-list SG-EGRESS-%s dynamic' % grp_id,
'permit %s any %s range %s %s' % (proto, cidr,
range_min, range_max),
'exit',
'exit']
for switch in self.switches.values():
self.assertEqual(expected_eapi_commands,
switch.received_commands)
def test_create_security_group_rule_basic_icmp(self):
direction = 'egress'
proto = 'icmp'
cidr = '10.0.0.0/24'
grp_id, _ = self.create_sg_rule(direction, proto, cidr)
expected_eapi_commands = [
'enable',
'configure',
'ip access-list SG-EGRESS-%s dynamic' % grp_id,
'permit %s any %s' % (proto, cidr),
'exit',
'exit']
for switch in self.switches.values():
self.assertEqual(expected_eapi_commands,
switch.received_commands)
def test_create_security_group_rule_icmp_type(self):
direction = 'egress'
proto = 'icmp'
cidr = '10.0.0.0/24'
message_type = 10
grp_id, _ = self.create_sg_rule(direction, proto, cidr, message_type)
expected_eapi_commands = [
'enable',
'configure',
'ip access-list SG-EGRESS-%s dynamic' % grp_id,
'permit %s any %s %s' % (proto, cidr, message_type),
'exit',
'exit']
for switch in self.switches.values():
self.assertEqual(expected_eapi_commands,
switch.received_commands)
def test_create_security_group_rule_icmp_code(self):
direction = 'egress'
proto = 'icmp'
cidr = '10.0.0.0/24'
message_type = 10
message_code = 100
grp_id, _ = self.create_sg_rule(direction, proto, cidr, message_type,
message_code)
expected_eapi_commands = [
'enable',
'configure',
'ip access-list SG-EGRESS-%s dynamic' % grp_id,
'permit %s any %s %s %s' % (proto, cidr, message_type,
message_code),
'exit',
'exit']
for switch in self.switches.values():
self.assertEqual(expected_eapi_commands,
switch.received_commands)
def test_create_security_group_no_ip(self):
direction = 'egress'
proto = 'tcp'
cidr = None
grp_id, _ = self.create_sg_rule(direction, proto, cidr)
expected_eapi_commands = [
'enable',
'configure',
'ip access-list SG-EGRESS-%s dynamic' % grp_id,
'permit %s any any' % proto,
'exit',
'exit']
for switch in self.switches.values():
self.assertEqual(expected_eapi_commands,
switch.received_commands)
def test_create_security_group_ipv6(self):
direction = 'egress'
proto = 'tcp'
cidr = None
ethertype = 'IPv6'
grp_id, _ = self.create_sg_rule(direction, proto, cidr,
ethertype=ethertype)
for switch in self.switches.values():
self.assertEqual([], switch.received_commands)
def test_delete_security_group_rule(self):
direction = 'egress'
proto = 'tcp'
cidr = '10.0.0.0/24'
grp_id, rule = self.create_sg_rule(direction, proto, cidr)
for switch in self.switches.values():
switch.clear_received_commands()
self.plugin.delete_security_group_rule(self.context, rule['id'])
expected_eapi_commands = [
'enable',
'configure',
'ip access-list SG-EGRESS-%s dynamic' % grp_id,
'no permit %s any %s' % (proto, cidr),
'exit',
'exit']
for switch in self.switches.values():
self.assertEqual(expected_eapi_commands,
switch.received_commands)
def test_apply_security_group(self):
switch_port = 'Ethernet1'
switch_id = '11:22:33:44:55'
switch_info = 'TOR1'
grp_id, rule = self.create_sg_rule('egress', 'tcp', '10.0.0.0/24')
for switch in self.switches.values():
switch.clear_received_commands()
net_dict = {'network': {'name': 'net',
'tenant_id': 't1',
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
network, _ = self.create_network(net_dict)
port_dict = {'name': 'port1',
'tenant_id': 't1',
'network_id': network['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': 'bm1',
'device_owner': n_const.DEVICE_OWNER_BAREMETAL_PREFIX,
'binding:host_id': 'bm-host',
'binding:profile': {'local_link_information': [
{'switch_id': switch_id,
'port_id': switch_port,
'switch_info': switch_info}]},
'binding:vnic_type': 'baremetal',
'security_groups': [grp_id]}
port, _ = self.create_port(port_dict)
expected_eapi_commands = [
'show interfaces',
'enable',
'configure',
'interface %s' % switch_port,
'ip access-group SG-INGRESS-%s out' % grp_id,
'ip access-group SG-EGRESS-%s in' % grp_id,
'exit',
'exit']
# SGs are applied on binding and on status DOWN->UP,
# so expect the commands twice
expected_eapi_commands.extend(expected_eapi_commands)
self.assertEqual(expected_eapi_commands,
self.switch1.received_commands)
self.assertEqual([], self.switch2.received_commands)
def test_apply_security_group_lag(self):
switch_port = 'Ethernet1'
port_channel = 'Port-Channel100'
switch_id = '11:22:33:44:55'
switch_info = 'TOR1'
self.create_port_channel(switch_info, switch_port, port_channel)
grp_id, rule = self.create_sg_rule('egress', 'tcp', '10.0.0.0/24')
for switch in self.switches.values():
switch.clear_received_commands()
net_dict = {'network': {'name': 'net',
'tenant_id': 't1',
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
network, _ = self.create_network(net_dict)
port_dict = {'name': 'port1',
'tenant_id': 't1',
'network_id': network['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': 'bm1',
'device_owner': n_const.DEVICE_OWNER_BAREMETAL_PREFIX,
'binding:host_id': 'bm-host',
'binding:profile': {'local_link_information': [
{'switch_id': switch_id,
'port_id': switch_port,
'switch_info': switch_info}]},
'binding:vnic_type': 'baremetal',
'security_groups': [grp_id]}
port, _ = self.create_port(port_dict)
expected_eapi_commands = [
'show interfaces',
'enable',
'configure',
'interface %s' % port_channel,
'ip access-group SG-INGRESS-%s out' % grp_id,
'ip access-group SG-EGRESS-%s in' % grp_id,
'exit',
'exit']
# SGs are applied on binding and on status DOWN->UP,
# so expect the commands twice
expected_eapi_commands.extend(expected_eapi_commands)
self.assertEqual(expected_eapi_commands,
self.switch1.received_commands)
self.assertEqual([], self.switch2.received_commands)
def test_apply_security_group_mlag(self):
switch_port = 'Ethernet1'
port_channel = 'Port-Channel100'
switch_id = '11:22:33:44:55'
switch1_info = 'TOR1'
switch2_info = 'TOR2'
self.create_port_channel(switch1_info, switch_port, port_channel)
self.create_port_channel(switch2_info, switch_port, port_channel)
grp_id, rule = self.create_sg_rule('egress', 'tcp', '10.0.0.0/24')
for switch in self.switches.values():
switch.clear_received_commands()
net_dict = {'network': {'name': 'net',
'tenant_id': 't1',
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
network, _ = self.create_network(net_dict)
port_dict = {'name': 'port1',
'tenant_id': 't1',
'network_id': network['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': 'bm1',
'device_owner': n_const.DEVICE_OWNER_BAREMETAL_PREFIX,
'binding:host_id': 'bm-host',
'binding:profile': {'local_link_information': [
{'switch_id': switch_id,
'port_id': switch_port,
'switch_info': switch1_info},
{'switch_id': switch_id,
'port_id': switch_port,
'switch_info': switch2_info}]},
'binding:vnic_type': 'baremetal',
'security_groups': [grp_id]}
port, _ = self.create_port(port_dict)
expected_eapi_commands = [
'show interfaces',
'enable',
'configure',
'interface %s' % port_channel,
'ip access-group SG-INGRESS-%s out' % grp_id,
'ip access-group SG-EGRESS-%s in' % grp_id,
'exit',
'exit']
# SGs are applied on binding and on status DOWN->UP,
# so expect the commands twice
expected_eapi_commands.extend(expected_eapi_commands)
self.assertEqual(expected_eapi_commands,
self.switch1.received_commands)
self.assertEqual(expected_eapi_commands,
self.switch2.received_commands)
def test_remove_security_group(self):
switch_port = 'Ethernet1'
switch_id = '11:22:33:44:55'
switch_info = 'TOR1'
grp_id, rule = self.create_sg_rule('egress', 'tcp', '10.0.0.0/24')
net_dict = {'network': {'name': 'net',
'tenant_id': 't1',
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
network, _ = self.create_network(net_dict)
port_dict = {'name': 'port1',
'tenant_id': 't1',
'network_id': network['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': 'bm1',
'device_owner': n_const.DEVICE_OWNER_BAREMETAL_PREFIX,
'binding:host_id': 'bm-host',
'binding:profile': {'local_link_information': [
{'switch_id': switch_id,
'port_id': switch_port,
'switch_info': switch_info}]},
'binding:vnic_type': 'baremetal',
'security_groups': [grp_id]}
port, _ = self.create_port(port_dict)
for switch in self.switches.values():
switch.clear_received_commands()
self.delete_port(port['id'])
expected_eapi_commands = [
'show interfaces',
'enable',
'configure',
'interface %s' % switch_port,
'no ip access-group SG-INGRESS-%s out' % grp_id,
'no ip access-group SG-EGRESS-%s in' % grp_id,
'exit',
'exit']
self.assertEqual(expected_eapi_commands,
self.switch1.received_commands)
self.assertEqual([], self.switch2.received_commands)
def test_remove_security_group_lag(self):
switch_port = 'Ethernet1'
port_channel = 'Port-Channel100'
switch_id = '11:22:33:44:55'
switch_info = 'TOR1'
self.create_port_channel(switch_info, switch_port, port_channel)
grp_id, rule = self.create_sg_rule('egress', 'tcp', '10.0.0.0/24')
net_dict = {'network': {'name': 'net',
'tenant_id': 't1',
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
network, _ = self.create_network(net_dict)
port_dict = {'name': 'port1',
'tenant_id': 't1',
'network_id': network['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': 'bm1',
'device_owner': n_const.DEVICE_OWNER_BAREMETAL_PREFIX,
'binding:host_id': 'bm-host',
'binding:profile': {'local_link_information': [
{'switch_id': switch_id,
'port_id': switch_port,
'switch_info': switch_info}]},
'binding:vnic_type': 'baremetal',
'security_groups': [grp_id]}
port, _ = self.create_port(port_dict)
for switch in self.switches.values():
switch.clear_received_commands()
self.delete_port(port['id'])
expected_eapi_commands = [
'show interfaces',
'enable',
'configure',
'interface %s' % port_channel,
'no ip access-group SG-INGRESS-%s out' % grp_id,
'no ip access-group SG-EGRESS-%s in' % grp_id,
'exit',
'exit']
self.assertEqual(expected_eapi_commands,
self.switch1.received_commands)
self.assertEqual([], self.switch2.received_commands)
def test_remove_security_group_mlag(self):
switch_port = 'Ethernet1'
port_channel = 'Port-Channel100'
switch_id = '11:22:33:44:55'
switch1_info = 'TOR1'
switch2_info = 'TOR2'
self.create_port_channel(switch1_info, switch_port, port_channel)
self.create_port_channel(switch2_info, switch_port, port_channel)
grp_id, rule = self.create_sg_rule('egress', 'tcp', '10.0.0.0/24')
net_dict = {'network': {'name': 'net',
'tenant_id': 't1',
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
network, _ = self.create_network(net_dict)
port_dict = {'name': 'port1',
'tenant_id': 't1',
'network_id': network['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': 'bm1',
'device_owner': n_const.DEVICE_OWNER_BAREMETAL_PREFIX,
'binding:host_id': 'bm-host',
'binding:profile': {'local_link_information': [
{'switch_id': switch_id,
'port_id': switch_port,
'switch_info': switch1_info},
{'switch_id': switch_id,
'port_id': switch_port,
'switch_info': switch2_info}]},
'binding:vnic_type': 'baremetal',
'security_groups': [grp_id]}
port, _ = self.create_port(port_dict)
for switch in self.switches.values():
switch.clear_received_commands()
self.delete_port(port['id'])
expected_eapi_commands = [
'show interfaces',
'enable',
'configure',
'interface %s' % port_channel,
'no ip access-group SG-INGRESS-%s out' % grp_id,
'no ip access-group SG-EGRESS-%s in' % grp_id,
'exit',
'exit']
self.assertEqual(expected_eapi_commands,
self.switch1.received_commands)
self.assertEqual(expected_eapi_commands,
self.switch2.received_commands)
def test_apply_security_group_vm(self):
grp_id, rule = self.create_sg_rule('egress', 'tcp', '10.0.0.0/24')
net_dict = {'network': {'name': 'net',
'tenant_id': 't1',
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
network, _ = self.create_network(net_dict)
port_dict = {'name': 'port1',
'tenant_id': 't1',
'network_id': network['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': 'vm1',
'device_owner': n_const.DEVICE_OWNER_COMPUTE_PREFIX,
'binding:host_id': self.host1,
'binding:vnic_type': 'normal',
'security_groups': [grp_id]}
for switch in self.switches.values():
switch.clear_received_commands()
self.create_port(port_dict)
self.assertEqual([], self.switch1.received_commands)
self.assertEqual([], self.switch2.received_commands)
def test_apply_multiple_security_groups(self):
switch_id = '00:11:22:33:44:55'
switch_info = 'TOR1'
switch_port = 'Ethernet1'
proto = 'tcp'
cidr = '10.0.0.0/24'
grp1_id, _ = self.create_sg_rule('egress', proto, cidr)
grp2_id, _ = self.create_sg_rule('ingress', proto, cidr,
default=False)
net_dict = {'network': {'name': 'net',
'tenant_id': 't1',
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
network, _ = self.create_network(net_dict)
port_dict = {'name': 'port1',
'tenant_id': 't1',
'network_id': network['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': 'bm1',
'device_owner': n_const.DEVICE_OWNER_BAREMETAL_PREFIX,
'binding:host_id': 'bm-host',
'binding:profile': {'local_link_information': [
{'switch_id': switch_id,
'port_id': switch_port,
'switch_info': switch_info}]},
'binding:vnic_type': 'baremetal',
'security_groups': [grp1_id, grp2_id]}
for switch in self.switches.values():
switch.clear_received_commands()
port, _ = self.create_port(port_dict)
self.assertEqual([], self.switch1.received_commands)
self.assertEqual([], self.switch2.received_commands)
networking_arista-2023.1.0/networking_arista/tests/unit/ml2/security_groups/test_security_group_sync.py
# Copyright (c) 2018 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from neutron.tests import base
from neutron_lib import constants as n_const
from oslo_config import cfg
from networking_arista.ml2.security_groups import security_group_sync
from networking_arista.tests.unit.ml2.security_groups import sg_test_base
from networking_arista.tests.unit import utils
class SecurityGroupSyncTestCase(sg_test_base.SecurityGroupTestBase):
def setUp(self):
super(SecurityGroupSyncTestCase, self).setUp()
self.sync_worker = security_group_sync.AristaSecurityGroupSyncWorker()
self.sync_worker._switches = self.switches
self.sync_worker._port_group_info = (
self.arista_sec_gp_plugin._port_group_info)
def assertAclsEqual(self, expected_acls, switch_acls):
self.assertItemsEqual(expected_acls.keys(), switch_acls.keys())
for acl in expected_acls.keys():
self.assertItemsEqual(expected_acls[acl], switch_acls[acl])
def assertBindingsEqual(self, expected_bindings, switch_bindings):
switch_intf_to_acl = collections.defaultdict(list)
for acl, dir_bindings in switch_bindings.items():
for direction, intfs in dir_bindings.items():
for intf in intfs:
switch_intf_to_acl[intf].append(
'ip access-group %s %s' % (acl, direction))
self.assertItemsEqual(expected_bindings.keys(),
switch_intf_to_acl.keys())
for intf in expected_bindings.keys():
self.assertItemsEqual(expected_bindings[intf],
switch_intf_to_acl[intf])
def test_synchronize(self):
"""Setup a scenario and ensure that sync recreates the scenario
Scenario is:
SG 1:
rule 1: permit egress tcp 10.0.0.0/24
rule 2: permit egress udp 10.0.0.0/24
SG 2:
rule 1: permit ingress tcp 10.0.0.0/24
rule 2: permit ingress udp 10.0.0.0/24
Port 1:
SG1 on switch TOR1, Ethernet1
Port 2:
SG2 on switch TOR1 and switch TOR2, Port-Channel100
"""
switch_port1 = 'Ethernet1'
switch_port2 = 'Ethernet2'
port_channel = 'Port-Channel100'
switch_id = '11:22:33:44:55'
switch1_info = 'TOR1'
switch2_info = 'TOR2'
proto1 = 'tcp'
proto2 = 'udp'
cidr = '10.0.0.0/24'
self.create_port_channel(switch1_info, switch_port2, port_channel)
self.create_port_channel(switch2_info, switch_port2, port_channel)
grp1_id, _ = self.create_sg_rule('egress', proto1, cidr)
self.create_sg_rule('egress', proto2, cidr, sg_id=grp1_id)
grp2_id, _ = self.create_sg_rule('ingress', proto1, cidr,
default=False)
self.create_sg_rule('ingress', proto2, cidr, default=False,
sg_id=grp2_id)
net_dict = {'network': {'name': 'net',
'tenant_id': 't1',
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
network, _ = self.create_network(net_dict)
port1_dict = {'name': 'port1',
'tenant_id': 't1',
'network_id': network['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': 'bm1',
'device_owner': n_const.DEVICE_OWNER_BAREMETAL_PREFIX,
'binding:host_id': 'bm-host1',
'binding:profile': {'local_link_information': [
{'switch_id': switch_id,
'port_id': switch_port1,
'switch_info': switch1_info}]},
'binding:vnic_type': 'baremetal',
'security_groups': [grp1_id]}
port1, _ = self.create_port(port1_dict)
port2_dict = {'name': 'port2',
'tenant_id': 't1',
'network_id': network['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': 'bm2',
'device_owner': n_const.DEVICE_OWNER_BAREMETAL_PREFIX,
'binding:host_id': 'bm-host2',
'binding:profile': {'local_link_information': [
{'switch_id': switch_id,
'port_id': switch_port2,
'switch_info': switch1_info},
{'switch_id': switch_id,
'port_id': switch_port2,
'switch_info': switch2_info}]},
'binding:vnic_type': 'baremetal',
'security_groups': [grp2_id]}
port2, _ = self.create_port(port2_dict)
self.sync_worker.synchronize()
switch1_expected_acls = {
'SG-INGRESS-%s' % grp1_id: [],
'SG-EGRESS-%s' % grp1_id: [
'permit %s any %s' % (proto1, cidr),
'permit %s any %s' % (proto2, cidr)],
'SG-INGRESS-%s' % grp2_id: [
'permit %s %s any' % (proto1, cidr),
'permit %s %s any' % (proto2, cidr)],
'SG-EGRESS-%s' % grp2_id: []}
switch1_expected_bindings = {
switch_port1: [
'ip access-group SG-INGRESS-%s out' % grp1_id,
'ip access-group SG-EGRESS-%s in' % grp1_id],
port_channel: [
'ip access-group SG-INGRESS-%s out' % grp2_id,
'ip access-group SG-EGRESS-%s in' % grp2_id]}
switch2_expected_acls = {
'SG-INGRESS-%s' % grp1_id: [],
'SG-EGRESS-%s' % grp1_id: [
'permit %s any %s' % (proto1, cidr),
'permit %s any %s' % (proto2, cidr)],
'SG-INGRESS-%s' % grp2_id: [
'permit %s %s any' % (proto1, cidr),
'permit %s %s any' % (proto2, cidr)],
'SG-EGRESS-%s' % grp2_id: []}
switch2_expected_bindings = {
port_channel: [
'ip access-group SG-INGRESS-%s out' % grp2_id,
'ip access-group SG-EGRESS-%s in' % grp2_id]}
self.assertAclsEqual(switch1_expected_acls, self.switch1._acl_rules)
self.assertAclsEqual(switch2_expected_acls, self.switch2._acl_rules)
self.assertBindingsEqual(switch1_expected_bindings,
self.switch1._bindings)
self.assertBindingsEqual(switch2_expected_bindings,
self.switch2._bindings)
def test_sync_vm_port(self):
proto = 'tcp'
cidr = '10.0.0.0/24'
grp_id, _ = self.create_sg_rule('egress', proto, cidr)
net_dict = {'network': {'name': 'net',
'tenant_id': 't1',
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
network, _ = self.create_network(net_dict)
port_dict = {'name': 'port1',
'tenant_id': 't1',
'network_id': network['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': 'vm1',
'device_owner': n_const.DEVICE_OWNER_COMPUTE_PREFIX,
'binding:host_id': self.host1,
'binding:vnic_type': 'normal',
                     'security_groups': [grp_id]}
self.create_port(port_dict)
for switch in self.switches.values():
switch.reset_switch()
self.sync_worker.synchronize()
expected_rules = {
'SG-INGRESS-%s' % grp_id: [],
'SG-EGRESS-%s' % grp_id: [
'permit %s any %s' % (proto, cidr)]}
self.assertAclsEqual(expected_rules, self.switch1._acl_rules)
self.assertAclsEqual(expected_rules, self.switch2._acl_rules)
def test_sync_multiple_sgs_per_port(self):
switch_id = '00:11:22:33:44:55'
switch_info = 'TOR1'
switch_port = 'Ethernet1'
proto = 'tcp'
cidr = '10.0.0.0/24'
grp1_id, _ = self.create_sg_rule('egress', proto, cidr)
grp2_id, _ = self.create_sg_rule('ingress', proto, cidr,
default=False)
net_dict = {'network': {'name': 'net',
'tenant_id': 't1',
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
network, _ = self.create_network(net_dict)
port_dict = {'name': 'port1',
'tenant_id': 't1',
'network_id': network['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': 'bm1',
'device_owner': n_const.DEVICE_OWNER_BAREMETAL_PREFIX,
'binding:host_id': 'bm-host',
'binding:profile': {'local_link_information': [
{'switch_id': switch_id,
'port_id': switch_port,
'switch_info': switch_info}]},
'binding:vnic_type': 'baremetal',
'security_groups': [grp1_id, grp2_id]}
port, _ = self.create_port(port_dict)
self.sync_worker.synchronize()
# The security groups may be synced in either order, so just assert
# that neither group was applied
unexpected_cmds = [
'interface %s' % switch_port,
'ip access-group SG-INGRESS-%s out' % grp1_id,
'ip access-group SG-EGRESS-%s in' % grp1_id,
'ip access-group SG-INGRESS-%s out' % grp2_id,
'ip access-group SG-EGRESS-%s in' % grp2_id]
for cmd in unexpected_cmds:
self.assertNotIn(cmd, self.switch1.received_commands)
self.assertNotIn(cmd, self.switch2.received_commands)
def test_sync_unsupported_rules(self):
grp_id, _ = self.create_sg_rule('egress', 'tcp', None,
ethertype='IPv6')
for switch in self.switches.values():
switch.reset_switch()
self.sync_worker.synchronize()
expected_rules = {
'SG-INGRESS-%s' % grp_id: [],
'SG-EGRESS-%s' % grp_id: []}
self.assertEqual(expected_rules, self.switch1._acl_rules)
self.assertEqual(expected_rules, self.switch2._acl_rules)
def test_sync_missing_acl(self):
grp_id, _ = self.create_sg_rule('egress', 'tcp', None)
self.switch1.execute(['enable',
'configure',
'no ip access-list SG-EGRESS-%s' % grp_id,
'exit'])
self.assertEqual({'SG-INGRESS-%s' % grp_id: []},
self.switch1._acl_rules)
self.sync_worker.synchronize()
expected_rules = {
'SG-INGRESS-%s' % grp_id: [],
'SG-EGRESS-%s' % grp_id: ['permit tcp any any']}
self.assertEqual(expected_rules, self.switch1._acl_rules)
def test_sync_extra_acl(self):
grp_id = 'fake-os-sg'
extra_rule = 'permit tcp any any'
self.switch1.execute(['enable',
'configure',
'ip access-list %s dynamic' % grp_id,
extra_rule,
'exit',
'exit'])
self.assertEqual({grp_id: [extra_rule]}, self.switch1._acl_rules)
self.sync_worker.synchronize()
self.assertEqual({}, self.switch1._acl_rules)
def test_sync_missing_rule(self):
grp_id, _ = self.create_sg_rule('egress', 'tcp', None)
self.switch1.execute(['enable',
'configure',
'ip access-list SG-EGRESS-%s dynamic' % grp_id,
'no permit tcp any any',
'exit'])
expected_rules = {'SG-INGRESS-%s' % grp_id: [],
'SG-EGRESS-%s' % grp_id: []}
self.assertEqual(expected_rules, self.switch1._acl_rules)
self.sync_worker.synchronize()
expected_rules = {
'SG-INGRESS-%s' % grp_id: [],
'SG-EGRESS-%s' % grp_id: ['permit tcp any any']}
self.assertEqual(expected_rules, self.switch1._acl_rules)
def test_sync_extra_rule(self):
grp_id, _ = self.create_sg_rule('egress', 'tcp', None)
extra_rule = 'permit udp any any'
self.switch1.execute(['enable',
'configure',
'ip access-list SG-EGRESS-%s dynamic' % grp_id,
extra_rule,
'exit',
'exit'])
expected_rules = {
'SG-INGRESS-%s' % grp_id: [],
'SG-EGRESS-%s' % grp_id: ['permit tcp any any', extra_rule]}
self.assertAclsEqual(expected_rules, self.switch1._acl_rules)
self.sync_worker.synchronize()
expected_rules = {
'SG-INGRESS-%s' % grp_id: [],
'SG-EGRESS-%s' % grp_id: ['permit tcp any any']}
self.assertEqual(expected_rules, self.switch1._acl_rules)
def test_sync_missing_binding(self):
switch_port = 'Ethernet1'
switch_id = '11:22:33:44:55'
switch_info = 'TOR1'
grp_id, _ = self.create_sg_rule('egress', 'tcp', None)
net_dict = {'network': {'name': 'net',
'tenant_id': 't1',
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
network, _ = self.create_network(net_dict)
port_dict = {'name': 'port',
'tenant_id': 't1',
'network_id': network['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': 'bm',
'device_owner': n_const.DEVICE_OWNER_BAREMETAL_PREFIX,
'binding:host_id': 'bm-host',
'binding:profile': {'local_link_information': [
{'switch_id': switch_id,
'port_id': switch_port,
'switch_info': switch_info}]},
'binding:vnic_type': 'baremetal',
'security_groups': [grp_id]}
port, _ = self.create_port(port_dict)
self.switch1.execute(['configure',
'interface %s' % switch_port,
'no ip access-group SG-EGRESS-%s in' % grp_id,
'exit',
'exit'])
expected_bindings = {
switch_port: [
'ip access-group SG-INGRESS-%s out' % grp_id]}
self.assertBindingsEqual(expected_bindings, self.switch1._bindings)
self.sync_worker.synchronize()
expected_bindings = {
switch_port: [
'ip access-group SG-INGRESS-%s out' % grp_id,
'ip access-group SG-EGRESS-%s in' % grp_id]}
self.assertBindingsEqual(expected_bindings, self.switch1._bindings)
def test_sync_extra_binding(self):
switch_port = 'Ethernet1'
extra_acl = 'bad-acl'
self.switch1.execute(['configure',
'interface %s' % switch_port,
'ip access-group %s in' % extra_acl,
'exit',
'exit'])
expected_bindings = {
switch_port: [
'ip access-group %s in' % extra_acl]}
self.assertBindingsEqual(expected_bindings, self.switch1._bindings)
self.sync_worker.synchronize()
self.assertBindingsEqual(dict(), self.switch1._bindings)
def test_sync_binding_changed(self):
wrong_acl = 'bad-acl'
switch_port = 'Ethernet1'
switch_id = '11:22:33:44:55'
switch_info = 'TOR1'
grp_id, _ = self.create_sg_rule('egress', 'tcp', None)
net_dict = {'network': {'name': 'net',
'tenant_id': 't1',
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
network, _ = self.create_network(net_dict)
port_dict = {'name': 'port',
'tenant_id': 't1',
'network_id': network['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': 'bm',
'device_owner': n_const.DEVICE_OWNER_BAREMETAL_PREFIX,
'binding:host_id': 'bm-host',
'binding:profile': {'local_link_information': [
{'switch_id': switch_id,
'port_id': switch_port,
'switch_info': switch_info}]},
'binding:vnic_type': 'baremetal',
'security_groups': [grp_id]}
port, _ = self.create_port(port_dict)
self.switch1.execute(['configure',
'interface %s' % switch_port,
'ip access-group %s out' % wrong_acl,
'exit',
'exit'])
expected_bindings = {
switch_port: [
'ip access-group %s out' % wrong_acl,
'ip access-group SG-EGRESS-%s in' % grp_id]}
self.assertBindingsEqual(expected_bindings, self.switch1._bindings)
self.sync_worker.synchronize()
expected_bindings = {
switch_port: [
'ip access-group SG-INGRESS-%s out' % grp_id,
'ip access-group SG-EGRESS-%s in' % grp_id]}
self.assertBindingsEqual(expected_bindings, self.switch1._bindings)
class SecurityGroupSyncWorkerTestCase(base.BaseTestCase):
def setUp(self):
utils.setup_arista_wrapper_config(cfg)
super(SecurityGroupSyncWorkerTestCase, self).setUp()
self.sync_worker = security_group_sync.AristaSecurityGroupSyncWorker()
def tearDown(self):
if self.sync_worker._loop is not None:
self.sync_worker._loop.stop()
super(SecurityGroupSyncWorkerTestCase, self).tearDown()
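    # These tests exercise the worker's periodic-loop lifecycle: start()
    # creates the loop, stop()/wait() tears it down, reset() replaces it.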
def test_start(self):
self.sync_worker.start()
self.assertIsNotNone(self.sync_worker._loop)
self.assertTrue(self.sync_worker._loop._running)
self.assertIsNotNone(self.sync_worker._switches)
self.assertIsNotNone(self.sync_worker._loop.done)
def test_start_twice(self):
self.sync_worker.start()
current_loop = self.sync_worker._loop
self.assertRaises(RuntimeError, self.sync_worker.start)
self.assertEqual(self.sync_worker._loop, current_loop)
def test_stop(self):
self.test_start()
running_loop = self.sync_worker._loop
self.sync_worker.stop()
self.sync_worker.wait()
self.assertFalse(running_loop._running)
self.assertIsNone(self.sync_worker._loop)
def test_reset(self):
self.test_start()
old_loop = self.sync_worker._loop
self.sync_worker.reset()
self.assertNotEqual(self.sync_worker._loop, old_loop)
networking_arista-2023.1.0/networking_arista/tests/unit/ml2/test_arista_resources.py
# Copyright (c) 2017 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import mock
from neutron.db import rbac_db_models as rbac_models
from neutron.tests import base
from neutron.tests.unit import testlib_api
from neutron_lib import constants as n_const
from neutron_lib.plugins import constants as plugin_constants
from neutron_lib.plugins import directory
from oslo_config import cfg
from oslo_utils import importutils
from networking_arista.common import db_lib
import networking_arista.ml2.arista_resources as resources
from networking_arista.tests.unit import utils
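# A minimal concrete resource type for exercising AristaResourcesBase: each
# AttributeFormatter maps a neutron attribute to a CVX attribute, optionally
# through a transform function (f1 here).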
class TestAristaResourcesType(resources.AristaResourcesBase):
f1 = mock.MagicMock()
f1.return_value = 'data1Prime'
formatter = [resources.AttributeFormatter('id', 'id'),
resources.AttributeFormatter('n_key1', 'c_key1', f1),
resources.AttributeFormatter('n_key2', 'c_key2')]
class AristaResourcesClassAttrTest(base.BaseTestCase):
"""Ensure arista resources have class attributes"""
def test_attributes(self):
whitelist = ['AristaResourcesBase',
'PortResourcesBase',
'AttributeFormatter']
for cls in resources.__dict__.values():
if (isinstance(cls, type) and
cls.__module__ == resources.__name__ and
cls.__name__ not in whitelist):
self.assertIsNotNone(cls.formatter)
self.assertIsNotNone(cls.id_key)
self.assertIsNotNone(cls.endpoint)
class AristaResourcesBaseTest(base.BaseTestCase):
"""Test cases for resource manipulation"""
def setUp(self):
super(AristaResourcesBaseTest, self).setUp()
self.rpc = mock.MagicMock()
self.rpc.region = 'region'
def test_clear_cvx_data(self):
# Setup
ar = resources.AristaResourcesBase(self.rpc)
ar.cvx_ids = set([i for i in range(10)])
# Check that clear_cvx_data restores cvx_ids to a NULL set
ar.clear_cvx_data()
self.assertEqual(ar.cvx_ids, set())
def test_clear_neutron_data(self):
# Setup
ar = resources.AristaResourcesBase(self.rpc)
ar.neutron_resources = {i: {'id': i} for i in range(10)}
# Check that clear_neutron_data restores neutron resources to an
# empty dict
ar.clear_neutron_data()
self.assertEqual(ar.neutron_resources, dict())
def test_clear_all_data(self):
# Setup
ar = resources.AristaResourcesBase(self.rpc)
ar.cvx_ids = set(i for i in range(10))
ar.neutron_resources = {i: {'id': i} for i in range(10)}
# Check that clear_all_data restores neutron resources to an
# empty dict and cvx_ids to an empty set
ar.clear_all_data()
self.assertEqual(ar.neutron_resources, dict())
self.assertEqual(ar.cvx_ids, set())
def test_add_neutron_resource(self):
# Setup
ar = resources.AristaResourcesBase(self.rpc)
# Check that the resource is added to neutron_resources
ar._add_neutron_resource({'id': 1})
self.assertEqual(ar.neutron_resources, {1: {'id': 1}})
def test_force_resource_update(self):
# Setup
neutron_resources = {i: {'id': i} for i in range(10)}
ar = resources.AristaResourcesBase(self.rpc)
ar.cvx_data_stale = False
ar.neutron_data_stale = False
ar.cvx_ids = set(range(10))
ar.neutron_resources = neutron_resources
resource_to_update = 5
ar.get_db_resources = mock.MagicMock()
ar.get_db_resources.return_value = [{'id': resource_to_update}]
# Ensure that calling force_resource_update would result in that
# resource being resent to CVX (with any updated data)
self.assertEqual(ar.resource_ids_to_create(), set())
ar.force_resource_update(resource_to_update)
self.assertEqual(ar.resource_ids_to_create(),
set([resource_to_update]))
def test_delete_neutron_resource(self):
# Setup
neutron_resources = {i: {'id': i} for i in range(10)}
ar = resources.AristaResourcesBase(self.rpc)
ar.get_db_resources = mock.MagicMock()
ar.get_db_resources.return_value = []
ar.neutron_resources = copy.copy(neutron_resources)
id_to_delete = 5
del neutron_resources[id_to_delete]
# Delete neutron resource and check that it's no longer in
# neutron_resources
ar._delete_neutron_resource(id_to_delete)
self.assertEqual(ar.neutron_resources, neutron_resources)
def test_get_endpoint(self):
# Setup
regionName = 'region'
ar = resources.AristaResourcesBase(self.rpc)
ar.endpoint = '%(region)s'
self.assertEqual(ar.get_endpoint(), regionName)
def test_get_resource_ids(self):
ar = resources.AristaResourcesBase(self.rpc)
self.assertEqual(ar.get_resource_ids({'id': 1}), set([1]))
def test_get_cvx_ids(self):
# Setup
cvx_ids = range(10)
ar = resources.AristaResourcesBase(self.rpc)
ar.endpoint = 'region/%(region)s'
self.rpc.send_api_request.return_value = [{'id': i} for i in cvx_ids]
# Check that get_cvx_ids returns the expected value
self.assertEqual(ar.get_cvx_ids(), set(cvx_ids))
self.assertEqual(ar.cvx_ids, set(cvx_ids))
# Check that a second call uses the cached value
ar.get_cvx_ids()
self.rpc.send_api_request.assert_called_once()
def test_get_neutron_ids(self):
# Setup
neutron_ids = range(10)
neutron_resources = [{'id': i} for i in neutron_ids]
ar = resources.AristaResourcesBase(self.rpc)
ar.get_db_resources = mock.MagicMock()
ar.get_db_resources.return_value = neutron_resources
# Check that get_neutron_ids returns the expected value
self.assertEqual(ar.get_neutron_ids(), set(neutron_ids))
# Check that a second call uses the cached value
ar.get_neutron_ids()
ar.get_db_resources.assert_called_once()
def test_get_neutron_resources(self):
# Setup
neutron_ids = range(10)
db_resources = [{'id': i} for i in neutron_ids]
ar = resources.AristaResourcesBase(self.rpc)
ar.get_db_resources = mock.MagicMock()
ar.get_db_resources.return_value = db_resources
neutron_resources = {i: {'id': i} for i in neutron_ids}
# Check that get_neutron_resources returns the expected value
self.assertEqual(ar.get_neutron_resources(), neutron_resources)
self.assertEqual(ar.neutron_resources, neutron_resources)
# Check that a second call uses the cached value
ar.get_neutron_resources()
ar.get_db_resources.assert_called_once()
def test_resources_ids_to_delete(self):
# Setup
neutron_ids = set(range(7))
cvx_ids = set(range(3, 10))
ar = resources.AristaResourcesBase(self.rpc)
ar.get_cvx_ids = mock.MagicMock()
ar.get_cvx_ids.return_value = cvx_ids
ar.get_neutron_ids = mock.MagicMock()
ar.get_neutron_ids.return_value = neutron_ids
# Check that the return is the set of ids in cvx but not in neutron
self.assertCountEqual(ar.resource_ids_to_delete(),
cvx_ids - neutron_ids)
def test_resource_ids_to_create(self):
# Setup
cvx_resource_ids = set(range(0, 20, 2))
neutron_resource_ids = set(range(10))
ar = resources.AristaResourcesBase(self.rpc)
ar.cvx_data_stale = False
ar.neutron_data_stale = False
ar.cvx_ids = cvx_resource_ids
ar.neutron_resources = {i: {'id': i} for i in neutron_resource_ids}
# Ensure that resource ids to create returns the set of ids present in
# neutron, but not cvx
self.assertEqual(ar.resource_ids_to_create(), set(range(1, 10, 2)))
def test_format_for_create(self):
# Setup
ar = TestAristaResourcesType(self.rpc)
neutron_data = [{'id': 'id1',
'n_key1': 'data1',
'n_key2': 'data2'},
{'id': 'id2',
'n_key1': 'data1',
'n_key2': 'data2',
'extra_key': 'data'}]
expected_cvx_data = [{'id1': {'id': 'id1',
'c_key1': 'data1Prime',
'c_key2': 'data2'}},
{'id2': {'id': 'id2',
'c_key1': 'data1Prime',
'c_key2': 'data2'}}]
test_cases = [(neutron_data[i], expected_cvx_data[i])
for i in range(len(neutron_data))]
# Check that data is correctly formatted for cvx
for neutron_resource, expected_resource in test_cases:
formatted_resource = ar.format_for_create(neutron_resource)
self.assertEqual(formatted_resource, expected_resource)
# Check that an exception is raised if neutron is missing data that CVX
# requires
neutron_data = [{'n_key1': 'data1'},
{'n_key1': 'data1',
'extra_key': 'data'}]
for resource in neutron_data:
self.assertRaises(KeyError, ar.format_for_create, resource)
def test_format_for_delete(self):
ar = resources.AristaResourcesBase(self.rpc)
id_to_format = 1
self.assertEqual(ar.format_for_delete(id_to_format),
{ar.id_key: id_to_format})
def test_create_cvx_resources(self):
# Setup
neutron_ids = set(range(7))
cvx_ids = set(range(3, 10))
neutron_resources = [{'id': i} for i in neutron_ids]
ar = resources.AristaResourcesBase(self.rpc)
ar.get_db_resources = mock.MagicMock()
ar.get_db_resources.return_value = neutron_resources
ar.get_cvx_ids = mock.MagicMock()
ar.get_cvx_ids.return_value = cvx_ids
# Check that the return is the resources in neutron but not cvx
self.assertEqual(ar.create_cvx_resources(),
list(neutron_resources[k] for k in
neutron_ids - cvx_ids))
def test_delete_cvx_resources(self):
# Setup
neutron_ids = set(range(7))
cvx_ids = set(range(3, 10))
neutron_resources = [{'id': i} for i in neutron_ids]
ar = resources.AristaResourcesBase(self.rpc)
ar.get_db_resources = mock.MagicMock()
ar.get_db_resources.return_value = neutron_resources
ar.cvx_ids = cvx_ids.copy()
# Check that the return is the delete payloads for ids in cvx but not neutron
self.assertEqual(ar.delete_cvx_resources(),
[{'id': i} for i in (cvx_ids - neutron_ids)])
class AristaResourcesTestBase(testlib_api.SqlTestCase):
def setUp(self):
super(AristaResourcesTestBase, self).setUp()
plugin_klass = importutils.import_class(
"neutron.db.db_base_plugin_v2.NeutronDbPluginV2")
directory.add_plugin(plugin_constants.CORE, plugin_klass())
self.rpc = utils.MockCvx('region')
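# run_scenario drives a full create/delete lifecycle against the mock CVX:
# it populates the DB via utils.setup_scenario(), verifies that exactly the
# expected resources are created (and not re-created), then deletes each
# resource and verifies the corresponding delete payloads.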
def run_scenario(self, expect_created):
utils.setup_scenario()
# Create resource tests
resources_created = self.ar.create_cvx_resources()
self.assertCountEqual(resources_created,
expect_created.values())
self.ar.clear_cvx_data()
# Ensure existing resources aren't created
resources_created = self.ar.create_cvx_resources()
self.assertEqual(resources_created, [])
# Delete resource tests
for resource_id_to_delete in expect_created.keys():
expect_deleted = [self.ar.format_for_delete(resource_id_to_delete)]
self.delete_helper(resource_id_to_delete)
self.ar.clear_all_data()
resources_deleted = self.ar.delete_cvx_resources()
self.assertEqual(resources_deleted,
expect_deleted)
# Ensure non-existent resources aren't deleted
self.ar.clear_all_data()
resources_deleted = self.ar.delete_cvx_resources()
self.assertEqual(resources_deleted, [])
def verify_format_for_create(self, test_cases):
for neutron_resource, expected_resource in test_cases:
formatted_resource = self.ar.format_for_create(neutron_resource)
self.assertEqual(formatted_resource, expected_resource)
def verify_format_for_delete(self, test_cases):
for neutron_resource_id, expected_resource in test_cases:
formatted_resource = self.ar.format_for_delete(neutron_resource_id)
self.assertEqual(formatted_resource, expected_resource)
class AristaTenantTest(AristaResourcesTestBase):
"""Test cases for creation & deletion of arista tenants"""
def setUp(self):
super(AristaTenantTest, self).setUp()
self.ar = resources.Tenants(self.rpc)
def delete_helper(self, tenant_id):
utils.delete_ports_for_tenant(tenant_id)
utils.delete_segments_for_tenant(tenant_id)
utils.delete_networks_for_tenant(tenant_id)
utils.delete_ha_router_for_tenant(tenant_id)
def test_scenario_tenants(self):
expect_created = {'t1': {'id': 't1'},
't2': {'id': 't2'},
'ha-router-project': {'id': 'ha-router-project'}}
self.run_scenario(expect_created)
def test_format_tenants(self):
# format_for_create test setup
neutron_data = [{'project_id': '1'},
{'project_id': '2'}]
expected_cvx_data = [{'1': {'id': '1'}},
{'2': {'id': '2'}}]
test_cases = [(neutron_data[i], expected_cvx_data[i])
for i in range(len(neutron_data))]
self.verify_format_for_create(test_cases)
def test_format_tenants_for_delete(self):
# format_for_delete test setup
neutron_tenant_id = 't3'
expected_tenant = {'id': 't3'}
test_case = [(neutron_tenant_id, expected_tenant)]
self.verify_format_for_delete(test_case)
class AristaNetworkTest(AristaResourcesTestBase):
"""Test cases for creation & deletion of arista networks"""
def setUp(self):
super(AristaNetworkTest, self).setUp()
self.ar = resources.Networks(self.rpc)
def delete_helper(self, network_id):
utils.delete_ports_on_network(network_id)
utils.delete_segments_for_network(network_id)
utils.delete_network(network_id)
def test_networks_scenario(self):
expect_created = {'n1': {'id': 'n1',
'tenantId': 't1',
'name': 'regular',
'shared': False},
'n2': {'id': 'n2',
'tenantId': 't2',
'name': 'hpb',
'shared': False},
'HA network': {'id': 'HA network',
'tenantId': 'ha-router-project',
'name': 'l3 ha',
'shared': False}}
self.run_scenario(expect_created)
def test_format_networks_for_create(self):
# format_for_create test setup
# Basic test case
net1_neutron = {'id': 'n1',
'project_id': 't1',
'name': 'n1_name',
'admin_state_up': True,
'rbac_entries': []}
net1_expected = {'n1': {'id': 'n1',
'tenantId': 't1',
'name': 'n1_name',
'shared': False}}
# Shared network
shared_rbac = rbac_models.NetworkRBAC(**{'id': 1,
'project_id': 't2',
'target_tenant': '*',
'action': 'access_as_shared'})
net2_neutron = {'id': 'n2',
'project_id': 't2',
'name': '',
'admin_state_up': True,
'rbac_entries': [shared_rbac]}
net2_expected = {'n2': {'id': 'n2',
'tenantId': 't2',
'name': '',
'shared': True}}
# Other RBAC
other_rbac1 = rbac_models.NetworkRBAC(**{'id': 2,
'project_id': 't3',
'target_tenant': 't1',
'action': 'access_as_shared'})
other_rbac2 = rbac_models.NetworkRBAC(**{'id': 3,
'project_id': 't3',
'target_tenant': 't2',
'action': 'access_as_shared'})
net3_neutron = {'id': 'n3',
'project_id': 't3',
'name': 'n3_name',
'admin_state_up': True,
'rbac_entries': [other_rbac1, other_rbac2]}
net3_expected = {'n3': {'id': 'n3',
'tenantId': 't3',
'name': 'n3_name',
'shared': False}}
utils.create_networks([net1_neutron, net2_neutron, net3_neutron])
test_cases = [(db_lib.get_networks('n1')[0], net1_expected),
(db_lib.get_networks('n2')[0], net2_expected),
(db_lib.get_networks('n3')[0], net3_expected)]
self.verify_format_for_create(test_cases)
def test_format_networks_for_delete(self):
# format_for_delete test setup
neutron_net_id = 'n4'
expected_net = {'id': 'n4'}
test_case = [(neutron_net_id, expected_net)]
self.verify_format_for_delete(test_case)
class AristaSegmentTest(AristaResourcesTestBase):
"""Test cases for creation & deletion of arista segments"""
def setUp(self):
super(AristaSegmentTest, self).setUp()
self.ar = resources.Segments(self.rpc)
def delete_helper(self, segment_id):
utils.delete_segment(segment_id)
def test_segments_scenario(self):
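# The expected segments correspond to those created by
# utils.setup_scenario(): static and dynamic VLAN and VXLAN segments on
# n1, n2 and the HA network.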
expect_created = {'s1': {'id': 's1',
'networkId': 'n1',
'segmentType': 'static',
'segmentationId': 11,
'type': 'vlan'},
's2': {'id': 's2',
'networkId': 'n2',
'segmentType': 'static',
'segmentationId': 20001,
'type': 'vxlan'},
's3': {'id': 's3',
'networkId': 'n2',
'segmentType': 'dynamic',
'segmentationId': 21,
'type': 'vlan'},
's4': {'id': 's4',
'networkId': 'n2',
'segmentType': 'dynamic',
'segmentationId': 31,
'type': 'vlan'},
's5': {'id': 's5',
'networkId': 'HA network',
'segmentType': 'static',
'segmentationId': 33,
'type': 'vlan'},
's6': {'id': 's6',
'networkId': 'HA network',
'segmentType': 'static',
'segmentationId': 20010,
'type': 'vxlan'},
's7': {'id': 's7',
'networkId': 'HA network',
'segmentationId': 700,
'segmentType': 'dynamic',
'type': 'vlan'},
's8': {'id': 's8',
'networkId': 'HA network',
'segmentationId': 800,
'segmentType': 'dynamic',
'type': 'vlan'}}
self.run_scenario(expect_created)
def test_format_segments_for_create(self):
seg1_neutron = {'id': 's1',
'network_id': 'nid',
'is_dynamic': False,
'segmentation_id': 10001,
'network_type': 'vxlan',
'physical_network': None}
seg1_expected = {'s1': {'id': 's1',
'type': 'vxlan',
'segmentationId': 10001,
'networkId': 'nid',
'segmentType': 'static'}}
seg2_neutron = {'id': 's2',
'network_id': 'nid',
'is_dynamic': True,
'segmentation_id': 11,
'network_type': 'vlan',
'physical_network': 'default'}
seg2_expected = {'s2': {'id': 's2',
'type': 'vlan',
'segmentationId': 11,
'networkId': 'nid',
'segmentType': 'dynamic'}}
test_cases = [(seg1_neutron, seg1_expected),
(seg2_neutron, seg2_expected)]
self.verify_format_for_create(test_cases)
def test_format_segments_for_delete(self):
neutron_seg_id = 's3'
expected_seg = {'id': 's3'}
test_case = [(neutron_seg_id, expected_seg)]
self.verify_format_for_delete(test_case)
class AristaInstancesTestBase(AristaResourcesTestBase):
def delete_helper(self, instance_id):
utils.delete_ports_for_instance(instance_id)
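# Instance resources (DHCP, router, VM, baremetal) are keyed off port
# device_owner values, so these tests remove an instance by deleting its
# ports.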
class AristaDhcpTest(AristaInstancesTestBase):
"""Test cases for creation & deletion of arista dhcp instances"""
def setUp(self):
super(AristaDhcpTest, self).setUp()
self.ar = resources.Dhcps(self.rpc)
def test_dhcps_scenario(self):
id_base = n_const.DEVICE_OWNER_DHCP + 'normal'
expect_created = {'%s1' % id_base:
{'tenantId': 't1',
'hostId': 'host1',
'id': '%s1' % id_base},
'%s2' % id_base:
{'tenantId': 't2',
'hostId': 'host2',
'id': '%s2' % id_base}}
self.run_scenario(expect_created)
def test_dhcps_managed_physnets_scenario(self):
cfg.CONF.set_override('managed_physnets', 'switch1', 'ml2_arista')
id_base = n_const.DEVICE_OWNER_DHCP + 'normal'
expect_created = {'%s2' % id_base:
{'tenantId': 't2',
'hostId': 'host2',
'id': '%s2' % id_base}}
self.run_scenario(expect_created)
class AristaRouterTest(AristaInstancesTestBase):
"""Test cases for creation & deletion of arista routers"""
def setUp(self):
super(AristaRouterTest, self).setUp()
self.ar = resources.Routers(self.rpc)
def test_routers_scenario(self):
id_base = n_const.DEVICE_OWNER_DVR_INTERFACE + 'normal'
ha_id_base = n_const.DEVICE_OWNER_ROUTER_HA_INTF + 'normal'
legacy_id_base = n_const.DEVICE_OWNER_ROUTER_INTF + 'normal'
expect_created = {'%s1' % id_base:
{'tenantId': 't1',
'hostId': '(see router ports)',
'id': '%s1' % id_base},
'%s2' % id_base:
{'tenantId': 't2',
'hostId': '(see router ports)',
'id': '%s2' % id_base},
'%s' % ha_id_base:
{'tenantId': 'ha-router-project',
'hostId': '(see router ports)',
'id': '%s' % ha_id_base},
'%s1' % legacy_id_base:
{'tenantId': 't1',
'hostId': '(see router ports)',
'id': '%s1' % legacy_id_base},
'%s2' % legacy_id_base:
{'tenantId': 't2',
'hostId': '(see router ports)',
'id': '%s2' % legacy_id_base}}
self.run_scenario(expect_created)
def test_routers_managed_physnets_scenario(self):
cfg.CONF.set_override('managed_physnets', 'switch1', 'ml2_arista')
id_base = n_const.DEVICE_OWNER_DVR_INTERFACE + 'normal'
legacy_id_base = n_const.DEVICE_OWNER_ROUTER_INTF + 'normal'
ha_id_base = n_const.DEVICE_OWNER_ROUTER_HA_INTF + 'normal'
expect_created = {'%s2' % id_base:
{'tenantId': 't2',
'hostId': '(see router ports)',
'id': '%s2' % id_base},
'%s2' % legacy_id_base:
{'id': '%s2' % legacy_id_base,
'tenantId': 't2',
'hostId': '(see router ports)'},
'%s' % ha_id_base:
{'tenantId': 'ha-router-project',
'hostId': '(see router ports)',
'id': '%s' % ha_id_base}}
self.run_scenario(expect_created)
class AristaVmTest(AristaInstancesTestBase):
"""Test cases for creation & deletion of arista vms"""
def setUp(self):
super(AristaVmTest, self).setUp()
self.ar = resources.Vms(self.rpc)
def test_vms_scenario(self):
id_base = n_const.DEVICE_OWNER_COMPUTE_PREFIX + 'normal'
expect_created = {'%s1' % id_base:
{'tenantId': 't1',
'hostId': 'host1',
'id': '%s1' % id_base},
'%s2' % id_base:
{'tenantId': 't2',
'hostId': 'host2',
'id': '%s2' % id_base}}
self.run_scenario(expect_created)
def test_vms_managed_physnets_scenario(self):
cfg.CONF.set_override('managed_physnets', 'switch1', 'ml2_arista')
id_base = n_const.DEVICE_OWNER_COMPUTE_PREFIX + 'normal'
expect_created = {'%s2' % id_base:
{'tenantId': 't2',
'hostId': 'host2',
'id': '%s2' % id_base}}
self.run_scenario(expect_created)
class AristaBaremetalTest(AristaInstancesTestBase):
"""Test cases for creation & deletion of arista baremetal instances"""
def setUp(self):
super(AristaBaremetalTest, self).setUp()
self.ar = resources.Baremetals(self.rpc)
def test_baremetals_scenario(self):
id_base = n_const.DEVICE_OWNER_COMPUTE_PREFIX + 'baremetal'
legacy_id_base = n_const.DEVICE_OWNER_BAREMETAL_PREFIX + 'baremetal'
expect_created = {'%s1' % id_base:
{'tenantId': 't1',
'hostId': 'host1',
'id': '%s1' % id_base},
'%s2' % id_base:
{'tenantId': 't2',
'hostId': 'host2',
'id': '%s2' % id_base},
'%s1' % legacy_id_base:
{'tenantId': 't1',
'hostId': 'host1',
'id': '%s1' % legacy_id_base},
'%s2' % legacy_id_base:
{'tenantId': 't2',
'hostId': 'host2',
'id': '%s2' % legacy_id_base}}
self.run_scenario(expect_created)
def test_baremetals_managed_physnets_scenario(self):
cfg.CONF.set_override('managed_physnets', 'switch1', 'ml2_arista')
id_base = n_const.DEVICE_OWNER_COMPUTE_PREFIX + 'baremetal'
legacy_id_base = n_const.DEVICE_OWNER_BAREMETAL_PREFIX + 'baremetal'
expect_created = {'%s2' % id_base:
{'tenantId': 't2',
'hostId': 'host2',
'id': '%s2' % id_base},
'%s2' % legacy_id_base:
{'tenantId': 't2',
'hostId': 'host2',
'id': '%s2' % legacy_id_base}}
self.run_scenario(expect_created)
class AristaPortTestBase(AristaResourcesTestBase):
def delete_helper(self, port_id):
utils.delete_port(port_id)
class AristaDhcpPortTest(AristaPortTestBase):
"""Test cases for creation & deletion of arista dhcp ports"""
def setUp(self):
super(AristaDhcpPortTest, self).setUp()
self.ar = resources.DhcpPorts(self.rpc)
def test_dhcp_ports_scenario(self):
id_base = n_const.DEVICE_OWNER_DHCP + 'normal'
expect_created = {'p1': {'id': 'p1',
'portName': 'regular_port',
'tenantId': 't1',
'instanceType': 'dhcp',
'instanceId': '%s1' % id_base,
'networkId': 'n1',
'vlanType': 'allowed'},
'p2': {'id': 'p2',
'portName': 'hpb_port',
'tenantId': 't2',
'instanceType': 'dhcp',
'instanceId': '%s2' % id_base,
'networkId': 'n2',
'vlanType': 'allowed'}}
self.run_scenario(expect_created)
def test_dhcp_ports_managed_physnets_scenario(self):
cfg.CONF.set_override('managed_physnets', 'switch1', 'ml2_arista')
id_base = n_const.DEVICE_OWNER_DHCP + 'normal'
expect_created = {'p2': {'id': 'p2',
'portName': 'hpb_port',
'tenantId': 't2',
'instanceType': 'dhcp',
'instanceId': '%s2' % id_base,
'networkId': 'n2',
'vlanType': 'allowed'}}
self.run_scenario(expect_created)
class AristaRouterPortTest(AristaPortTestBase):
"""Test cases for creation & deletion of arista router ports"""
def setUp(self):
super(AristaRouterPortTest, self).setUp()
self.ar = resources.RouterPorts(self.rpc)
def test_router_ports_scenario(self):
id_base = n_const.DEVICE_OWNER_DVR_INTERFACE + 'normal'
legacy_id_base = n_const.DEVICE_OWNER_ROUTER_INTF + 'normal'
ha_id_base = n_const.DEVICE_OWNER_ROUTER_HA_INTF + 'normal'
expect_created = {'p3': {'id': 'p3',
'portName': 'regular_port',
'tenantId': 't1',
'instanceType': 'router',
'instanceId': '%s1' % id_base,
'networkId': 'n1',
'vlanType': 'allowed'},
'p4': {'id': 'p4',
'portName': 'hpb_port',
'tenantId': 't2',
'instanceType': 'router',
'instanceId': '%s2' % id_base,
'networkId': 'n2',
'vlanType': 'allowed'},
'uuid-ha-1': {
'id': 'uuid-ha-1',
'portName': 'regular_port',
'tenantId': 'ha-router-project',
'instanceType': 'router',
'instanceId': '%s' % ha_id_base,
'networkId': 'HA network',
'vlanType': 'allowed'},
'uuid-ha-2': {
'id': 'uuid-ha-2',
'portName': 'regular_port',
'tenantId': 'ha-router-project',
'instanceType': 'router',
'instanceId': '%s' % ha_id_base,
'networkId': 'HA network',
'vlanType': 'allowed'},
'uuid-hpb-ha-1': {
'id': 'uuid-hpb-ha-1',
'portName': 'ha_router_hpb_port',
'tenantId': 'ha-router-project',
'instanceType': 'router',
'instanceId': '%s' % ha_id_base,
'networkId': 'HA network',
'vlanType': 'allowed'},
'uuid-hpb-ha-2': {
'id': 'uuid-hpb-ha-2',
'portName': 'ha_router_hpb_port',
'tenantId': 'ha-router-project',
'instanceType': 'router',
'networkId': 'HA network',
'instanceId': '%s' % ha_id_base,
'vlanType': 'allowed'},
'p15': {'id': 'p15',
'portName': 'regular_port',
'networkId': 'n1',
'instanceId': '%s1' % legacy_id_base,
'instanceType': 'router',
'tenantId': 't1',
'vlanType': 'allowed'},
'p16': {'id': 'p16',
'portName': 'hpb_port',
'tenantId': 't2',
'instanceId': '%s2' % legacy_id_base,
'instanceType': 'router',
'networkId': 'n2',
'vlanType': 'allowed'}}
self.run_scenario(expect_created)
def test_router_ports_managed_physnets_scenario(self):
cfg.CONF.set_override('managed_physnets', 'switch1', 'ml2_arista')
id_base = n_const.DEVICE_OWNER_DVR_INTERFACE + 'normal'
legacy_id_base = n_const.DEVICE_OWNER_ROUTER_INTF + 'normal'
ha_id_base = n_const.DEVICE_OWNER_ROUTER_HA_INTF + 'normal'
expect_created = {'p4': {'id': 'p4',
'portName': 'hpb_port',
'tenantId': 't2',
'instanceType': 'router',
'instanceId': '%s2' % id_base,
'networkId': 'n2',
'vlanType': 'allowed'},
'uuid-hpb-ha-1': {
'id': 'uuid-hpb-ha-1',
'portName': 'ha_router_hpb_port',
'tenantId': 'ha-router-project',
'instanceType': 'router',
'instanceId': '%s' % ha_id_base,
'networkId': 'HA network',
'vlanType': 'allowed'},
'p16': {'id': 'p16',
'portName': 'hpb_port',
'tenantId': 't2',
'instanceType': 'router',
'instanceId': '%s2' % legacy_id_base,
'networkId': 'n2',
'vlanType': 'allowed'}}
self.run_scenario(expect_created)
class AristaVmPortTest(AristaPortTestBase):
"""Test cases for creation & deletion of arista vm ports"""
def setUp(self):
super(AristaVmPortTest, self).setUp()
self.ar = resources.VmPorts(self.rpc)
def test_vm_ports_scenario(self):
id_base = n_const.DEVICE_OWNER_COMPUTE_PREFIX + 'normal'
expect_created = {'p5': {'id': 'p5',
'portName': 'regular_port',
'tenantId': 't1',
'instanceType': 'vm',
'instanceId': '%s1' % id_base,
'networkId': 'n1',
'vlanType': 'allowed'},
'p6': {'id': 'p6',
'portName': 'hpb_port',
'tenantId': 't2',
'instanceType': 'vm',
'instanceId': '%s2' % id_base,
'networkId': 'n2',
'vlanType': 'allowed'},
'p7': {'id': 'p7',
'portName': 'trunk_subport',
'tenantId': 't1',
'instanceType': 'vm',
'instanceId': '%s1' % id_base,
'networkId': 'n1',
'vlanType': 'allowed'}}
self.run_scenario(expect_created)
def test_vm_ports_managed_physnets_scenario(self):
cfg.CONF.set_override('managed_physnets', 'switch1', 'ml2_arista')
id_base = n_const.DEVICE_OWNER_COMPUTE_PREFIX + 'normal'
expect_created = {'p6': {'id': 'p6',
'portName': 'hpb_port',
'tenantId': 't2',
'instanceType': 'vm',
'instanceId': '%s2' % id_base,
'networkId': 'n2',
'vlanType': 'allowed'}}
self.run_scenario(expect_created)
class AristaBaremetalPortTest(AristaPortTestBase):
"""Test cases for creation & deletion of arista baremetal ports"""
def setUp(self):
super(AristaBaremetalPortTest, self).setUp()
self.ar = resources.BaremetalPorts(self.rpc)
def test_baremetal_ports_scenario(self):
id_base = n_const.DEVICE_OWNER_COMPUTE_PREFIX + 'baremetal'
legacy_id_base = n_const.DEVICE_OWNER_BAREMETAL_PREFIX + 'baremetal'
expect_created = {'p8': {'id': 'p8',
'portName': 'regular_port',
'tenantId': 't1',
'instanceType': 'baremetal',
'instanceId': '%s1' % id_base,
'networkId': 'n1',
'vlanType': 'native'},
'p9': {'id': 'p9',
'portName': 'hpb_port',
'tenantId': 't2',
'instanceType': 'baremetal',
'instanceId': '%s2' % id_base,
'networkId': 'n2',
'vlanType': 'native'},
'p10': {'id': 'p10',
'portName': 'trunk_subport',
'tenantId': 't1',
'instanceType': 'baremetal',
'instanceId': '%s1' % id_base,
'networkId': 'n1',
'vlanType': 'allowed'},
'p11': {'id': 'p11',
'portName': 'regular_port',
'tenantId': 't1',
'instanceType': 'baremetal',
'instanceId': '%s1' % legacy_id_base,
'networkId': 'n1',
'vlanType': 'native'},
'p12': {'id': 'p12',
'portName': 'hpb_port',
'tenantId': 't2',
'instanceType': 'baremetal',
'instanceId': '%s2' % legacy_id_base,
'networkId': 'n2',
'vlanType': 'native'}}
self.run_scenario(expect_created)
def test_baremetal_ports_managed_physnets_scenario(self):
cfg.CONF.set_override('managed_physnets', 'switch1', 'ml2_arista')
id_base = n_const.DEVICE_OWNER_COMPUTE_PREFIX + 'baremetal'
legacy_id_base = n_const.DEVICE_OWNER_BAREMETAL_PREFIX + 'baremetal'
expect_created = {'p9': {'id': 'p9',
'portName': 'hpb_port',
'tenantId': 't2',
'instanceType': 'baremetal',
'instanceId': '%s2' % id_base,
'networkId': 'n2',
'vlanType': 'native'},
'p12': {'id': 'p12',
'portName': 'hpb_port',
'tenantId': 't2',
'instanceType': 'baremetal',
'instanceId': '%s2' % legacy_id_base,
'networkId': 'n2',
'vlanType': 'native'}}
self.run_scenario(expect_created)
class AristaPortBindingTest(AristaResourcesTestBase):
"""Test cases for creation & deletion of arista port bindings"""
def setUp(self):
super(AristaPortBindingTest, self).setUp()
self.ar = resources.PortBindings(self.rpc)
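# Binding keys take two forms (see test_port_binding_scenario below):
# (port_id, host) for host bindings and (port_id, (switch_id, interface))
# for baremetal switch bindings; delete_helper dispatches on the key type.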
def delete_helper(self, binding_key):
if isinstance(binding_key[1], tuple):
utils.remove_switch_binding(binding_key[0],
*binding_key[1])
else:
utils.delete_port_binding(*binding_key)
def test_port_binding_scenario(self):
expect_created = {
# DHCP ports
('p1', 'host1'):
{'portId': 'p1',
'hostBinding': [{'host': 'host1',
'segment': [{'id': 's1'}]}],
'switchBinding': []},
('p2', 'host2'):
{'portId': 'p2',
'hostBinding': [{'host': 'host2',
'segment': [{'id': 's2'},
{'id': 's3'}]}],
'switchBinding': []},
# DVR ports
('p3', 'host1'):
{'portId': 'p3',
'hostBinding': [{'host': 'host1',
'segment': [{'id': 's1'}]}],
'switchBinding': []},
('p3', 'host2'):
{'portId': 'p3',
'hostBinding': [{'host': 'host2',
'segment': [{'id': 's1'}]}],
'switchBinding': []},
('p4', 'host1'):
{'portId': 'p4',
'hostBinding': [{'host': 'host1',
'segment': [{'id': 's2'},
{'id': 's3'}]}],
'switchBinding': []},
('p4', 'host2'):
{'portId': 'p4',
'hostBinding': [{'host': 'host2',
'segment': [{'id': 's2'},
{'id': 's4'}]}],
'switchBinding': []},
# VM ports
('p5', 'host1'):
{'portId': 'p5',
'hostBinding': [{'host': 'host1',
'segment': [{'id': 's1'}]}],
'switchBinding': []},
('p6', 'host2'):
{'portId': 'p6',
'hostBinding': [{'host': 'host2',
'segment': [{'id': 's2'},
{'id': 's3'}]}],
'switchBinding': []},
('p7', 'host1'):
{'portId': 'p7',
'hostBinding': [{'host': 'host1',
'segment': [{'id': 's1'}]}],
'switchBinding': []},
# Baremetal ports
('p8', ('00:11:22:33:44:55', 'Ethernet1')):
{'portId': 'p8',
'hostBinding': [],
'switchBinding': [{'host': 'host1',
'interface': 'Ethernet1',
'switch': '00:11:22:33:44:55',
'segment': [{'id': 's1'}]}]},
('p8', ('00:11:22:33:44:55', 'Ethernet2')):
{'portId': 'p8',
'hostBinding': [],
'switchBinding': [{'host': 'host1',
'interface': 'Ethernet2',
'segment': [{'id': 's1'}],
'switch': '00:11:22:33:44:55'}]},
('p8', ('55:44:33:22:11:00', 'Ethernet1/1')):
{'portId': 'p8',
'hostBinding': [],
'switchBinding': [{'host': 'host1',
'interface': 'Ethernet1/1',
'segment': [{'id': 's1'}],
'switch': '55:44:33:22:11:00'}]},
('p8', ('55:44:33:22:11:00', 'Ethernet1/2')):
{'portId': 'p8',
'hostBinding': [],
'switchBinding': [{'host': 'host1',
'interface': 'Ethernet1/2',
'segment': [{'id': 's1'}],
'switch': '55:44:33:22:11:00'}]},
('p9', ('00:11:22:33:44:55', 'Ethernet1')):
{'portId': 'p9',
'hostBinding': [],
'switchBinding': [{'host': 'host2',
'interface': 'Ethernet1',
'switch': '00:11:22:33:44:55',
'segment': [{'id': 's2'}, {'id': 's3'}]}]},
('p9', ('00:11:22:33:44:55', 'Ethernet2')):
{'portId': 'p9',
'hostBinding': [],
'switchBinding': [{'host': 'host2',
'interface': 'Ethernet2',
'segment': [{'id': 's2'}, {'id': 's3'}],
'switch': '00:11:22:33:44:55'}]},
('p9', ('55:44:33:22:11:00', 'Ethernet1/1')):
{'portId': 'p9',
'hostBinding': [],
'switchBinding': [{'host': 'host2',
'interface': 'Ethernet1/1',
'segment': [{'id': 's2'}, {'id': 's3'}],
'switch': '55:44:33:22:11:00'}]},
('p9', ('55:44:33:22:11:00', 'Ethernet1/2')):
{'portId': 'p9',
'hostBinding': [],
'switchBinding': [{'host': 'host2',
'interface': 'Ethernet1/2',
'segment': [{'id': 's2'}, {'id': 's3'}],
'switch': '55:44:33:22:11:00'}]},
('p10', ('00:11:22:33:44:55', 'Ethernet1')):
{'portId': 'p10',
'hostBinding': [],
'switchBinding': [{'host': 'host1',
'interface': 'Ethernet1',
'switch': '00:11:22:33:44:55',
'segment': [{'id': 's1'}]}]},
('p10', ('00:11:22:33:44:55', 'Ethernet2')):
{'portId': 'p10',
'hostBinding': [],
'switchBinding': [{'host': 'host1',
'interface': 'Ethernet2',
'segment': [{'id': 's1'}],
'switch': '00:11:22:33:44:55'}]},
('p10', ('55:44:33:22:11:00', 'Ethernet1/1')):
{'portId': 'p10',
'hostBinding': [],
'switchBinding': [{'host': 'host1',
'interface': 'Ethernet1/1',
'segment': [{'id': 's1'}],
'switch': '55:44:33:22:11:00'}]},
('p10', ('55:44:33:22:11:00', 'Ethernet1/2')):
{'portId': 'p10',
'hostBinding': [],
'switchBinding': [{'host': 'host1',
'interface': 'Ethernet1/2',
'segment': [{'id': 's1'}],
'switch': '55:44:33:22:11:00'}]},
# Legacy baremetal ports
('p11', ('00:11:22:33:44:55', 'Ethernet1')):
{'portId': 'p11',
'hostBinding': [],
'switchBinding': [{'host': 'host1',
'interface': 'Ethernet1',
'switch': '00:11:22:33:44:55',
'segment': [{'id': 's1'}]}]},
('p11', ('00:11:22:33:44:55', 'Ethernet2')):
{'portId': 'p11',
'hostBinding': [],
'switchBinding': [{'host': 'host1',
'interface': 'Ethernet2',
'segment': [{'id': 's1'}],
'switch': '00:11:22:33:44:55'}]},
('p11', ('55:44:33:22:11:00', 'Ethernet1/1')):
{'portId': 'p11',
'hostBinding': [],
'switchBinding': [{'host': 'host1',
'interface': 'Ethernet1/1',
'segment': [{'id': 's1'}],
'switch': '55:44:33:22:11:00'}]},
('p11', ('55:44:33:22:11:00', 'Ethernet1/2')):
{'portId': 'p11',
'hostBinding': [],
'switchBinding': [{'host': 'host1',
'interface': 'Ethernet1/2',
'segment': [{'id': 's1'}],
'switch': '55:44:33:22:11:00'}]},
('p12', ('00:11:22:33:44:55', 'Ethernet1')):
{'portId': 'p12',
'hostBinding': [],
'switchBinding': [{'host': 'host2',
'interface': 'Ethernet1',
'switch': '00:11:22:33:44:55',
'segment': [{'id': 's2'}, {'id': 's3'}]}]},
('p12', ('00:11:22:33:44:55', 'Ethernet2')):
{'portId': 'p12',
'hostBinding': [],
'switchBinding': [{'host': 'host2',
'interface': 'Ethernet2',
'segment': [{'id': 's2'}, {'id': 's3'}],
'switch': '00:11:22:33:44:55'}]},
('p12', ('55:44:33:22:11:00', 'Ethernet1/1')):
{'portId': 'p12',
'hostBinding': [],
'switchBinding': [{'host': 'host2',
'interface': 'Ethernet1/1',
'segment': [{'id': 's2'}, {'id': 's3'}],
'switch': '55:44:33:22:11:00'}]},
('p12', ('55:44:33:22:11:00', 'Ethernet1/2')):
{'portId': 'p12',
'hostBinding': [],
'switchBinding': [{'host': 'host2',
'interface': 'Ethernet1/2',
'segment': [{'id': 's2'}, {'id': 's3'}],
'switch': '55:44:33:22:11:00'}]},
# HA network port
('uuid-ha-1', 'host1'):
{'portId': 'uuid-ha-1',
'switchBinding': [],
'hostBinding': [{'segment': [{'id': 's5'}], 'host': 'host1'}]},
('uuid-ha-2', 'host2'):
{'portId': 'uuid-ha-2',
'switchBinding': [],
'hostBinding': [{'segment': [{'id': 's5'}], 'host': 'host2'}]},
('uuid-hpb-ha-1', 'host1'):
{'portId': 'uuid-hpb-ha-1',
'switchBinding': [],
'hostBinding': [{'segment': [{'id': 's6'}, {'id': 's7'}],
'host': 'host1'}]},
('uuid-hpb-ha-2', 'host2'):
{'portId': 'uuid-hpb-ha-2',
'switchBinding': [],
'hostBinding': [{'segment': [{'id': 's6'}, {'id': 's8'}],
'host': 'host2'}]},
# Legacy router port
('p15', 'host1'):
{'portId': 'p15',
'switchBinding': [],
'hostBinding': [{'segment': [{'id': 's1'}], 'host': 'host1'}]},
('p16', 'host2'):
{'portId': 'p16',
'switchBinding': [],
'hostBinding': [{'segment': [{'id': 's2'}, {'id': 's3'}],
'host': 'host2'}]}}
self.run_scenario(expect_created)
def test_port_binding_managed_physnets_scenario(self):
cfg.CONF.set_override('managed_physnets', 'switch1', 'ml2_arista')
expect_created = {
# DHCP ports
('p2', 'host2'):
{'portId': 'p2',
'hostBinding': [{'host': 'host2',
'segment': [{'id': 's2'},
{'id': 's3'}]}],
'switchBinding': []},
# DVR ports
('p4', 'host1'):
{'portId': 'p4',
'hostBinding': [{'host': 'host1',
'segment': [{'id': 's2'},
{'id': 's3'}]}],
'switchBinding': []},
# VM ports
('p6', 'host2'):
{'portId': 'p6',
'hostBinding': [{'host': 'host2',
'segment': [{'id': 's2'},
{'id': 's3'}]}],
'switchBinding': []},
# Baremetal ports
('p9', ('00:11:22:33:44:55', 'Ethernet1')):
{'portId': 'p9',
'hostBinding': [],
'switchBinding': [{'host': 'host2',
'interface': 'Ethernet1',
'switch': '00:11:22:33:44:55',
'segment': [{'id': 's2'}, {'id': 's3'}]}]},
('p9', ('00:11:22:33:44:55', 'Ethernet2')):
{'portId': 'p9',
'hostBinding': [],
'switchBinding': [{'host': 'host2',
'interface': 'Ethernet2',
'segment': [{'id': 's2'}, {'id': 's3'}],
'switch': '00:11:22:33:44:55'}]},
('p9', ('55:44:33:22:11:00', 'Ethernet1/1')):
{'portId': 'p9',
'hostBinding': [],
'switchBinding': [{'host': 'host2',
'interface': 'Ethernet1/1',
'segment': [{'id': 's2'}, {'id': 's3'}],
'switch': '55:44:33:22:11:00'}]},
('p9', ('55:44:33:22:11:00', 'Ethernet1/2')):
{'portId': 'p9',
'hostBinding': [],
'switchBinding': [{'host': 'host2',
'interface': 'Ethernet1/2',
'segment': [{'id': 's2'}, {'id': 's3'}],
'switch': '55:44:33:22:11:00'}]},
# Legacy baremetal ports
('p12', ('00:11:22:33:44:55', 'Ethernet1')):
{'portId': 'p12',
'hostBinding': [],
'switchBinding': [{'host': 'host2',
'interface': 'Ethernet1',
'switch': '00:11:22:33:44:55',
'segment': [{'id': 's2'}, {'id': 's3'}]}]},
('p12', ('00:11:22:33:44:55', 'Ethernet2')):
{'portId': 'p12',
'hostBinding': [],
'switchBinding': [{'host': 'host2',
'interface': 'Ethernet2',
'segment': [{'id': 's2'}, {'id': 's3'}],
'switch': '00:11:22:33:44:55'}]},
('p12', ('55:44:33:22:11:00', 'Ethernet1/1')):
{'portId': 'p12',
'hostBinding': [],
'switchBinding': [{'host': 'host2',
'interface': 'Ethernet1/1',
'segment': [{'id': 's2'}, {'id': 's3'}],
'switch': '55:44:33:22:11:00'}]},
('p12', ('55:44:33:22:11:00', 'Ethernet1/2')):
{'portId': 'p12',
'hostBinding': [],
'switchBinding': [{'host': 'host2',
'interface': 'Ethernet1/2',
'segment': [{'id': 's2'}, {'id': 's3'}],
'switch': '55:44:33:22:11:00'}]},
# Legacy router
('p16', 'host2'):
{'portId': 'p16',
'switchBinding': [],
'hostBinding': [{'segment': [{'id': 's2'},
{'id': 's3'}], 'host': 'host2'}]},
('uuid-hpb-ha-1', 'host1'):
{'portId': 'uuid-hpb-ha-1',
'switchBinding': [],
'hostBinding': [{'segment': [{'id': 's6'}, {'id': 's7'}],
'host': 'host1'}]}}
self.run_scenario(expect_created)
networking_arista-2023.1.0/networking_arista/tests/unit/ml2/test_arista_sync.py
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from eventlet import greenthread
from eventlet import queue
import mock
import time
from neutron_lib.plugins import constants as plugin_constants
from neutron_lib.plugins import directory
from oslo_config import cfg
from oslo_utils import importutils
from neutron.tests.unit import testlib_api
from networking_arista.common import constants as a_const
from networking_arista.common import exceptions as arista_exc
from networking_arista.ml2 import arista_sync
from networking_arista.ml2.mechanism_arista import MechResource
from networking_arista.tests.unit import utils
class SyncServiceTest(testlib_api.SqlTestCase):
"""Test cases for the sync service."""
def setUp(self):
super(SyncServiceTest, self).setUp()
utils.setup_arista_wrapper_config(cfg)
plugin_klass = importutils.import_class(
"neutron.db.db_base_plugin_v2.NeutronDbPluginV2")
directory.add_plugin(plugin_constants.CORE, plugin_klass())
utils.setup_scenario()
self.mech_queue = queue.LightQueue()
self.sync_service = arista_sync.AristaSyncWorker(self.mech_queue)
self.sync_service._rpc = utils.MockCvx('region')
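# The worker's RPC client is swapped for utils.MockCvx, so tests can run
# a sync and then inspect endpoint_data directly instead of talking to a
# real CVX.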
def tearDown(self):
if self.sync_service._running:
self.sync_service.stop()
self.sync_service.wait()
super(SyncServiceTest, self).tearDown()
def test_start(self):
self.sync_service.start()
self.assertTrue(self.sync_service._running)
self.assertIsNotNone(self.sync_service._thread)
self.assertIsNotNone(self.sync_service.sync_order)
self.assertIsNotNone(self.sync_service.done)
def test_start_twice(self):
self.sync_service.start()
current_thread = self.sync_service._thread
self.sync_service.start()
self.assertEqual(self.sync_service._thread, current_thread)
def test_stop_graceful(self):
self.test_start()
running_thread = self.sync_service._thread
self.sync_service.stop()
self.sync_service.wait()
self.assertFalse(self.sync_service._running)
self.assertTrue(running_thread.dead)
self.assertIsNone(self.sync_service._thread)
def test_stop_ungraceful(self):
self.test_start()
running_thread = self.sync_service._thread
self.sync_service.stop(graceful=False)
self.assertTrue(running_thread.dead)
self.assertIsNone(self.sync_service._thread)
def test_reset(self):
self.test_start()
old_thread = self.sync_service._thread
self.sync_service.reset()
self.assertNotEqual(self.sync_service._thread, old_thread)
def test_resource_class_full_coverage(self):
self.sync_service.initialize()
for i in range(len(self.sync_service.sync_order)):
self.sync_service.sync_order[i] = mock.MagicMock()
self.sync_service.synchronize_resources()
for resource_type in self.sync_service.sync_order:
resource_type.delete_cvx_resources.assert_called_once()
resource_type.create_cvx_resources.assert_called_once()
def test_full_sync_cvx_populated(self):
self.sync_service.initialize()
self.sync_service.synchronize_resources()
for endpoint, data in self.sync_service._rpc.endpoint_data.items():
self.assertNotEqual(data, {})
def test_process_mech_update(self):
self.sync_service.initialize()
for resource_type in a_const.ALL_RESOURCE_TYPES:
res_cls = mock.MagicMock()
with mock.patch.object(self.sync_service,
'get_resource_class') as get:
get.return_value = res_cls
res = MechResource('id', resource_type, a_const.CREATE)
self.sync_service.update_neutron_resource(res)
get.assert_called_once_with(resource_type)
res_cls.update_neutron_resource.assert_called_once_with(
'id', a_const.CREATE)
get.reset_mock()
get.return_value = res_cls
res = MechResource('id', resource_type, a_const.DELETE)
self.sync_service.update_neutron_resource(res)
get.assert_called_once_with(resource_type)
res_cls.update_neutron_resource.assert_called_once_with(
'id', a_const.DELETE)
def test_force_full_sync(self):
self.sync_service.initialize()
for i in range(len(self.sync_service.sync_order)):
self.sync_service.sync_order[i] = mock.MagicMock()
self.sync_service.force_full_sync()
for resource_type in self.sync_service.sync_order:
resource_type.clear_all_data.assert_called_once()
def test_sync_timeout(self):
self.sync_service.initialize()
with mock.patch.object(
self.sync_service, 'check_if_out_of_sync') as oos:
self.sync_service.wait_for_sync_required()
oos.assert_called_once()
def test_full_sync_required(self):
self.sync_service.initialize()
self.sync_service._cvx_uuid = 'old-id'
self.sync_service._rpc = mock.MagicMock()
self.sync_service._rpc.get_cvx_uuid.return_value = 'new-id'
with mock.patch.object(self.sync_service, 'force_full_sync') as ffs:
self.assertTrue(self.sync_service.check_if_out_of_sync())
ffs.assert_called_once()
self.assertEqual(self.sync_service._synchronizing_uuid, 'new-id')
self.assertNotEqual(self.sync_service._last_sync_time, 0)
def test_mech_queue_timeout(self):
self.sync_service.initialize()
self.assertFalse(self.sync_service.wait_for_mech_driver_update(1))
def test_mech_queue_updated(self):
self.sync_service.initialize()
resource = MechResource('tid', a_const.TENANT_RESOURCE, a_const.CREATE)
self.mech_queue.put(resource)
# Must yield to allow resource to be available on the queue
greenthread.sleep(0)
self.assertTrue(self.sync_service.wait_for_mech_driver_update(1))
self.assertEqual(self.sync_service._resources_to_update, [resource])
def test_sync_start_fail(self):
self.sync_service.initialize()
self.sync_service._rpc = mock.MagicMock()
self.sync_service._rpc.sync_start.return_value = False
self.assertEqual(self.sync_service._last_sync_time, 0)
for i in range(len(self.sync_service.sync_order)):
self.sync_service.sync_order[i] = mock.MagicMock()
self.sync_service.synchronize_resources()
for resource_type in self.sync_service.sync_order:
resource_type.delete_cvx_resources.assert_not_called()
resource_type.create_cvx_resources.assert_not_called()
self.sync_service._rpc.sync_end.assert_not_called()
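# The next test injects an RPC failure mid-update and verifies that
# sync_end is still sent, so the CVX sync lock is released rather than
# left held after a failed sync.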
def test_sync_lock_release_on_failure(self):
self.sync_service.initialize()
self.sync_service._rpc = mock.MagicMock()
self.sync_service._rpc.get_cvx_uuid.return_value = 'cvx-id'
self.sync_service.update_neutron_resource = mock.Mock(
side_effect=arista_exc.AristaRpcError(msg='fail'))
resource = MechResource('tid', a_const.TENANT_RESOURCE, a_const.CREATE)
self.sync_service.start()
self.sync_service._last_sync_time = time.time()
self.sync_service._cvx_uuid = 'cvx-id'
self.sync_service._synchronizing_uuid = 'cvx-id'
self.mech_queue.put(resource)
greenthread.sleep(0)
self.sync_service.stop()
self.sync_service.wait()
self.sync_service._rpc.sync_end.assert_called_once()
def test_failure_after_switchover(self):
"""Test behaviour of sync fail after CVX switchover
On sync failure, we should retain old sync UUID and we should not set
sync complete on CVX to avoid publishing config based on incomplete
state.
"""
self.sync_service.initialize()
self.sync_service._rpc = mock.MagicMock()
self.sync_service._rpc.get_cvx_uuid.return_value = 'new-id'
self.sync_service.start()
self.sync_service._cvx_uuid = 'old-id'
self.sync_service.tenants.delete_cvx_resources = mock.Mock(
side_effect=arista_exc.AristaRpcError(msg='fail'))
greenthread.sleep(0)
self.sync_service.stop()
self.sync_service.wait()
self.sync_service._rpc.sync_start.assert_called()
self.sync_service._rpc.sync_end.assert_not_called()
self.assertEqual(self.sync_service._cvx_uuid, 'old-id')
self.assertIsNone(self.sync_service._synchronizing_uuid)
networking_arista-2023.1.0/networking_arista/tests/unit/ml2/test_mechanism_arista.py
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from neutron.tests.common import helpers
from neutron_lib.api.definitions import external_net as extnet_const
from neutron_lib import constants as n_const
from oslo_config import cfg
from networking_arista.tests.unit.ml2 import ml2_test_base
from neutron_lib.api.definitions import portbindings
class BasicMechDriverTestCase(ml2_test_base.MechTestBase):
def test_create_network(self):
# Test create regular network
tenant_id = 'tid'
reg_net_dict = {'network': {'name': 'net1',
'tenant_id': tenant_id,
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
reg_network, reg_n_ctx = self.create_network(reg_net_dict)
self.assertTenantCreated(tenant_id)
self.assertNetworkCreated(reg_network['id'])
for segment in reg_n_ctx.network_segments:
self.assertSegmentCreated(segment['id'])
# Test create shared network
shrd_net_dict = {'network': {'name': 'shared_net',
'tenant_id': tenant_id,
'admin_state_up': True,
'shared': True,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
shared_network, shared_n_ctx = self.create_network(shrd_net_dict)
self.assertTenantCreated(tenant_id)
self.assertNetworkCreated(shared_network['id'])
for segment in shared_n_ctx.network_segments:
self.assertSegmentCreated(segment['id'])
# Test delete regular network
self.delete_network(reg_network['id'])
self.assertTenantCreated(tenant_id)
self.assertNetworkDeleted(reg_network['id'])
for segment in reg_n_ctx.network_segments:
self.assertSegmentDeleted(segment['id'])
# Test delete shared network
self.delete_network(shared_network['id'])
self.assertTenantDeleted(tenant_id)
self.assertNetworkDeleted(shared_network['id'])
for segment in shared_n_ctx.network_segments:
self.assertSegmentDeleted(segment['id'])
def test_basic_dhcp_port(self):
network_tenant = 'net-ten'
net_dict = {'network': {'name': 'net',
'tenant_id': network_tenant,
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
network, _ = self.create_network(net_dict)
# Create DHCP port
device_id = 'dhcp-1'
port_tenant = 'port-ten'
port_host = self.host1
port_dict = {'name': 'port1',
'tenant_id': port_tenant,
'network_id': network['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': device_id,
'device_owner': n_const.DEVICE_OWNER_DHCP,
'binding:host_id': port_host}
port, _ = self.create_port(port_dict)
self.assertTenantCreated(port_tenant)
self.assertDhcpCreated(device_id)
self.assertDhcpPortCreated(port['id'])
self.assertPortBindingCreated((port['id'], port_host))
# Delete DHCP port
self.delete_port(port['id'])
self.assertTenantDeleted(port_tenant)
self.assertDhcpDeleted(device_id)
self.assertDhcpPortDeleted(port['id'])
self.assertPortBindingDeleted((port['id'], port_host))
def test_basic_dvr_port(self):
network_tenant = 'net-ten'
net_dict = {'network': {'name': 'net',
'tenant_id': network_tenant,
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
network, _ = self.create_network(net_dict)
# Create DVR port
device_id = 'router-1'
port_tenant = 'port-ten'
port_host_1 = self.host1
port_dict = {'name': 'port1',
'tenant_id': port_tenant,
'network_id': network['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': device_id,
'device_owner': n_const.DEVICE_OWNER_DVR_INTERFACE}
port, _ = self.create_port(port_dict)
self.bind_dvr_to_host(port, port_host_1)
self.assertTenantCreated(port_tenant)
self.assertRouterCreated(device_id)
self.assertRouterPortCreated(port['id'])
self.assertPortBindingCreated((port['id'], port_host_1))
# Bring up a second DVR host
port_host_2 = self.host2
port, port_ctx = self.bind_dvr_to_host(port, port_host_2)
self.assertPortBindingCreated((port['id'], port_host_2))
# Delete the port
self.delete_port(port['id'])
self.assertTenantDeleted(port_tenant)
self.assertRouterDeleted(device_id)
self.assertRouterPortDeleted(port['id'])
self.assertPortBindingDeleted((port['id'], port_host_1))
self.assertPortBindingDeleted((port['id'], port_host_2))
def test_dvr_port_host_bind_unbind(self):
network_tenant = 'net-ten'
net_dict = {'network': {'name': 'net',
'tenant_id': network_tenant,
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
network, _ = self.create_network(net_dict)
# Create DVR port
device_id = 'router-1'
port_tenant = 'port-ten'
port_host_1 = self.host1
port_dict = {'name': 'port1',
'tenant_id': port_tenant,
'network_id': network['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': device_id,
'device_owner': n_const.DEVICE_OWNER_DVR_INTERFACE}
port, _ = self.create_port(port_dict)
port, port_ctx = self.bind_dvr_to_host(port, port_host_1)
self.assertTenantCreated(port_tenant)
self.assertRouterCreated(device_id)
self.assertRouterPortCreated(port['id'])
self.assertPortBindingCreated((port['id'], port_host_1))
# Bring up a second DVR host
port_host_2 = self.host2
port, port_ctx = self.bind_dvr_to_host(port, port_host_2)
self.assertPortBindingCreated((port['id'], port_host_2))
# Remove the second host
self.unbind_dvr_from_host(port, port_host_2)
self.assertPortBindingDeleted((port['id'], port_host_2))
self.assertTenantCreated(port_tenant)
self.assertRouterCreated(device_id)
self.assertRouterPortCreated(port['id'])
self.assertPortBindingCreated((port['id'], port_host_1))
# Delete the port
self.delete_port(port['id'])
self.assertTenantDeleted(port_tenant)
self.assertRouterDeleted(device_id)
self.assertRouterPortDeleted(port['id'])
self.assertPortBindingDeleted((port['id'], port_host_1))
def test_basic_vm_port(self):
network_tenant = 'net-ten'
net_dict = {'network': {'name': 'net',
'tenant_id': network_tenant,
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
network, _ = self.create_network(net_dict)
# Create VM port
device_id = 'vm-1'
port_tenant = 'port-ten'
port_host = self.host1
port_dict = {'name': 'port1',
'tenant_id': port_tenant,
'network_id': network['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': device_id,
'device_owner': 'compute:',
'binding:host_id': port_host}
port, _ = self.create_port(port_dict)
self.assertTenantCreated(port_tenant)
self.assertVmCreated(device_id)
self.assertVmPortCreated(port['id'])
self.assertPortBindingCreated((port['id'], port_host))
# Delete VM port
self.delete_port(port['id'])
self.assertTenantDeleted(port_tenant)
self.assertVmDeleted(device_id)
self.assertVmPortDeleted(port['id'])
self.assertPortBindingDeleted((port['id'], port_host))
def test_basic_baremetal_port(self):
network_tenant = 'net-ten'
net_dict = {'network': {'name': 'net',
'tenant_id': network_tenant,
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
network, _ = self.create_network(net_dict)
# Create baremetal port
device_id = 'baremetal-1'
port_tenant = 'port-ten'
port_host = 'bm-host'
switch_id = '00:11:22:33:44:55'
switch_port = 'Ethernet1/1'
port_dict = {'name': 'port1',
'tenant_id': port_tenant,
'network_id': network['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': device_id,
'device_owner': n_const.DEVICE_OWNER_BAREMETAL_PREFIX,
'binding:host_id': port_host,
'binding:profile': {'local_link_information': [
{'switch_id': switch_id,
'port_id': switch_port}]},
'binding:vnic_type': 'baremetal'}
port, _ = self.create_port(port_dict)
self.assertTenantCreated(port_tenant)
self.assertBaremetalCreated(device_id)
self.assertBaremetalPortCreated(port['id'])
self.assertPortBindingCreated((port['id'], (switch_id, switch_port)))
# Delete baremetal port
self.delete_port(port['id'])
self.assertTenantDeleted(port_tenant)
self.assertBaremetalDeleted(device_id)
self.assertBaremetalPortDeleted(port['id'])
self.assertPortBindingDeleted((port['id'], (switch_id, switch_port)))
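# The MLAG variant below binds a single baremetal port to interfaces on
# two switches and expects one switch binding per (switch_id, interface)
# pair.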
def test_basic_baremetal_mlag(self):
network_tenant = 'net-ten'
net_dict = {'network': {'name': 'net',
'tenant_id': network_tenant,
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
network, _ = self.create_network(net_dict)
# Create baremetal port
device_id = 'baremetal-1'
port_tenant = 'port-ten'
port_host = 'bm-host'
switch_1_id = '00:11:22:33:44:55'
switch_1_port = 'Ethernet1/1'
switch_2_id = '55:44:33:22:11:00'
switch_2_port = 'Ethernet2'
port_dict = {'name': 'port1',
'tenant_id': port_tenant,
'network_id': network['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': device_id,
'device_owner': n_const.DEVICE_OWNER_BAREMETAL_PREFIX,
'binding:host_id': port_host,
'binding:profile': {'local_link_information': [
{'switch_id': switch_1_id,
'port_id': switch_1_port},
{'switch_id': switch_2_id,
'port_id': switch_2_port}]},
'binding:vnic_type': 'baremetal'}
port, _ = self.create_port(port_dict)
self.assertTenantCreated(port_tenant)
self.assertBaremetalCreated(device_id)
self.assertBaremetalPortCreated(port['id'])
self.assertPortBindingCreated(
(port['id'], (switch_1_id, switch_1_port)))
self.assertPortBindingCreated(
(port['id'], (switch_2_id, switch_2_port)))
# Delete baremetal port
self.delete_port(port['id'])
self.assertTenantDeleted(port_tenant)
self.assertBaremetalDeleted(device_id)
self.assertBaremetalPortDeleted(port['id'])
self.assertPortBindingDeleted(
(port['id'], (switch_1_id, switch_1_port)))
self.assertPortBindingDeleted(
(port['id'], (switch_2_id, switch_2_port)))
def test_host_migration(self):
network_tenant = 'net-ten'
net_dict = {'network': {'name': 'net',
'tenant_id': network_tenant,
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
network, _ = self.create_network(net_dict)
# Create VM port
device_id = 'vm-1'
port_tenant = 'port-ten'
port_host = self.host1
port_dict = {'name': 'port1',
'tenant_id': port_tenant,
'network_id': network['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': device_id,
'device_owner': 'compute:',
'binding:host_id': port_host}
port, _ = self.create_port(port_dict)
self.assertTenantCreated(port_tenant)
self.assertVmCreated(device_id)
self.assertVmPortCreated(port['id'])
self.assertPortBindingCreated((port['id'], port_host))
# Migrate the port
new_port_host = self.host2
port, _ = self.migrate_port(port['id'], new_port_host)
self.assertTenantCreated(port_tenant)
self.assertVmCreated(device_id)
self.assertVmPortCreated(port['id'])
self.assertPortBindingDeleted((port['id'], port_host))
self.assertPortBindingCreated((port['id'], new_port_host))
# Delete VM port
self.delete_port(port['id'])
self.assertTenantDeleted(port_tenant)
self.assertVmDeleted(device_id)
self.assertVmPortDeleted(port['id'])
self.assertPortBindingDeleted((port['id'], new_port_host))
def test_dhcp_migration(self):
network_tenant = 'net-ten'
net_dict = {'network': {'name': 'net',
'tenant_id': network_tenant,
'admin_state_up': True,
'shared': False,
'provider:physical_network': 'physnet1',
'provider:network_type': 'vlan'}}
network, _ = self.create_network(net_dict)
# Create DHCP port
device_id = 'dhcp-1'
port_tenant = 'port-ten'
port_host = self.host1
port_dict = {'name': 'port1',
'tenant_id': port_tenant,
'network_id': network['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': device_id,
'device_owner': n_const.DEVICE_OWNER_DHCP,
'binding:host_id': port_host,
'binding:vnic_type': 'normal'}
port, _ = self.create_port(port_dict)
self.assertTenantCreated(port_tenant)
self.assertDhcpCreated(device_id)
self.assertDhcpPortCreated(port['id'])
self.assertPortBindingCreated((port['id'], port_host))
# Migrate the DHCP port to a new dhcp instance
new_device_id = 'dhcp-2'
self.migrate_dhcp_device(port['id'], new_device_id)
self.assertTenantCreated(port_tenant)
self.assertDhcpCreated(new_device_id)
self.assertDhcpDeleted(device_id)
self.assertDhcpPortCreated(port['id'])
self.assertPortBindingCreated((port['id'], port_host))
# Delete DHCP port
self.delete_port(port['id'])
self.assertTenantDeleted(port_tenant)
self.assertDhcpDeleted(device_id)
self.assertDhcpPortDeleted(port['id'])
self.assertPortBindingDeleted((port['id'], port_host))
def test_vm_trunk_port(self):
network_tenant = 'net-ten'
net_dict = {'network': {'name': 'net1',
'tenant_id': network_tenant,
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
network1, _ = self.create_network(net_dict)
net_dict = {'network': {'name': 'net2',
'tenant_id': network_tenant,
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
network2, _ = self.create_network(net_dict)
# Create trunk port with subport
device_id = 'vm-1'
port_tenant = 'port-ten'
port_host = self.host1
trunkport_dict = {'name': 'port1',
'tenant_id': port_tenant,
'network_id': network1['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': '',
'device_owner': ''}
trunkport, _ = self.create_port(trunkport_dict)
subport_dict = {'name': 'port2',
'tenant_id': port_tenant,
'network_id': network2['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': '',
'device_owner': ''}
subport, _ = self.create_port(subport_dict)
trunk_dict = {'trunk': {'port_id': trunkport['id'],
'project_id': port_tenant,
'tenant_id': port_tenant,
'sub_ports': [{'port_id': subport['id'],
'segmentation_type': 'vlan',
'segmentation_id': 123}]}}
trunk = self.trunk_plugin.create_trunk(self.context, trunk_dict)
self.bind_trunk_to_host(trunkport, device_id, port_host)
self.assertTenantCreated(port_tenant)
self.assertVmCreated(device_id)
self.assertVmPortCreated(trunkport['id'])
self.assertPortBindingCreated((trunkport['id'], port_host))
self.assertVmPortCreated(subport['id'])
self.assertPortBindingCreated((subport['id'], port_host))
# Delete the trunk and subport
self.unbind_port_from_host(trunkport['id'])
self.trunk_plugin.delete_trunk(self.context, trunk['id'])
self.delete_port(trunkport['id'])
self.delete_port(subport['id'])
self.assertTenantDeleted(port_tenant)
self.assertVmDeleted(device_id)
self.assertVmPortDeleted(trunkport['id'])
self.assertPortBindingDeleted((trunkport['id'], port_host))
self.assertVmPortDeleted(subport['id'])
self.assertPortBindingDeleted((subport['id'], port_host))
def test_trunk_add_remove_subport(self):
network_tenant = 'net-ten'
net_dict = {'network': {'name': 'net1',
'tenant_id': network_tenant,
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
network1, _ = self.create_network(net_dict)
net_dict = {'network': {'name': 'net2',
'tenant_id': network_tenant,
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
network2, _ = self.create_network(net_dict)
net_dict = {'network': {'name': 'net3',
'tenant_id': network_tenant,
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
network3, _ = self.create_network(net_dict)
# Create trunk port with subport, add subport after initial binding
device_id = 'vm-1'
port_tenant = 'port-ten'
port_host = self.host1
trunkport_dict = {'name': 'port1',
'tenant_id': port_tenant,
'network_id': network1['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': '',
'device_owner': ''}
trunkport, _ = self.create_port(trunkport_dict)
subport_dict = {'name': 'port2',
'tenant_id': port_tenant,
'network_id': network2['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': '',
'device_owner': ''}
subport, _ = self.create_port(subport_dict)
trunk_dict = {'trunk': {'port_id': trunkport['id'],
'project_id': port_tenant,
'tenant_id': port_tenant,
'sub_ports': [{'port_id': subport['id'],
'segmentation_type': 'vlan',
'segmentation_id': 123}]}}
subport_dict2 = {'name': 'port3',
'tenant_id': port_tenant,
'network_id': network3['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': '',
'device_owner': ''}
trunk = self.trunk_plugin.create_trunk(self.context, trunk_dict)
self.bind_trunk_to_host(trunkport, device_id, port_host)
subport2, _ = self.create_port(subport_dict2)
self.trunk_plugin.add_subports(self.context, trunk['id'],
{'sub_ports':
[{'port_id': subport2['id'],
'segmentation_type': 'vlan',
'segmentation_id': 111}]})
self.bind_subport_to_trunk(subport2, trunk)
self.assertTenantCreated(port_tenant)
self.assertVmCreated(device_id)
self.assertVmPortCreated(trunkport['id'])
self.assertPortBindingCreated((trunkport['id'], port_host))
self.assertVmPortCreated(subport['id'])
self.assertPortBindingCreated((subport['id'], port_host))
self.assertVmPortCreated(subport2['id'])
self.assertPortBindingCreated((subport2['id'], port_host))
# Remove the trunk subport
self.trunk_plugin.remove_subports(self.context, trunk['id'],
{'sub_ports':
[{'port_id': subport2['id']}]})
self.unbind_port_from_host(subport2['id'])
self.assertPortBindingDeleted((subport2['id'], port_host))
# Delete the trunk and remaining subport
self.unbind_port_from_host(trunkport['id'])
self.trunk_plugin.delete_trunk(self.context, trunk['id'])
self.delete_port(trunkport['id'])
self.delete_port(subport['id'])
self.delete_port(subport2['id'])
self.assertTenantDeleted(port_tenant)
self.assertVmDeleted(device_id)
self.assertVmPortDeleted(trunkport['id'])
self.assertPortBindingDeleted((trunkport['id'], port_host))
self.assertVmPortDeleted(subport['id'])
self.assertPortBindingDeleted((subport['id'], port_host))
def test_baremetal_trunk_basic(self):
network_tenant = 'net-ten'
net_dict = {'network': {'name': 'net1',
'tenant_id': network_tenant,
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
network1, _ = self.create_network(net_dict)
net_dict = {'network': {'name': 'net2',
'tenant_id': network_tenant,
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
network2, _ = self.create_network(net_dict)
# Create baremetal port
device_id = 'baremetal-1'
port_tenant = 'port-ten'
port_host = 'bm-host'
switch_id = '00:11:22:33:44:55'
switch_port = 'Ethernet1/1'
trunkport_dict = {'name': 'port1',
'tenant_id': port_tenant,
'network_id': network1['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': '',
'device_owner': ''}
trunkport, _ = self.create_port(trunkport_dict)
subport_dict = {'name': 'port2',
'tenant_id': port_tenant,
'network_id': network2['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': '',
'device_owner': ''}
subport, _ = self.create_port(subport_dict)
trunk_dict = {'trunk': {'port_id': trunkport['id'],
'project_id': port_tenant,
'tenant_id': port_tenant,
'sub_ports': [{'port_id': subport['id'],
'segmentation_type': 'inherit',
'segmentation_id': 'inherit'}]}}
self.trunk_plugin.create_trunk(self.context, trunk_dict)
self.bind_trunk_to_baremetal(trunkport['id'], device_id, port_host,
switch_id, switch_port)
self.assertTenantCreated(port_tenant)
self.assertBaremetalCreated(device_id)
self.assertBaremetalPortCreated(trunkport['id'])
self.assertPortBindingCreated(
(trunkport['id'], (switch_id, switch_port)))
self.assertBaremetalPortCreated(subport['id'])
self.assertPortBindingCreated(
(subport['id'], (switch_id, switch_port)))
# Simulate baremetal shutdown
self.unbind_trunk_from_baremetal(trunkport['id'])
self.assertBaremetalDeleted(device_id)
self.assertPortBindingDeleted(
(trunkport['id'], (switch_id, switch_port)))
self.assertPortBindingDeleted(
(subport['id'], (switch_id, switch_port)))
def test_baremetal_trunk_bind_unbind(self):
network_tenant = 'net-ten'
net_dict = {'network': {'name': 'net1',
'tenant_id': network_tenant,
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
network1, _ = self.create_network(net_dict)
net_dict = {'network': {'name': 'net2',
'tenant_id': network_tenant,
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
network2, _ = self.create_network(net_dict)
net_dict = {'network': {'name': 'net3',
'tenant_id': network_tenant,
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
network3, _ = self.create_network(net_dict)
# Create baremetal port
device_id = 'baremetal-1'
port_tenant = 'port-ten'
port_host = 'bm-host'
switch_id = '00:11:22:33:44:55'
switch_port = 'Ethernet1/1'
trunkport_dict = {'name': 'port1',
'tenant_id': port_tenant,
'network_id': network1['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': '',
'device_owner': ''}
trunkport, _ = self.create_port(trunkport_dict)
subport_dict = {'name': 'port2',
'tenant_id': port_tenant,
'network_id': network2['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': '',
'device_owner': ''}
subport, _ = self.create_port(subport_dict)
trunk_dict = {'trunk': {'port_id': trunkport['id'],
'project_id': port_tenant,
'tenant_id': port_tenant,
'sub_ports': [{'port_id': subport['id'],
'segmentation_type': 'inherit',
'segmentation_id': 'inherit'}]}}
trunk = self.trunk_plugin.create_trunk(self.context, trunk_dict)
self.bind_trunk_to_baremetal(trunkport['id'], device_id, port_host,
switch_id, switch_port)
self.assertTenantCreated(port_tenant)
self.assertBaremetalCreated(device_id)
self.assertBaremetalPortCreated(trunkport['id'])
self.assertPortBindingCreated(
(trunkport['id'], (switch_id, switch_port)))
self.assertBaremetalPortCreated(subport['id'])
self.assertPortBindingCreated(
(subport['id'], (switch_id, switch_port)))
subport_dict2 = {'name': 'port3',
'tenant_id': port_tenant,
'network_id': network3['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': '',
'device_owner': ''}
subport2, _ = self.create_port(subport_dict2)
self.trunk_plugin.add_subports(self.context, trunk['id'],
{'sub_ports':
[{'port_id': subport2['id'],
'segmentation_type': 'inherit',
'segmentation_id': 'inherit'}]})
self.assertBaremetalPortCreated(subport2['id'])
self.assertPortBindingCreated(
(subport2['id'], (switch_id, switch_port)))
self.trunk_plugin.remove_subports(self.context, trunk['id'],
{'sub_ports':
[{'port_id': subport2['id']}]})
self.assertPortBindingDeleted(
(subport2['id'], (switch_id, switch_port)))
# Simulate baremetal shutdown
self.unbind_trunk_from_baremetal(trunkport['id'])
self.assertBaremetalDeleted(device_id)
self.assertPortBindingDeleted(
(trunkport['id'], (switch_id, switch_port)))
self.assertPortBindingDeleted(
(subport['id'], (switch_id, switch_port)))
def test_baremetal_trunk_pre_bound(self):
network_tenant = 'net-ten'
net_dict = {'network': {'name': 'net1',
'tenant_id': network_tenant,
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
network1, _ = self.create_network(net_dict)
net_dict = {'network': {'name': 'net2',
'tenant_id': network_tenant,
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
network2, _ = self.create_network(net_dict)
# Create baremetal port
device_id = 'baremetal-1'
port_tenant = 'port-ten'
port_host = 'bm-host'
switch_id = '00:11:22:33:44:55'
switch_port = 'Ethernet1/1'
trunkport_dict = {'name': 'port1',
'tenant_id': port_tenant,
'network_id': network1['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': '',
'device_owner': ''}
trunkport, _ = self.create_port(trunkport_dict)
subport_dict = {'name': 'port2',
'tenant_id': port_tenant,
'network_id': network2['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': '',
'device_owner': ''}
subport, _ = self.create_port(subport_dict)
trunk_dict = {'trunk': {'port_id': trunkport['id'],
'project_id': port_tenant,
'tenant_id': port_tenant,
'sub_ports': [{'port_id': subport['id'],
'segmentation_type': 'inherit',
'segmentation_id': 'inherit'}]}}
self.bind_trunk_to_baremetal(trunkport['id'], device_id, port_host,
switch_id, switch_port)
self.trunk_plugin.create_trunk(self.context, trunk_dict)
self.assertTenantCreated(port_tenant)
self.assertBaremetalCreated(device_id)
self.assertBaremetalPortCreated(trunkport['id'])
self.assertPortBindingCreated(
(trunkport['id'], (switch_id, switch_port)))
self.assertBaremetalPortCreated(subport['id'])
self.assertPortBindingCreated(
(subport['id'], (switch_id, switch_port)))
# Simulate baremetal shutdown
self.unbind_trunk_from_baremetal(trunkport['id'])
self.assertBaremetalDeleted(device_id)
self.assertPortBindingDeleted(
(trunkport['id'], (switch_id, switch_port)))
self.assertPortBindingDeleted(
(subport['id'], (switch_id, switch_port)))
def test_error_port(self):
network_tenant = 'net-ten'
net_dict = {'network': {'name': 'net',
'tenant_id': network_tenant,
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
network, _ = self.create_network(net_dict)
# Create port
device_id = 'vm-1'
port_tenant = 'port-ten'
port_host = self.host1
port_dict = {'name': 'port1',
'tenant_id': port_tenant,
'network_id': network['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': device_id,
'device_owner': 'compute:',
'binding:host_id': port_host}
port, _ = self.create_port(port_dict)
self.assertTenantCreated(port_tenant)
self.assertVmCreated(device_id)
self.assertVmPortCreated(port['id'])
self.assertPortBindingCreated((port['id'], port_host))
# Set port to ERROR state
self.set_port_to_error_state(port)
self.assertVmDeleted(device_id)
self.assertVmPortDeleted(port['id'])
self.assertPortBindingDeleted((port['id'], port_host))
class FQDNFalseTestCase(ml2_test_base.MechTestBase):
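    """With use_fqdn=False, bindings should report only the short hostname."""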
def setUp(self):
cfg.CONF.set_override('use_fqdn', False, "ml2_arista")
super(FQDNFalseTestCase, self).setUp()
self.fqdnHost = 'fqdnhost.full.domain.com'
helpers.register_ovs_agent(
host=self.fqdnHost, bridge_mappings={self.physnet: 'br-eth1'})
def test_fqdn_false(self):
network_tenant = 'net-ten'
net_dict = {'network': {'name': 'net',
'tenant_id': network_tenant,
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
network, _ = self.create_network(net_dict)
# Create VM port
device_id = 'vm-1'
port_tenant = 'port-ten'
port_host = self.fqdnHost
port_dict = {'name': 'port1',
'tenant_id': port_tenant,
'network_id': network['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': device_id,
'device_owner': 'compute:',
'binding:host_id': port_host}
port, _ = self.create_port(port_dict)
self.assertTenantCreated(port_tenant)
self.assertVmCreated(device_id)
self.assertVmPortCreated(port['id'])
self.assertPortBindingCreated((port['id'], 'fqdnhost'))
# Delete VM port
self.delete_port(port['id'])
self.assertTenantDeleted(port_tenant)
self.assertVmDeleted(device_id)
self.assertVmPortDeleted(port['id'])
self.assertPortBindingDeleted((port['id'], 'fqdnhost'))
class FQDNTrueTestCase(ml2_test_base.MechTestBase):
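    """Bindings should report the full FQDN when use_fqdn is not disabled."""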
def setUp(self):
super(FQDNTrueTestCase, self).setUp()
self.fqdnHost = 'fqdnhost.full.domain.com'
helpers.register_ovs_agent(
host=self.fqdnHost, bridge_mappings={self.physnet: 'br-eth1'})
def test_fqdn_true(self):
network_tenant = 'net-ten'
net_dict = {'network': {'name': 'net',
'tenant_id': network_tenant,
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
network, _ = self.create_network(net_dict)
# Create VM port
device_id = 'vm-1'
port_tenant = 'port-ten'
port_host = self.fqdnHost
port_dict = {'name': 'port1',
'tenant_id': port_tenant,
'network_id': network['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': device_id,
'device_owner': 'compute:',
'binding:host_id': port_host}
port, _ = self.create_port(port_dict)
self.assertTenantCreated(port_tenant)
self.assertVmCreated(device_id)
self.assertVmPortCreated(port['id'])
self.assertPortBindingCreated((port['id'], self.fqdnHost))
# Delete VM port
self.delete_port(port['id'])
self.assertTenantDeleted(port_tenant)
self.assertVmDeleted(device_id)
self.assertVmPortDeleted(port['id'])
self.assertPortBindingDeleted((port['id'], self.fqdnHost))
class BasicHpbMechDriverTestCase(ml2_test_base.MechTestBase):
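    """Hierarchical port binding tests with manage_fabric enabled."""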
def setUp(self):
cfg.CONF.set_override('manage_fabric', True, "ml2_arista")
super(BasicHpbMechDriverTestCase, self).setUp()
def test_basic_hpb_port(self):
network_tenant = 'net-ten'
net_dict = {'network': {'name': 'net',
'tenant_id': network_tenant,
'admin_state_up': True,
'shared': False,
'provider:physical_network': None,
'provider:network_type': 'vxlan'}}
network, _ = self.create_network(net_dict)
self.assertNetworkCreated(network['id'])
# Create HPB port
device_id = 'vm-1'
port_tenant = 'port-ten'
port_host = self.host1
port_dict = {'name': 'port1',
'tenant_id': port_tenant,
'network_id': network['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': device_id,
'device_owner': 'compute:',
'binding:host_id': port_host}
with mock.patch.object(self.drv.eapi,
'get_host_physnet',
return_value=self.physnet):
port, port_ctx = self.create_port(port_dict)
self.assertTenantCreated(port_tenant)
self.assertVmCreated(device_id)
self.assertVmPortCreated(port['id'])
self.assertPortBindingCreated((port['id'], port_host))
# Check that the dynamic segment was created
network_segments = [level['bound_segment']
for level in port_ctx.binding_levels]
self.assertEqual(len(network_segments), 2)
for segment in network_segments:
self.assertSegmentCreated(segment['id'])
# Delete HPB port
self.delete_port(port['id'])
self.assertTenantDeleted(port_tenant)
self.assertVmDeleted(device_id)
self.assertVmPortDeleted(port['id'])
self.assertPortBindingDeleted((port['id'], port_host))
def test_hpb_dvr_port(self):
network_tenant = 'net-ten'
net_dict = {'network': {'name': 'net',
'tenant_id': network_tenant,
'admin_state_up': True,
'shared': False,
'provider:physical_network': None,
'provider:network_type': 'vxlan'}}
network, _ = self.create_network(net_dict)
# Create DVR port
device_id = 'router-1'
port_tenant = 'port-ten'
port_host_1 = self.host1
port_dict = {'name': 'port1',
'tenant_id': port_tenant,
'network_id': network['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': device_id,
'device_owner': n_const.DEVICE_OWNER_DVR_INTERFACE}
port, _ = self.create_port(port_dict)
with mock.patch.object(self.drv.eapi,
'get_host_physnet',
return_value=self.physnet):
port, port_ctx = self.bind_dvr_to_host(port, port_host_1)
self.assertTenantCreated(port_tenant)
self.assertRouterCreated(device_id)
self.assertRouterPortCreated(port['id'])
self.assertPortBindingCreated((port['id'], port_host_1))
network_segments = [level['bound_segment']
for level in port_ctx.binding_levels]
self.assertEqual(len(network_segments), 2)
for segment in network_segments:
self.assertSegmentCreated(segment['id'])
# Bring up a second DVR host
port_host_2 = self.host3
with mock.patch.object(self.drv.eapi,
'get_host_physnet',
return_value=self.physnet2):
port, port_ctx = self.bind_dvr_to_host(port, port_host_2)
self.assertPortBindingCreated((port['id'], port_host_2))
network_segments = [level['bound_segment']
for level in port_ctx.binding_levels]
self.assertEqual(len(network_segments), 2)
for segment in network_segments:
self.assertSegmentCreated(segment['id'])
# Delete the port
self.delete_port(port['id'])
self.assertTenantDeleted(port_tenant)
self.assertRouterDeleted(device_id)
self.assertRouterPortDeleted(port['id'])
self.assertPortBindingDeleted((port['id'], port_host_1))
self.assertPortBindingDeleted((port['id'], port_host_2))
class UnmanagedFabricUnmanagedPhysnetHpbTestCase(ml2_test_base.MechTestBase):
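    """HPB with unmanaged fabric and unmanaged physnet: no VM/port provisioned."""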
_mechanism_drivers = ['arista_test_fabric', 'arista', 'openvswitch']
def setUp(self):
cfg.CONF.set_override('manage_fabric', False, "ml2_arista")
cfg.CONF.set_override('managed_physnets', ['other_physnet'],
"ml2_arista")
super(UnmanagedFabricUnmanagedPhysnetHpbTestCase, self).setUp()
def test_unmanaged_fabric_unmanaged_hpb_port(self):
network_tenant = 'net-ten'
net_dict = {'network': {'name': 'net',
'tenant_id': network_tenant,
'admin_state_up': True,
'shared': False,
'provider:physical_network': None,
'provider:network_type': 'vxlan'}}
network, _ = self.create_network(net_dict)
self.assertNetworkCreated(network['id'])
# Create HPB port
device_id = 'vm-1'
port_tenant = 'port-ten'
port_host = self.host1
port_dict = {'name': 'port1',
'tenant_id': port_tenant,
'network_id': network['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': device_id,
'device_owner': 'compute:',
'binding:host_id': port_host}
port, port_ctx = self.create_port(port_dict)
self.assertTenantCreated(port_tenant)
# Check that the dynamic segment was created
network_segments = [level['bound_segment']
for level in port_ctx.binding_levels]
self.assertEqual(len(network_segments), 2)
for segment in network_segments:
self.assertSegmentCreated(segment['id'])
# The VM/Port should not have been created
self.assertVmDeleted(device_id)
self.assertVmPortDeleted(port['id'])
self.assertPortBindingDeleted((port['id'], port_host))
# Delete HPB port
self.delete_port(port['id'])
self.assertTenantDeleted(port_tenant)
self.assertVmDeleted(device_id)
self.assertVmPortDeleted(port['id'])
self.assertPortBindingDeleted((port['id'], port_host))
class ManagedFabricUnmanagedPhysnetHpbTestCase(ml2_test_base.MechTestBase):
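    """HPB with managed fabric but unmanaged physnet: no VM/port provisioned."""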
def setUp(self):
cfg.CONF.set_override('manage_fabric', True, "ml2_arista")
cfg.CONF.set_override('managed_physnets', ['other_physnet'],
"ml2_arista")
super(ManagedFabricUnmanagedPhysnetHpbTestCase, self).setUp()
def test_managed_fabric_unmanaged_hpb_port(self):
network_tenant = 'net-ten'
net_dict = {'network': {'name': 'net',
'tenant_id': network_tenant,
'admin_state_up': True,
'shared': False,
'provider:physical_network': None,
'provider:network_type': 'vxlan'}}
network, _ = self.create_network(net_dict)
self.assertNetworkCreated(network['id'])
# Create HPB port
device_id = 'vm-1'
port_tenant = 'port-ten'
port_host = self.host1
port_dict = {'name': 'port1',
'tenant_id': port_tenant,
'network_id': network['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': device_id,
'device_owner': 'compute:',
'binding:host_id': port_host}
with mock.patch.object(self.drv.eapi,
'get_host_physnet',
return_value=self.physnet):
port, port_ctx = self.create_port(port_dict)
self.assertTenantCreated(port_tenant)
# Check that the dynamic segment was created
network_segments = [level['bound_segment']
for level in port_ctx.binding_levels]
self.assertEqual(len(network_segments), 2)
for segment in network_segments:
self.assertSegmentCreated(segment['id'])
# The VM/Port should not have been created
self.assertVmDeleted(device_id)
self.assertVmPortDeleted(port['id'])
self.assertPortBindingDeleted((port['id'], port_host))
# Delete HPB port
self.delete_port(port['id'])
self.assertTenantDeleted(port_tenant)
self.assertVmDeleted(device_id)
self.assertVmPortDeleted(port['id'])
self.assertPortBindingDeleted((port['id'], port_host))
class UnmanagedFabricManagedPhysnetHpbTestCase(ml2_test_base.MechTestBase):
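    """HPB with unmanaged fabric but managed physnet: VM and port provisioned."""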
_mechanism_drivers = ['arista_test_fabric', 'arista', 'openvswitch']
def setUp(self):
self.physnet = 'physnet1'
cfg.CONF.set_override('manage_fabric', False, "ml2_arista")
cfg.CONF.set_override('managed_physnets', [self.physnet],
"ml2_arista")
super(UnmanagedFabricManagedPhysnetHpbTestCase, self).setUp()
def test_unmanaged_fabric_managed_hpb_port(self):
network_tenant = 'net-ten'
net_dict = {'network': {'name': 'net',
'tenant_id': network_tenant,
'admin_state_up': True,
'shared': False,
'provider:physical_network': None,
'provider:network_type': 'vxlan'}}
network, _ = self.create_network(net_dict)
self.assertNetworkCreated(network['id'])
# Create HPB port
device_id = 'vm-1'
port_tenant = 'port-ten'
port_host = self.host1
port_dict = {'name': 'port1',
'tenant_id': port_tenant,
'network_id': network['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': device_id,
'device_owner': 'compute:',
'binding:host_id': port_host}
port, port_ctx = self.create_port(port_dict)
self.assertTenantCreated(port_tenant)
self.assertVmCreated(device_id)
self.assertVmPortCreated(port['id'])
self.assertPortBindingCreated((port['id'], port_host))
# Check that the dynamic segment was created
network_segments = [level['bound_segment']
for level in port_ctx.binding_levels]
self.assertEqual(len(network_segments), 2)
for segment in network_segments:
self.assertSegmentCreated(segment['id'])
# Delete HPB port
self.delete_port(port['id'])
self.assertTenantDeleted(port_tenant)
self.assertVmDeleted(device_id)
self.assertVmPortDeleted(port['id'])
self.assertPortBindingDeleted((port['id'], port_host))
class ManagedFabricManagedFabricHpbTestCase(ml2_test_base.MechTestBase):
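    """HPB with both the fabric and the physnet managed: VM and port provisioned."""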
def setUp(self):
self.physnet = 'physnet1'
cfg.CONF.set_override('manage_fabric', True, "ml2_arista")
cfg.CONF.set_override('managed_physnets', [self.physnet],
"ml2_arista")
super(ManagedFabricManagedFabricHpbTestCase, self).setUp()
def test_managed_fabric_managed_hpb_port(self):
network_tenant = 'net-ten'
net_dict = {'network': {'name': 'net',
'tenant_id': network_tenant,
'admin_state_up': True,
'shared': False,
'provider:physical_network': None,
'provider:network_type': 'vxlan'}}
network, _ = self.create_network(net_dict)
self.assertNetworkCreated(network['id'])
# Create HPB port
device_id = 'vm-1'
port_tenant = 'port-ten'
port_host = self.host1
port_dict = {'name': 'port1',
'tenant_id': port_tenant,
'network_id': network['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': device_id,
'device_owner': 'compute:',
'binding:host_id': port_host}
with mock.patch.object(self.drv.eapi,
'get_host_physnet',
return_value=self.physnet):
port, port_ctx = self.create_port(port_dict)
self.assertTenantCreated(port_tenant)
self.assertVmCreated(device_id)
self.assertVmPortCreated(port['id'])
self.assertPortBindingCreated((port['id'], port_host))
# Check that the dynamic segment was created
network_segments = [level['bound_segment']
for level in port_ctx.binding_levels]
self.assertEqual(len(network_segments), 2)
for segment in network_segments:
self.assertSegmentCreated(segment['id'])
# Delete HPB port
self.delete_port(port['id'])
self.assertTenantDeleted(port_tenant)
self.assertVmDeleted(device_id)
self.assertVmPortDeleted(port['id'])
self.assertPortBindingDeleted((port['id'], port_host))
class ManagedPhysnetNoHpbTestCase(ml2_test_base.MechTestBase):
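    """With managed_physnets set and no HPB, ports on other physnets are ignored."""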
def setUp(self):
self.physnet = 'physnet1'
cfg.CONF.set_override('managed_physnets', [self.physnet],
"ml2_arista")
super(ManagedPhysnetNoHpbTestCase, self).setUp()
def test_managed_physnet_no_hpb(self):
network_tenant = 'net-ten'
net_dict = {'network':
{'name': 'net',
'tenant_id': network_tenant,
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
network, _ = self.create_network(net_dict)
other_net_dict = {'network':
{'name': 'net',
'tenant_id': network_tenant,
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet2,
'provider:network_type': 'vlan'}}
other_network, _ = self.create_network(other_net_dict)
# Create VM port
device_id_1 = 'vm-1'
port_tenant = 'port-ten'
port_host_1 = self.host1
port_dict_1 = {'name': 'port1',
'tenant_id': port_tenant,
'network_id': network['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': device_id_1,
'device_owner': 'compute:',
'binding:host_id': port_host_1}
port_1, _ = self.create_port(port_dict_1)
self.assertTenantCreated(port_tenant)
self.assertVmCreated(device_id_1)
self.assertVmPortCreated(port_1['id'])
self.assertPortBindingCreated((port_1['id'], port_host_1))
# Create VM port on unmanaged physnet
device_id_2 = 'vm-2'
port_host_2 = self.host3
port_dict_2 = {'name': 'port2',
'tenant_id': port_tenant,
'network_id': other_network['id'],
'admin_state_up': True,
'fixed_ips': [],
'device_id': device_id_2,
'device_owner': 'compute:',
'binding:host_id': port_host_2}
port_2, _ = self.create_port(port_dict_2)
# The VM/Port should not have been created
self.assertVmDeleted(device_id_2)
self.assertVmPortDeleted(port_2['id'])
self.assertPortBindingDeleted((port_2['id'], port_host_2))
# Delete VM port
self.delete_port(port_1['id'])
self.assertVmDeleted(device_id_1)
self.assertVmPortDeleted(port_1['id'])
self.assertPortBindingDeleted((port_1['id'], port_host_1))
# Delete other unmanaged VM port
self.delete_port(port_2['id'])
self.assertTenantDeleted(port_tenant)
class BasicL3HARouterTests(object):
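    """Mixin exercising the L3 HA router lifecycle; mixed into test cases below."""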
def test_create_delete_router(self):
router_ports = []
ext_net = None
if self.ext_net:
ext_net, ext_net_ctx = self.create_network(self.ext_net)
router = self.create_router(ha=True, ext_net=ext_net)
for l3_agent in self.l3_agents:
port = self.update_routers_states(router['id'], l3_agent)
router_ports.append(port)
ha_network_id = self.get_ha_network(router)
segments = self.get_network_segments(ha_network_id)
if self.ext_net:
segments.extend(self.get_network_segments(ext_net['id']))
router_ports.append(self.plugin.get_port(self.context,
router['gw_port_id']))
self.assertTenantCreated(router['project_id'])
self.assertL3HANetworkCreated(router, ha_network_id)
self.assertRouterCreated(router['id'])
self.assertL3HARouterCreated(router)
for port in router_ports:
if port['device_owner'] != n_const.DEVICE_OWNER_ROUTER_GW:
self.assertL3HAPortCreated(router, port['id'])
else:
self.assertRouterPortCreated(port['id'])
self.assertPortBindingCreated((port['id'],
port[portbindings.HOST_ID]))
self.assertSegmentsCreated(segments)
# Delete the router
self.delete_router(router['id'])
if self.ext_net:
self.delete_network(ext_net['id'])
self.assertRouterPortsDeleted([p['id'] for p in router_ports])
self.assertRouterDeleted(router['id'])
self.assertSegmentsDeleted(segments)
self.assertNetworkDeleted(ha_network_id)
self.assertTenantDeleted(router['project_id'])
for port in router_ports:
self.assertPortBindingDeleted((port['id'],
port[portbindings.HOST_ID]))
class BasicRouterTests(object):
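    """Mixin exercising the legacy router lifecycle against a single L3 agent."""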
def test_create_delete_router(self):
net_list = []
port_list = []
segment_list = []
ext_net = None
if self.ext_net:
ext_net, ext_net_ctx = self.create_network(self.ext_net)
router = self.create_router(ext_net=ext_net)
for net in self.net_dict:
network, net_ctx = self.create_network(net)
net_list.append((network, net_ctx))
self.assertTenantCreated(router['project_id'])
for net, net_ctx in net_list:
interface_info = {'subnet_id': net_ctx.current['subnets'][0]}
intf = self.add_router_interface(router, interface_info)
self.sync_routers(router['id'], self.l3_agent1['host'])
port = self.get_legacy_router_port(intf['port_id'])
self.assertNotEqual(len(port), 0)
port_list.append(port)
if self.ext_net:
net_list.append((ext_net, ext_net_ctx))
port = self.get_legacy_router_port(router['gw_port_id'])
self.assertNotEqual(len(port), 0)
port_list.append(port)
self.assertLegacyRouterCreated(router, self.l3_agent1['host'])
for network, _ in net_list:
self.assertNetworkCreated(network['id'])
segment_list.extend(self.get_network_segments(network['id']))
self.assertEqual(len(segment_list), self.total_segments)
self.assertSegmentsCreated(segment_list)
for port in port_list:
self.assertRouterPortCreated(port['id'])
self.assertPortBindingCreated((port['id'],
port[portbindings.HOST_ID]))
# Delete the router interfaces and router
# Remove one of router's interface
network, net_ctx = net_list[0]
interface_info = {'subnet_id': net_ctx.current['subnets'][0]}
intf = self.remove_router_interface(router, interface_info)
self.assertRouterCreated(router['id'])
self.assertRouterPortDeleted(intf['port_id'])
self.assertPortBindingDeleted((intf['port_id'],
port[portbindings.HOST_ID]))
# Remove second router interface
network, net_ctx = net_list[1]
interface_info = {'subnet_id': net_ctx.current['subnets'][0]}
intf = self.remove_router_interface(router, interface_info)
# If there's still an external gateway port, the router will
# still exist on CVX. Otherwise, it will be deleted
if not self.ext_net:
self.assertRouterDeleted(router['id'])
else:
self.assertRouterCreated(router['id'])
self.assertRouterPortDeleted(intf['port_id'])
self.assertPortBindingDeleted((intf['port_id'],
port[portbindings.HOST_ID]))
# Delete router to delete external gateway port
self.delete_router(router['id'])
self.assertRouterDeleted(router['id'])
self.assertRouterPortDeleted(router['gw_port_id'])
self.assertPortBindingDeleted((router['gw_port_id'],
port[portbindings.HOST_ID]))
for network, _ in net_list:
self.delete_network(network['id'])
self.assertNetworkDeleted(network['id'])
self.assertSegmentsDeleted(segment_list)
class BasicL3HARouterTestCases(ml2_test_base.L3HARouterTestFramework,
BasicL3HARouterTests):
def setUp(self):
cfg.CONF.set_override('tenant_network_types', 'vlan', 'ml2')
super(BasicL3HARouterTestCases, self).setUp()
cfg.CONF.set_override('max_l3_agents_per_router', 2)
self.l3_agent1 = self._register_l3_agent(host=self.host1)
self.l3_agent2 = self._register_l3_agent(host=self.host2)
self.l3_agents = [self.l3_agent1, self.l3_agent2]
class BasicHpbL3HARouterTestCases(ml2_test_base.L3HARouterTestFramework,
BasicL3HARouterTests):
def setUp(self):
cfg.CONF.set_override('manage_fabric', True, 'ml2_arista')
cfg.CONF.set_override('tenant_network_types', 'vxlan', 'ml2')
super(BasicHpbL3HARouterTestCases, self).setUp()
cfg.CONF.set_override('l3_ha', True)
cfg.CONF.set_override('max_l3_agents_per_router', 3)
self.l3_agent1 = self._register_l3_agent(host=self.host1)
self.l3_agent2 = self._register_l3_agent(host=self.host2)
self.l3_agent3 = self._register_l3_agent(host=self.host3)
self.l3_agents = [self.l3_agent1, self.l3_agent2, self.l3_agent3]
def get_host_physnet(context):
if context.host == 'host3':
return self.physnet2
if context.host == 'host1':
return self.physnet
if context.host == 'host2':
return self.physnet
ghp = mock.patch.object(self.drv.eapi, 'get_host_physnet').start()
ghp.side_effect = get_host_physnet
class BasicRouterTestCases(ml2_test_base.L3HARouterTestFramework,
BasicRouterTests):
def setUp(self):
cfg.CONF.set_override('tenant_network_types', 'vlan', 'ml2')
super(BasicRouterTestCases, self).setUp()
self.l3_agent1 = self._register_l3_agent(host=self.host1)
self.net_dict = [
{'network': {'name': 'net-%d' % r,
'tenant_id': self._tenant_id,
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
for r in range(1, 3)]
self.total_segments = 2
self.l3_agents = [self.l3_agent1]
class BasicHpbRouterTestCases(ml2_test_base.L3HARouterTestFramework,
BasicRouterTests):
def setUp(self):
cfg.CONF.set_override('manage_fabric', True, 'ml2_arista')
cfg.CONF.set_override('tenant_network_types', 'vxlan', 'ml2')
super(BasicHpbRouterTestCases, self).setUp()
self.l3_agent1 = self._register_l3_agent(host=self.host1)
def get_host_physnet(context):
return self.physnet
ghp = mock.patch.object(self.drv.eapi, 'get_host_physnet').start()
ghp.side_effect = get_host_physnet
self.net_dict = [{'network': {'name': 'hpb_net-%d' % r,
'tenant_id': self._tenant_id,
'admin_state_up': True,
'shared': False}}
for r in range(1, 3)]
self.total_segments = 4
self.l3_agents = [self.l3_agent1]
class RouterGatewayTestCases(ml2_test_base.L3HARouterTestFramework,
BasicRouterTests):
def setUp(self):
cfg.CONF.set_override('tenant_network_types', 'vlan', 'ml2')
super(RouterGatewayTestCases, self).setUp()
self.l3_agent1 = self._register_l3_agent(host=self.host1)
self.ext_net = {'network': {'name': 'ext_net',
'tenant_id': self._tenant_id,
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan',
extnet_const.EXTERNAL: True}}
self.net_dict = [
{'network': {'name': 'net-%d' % r,
'tenant_id': self._tenant_id,
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan'}}
for r in range(1, 3)]
self.total_segments = 3
self.l3_agents = [self.l3_agent1]
class RouterGatewayL3HARouterTestCases(ml2_test_base.L3HARouterTestFramework,
BasicL3HARouterTests):
def setUp(self):
cfg.CONF.set_override('tenant_network_types', 'vlan', 'ml2')
super(RouterGatewayL3HARouterTestCases, self).setUp()
cfg.CONF.set_override('max_l3_agents_per_router', 2)
self.l3_agent1 = self._register_l3_agent(host=self.host1)
self.l3_agent2 = self._register_l3_agent(host=self.host2)
self.l3_agents = [self.l3_agent1, self.l3_agent2]
self.ext_net = {'network': {'name': 'ext_net',
'tenant_id': self._tenant_id,
'admin_state_up': True,
'shared': False,
'provider:physical_network': self.physnet,
'provider:network_type': 'vlan',
extnet_const.EXTERNAL: True}}
networking_arista-2023.1.0/networking_arista/tests/unit/ml2/type_drivers/
networking_arista-2023.1.0/networking_arista/tests/unit/ml2/type_drivers/__init__.py
networking_arista-2023.1.0/networking_arista/tests/unit/ml2/type_drivers/test_arista_type_driver.py
# Copyright (c) 2016 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import mock
from mock import patch
from neutron_lib import context
from oslo_config import cfg
from neutron.tests.unit.plugins.ml2.drivers import test_helpers
from neutron.tests.unit import testlib_api
import networking_arista.common.exceptions as exc
from networking_arista.ml2.type_drivers.driver_helpers import VlanSyncService
from networking_arista.ml2.type_drivers.type_arista_vlan \
import AristaVlanTypeDriver
import networking_arista.tests.unit.utils as utils
EAPI_SEND_FUNC = ('networking_arista.ml2.rpc.arista_eapi.AristaRPCWrapperEapi'
'._send_eapi_req')
class AristaTypeDriverTest(testlib_api.SqlTestCase):
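    """Exercise Arista VLAN type driver initialization against a mocked eAPI."""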
def setUp(self):
super(AristaTypeDriverTest, self).setUp()
utils.setup_arista_wrapper_config(cfg)
@patch(EAPI_SEND_FUNC)
def test_initialize_type_driver(self, mock_send_eapi_req):
type_driver = AristaVlanTypeDriver()
type_driver.sync_service._force_sync = False
type_driver.sync_service._vlan_assignment_uuid = {'uuid': 1}
type_driver.sync_service._rpc = mock.MagicMock()
rpc = type_driver.sync_service._rpc
rpc.get_vlan_assignment_uuid.return_value = {'uuid': 1}
type_driver.initialize()
cmds = ['show openstack agent uuid',
'show openstack resource-pool vlan region RegionOne uuid']
calls = [mock.call(cmds=[cmd], commands_to_log=[cmd])
for cmd in cmds]
mock_send_eapi_req.assert_has_calls(calls)
type_driver.timer.cancel()
class AristaTypeDriverHelpersTest(test_helpers.HelpersTest):
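    """Run neutron's generic VLAN type driver helper tests against the Arista driver."""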
def setUp(self):
utils.setup_arista_wrapper_config(cfg)
super(AristaTypeDriverHelpersTest, self).setUp()
self.driver = AristaVlanTypeDriver()
self.driver.network_vlan_ranges = test_helpers.NETWORK_VLAN_RANGES
def test_allocate_specific_allocated_segment_outside_pools(self):
# Invalid test case for Arista type driver because the first
# allocate fails with VlanUnavailable
pass
def test_allocate_specific_unallocated_segment_outside_pools(self):
expected = dict(physical_network=test_helpers.TENANT_NET,
vlan_id=test_helpers.VLAN_OUTSIDE)
self.assertRaises(exc.VlanUnavailable,
self.driver.allocate_fully_specified_segment,
self.context, **expected)
class VlanSyncServiceTest(testlib_api.SqlTestCase):
"""Test that VLANs are synchronized between EOS and Neutron."""
def setUp(self):
super(VlanSyncServiceTest, self).setUp()
self.rpc = mock.MagicMock()
self.sync_service = VlanSyncService(self.rpc)
self.ctx = context.get_admin_context()
def tearDown(self):
super(VlanSyncServiceTest, self).tearDown()
# Cleanup the db
utils.delete_vlan_allocation(self.ctx)
def _ensure_in_db(self, assigned, allocated, available):
vlans = utils.get_vlan_allocation(self.ctx)
self.assertEqual(len(vlans), len(assigned))
used_vlans = []
available_vlans = []
for vlan in vlans:
self.assertIn(vlan.vlan_id, assigned)
if vlan.vlan_id in available:
self.assertFalse(vlan.allocated)
available_vlans.append(vlan.vlan_id)
elif vlan.vlan_id in allocated:
self.assertTrue(vlan.allocated)
used_vlans.append(vlan.vlan_id)
self.assertEqual(set(used_vlans), set(allocated))
self.assertEqual(set(available_vlans), set(available))
def _get_vlan_allocations(self):
vlan_allocations = {
'available_vlans': [],
'allocated_vlans': [],
}
vlans = utils.get_vlan_allocation(self.ctx)
for vlan in vlans:
if vlan.allocated:
vlan_allocations['allocated_vlans'].append(vlan.vlan_id)
else:
vlan_allocations['available_vlans'].append(vlan.vlan_id)
return vlan_allocations
def test_synchronization_before_region_sync(self):
"""Test VLAN sync with empty data from CVX"""
        # Populate VlanAllocations before starting the sync
for seg_id in range(2, 500):
utils.create_vlan_allocation(self.ctx, segmentation_id=seg_id)
self.rpc.get_vlan_allocation.return_value = {
'assignedVlans': '10, 100',
'availableVlans': '',
'allocatedVlans': ''
}
self.sync_service.synchronize()
self.assertTrue(self.sync_service._force_sync)
# Verify only assignedVlans exist in the db
vlans = self._get_vlan_allocations()
assigned_vlans = [10, 100]
allocated_vlans = []
self.assertEqual(set(vlans['available_vlans']), set(assigned_vlans))
self.assertEqual(set(vlans['allocated_vlans']), set(allocated_vlans))
def test_synchronization_test(self):
"""Test VLAN sync based on allocated VLANs in db and CVX"""
# Add entries to vlan allocation table
VLAN_MIN = 100
VLAN_MAX = 300
for seg_id in range(VLAN_MIN, VLAN_MAX + 1):
allocated = seg_id in [VLAN_MIN, VLAN_MAX]
utils.create_vlan_allocation(self.ctx, segmentation_id=seg_id,
allocated=allocated)
        # Case where the vlan resource pool has no allocated vlans
self.rpc.get_vlan_allocation.return_value = {
'assignedVlans': '10-20, 50-60, %s, %s' % (VLAN_MIN, VLAN_MAX),
'availableVlans': '10-20, 50-60',
'allocatedVlans': ''
}
self.sync_service.synchronize()
self.assertFalse(self.sync_service._force_sync)
allocated_vlans = [VLAN_MIN, VLAN_MAX]
available_vlans = list(set(range(10, 21)) | set(range(50, 61)))
assigned_vlans = list(set(available_vlans) | set(allocated_vlans))
self._ensure_in_db(assigned_vlans, allocated_vlans, available_vlans)
        # Case where the vlan resource pool has updated resources
self.rpc.get_vlan_allocation.return_value = {
'assignedVlans': '200-220, %s, %s' % (VLAN_MIN, VLAN_MAX),
'availableVlans': '200-220',
'allocatedVlans': '%s, %s' % (VLAN_MIN, VLAN_MAX)
}
available_vlans = list(set(range(200, 221)))
assigned_vlans = list(set(available_vlans) | set(allocated_vlans))
self.sync_service.synchronize()
self._ensure_in_db(assigned_vlans, allocated_vlans, available_vlans)
def test_synchronization_test_with_data_from_cvx(self):
"""Test VLAN sync based on data from CVX"""
self.rpc.get_vlan_allocation.return_value = {
'assignedVlans': '51-60,71-80',
'availableVlans': '51-55,71,73,75,77,79',
'allocatedVlans': '56-60,72,74,76,78,80'
}
assigned_vlans = list(set(range(51, 61)) | set(range(71, 81)))
available_vlans = [51, 52, 53, 54, 55, 71, 73, 75, 77, 79]
allocated_vlans = list(set(assigned_vlans) - set(available_vlans))
self.sync_service.synchronize()
self.assertFalse(self.sync_service._force_sync)
self._ensure_in_db(assigned_vlans, allocated_vlans, available_vlans)
networking_arista-2023.1.0/networking_arista/tests/unit/utils.py
# Copyright (c) 2017 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
import json
import re
from oslo_log import log as logging
from neutron_lib import constants as n_const
from neutron_lib.db import api as db_api
from neutron_lib.services.trunk import constants as t_const
from neutron.db.models import l3 as l3_models
from neutron.db.models import l3ha as l3ha_models
from neutron.db.models import segment as segment_models
from neutron.db import models_v2
from neutron.objects.plugins.ml2 import vlanallocation as vlan_alloc_obj
from neutron.plugins.ml2 import models as ml2_models
from neutron.services.trunk import models as t_models
from networking_arista.common import config # noqa
from networking_arista.common import constants as a_const
from networking_arista.ml2 import arista_resources as resources
LOG = logging.getLogger(__name__)
def setup_arista_wrapper_config(cfg, host='host', user='user'):
cfg.CONF.set_override('eapi_host', host, "ml2_arista")
cfg.CONF.set_override('eapi_username', user, "ml2_arista")
cfg.CONF.set_override('sync_interval', 1, "ml2_arista")
cfg.CONF.set_override('conn_timeout', 20, "ml2_arista")
cfg.CONF.set_override('switch_info', ['switch1:user:pass'], "ml2_arista")
class MockCvx(object):
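    """In-memory stand-in for CVX.

    Resources POSTed to a region endpoint are stored keyed by resource id
    and returned on GET; DELETE removes them.
    """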
def __init__(self, region):
whitelist = ['AristaResourcesBase',
'PortResourcesBase',
'AttributeFormatter']
self.endpoint_data = {}
self.endpoint_to_id = {}
self.endpoint_to_class = {}
self.region = region
for cls in resources.__dict__.values():
if (isinstance(cls, type) and
cls.__module__ == resources.__name__ and
cls.__name__ not in whitelist):
region_endpoint = cls.endpoint % {'region': region}
self.endpoint_data[region_endpoint] = {}
self.endpoint_to_id[region_endpoint] = cls.id_key
self.endpoint_to_class[region_endpoint] = cls
def send_api_request(self, endpoint, request_type, data=None):
if request_type != 'GET':
LOG.debug("%(type)s %(endpoint)s %(data)s", {'type': request_type,
'endpoint': endpoint,
'data': data})
if request_type == 'POST':
for resource in data:
endpoint_class = self.endpoint_to_class[endpoint]
for key in endpoint_class.get_resource_ids(resource):
self.endpoint_data[endpoint][key] = resource
elif request_type == 'GET':
return self.endpoint_data[endpoint].values()
elif request_type == 'DELETE':
for resource in data:
endpoint_class = self.endpoint_to_class[endpoint]
for key in endpoint_class.get_resource_ids(resource):
try:
del self.endpoint_data[endpoint][key]
except KeyError:
pass
def get_cvx_uuid(self):
return None
def sync_start(self):
return True
def sync_end(self):
return True
class MockSwitch(object):
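    """In-memory stand-in for an EOS switch.

    Parses the subset of the CLI used by the driver and tracks ACLs and
    their interface bindings, VLANs, SVIs, VRFs and static routes.
    """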
def __init__(self):
self._commands = []
self._acl_rules = dict()
self._bindings = defaultdict(lambda: defaultdict(set))
self._vrfs = dict()
self._svis = dict()
self._vlans = dict()
self._route = dict()
        self._acl_mode_re = re.compile(r'^(?P<delete>no )?ip access-list '
                                       r'(?P<acl>\S+)(?P<dyn> dynamic)?$')
        self._interface_mode_re = re.compile(
            r'^(?P<delete>no )?interface (?P<intf>.+)$')
        self._access_group_re = re.compile(
            r'^(?P<delete>no )?ip access-group (?P<acl>\S+) (?P<dir>\S+)$')
        self._vrf_mode_re = re.compile(
            r'^(?P<delete>no )?vrf instance (?P<name>\S+)$')
        self._vlan_re = re.compile(r'^(?P<delete>no )?vlan (?P<vlan>\d+)$')
        self._ip_address_re = re.compile(
            r'^ip address (?P<ip>[\d.]+)/(?P<mask>\d+)$')
        self._vip_re = re.compile(
            r'^ip virtual-router address (?P<ip>[\d.]+)$')
        self._vrf_route_re = re.compile(
            r'^(?P<delete>no )?ip route vrf (?P<vrf>\S+) '
            r'(?P<network>[\d.]+/\d+) (?P<next_hop>[\d.]+)$')
        self._svi_vrf_re = re.compile(r'^vrf forwarding (?P<vrf>\S+)$')
        self._rd_re = re.compile(r'^rd (?P<rd>\S+)$')
        self._varp_mac_re = re.compile(
            r'^ip virtual-router mac-address (?P<mac>\S+)$')
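        # Active config submode as a (mode, name) tuple, e.g. ('acl', name);
        # None when not inside any submode.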
self._mode = None
def execute(self, commands, commands_to_log=None, keep_alive=True):
ret = []
for command in commands:
if command == 'show ip access-lists dynamic':
acls = {'aclList': []}
for name, rules in self._acl_rules.items():
acls['aclList'].append(
{'name': name,
'sequence': [{'text': rule} for rule in rules]})
ret.append(acls)
elif command == 'show ip access-lists summary dynamic':
bindings = {'aclList': []}
for acl, binding in self._bindings.items():
bindings['aclList'].append(
{'name': acl,
'configuredIngressIntfs': [
{'name': intf} for intf in
binding.get(a_const.INGRESS_DIRECTION, [])],
'configuredEgressIntfs': [
{'name': intf} for intf in
binding.get(a_const.EGRESS_DIRECTION, [])]})
ret.append(bindings)
elif command == 'show vlan':
vlans = {'vlans': {}}
for vlan, info in self._vlans.items():
vlans['vlans'][str(vlan)] = {'dynamic': info['dynamic']}
ret.append(vlans)
elif command == 'show ip interface':
svis = {'interfaces': {}}
for intf, svi in self._svis.items():
svis['interfaces']['Vlan%s' % intf.strip('vlan ')] = {
'interfaceAddress': [
{'primaryIp': {'maskLen': svi['mask'],
'address': svi['ip']}}]}
ret.append(svis)
elif command == 'show vrf':
vrfs = {'vrfs': {}}
for vrf_name, vrf in self._vrfs.items():
vrfs['vrfs'][vrf_name] = {'interfaces': vrf['svis'],
'routeDistinguisher': vrf['rd']}
ret.append(vrfs)
elif command == 'show ip route vrf all':
vrfs = {'vrfs': {}}
for vrf_name, vrf in self._vrfs.items():
vrfs['vrfs'][vrf_name] = {'routes': {}}
                    vrf_routes = vrfs['vrfs'][vrf_name]['routes']
                    if not vrf.get('routes'):
                        continue
                    for route, next_hop in vrf['routes'].items():
                        vrf_routes[route] = {
"kernelProgrammed": True,
"directlyConnected": False,
"routeAction": "forward",
"routeLeaked": False,
"vias": [
{
"interface": "Management1",
"nexthopAddr": vrf_route
}
],
"metric": 0,
"hardwareProgrammed": True,
"routeType": "static",
"preference": 1
}
ret.append(vrfs)
elif command == 'show version':
ret.append({'version': '4.22.0F'})
elif command == 'enable':
ret.append({})
elif 'show' in command:
pass
elif 'ip access-list' in command:
acl_match = self._acl_mode_re.match(command)
acl = acl_match.group('acl')
delete = acl_match.group('delete')
if delete:
del self._acl_rules[acl]
else:
assert acl_match.group('dyn'), "All ACLs must be dynamic"
self._mode = ('acl', acl)
if not self._acl_rules.get(acl):
self._acl_rules[acl] = list()
elif 'interface' in command:
intf_match = self._interface_mode_re.match(command)
intf = intf_match.group('intf')
if intf_match.group('delete'):
del self._svis[intf]
else:
if 'vlan' in intf:
self._svis[intf] = {'ip': '',
'mask': '',
'vip': ''}
self._mode = ('interface', intf)
elif 'vrf instance' in command:
vrf_match = self._vrf_mode_re.match(command)
delete = vrf_match.group('delete')
vrf_name = vrf_match.group('name')
if delete:
del self._vrfs[vrf_name]
else:
self._vrfs[vrf_name] = {'svis': [], 'routes': {}}
self._mode = ('vrf', vrf_name)
elif 'vlan' in command:
self._parse_vlan(command)
elif 'ip route vrf' in command:
self._parse_ip_route_vrf(command)
elif command == 'exit':
self._mode = None
else:
if self._mode:
if self._mode[0] == 'acl':
if command.startswith('no '):
self._acl_rules[self._mode[1]].remove(command[3:])
else:
self._acl_rules[self._mode[1]].append(command)
elif self._mode[0] == 'interface':
acl_match = self._access_group_re.match(command)
if acl_match:
self._parse_acl(acl_match)
else:
self._parse_svi(command)
elif self._mode[0] == 'vrf':
self._parse_vrf(command)
self._commands.append(command)
return ret
def _parse_acl(self, acl_match):
delete = acl_match.group('delete')
acl = acl_match.group('acl')
direction = acl_match.group('dir')
if delete:
self._bindings[acl][direction].remove(
self._mode[1])
else:
            # Delete the old binding for this intf if necessary
for binding in self._bindings.values():
if self._mode[1] in binding[direction]:
binding[direction].remove(self._mode[1])
self._bindings[acl][direction].add(self._mode[1])
def _parse_svi(self, command):
ip_addr_match = self._ip_address_re.match(command)
if ip_addr_match:
self._svis[self._mode[1]]['ip'] = ip_addr_match.group('ip')
self._svis[self._mode[1]]['mask'] = ip_addr_match.group('mask')
vip_match = self._vip_re.match(command)
if vip_match:
self._svis[self._mode[1]]['vip'] = vip_match.group('ip')
vrf_match = self._svi_vrf_re.match(command)
if vrf_match:
self._vrfs[vrf_match.group('vrf')]['svis'].append(self._mode[1])
def _parse_vrf(self, command):
rd_match = self._rd_re.match(command)
if rd_match:
self._vrfs[self._mode[1]]['rd'] = rd_match.group('rd')
varp_mac_match = self._varp_mac_re.match(command)
if varp_mac_match:
pass
def _parse_ip_route_vrf(self, command):
vrf_route_match = self._vrf_route_re.match(command)
if vrf_route_match:
delete = vrf_route_match.group('delete')
vrf = vrf_route_match.group('vrf')
            assert vrf in self._vrfs
vrf_dict = self._vrfs[vrf]
vrf_routes = vrf_dict['routes']
if delete:
del self._vrfs[vrf]['routes'][vrf_route_match.group('network')]
else:
vrf_routes[vrf_route_match.group('network')] = \
vrf_route_match.group('next_hop')
def _parse_vlan(self, command):
vlan_match = self._vlan_re.match(command)
delete = vlan_match.group('delete')
vlan = vlan_match.group('vlan')
if delete:
del self._vlans[vlan]
else:
self._vlans[vlan] = {'dynamic': False}
@property
def received_commands(self):
return self._commands
def assert_command_not_received(self, unexpected_cmd):
for cmd in self._commands:
assert unexpected_cmd not in cmd
def clear_received_commands(self):
self._commands = []
def reset_switch(self):
self._commands = []
self._acl_rules = dict()
self._bindings = defaultdict(lambda: defaultdict(set))
self._vrfs = dict()
self._svis = dict()
self._vlans = dict()
# Network utils #
def create_networks(networks):
session = db_api.get_writer_session()
with session.begin():
for network in networks:
session.add(models_v2.Network(**network))
session.commit()
def delete_network(network_id):
session = db_api.get_writer_session()
with session.begin():
network_model = models_v2.Network
session.query(network_model).filter(
network_model.id == network_id).delete()
session.commit()
def delete_networks_for_tenant(tenant_id):
session = db_api.get_writer_session()
with session.begin():
network_model = models_v2.Network
networks = session.query(network_model).filter(
network_model.project_id == tenant_id).all()
for network in networks:
delete_ports_on_network(network.id)
session.delete(network)
session.commit()
# Segment utils #
def create_segments(segments):
session = db_api.get_writer_session()
with session.begin():
for segment in segments:
session.add(segment_models.NetworkSegment(**segment))
session.commit()
def delete_segment(segment_id):
session = db_api.get_writer_session()
with session.begin():
segment_model = segment_models.NetworkSegment
session.query(segment_model).filter(
segment_model.id == segment_id).delete()
session.commit()
def delete_segments_for_network(network_id):
session = db_api.get_writer_session()
with session.begin():
segment_model = segment_models.NetworkSegment
session.query(segment_model).filter(
segment_model.network_id == network_id).delete()
session.commit()
def delete_segments_for_tenant(tenant_id):
session = db_api.get_writer_session()
network_model = models_v2.Network
segment_model = segment_models.NetworkSegment
with session.begin():
networks = session.query(network_model).filter(
network_model.project_id == tenant_id).all()
for network in networks:
session.query(segment_model).filter(
segment_model.network_id == network.id).delete()
session.commit()
# Port utils #
def create_ports(ports):
session = db_api.get_writer_session()
with session.begin():
for port in ports:
binding_levels = port.pop('binding_levels', [])
binding = port.pop('binding', {})
session.add(models_v2.Port(**port))
if binding:
binding['port_id'] = port['id']
if binding['vif_type'] == 'distributed':
distributed_binding = binding.copy()
distributed_binding['status'] = 'ACTIVE'
for host in binding['host']:
distributed_binding['host'] = host
session.add(
ml2_models.DistributedPortBinding(
**distributed_binding))
else:
session.add(ml2_models.PortBinding(**binding))
for binding_level in binding_levels:
binding_level['port_id'] = port['id']
session.add(ml2_models.PortBindingLevel(**binding_level))
session.commit()
def delete_port(port_id):
session = db_api.get_writer_session()
with session.begin():
port_model = models_v2.Port
session.query(port_model).filter(
port_model.id == port_id).delete()
session.commit()
def delete_ports_on_network(network_id):
session = db_api.get_writer_session()
with session.begin():
port_model = models_v2.Port
session.query(port_model).filter(
port_model.network_id == network_id).delete()
session.commit()
def delete_ports_for_instance(instance_id):
session = db_api.get_writer_session()
with session.begin():
port_model = models_v2.Port
session.query(port_model).filter(
port_model.device_id == instance_id).delete()
session.commit()
def delete_ports_for_tenant(tenant_id):
session = db_api.get_writer_session()
with session.begin():
port_model = models_v2.Port
session.query(port_model).filter(
port_model.project_id == tenant_id).delete()
session.commit()
# Port binding utils #
def delete_port_binding(port_id, host):
session = db_api.get_writer_session()
with session.begin():
# We cannot do any bulk deletes here because every delete bumps the
# revision number of the Port
pbl_model = ml2_models.PortBindingLevel
levels = (session.query(pbl_model)
.filter(pbl_model.port_id == port_id,
pbl_model.host == host))
for level in levels:
session.delete(level)
pb_model = ml2_models.PortBinding
bindings = (session.query(pb_model)
.filter(pb_model.port_id == port_id,
pb_model.host == host))
for binding in bindings:
session.delete(binding)
dpb_model = ml2_models.DistributedPortBinding
bindings = (session.query(dpb_model)
.filter(dpb_model.port_id == port_id,
dpb_model.host == host))
for binding in bindings:
session.delete(binding)
session.commit()
def remove_switch_binding(port_id, switch_id, intf_id):
session = db_api.get_writer_session()
with session.begin():
pb_model = ml2_models.PortBinding
binding = (session.query(pb_model)
.filter(pb_model.port_id == port_id).first())
profile = json.loads(binding.profile)
lli = profile['local_link_information']
for idx, link in enumerate(lli):
if link['switch_id'] == switch_id and link['port_id'] == intf_id:
lli.pop(idx)
break
binding.profile = json.dumps(profile)
if len(lli) == 0:
delete_port_binding(port_id, binding.host)
session.commit()
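

# For reference: remove_switch_binding() assumes the binding profile is a
# JSON string shaped like the baremetal profiles built in setup_scenario()
# below, e.g.
#     '{"local_link_information": [{"switch_id": "00:11:22:33:44:55",
#                                   "port_id": "Ethernet1"}]}'
# and deletes the whole port binding once the last link is removed.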


# Trunk utils #
def create_trunks(trunks):
session = db_api.get_writer_session()
with session.begin():
for trunk in trunks:
session.add(t_models.Trunk(**trunk))
session.commit()


def create_subports(subports):
session = db_api.get_writer_session()
with session.begin():
for subport in subports:
session.add(t_models.SubPort(**subport))
session.commit()


# L3 HA Router utils #
def create_ha_routers(routers):
session = db_api.get_writer_session()
with session.begin():
for router in routers:
session.add(l3_models.Router(**router))
session.commit()


def create_ha_router_networks(networks):
session = db_api.get_writer_session()
with session.begin():
for network in networks:
session.add(l3ha_models.L3HARouterNetwork(**network))
session.commit()


def delete_ha_router_for_tenant(tenant_id):
session = db_api.get_writer_session()
with session.begin():
router_model = l3_models.Router
routers = session.query(router_model).filter(
router_model.project_id == tenant_id).all()
for router in routers:
session.delete(router)
session.commit()


def setup_scenario():
# Create networks
regular_network = {'id': 'n1',
'project_id': 't1',
'name': 'regular',
'admin_state_up': True,
'rbac_entries': []}
hpb_network = {'id': 'n2',
'project_id': 't2',
'name': 'hpb',
'admin_state_up': True,
'rbac_entries': []}
ha_network = {'id': 'HA network',
'project_id': '',
'name': 'l3 ha',
'admin_state_up': True,
'rbac_entries': []}

    # Create segments
flat_segment = {'id': 'sError',
'network_id': 'n1',
'is_dynamic': False,
'network_type': 'flat'}
regular_segment = {'id': 's1',
'network_id': 'n1',
'is_dynamic': False,
'segmentation_id': 11,
'network_type': 'vlan',
'physical_network': 'default'}
fabric_segment = {'id': 's2',
'network_id': 'n2',
'is_dynamic': False,
'segmentation_id': 20001,
'network_type': 'vxlan',
'physical_network': None}
dynamic_segment1 = {'id': 's3',
'network_id': 'n2',
'is_dynamic': True,
'segmentation_id': 21,
'network_type': 'vlan',
'physical_network': 'switch1'}
dynamic_segment2 = {'id': 's4',
'network_id': 'n2',
'is_dynamic': True,
'segmentation_id': 31,
'network_type': 'vlan',
'physical_network': 'switch2'}
ha_segment = {'id': 's5',
'network_id': 'HA network',
'is_dynamic': False,
'segmentation_id': 33,
'network_type': 'vlan',
'physical_network': 'default'}
ha_fabric_segment = {'id': 's6',
'network_id': 'HA network',
'is_dynamic': False,
'segmentation_id': 20010,
'network_type': 'vxlan',
'physical_network': None}
ha_dynamic_segment = {'1': {'id': 's7',
'network_id': 'HA network',
'is_dynamic': True,
'segmentation_id': 700,
'network_type': 'vlan',
'physical_network': 'switch1'},
'2': {'id': 's8',
'network_id': 'HA network',
'is_dynamic': True,
'segmentation_id': 800,
'network_type': 'vlan',
'physical_network': 'switch2'}}

    # Create ports
port_ctr = 0
ports = list()
trunk_ctr = 0
trunks = list()
subports = list()
instance_types = [(n_const.DEVICE_OWNER_DHCP, 'normal'),
(n_const.DEVICE_OWNER_DVR_INTERFACE, 'normal'),
(n_const.DEVICE_OWNER_COMPUTE_PREFIX, 'normal'),
(n_const.DEVICE_OWNER_COMPUTE_PREFIX, 'baremetal'),
(n_const.DEVICE_OWNER_BAREMETAL_PREFIX, 'baremetal'),
(n_const.DEVICE_OWNER_ROUTER_HA_INTF, 'normal'),
(n_const.DEVICE_OWNER_ROUTER_INTF, 'normal')]
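
    # For each (device_owner, vnic_type) pair below, build one port on the
    # regular VLAN network and one on the HPB network. DVR owners get
    # distributed bindings across both hosts, compute owners additionally
    # get a trunk with a single subport, and HA router owners get per-host
    # HA ports instead of the regular/HPB pair.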
for device_owner, vnic_type in instance_types:
vif_type = 'ovs'
regular_host = 'host1'
regular_binding_levels = [
{'host': 'host1',
'segment_id': regular_segment['id'],
'level': 0,
'driver': 'arista'}]
hpb_binding_levels = [
{'host': 'host2',
'segment_id': fabric_segment['id'],
'level': 0,
'driver': 'arista'},
{'host': 'host2',
'segment_id': dynamic_segment1['id'],
'level': 1,
'driver': 'arista'}]
hpb_host = 'host2'
binding_profile = ''
if vnic_type == 'baremetal':
binding_profile = ('{"local_link_information": ' +
'[{"switch_id": "00:11:22:33:44:55", ' +
'"port_id": "Ethernet1"}, ' +
'{"switch_id": "00:11:22:33:44:55", ' +
'"port_id": "Ethernet2"}, ' +
'{"switch_id": "55:44:33:22:11:00", ' +
'"port_id": "Ethernet1/1"}, ' +
'{"switch_id": "55:44:33:22:11:00", ' +
'"port_id": "Ethernet1/2"}]}')
if device_owner == n_const.DEVICE_OWNER_DVR_INTERFACE:
vif_type = 'distributed'
regular_host = ['host1', 'host2']
regular_binding_levels = [
{'host': 'host1',
'segment_id': regular_segment['id'],
'level': 0,
'driver': 'arista'},
{'host': 'host2',
'segment_id': regular_segment['id'],
'level': 0,
'driver': 'arista'}]
hpb_binding_levels = [
{'host': 'host1',
'segment_id': fabric_segment['id'],
'level': 0,
'driver': 'arista'},
{'host': 'host1',
'segment_id': dynamic_segment1['id'],
'level': 1,
'driver': 'arista'},
{'host': 'host2',
'segment_id': fabric_segment['id'],
'level': 0,
'driver': 'arista'},
{'host': 'host2',
'segment_id': dynamic_segment2['id'],
'level': 1,
'driver': 'arista'}]
hpb_host = ['host1', 'host2']
port_ctr += 1
regular_port = {'admin_state_up': True,
'status': 'ACTIVE',
'device_id': '%s%s1' % (device_owner, vnic_type),
'device_owner': device_owner,
'binding': {'host': regular_host,
'vif_type': vif_type,
'vnic_type': vnic_type,
'profile': binding_profile},
'tenant_id': 't1',
'id': 'p%d' % port_ctr,
'network_id': regular_network['id'],
'mac_address': '00:00:00:00:00:%02x' % port_ctr,
'name': 'regular_port',
'binding_levels': regular_binding_levels}
port_ctr += 1
hpb_port = {'admin_state_up': True,
'status': 'ACTIVE',
'device_id': '%s%s2' % (device_owner, vnic_type),
'device_owner': device_owner,
'binding': {'host': hpb_host,
'vif_type': vif_type,
'vnic_type': vnic_type,
'profile': binding_profile},
'tenant_id': 't2',
'id': 'p%d' % port_ctr,
'network_id': hpb_network['id'],
'mac_address': '00:00:00:00:00:%02x' % port_ctr,
'name': 'hpb_port',
'binding_levels': hpb_binding_levels}
if device_owner != n_const.DEVICE_OWNER_ROUTER_HA_INTF:
ports.extend([regular_port, hpb_port])
if device_owner == n_const.DEVICE_OWNER_COMPUTE_PREFIX:
port_ctr += 1
trunk_subport = {'admin_state_up': True,
'status': 'ACTIVE',
'device_id': '%s%s1' % (device_owner, vnic_type),
'device_owner': t_const.TRUNK_SUBPORT_OWNER,
'binding': {'host': regular_host,
'vif_type': vif_type,
'vnic_type': vnic_type,
'profile': binding_profile},
'tenant_id': 't1',
'id': 'p%d' % port_ctr,
'network_id': regular_network['id'],
'mac_address': '10:00:00:00:00:%02x' % port_ctr,
'name': 'trunk_subport',
'binding_levels': regular_binding_levels}
ports.extend([trunk_subport])
trunk = {'id': 't%d' % trunk_ctr,
'port_id': regular_port['id']}
subport = {'port_id': trunk_subport['id'],
'trunk_id': trunk['id'],
'segmentation_type': 'vlan',
'segmentation_id': 100}
trunk_ctr += 1
trunks.append(trunk)
subports.append(subport)
if device_owner == n_const.DEVICE_OWNER_ROUTER_HA_INTF:
vif_type = 'ovs'
ha_router_ports = [{'admin_state_up': True,
'status': 'ACTIVE',
'device_id': '%s%s' % (device_owner,
vnic_type),
'device_owner': device_owner,
'binding': {'host': 'host%d' % p,
'vif_type': vif_type,
'vnic_type': vnic_type,
'profile': binding_profile},
'tenant_id': '',
'id': 'uuid-ha-%d' % p,
'network_id': ha_network['id'],
'mac_address': '00:00:00:00:00:%02x' % p,
'name': 'regular_port',
'binding_levels': [
{'host': 'host%d' % p,
'segment_id': ha_segment['id'],
'level': 0,
'driver': 'openvswitch'}]}
for p in range(1, 3)]
ports.extend(ha_router_ports)
ha_router_hpb_ports = [
{'admin_state_up': True,
'status': 'ACTIVE',
'device_id': '%s%s' % (device_owner,
vnic_type),
'device_owner': device_owner,
'binding': {'host': 'host%d' % p,
'vif_type': vif_type,
'vnic_type': vnic_type,
'profile': binding_profile},
'tenant_id': '',
'id': 'uuid-hpb-ha-%d' % p,
'network_id': ha_network['id'],
'mac_address': '00:00:00:00:01:%02x' % p,
'name': 'ha_router_hpb_port',
'binding_levels': [
{'host': 'host%d' % p,
'segment_id': ha_fabric_segment['id'],
'level': 0,
'driver': 'arista'},
{'host': 'host%d' % p,
'segment_id': ha_dynamic_segment['%d' % p]['id'],
'level': 1,
'driver': 'openvswitch'}]} for p in range(1, 3)]
ports.extend(ha_router_hpb_ports)
routers = [
{'project_id': 'ha-router-project',
'id': n_const.DEVICE_OWNER_ROUTER_HA_INTF + 'normal',
'name': 'test-router'}]
create_ha_routers(routers)
create_networks([regular_network, hpb_network, ha_network])
ha_router_networks = [
{'project_id': 'ha-router-project',
'network_id': ha_network['id']}]
create_ha_router_networks(ha_router_networks)
create_segments([regular_segment, fabric_segment, flat_segment,
dynamic_segment1, dynamic_segment2, ha_segment,
ha_fabric_segment, ha_dynamic_segment['1'],
ha_dynamic_segment['2']])
create_ports(ports)
create_trunks(trunks)
create_subports(subports)
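

# Illustrative use of setup_scenario() from a test case (the base class and
# import are assumptions about the calling test, not part of this module):
#
#     from networking_arista.tests.unit import utils
#
#     class DbLibTestCase(testlib_api.SqlTestCase):
#         def setUp(self):
#             super(DbLibTestCase, self).setUp()
#             utils.setup_scenario()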


# VlanAllocation utils #
def create_vlan_allocation(ctx, segmentation_id, physical_network='default',
                           allocated=False):
attr = {'physical_network': physical_network,
'allocated': allocated,
'vlan_id': segmentation_id}
alloc = vlan_alloc_obj.VlanAllocation(ctx, **attr)
alloc.create()


def get_vlan_allocation(ctx):
return vlan_alloc_obj.VlanAllocation.get_objects(ctx)


def delete_vlan_allocation(ctx):
vlan_alloc_obj.VlanAllocation.delete_objects(ctx)
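

# Illustrative usage of the VlanAllocation helpers (the admin-context import
# is an assumption about the calling test):
#
#     from neutron_lib import context
#     ctx = context.get_admin_context()
#     create_vlan_allocation(ctx, segmentation_id=42)
#     assert [a.vlan_id for a in get_vlan_allocation(ctx)] == [42]
#     delete_vlan_allocation(ctx)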
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1693270226.6361027
networking_arista-2023.1.0/networking_arista.egg-info/0000775000175000017500000000000000000000000022740 5ustar00zuulzuul00000000000000././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270226.0
networking_arista-2023.1.0/networking_arista.egg-info/PKG-INFO0000664000175000017500000000212700000000000024037 0ustar00zuulzuul00000000000000Metadata-Version: 1.2
Name: networking-arista
Version: 2023.1.0
Summary: Arista Networking drivers
Home-page: https://opendev.org/x/networking-arista/
Author: Arista Networks
Author-email: openstack-dev@arista.com
License: UNKNOWN
Description: ===============================
networking-arista
===============================

Arista Networking drivers

* Free software: Apache license
* Source: https://opendev.org/x/networking-arista
* Bug: https://bugs.launchpad.net/networking-arista
Platform: UNKNOWN
Classifier: Environment :: OpenStack
Classifier: Intended Audience :: Information Technology
Classifier: Intended Audience :: System Administrators
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Operating System :: POSIX :: Linux
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Requires-Python: >=3.8
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270226.0
networking_arista-2023.1.0/networking_arista.egg-info/SOURCES.txt0000664000175000017500000001102400000000000024622 0ustar00zuulzuul00000000000000.coveragerc
.mailmap
.stestr.conf
.zuul.yaml
AUTHORS
CONTRIBUTING.rst
ChangeLog
HACKING.rst
LICENSE
README.rst
babel.cfg
requirements.txt
setup.cfg
setup.py
test-requirements.txt
tox.ini
devstack/plugin.sh
devstack/settings
doc/source/conf.py
doc/source/contributing.rst
doc/source/index.rst
doc/source/installation.rst
doc/source/readme.rst
doc/source/usage.rst
dockerfiles/README
etc/ml2_conf_arista.ini
etc/policy.json
networking_arista/__init__.py
networking_arista/_i18n.py
networking_arista.egg-info/PKG-INFO
networking_arista.egg-info/SOURCES.txt
networking_arista.egg-info/dependency_links.txt
networking_arista.egg-info/entry_points.txt
networking_arista.egg-info/not-zip-safe
networking_arista.egg-info/pbr.json
networking_arista.egg-info/requires.txt
networking_arista.egg-info/top_level.txt
networking_arista/common/__init__.py
networking_arista/common/api.py
networking_arista/common/config.py
networking_arista/common/constants.py
networking_arista/common/db_lib.py
networking_arista/common/exceptions.py
networking_arista/common/utils.py
networking_arista/db/README
networking_arista/db/__init__.py
networking_arista/db/migration/README
networking_arista/db/migration/__init__.py
networking_arista/db/migration/alembic_migrations/README
networking_arista/db/migration/alembic_migrations/__init__.py
networking_arista/db/migration/alembic_migrations/env.py
networking_arista/db/migration/alembic_migrations/script.py.mako
networking_arista/db/migration/alembic_migrations/versions/296b4e0236e0_initial_db_version.py
networking_arista/db/migration/alembic_migrations/versions/CONTRACT_HEAD
networking_arista/db/migration/alembic_migrations/versions/EXPAND_HEAD
networking_arista/db/migration/alembic_migrations/versions/__init__.py
networking_arista/db/migration/alembic_migrations/versions/liberty/contract/47036dc8697a_initial_db_version_contract.py
networking_arista/db/migration/alembic_migrations/versions/liberty/expand/1c6993ce7db0_initial_db_version_expand.py
networking_arista/db/migration/alembic_migrations/versions/queens/contract/39c2eeb67116_drop_aristaprovisionednets.py
networking_arista/db/migration/alembic_migrations/versions/queens/contract/941bad5630c1_drop_aristaprovisionedvms.py
networking_arista/db/migration/alembic_migrations/versions/queens/contract/dc7bf9c1ab4d_drop_aristaprovisionedtenants.py
networking_arista/l3Plugin/__init__.py
networking_arista/l3Plugin/arista_l3_driver.py
networking_arista/l3Plugin/l3_arista.py
networking_arista/ml2/__init__.py
networking_arista/ml2/arista_resources.py
networking_arista/ml2/arista_sync.py
networking_arista/ml2/arista_trunk.py
networking_arista/ml2/mechanism_arista.py
networking_arista/ml2/rpc/__init__.py
networking_arista/ml2/rpc/arista_eapi.py
networking_arista/ml2/rpc/arista_json.py
networking_arista/ml2/rpc/base.py
networking_arista/ml2/security_groups/__init__.py
networking_arista/ml2/security_groups/arista_security_groups.py
networking_arista/ml2/security_groups/security_group_sync.py
networking_arista/ml2/security_groups/switch_helper.py
networking_arista/ml2/type_drivers/__init__.py
networking_arista/ml2/type_drivers/driver_helpers.py
networking_arista/ml2/type_drivers/type_arista_vlan.py
networking_arista/tests/__init__.py
networking_arista/tests/base.py
networking_arista/tests/test_networking_arista.py
networking_arista/tests/unit/__init__.py
networking_arista/tests/unit/utils.py
networking_arista/tests/unit/common/__init__.py
networking_arista/tests/unit/common/test_api.py
networking_arista/tests/unit/common/test_db_lib.py
networking_arista/tests/unit/l3Plugin/__init__.py
networking_arista/tests/unit/l3Plugin/test_arista_l3_driver.py
networking_arista/tests/unit/ml2/__init__.py
networking_arista/tests/unit/ml2/mechanism_fabric.py
networking_arista/tests/unit/ml2/mechanism_ha_simulator.py
networking_arista/tests/unit/ml2/ml2_test_base.py
networking_arista/tests/unit/ml2/test_arista_resources.py
networking_arista/tests/unit/ml2/test_arista_sync.py
networking_arista/tests/unit/ml2/test_mechanism_arista.py
networking_arista/tests/unit/ml2/rpc/__init__.py
networking_arista/tests/unit/ml2/rpc/test_arista_eapi_rpc_wrapper.py
networking_arista/tests/unit/ml2/rpc/test_arista_json_rpc_wrapper.py
networking_arista/tests/unit/ml2/security_groups/__init__.py
networking_arista/tests/unit/ml2/security_groups/sg_test_base.py
networking_arista/tests/unit/ml2/security_groups/test_arista_security_groups.py
networking_arista/tests/unit/ml2/security_groups/test_security_group_sync.py
networking_arista/tests/unit/ml2/type_drivers/__init__.py
networking_arista/tests/unit/ml2/type_drivers/test_arista_type_driver.py././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270226.0
networking_arista-2023.1.0/networking_arista.egg-info/dependency_links.txt0000664000175000017500000000000100000000000027006 0ustar00zuulzuul00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270226.0
networking_arista-2023.1.0/networking_arista.egg-info/entry_points.txt0000664000175000017500000000170200000000000026236 0ustar00zuulzuul00000000000000[neutron.db.alembic_migrations]
networking-arista = networking_arista.db.migration:alembic_migrations
[neutron.ml2.mechanism_drivers]
arista = networking_arista.ml2.mechanism_arista:AristaDriver
arista_ha_scale_sim = networking_arista.tests.unit.ml2.mechanism_ha_simulator:AristaHAScaleSimulationDriver
arista_ha_sim = networking_arista.tests.unit.ml2.mechanism_ha_simulator:AristaHASimulationDriver
arista_ml2 = networking_arista.ml2.mechanism_arista:AristaDriver
arista_test_fabric = networking_arista.tests.unit.ml2.mechanism_fabric:TestFabricDriver
[neutron.ml2.type_drivers]
arista_vlan = networking_arista.ml2.type_drivers.type_arista_vlan:AristaVlanTypeDriver
[neutron.service_plugins]
arista_l3 = networking_arista.l3Plugin.l3_arista:AristaL3ServicePlugin
arista_security_group = networking_arista.ml2.security_groups.arista_security_groups:AristaSecurityGroupHandler
[oslo.config.opts]
networking_arista = networking_arista.common.config:list_opts
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270226.0
networking_arista-2023.1.0/networking_arista.egg-info/not-zip-safe0000664000175000017500000000000100000000000025166 0ustar00zuulzuul00000000000000
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270226.0
networking_arista-2023.1.0/networking_arista.egg-info/pbr.json0000664000175000017500000000005600000000000024417 0ustar00zuulzuul00000000000000{"git_version": "84eca97", "is_release": true}././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270226.0
networking_arista-2023.1.0/networking_arista.egg-info/requires.txt0000664000175000017500000000027000000000000025337 0ustar00zuulzuul00000000000000SQLAlchemy>=1.4.23
alembic>=1.6.5
neutron-lib>=3.4.0
oslo.config>=9.0.0
oslo.i18n>=3.20.0
oslo.log>=4.5.0
oslo.service>=2.8.0
oslo.utils>=4.8.0
pbr>=4.0.0
requests>=2.18.0
six>=1.10.0
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270226.0
networking_arista-2023.1.0/networking_arista.egg-info/top_level.txt0000664000175000017500000000002200000000000025464 0ustar00zuulzuul00000000000000networking_arista
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/requirements.txt0000664000175000017500000000147100000000000021003 0ustar00zuulzuul00000000000000# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
pbr>=4.0.0 # Apache-2.0
alembic>=1.6.5 # MIT
neutron-lib>=3.4.0 # Apache-2.0
oslo.i18n>=3.20.0 # Apache-2.0
oslo.config>=9.0.0 # Apache-2.0
oslo.log>=4.5.0 # Apache-2.0
oslo.service>=2.8.0 # Apache-2.0
oslo.utils>=4.8.0 # Apache-2.0
requests>=2.18.0 # Apache-2.0
six>=1.10.0 # MIT
SQLAlchemy>=1.4.23 # MIT
# The comment below indicates this project repo is current with neutron-lib
# and should receive neutron-lib consumption patches as they are released
# in neutron-lib. It also implies the project will stay current with TC
# and infra initiatives ensuring consumption patches can land.
# neutron-lib-current
././@PaxHeader0000000000000000000000000000003400000000000011452 xustar000000000000000028 mtime=1693270226.6521015
networking_arista-2023.1.0/setup.cfg0000664000175000017500000000446100000000000017342 0ustar00zuulzuul00000000000000[metadata]
name = networking_arista
summary = Arista Networking drivers
description-file =
README.rst
author = Arista Networks
author-email = openstack-dev@arista.com
home-page = https://opendev.org/x/networking-arista/
python-requires = >=3.8
classifier =
Environment :: OpenStack
Intended Audience :: Information Technology
Intended Audience :: System Administrators
License :: OSI Approved :: Apache Software License
Operating System :: POSIX :: Linux
Programming Language :: Python
Programming Language :: Python :: 3
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9
Programming Language :: Python :: 3.10
[files]
packages =
networking_arista
data_files =
/etc/neutron/plugins/ml2 =
etc/ml2_conf_arista.ini
[global]
setup-hooks =
pbr.hooks.setup_hook
[entry_points]
neutron.ml2.mechanism_drivers =
arista = networking_arista.ml2.mechanism_arista:AristaDriver
arista_ml2 = networking_arista.ml2.mechanism_arista:AristaDriver
arista_test_fabric = networking_arista.tests.unit.ml2.mechanism_fabric:TestFabricDriver
arista_ha_sim = networking_arista.tests.unit.ml2.mechanism_ha_simulator:AristaHASimulationDriver
arista_ha_scale_sim = networking_arista.tests.unit.ml2.mechanism_ha_simulator:AristaHAScaleSimulationDriver
neutron.service_plugins =
arista_l3 = networking_arista.l3Plugin.l3_arista:AristaL3ServicePlugin
arista_security_group = networking_arista.ml2.security_groups.arista_security_groups:AristaSecurityGroupHandler
neutron.db.alembic_migrations =
networking-arista = networking_arista.db.migration:alembic_migrations
neutron.ml2.type_drivers =
arista_vlan = networking_arista.ml2.type_drivers.type_arista_vlan:AristaVlanTypeDriver
oslo.config.opts =
networking_arista = networking_arista.common.config:list_opts
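# A minimal sketch (an assumption, not shipped configuration) of enabling
# these entry points in /etc/neutron/plugins/ml2/ml2_conf.ini:
#   [ml2]
#   mechanism_drivers = openvswitch,arista
#   type_drivers = vlan,arista_vlan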
[build_sphinx]
source-dir = doc/source
build-dir = doc/build
all_files = 1
[upload_sphinx]
upload-dir = doc/build/html
[compile_catalog]
directory = networking_arista/locale
domain = networking-arista
[update_catalog]
domain = networking-arista
output_dir = networking_arista/locale
input_file = networking_arista/locale/networking-arista.pot
[extract_messages]
keywords = _ gettext ngettext l_ lazy_gettext
mapping_file = babel.cfg
output_file = networking_arista/locale/networking-arista.pot
[wheel]
universal = 1
[egg_info]
tag_build =
tag_date = 0
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/setup.py0000664000175000017500000000200600000000000017224 0ustar00zuulzuul00000000000000# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools
# In Python < 2.7.4, lazy loading of the `pbr` package breaks setuptools
# if other modules have registered functions in `atexit`.
# Solution from: http://bugs.python.org/issue15881#msg170215
try:
import multiprocessing # noqa
except ImportError:
pass
setuptools.setup(
setup_requires=['pbr>=2.0.0'],
pbr=True)
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/test-requirements.txt0000664000175000017500000000077700000000000021770 0ustar00zuulzuul00000000000000# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
hacking>=3.0.1,<3.1.0 # Apache-2.0
coverage!=4.4,>=4.0 # Apache-2.0
mock>=3.0.0 # BSD
python-subunit>=1.0.0 # Apache-2.0/BSD
oslotest>=3.2.0 # Apache-2.0
stestr>=1.0.0 # Apache-2.0
testtools>=2.2.0 # MIT
testresources>=2.0.0 # Apache-2.0/BSD
testscenarios>=0.4 # Apache-2.0/BSD
WebTest>=2.0.27 # MIT
././@PaxHeader0000000000000000000000000000002600000000000011453 xustar000000000000000022 mtime=1693270202.0
networking_arista-2023.1.0/tox.ini0000664000175000017500000000351400000000000017032 0ustar00zuulzuul00000000000000[tox]
envlist = py3-dev,pep8-dev
minversion = 3.18.0
ignore_basepython_conflict = True
[testenv]
basepython = python3
usedevelop = True
setenv = VIRTUAL_ENV={envdir}
PYTHONWARNINGS=default::DeprecationWarning
deps =
-c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/2023.1}
-r{toxinidir}/test-requirements.txt
-r{toxinidir}/requirements.txt
neutron>=22.0.0,<23.0.0
allowlist_externals =
find
commands =
stestr run {posargs}
[testenv:dev]
# run locally (not in the gate) using editable mode
# https://pip.pypa.io/en/stable/reference/pip_install/#editable-installs
# note that the order is important to ensure dependencies don't get overridden
commands =
pip install -q -e "git+https://git.openstack.org/openstack/neutron@stable/2023.1#egg=neutron"
[testenv:py3-dev]
commands =
{[testenv:dev]commands}
pip freeze
stestr run {posargs}
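# Usage sketch: run a single test module against the neutron checkout, e.g.
#   tox -e py3-dev -- networking_arista.tests.unit.common.test_db_lib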
[testenv:debug]
commands =
{[testenv:dev]commands}
pip freeze
oslo_debug_helper {posargs}
[testenv:pep8]
commands =
flake8
neutron-db-manage --subproject networking-arista check_migration
[testenv:pep8-dev]
commands =
{[testenv:dev]commands}
pip freeze
flake8
neutron-db-manage --subproject networking-arista check_migration
[testenv:venv]
commands = {posargs}
[testenv:cover]
commands =
find networking_arista -type f -name "*.pyc" -delete
stestr run {posargs}
coverage combine
coverage html -d cover
coverage xml -o cover/coverage.xml
[testenv:docs]
commands = python setup.py build_sphinx
[flake8]
# The checks listed in `ignore` are skipped on purpose per project style
# discussions (e.g. W504 conflicts with W503, so one of the two must be
# ignored).
show-source = True
ignore = E126,E128,E731,I202,H405,N530,W504
enable-extensions=H106,H203,H204,H205,H904
builtins = _
exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build
[hacking]
import_exceptions = networking_arista._i18n