==> nsscache-version-0.42/.coveragerc <==
[run]
include = nss_cache
branch = True
[report]
show_missing = True

==> nsscache-version-0.42/.github/dependabot.yml <==
version: 2
updates:
  - package-ecosystem: "github-actions"
    directory: "/"
    schedule:
      interval: "daily"
  - package-ecosystem: "pip"
    directory: "/"
    schedule:
      interval: "daily"

==> nsscache-version-0.42/.github/workflows/ci.yml <==
name: CI
on:
  push:
  pull_request:

jobs:
  test:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - uses: actions/setup-python@v2
        with:
          python-version: '3.7'
      - name: install dependencies
        run: |
          sudo apt-get update -y
          sudo apt-get install -y libnss-db libdb-dev libcurl4-gnutls-dev libgnutls28-dev libldap2-dev libsasl2-dev
          python -m pip install --upgrade pip
          pip install -r requirements.txt
          pip install pytest-github-actions-annotate-failures
      - name: Test
        run: python setup.py test --addopts "-v --durations=0 --junitxml=test-results/junit.xml --cov=nss_cache"
      - uses: codecov/codecov-action@v1
      - name: Install
        run: pip install --user .
      - name: slapd Regression Test
        run: |
          sudo apt-get install -y slapd ldap-utils libnss-db db-util
          tests/slapd-regtest
      - name: yapf
        run: |
          pip install yapf
          yapf --diff --recursive nss_cache nsscache
      - name: pylint
        run: |
          pip install pylint
          pylint nsscache nss_cache
        # TODO(jaq): eventually make this lint clean and remove this line
        continue-on-error: true

==> nsscache-version-0.42/.github/workflows/codeql-analysis.yml <==
# For most projects, this workflow file will not need changing; you simply need
# to commit it to your repository.
#
# You may wish to alter this file to override the set of languages analyzed,
# or to provide custom queries or build logic.
#
# ******** NOTE ********
# We have attempted to detect the languages in your repository. Please check
# the `language` matrix defined below to confirm you have the correct set of
# supported CodeQL languages.
#
name: "CodeQL"
on:
  push:
    branches: [ master ]
  pull_request:
    # The branches below must be a subset of the branches above
    branches: [ master ]
  schedule:
    - cron: '22 12 * * 1'

jobs:
  analyze:
    name: Analyze
    runs-on: ubuntu-latest

    strategy:
      fail-fast: false
      matrix:
        language: [ 'python' ]
        # CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
        # Learn more:
        # https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed

    steps:
    - name: Checkout repository
      uses: actions/checkout@v2

    # Initializes the CodeQL tools for scanning.
    - name: Initialize CodeQL
      uses: github/codeql-action/init@v1
      with:
        languages: ${{ matrix.language }}
        # If you wish to specify custom queries, you can do so here or in a config file.
        # By default, queries listed here will override any specified in a config file.
        # Prefix the list here with "+" to use these queries and those in the config file.
        # queries: ./path/to/local/query, your-org/your-repo/queries@main

    # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
    # If this step fails, then you should remove it and run the build manually (see below)
    - name: Autobuild
      uses: github/codeql-action/autobuild@v1

    # ℹ️ Command-line programs to run using the OS shell.
    # 📚 https://git.io/JvXDl

    # ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
    #    and modify them (or add more) to build your code if your project
    #    uses a compiled language

    #- run: |
    #   make bootstrap
    #   make release

    - name: Perform CodeQL Analysis
      uses: github/codeql-action/analyze@v1

==> nsscache-version-0.42/.gitignore <==
MANIFEST
build
debian/files
debian/nsscache.debhelper.log
debian/nsscache.postinst.debhelper
debian/nsscache.prerm.debhelper
debian/nsscache.substvars
debian/nsscache
debian/patches
dist
__pycache__/
.pc
*.pyc
*.dsc
*.tar.gz
*.deb
*.changes
*.upload
*.diff.gz
*.build
a.out
*.debian.tar.xz
.pybuild
debian/debhelper-build-stamp
*~
debian/.debhelper/
/.cache/
/.eggs/
/.ghi.yml
/.pytest_cache/
nsscache.egg-info/
/tmpconfig.yml
/.coverage
/test-results/

==> nsscache-version-0.42/CODE_OF_CONDUCT.md <==
# Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at jaq@google.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/

==> nsscache-version-0.42/CONTRIBUTING.md <==
Want to contribute? Great! First, read this page (including the small print at the end).
### Before you contribute
Before we can use your code, you must sign the
[Google Individual Contributor License Agreement](https://developers.google.com/open-source/cla/individual?csw=1)
(CLA), which you can do online. The CLA is necessary mainly because you own the
copyright to your changes, even after your contribution becomes part of our
codebase, so we need your permission to use and distribute your code. We also
need to be sure of various other things—for instance that you'll tell us if you
know that your code infringes on other people's patents. You don't have to sign
the CLA until after you've submitted your code for review and a member has
approved it, but you must do it before we can put your code into our codebase.
Before you start working on a larger contribution, you should get in touch with
us first through the issue tracker with your idea so that we can help out and
possibly guide you. Coordinating up front makes it much easier to avoid
frustration later on.
### Code reviews
All submissions, including submissions by project members, require review. We
use Github pull requests for this purpose.
Please format your code with github.com/google/yapf before sending pull
requests. You can install this from PyPI with `pip install yapf` or on Debian
systems as the `yapf3` package.
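
For example (a suggested invocation; `--in-place` rewrites the files, while
`--diff` previews the changes the same way the CI job does):

    pip install yapf
    yapf --in-place --recursive nss_cache nsscache
    # or preview without modifying anything:
    yapf --diff --recursive nss_cache nsscache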
### Response Time
This repository is maintained as a best effort service.
Response times to issues and PRs may vary with the availability of the
maintainers. We appreciate your patience.
PRs with unit tests will be merged promptly. All other requests (issues and
PRs) may take longer to be responded to.
### The small print
Contributions made by corporations are covered by a different agreement than
the one above, the Software Grant and Corporate Contributor License Agreement.

==> nsscache-version-0.42/COPYING <==
                    GNU GENERAL PUBLIC LICENSE
                       Version 2, June 1991
Copyright (C) 1989, 1991 Free Software Foundation, Inc.
51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The licenses for most software are designed to take away your
freedom to share and change it. By contrast, the GNU General Public
License is intended to guarantee your freedom to share and change free
software--to make sure the software is free for all its users. This
General Public License applies to most of the Free Software
Foundation's software and to any other program whose authors commit to
using it. (Some other Free Software Foundation software is covered by
the GNU Library General Public License instead.) You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
this service if you wish), that you receive source code or can get it
if you want it, that you can change the software or use pieces of it
in new free programs; and that you know you can do these things.
To protect your rights, we need to make restrictions that forbid
anyone to deny you these rights or to ask you to surrender the rights.
These restrictions translate to certain responsibilities for you if you
distribute copies of the software, or if you modify it.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must give the recipients all the rights that
you have. You must make sure that they, too, receive or can get the
source code. And you must show them these terms so they know their
rights.
We protect your rights with two steps: (1) copyright the software, and
(2) offer you this license which gives you legal permission to copy,
distribute and/or modify the software.
Also, for each author's protection and ours, we want to make certain
that everyone understands that there is no warranty for this free
software. If the software is modified by someone else and passed on, we
want its recipients to know that what they have is not the original, so
that any problems introduced by others will not reflect on the original
authors' reputations.
Finally, any free program is threatened constantly by software
patents. We wish to avoid the danger that redistributors of a free
program will individually obtain patent licenses, in effect making the
program proprietary. To prevent this, we have made it clear that any
patent must be licensed for everyone's free use or not licensed at all.
The precise terms and conditions for copying, distribution and
modification follow.
GNU GENERAL PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
0. This License applies to any program or other work which contains
a notice placed by the copyright holder saying it may be distributed
under the terms of this General Public License. The "Program", below,
refers to any such program or work, and a "work based on the Program"
means either the Program or any derivative work under copyright law:
that is to say, a work containing the Program or a portion of it,
either verbatim or with modifications and/or translated into another
language. (Hereinafter, translation is included without limitation in
the term "modification".) Each licensee is addressed as "you".
Activities other than copying, distribution and modification are not
covered by this License; they are outside its scope. The act of
running the Program is not restricted, and the output from the Program
is covered only if its contents constitute a work based on the
Program (independent of having been made by running the Program).
Whether that is true depends on what the Program does.
1. You may copy and distribute verbatim copies of the Program's
source code as you receive it, in any medium, provided that you
conspicuously and appropriately publish on each copy an appropriate
copyright notice and disclaimer of warranty; keep intact all the
notices that refer to this License and to the absence of any warranty;
and give any other recipients of the Program a copy of this License
along with the Program.
You may charge a fee for the physical act of transferring a copy, and
you may at your option offer warranty protection in exchange for a fee.
2. You may modify your copy or copies of the Program or any portion
of it, thus forming a work based on the Program, and copy and
distribute such modifications or work under the terms of Section 1
above, provided that you also meet all of these conditions:
a) You must cause the modified files to carry prominent notices
stating that you changed the files and the date of any change.
b) You must cause any work that you distribute or publish, that in
whole or in part contains or is derived from the Program or any
part thereof, to be licensed as a whole at no charge to all third
parties under the terms of this License.
c) If the modified program normally reads commands interactively
when run, you must cause it, when started running for such
interactive use in the most ordinary way, to print or display an
announcement including an appropriate copyright notice and a
notice that there is no warranty (or else, saying that you provide
a warranty) and that users may redistribute the program under
these conditions, and telling the user how to view a copy of this
License. (Exception: if the Program itself is interactive but
does not normally print such an announcement, your work based on
the Program is not required to print an announcement.)
These requirements apply to the modified work as a whole. If
identifiable sections of that work are not derived from the Program,
and can be reasonably considered independent and separate works in
themselves, then this License, and its terms, do not apply to those
sections when you distribute them as separate works. But when you
distribute the same sections as part of a whole which is a work based
on the Program, the distribution of the whole must be on the terms of
this License, whose permissions for other licensees extend to the
entire whole, and thus to each and every part regardless of who wrote it.
Thus, it is not the intent of this section to claim rights or contest
your rights to work written entirely by you; rather, the intent is to
exercise the right to control the distribution of derivative or
collective works based on the Program.
In addition, mere aggregation of another work not based on the Program
with the Program (or with a work based on the Program) on a volume of
a storage or distribution medium does not bring the other work under
the scope of this License.
3. You may copy and distribute the Program (or a work based on it,
under Section 2) in object code or executable form under the terms of
Sections 1 and 2 above provided that you also do one of the following:
a) Accompany it with the complete corresponding machine-readable
source code, which must be distributed under the terms of Sections
1 and 2 above on a medium customarily used for software interchange; or,
b) Accompany it with a written offer, valid for at least three
years, to give any third party, for a charge no more than your
cost of physically performing source distribution, a complete
machine-readable copy of the corresponding source code, to be
distributed under the terms of Sections 1 and 2 above on a medium
customarily used for software interchange; or,
c) Accompany it with the information you received as to the offer
to distribute corresponding source code. (This alternative is
allowed only for noncommercial distribution and only if you
received the program in object code or executable form with such
an offer, in accord with Subsection b above.)
The source code for a work means the preferred form of the work for
making modifications to it. For an executable work, complete source
code means all the source code for all modules it contains, plus any
associated interface definition files, plus the scripts used to
control compilation and installation of the executable. However, as a
special exception, the source code distributed need not include
anything that is normally distributed (in either source or binary
form) with the major components (compiler, kernel, and so on) of the
operating system on which the executable runs, unless that component
itself accompanies the executable.
If distribution of executable or object code is made by offering
access to copy from a designated place, then offering equivalent
access to copy the source code from the same place counts as
distribution of the source code, even though third parties are not
compelled to copy the source along with the object code.
4. You may not copy, modify, sublicense, or distribute the Program
except as expressly provided under this License. Any attempt
otherwise to copy, modify, sublicense or distribute the Program is
void, and will automatically terminate your rights under this License.
However, parties who have received copies, or rights, from you under
this License will not have their licenses terminated so long as such
parties remain in full compliance.
5. You are not required to accept this License, since you have not
signed it. However, nothing else grants you permission to modify or
distribute the Program or its derivative works. These actions are
prohibited by law if you do not accept this License. Therefore, by
modifying or distributing the Program (or any work based on the
Program), you indicate your acceptance of this License to do so, and
all its terms and conditions for copying, distributing or modifying
the Program or works based on it.
6. Each time you redistribute the Program (or any work based on the
Program), the recipient automatically receives a license from the
original licensor to copy, distribute or modify the Program subject to
these terms and conditions. You may not impose any further
restrictions on the recipients' exercise of the rights granted herein.
You are not responsible for enforcing compliance by third parties to
this License.
7. If, as a consequence of a court judgment or allegation of patent
infringement or for any other reason (not limited to patent issues),
conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot
distribute so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you
may not distribute the Program at all. For example, if a patent
license would not permit royalty-free redistribution of the Program by
all those who receive copies directly or indirectly through you, then
the only way you could satisfy both it and this License would be to
refrain entirely from distribution of the Program.
If any portion of this section is held invalid or unenforceable under
any particular circumstance, the balance of the section is intended to
apply and the section as a whole is intended to apply in other
circumstances.
It is not the purpose of this section to induce you to infringe any
patents or other property right claims or to contest validity of any
such claims; this section has the sole purpose of protecting the
integrity of the free software distribution system, which is
implemented by public license practices. Many people have made
generous contributions to the wide range of software distributed
through that system in reliance on consistent application of that
system; it is up to the author/donor to decide if he or she is willing
to distribute software through any other system and a licensee cannot
impose that choice.
This section is intended to make thoroughly clear what is believed to
be a consequence of the rest of this License.
8. If the distribution and/or use of the Program is restricted in
certain countries either by patents or by copyrighted interfaces, the
original copyright holder who places the Program under this License
may add an explicit geographical distribution limitation excluding
those countries, so that distribution is permitted only in or among
countries not thus excluded. In such case, this License incorporates
the limitation as if written in the body of this License.
9. The Free Software Foundation may publish revised and/or new versions
of the General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the Program
specifies a version number of this License which applies to it and "any
later version", you have the option of following the terms and conditions
either of that version or of any later version published by the Free
Software Foundation. If the Program does not specify a version number of
this License, you may choose any version ever published by the Free Software
Foundation.
10. If you wish to incorporate parts of the Program into other free
programs whose distribution conditions are different, write to the author
to ask for permission. For software which is copyrighted by the Free
Software Foundation, write to the Free Software Foundation; we sometimes
make exceptions for this. Our decision will be guided by the two goals
of preserving the free status of all derivatives of our free software and
of promoting the sharing and reuse of software generally.
NO WARRANTY
11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
REPAIR OR CORRECTION.
12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
POSSIBILITY OF SUCH DAMAGES.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
convey the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
Copyright (C)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
Also add information on how to contact you by electronic and paper mail.
If the program is interactive, make it output a short notice like this
when it starts in an interactive mode:
Gnomovision version 69, Copyright (C) year name of author
Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, the commands you use may
be called something other than `show w' and `show c'; they could even be
mouse-clicks or menu items--whatever suits your program.
You should also get your employer (if you work as a programmer) or your
school, if any, to sign a "copyright disclaimer" for the program, if
necessary. Here is a sample; alter the names:
Yoyodyne, Inc., hereby disclaims all copyright interest in the program
`Gnomovision' (which makes passes at compilers) written by James Hacker.
, 1 April 1989
Ty Coon, President of Vice
This General Public License does not permit incorporating your program into
proprietary programs. If your program is a subroutine library, you may
consider it more useful to permit linking proprietary applications with the
library. If this is what you want to do, use the GNU Library General
Public License instead of this License.

==> nsscache-version-0.42/Dockerfile <==
FROM python:3
RUN apt-get update
RUN apt-get install -y libnss-db libsasl2-dev libldap2-dev libssl-dev
RUN mkdir /code
WORKDIR /code
ADD requirements.txt /code/
RUN pip install -r requirements.txt
ADD . /code/
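# The default command runs the test suite, e.g. (illustrative tag name):
#   docker build -t nsscache-test . && docker run nsscache-test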
CMD [ "python", "./runtests.py" ]

==> nsscache-version-0.42/MANIFEST.in <==
include nsscache.conf
include nsscache.conf.5
include nsscache.1
include nsscache.cron
include nsscache.spec
include COPYING
include THANKS
include runtests.py
include examples/*

==> nsscache-version-0.42/Makefile <==
###
## CircleCI development targets
#
.PHONY: circleci-validate
circleci-validate: .circleci/config.yml
	circleci config validate
# Override this on the make command to say which job to run
CIRCLEJOB ?= build
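# e.g. make circleci-execute CIRCLEJOB=build (job names come from .circleci/config.yml)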
.PHONY: circleci-execute
.INTERMEDIATE: tmpconfig.yml
circleci-execute: .circleci/config.yml circleci-validate
ifeq ($(CIRCLECI),true)
$(error "Don't run this target from within CircleCI!")
endif
	circleci config process $< > tmpconfig.yml
	circleci local execute -c tmpconfig.yml --job $(CIRCLEJOB)

==> nsscache-version-0.42/README.md <==
nsscache - Asynchronously synchronise local NSS databases with remote directory services
========================================================================================

[codecov](https://codecov.io/gh/google/nsscache)
*nsscache* is a commandline tool and Python library that synchronises a local NSS cache from a remote directory service, such as LDAP.
As soon as you have more than one machine in your network, you want to share usernames between those systems. Linux administrators have been brought up on the convention of LDAP or NIS as a directory service, and `/etc/nsswitch.conf`, `nss_ldap.so`, and `nscd` to manage their nameservice lookups.
Even small networks will have experienced intermittent name lookup failures, such as a mail receiver sometimes returning "User not found" on a mailbox destination because of a slow socket over a congested network, or erratic cache behaviour by `nscd`. To combat this problem, we have separated the network from the NSS lookup codepath, by using an asynchronous cron job and a glorified script, to improve the speed and reliability of NSS lookups. We presented a talk at [linux.conf.au 2008](http://lca2008.linux.org.au/) ([PDF slides](http://mirror.linux.org.au/linux.conf.au/2008/slides/056-posix-jaq-v.pdf)) on the problems in NSS and the requirements for a solution.
Here, we present to you this glorified script, which is just a little more extensible than

    ldapsearch | awk > /etc/passwd

Read the [Google Code blog announcement](http://www.anchor.com.au/blog/2009/02/nsscache-and-ldap-reliability/) for nsscache, or more about the [motivation behind this tool](https://github.com/google/nsscache/wiki/MotivationBehindNssCache).
Here's a [testimonial from Anchor Systems](http://www.anchor.com.au/blog/2009/02/nsscache-and-ldap-reliability/) on their deployment of nsscache.
Pair *nsscache* with https://github.com/google/libnss-cache to integrate the local cache with your name service switch.
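
A minimal end-to-end sketch (assumptions: the `cache` NSS module from
libnss-cache is installed and the default map names are in use; see
`nsscache.conf(5)` for configuration):

    # /etc/nsswitch.conf: consult the local cache after flat files
    passwd: files cache
    group:  files cache
    shadow: files cache

    # /etc/cron.d/nsscache: keep the cache fresh asynchronously
    */15 * * * * root /usr/bin/nsscache update
    0 4 * * *    root /usr/bin/nsscache update --full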
---
Mailing list: https://groups.google.com/forum/#!forum/nsscache-discuss
Issue history is at https://code.google.com/p/nsscache/issues/list
---
# Contributions
Please format your code with https://github.com/google/yapf (installable as `pip install yapf` or the `yapf3` package on Debian systems) before sending pull requests.

==> nsscache-version-0.42/THANKS <==
These people have helped improve nsscache by providing patches, filing
bugs, etc.
Christian Marie (pingu)
kamil.kisiel
Berend De Schouwer
huw.lynes
Robin H. Johnson
antarus@google.com
albibek@gmail.com
javi@trackuino.org
Jesse W. Hathaway
jmartinj@ies1libertas.es
Robert Flemming
Jeff Bailey
ohookins@gmail.com
mimianddaniel@gmail.com
Kevin Bowling
Joshua Pereyda

==> nsscache-version-0.42/debian/README.Debian <==
README.Debian for nsscache
==========================
To complete installation of nsscache:
* Configure /etc/nsscache.conf
A basic configuration is given. You will want to modify the LDAP base
as appropriate for your site.
* Run `nsscache update' once. The map caches will be built per the
configuration.
* Reconfigure /etc/nsswitch.conf for your new maps.
Append `db' to each of the maps you are configured for.
E.g.:
passwd: files db
shadow: files db
group: files db
Replace `ldap' if you are no longer using that map (recommended).
-- Jamie Wilkinson , 2007-04-02

==> nsscache-version-0.42/debian/changelog <==
nsscache (0.39-2) unstable; urgency=medium
* Make the tests execute correctly under pybuild by overriding the default local to search.
* Trap NoSectionError when trying to load automount prefix and suffix mutators.
* Tell pybuild to copy in nsscache.conf as it's used as testdata for config_test.py.
-- Jamie Wilkinson Tue, 12 Nov 2019 21:43:52 +1100
nsscache (0.39-1) unstable; urgency=medium
* New upstream release.
* Move the status check to the run, because beforehand the config doesn't exist.
* Update the source options to ignore python generated files.
* Update build deps and suggests for boto3, letting the tests pass and users who want s3 buckets to be able to succeed.
* Update rules to run pytest tests now runtests is deleted upstream.
* Add a config for gbp.
-- Jamie Wilkinson Tue, 12 Nov 2019 04:04:05 +1100
nsscache (0.38-2) unstable; urgency=medium
* Ensure the autopkgtest has a bsddb dependency installed.
-- Jamie Wilkinson Sat, 09 Nov 2019 23:03:00 +1100
nsscache (0.38-1) unstable; urgency=medium
* New upstream release.
* Python3! Closes: #937164
-- Jamie Wilkinson Sat, 09 Nov 2019 03:38:45 +1100
nsscache (0.37-1) unstable; urgency=medium
* New upstream release.
-- Jamie Wilkinson Thu, 01 Nov 2018 05:29:02 +1100
nsscache (0.36-1) unstable; urgency=medium
* New upstream release.
-- Jamie Wilkinson Sun, 22 Apr 2018 12:07:04 +1000
nsscache (0.35-1) unstable; urgency=medium
* New upstream release.
* Fix lintian warning; bump standards version.
* Fix lintian warning; remove Testsuite header.
-- Jamie Wilkinson Fri, 03 Nov 2017 15:29:14 +1100
nsscache (0.34-2) unstable; urgency=medium
* Add more diagnostics to the regtest.
* Fix version numbers in manpages.
-- Jamie Wilkinson Tue, 18 Apr 2017 10:04:30 +1000
nsscache (0.34-1) unstable; urgency=medium
* New upstream release.
* Fix the path to nsscache in debci regtest, so it tests the installed
version, not from the source directory.
* Bump debian/compat to a supported version, take 2.
-- Jamie Wilkinson Thu, 13 Apr 2017 21:11:06 +1000
nsscache (0.33-3) unstable; urgency=high
* Revert change to debian/compat for the RC bugfix.
-- Jamie Wilkinson Fri, 07 Apr 2017 19:19:11 +1000
nsscache (0.33-2) unstable; urgency=high (fix RC bug)
* Update debian/links which is causing the binary to be removed.
Closes: #857087.
* Urgency=critical to fix RC-bug before autoremoval.
* Update debian/compat to a supported version.
-- Jamie Wilkinson Tue, 04 Apr 2017 19:58:41 +1000
nsscache (0.33-1) unstable; urgency=medium
* New upstream release.
-- Jamie Wilkinson Tue, 01 Nov 2016 15:32:19 +1100
nsscache (0.32-2) unstable; urgency=medium
* Build-depend on tzdata. Closes: #839435.
* Bump standards version.
* Update project homepage to GitHub.
-- Jamie Wilkinson Tue, 01 Nov 2016 14:51:20 +1100
nsscache (0.32-1) unstable; urgency=medium
* New upstream release.
-- Jamie Wilkinson Sun, 17 Jan 2016 13:08:15 +1100
nsscache (0.30-4) unstable; urgency=medium
* Allow stderr output in the autopkgtest test.
-- Jamie Wilkinson Sat, 30 May 2015 16:33:23 +1000
nsscache (0.30-3) unstable; urgency=medium
* Convert to dh-python2 from python-support.
-- Jamie Wilkinson Sat, 23 May 2015 17:25:55 +1000
nsscache (0.30-2) unstable; urgency=medium
* Add the autopkgtest testsuite back to the control file.
* Fix some path errors in the regression test.
-- Jamie Wilkinson Thu, 21 May 2015 20:01:23 +1000
nsscache (0.30-1) unstable; urgency=medium
* New upstream release.
-- Jamie Wilkinson Wed, 20 May 2015 08:56:19 +1000
nsscache (0.29-2) unstable; urgency=medium
* Add dependency on libnss-db back to build-depends, to fix a FTBFS on
amd64. Closes: #750329.
-- Jamie Wilkinson Wed, 22 Oct 2014 10:09:50 +1100
nsscache (0.29-1) unstable; urgency=medium
* New upstream release.
* Update standards version.
* Disable autopkgtest for now as the test script is broken. Closes:
#757019.
-- Jamie Wilkinson Tue, 21 Oct 2014 22:32:27 +1100
nsscache (0.27-3) unstable; urgency=medium
* Updated watchfile for github.
-- Jamie Wilkinson Tue, 21 Oct 2014 19:30:07 +1100
nsscache (0.27-2) unstable; urgency=medium
* Remove suggests: on python-zsync, which doesn't exist.
-- Jamie Wilkinson Sat, 24 May 2014 13:10:49 +1000
nsscache (0.27-1) unstable; urgency=medium
* New upstream release.
* Add autopkgtest regression test to the package.
-- Jamie Wilkinson Thu, 22 May 2014 17:49:09 +1000
nsscache (0.26-1) unstable; urgency=low
* New upstream version.
* Add authorized-keys-command.sh to examples.
* Add a dh_auto_test override target to debian/rules.
-- Jamie Wilkinson Sat, 17 May 2014 17:09:05 +1000
nsscache (0.25-1) unstable; urgency=medium
* New upstream release.
* Added packaging VCS headers to control file.
-- Jamie Wilkinson Thu, 01 May 2014 00:09:30 +1000
nsscache (0.24-1) unstable; urgency=medium
* New upstream release.
* Update standards version.
-- Jamie Wilkinson Tue, 29 Apr 2014 19:46:00 +1000
nsscache (0.23-2) unstable; urgency=low
* Refactor debian/rules to install into /usr/share/nsscache.
-- Jamie Wilkinson Mon, 07 Oct 2013 09:52:13 +1100
nsscache (0.23-1) unstable; urgency=low
* New upstream release.
-- Jamie Wilkinson Sun, 06 Oct 2013 14:43:12 +1100
nsscache (0.22-1) unstable; urgency=low
* New upstream release.
* Bump standards version.
-- Jamie Wilkinson Tue, 30 Jul 2013 11:00:45 +0000
nsscache (0.21.19-1) unstable; urgency=low
* New upstream release.
* Bump standards version.
-- Jamie Wilkinson Mon, 14 Jan 2013 22:23:44 +0000
nsscache (0.21.18-2) unstable; urgency=low
* Fix unmanaged files in tarball, by adding to debian/source/options. (Closes: #643227)
-- Jamie Wilkinson Mon, 07 Jan 2013 09:56:48 +0000
nsscache (0.21.18-1) unstable; urgency=low
* New upstream release.
-- Jamie Wilkinson Sun, 06 Jan 2013 18:52:48 +1100
nsscache (0.21.17-2) unstable; urgency=low
* Delete debugging print.
-- Jamie Wilkinson Fri, 10 Feb 2012 10:45:42 +1100
nsscache (0.21.17-1) unstable; urgency=low
* New upstream release.
* Clean up temp directories on error.
* Port unit tests to use python-mox instead of pmock.
-- Jamie Wilkinson Fri, 10 Feb 2012 10:10:42 +1100
nsscache (0.21.16-0gg2) lucid; urgency=low
* Fix subprocess communication with getent so that nsscache -v verify
works again.
-- Joel Sing Wed, 05 Oct 2011 18:21:06 +1100
nsscache (0.21.16-0gg1) unstable; urgency=low
* If a zsync fetch fails against the local cache, fall back to a full
fetch via zsync.
-- Joel Sing Wed, 08 Jun 2011 17:04:17 +1000
nsscache (0.21.15-0gg1) unstable; urgency=low
* Create indexes for the latest libnss-cache.
* Add python-mox to the build dependencies.
-- Jamie Wilkinson Mon, 16 May 2011 16:44:51 +1000
nsscache (0.21.14-0gg2) unstable; urgency=low
* Add a postinst that will clear out accidental nsscache turds from /etc.
-- Jamie Wilkinson Mon, 09 May 2011 16:24:44 +1000
nsscache (0.21.14-0gg1) unstable; urgency=low
* New upstream release.
* Raise InvalidMap if the zsync decompress fails.
* Handle EmptyMap before trying to gpg decrypt the result.
* Handle missing and multiple signatures from pyme without crashing.
* Catch InvalidMap in the command object.
-- Jamie Wilkinson Thu, 05 May 2011 14:46:23 +1000
nsscache (0.21.13-0gg1) unstable; urgency=low
* New upstream release.
* No longer falls back to pycurl to retrieve a full file if zsync
fetches fail.
-- Jamie Wilkinson Mon, 02 May 2011 14:35:13 +1000
nsscache (0.21.12-0gg2) unstable; urgency=low
* Fix package dependencies on pycurl and python-ldap.
-- Jamie Wilkinson Thu, 28 Apr 2011 10:11:19 +1000
nsscache (0.21.12-0gg1) unstable; urgency=low
* If a timestamp stored locally is more than an hour in the future,
ignore it and use 'now'.
-- Jamie Wilkinson Thu, 10 Mar 2011 10:21:32 -0800
nsscache (0.21.11-0gg1) unstable; urgency=low
* Fix the unit tests for full updates, and make the handling of the
--force-write flag act as documented.
* Update the test runner to show log output if --verbosity is enabled.
-- Jamie Wilkinson Tue, 08 Mar 2011 17:25:43 -0800
nsscache (0.21.10-0gg1) unstable; urgency=low
* New upstream release.
* Handle relative paths in the config when creating temporary directories.
-- Jamie Wilkinson Wed, 09 Feb 2011 20:34:36 -0800
nsscache (0.21.9-0gg1) unstable; urgency=low
* New upstream release.
* Fix bugs in the ldapsource module introduced by changing the
timestamp type from an int to a time.struct_time.
-- Jamie Wilkinson Mon, 07 Feb 2011 21:24:41 -0800
nsscache (0.21.8-0gg1) unstable; urgency=low
* New upstream release.
* Update the status output to be flexible, adding --template and
--automount-template options.
-- Jamie Wilkinson Tue, 01 Feb 2011 23:52:08 -0800
nsscache (0.21.7-0gg1) unstable; urgency=low
* New upstream release.
* Improve logging around adding to an automount map.
* Ignore elements of automount master that we can't parse, instead of
crashing.
* Rename automount_info to automount_mountpoint for clarity.
* Work around a bug in bdb-4.8 that doesn't like writing to a
zero-length file, instead we create a security hole race condition on
the tempfile we want it to write to. Oh well, if you use bdb for nss
caching then you are going to suffer many more problems than just this
one.
* Drop python2.3 and lower support by insisting on the set() type, and
using subprocess instead of popen*.
-- Jamie Wilkinson Mon, 31 Jan 2011 13:57:19 -0800
nsscache (0.21.6-0gg1) unstable; urgency=low
* New upstream release.
* Don't retrieve the auto.master file from the zsync source if
zsync_local_automount_master is set to yes.
* Fix a crash caused by bad code clagging between update/files.py and
update/maps.py.
-- Jamie Wilkinson Fri, 28 Jan 2011 17:58:07 -0800
nsscache (0.21.5-0gg1) unstable; urgency=low
* New upstream release.
* Change the log message formatting depending on where the logs go,
adding timestamp, level, and source location when on the console, and
removing everything but the level and message when going to syslog.
* Actually sort netgroup membership.
* Create temp files in the same directory as the destination, as
cross-filesystem renames aren't atomic.
* Create a temporary directory in the target directory, and chdir into
it, in case a zsync source is used; zsync's librcksum creates its
temporary files in the cwd, and sometimes doesn't clean up after
itself.
* Fix printing of timestamps in nsscache status output.
* Don't abort on EmptyMap errors, continue onto the next map.
-- Jamie Wilkinson Fri, 28 Jan 2011 14:04:01 -0800
nsscache (0.21.4-0gg2) unstable; urgency=low
* Put nsscache back in /usr/bin
-- Michael Haro Fri, 28 Jan 2011 04:44:34 -0800
nsscache (0.21.4-0gg1) unstable; urgency=low
* New upstream release.
* Sort netgroup membership.
* Log the time it takes to perform a whole run.
* Factor out the updater factory method from app.py.
* Don't quit the updater if one map fails, wait for all maps to finish
processing before exiting.
* Add the pid, module, and function name to the syslog format string.
-- Jamie Wilkinson Wed, 26 Jan 2011 15:57:50 -0800
nsscache (0.21.3-0gg1) unstable; urgency=low
* New upstream release.
-- Jamie Wilkinson Tue, 25 Jan 2011 23:32:39 -0800
nsscache (0.21.2-0gg2) unstable; urgency=low
* Reduce the size of netgroup files by half for ldap servers that use
memberNisNetgroup attributes like they use nisNetgroupTriple; they're
both just text fields so easy to do it incorrectly; use a set instead
of a list to remove duplicates.
* Convert timestamp handling to use time.struct_time instead of epoch
timestamps, the former lets us control the timezone of the timestamp
correctly.
-- Jamie Wilkinson Tue, 25 Jan 2011 23:11:22 -0800
nsscache (0.21.2-0gg1) unstable; urgency=low
* New upstream version.
-- Jamie Wilkinson Tue, 25 Jan 2011 16:45:15 -0800
nsscache (0.21.1-0gg1) unstable; urgency=low
* Bump version number in nss_cache/__init__.py so that the code knows
what version it's running.
-- Jamie Wilkinson Tue, 25 Jan 2011 15:01:09 -0800
nsscache (0.21-1) unstable; urgency=low
* Include exit code in Exiting message
-- Michael Haro Mon, 17 Jan 2011 22:07:46 -0800
nsscache (0.20-1) unstable; urgency=low
* Do all work in a tempdir that we clean up on a normal exit.
-- David MacKinnon Fri, 14 Jan 2011 12:24:55 +1100
nsscache (0.19-2) unstable; urgency=low
* Initial upload to Debian. (Closes: #609625)
-- Jamie Wilkinson Tue, 11 Jan 2011 00:24:54 +0000
nsscache (0.19-1) unstable; urgency=low
* Allow a 206 response code when fetching the GPG signature.
-- David MacKinnon Fri, 19 Nov 2010 11:32:13 +1100
nsscache (0.18-1) unstable; urgency=low
* Remove the norange_conn. A range of 0- rather than blank will do
the job.
-- David MacKinnon Thu, 18 Nov 2010 13:22:27 +1100
nsscache (0.17-2) unstable; urgency=low
* Bugfix for zsyncsource.Verify()
-- David MacKinnon Mon, 08 Nov 2010 10:31:49 +1100
nsscache (0.16-1) unstable; urgency=low
* Change behaviour when fetching full files. Use a separate connection
that doesn't have the range header set at all.
-- David MacKinnon Wed, 03 Nov 2010 17:29:30 +1100
nsscache (0.15-1) unstable; urgency=low
* Add Verify() to the zsync source.
-- David MacKinnon Thu, 26 Aug 2010 16:01:25 +1000
nsscache (0.14-1) unstable; urgency=low
* Treat non-existent file maps as empty.
-- David MacKinnon Thu, 15 Jul 2010 14:32:51 +1000
nsscache (0.13-1) unstable; urgency=low
* Fix some errors in the GPG handling in zsyncsource.py
-- David MacKinnon Mon, 12 Jul 2010 16:29:02 +1000
nsscache (0.12-3) unstable; urgency=low
* Create /var/lib/nsscache
-- David MacKinnon Tue, 11 May 2010 12:17:21 +1000
nsscache (0.12-2) unstable; urgency=low
* Remove unit tests and test runner from package
-- David MacKinnon Fri, 23 Apr 2010 13:19:36 +1000
nsscache (0.12-1) unstable; urgency=low
* Add support for GPG signed files to the zsyncsource
-- David MacKinnon Tue, 30 Mar 2010 15:21:03 +1100
nsscache (0.11-1) unstable; urgency=low
* Add the zsync source
* Added file based Updater classes
-- David MacKinnon Thu, 25 Mar 2010 15:51:44 +1100
nsscache (0.10-1) unstable; urgency=low
* Add support for netgroups and automounts to the HTTP file source.
-- David MacKinnon Thu, 18 Feb 2010 12:51:07 +1100
nsscache (0.9-4) unstable; urgency=low
* Install into /usr/bin instead of /usr/local/bin
-- Andrew Pollock Fri, 19 Mar 2010 15:18:39 -0700
nsscache (0.9-3) unstable; urgency=low
* Minor packaging changes for lucid
-- David MacKinnon Mon, 15 Mar 2010 16:32:38 +1100
nsscache (0.9-2) unstable; urgency=low
* Fix setup.py to include the new util package.
-- David MacKinnon Wed, 17 Feb 2010 14:44:19 +1100
nsscache (0.9-1) unstable; urgency=low
* Add a HTTPSource. This only supports the passwd/shadow/group maps for now,
and adds a pycurl dependency. The long term plan is to support zsync.
-- David MacKinnon Mon, 14 Dec 2009 12:52:16 +1100
nsscache (0.8.8-1) unstable; urgency=low
* gracefully handle a missing auto.master when using the local automount
master map -- issues a warning and skips auto.* updates.
-- V Hoffman Tue, 23 Jun 2009 15:52:05 -0700
nsscache (0.8.7-2) unstable; urgency=low
* Explicit dependency on python2.4 instead of using ${python:Depends}
as we call python2.4 as the interpreter in /usr/bin/nsscache.
-- Jamie Wilkinson Tue, 10 Mar 2009 15:21:21 +1100
nsscache (0.8.7-1) unstable; urgency=low
* Handle comments in map files.
* Stop FileCache.Write() from closing the cache file
* Handle closed cache files more gracefully in _Commit()
-- David MacKinnon Thu, 26 Feb 2009 14:01:43 +1100
nsscache (0.8.6-1) unstable; urgency=low
* Worked around set(), subprocess, ConfigParser, and logging to allow
nsscache to run on python2.3 systems. (code.google.com issue #15)
* Call flush after write and fsync before close on timestamps
and temporary files to ensure data is committed to disk before
renaming to overwrite old data, reducing chance of data loss.
-- Jamie Wilkinson Thu, 29 Jan 2009 13:16:00 +1100
nsscache (0.8.5-1) unstable; urgency=low
* add local_automaster flag to the automount section of the files backend,
to allow for local control over which automount masters are enabled and
updated
* fix files backend to not write extra whitespace with empty automount
options, and to properly parse extra whitespace on read
-- V Hoffman Wed, 19 Nov 2008 16:24:58 +0900
nsscache (0.8.4-1) unstable; urgency=low
* some style cleanups as I encountered them
* fixed timezone dependency in unit test
-- V Hoffman Wed, 12 Nov 2008 18:08:59 +0900
nsscache (0.8.3-1) unstable; urgency=low
* New upstream version
- Uses posix exit codes. (jaq)
-- Jamie Wilkinson Mon, 11 Aug 2008 12:41:33 +1000
nsscache (0.8.2-1) unstable; urgency=low
* nsscache instantiation no longer fails if syslog's /dev/log cannot be
accessed.
-- Matt T. Proud Thu, 31 Jul 2008 09:25:40 -0700
nsscache (0.8.1-2) unstable; urgency=low
* rebuild for hardy under a new package number
-- V Hoffman Wed, 30 Jul 2008 17:20:08 -0700
nsscache (0.8.1-1) unstable; urgency=low
* New upstream version
- netgroup entries use str instead of list for speed and memory benefits
-- V Hoffman Mon, 28 Jul 2008 20:15:54 -0700
nsscache (0.8.0-1) unstable; urgency=low
* New upstream version
- automount support
- fixed bug where we keep re-merging on incremental updates sometimes
-- V Hoffman Wed, 11 Jun 2008 22:01:33 -0700
nsscache (0.7.4-1) unstable; urgency=low
* New upstream version
- netgroup verify generates an info notice not a warning
-- V Hoffman Thu, 10 Apr 2008 17:33:18 -0700
nsscache (0.7.3-1) unstable; urgency=low
* New upstream version.
- verify command now recognizes when modules use libnss-cache and when
they do not.
- Add patch from 'pingu' that synchronises crypted userPassword in
the shadow map if available.
-- V Hoffman Tue, 8 Apr 2008 17:55:09 -0700
nsscache (0.7.2-1) unstable; urgency=low
* New upstream version.
- __slots__ used to reduce memory footprint for large maps.
- Write() made destructive to reduce memory footprint for large maps.
- MapEntry classes no longer use internal dict()s, to reduce memory waste.
- Removed unnecessary attribute validation in MapEntry classes for speed.
- Removed unused methods from the Map API, e.g. Remove() and UpdateKey().
-- V Hoffman Wed, 26 Mar 2008 14:01:10 -0700
nsscache (0.7.1-1) unstable; urgency=low
* New upstream version.
- 'verify' command works with non-nssdb backends.
- error messages do not print 'None' instead of the nsswitch.conf
location.
- Fix issue #1 from code.google.com where cache_map was referenced
before use.
-- Jamie Wilkinson Mon, 3 Mar 2008 14:03:41 +1100
nsscache (0.7-1) unstable; urgency=low
* New upstream version.
- 'files' backend has netgroup support
- incremental updates run faster when there are no updates
- ldapsource module no longer pulls old data on incremental updates
- modify-timestamp updates properly on incremental updates
-- V Hoffman Wed, 14 Nov 2007 09:56:09 -0800
nsscache (0.6-1) unstable; urgency=low
* New upstream version.
- 'files' backend for passwd, group, and shadow.
- Fixed rollback errors that left temporary files around.
- Temporary files now prefixed with 'nsscache' for identification.
- Numerous bugfixes and cleanups.
- nsscache is now GPL'd.
- Added a nsscache.conf.5 manpage with content.
* Add libnss-cache as a dependency, OR'd with libnss-db.
* Downgrade Recommends libdb4.3 to Suggests now that we also have a files
backend and libdb is no longer required for successful operation.
* Do not install the cron fragment, but instead package it as an example
only.
-- Jamie Wilkinson Tue, 9 Oct 2007 13:53:39 +1000
nsscache (0.5-2) unstable; urgency=low
* Change dependency on libdb to Recommends. (jaq)
-- Jamie Wilkinson Tue, 17 Jul 2007 14:14:08 +1000
nsscache (0.5-1) unstable; urgency=low
* New upstream version.
- Lazy-load the cache contents on demand, so that checking the status
doesn't take large amounts of time. (jaq)
- Add retry-on-connect to LdapSource. (vasilios)
- Fail gracefully if the LdapSource source is unreachable. (vasilios)
- Strip quotes from configuration values. (jaq)
- Cast configuration options to floats if possible. (vasilios)
- Clean up temp files if we abort before committing the cache file. (jaq)
- Improve status reporting for monitoring. (jaq)
* Depends on patched libdb to fix memory leak. (vasilios)
-- Jamie Wilkinson Mon, 25 Jun 2007 16:51:50 -0700
nsscache (0.4-1) unstable; urgency=low
* New upstream version.
-- Jamie Wilkinson Mon, 4 Jun 2007 16:51:50 -0700
nsscache (0.3.1-1) unstable; urgency=low
* New upstream release.
* Cleaned up debian/rules.
-- Jamie Wilkinson Fri, 18 May 2007 11:52:26 +1000
nsscache (0.3-1) unstable; urgency=low
* New upstream version.
* Install manpages nsscache.1 and nsscache.conf.5.
-- Jamie Wilkinson Wed, 16 May 2007 11:17:01 +1000
nsscache (0.2-2) unstable; urgency=low
* Altered cron job edit to not set the 15 minutely job at an initial
offset greater than 15 minutes.
-- Jamie Wilkinson Thu, 10 May 2007 14:22:09 +1000
nsscache (0.2-1) unstable; urgency=low
* New upstream version.
* Change command for full daily update as it is supported by nsscache
directly now.
* Set full daily update run time to a random offset between 2AM and 5AM
in postinst.
* Make the cron fragment an example file, and only copy it over if it
doesn't exist. This is done to satisfy Debian Policy which states
that a maintainer script must not alter a conffile otherwise the
user would be pestered about changes every time a package upgrades.
* Delete the generated cron fragment at package purge time.
* Updated debian/rules to clean source on debian clean target.
-- Jamie Wilkinson Tue, 8 May 2007 19:13:20 +1000
nsscache (0.1-2) unstable; urgency=low
* Fix logging error causing incremental updates to fail.
-- Jamie Wilkinson Mon, 16 Apr 2007 16:42:56 +1000
nsscache (0.1-1) unstable; urgency=low
* Rename of upstream.
- Provide, replace, and conflict with lofsync.
* Remove conflicts on nscd.
-- Jamie Wilkinson Mon, 5 Feb 2007 16:05:28 +1100
lofsync (0.1-1) unstable; urgency=low
* Initial debian packaging
-- Iustin Pop Tue, 8 Aug 2006 17:09:49 +0200

==> nsscache-version-0.42/debian/compat <==
9

==> nsscache-version-0.42/debian/control <==
Source: nsscache
Section: admin
Priority: optional
Maintainer: Jamie Wilkinson
Build-Depends: debhelper (>= 9~), python3, dh-python, python3-pycurl, python3-ldap, python3-mox3, python3-bsddb3, libnss-db, tzdata, python3-pytest-runner, python3-pytest, python3-boto3
Standards-Version: 4.1.1
Homepage: https://github.com/google/nsscache
Vcs-Browser: https://github.com/google/nsscache/tree/debian
Vcs-Git: https://github.com/google/nsscache.git -b debian
Package: nsscache
Architecture: all
Depends: ${shlibs:Depends}, ${misc:Depends}, ${python3:Depends}, python3-pycurl, python3-ldap
Provides: ${python3:Provides}
Recommends: libnss-cache | libnss-db
Suggests: python3-boto3
Description: asynchronously synchronise local NSS databases with remote directory services
 Synchronises local NSS caches, such as those served by the
 libnss-cache module, against remote directory services, such as
 LDAP, or prebuilt cache files from an HTTP server. This can be
 used alongside the libnss-cache package to keep user account
 information, groups, netgroups, and automounts up to date.
 .
 Use of nsscache and libnss-cache eliminates the need for using a
 cache daemon such as nscd with networked NSS modules such as
 libnss-ldap.

==> nsscache-version-0.42/debian/copyright <==
This package was debianized by Jamie Wilkinson on
Mon, 19 Mar 2007 09:54:10 +1000.
Copyright:
Copyright 2007-2011 Google, Inc.
License:
This package is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This package is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this package; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
On Debian systems, the complete text of the GNU General
Public License can be found in `/usr/share/common-licenses/GPL'.
nsscache-version-0.42/debian/dirs000066400000000000000000000000321402531134600170110ustar00rootroot00000000000000usr/sbin
var/lib/nsscache
nsscache-version-0.42/debian/examples000066400000000000000000000001001402531134600176620ustar00rootroot00000000000000nsscache.conf
nsscache.cron
examples/authorized-keys-command.sh
nsscache-version-0.42/debian/gbp.conf000066400000000000000000000001021402531134600175420ustar00rootroot00000000000000[DEFAULT]
upstream-tag = version/%(version)s
debian-branch=debian
nsscache-version-0.42/debian/links000066400000000000000000000000451402531134600171740ustar00rootroot00000000000000/usr/bin/nsscache /usr/sbin/nsscache
nsscache-version-0.42/debian/nsscache.cron000066400000000000000000000005441402531134600206070ustar00rootroot00000000000000# /etc/cron.d/nsscache: crontab entries for the nsscache package
SHELL=/bin/sh
PATH=/usr/bin
MAILTO=root
# update the cache 15 minutely
%MINUTE15%/15 * * * * root /usr/bin/nsscache update
# perform a full update once a day, at a time chosen during package
# configuration (between 2AM and 5AM)
%MINUTE% %HOUR% * * * root /usr/bin/nsscache update --full
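# Rendered sketch (hypothetical values chosen by postinst): with %MINUTE15%
# replaced by 7, %MINUTE% by 23, and %HOUR% by 3, the two jobs above become:
#   7/15 * * * * root /usr/bin/nsscache update
#   23 3 * * * root /usr/bin/nsscache update --full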
nsscache-version-0.42/debian/nsscache.manpages000066400000000000000000000000331402531134600214320ustar00rootroot00000000000000nsscache.1
nsscache.conf.5
nsscache-version-0.42/debian/pybuild.testfiles000066400000000000000000000000161402531134600215230ustar00rootroot00000000000000nsscache.conf
nsscache-version-0.42/debian/rules000077500000000000000000000003261402531134600172130ustar00rootroot00000000000000#!/usr/bin/make -f
# Turn on verbose mode.
export DH_VERBOSE=1
export PYBUILD_NAME=nsscache
export PYBUILD_TEST_PYTEST=1
export PYBUILD_TEST_ARGS=
%:
dh $@ --with=python3 --buildsystem=pybuild
nsscache-version-0.42/debian/source/000077500000000000000000000000001402531134600174325ustar00rootroot00000000000000nsscache-version-0.42/debian/source/format000066400000000000000000000000141402531134600206400ustar00rootroot000000000000003.0 (quilt)
nsscache-version-0.42/debian/source/options000066400000000000000000000002451402531134600210510ustar00rootroot00000000000000single-debian-patch
# Ignore files not included in release tarball.
extend-diff-ignore = "(^|/)(MANIFEST(\.in)?|rpm/.*|dist/.*|build/.*|.eggs/.*|.pytest_cache/.*)$"
nsscache-version-0.42/debian/tests/000077500000000000000000000000001402531134600172745ustar00rootroot00000000000000nsscache-version-0.42/debian/tests/control000066400000000000000000000001671402531134600207030ustar00rootroot00000000000000Tests: slapd-regtest
Restrictions: allow-stderr
Depends: @,
slapd,
ldap-utils,
libnss-db,
db-util,
python3-bsddb3
nsscache-version-0.42/debian/tests/slapd-regtest000077500000000000000000000006711402531134600220040ustar00rootroot00000000000000#!/bin/bash
set -x
if [[ -z ${ADTTMP-} ]]; then
WORKDIR=$(mktemp -d -t nsscache.regtest.XXXXXX)
ARTIFACTS=${WORKDIR}
else
WORKDIR=${ADTTMP}
ARTIFACTS=${ADT_ARTIFACTS}
fi
export WORKDIR ARTIFACTS
cleanup() {
if [[ -e "$WORKDIR/slapd.pid" ]]; then
kill -TERM $(cat $WORKDIR/slapd.pid)
fi
if [[ -z ${ADTTMP-} ]]; then
rm -rf $WORKDIR
fi
}
trap cleanup 0 INT QUIT ABRT PIPE TERM
../../tests/slapd-regtest
nsscache-version-0.42/debian/watch000066400000000000000000000002261402531134600171630ustar00rootroot00000000000000version=3
opts=filenamemangle=s/.+\/v?(\d\S*)\.tar\.gz/nsscache-$1\.tar\.gz/ \
https://github.com/google/nsscache/tags .*/version\/(\d\S*)\.tar\.gz
nsscache-version-0.42/examples/000077500000000000000000000000001402531134600165265ustar00rootroot00000000000000nsscache-version-0.42/examples/authorized-keys-command.py000077500000000000000000000275341402531134600236610ustar00rootroot00000000000000#!/usr/bin/python
# vim: ts=4 sts=4 et:
# pylint: disable=invalid-name,line-too-long
"""OpenSSH AuthorizedKeysCommand: NSSCache input Copyright 2016 Gentoo
Foundation Written by Robin H.
Johnson Distributed under the BSD-3 license.
This script returns one or more authorized keys for use by SSH, by extracting
them from a local cache file /etc/sshkey.cache.
Two variants are supported, based on the existing nsscache code:
Format 1:
username:key1
username:key2
Format 2:
username:['key1', 'key2']
Ensure this script is mentioned in the sshd_config like so:
AuthorizedKeysCommand /path/to/nsscache/authorized-keys-command.py
If you have sufficiently new OpenSSH, you can also narrow down the search:
AuthorizedKeysCommand /path/to/nsscache/authorized-keys-command.py
--username="%u" --key-type="%t" --key-fingerprint="%f" --key-blob="%k"
Future improvements:
- Validate SSH keys more strictly:
- validate options string
- validate X509 cert strings
- Implement command line options to:
- filter keys based on options better (beyond regex)
- filter keys based on comments better (beyond regex)
- filter X509 keys based on DN/subject
- support multiple inputs for conditions
- add an advanced conditional filter language
"""
from ast import literal_eval
import sys
import errno
import argparse
import re
import base64
import hashlib
import copy
import textwrap
DEFAULT_SSHKEY_CACHE = '/etc/sshkey.cache'
REGEX_BASE64 = r'(?:[A-Za-z0-9+/]{4})*(?:[A-Za-z0-9+/]{2}==|[A-Za-z0-9+/]{3}=)?'
# All of the SSH blobs start with 3 null bytes, which encode to 'AAAA' in base64
REGEX_BASE64_START3NULL = r'AAAA' + REGEX_BASE64
# This regex needs a lot of work
KEYTYPE_REGEX_STRICT = r'\b(?:ssh-(?:rsa|dss|ed25519)|ecdsa-sha2-nistp(?:256|384|521))\b'
# Docs:
# http://www.iana.org/assignments/ssh-parameters/ssh-parameters.xhtml#ssh-parameters-19
# RFC6187, etc
KEYTYPE_REGEX_LAZY_NOX509 = r'\b(?:(?:spki|pgp|x509|x509v3)-)?(?:(?:ssh|sign)-(?:rsa|dss|ed25519)|ecdsa-[0-9a-z-]+|rsa2048-sha256)(?:-cert-v01@openssh\.com|\@ssh\.com)?\b'
KEYTYPE_REGEX_LAZY_X509 = r'\bx509(?:v3)?-(?:(?:ssh|sign)-(?:rsa|dss|ed25519)|ecdsa-[0-9a-z-]+|rsa2048-sha256)(?:-cert-v01@openssh\.com|\@ssh\.com)?\b'
X509_WORDDN = r'(?:(?i)(?:Distinguished[ _-]?Name|DN|Subject)[=:]?)' # case insensitive!
KEY_REGEX = r'(.*)\s*(?:(' + KEYTYPE_REGEX_LAZY_NOX509 + r')\s+(' + REGEX_BASE64_START3NULL + r')\s*(.*)|(' + KEYTYPE_REGEX_LAZY_X509 + r')\s+(' + X509_WORDDN + '.*))'
# Group 1: options
# Branch 1:
# Group 2: keytype (any, including x509)
# Group 3: key blob (non-x509), always starts with AAAA (3 nulls in base64), no whitespace!
# Group 4: comment (non-x509)
# Branch 2:
# Group 5: keytype (x509)
# Group 6: x509 WORDDN followed by x509-specific blob or DN, including whitespace
#
# If the keytype is x509v3-*, then the data block can actually be a certificate
# XOR a base64 block.
# The cert specifier is "DN:/OU=.../SN=.../C=.." etc. By implication, this
# EXCLUDEs the use of comments, as you CANNOT detect where the DN ends.
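# Illustrative sketch (hypothetical, abbreviated key material): for a line like
#   command="true" ssh-ed25519 AAAAC3NzaC1lZDI1... alice@example.com
# re.match(KEY_REGEX, line) captures group(1) as the leading options text,
# group(2)='ssh-ed25519', group(3) as the base64 blob, and group(4) as the
# comment, while groups 5 and 6 stay None (they match only the x509 branch).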
def warning(*objs):
"""Helper function for output to stderr."""
print('WARNING: ', *objs, file=sys.stderr)
def parse_key(full_key_line):
"""Explode an authorized_keys line including options into the various
parts."""
#print(KEY_REGEX)
m = re.match(KEY_REGEX, full_key_line)
if m is None:
warning('Failed to match', full_key_line)
return (None, None, None, None)
options = m.group(1)
key_type = m.group(2)
blob = m.group(3)
comment = m.group(4)
if m.group(5) is not None:
key_type = m.group(5)
blob = m.group(6)
comment = None
return (options, key_type, blob, comment)
def fingerprint_key(keyblob, fingerprint_format='SHA256'):
"""Generate SSH key fingerprints, using the requested format."""
# Don't try to fingerprint x509 blobs
if keyblob is None or not keyblob.startswith('AAAA'):
return None
try:
binary_blob = base64.b64decode(keyblob)
except TypeError as e:
warning(e, keyblob)
return None
if fingerprint_format == 'MD5':
raw = hashlib.md5(binary_blob).digest()
return 'MD5:' + ':'.join('{:02x}'.format(c) for c in raw)  # bytes yield ints in Python 3
elif fingerprint_format in ['SHA256', 'SHA512', 'SHA1']:
h = hashlib.new(fingerprint_format)
h.update(binary_blob)
raw = h.digest()
return fingerprint_format + ':' + base64.b64encode(raw).decode('ascii').rstrip('=')
return None
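# Output shape sketch: MD5 fingerprints look like 'MD5:0f:1e:...:9a'
# (colon-separated hex pairs), while SHA1/SHA256/SHA512 fingerprints look like
# 'SHA256:<base64 digest, padding stripped>' -- the same shapes that
# ssh-keygen -l prints with -E md5 or -E sha256.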
def detect_fingerprint_format(fpr):
"""Given a fingerprint, try to detect what fingerprint format is used."""
if fpr is None:
return None
for prefix in ['SHA256', 'SHA512', 'SHA1', 'MD5']:
if fpr.startswith(prefix + ':'):
return prefix
if re.match(r'^(MD5:)?([0-9a-f]{2}:)+[0-9a-f]{2}$', fpr) is not None:
return 'MD5'
# Cannot detect the format
return None
def validate_key(candidate_key, conditions, strict=False):
# pylint: disable=invalid-name,line-too-long,too-many-locals
"""Validate a potential authorized_key line against multiple conditions."""
# Explode the key
(candidate_key_options, \
candidate_key_type, \
candidate_key_blob, \
candidate_key_comment) = parse_key(candidate_key)
# Set up our conditions with their defaults
key_type = conditions.get('key_type', None)
key_blob = conditions.get('key_blob', None)
key_fingerprint = conditions.get('key_fingerprint', None)
key_options_re = conditions.get('key_options_re', None)
key_comment_re = conditions.get('key_comment_re', None)
# Try to detect the fingerprint format
fingerprint_format = detect_fingerprint_format(key_fingerprint)
# Force MD5 prefix on old fingerprints
if fingerprint_format == 'MD5':
if not key_fingerprint.startswith('MD5:'):
key_fingerprint = 'MD5:' + key_fingerprint
# The OpenSSH base64 fingerprints drop the trailing padding; ensure we do
# the same on provided input
if fingerprint_format != 'MD5' \
and key_fingerprint is not None:
key_fingerprint = key_fingerprint.rstrip('=')
# Build the fingerprint for the candidate key
# (the func does the padding strip as well)
candidate_key_fingerprint = \
fingerprint_key(candidate_key_blob,
fingerprint_format)
match = True
strict_pass = False
if key_type is not None and \
candidate_key_type is not None:
strict_pass = True
match = match and \
(candidate_key_type == key_type)
if key_fingerprint is not None and \
candidate_key_fingerprint is not None:
strict_pass = True
match = match and \
(candidate_key_fingerprint == key_fingerprint)
if key_blob is not None and \
candidate_key_blob is not None:
strict_pass = True
match = match and \
(candidate_key_blob == key_blob)
if key_comment_re is not None and \
candidate_key_comment is not None:
strict_pass = True
match = match and \
key_comment_re.search(candidate_key_comment) is not None
if key_options_re is not None:
strict_pass = True
match = match and \
key_options_re.search(candidate_key_options) is not None
if strict:
return match and strict_pass
return match
PROG_EPILOG = textwrap.dedent("""\
Strict match will require that at least one condition matched.
Conditions marked with X may not work correctly with X509 authorized_keys lines.
""")
PROG_DESC = 'OpenSSH AuthorizedKeysCommand to read from cached keys file'
if __name__ == '__main__':
parser = argparse.ArgumentParser(
prog='AUTHKEYCMD',
description=PROG_DESC,
epilog=PROG_EPILOG,
formatter_class=argparse.RawDescriptionHelpFormatter,
add_help=False)
# Arguments
group = parser.add_argument_group('Mandatory arguments')
group.add_argument('username',
metavar='USERNAME',
nargs='?',
type=str,
help='Username')
group.add_argument('--username',
metavar='USERNAME',
dest='username_opt',
type=str,
help='Username (alternative form)')
# Conditions
group = parser.add_argument_group('Match Conditions (optional)')
group.add_argument('--key-type',
metavar='KEY-TYPE',
type=str,
help='Key type')
group.add_argument('--key-fingerprint',
'--key-fp',
metavar='KEY-FP',
type=str,
help='Key fingerprint X')
group.add_argument('--key-blob',
metavar='KEY-BLOB',
type=str,
help='Key blob (Base64 section) X')
group.add_argument('--key-comment-re',
metavar='REGEX',
type=str,
help='Regex to match on comments X')
group.add_argument('--key-options-re',
metavar='REGEX',
type=str,
help='Regex to match on options')
# Setup parameters:
group = parser.add_argument_group('Misc settings')
group.add_argument(
'--cache-file',
metavar='FILENAME',
default=DEFAULT_SSHKEY_CACHE,
type=argparse.FileType('r'),
help='Cache file [%s]' % (DEFAULT_SSHKEY_CACHE,),
)
group.add_argument('--strict',
action='store_true',
default=False,
help='Strict match required')
group.add_argument('--help', action='help', default=False, help='This help')
# Fire it all
args = parser.parse_args()
# Handle that we support both variants
lst = [args.username, args.username_opt]
cnt = lst.count(None)
if cnt == 2:
parser.error('Username was not specified')
elif cnt == 0:
parser.error(
'Username must be specified either as an option XOR argument.')
else:
args.username = [x for x in lst if x is not None][0]
# Strict makes no sense without at least one condition being specified
if args.strict:
d = copy.copy(vars(args))
for k in ['cache_file', 'strict', 'username']:
d.pop(k, None)
if not any(v is not None for v in list(d.values())):
parser.error(
'At least one condition must be specified with --strict')
if args.key_comment_re is not None:
args.key_comment_re = re.compile(args.key_comment_re)
if args.key_options_re is not None:
args.key_options_re = re.compile(args.key_options_re)
try:
key_conditions = {
'key_options_re': args.key_options_re,
'key_type': args.key_type,
'key_blob': args.key_blob,
'key_fingerprint': args.key_fingerprint,
'key_comment_re': args.key_comment_re,
}
with args.cache_file as f:
for line in f:
(username, key) = line.split(':', 1)
if username != args.username:
continue
key = key.strip()
if key.startswith('[') and key.endswith(']'):
# Python array, but handle it safely!
keys = [i.strip() for i in literal_eval(key)]
else:
# Raw key
keys = [key.strip()]
for k in keys:
if validate_key(candidate_key=k,
conditions=key_conditions,
strict=args.strict):
print(k)
except IOError as err:
if err.errno in [errno.EPERM, errno.ENOENT]:
pass
else:
raise err
nsscache-version-0.42/examples/authorized-keys-command.sh000077500000000000000000000006021402531134600236260ustar00rootroot00000000000000#!/bin/sh
# This script returns one or more authorized keys for use by SSH, by extracting
# them from a local cache file /etc/sshkey.cache.
#
# Ensure this script is mentioned in the sshd_config like so:
#
# AuthorizedKeysCommand /path/to/nsscache/authorized-keys-command.sh
awk -F: -v name="$1" '$0 ~ name {print $2}' /etc/sshkey.cache | \
tr -d "[']" | \
sed -e 's/, /\n/g'
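# Illustrative run (hypothetical cache contents): given this line in
# /etc/sshkey.cache:
#   alice:['ssh-rsa AAAAB3... alice@host', 'ssh-ed25519 AAAAC3... alice@host']
# invoking this script as: authorized-keys-command.sh alice
# strips the brackets and quotes and splits on ', ', printing one key per line.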
nsscache-version-0.42/nss_cache/000077500000000000000000000000001402531134600166365ustar00rootroot00000000000000nsscache-version-0.42/nss_cache/.DS_Store000066400000000000000000000200041402531134600203150ustar00rootroot00000000000000Bud1esbwspblob @� @� @� @cachesbwspblob�bplist00�
]ShowStatusBar[ShowPathbar[ShowToolbar[ShowTabView_ContainerShowSidebar\WindowBounds[ShowSidebar _{{842, 110}, {770, 436}} %1=I`myz{|}~��cacheslsvCblob�bplist00�
HIJ
_viewOptionsVersion_showIconPreviewWcolumns_calculateAllSizesXtextSizeZsortColumnXiconSize_useRelativeDates �"&+05:>C�
WvisibleUwidthYascendingZidentifier , Tname�WvisibleUwidthYascending#Xubiquity�
! �\dateModified�
%[dateCreated�
'(
Tsizea �
,-
Tkinds �
12
Ulabeld �
67
WversionK �
;
Xcomments �
@B�^dateLastOpened�DYdateAdded#@(Tname#@0 .@H\epy��������������������� "#$09>@ABKPRST]cefgpxz{|�����������������L�cacheslsvpblobYbplist00�
DEF
_viewOptionsVersion_showIconPreviewWcolumns_calculateAllSizesXtextSizeZsortColumnXiconSize_useRelativeDates �
#(-27<@Xcomments^dateLastOpened[dateCreatedTsizeUlabelTkindWversionTname\dateModified�
WvisibleUwidthYascendingUindex, � ��$%��
*, a�/
1d �
4
6 s �9
;K �=
� %
#@(Tname#@0 .@H\epy��������������')+,-68:;<EFHIKTUWXZcdfgirsuvx�����������H�cachesvSrnlongEDSDB `� @� @� @*, a�/
1d �
4
6 s �9
;K �=
� %
#@(Tname#@0 .@H\epy��������������')+,-68:;<EFHIKTUWXZcdfgirsuvx�����������H�cachesvSrnlongnsscache-version-0.42/nss_cache/__init__.py000066400000000000000000000021641402531134600207520ustar00rootroot00000000000000# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Library for client side caching of NSS data.
The nsscache package implements client-side caching of nss data
from various sources to different local nss storage implementations.
This file lists all the available caches, maps, and sources for the
nss_cache package.
"""
__author__ = ('jaq@google.com (Jamie Wilkinson)',
'vasilios@google.com (Vasilios Hoffman)')
__version__ = '0.42'
nsscache-version-0.42/nss_cache/app.py000066400000000000000000000220271402531134600177730ustar00rootroot00000000000000# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Main program body for nsscache.
The nsscache program is the user interface to the nss_cache package,
responsible for updating or building local persistent cache, e.g.
nss_db.
"""
__author__ = ('jaq@google.com (Jamie Wilkinson)',
'vasilios@google.com (Vasilios Hoffman)')
import logging
import logging.handlers
import optparse
import os
import socket
import sys
import nss_cache
from nss_cache import command
from nss_cache import config
from nss_cache import error
# Hack to support python 2.3's logging module
try:
BaseLoggingClass = logging.getLoggerClass()
except AttributeError:
BaseLoggingClass = logging.Logger
class NssCacheLogger(BaseLoggingClass):
"""Custom logger class for nss_cache.
This class defines two extra logging levels, VERBOSE which is for
messages that can be hidden unless asked for with -v, and DEBUG2 for
really chatty implementation details.
"""
def __init__(self, name):
logging.Logger.__init__(self, name)
logging.VERBOSE = logging.INFO - 1
logging.addLevelName(logging.VERBOSE, 'VERBOSE')
logging.DEBUG2 = logging.DEBUG - 1
logging.addLevelName(logging.DEBUG2, 'DEBUG2')
def verbose(self, msg, *args, **kwargs):
self.log(logging.VERBOSE, msg, *args, **kwargs)
def debug2(self, msg, *args, **kwargs):
self.log(logging.DEBUG2, msg, *args, **kwargs)
logging.setLoggerClass(NssCacheLogger)
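# Usage sketch (assuming a handler is configured, as NssCacheApp does below):
#   log = logging.getLogger(__name__)
#   log.verbose('shown when -v is passed')   # level between INFO and DEBUG
#   log.debug2('very chatty detail')         # level below DEBUG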
class NssCacheApp(object):
"""Main application for building/updating NSS caches."""
def __init__(self):
"""Set up the application.
See the file README.style for logging policy set up here.
"""
# default to syslog unless on a tty
try:
is_tty = os.isatty(sys.stdin.fileno())
except ValueError:
is_tty = False
if is_tty:
format_str = ('%(levelname)-8s %(asctime)-15s '
'%(filename)s:%(lineno)d: '
'%(funcName)s: '
'%(message)s')
logging.basicConfig(format=format_str)
# python2.3's basicConfig doesn't let you set the default level
logger = logging.getLogger()
logger.setLevel(logging.WARN)
else:
facility = logging.handlers.SysLogHandler.LOG_DAEMON
try:
handler = logging.handlers.SysLogHandler(address='/dev/log',
facility=facility)
except socket.error:
print('/dev/log could not be opened; falling back on stderr.')
# Omitting an argument to StreamHandler results in sys.stderr being
# used.
handler = logging.StreamHandler()
format_str = (os.path.basename(sys.argv[0]) +
'[%(process)d]: %(levelname)s %(message)s')
fmt = logging.Formatter(format_str)
handler.setFormatter(fmt)
handler.setLevel(level=logging.INFO)
logging.getLogger().addHandler(handler)
self.log = logging.getLogger(__name__)
self.parser = self._GetParser()
def _GetParser(self):
"""Sets up our parser for global options.
Args: None
Returns:
# OptionParser is from standard python module optparse
OptionParser
"""
usage = ('nsscache synchronises a local NSS cache against a '
'remote data source.\n'
'\n'
'Usage: nsscache [global options] command [command options]\n'
'\n'
'commands:\n')
command_descriptions = []
for (name, cls) in list(command.__dict__.items()):
# skip the command base object
if name == 'Command':
continue
if hasattr(cls, 'Help'):
short_help = cls().Help(short=True)
command_descriptions.append(' %-21s %.40s' %
(name.lower(), short_help.lower()))
usage += '\n'.join(command_descriptions)
version_string = ('nsscache ' + nss_cache.__version__ + '\n'
'\n'
'Copyright (c) 2007 Google, Inc.\n'
'This is free software; see the source for copying '
'conditions. There is NO\n'
'warranty; not even for MERCHANTABILITY or FITNESS '
'FOR A PARTICULAR PURPOSE.\n'
'\n'
'Written by Jamie Wilkinson and Vasilios Hoffman.')
parser = optparse.OptionParser(usage, version=version_string)
# We do not mix arguments and flags!
parser.disable_interspersed_args()
# Add options.
parser.set_defaults(verbose=False, debug=False)
parser.add_option('-v',
'--verbose',
action='store_true',
help='enable verbose output')
parser.add_option('-d',
'--debug',
action='store_true',
help='enable debugging output')
parser.add_option('-c',
'--config-file',
type='string',
help='read configuration from FILE',
metavar='FILE')
# filthy monkeypatch hack to remove the prepended 'usage: '
# TODO(jaq): we really ought to subclass OptionParser instead...
old_get_usage = parser.get_usage
def get_usage():
return old_get_usage()[7:]
parser.get_usage = get_usage
return parser
def Run(self, args, env):
"""Begin execution of nsscache.
This method loads our runtime configuration, instantiates the
appropriate Source and Cache objects, and invokes the
appropriate method based on the command given.
NOTE: We avoid calling sys.exit() and instead return an int
to our caller, who will exit with that status.
Args:
args: list of command line arguments
env: dictionary of environment variables
Returns:
POSIX exit status
"""
# Parse the commandline.
try:
(options, args) = self.parser.parse_args(args)
except SystemExit as e:
# OptionParser objects raise SystemExit (error() calls exit()
# calls sys.exit()) upon a parser error.
# This can be handled better by overriding error or monkeypatching
# our parser.
return e.code
# Initialize a configuration object.
conf = config.Config(env)
# Process the global flags.
if options.verbose:
logger = logging.getLogger()
logger.setLevel(logging.INFO)
if options.debug:
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
if options.config_file:
conf.config_file = options.config_file
self.log.info('using nss_cache library, version %s',
nss_cache.__version__)
self.log.debug('library path is %r', nss_cache.__file__)
# Identify the command to dispatch.
if not args:
print('No command given')
self.parser.print_help()
return os.EX_USAGE
# print global help if command is 'help' with no argument
if len(args) == 1 and args[0] == 'help':
self.parser.print_help()
return os.EX_OK
self.log.debug('args: %r' % args)
command_name = args.pop(0)
self.log.debug('command: %r' % command_name)
# Load the configuration from file.
config.LoadConfig(conf)
# Dispatch the command.
try:
command_callable = getattr(command, command_name.capitalize())
except AttributeError:
self.log.warning('%s is not implemented', command_name)
print(('command %r is not implemented' % command_name))
self.parser.print_help()
return os.EX_SOFTWARE
try:
retval = command_callable().Run(conf=conf, args=args)
except error.SourceUnavailable as e:
self.log.error('Problem with configured data source: %s', e)
return os.EX_TEMPFAIL
return retval
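# Dispatch sketch: a command line like 'nsscache -v update' leaves args as
# ['update'] after global option parsing; capitalize() turns that into
# 'Update', and getattr(command, 'Update') resolves the Command subclass
# whose Run() is invoked with the loaded configuration.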
nsscache-version-0.42/nss_cache/app_test.py000066400000000000000000000116571402531134600210410ustar00rootroot00000000000000# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Unit tests for nss_cache/app.py."""
__author__ = 'vasilios@google.com (Vasilios Hoffman)'
import logging
import io
import os
import sys
import unittest
from nss_cache import app
class TestNssCacheApp(unittest.TestCase):
"""Unit tests for NssCacheApp class."""
def setUp(self):
dev_null = io.StringIO()
self.stdout = sys.stdout
sys.stdout = dev_null
def tearDown(self):
sys.stdout = self.stdout
def testRun(self):
return_code = app.NssCacheApp().Run([], {})
self.assertEqual(os.EX_USAGE, return_code)
def testParseGlobalOptions(self):
a = app.NssCacheApp()
(options, args) = a.parser.parse_args(['-d', '-v', 'command'])
self.assertNotEqual(None, options.debug)
self.assertNotEqual(None, options.verbose)
self.assertEqual(['command'], args)
def testParseCommandLineDebug(self):
a = app.NssCacheApp()
(options, args) = a.parser.parse_args(['-d'])
self.assertNotEqual(None, options.debug)
(options, args) = a.parser.parse_args(['--debug'])
self.assertNotEqual(None, options.debug)
a.Run(['-d'], {})
self.assertEqual(logging.DEBUG, a.log.getEffectiveLevel())
def testParseCommandLineVerbose(self):
a = app.NssCacheApp()
(options, args) = a.parser.parse_args(['-v'])
self.assertNotEqual(None, options.verbose)
self.assertEqual([], args)
(options, args) = a.parser.parse_args(['--verbose'])
self.assertNotEqual(None, options.verbose)
self.assertEqual([], args)
a.Run(['-v'], {})
self.assertEqual(logging.INFO, a.log.getEffectiveLevel())
def testParseCommandLineVerboseDebug(self):
a = app.NssCacheApp()
a.Run(['-v', '-d'], {})
self.assertEqual(logging.DEBUG, a.log.getEffectiveLevel())
def testParseCommandLineConfigFile(self):
a = app.NssCacheApp()
(options, args) = a.parser.parse_args(['-c', 'file'])
self.assertNotEqual(None, options.config_file)
self.assertEqual([], args)
(options, args) = a.parser.parse_args(['--config-file', 'file'])
self.assertNotEqual(None, options.config_file)
self.assertEqual([], args)
def testBadOptionsCauseNoExit(self):
a = app.NssCacheApp()
stderr_buffer = io.StringIO()
old_stderr = sys.stderr
sys.stderr = stderr_buffer
self.assertEqual(2, a.Run(['--invalid'], {}))
sys.stderr = old_stderr
def testHelpOptionPrintsGlobalHelp(self):
stdout_buffer = io.StringIO()
a = app.NssCacheApp()
old_stdout = sys.stdout
sys.stdout = stdout_buffer
self.assertEqual(0, a.Run(['--help'], {}))
sys.stdout = old_stdout
self.assertNotEqual(0, stdout_buffer.tell())
(prelude, usage, commands,
options) = stdout_buffer.getvalue().split('\n\n')
self.assertTrue(prelude.startswith('nsscache synchronises'))
expected_str = 'Usage: nsscache [global options] command [command options]'
self.assertEqual(expected_str, usage)
self.assertTrue(commands.startswith('commands:'))
self.assertTrue(options.startswith('Options:'))
self.assertTrue(options.find('show this help message and exit') >= 0)
def testHelpCommandOutput(self):
# trap stdout into a StringIO
stdout_buffer = io.StringIO()
a = app.NssCacheApp()
old_stdout = sys.stdout
sys.stdout = stdout_buffer
self.assertEqual(0, a.Run(['help'], {}))
sys.stdout = old_stdout
self.assertNotEqual(0, stdout_buffer.tell())
self.assertTrue(
stdout_buffer.getvalue().find('nsscache synchronises') >= 0)
@unittest.skip("can't pass unless there's a valid config")
def testRunBadArgsPrintsGlobalHelp(self):
# trap stdout into a StringIO
stdout_buffer = io.StringIO()
old_stdout = sys.stdout
sys.stdout = stdout_buffer
# verify bad arguments calls help
return_code = app.NssCacheApp().Run(['blarg'], {})
sys.stdout = old_stdout
assert return_code == 1
assert stdout_buffer.getvalue().find('enable debugging') >= 0
if __name__ == '__main__':
unittest.main()
nsscache-version-0.42/nss_cache/caches/000077500000000000000000000000001402531134600200645ustar00rootroot00000000000000nsscache-version-0.42/nss_cache/caches/__init__.py000066400000000000000000000000001402531134600221630ustar00rootroot00000000000000nsscache-version-0.42/nss_cache/caches/cache_factory.py000066400000000000000000000057561402531134600232450ustar00rootroot00000000000000# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Package level factory implementation for cache implementations.
We use a factory instead of relying on the __init__.py module to
register cache implementations at import time. This is much more
reliable.
"""
__author__ = 'springer@google.com (Matthew Springer)'
import logging
from nss_cache.caches import files
from nss_cache.caches import nssdb
_cache_implementations = {}
def RegisterImplementation(cache_name, map_name, cache):
"""Register a Cache implementation with the CacheFactory.
Child modules are expected to call this method in the file-level scope
so that the CacheFactory is aware of them.
Args:
cache_name: (string) The name of the NSS backend.
map_name: (string) The name of the map handled by this Cache.
cache: A class type that is a subclass of Cache.
Returns: Nothing
"""
global _cache_implementations
if cache_name not in _cache_implementations:
logging.info('Registering [%s] cache for [%s].', cache_name, map_name)
_cache_implementations[cache_name] = {}
_cache_implementations[cache_name][map_name] = cache
def Create(conf, map_name, automount_mountpoint=None):
"""Cache creation factory method.
Args:
conf: a dictionary of configuration key/value pairs, including one
required attribute 'name'
map_name: a string identifying the map name to handle
automount_mountpoint: A string containing the automount mountpoint, used only
by automount maps.
Returns:
an instance of a Cache
Raises:
RuntimeError: problem instantiating the requested cache
"""
global _cache_implementations
if not _cache_implementations:
raise RuntimeError('no cache implementations exist')
cache_name = conf['name']
if cache_name not in _cache_implementations:
raise RuntimeError('cache not implemented: %r' % (cache_name,))
if map_name not in _cache_implementations[cache_name]:
raise RuntimeError('map %r not supported by cache %r' %
(map_name, cache_name))
return _cache_implementations[cache_name][map_name](
conf, map_name, automount_mountpoint=automount_mountpoint)
files.RegisterAllImplementations(RegisterImplementation)
nssdb.RegisterAllImplementations(RegisterImplementation)
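# Usage sketch (hypothetical configuration): with the registrations above in
# place, Create({'name': 'files', 'dir': '/etc'}, 'passwd') returns a
# files-backed passwd cache handler, while an unknown cache name or an
# unsupported map name raises RuntimeError.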
nsscache-version-0.42/nss_cache/caches/cache_factory_test.py000066400000000000000000000040651402531134600242740ustar00rootroot00000000000000# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Unit tests for out cache factory."""
__author__ = 'springer@google.com (Matthew Springer)'
import unittest
from nss_cache.caches import caches
from nss_cache.caches import cache_factory
class TestCacheFactory(unittest.TestCase):
def testRegister(self):
class DummyCache(caches.Cache):
pass
old_cache_implementations = cache_factory._cache_implementations
cache_factory._cache_implementations = {}
cache_factory.RegisterImplementation('dummy', 'dummy', DummyCache)
self.assertEqual(1, len(cache_factory._cache_implementations))
self.assertEqual(1, len(cache_factory._cache_implementations['dummy']))
self.assertEqual(DummyCache,
cache_factory._cache_implementations['dummy']['dummy'])
cache_factory._cache_implementations = old_cache_implementations
def testCreateWithNoImplementations(self):
old_cache_implementations = cache_factory._cache_implementations
cache_factory._cache_implementations = {}
self.assertRaises(RuntimeError, cache_factory.Create, {}, 'map_name')
cache_factory._cache_implementations = old_cache_implementations
def testThatRegularImplementationsArePresent(self):
self.assertEqual(len(cache_factory._cache_implementations), 2)
if __name__ == '__main__':
unittest.main()
nsscache-version-0.42/nss_cache/caches/caches.py000066400000000000000000000227601402531134600216730ustar00rootroot00000000000000# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Base class of cache for nsscache."""
__author__ = 'jaq@google.com (Jamie Wilkinson)'
import errno
import logging
import os
import shutil
import stat
import tempfile
from nss_cache import config
from nss_cache import error
from nss_cache.maps import automount
from nss_cache.maps import group
from nss_cache.maps import netgroup
from nss_cache.maps import passwd
from nss_cache.maps import shadow
from nss_cache.maps import sshkey
class Cache(object):
"""Abstract base class for Caches.
The Cache object represents the cache used by NSS, that we plan on
writing the NSS data to -- it is the cache that we up date so that
the NSS module has a place to retrieve data from. Typically a cache
is some form of on-disk local storage.
You can manipulate a cache directly, like asking for a Map object from
it, or giving it a Map to write out to disk. There is an Updater class
which holds the logic for taking data from Source objects and merging them
with Cache objects.
It is important to note that a new Cache is instantiated for each
'map' defined in the configuration -- allowing different Cache
storages for different NSS maps, instead of one Cache to hold them all
(and in the darkness bind them).
"""
def __init__(self, conf, map_name, automount_mountpoint=None):
"""Initialise the Cache object.
Args:
conf: A dictionary of key/value pairs
map_name: A string representation of the map type
automount_mountpoint: A string containing the automount mountpoint,
used only by automount maps.
Raises:
UnsupportedMap: for map types we don't know about
"""
super(Cache, self).__init__()
# Set up a logger for our children
self.log = logging.getLogger(__name__)
# Store config info
self.conf = conf
self.output_dir = conf.get('dir', '.')
self.automount_mountpoint = automount_mountpoint
self.map_name = map_name
# Setup the map we may be asked to load our cache into.
if map_name == config.MAP_PASSWORD:
self.data = passwd.PasswdMap()
elif map_name == config.MAP_SSHKEY:
self.data = sshkey.SshkeyMap()
elif map_name == config.MAP_GROUP:
self.data = group.GroupMap()
elif map_name == config.MAP_SHADOW:
self.data = shadow.ShadowMap()
elif map_name == config.MAP_NETGROUP:
self.data = netgroup.NetgroupMap()
elif map_name == config.MAP_AUTOMOUNT:
self.data = automount.AutomountMap()
else:
raise error.UnsupportedMap('Cache does not support %s' % map_name)
def _Begin(self):
"""Start a write transaction."""
self.log.debug('Output dir: %s', self.output_dir)
self.log.debug('CWD: %s', os.getcwd())
try:
self.temp_cache_file = tempfile.NamedTemporaryFile(
delete=False,
prefix='nsscache-cache-file-',
dir=os.path.join(os.getcwd(), self.output_dir))
self.temp_cache_filename = self.temp_cache_file.name
self.log.debug('opened temporary cache filename %r',
self.temp_cache_filename)
except OSError as e:
if e.errno == errno.EACCES:
self.log.info(
'Got OSError (%s) when trying to create temporary file', e)
raise error.PermissionDenied('OSError: ' + str(e))
raise
def _Rollback(self):
"""Rollback a write transaction."""
self.log.debug('rolling back, deleting temp cache file %r',
self.temp_cache_filename)
self.temp_cache_file.close()
# Safe file remove (ignore "no such file or directory" errors):
try:
os.remove(self.temp_cache_filename)
except OSError as e:
if e.errno != errno.ENOENT: # errno.ENOENT = no such file or directory
raise # re-raise exception if a different error occurred
def _Commit(self):
"""Ensure the cache is now the active data source for NSS.
Perform an atomic rename on the cache file to the location
expected by the NSS module. No verification of database validity
or consistency is performed here.
Returns:
Always returns True
"""
# TODO(jaq): if self.WriteModifyTimestamp() fails below, we still have a
# new cache, but we might instead want to reserve the space on
# disk for a timestamp first -- thus needing a write/commit pair
# of functions for a timestamp. Edge case, so not bothering for now.
if not self.temp_cache_file.closed:
self.temp_cache_file.flush()
os.fsync(self.temp_cache_file.fileno())
self.temp_cache_file.close()
else:
self.log.debug('temp cache file was already closed before Commit')
# We emulate the permissions of our source map to avoid bugs where
# permissions may differ (usually w/shadow map)
# Catch the case where the source file may not exist for some reason and
# choose a sensible default.
try:
shutil.copymode(self.GetCompatFilename(), self.temp_cache_filename)
stat_info = os.stat(self.GetCompatFilename())
uid = stat_info.st_uid
gid = stat_info.st_gid
os.chown(self.temp_cache_filename, uid, gid)
except OSError as e:
if e.errno == errno.ENOENT:
if self.map_name == 'sshkey':
os.chmod(self.temp_cache_filename,
stat.S_IRUSR | stat.S_IRGRP | stat.S_IROTH)
else:
os.chmod(
self.temp_cache_filename, stat.S_IRUSR | stat.S_IWUSR |
stat.S_IRGRP | stat.S_IROTH)
self.log.debug('committing temporary cache file %r to %r',
self.temp_cache_filename, self.GetCacheFilename())
os.rename(self.temp_cache_filename, self.GetCacheFilename())
return True
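# Note on atomicity: os.rename() above is atomic on POSIX filesystems when
# source and destination are on the same filesystem, so NSS readers see either
# the complete old cache or the complete new one, never a torn write.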
def GetCacheFilename(self):
"""Return the final destination pathname of the cache file."""
return os.path.join(self.output_dir, self.CACHE_FILENAME)
def GetCompatFilename(self):
"""Return the filename where the normal (not-cache) map would be."""
# TODO(jaq): Probably shouldn't hard code '/etc' here.
return os.path.join('/etc', self.map_name)
def GetMap(self, cache_filename=None):
"""Returns the map from the cache.
Must be implemented by the child class!
Args:
cache_filename: optional extra info used by the child class
Raises:
NotImplementedError: We should have been implemented by child.
"""
raise NotImplementedError('%s must implement this method!' %
self.__class__.__name__)
def GetMapLocation(self):
"""Return the location of the Map in this cache.
This is used by automount maps so far, and must be implemented in the
child class only if it is to support automount maps.
Raises:
NotImplementedError: We should have been implemented by child.
"""
raise NotImplementedError('%s must implement this method!' %
self.__class__.__name__)
def WriteMap(self, map_data=None, force_write=False):
"""Write a map to disk.
Args:
map_data: optional Map object to overwrite our current data with.
force_write: optional flag to indicate verification checks can be
ignored.
Returns:
0 if successful, 1 if not
"""
if map_data is None:
writable_map = self.data
else:
writable_map = map_data
entries_written = self.Write(writable_map)
# N.B. Write is destructive, len(writable_map) == 0 now.
# Asserting this isn't good for the unit tests, though.
#assert 0 == len(writable_map), "self.Write should be destructive."
if entries_written is None:
self.log.warning('cache write failed, exiting')
return 1
if force_write or self.Verify(entries_written):
# TODO(jaq): in the future we should handle return codes from
# Commit()
self._Commit()
# Create an index for this map.
self.WriteIndex()
return 0
self.log.warning('verification failed, exiting')
return 1
def WriteIndex(self):
"""Build an index for this cache.
No-op, but child classes may override this.
"""
pass
def Write(self, writable_map):
raise NotImplementedError
def Verify(self, entries_written):
raise NotImplementedError
nsscache-version-0.42/nss_cache/caches/caches_test.py000066400000000000000000000062001402531134600227210ustar00rootroot00000000000000# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Unit tests for caches/caches.py."""
__author__ = 'jaq@google.com (Jamie Wilkinson)'
import os
import platform
import stat
import tempfile
import unittest
from mox3 import mox
from nss_cache import config
from nss_cache.caches import caches
class FakeCacheCls(caches.Cache):
CACHE_FILENAME = 'shadow'
def __init__(self, config, map_name):
super(FakeCacheCls, self).__init__(config, map_name)
def Write(self, map_data):
return 0
def GetCacheFilename(self):
return os.path.join(self.output_dir, self.CACHE_FILENAME + '.test')
class TestCls(mox.MoxTestBase):
def setUp(self):
self.workdir = tempfile.mkdtemp()
self.config = {'dir': self.workdir}
if platform.system() == 'FreeBSD':
# FreeBSD doesn't have a shadow file
self.shadow = config.MAP_PASSWORD
else:
self.shadow = config.MAP_SHADOW
def tearDown(self):
os.rmdir(self.workdir)
def testCopyOwnerMissing(self):
expected = os.stat(os.path.join('/etc', self.shadow))
expected = stat.S_IMODE(expected.st_mode)
cache = FakeCacheCls(config=self.config, map_name=self.shadow)
cache._Begin()
cache._Commit()
data = os.stat(os.path.join(self.workdir, cache.GetCacheFilename()))
self.assertEqual(expected, stat.S_IMODE(data.st_mode))
os.unlink(cache.GetCacheFilename())
def testCopyOwnerPresent(self):
expected = os.stat(os.path.join('/etc/', self.shadow))
expected = stat.S_IMODE(expected.st_mode)
cache = FakeCacheCls(config=self.config, map_name=self.shadow)
cache._Begin()
cache._Commit()
data = os.stat(os.path.join(self.workdir, cache.GetCacheFilename()))
self.assertEqual(expected, stat.S_IMODE(data.st_mode))
os.unlink(cache.GetCacheFilename())
class TestCache(mox.MoxTestBase):
def testWriteMap(self):
cache_map = caches.Cache({}, config.MAP_PASSWORD, None)
self.mox.StubOutWithMock(cache_map, '_Commit')
self.mox.StubOutWithMock(cache_map, 'Write')
self.mox.StubOutWithMock(cache_map, 'Verify')
cache_map._Commit()
cache_map.Write('writable_map').AndReturn('entries_written')
cache_map.Verify('entries_written').AndReturn(True)
self.mox.ReplayAll()
self.assertEqual(0, cache_map.WriteMap('writable_map'))
if __name__ == '__main__':
unittest.main()
nsscache-version-0.42/nss_cache/caches/files.py000066400000000000000000000454111402531134600215450ustar00rootroot00000000000000# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""An implementation of a nss_files format local cache, with indexing.
libnss-cache is an NSS module that reads NSS data from files in /etc
that look similar to the standard ones used by nss_files, but with a
".cache" extension. It also uses an index file if one exists, in a
format created here.
"""
__author__ = ('jaq@google.com (Jamie Wilkinson)',
'vasilios@google.com (Vasilios Hoffman)')
import configparser
import errno
import os.path
import re
import shutil
import stat
import sys
from nss_cache import config
from nss_cache import error
from nss_cache.caches import caches
from nss_cache.util import file_formats
def LongestLength(l):
return len(max(l, key=len))
# Load suffix config variables
parser = configparser.ConfigParser()
for i in sys.argv:
if ('nsscache.conf') in i:
# Remove '--config-file=' from the string
if ('--config-file') in i:
i = i[14:]
parser.read(i)
elif os.path.isfile('/etc/nsscache.conf'):
parser.read('/etc/nsscache.conf')
else:
# Config in nsscache folder
parser.read('nsscache.conf')
prefix = parser.get('suffix', 'prefix', fallback='')
suffix = parser.get('suffix', 'suffix', fallback='')
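# Hypothetical nsscache.conf fragment consumed above:
#   [suffix]
#   prefix = ^server:
#   suffix = newserver:
# FilesAutomountMapHandler._WriteData below uses these to rewrite automount
# locations whose text matches the prefix regex to use the suffix instead.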
def RegisterAllImplementations(register_callback):
"""Register our cache classes independently from the import scheme."""
register_callback('files', 'passwd', FilesPasswdMapHandler)
register_callback('files', 'sshkey', FilesSshkeyMapHandler)
register_callback('files', 'group', FilesGroupMapHandler)
register_callback('files', 'shadow', FilesShadowMapHandler)
register_callback('files', 'netgroup', FilesNetgroupMapHandler)
register_callback('files', 'automount', FilesAutomountMapHandler)
class FilesCache(caches.Cache):
"""An implementation of a Cache specific to nss_files module.
This implementation creates, updates, and verifies map caches used by
nss_files module.
Child classes can define the class attribute _INDEX_ATTRIBUTES, a
sequence-type of strings containing attributes of their associated
Map type that will be built into an index for use by libnss-cache.
"""
def __init__(self, conf, map_name, automount_mountpoint=None):
"""Create a handler for the given map type.
Args:
conf: a configuration object
map_name: a string representing the type of map we are
automount_mountpoint: A string containing the automount mountpoint, used
only by automount maps.
"""
super(FilesCache,
self).__init__(conf,
map_name,
automount_mountpoint=automount_mountpoint)
# Documented in nsscache.conf example.
self.cache_filename_suffix = conf.get('cache_filename_suffix', 'cache')
# Store a dict of indexes, each containing a dict of keys to line, position
# tuples.
self._indices = {}
if hasattr(self, '_INDEX_ATTRIBUTES'):
for index in self._INDEX_ATTRIBUTES:
self._indices[index] = {}
def GetMap(self, cache_filename=None):
"""Returns the map from the cache.
Args:
cache_filename: alternative file to read, optional.
Returns:
A child of Map containing the cache data.
Raises:
CacheNotFound: The cache file we expected to read from does not exist.
"""
data = self.data
if cache_filename is None:
cache_filename = self.GetCacheFilename()
self.log.debug('Opening %r for reading existing cache', cache_filename)
if not os.path.exists(cache_filename):
self.log.warning(
'Cache file does not exist, using an empty map instead')
else:
cache_file = open(cache_filename)
data = self.map_parser.GetMap(cache_file, data)
return data
def Verify(self, written_keys):
"""Verify that the cache is correct.
Perform some unit tests on the written data, such as reading it
back and verifying that it parses and has the entries we expect.
Args:
written_keys: a set of keys that should have been written to disk.
Returns:
a boolean indicating success.
Raises:
EmptyMap: The cache being verified is empty.
"""
self.log.debug('verification starting on %r', self.temp_cache_filename)
cache_data = self.GetMap(self.temp_cache_filename)
map_entry_count = len(cache_data)
self.log.debug('entry count: %d', map_entry_count)
if map_entry_count <= 0:
# We have read in an empty map, yet we expect that earlier we
# should have written more. Uncaught disk full or other error?
self.log.error('The files cache being verified "%r" is empty.',
self.temp_cache_filename)
raise error.EmptyMap(self.temp_cache_filename + ' is empty')
cache_keys = set()
# Use PopItem() so we free our memory if multiple maps are Verify()ed.
try:
while 1:
entry = cache_data.PopItem()
cache_keys.update(self._ExpectedKeysForEntry(entry))
except KeyError:
# expected when PopItem() is done, and breaks our loop for us.
pass
missing_from_cache = written_keys - cache_keys
if missing_from_cache:
self.log.warning('verify failed: %d missing from the on-disk cache',
len(missing_from_cache))
if len(missing_from_cache) < 1000:
self.log.debug('keys missing from the on-disk cache: %r',
missing_from_cache)
else:
self.log.debug('More than 1000 keys missing from cache. '
'Not printing.')
self._Rollback()
return False
missing_from_map = cache_keys - written_keys
if missing_from_map:
self.log.warning(
'verify failed: %d keys found, unexpected in the on-disk '
'cache', len(missing_from_map))
if len(missing_from_map) < 1000:
self.log.debug('keys missing from map: %r', missing_from_map)
else:
self.log.debug(
'More than 1000 keys missing from map. Not printing.')
self._Rollback()
return False
return True
def Write(self, map_data):
"""Write the map to the cache.
Warning -- this destroys map_data as it is written. This is done to save
memory and keep our peak footprint smaller. We consume memory again
on Verify() as we read a new copy of the entries back in.
Args:
map_data: A Map subclass containing the entire map to be written.
Returns:
a set of keys written or None on failure.
"""
self._Begin()
written_keys = set()
write_offset = 0
try:
while 1:
entry = map_data.PopItem()
for index in self._indices:
self._indices[index][str(getattr(
entry, index))] = str(write_offset)
write_offset += self._WriteData(self.temp_cache_file, entry)
written_keys.update(self._ExpectedKeysForEntry(entry))
except KeyError:
# expected when PopItem() is done, and breaks our loop for us.
self.temp_cache_file.flush()
except:
self._Rollback()
raise
return written_keys
def GetCacheFilename(self):
"""Return the final destination pathname of the cache file."""
cache_filename_target = self.CACHE_FILENAME
if self.cache_filename_suffix:
cache_filename_target += '.' + self.cache_filename_suffix
return os.path.join(self.output_dir, cache_filename_target)
def WriteIndex(self):
"""Generate an index for libnss-cache from this map."""
for index_name in self._indices:
# Write the index to a tmp file first; index filenames use the magic string '.ix'.
tmp_index_filename = '%s.ix%s.tmp' % (self.GetCacheFilename(),
index_name)
self.log.debug('Writing index %s', tmp_index_filename)
index = self._indices[index_name]
key_length = LongestLength(list(index.keys()))
pos_length = LongestLength(list(index.values()))
max_length = key_length + pos_length
# Open for write/truncate
index_file = open(tmp_index_filename, 'w')
# setup permissions
try:
shutil.copymode(self.GetCompatFilename(), tmp_index_filename)
stat_info = os.stat(self.GetCompatFilename())
uid = stat_info.st_uid
gid = stat_info.st_gid
os.chown(tmp_index_filename, uid, gid)
except OSError as e:
if e.errno == errno.ENOENT:
os.chmod(
tmp_index_filename, stat.S_IRUSR | stat.S_IWUSR |
stat.S_IRGRP | stat.S_IROTH)
for key in sorted(index):
pos = index[key]
index_line = ('%s\0%s\0%s\n' %
(key, pos, '\0' *
(max_length - len(key) - len(pos))))
index_file.write(index_line)
index_file.close()
for index_name in self._indices:
# Rename the tmp index files to the target index files only after all are
# written, so lookups never see a partially built index during an update.
tmp_index_filename = '%s.ix%s.tmp' % (self.GetCacheFilename(),
index_name)
index_filename = '%s.ix%s' % (self.GetCacheFilename(), index_name)
os.rename(tmp_index_filename, index_filename)
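# Index record sketch: every line is 'key\0offset\0padding\n', NUL-padded so
# all records share one fixed length; a (hypothetical) passwd index entry for
# 'alice' at byte offset 1234 would be written as 'alice\x001234\x00...\n',
# letting libnss-cache binary-search the index by seeking fixed-size records.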
class FilesSshkeyMapHandler(FilesCache):
"""Concrete class for updating a nss_files module sshkey cache."""
CACHE_FILENAME = 'sshkey'
_INDEX_ATTRIBUTES = ('name',)
def __init__(self, conf, map_name=None, automount_mountpoint=None):
if map_name is None:
map_name = config.MAP_SSHKEY
super(FilesSshkeyMapHandler,
self).__init__(conf,
map_name,
automount_mountpoint=automount_mountpoint)
self.map_parser = file_formats.FilesSshkeyMapParser()
def _ExpectedKeysForEntry(self, entry):
"""Generate a list of expected cache keys for this type of map.
Args:
entry: A SshkeyMapEntry
Returns:
A list of strings
"""
return [entry.name]
def _WriteData(self, target, entry):
"""Write a SshekeyMapEntry to the target cache.
Args:
target: A file-like object.
entry: A SshkeyMapEntry.
Returns:
Number of bytes written to the target.
"""
sshkey_entry = '%s:%s' % (entry.name, entry.sshkey)
target.write(sshkey_entry.encode() + b'\n')
return len(sshkey_entry) + 1
class FilesPasswdMapHandler(FilesCache):
"""Concrete class for updating a nss_files module passwd cache."""
CACHE_FILENAME = 'passwd'
_INDEX_ATTRIBUTES = ('name', 'uid')
def __init__(self, conf, map_name=None, automount_mountpoint=None):
if map_name is None:
map_name = config.MAP_PASSWORD
super(FilesPasswdMapHandler,
self).__init__(conf,
map_name,
automount_mountpoint=automount_mountpoint)
self.map_parser = file_formats.FilesPasswdMapParser()
def _ExpectedKeysForEntry(self, entry):
"""Generate a list of expected cache keys for this type of map.
Args:
entry: A PasswdMapEntry
Returns:
A list of strings
"""
return [entry.name]
def _WriteData(self, target, entry):
"""Write a PasswdMapEntry to the target cache.
Args:
target: A file-like object.
entry: A PasswdMapEntry.
Returns:
Number of bytes written to the target.
"""
password_entry = '%s:%s:%d:%d:%s:%s:%s' % (
entry.name, entry.passwd, entry.uid, entry.gid, entry.gecos,
entry.dir, entry.shell)
target.write(password_entry.encode() + b'\n')
return len(password_entry) + 1
class FilesGroupMapHandler(FilesCache):
"""Concrete class for updating a nss_files module group cache."""
CACHE_FILENAME = 'group'
_INDEX_ATTRIBUTES = ('name', 'gid')
def __init__(self, conf, map_name=None, automount_mountpoint=None):
if map_name is None:
map_name = config.MAP_GROUP
super(FilesGroupMapHandler,
self).__init__(conf,
map_name,
automount_mountpoint=automount_mountpoint)
self.map_parser = file_formats.FilesGroupMapParser()
def _ExpectedKeysForEntry(self, entry):
"""Generate a list of expected cache keys for this type of map.
Args:
entry: A GroupMapEntry
Returns:
A list of strings
"""
return [entry.name]
def _WriteData(self, target, entry):
"""Write a GroupMapEntry to the target cache."""
group_entry = '%s:%s:%d:%s' % (entry.name, entry.passwd, entry.gid,
','.join(entry.members))
target.write(group_entry.encode() + b'\n')
return len(group_entry) + 1
class FilesShadowMapHandler(FilesCache):
"""Concrete class for updating a nss_files module shadow cache."""
CACHE_FILENAME = 'shadow'
_INDEX_ATTRIBUTES = ('name',)
def __init__(self, conf, map_name=None, automount_mountpoint=None):
if map_name is None:
map_name = config.MAP_SHADOW
super(FilesShadowMapHandler,
self).__init__(conf,
map_name,
automount_mountpoint=automount_mountpoint)
self.map_parser = file_formats.FilesShadowMapParser()
def _ExpectedKeysForEntry(self, entry):
"""Generate a list of expected cache keys for this type of map.
Args:
entry: A ShadowMapEntry
Returns:
A list of strings
"""
return [entry.name]
def _WriteData(self, target, entry):
"""Write a ShadowMapEntry to the target cache."""
shadow_entry = '%s:%s:%s:%s:%s:%s:%s:%s:%s' % (
entry.name, entry.passwd, entry.lstchg or '', entry.min or
'', entry.max or '', entry.warn or '', entry.inact or
'', entry.expire or '', entry.flag or '')
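        # e.g. 'root:$1$zomgmd5support:::::::' -- unset numeric fields are
        # written as empty strings.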
target.write(shadow_entry.encode() + b'\n')
return len(shadow_entry) + 1
class FilesNetgroupMapHandler(FilesCache):
"""Concrete class for updating a nss_files module netgroup cache."""
CACHE_FILENAME = 'netgroup'
_TUPLE_RE = re.compile(r'^\((.*?),(.*?),(.*?)\)$') # Do this only once.
def __init__(self, conf, map_name=None, automount_mountpoint=None):
if map_name is None:
map_name = config.MAP_NETGROUP
super(FilesNetgroupMapHandler,
self).__init__(conf,
map_name,
automount_mountpoint=automount_mountpoint)
self.map_parser = file_formats.FilesNetgroupMapParser()
def _ExpectedKeysForEntry(self, entry):
"""Generate a list of expected cache keys for this type of map.
Args:
entry: A NetgroupMapEntry
Returns:
A list of strings
"""
return [entry.name]
def _WriteData(self, target, entry):
"""Write a NetgroupMapEntry to the target cache."""
if entry.entries:
netgroup_entry = '%s %s' % (entry.name, entry.entries)
else:
netgroup_entry = entry.name
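        # e.g. 'administrators unix_admins noc_monkeys (-,zero_cool,)'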
target.write(netgroup_entry.encode() + b'\n')
return len(netgroup_entry) + 1
class FilesAutomountMapHandler(FilesCache):
"""Concrete class for updating a nss_files module automount cache."""
CACHE_FILENAME = None # we have multiple files, set as we update.
def __init__(self, conf, map_name=None, automount_mountpoint=None):
if map_name is None:
map_name = config.MAP_AUTOMOUNT
super(FilesAutomountMapHandler,
self).__init__(conf,
map_name,
automount_mountpoint=automount_mountpoint)
self.map_parser = file_formats.FilesAutomountMapParser()
if automount_mountpoint is None:
# we are dealing with the master map
self.CACHE_FILENAME = 'auto.master'
else:
            # turn /auto into auto.auto, and /usr/local into auto.usr_local
automount_mountpoint = automount_mountpoint.lstrip('/')
self.CACHE_FILENAME = 'auto.%s' % automount_mountpoint.replace(
'/', '_')
def _ExpectedKeysForEntry(self, entry):
"""Generate a list of expected cache keys for this type of map.
Args:
entry: A AutomountMapEntry
Returns:
A list of strings
"""
return [entry.key]
def _WriteData(self, target, entry):
"""Write an AutomountMapEntry to the target cache."""
        # Rewrite the location, replacing the configured prefix with the
        # configured suffix (module-level settings parsed from nsscache.conf).
pattern = re.compile(prefix)
if entry.options is not None:
if prefix != '':
                if pattern.match(entry.location):  # location starts with prefix
entry.location = re.sub(r'({0})'.format(prefix),
r'{0}'.format(suffix),
entry.location)
automount_entry = '%s %s %s' % (entry.key, entry.options,
entry.location)
else:
automount_entry = '%s %s' % (entry.key, entry.location)
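        # e.g. 'scratch -tcp,rw,intr,bg fileserver:/scratch', or just
        # 'scratch fileserver:/scratch' when no options are set.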
target.write(automount_entry.encode() + b'\n')
return len(automount_entry) + 1
def GetMapLocation(self):
"""Get the location of this map for the automount master map."""
return self.GetCacheFilename()
nsscache-version-0.42/nss_cache/caches/files_test.py000066400000000000000000000251721402531134600226060ustar00rootroot00000000000000# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Unit tests for nss_cache/caches/files.py."""
__author__ = ('jaq@google.com (Jamie Wilkinson)',
'vasilios@google.com (Vasilios Hoffman)')
import os
import shutil
import tempfile
import unittest
import sys
from mox3 import mox
from nss_cache import config
from nss_cache.maps import automount
from nss_cache.maps import group
from nss_cache.maps import netgroup
from nss_cache.maps import passwd
from nss_cache.maps import shadow
from nss_cache.caches import files
class TestFilesCache(mox.MoxTestBase):
def setUp(self):
super(TestFilesCache, self).setUp()
self.workdir = tempfile.mkdtemp()
self.config = {'dir': self.workdir}
def tearDown(self):
super(TestFilesCache, self).tearDown()
shutil.rmtree(self.workdir)
def testInstantiation(self):
cache = files.FilesCache(self.config, config.MAP_PASSWORD)
self.assertNotEqual(None, cache)
def testWrite(self):
cache = files.FilesPasswdMapHandler(self.config)
entry = passwd.PasswdMapEntry({'name': 'foo', 'uid': 10, 'gid': 10})
pmap = passwd.PasswdMap([entry])
written = cache.Write(pmap)
self.assertTrue('foo' in written)
self.assertFalse(entry in pmap) # we emptied pmap to avoid mem leaks
self.assertFalse(cache.temp_cache_file.closed)
def testCacheFilenameSuffixOption(self):
new_config = {'cache_filename_suffix': 'blarg'}
new_config.update(self.config)
cache = files.FilesCache(new_config, config.MAP_PASSWORD)
cache.CACHE_FILENAME = 'test'
self.assertEqual(os.path.join(self.workdir, 'test.blarg'),
cache.GetCacheFilename())
cache.temp_cache_file = open(os.path.join(self.workdir, 'pre-commit'),
'w')
cache.temp_cache_file.write('\n')
cache.temp_cache_filename = os.path.join(self.workdir, 'pre-commit')
cache._Commit()
expected_cache_filename = os.path.join(self.workdir, 'test.blarg')
self.assertTrue(os.path.exists(expected_cache_filename))
def testWritePasswdEntry(self):
"""We correctly write a typical entry in /etc/passwd format."""
cache = files.FilesPasswdMapHandler(self.config)
file_mock = self.mox.CreateMock(sys.stdout)
file_mock.write(b'root:x:0:0:Rootsy:/root:/bin/bash\n')
map_entry = passwd.PasswdMapEntry()
map_entry.name = 'root'
map_entry.passwd = 'x'
map_entry.uid = 0
map_entry.gid = 0
map_entry.gecos = 'Rootsy'
map_entry.dir = '/root'
map_entry.shell = '/bin/bash'
self.mox.ReplayAll()
cache._WriteData(file_mock, map_entry)
def testWriteGroupEntry(self):
"""We correctly write a typical entry in /etc/group format."""
cache = files.FilesGroupMapHandler(self.config)
file_mock = self.mox.CreateMock(sys.stdout)
file_mock.write(b'root:x:0:zero_cool,acid_burn\n')
map_entry = group.GroupMapEntry()
map_entry.name = 'root'
map_entry.passwd = 'x'
map_entry.gid = 0
map_entry.members = ['zero_cool', 'acid_burn']
self.mox.ReplayAll()
cache._WriteData(file_mock, map_entry)
def testWriteShadowEntry(self):
"""We correctly write a typical entry in /etc/shadow format."""
cache = files.FilesShadowMapHandler(self.config)
file_mock = self.mox.CreateMock(sys.stdout)
file_mock.write(b'root:$1$zomgmd5support:::::::\n')
map_entry = shadow.ShadowMapEntry()
map_entry.name = 'root'
map_entry.passwd = '$1$zomgmd5support'
self.mox.ReplayAll()
cache._WriteData(file_mock, map_entry)
def testWriteNetgroupEntry(self):
"""We correctly write a typical entry in /etc/netgroup format."""
cache = files.FilesNetgroupMapHandler(self.config)
file_mock = self.mox.CreateMock(sys.stdout)
file_mock.write(
b'administrators unix_admins noc_monkeys (-,zero_cool,)\n')
map_entry = netgroup.NetgroupMapEntry()
map_entry.name = 'administrators'
map_entry.entries = 'unix_admins noc_monkeys (-,zero_cool,)'
self.mox.ReplayAll()
cache._WriteData(file_mock, map_entry)
def testWriteAutomountEntry(self):
"""We correctly write a typical entry in /etc/auto.* format."""
cache = files.FilesAutomountMapHandler(self.config)
file_mock = self.mox.CreateMock(sys.stdout)
file_mock.write(b'scratch -tcp,rw,intr,bg fileserver:/scratch\n')
map_entry = automount.AutomountMapEntry()
map_entry.key = 'scratch'
map_entry.options = '-tcp,rw,intr,bg'
map_entry.location = 'fileserver:/scratch'
self.mox.ReplayAll()
cache._WriteData(file_mock, map_entry)
self.mox.VerifyAll()
file_mock = self.mox.CreateMock(sys.stdout)
file_mock.write(b'scratch fileserver:/scratch\n')
map_entry = automount.AutomountMapEntry()
map_entry.key = 'scratch'
map_entry.options = None
map_entry.location = 'fileserver:/scratch'
self.mox.ReplayAll()
cache._WriteData(file_mock, map_entry)
def testAutomountSetsFilename(self):
"""We set the correct filename based on mountpoint information."""
# also tests GetMapLocation() because it uses it :)
conf = {'dir': self.workdir, 'cache_filename_suffix': ''}
cache = files.FilesAutomountMapHandler(conf)
self.assertEqual(cache.GetMapLocation(),
'%s/auto.master' % self.workdir)
cache = files.FilesAutomountMapHandler(conf,
automount_mountpoint='/home')
self.assertEqual(cache.GetMapLocation(), '%s/auto.home' % self.workdir)
cache = files.FilesAutomountMapHandler(conf,
automount_mountpoint='/usr/meh')
self.assertEqual(cache.GetMapLocation(),
'%s/auto.usr_meh' % self.workdir)
def testCacheFileDoesNotExist(self):
"""Make sure we just get an empty map rather than exception."""
conf = {'dir': self.workdir, 'cache_filename_suffix': ''}
cache = files.FilesAutomountMapHandler(conf)
self.assertFalse(
os.path.exists(os.path.join(self.workdir, 'auto.master')))
data = cache.GetMap()
self.assertFalse(data)
def testIndexCreation(self):
cache = files.FilesPasswdMapHandler(self.config)
entries = [
passwd.PasswdMapEntry(dict(name='foo', uid=10, gid=10)),
passwd.PasswdMapEntry(dict(name='bar', uid=11, gid=11)),
passwd.PasswdMapEntry(dict(name='quux', uid=12, gid=11)),
]
pmap = passwd.PasswdMap(entries)
cache.Write(pmap)
cache.WriteIndex()
index_filename = cache.GetCacheFilename() + '.ixname'
self.assertTrue(os.path.exists(index_filename),
'Index not created %s' % index_filename)
with open(index_filename) as f:
self.assertEqual('bar\x0015\x00\x00\n', f.readline())
self.assertEqual('foo\x000\x00\x00\x00\n', f.readline())
self.assertEqual('quux\x0030\x00\n', f.readline())
index_filename = cache.GetCacheFilename() + '.ixuid'
self.assertTrue(os.path.exists(index_filename),
'Index not created %s' % index_filename)
with open(index_filename) as f:
self.assertEqual('10\x000\x00\x00\n', f.readline())
self.assertEqual('11\x0015\x00\n', f.readline())
self.assertEqual('12\x0030\x00\n', f.readline())
def testWriteCacheAndIndex(self):
cache = files.FilesPasswdMapHandler(self.config)
entries = [
passwd.PasswdMapEntry(dict(name='foo', uid=10, gid=10)),
passwd.PasswdMapEntry(dict(name='bar', uid=11, gid=11)),
]
pmap = passwd.PasswdMap(entries)
written = cache.Write(pmap)
cache.WriteIndex()
self.assertTrue('foo' in written)
self.assertTrue('bar' in written)
index_filename = cache.GetCacheFilename() + '.ixname'
self.assertTrue(os.path.exists(index_filename),
'Index not created %s' % index_filename)
index_filename = cache.GetCacheFilename() + '.ixuid'
self.assertTrue(os.path.exists(index_filename),
'Index not created %s' % index_filename)
entries = [
passwd.PasswdMapEntry(dict(name='foo', uid=10, gid=10)),
passwd.PasswdMapEntry(dict(name='bar', uid=11, gid=11)),
passwd.PasswdMapEntry(dict(name='quux', uid=12, gid=11)),
]
pmap = passwd.PasswdMap(entries)
written = cache.Write(pmap)
self.assertTrue('foo' in written)
self.assertTrue('bar' in written)
self.assertTrue('quux' in written)
index_filename = cache.GetCacheFilename() + '.ixname'
with open(index_filename) as f:
self.assertEqual('bar\x0015\x00\n', f.readline())
self.assertEqual('foo\x000\x00\x00\n', f.readline())
index_filename = cache.GetCacheFilename() + '.ixuid'
with open(index_filename) as f:
self.assertEqual('10\x000\x00\x00\n', f.readline())
self.assertEqual('11\x0015\x00\n', f.readline())
cache.WriteIndex()
index_filename = cache.GetCacheFilename() + '.ixname'
with open(index_filename) as f:
self.assertEqual('bar\x0015\x00\x00\n', f.readline())
self.assertEqual('foo\x000\x00\x00\x00\n', f.readline())
self.assertEqual('quux\x0030\x00\n', f.readline())
index_filename = cache.GetCacheFilename() + '.ixuid'
with open(index_filename) as f:
self.assertEqual('10\x000\x00\x00\n', f.readline())
self.assertEqual('11\x0015\x00\n', f.readline())
self.assertEqual('12\x0030\x00\n', f.readline())
if __name__ == '__main__':
unittest.main()
nsscache-version-0.42/nss_cache/caches/nssdb.py000066400000000000000000000512461402531134600215570ustar00rootroot00000000000000# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""An implementation of nss_db local cache for nsscache."""
__author__ = 'jaq@google.com (Jamie Wilkinson)'
import bsddb3
import fcntl
import os
import select
import subprocess
from nss_cache import config
from nss_cache import error
from nss_cache.caches import caches
from nss_cache.maps import group
from nss_cache.maps import passwd
from nss_cache.maps import shadow
def RegisterAllImplementations(register_callback):
"""Register our cache classes independently from the import scheme."""
register_callback('nssdb', 'passwd', NssDbPasswdHandler)
register_callback('nssdb', 'group', NssDbGroupHandler)
register_callback('nssdb', 'shadow', NssDbShadowHandler)
# TODO: Move this function somewhere it can be shared by all caches.
def is_valid_unix_name(name):
"""Return False if name has characters that are not OK for Unix usernames,
True otherwise.
Unix has certain naming restrictions for user names in passwd, shadow, etc.
Here we take a conservative approach and only block a few characters.
Args:
name: name to test
Returns: True if the name is OK, False if it contains bad characters.
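    Example (illustrative only):
      >>> is_valid_unix_name('jaq')
      True
      >>> is_valid_unix_name('bad name')
      False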
"""
    return not any(c in name for c in (' ', ':', '\n'))
class NssDbCache(caches.Cache):
"""An implementation of a Cache specific to nss_db.
nss_db uses one Berkeley DB database per map for the cache. This class
abstracts the update and write strategies for nss_db caches.
This class also provides timestamp read/write routines that are
independent of the cache storage, as nss_db provides no support for
these.
"""
UPDATE_TIMESTAMP_SUFFIX = 'nsscache-update-timestamp'
MODIFY_TIMESTAMP_SUFFIX = 'nsscache-timestamp'
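    # For the passwd map, for example, these suffixes produce files named
    # passwd.db.nsscache-update-timestamp and passwd.db.nsscache-timestamp
    # next to passwd.db itself.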
def __init__(self, conf, map_name, automount_mountpoint=None):
"""Create a handler for the given map type.
Args:
conf: a configuration object
            map_name: a string representing the type of map we are handling
automount_mountpoint: A string containing the automount mountpoint,
used only by automount maps.
Returns: A CacheMapHandler instance.
"""
super(NssDbCache,
self).__init__(conf,
map_name,
automount_mountpoint=automount_mountpoint)
self.makedb = conf.get('makedb', '/usr/bin/makedb')
def GetMap(self, cache_filename=None):
"""Returns the map from the cache.
Args:
cache_filename: unused by this implementation of caches.Cache
Returns:
a Map containing the map cache
"""
data = self.data
self._LoadBdbCacheFile(data)
return data
def _LoadBdbCacheFile(self, data):
"""Load data from bdb caches into a map.
Args:
data: a map.Map subclass
Returns:
Nothing. Cache data is loaded into the 'data' parameter.
Raises:
CacheNotFound: if the database file does not exist
"""
db_file = os.path.join(self.output_dir, self.CACHE_FILENAME)
if not os.path.exists(db_file):
self.log.debug('cache file does not exist: %r', db_file)
raise error.CacheNotFound('cache file does not exist: %r' % db_file)
db = bsddb3.btopen(db_file, 'r')
for k in db:
if self.IsMapPrimaryKey(k):
password_entry = self.ConvertValueToMapEntry(db[k])
if not data.Add(password_entry):
self.log.warning('could not add entry built from %r', db[k])
db.close()
def _SpawnMakeDb(self):
"""Run 'makedb' in a subprocess and return it to use for streaming.
Returns:
a subprocess object
"""
# TODO(jaq): this should probably raise a better exception and be handled
# gracefully
if not os.path.exists(self.makedb):
self.log.warning(
'makedb binary %s does not exist, cannot generate bdb map',
self.makedb)
return None
else:
self.log.debug('executing makedb: %s - %s', self.makedb,
self.temp_cache_filename)
# This is a race condition on the tempfile now, but db-4.8 is braindead
# and refuses to open zero length files with:
# fop_read_meta: foo: unexpected file type or format
# foo: Invalid type 5 specified
# makedb: cannot open output file `foo': Invalid argument
os.unlink(self.temp_cache_filename)
makedb = subprocess.Popen(
[self.makedb, '-', self.temp_cache_filename],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
close_fds=True)
fcntl.fcntl(makedb.stdout, fcntl.F_SETFL, os.O_NONBLOCK)
makedb.allout = ''
return makedb
def _Read(self, proc):
while len(select.select([proc.stdout], (), (), 0)[0]) > 0:
data = proc.stdout.read(-1) # Read everything
if len(data) == 0:
break # sigh... select() says there's data.
proc.allout += data
def Write(self, map_data):
"""Write the map to the cache file.
Warning -- this destroys map_data as it is written. This is done to save
memory and keep our peak footprint smaller. We consume memory again
on Verify() as we read a new copy of the entries back in.
Args:
map_data: A Map subclass
Returns:
a set of keys written or None on failure.
"""
self._Begin()
written_keys = set()
self.log.debug('Map contains %d elems', len(map_data))
enumeration_index = 0
makedb = self._SpawnMakeDb()
self.makedbproc = makedb
try:
try:
while True:
entry = map_data.PopItem()
if makedb:
self._Read(makedb)
if makedb.poll() is not None:
self.log.error(
'early exit from makedb! child output: %s',
makedb.allout)
# in this case, no matter how the child exited, we complain
return None
self.WriteData(makedb.stdin, entry, enumeration_index)
else:
self.WriteData(None, entry, enumeration_index)
written_keys.update(self.ExpectedKeysForEntry(entry))
enumeration_index += 1
except KeyError:
# expected when PopItem() is done, and breaks our loop for us.
pass
if makedb:
makedb.stdin.close()
self.log.debug('%d entries written, %d keys', enumeration_index,
len(written_keys))
# wait for subprocess to commit data before we move live.
if makedb:
makedb.wait()
self._Read(makedb)
makedb.stdout.close()
map_data = makedb.allout
if map_data:
self.log.debug('makedb output: %r', map_data)
if self._DecodeExitCode(makedb.wait()):
return written_keys
return None
else:
return written_keys
except Exception as e:
self.log.debug('Wrote %d entries before exception %s',
enumeration_index, e)
if makedb:
self.log.debug('makedb output: %s', makedb.allout)
# wait for subprocess to commit data before we roll back.
makedb.wait()
self._Rollback()
raise
def _DecodeExitCode(self, code):
"""Helper function to compute if a child exited with code 0 or not."""
return os.WIFEXITED(code) and (os.WEXITSTATUS(code) == 0)
# TODO(jaq): validate the unit tests for this code path, are we
# verifying the temp cache or the real cache?
def Verify(self, written_keys):
"""Verify that the written cache is correct.
Perform some unit tests on the written data, such as reading it
back and verifying that it loads and has the entries we expect.
Args:
written_keys: a set of keys that should have been written to disk.
Returns:
boolean indicating success.
Raises:
EmptyMap: The cache being verified is empty.
"""
self.log.debug('verification started %s', self.temp_cache_filename)
db = bsddb3.btopen(self.temp_cache_filename, 'r')
# cast keys to a set for fast __contains__ lookup in the loop
# following
cache_keys = set(db)
db.close()
written_key_count = len(written_keys)
cache_key_count = len(cache_keys)
self.log.debug('%d written keys, %d cache keys', written_key_count,
cache_key_count)
if cache_key_count <= 0 and written_key_count > 0:
# We have an empty db, yet we expect that earlier we should have
# written more. Uncaught disk full or other error?
raise error.EmptyMap
# makedb creates new keys internally. we only care that all the keys
# we tried to write out are still there. so written_keys must be a subset
# of cache_keys!
if not written_keys.issubset(cache_keys):
self.log.warning(
'verify failed: written keys missing from the on-disk'
' cache!')
intersection = written_keys.intersection(cache_keys)
missing_keys = written_keys - intersection
self.log.debug('missing: %r', missing_keys)
self._Rollback()
return False
self.log.info('verify passed: %s', self.temp_cache_filename)
return True
class NssDbPasswdHandler(NssDbCache):
"""Concrete class for updating a nss_db passwd cache."""
CACHE_FILENAME = 'passwd.db'
def __init__(self, conf, map_name=None, automount_mountpoint=None):
if map_name is None:
map_name = config.MAP_PASSWORD
super(NssDbPasswdHandler,
self).__init__(conf,
map_name,
automount_mountpoint=automount_mountpoint)
def WriteData(self, target, entry, enumeration_index):
"""Generate three entries as expected by nss_db passwd map.
nss_db keys each pwent on three keys: username, uid number, and an
enumeration index. This method writes the pwent out three times
to the target file-like object with each of these keys, each marked
specially as documented in the nss_db source db-Makefile.
Args:
target: File-like object of the makedb subprocess stdin
entry: A PasswdMapEntry
enumeration_index: The number of records processed so far.
Returns:
Nothing
"""
if not is_valid_unix_name(entry.name):
return
password_entry = '%s:%s:%d:%d:%s:%s:%s' % (
entry.name, entry.passwd, entry.uid, entry.gid, entry.gecos,
entry.dir, entry.shell)
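        # For example, the entry 'foo:x:1000:1000:foo:/:/bin/sh' at
        # enumeration_index 0 produces three makedb input lines:
        #   .foo foo:x:1000:1000:foo:/:/bin/sh
        #   =1000 foo:x:1000:1000:foo:/:/bin/sh
        #   00 foo:x:1000:1000:foo:/:/bin/sh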
# Write to makedb with each key
if target:
target.write(
('.%s %s\n' % (entry.name, password_entry)).encode('ascii'))
target.write(
('=%d %s\n' % (entry.uid, password_entry)).encode('ascii'))
target.write(('0%d %s\n' %
(enumeration_index, password_entry)).encode('ascii'))
def IsMapPrimaryKey(self, key):
"""Defines the 'primary' key for this map.
nss_db maps typically have the same entry many times in their cache
files. In order to build our representation of the cache, we need to
ignore all but one of them. This method chooses one key as the primary.
Args:
key: the database key returned from the Berkeley DB key/value pairs
Returns:
a boolean indicating truth
"""
# only take values keyed with username, known in nss_db land as the
# one starting with a dot
try:
return key.startswith(b'.')
except TypeError:
return key.startswith('.')
def ConvertValueToMapEntry(self, entry):
"""Convert a pwent-like string into a PasswdMapEntry.
Args:
entry: A string containing a pwent entry ala /etc/passwd
Returns:
a PasswdMapEntry instance
"""
if isinstance(entry, bytes):
entry = entry.decode('ascii')
elif entry.endswith('\x00'):
entry = entry[:-1]
entry = entry.split(':')
map_entry = passwd.PasswdMapEntry()
# maps expect strict typing, so convert to int as appropriate.
map_entry.name = entry[0]
map_entry.passwd = entry[1]
map_entry.uid = int(entry[2])
map_entry.gid = int(entry[3])
map_entry.gecos = entry[4]
map_entry.dir = entry[5]
map_entry.shell = entry[6]
return map_entry
def ExpectedKeysForEntry(self, entry):
"""Generate a list of expected cache keys for this entry.
Args:
entry: A PasswdMapEntry
Returns:
a list of strings
"""
if not is_valid_unix_name(entry.name):
return []
return [('.%s' % entry.name).encode('ascii'),
('=%d' % entry.uid).encode('ascii')]
class NssDbGroupHandler(NssDbCache):
"""Concrete class for updating nss_db group maps."""
CACHE_FILENAME = 'group.db'
def __init__(self, conf, map_name=None, automount_mountpoint=None):
if map_name is None:
map_name = config.MAP_GROUP
super(NssDbGroupHandler,
self).__init__(conf,
map_name,
automount_mountpoint=automount_mountpoint)
def WriteData(self, target, entry, enumeration_index):
"""Generate three entries as expected by nss_db group map.
nss_db keys each grent on three keys: group name, gid number, and an
enumeration index. This method writes the grent out three times
to the target file-like object with each of these keys, each marked
specially as documented in the nss_db source db-Makefile.
Args:
target: File-like object of the makedb subprocess stdin
entry: A GroupMapEntry
enumeration_index: The number of records processed so far.
Returns:
Nothing
"""
if not is_valid_unix_name(entry.name):
return
grent = '%s:%s:%d:%s' % (entry.name, entry.passwd, entry.gid, ','.join(
entry.members))
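        # For example, the entry 'foo:x:1000:bar' at enumeration_index 0
        # produces:
        #   .foo foo:x:1000:bar
        #   =1000 foo:x:1000:bar
        #   00 foo:x:1000:bar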
# Write to makedb with each key
if target:
target.write(('.%s %s\n' % (entry.name, grent)).encode('ascii'))
target.write(('=%d %s\n' % (entry.gid, grent)).encode('ascii'))
target.write(
('0%d %s\n' % (enumeration_index, grent)).encode('ascii'))
def IsMapPrimaryKey(self, key):
"""Defines the 'primary' key for a nss_db group.db map.
See the docstring for NssDbPasswdCache.IsMapPrimaryKey()
Args:
            key: the database key returned from bsddb.
Returns:
a boolean indicating truth
"""
# use the key designated as a 'group name' key
return key.startswith('.')
def ConvertValueToMapEntry(self, entry):
"""Convert a grent-like string into a GroupMapEntry.
Args:
entry: A string containing a grent entry ala /etc/group
Returns:
A GroupMapEntry instance
"""
if isinstance(entry, bytes):
entry = entry.decode('ascii')
elif entry.endswith('\x00'):
entry = entry[:-1]
entry = entry.split(':')
map_entry = group.GroupMapEntry()
# map entries expect strict typing, so convert as appropriate
map_entry.name = entry[0]
map_entry.passwd = entry[1]
map_entry.gid = int(entry[2])
map_entry.members = entry[3].split(',')
return map_entry
def ExpectedKeysForEntry(self, entry):
"""Generate a list of expected cache keys for this entry.
Args:
entry: A GroupMapEntry
Returns:
a list of strings
"""
if not is_valid_unix_name(entry.name):
return []
return map(lambda x: x.encode('ascii'),
['.%s' % entry.name, '=%d' % entry.gid])
class NssDbShadowHandler(NssDbCache):
"""Concrete class for updating nss_db shadow maps."""
CACHE_FILENAME = 'shadow.db'
def __init__(self, conf, map_name=None, automount_mountpoint=None):
if map_name is None:
map_name = config.MAP_SHADOW
super(NssDbShadowHandler,
self).__init__(conf,
map_name,
automount_mountpoint=automount_mountpoint)
def WriteData(self, target, entry, enumeration_index):
"""Generate three entries as expected by nss_db shadow map.
nss_db keys each shadow entry on two keys, username and enumeration
index.
This method writes out the shadow entry twice, once with each key,
each marked specially as documented in the nss_db source db-Makefile.
Args:
target: File-like object of the makedb subprocess stdin
entry: A ShadowMapEntry
enumeration_index: The number of records processed so far.
Returns:
Nothing
"""
if not is_valid_unix_name(entry.name):
return
# If the field is None, then set to empty string
shadow_entry = '%s:%s:%s:%s:%s:%s:%s:%s:%s' % (
entry.name, entry.passwd, entry.lstchg or '', entry.min or
'', entry.max or '', entry.warn or '', entry.inact or
'', entry.expire or '', entry.flag or 0)
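        # For example, the entry 'foo:!!:::::::0' at enumeration_index 0
        # produces just two makedb input lines:
        #   .foo foo:!!:::::::0
        #   00 foo:!!:::::::0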
# Write to makedb with each key
if target:
target.write(
('.%s %s\n' % (entry.name, shadow_entry)).encode('ascii'))
target.write(('0%d %s\n' %
(enumeration_index, shadow_entry)).encode('ascii'))
def IsMapPrimaryKey(self, key):
"""Defines the 'primary' key for a nss_db shadow.db map.
See the docstring for NssDbPasswdCache.IsMapPrimaryKey()
Args:
            key: the database key returned from bsddb.
Returns:
a boolean indicating truth
"""
# use the key designated as a "shadow name" key
return key.startswith('.')
def ConvertValueToMapEntry(self, entry):
"""Convert a grent-like string into a ShadowMapEntry.
Args:
entry: A string containing a grent entry ala /etc/shadow
Returns:
A ShadowMapEntry instance
"""
if isinstance(entry, bytes):
entry = entry.decode('ascii')
elif entry.endswith('\x00'):
entry = entry[:-1]
entry = entry.split(':')
map_entry = shadow.ShadowMapEntry()
# map entries expect strict typing, so convert as appropriate
map_entry.name = entry[0]
map_entry.passwd = entry[1]
if entry[2]:
map_entry.lstchg = int(entry[2])
if entry[3]:
map_entry.min = int(entry[3])
if entry[4]:
map_entry.max = int(entry[4])
if entry[5]:
map_entry.warn = int(entry[5])
if entry[6]:
map_entry.inact = int(entry[6])
if entry[7]:
map_entry.expire = int(entry[7])
if entry[8]:
map_entry.flag = int(entry[8])
return map_entry
def ExpectedKeysForEntry(self, entry):
"""Generate a list of expected cache keys for this entry.
Args:
entry: A ShadowMapEntry
Returns:
a list of strings
"""
if not is_valid_unix_name(entry.name):
return []
return [('.%s' % entry.name).encode('ascii')]
nsscache-version-0.42/nss_cache/caches/nssdb_test.py000066400000000000000000000511401402531134600226070ustar00rootroot00000000000000# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Unit tests for nss_cache/caches/nssdb.py."""
__author__ = 'jaq@google.com (Jamie Wilkinson)'
import io
import logging
import os.path
import select
import shutil
import sys
import tempfile
import time
import unittest
from mox3 import mox
from bsddb3 import btopen
from nss_cache import error
from nss_cache.caches import nssdb
from nss_cache.maps import group
from nss_cache.maps import passwd
from nss_cache.maps import shadow
def NoMakeDB():
return not os.path.exists('/usr/bin/makedb')
class MakeDbDummy(object):
allout = ""
def wait(self):
return 0
def poll(self):
return None
class TestNssDbPasswdHandler(mox.MoxTestBase):
def setUp(self):
super(TestNssDbPasswdHandler, self).setUp()
self.workdir = tempfile.mkdtemp()
def tearDown(self):
super(TestNssDbPasswdHandler, self).tearDown()
# remove the test working directory
shutil.rmtree(self.workdir)
def testConvertValueToMapEntry(self):
ent = 'foo:x:1000:1001:bar:/:/bin/sh'
updater = nssdb.NssDbPasswdHandler({})
pme = updater.ConvertValueToMapEntry(ent)
self.assertEqual('foo', pme.name)
self.assertEqual(1000, pme.uid)
self.assertEqual(1001, pme.gid)
self.assertEqual('bar', pme.gecos)
self.assertEqual('/bin/sh', pme.shell)
self.assertEqual('/', pme.dir)
def testIsMapPrimaryKey(self):
updater = nssdb.NssDbPasswdHandler({})
self.assertTrue(updater.IsMapPrimaryKey('.foo'))
self.assertFalse(updater.IsMapPrimaryKey('=1000'))
self.assertFalse(updater.IsMapPrimaryKey('00'))
def testNssDbPasswdHandlerWriteData(self):
entry_string = 'foo:x:1000:1000:foo:/:/bin/sh'
makedb_stdin = self.mox.CreateMock(io.BytesIO)
makedb_stdin.write(('.foo %s\n' % entry_string).encode('ascii'))
makedb_stdin.write(('=1000 %s\n' % entry_string).encode('ascii'))
makedb_stdin.write(('00 %s\n' % entry_string).encode('ascii'))
passwd_map = passwd.PasswdMap()
passwd_map_entry = passwd.PasswdMapEntry()
passwd_map_entry.name = 'foo'
passwd_map_entry.uid = 1000
passwd_map_entry.gid = 1000
passwd_map_entry.gecos = 'foo'
passwd_map_entry.dir = '/'
passwd_map_entry.shell = '/bin/sh'
passwd_map_entry.passwd = 'x'
self.assertTrue(passwd_map.Add(passwd_map_entry))
writer = nssdb.NssDbPasswdHandler({
'makedb': '/bin/false',
'dir': '/tmp'
})
self.mox.ReplayAll()
writer.WriteData(makedb_stdin, passwd_map_entry, 0)
def testNssDbPasswdHandlerWrite(self):
ent = 'foo:x:1000:1000:foo:/:/bin/sh'
makedb_stdin = self.mox.CreateMock(io.BytesIO)
makedb_stdin.write(('.foo %s\n' % ent).encode('ascii'))
makedb_stdin.write(('=1000 %s\n' % ent).encode('ascii'))
makedb_stdin.write(('00 %s\n' % ent).encode('ascii'))
makedb_stdin.close()
makedb_stdout = self.mox.CreateMock(io.BytesIO)
makedb_stdout.read(-1).AndReturn('')
makedb_stdout.close()
m = passwd.PasswdMap()
pw = passwd.PasswdMapEntry()
pw.name = 'foo'
pw.uid = 1000
pw.gid = 1000
pw.gecos = 'foo'
pw.dir = '/'
pw.shell = '/bin/sh'
pw.passwd = 'x'
pw.Verify()
self.assertTrue(m.Add(pw))
self.mox.StubOutWithMock(select, 'select')
select.select([makedb_stdout], (), (), 0).AndReturn(([37], [], []))
select.select([makedb_stdout], (), (), 0).AndReturn(([], [], []))
def SpawnMakeDb():
makedb = MakeDbDummy()
makedb.stdin = makedb_stdin
makedb.stdout = makedb_stdout
return makedb
writer = nssdb.NssDbPasswdHandler({
'makedb': '/usr/bin/makedb',
'dir': self.workdir
})
writer._SpawnMakeDb = SpawnMakeDb
self.mox.ReplayAll()
writer.Write(m)
tmppasswd = os.path.join(self.workdir, 'passwd.db')
self.assertFalse(os.path.exists(tmppasswd))
# just clean it up, Write() doesn't Commit()
writer._Rollback()
@unittest.skipIf(NoMakeDB(), 'no /usr/bin/makedb')
def testVerify(self):
# create a map
m = passwd.PasswdMap()
e = passwd.PasswdMapEntry()
e.name = 'foo'
e.uid = 1000
e.gid = 2000
self.assertTrue(m.Add(e))
updater = nssdb.NssDbPasswdHandler({
'dir': self.workdir,
'makedb': '/usr/bin/makedb'
})
written = updater.Write(m)
self.assertTrue(os.path.exists(updater.temp_cache_filename),
'updater.Write() did not create a file')
retval = updater.Verify(written)
self.assertEqual(True, retval)
os.unlink(updater.temp_cache_filename)
@unittest.skipIf(NoMakeDB(), 'no /usr/bin/makedb')
def testVerifyFailure(self):
# create a map
m = passwd.PasswdMap()
e = passwd.PasswdMapEntry()
e.name = 'foo'
e.uid = 1000
e.gid = 2000
self.assertTrue(m.Add(e))
updater = nssdb.NssDbPasswdHandler({
'dir': self.workdir,
'makedb': '/usr/bin/makedb'
})
written = updater.Write(m)
self.assertTrue(os.path.exists(updater.temp_cache_filename),
'updater.Write() did not create a file')
# change the cache
db = btopen(updater.temp_cache_filename)
del db[db.first()[0]]
db.sync()
db.close()
retval = updater.Verify(written)
self.assertEqual(False, retval)
self.assertFalse(
os.path.exists(os.path.join(updater.temp_cache_filename)))
def testVerifyEmptyMap(self):
updater = nssdb.NssDbPasswdHandler({'dir': self.workdir})
# create a temp file, clag it into the updater object
(_, temp_filename) = tempfile.mkstemp(prefix='nsscache-nssdb_test',
dir=self.workdir)
updater.temp_cache_filename = temp_filename
# make it empty
db = btopen(temp_filename, 'w')
self.assertEqual(0, len(db))
db.close()
# TODO(jaq): raising an exception is probably the wrong behaviour
self.assertRaises(error.EmptyMap, updater.Verify, set('foo'))
os.unlink(temp_filename)
class TestNssDbGroupHandler(mox.MoxTestBase):
def setUp(self):
super(TestNssDbGroupHandler, self).setUp()
self.workdir = tempfile.mkdtemp()
def tearDown(self):
super(TestNssDbGroupHandler, self).tearDown()
# remove the test working directory
shutil.rmtree(self.workdir)
def testConvertValueToMapEntry(self):
ent = 'foo:x:1000:bar'
updater = nssdb.NssDbGroupHandler({})
gme = updater.ConvertValueToMapEntry(ent)
self.assertEqual('foo', gme.name)
self.assertEqual(1000, gme.gid)
self.assertEqual('x', gme.passwd)
self.assertEqual(['bar'], gme.members)
def testIsMapPrimaryKey(self):
updater = nssdb.NssDbGroupHandler({})
self.assertTrue(updater.IsMapPrimaryKey('.foo'))
self.assertFalse(updater.IsMapPrimaryKey('=1000'))
self.assertFalse(updater.IsMapPrimaryKey('00'))
def testNssDbGroupHandlerWriteData(self):
ent = 'foo:x:1000:bar'
makedb_stdin = self.mox.CreateMock(io.BytesIO)
makedb_stdin.write(('.foo %s\n' % ent).encode('ascii'))
makedb_stdin.write(('=1000 %s\n' % ent).encode('ascii'))
makedb_stdin.write(('00 %s\n' % ent).encode('ascii'))
m = group.GroupMap()
g = group.GroupMapEntry()
g.name = 'foo'
g.gid = 1000
g.passwd = 'x'
g.members = ['bar']
self.assertTrue(m.Add(g))
writer = nssdb.NssDbGroupHandler({
'makedb': '/bin/false',
'dir': '/tmp'
})
self.mox.ReplayAll()
writer.WriteData(makedb_stdin, g, 0)
def testNssDbGroupHandlerWrite(self):
ent = 'foo:x:1000:bar'
makedb_stdin = self.mox.CreateMock(io.BytesIO)
makedb_stdin.write(('.foo %s\n' % ent).encode('ascii'))
makedb_stdin.write(('=1000 %s\n' % ent).encode('ascii'))
makedb_stdin.write(('00 %s\n' % ent).encode('ascii'))
makedb_stdin.close()
makedb_stdout = self.mox.CreateMock(io.BytesIO)
makedb_stdout.read(-1).AndReturn('')
makedb_stdout.close()
m = group.GroupMap()
g = group.GroupMapEntry()
g.name = 'foo'
g.gid = 1000
g.passwd = 'x'
g.members = ['bar']
g.Verify()
self.assertTrue(m.Add(g))
self.mox.StubOutWithMock(select, 'select')
select.select([makedb_stdout], (), (), 0).AndReturn(([37], [], []))
select.select([makedb_stdout], (), (), 0).AndReturn(([], [], []))
def SpawnMakeDb():
makedb = MakeDbDummy()
makedb.stdin = makedb_stdin
makedb.stdout = makedb_stdout
return makedb
writer = nssdb.NssDbGroupHandler({
'makedb': '/usr/bin/makedb',
'dir': self.workdir
})
writer._SpawnMakeDb = SpawnMakeDb
self.mox.ReplayAll()
writer.Write(m)
tmpgroup = os.path.join(self.workdir, 'group.db')
self.assertFalse(os.path.exists(tmpgroup))
# just clean it up, Write() doesn't Commit()
writer._Rollback()
@unittest.skipIf(NoMakeDB(), 'no /usr/bin/makedb')
def testVerify(self):
# create a map
m = group.GroupMap()
e = group.GroupMapEntry()
e.name = 'foo'
e.gid = 2000
self.assertTrue(m.Add(e))
updater = nssdb.NssDbGroupHandler({
'dir': self.workdir,
'makedb': '/usr/bin/makedb'
})
written = updater.Write(m)
self.assertTrue(os.path.exists(updater.temp_cache_filename),
'updater.Write() did not create a file')
retval = updater.Verify(written)
self.assertEqual(True, retval)
os.unlink(updater.temp_cache_filename)
@unittest.skipIf(NoMakeDB(), 'no /usr/bin/makedb')
def testVerifyFailure(self):
# create a map
m = group.GroupMap()
e = group.GroupMapEntry()
e.name = 'foo'
e.gid = 2000
self.assertTrue(m.Add(e))
updater = nssdb.NssDbGroupHandler({
'dir': self.workdir,
'makedb': '/usr/bin/makedb'
})
written = updater.Write(m)
self.assertTrue(os.path.exists(updater.temp_cache_filename),
'updater.Write() did not create a file')
# change the cache
db = btopen(updater.temp_cache_filename)
del db[db.first()[0]]
db.sync()
db.close()
retval = updater.Verify(written)
self.assertEqual(False, retval)
self.assertFalse(
os.path.exists(os.path.join(updater.temp_cache_filename)))
class TestNssDbShadowHandler(mox.MoxTestBase):
def setUp(self):
super(TestNssDbShadowHandler, self).setUp()
self.workdir = tempfile.mkdtemp()
def tearDown(self):
super(TestNssDbShadowHandler, self).tearDown()
# remove the test working directory
shutil.rmtree(self.workdir)
def testConvertValueToMapEntry(self):
ent = 'foo:*:::::::0'
updater = nssdb.NssDbShadowHandler({})
sme = updater.ConvertValueToMapEntry(ent)
self.assertEqual('foo', sme.name)
self.assertEqual('*', sme.passwd)
self.assertEqual(0, sme.flag)
def testIsMapPrimaryKey(self):
updater = nssdb.NssDbShadowHandler({})
self.assertTrue(updater.IsMapPrimaryKey('.foo'))
self.assertFalse(updater.IsMapPrimaryKey('00'))
def testNssDbShadowHandlerWriteData(self):
ent = 'foo:!!:::::::0'
makedb_stdin = self.mox.CreateMock(io.BytesIO)
makedb_stdin.write(('.foo %s\n' % ent).encode('ascii'))
makedb_stdin.write(('00 %s\n' % ent).encode('ascii'))
m = shadow.ShadowMap()
s = shadow.ShadowMapEntry()
s.name = 'foo'
self.assertTrue(m.Add(s))
writer = nssdb.NssDbShadowHandler({
'makedb': '/bin/false',
'dir': '/tmp'
})
self.mox.ReplayAll()
writer.WriteData(makedb_stdin, s, 0)
def testNssDbShadowHandlerWrite(self):
ent = 'foo:*:::::::0'
makedb_stdin = self.mox.CreateMock(io.BytesIO)
makedb_stdin.write(('.foo %s\n' % ent).encode('ascii'))
makedb_stdin.write(('00 %s\n' % ent).encode('ascii'))
makedb_stdin.close()
makedb_stdout = self.mox.CreateMock(io.BytesIO)
makedb_stdout.read(-1).AndReturn('')
makedb_stdout.close()
m = shadow.ShadowMap()
s = shadow.ShadowMapEntry()
s.name = 'foo'
s.passwd = '*'
s.Verify()
self.assertTrue(m.Add(s))
self.mox.StubOutWithMock(select, 'select')
select.select([makedb_stdout], (), (), 0).AndReturn(([37], [], []))
select.select([makedb_stdout], (), (), 0).AndReturn(([], [], []))
def SpawnMakeDb():
makedb = MakeDbDummy()
makedb.stdin = makedb_stdin
makedb.stdout = makedb_stdout
return makedb
writer = nssdb.NssDbShadowHandler({
'makedb': '/usr/bin/makedb',
'dir': self.workdir
})
writer._SpawnMakeDb = SpawnMakeDb
self.mox.ReplayAll()
writer.Write(m)
tmpshadow = os.path.join(self.workdir, 'shadow.db')
self.assertFalse(os.path.exists(tmpshadow))
# just clean it up, Write() doesn't Commit()
writer._Rollback()
@unittest.skipIf(NoMakeDB(), 'no /usr/bin/makedb')
def testVerify(self):
m = shadow.ShadowMap()
s = shadow.ShadowMapEntry()
s.name = 'foo'
self.assertTrue(m.Add(s))
updater = nssdb.NssDbShadowHandler({
'dir': self.workdir,
'makedb': '/usr/bin/makedb'
})
written = updater.Write(m)
self.assertTrue(os.path.exists(updater.temp_cache_filename),
'updater.Write() did not create a file')
retval = updater.Verify(written)
self.assertEqual(True, retval)
os.unlink(updater.temp_cache_filename)
@unittest.skipIf(NoMakeDB(), 'no /usr/bin/makedb')
def testVerifyFailure(self):
# create a map
m = shadow.ShadowMap()
s = shadow.ShadowMapEntry()
s.name = 'foo'
self.assertTrue(m.Add(s))
updater = nssdb.NssDbShadowHandler({
'dir': self.workdir,
'makedb': '/usr/bin/makedb'
})
written = updater.Write(m)
self.assertTrue(os.path.exists(updater.temp_cache_filename),
'updater.Write() did not create a file')
# change the cache
db = btopen(updater.temp_cache_filename)
del db[db.first()[0]]
db.sync()
db.close()
retval = updater.Verify(written)
self.assertEqual(False, retval)
self.assertFalse(
os.path.exists(os.path.join(updater.temp_cache_filename)))
class TestNssDbCache(unittest.TestCase):
def setUp(self):
super(TestNssDbCache, self).setUp()
self.workdir = tempfile.mkdtemp()
def tearDown(self):
super(TestNssDbCache, self).tearDown()
shutil.rmtree(self.workdir)
@unittest.skipIf(NoMakeDB(), 'no /usr/bin/makedb')
def testWriteTestBdb(self):
data = passwd.PasswdMap()
pw = passwd.PasswdMapEntry()
pw.name = 'foo'
pw.passwd = 'x'
pw.uid = 1000
pw.gid = 1000
pw.gecos = 'doody'
pw.dir = '/'
pw.shell = '/bin/sh'
self.assertTrue(data.Add(pw))
# instantiate object under test
dummy_config = {'dir': self.workdir}
cache = nssdb.NssDbPasswdHandler(dummy_config)
written = cache.Write(data)
self.assertTrue(b'.foo' in written)
self.assertTrue(b'=1000' in written)
# perform test
db = btopen(cache.temp_cache_filename, 'r')
self.assertEqual(3, len(list(db.keys())))
self.assertTrue(b'.foo' in list(db.keys()))
self.assertTrue(b'=1000' in list(db.keys()))
self.assertTrue(b'00' in list(db.keys()))
# convert data to pwent
d = ('%s:x:%s:%s:%s:%s:%s\x00' % (pw.name, pw.uid, pw.gid, pw.gecos,
pw.dir, pw.shell)).encode('ascii')
self.assertEqual(db[b'00'], d)
self.assertEqual(db[b'.foo'], d)
self.assertEqual(db[b'=1000'], d)
# tear down
os.unlink(cache.temp_cache_filename)
def testLoadBdbCacheFile(self):
pass_file = os.path.join(self.workdir, 'passwd.db')
db = btopen(pass_file, 'c')
ent = 'foo:x:1000:500:bar:/:/bin/sh'
db[b'00'] = ent
db[b'=1000'] = ent
db[b'.foo'] = ent
db.sync()
self.assertTrue(os.path.exists(pass_file))
config = {'dir': self.workdir}
cache = nssdb.NssDbPasswdHandler(config)
data_map = cache.GetMap()
cache._LoadBdbCacheFile(data_map)
self.assertEqual(1, len(data_map))
# convert data to pwent
x = data_map.PopItem()
d = '%s:x:%s:%s:%s:%s:%s' % (x.name, x.uid, x.gid, x.gecos, x.dir,
x.shell)
self.assertEqual(ent, d)
os.unlink(pass_file)
def testGetMapRaisesCacheNotFound(self):
bad_file = os.path.join(self.workdir, 'really_not_going_to_exist_okay')
self.assertFalse(os.path.exists(bad_file), 'what the hell, it exists!')
config = {}
cache = nssdb.NssDbPasswdHandler(config)
cache.CACHE_FILENAME = bad_file
self.assertRaises(error.CacheNotFound, cache.GetMap)
def testGetMapIsSizedObject(self):
timestamp = int(time.time())
update_ts_filename = os.path.join(
self.workdir, 'passwd.db.nsscache-update-timestamp')
update_ts_file = open(update_ts_filename, 'w')
update_ts_file.write(
'%s\n' %
time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(timestamp)))
update_ts_file.close()
db_filename = os.path.join(self.workdir, 'passwd.db')
db = btopen(db_filename)
db.close()
cache = nssdb.NssDbPasswdHandler({'dir': self.workdir})
cache_map = cache.GetMap()
self.assertEqual(0, len(cache_map))
os.unlink(update_ts_filename)
os.unlink(db_filename)
def testGetMapHasMerge(self):
timestamp = int(time.time())
update_ts_filename = os.path.join(
self.workdir, 'passwd.db.nsscache-update-timestamp')
update_ts_file = open(update_ts_filename, 'w')
update_ts_file.write(
'%s\n' %
time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(timestamp)))
update_ts_file.close()
db_filename = os.path.join(self.workdir, 'passwd.db')
db = btopen(db_filename)
db.close()
cache = nssdb.NssDbPasswdHandler({'dir': self.workdir})
cache_map = cache.GetMap()
self.assertEqual(False, cache_map.Merge(passwd.PasswdMap()))
os.unlink(update_ts_filename)
os.unlink(db_filename)
def testGetMapIsIterable(self):
timestamp = int(time.time())
update_ts_filename = os.path.join(
self.workdir, 'passwd.db.nsscache-update-timestamp')
update_ts_file = open(update_ts_filename, 'w')
update_ts_file.write(
'%s\n' %
time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(timestamp)))
update_ts_file.close()
db_filename = os.path.join(self.workdir, 'passwd.db')
db = btopen(db_filename)
db.close()
cache = nssdb.NssDbPasswdHandler({'dir': self.workdir})
cache_map = cache.GetMap()
self.assertEqual([], list(cache_map))
os.unlink(update_ts_filename)
os.unlink(db_filename)
if __name__ == '__main__':
unittest.main()
nsscache-version-0.42/nss_cache/command.py000066400000000000000000000711121402531134600206300ustar00rootroot00000000000000# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Command objects."""
__author__ = ('jaq@google.com (Jamie Wilkinson)',
'vasilios@google.com (Vasilios Hoffman)')
import inspect
from io import StringIO
import logging
import optparse
import os
import shutil
import tempfile
import time
from nss_cache import config
from nss_cache import error
from nss_cache import lock
from nss_cache import nss
from nss_cache.caches import cache_factory
from nss_cache.sources import source_factory
from nss_cache.update import map_updater
from nss_cache.update import files_updater
class Command(object):
"""Base class for commands.
The Command object mostly handles the mapping of commandline
parameters into one or more nss_cache operations, and the results
back into output.
Commands normally don't have any state. All their arguments are
    passed into the Run() method.
The docstring for an actual command should give a one-line
summary, then a complete description of the command. This is used
as part of the help system.
"""
# Well known exit codes. We reserve anything 30 and under for the
# number of failed NSS maps (~15 defined under modern linux/glibc
    # implementations of name services; add a fudge factor of 2 until I
    # check out a Sun box and some other Unices).
#
# This should all be uplifted into error.py and
# coordinated there for the entire module.
ERR_LOCK = 200
def __init__(self):
# Setup logging.
self.log = logging.getLogger(__name__)
if self.__doc__ == Command.__doc__:
self.log.warning('No help message set for %r', self)
# Setup command parser.
self.parser = self._GetParser()
# Attribute used to hold optional lock object.
self.lock = None
def __del__(self):
"""Release any locks before we exit."""
self._Unlock()
def _GetParser(self):
"""Initialize the argument parser for this command object.
A default parser is initialized which supports common flags. It
is expected that Command subclasses extend this and add specific
flags as needed.
Returns:
an optparse.OptionParser instance
"""
parser = optparse.OptionParser()
# We do not mix arguments and flags!
parser.disable_interspersed_args()
# commonly used options
parser.add_option('-m',
'--map',
action='append',
type='string',
dest='maps',
help='map to operate on, can be'
' supplied multiple times')
return parser
def Run(self, conf, args):
"""Run this command.
Commands are invoked with a global configuration object and a list
of arguments.
Args:
conf: A Config object defining global configuration of
nss_cache.
args: A list of strings of commandline arguments.
Returns:
0 if the command was successful
non-zero shell error code if not.
"""
raise NotImplementedError('command %r not implemented' %
self.__class__.__name__)
def _Lock(self, path=None, force=False):
"""Grab a system-wide lock for this command.
Commands wishing to prevent concurrent operation can invoke this
method to acquire a system-wide lock. The lock will be
automatically released on object destruction, however an optional
Unlock() method is provided for commands wishing a smaller scope
of locking.
Args:
path: optional path to lock file.
force: optional boolean to override existing locks.
Returns:
True if the lock was acquired.
False if the lock was not.
"""
# Create the lock if it doesn't exist.
if self.lock is None:
self.lock = lock.PidFile(filename=path)
# Acquire the lock.
return self.lock.Lock(force=force)
def _Unlock(self):
"""Release the system-wide lock if present."""
if self.lock is not None:
if self.lock.Locked():
self.lock.Unlock()
def Help(self, short=False):
"""Return the help message for this command."""
if self.__doc__ is Command.__doc__:
return None
help_text = inspect.getdoc(self) + '\n'
if short:
# only use the short summary first line
help_text = help_text.split('\n')[0]
else:
# lose the short summary first line
help_text = '\n'.join(help_text.split('\n')[2:])
help_buffer = StringIO()
self.parser.print_help(file=help_buffer)
# lose the first line, which is the usage line
help_text += '\n'.join(help_buffer.getvalue().split('\n')[1:])
return help_text
class Update(Command):
"""Update the cache.
Performs an update of the configured caches from the configured
sources.
"""
def __init__(self):
"""Initialize the argument parser for this command object."""
super(Update, self).__init__()
self.parser.add_option('-f',
'--full',
action='store_false',
help='force a full update from the data source',
dest='incremental',
default=True)
self.parser.add_option('-s',
'--sleep',
action='store',
type='int',
default=False,
dest='delay',
help='number of seconds to sleep before'
' executing command')
self.parser.add_option(
'--force-write',
action='store_true',
default=False,
dest='force_write',
help='force the update to write new maps, overriding'
' safety checks, such as refusing to write empty'
            ' maps.')
self.parser.add_option(
'--force-lock',
action='store_true',
default=False,
dest='force_lock',
help='forcibly acquire the lock, and issue a SIGTERM'
            ' to any nsscache process holding the lock.')
def Run(self, conf, args):
"""Run the Update command.
See Command.Run() for full documentation on the Run() method.
Args:
conf: a nss_cache.config.Config object
args: a list of arguments to be parsed by this command
Returns:
0 on success, nonzero on error
"""
try:
(options, args) = self.parser.parse_args(args)
except SystemExit as e:
return e.code
if options.maps:
self.log.info('Setting configured maps to %s', options.maps)
conf.maps = options.maps
if not options.incremental:
self.log.debug('performing FULL update of caches')
else:
self.log.debug('performing INCREMENTAL update of caches')
if options.delay:
self.log.info('Delaying %d seconds before executing', options.delay)
time.sleep(options.delay)
return self.UpdateMaps(conf,
incremental=options.incremental,
force_write=options.force_write,
force_lock=options.force_lock)
def UpdateMaps(self,
conf,
incremental,
force_write=False,
force_lock=False):
"""Update each configured map.
For each configured map, create a source and cache object and
update the cache from the source.
Args:
conf: configuration object
incremental: flag indicating incremental update should occur
force_write: optional flag indicating safety checks should be ignored
force_lock: optional flag indicating we override existing locks
Returns:
integer, zero indicating success, non-zero failure
"""
# Grab a lock before we continue!
if not self._Lock(path=conf.lockfile, force=force_lock):
self.log.error('Failed to acquire lock, aborting!')
return self.ERR_LOCK
retval = 0
for map_name in conf.maps:
if map_name not in conf.options:
self.log.error('No such map name defined in config: %s',
map_name)
return 1
if incremental:
self.log.info('Updating and verifying %s cache.', map_name)
else:
self.log.info('Rebuilding and verifying %s cache.', map_name)
cache_options = conf.options[map_name].cache
source_options = conf.options[map_name].source
# Change into the target directory.
# Sources such as zsync handle their temporary files badly, so we
# want to be in the same location that the destination file will
# exist in, so that the atomic rename occurs in the same
# filesystem.
# In addition, we create a tempdir below this dir to work in, because
# zsync's librcksum sometimes leaves temp files around, and we don't
# want to leave file turds around /etc.
# We save and restore the directory here as each cache can define its own
# output directory.
# Finally, relative paths in the config are treated as relative to the
            # startup directory, but we convert them to absolute paths so that future
# temp dirs do not mess with our output routines.
old_cwd = os.getcwd()
tempdir = tempfile.mkdtemp(dir=cache_options['dir'],
prefix='nsscache-%s-' % map_name)
if not os.path.isabs(cache_options['dir']):
cache_options['dir'] = os.path.abspath(cache_options['dir'])
if not os.path.isabs(conf.timestamp_dir):
conf.timestamp_dir = os.path.abspath(conf.timestamp_dir)
if not os.path.isabs(tempdir):
tempdir = os.path.abspath(tempdir)
os.chdir(tempdir)
# End chdir dirty hack.
try:
try:
source = source_factory.Create(source_options)
updater = self._Updater(map_name, source, cache_options,
conf)
if incremental:
self.log.info('Updating and verifying %s cache.',
map_name)
else:
self.log.info('Rebuilding and verifying %s cache.',
map_name)
retval = updater.UpdateFromSource(source,
incremental=incremental,
force_write=force_write)
except error.PermissionDenied:
self.log.error(
'Permission denied: could not update map %r. Aborting',
map_name)
retval += 1
except (error.EmptyMap, error.InvalidMap) as e:
self.log.error(e)
retval += 1
except error.InvalidMerge as e:
self.log.warning('Could not merge map %r: %s. Skipping.',
map_name, e)
finally:
# Start chdir cleanup
os.chdir(old_cwd)
shutil.rmtree(tempdir)
# End chdir cleanup
return retval
def _Updater(self, map_name, source, cache_options, conf):
# Bit ugly. This just checks the class attribute UPDATER
# to determine which type of updater the source uses. At the moment
        # there are only two, so it's not a huge deal. If we add another we should
# refactor though.
if hasattr(source, 'UPDATER') and source.UPDATER == config.UPDATER_FILE:
if map_name == config.MAP_AUTOMOUNT:
return files_updater.FileAutomountUpdater(
map_name, conf.timestamp_dir, cache_options)
else:
return files_updater.FileMapUpdater(map_name,
conf.timestamp_dir,
cache_options,
can_do_incremental=True)
else:
if map_name == config.MAP_AUTOMOUNT:
return map_updater.AutomountUpdater(map_name,
conf.timestamp_dir,
cache_options)
else:
return map_updater.MapUpdater(map_name,
conf.timestamp_dir,
cache_options,
can_do_incremental=True)
class Verify(Command):
"""Verify the cache and configuration.
Perform verification of the built caches and validation of the
system NSS configuration.
"""
def Run(self, conf, args):
"""Run the Verify command.
See Command.Run() for full documentation on the Run() method.
Args:
conf: nss_cache.config.Config object
args: list of arguments to be parsed
Returns:
count of warnings and errors detected when verifying
"""
try:
(options, args) = self.parser.parse_args(args)
except SystemExit as e:
return e.code
if options.maps:
self.log.info('Setting configured maps to %s', options.maps)
conf.maps = options.maps
(warnings, errors) = (0, 0)
self.log.info('Verifying program and system configuration.')
(config_warnings, config_errors) = config.VerifyConfiguration(conf)
warnings += config_warnings
errors += config_errors
self.log.info('Verifying data sources.')
errors += self.VerifySources(conf)
self.log.info('Verifying data caches.')
errors += self.VerifyMaps(conf)
self.log.info('Verification result: %d warnings, %d errors', warnings,
errors)
if warnings + errors:
self.log.info('Verification failed!')
else:
self.log.info('Verification passed!')
return warnings + errors
def VerifyMaps(self, conf):
"""Compare each configured map against data retrieved from NSS.
For each configured map, build a Map object from NSS and compare
it against a Map object retrieved directly from the cache. We
expect the cache Map to be a subset of the nss Map due to possible
inclusion of other NSS map types (e.g. files, nis, ldap, etc).
This could be done via series of get*nam calls, however at this
time it appears to be more efficient to grab them in bulk and use
the Map.__contains__() membership test.
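        For example (illustrative): if nsswitch.conf lists both 'files'
        and our cache for the passwd map, the NSS map contains entries
        from both sources, so every cache entry must appear in the NSS
        map, but not necessarily the reverse.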
Args:
conf: nss_cache.config.Config object
Returns:
count of failures when verifying
"""
retval = 0
for map_name in conf.maps:
self.log.info('Verifying map: %s.', map_name)
# The netgroup map does not have an enumerator,
# to test this we'd have to loop over the loaded cache map
# and verify each entry is retrievable via getent directly.
# TODO(blaed): apply fix from comment to allow for netgroup checking
if map_name == config.MAP_NETGROUP:
self.log.info(('The netgroup map does not support enumeration, '
'skipping.'))
continue
# Automount maps do not support getent, we'll have to come up with
# a good way to verify these.
if map_name == config.MAP_AUTOMOUNT:
self.log.info(
('The automount map does not support enumeration, '
'skipping.'))
continue
try:
nss_map = nss.GetMap(map_name)
except error.UnsupportedMap:
self.log.warning('Verification of %s map is unsupported!',
map_name)
continue
self.log.debug('built NSS map of %d entries', len(nss_map))
cache_options = conf.options[map_name].cache
cache = cache_factory.Create(cache_options, map_name)
try:
cache_map = cache.GetMap()
except error.CacheNotFound:
self.log.error('Cache missing!')
retval += 1
continue
self.log.debug('built cache map of %d entries', len(cache_map))
# cache_map is a subset of nss_map due to possible other maps,
# e.g. files, nis, ldap, etc.
missing_entries = 0
for map_entry in cache_map:
if map_entry not in nss_map:
self.log.info(
'The following entry is present in the cache '
                    'but not available via NSS! %s', map_entry.name)
self.log.debug('missing entry data: %s', map_entry)
missing_entries += 1
if missing_entries > 0:
self.log.warning('Missing %d entries in %s map',
missing_entries, map_name)
retval += 1
return retval
def VerifySources(self, conf):
"""Verify each possible source and return the appropriate retval."""
possible_sources = set()
retval = 0
for map_name in conf.maps:
possible_sources.add(map_name)
if possible_sources:
for map_name in possible_sources:
source_options = conf.options[map_name].source
try:
source = source_factory.Create(source_options)
except error.SourceUnavailable as e:
self.log.debug('map %s dumps source error %s', map_name, e)
                self.log.error('Map %s is unavailable!', map_name)
retval += 1
continue
retval += source.Verify()
else:
self.log.error('No sources configured for any maps!')
retval += 1
return retval
class Help(Command):
"""Show per-command help.
usage: help [command]
Shows online help for each command.
e.g. 'help help' shows this help.
"""
def Run(self, conf, args):
"""Run the Help command.
See Command.Run() for full documentation on the Run() method.
Args:
conf: nss_cache.config.Config object
args: list of arguments to be parsed by this command.
Returns:
            zero, and prints the help text as a side effect
"""
if not args:
help_text = self.Help()
else:
help_command = args.pop()
print(('Usage: nsscache [global options] %s [options]' %
help_command))
print()
try:
callable_action = getattr(inspect.getmodule(self),
help_command.capitalize())
help_text = callable_action().Help()
except AttributeError:
print(('command %r is not implemented' % help_command))
return 1
print(help_text)
return 0
class Repair(Command):
"""Repair the cache.
Verify that the configuration is correct, that the source is
reachable, then perform a full synchronisation of the cache.
"""
def Run(self, conf, args):
"""Run the Repair command.
See Command.Run() for full documentation on the Run() method.
Args:
conf: nss_cache.config.Config object
args: list of arguments to be parsed by this command
Returns:
0 on success, nonzero on error
"""
try:
(options, args) = self.parser.parse_args(args)
except SystemExit as e:
return e.code
if options.maps:
self.log.info('Setting configured maps to %s', options.maps)
conf.maps = options.maps
(warnings, errors) = (0, 0)
self.log.info('Verifying program and system configuration.')
(config_warnings, config_errors) = config.VerifyConfiguration(conf)
warnings += config_warnings
errors += config_errors
self.log.info('Verifying data sources.')
errors += Verify().VerifySources(conf)
self.log.info('verification: %d warnings, %d errors', warnings, errors)
# Exit and report if config or source failed verification, because
# we cannot reliably build a cache if either of these are faulty.
if errors > 0:
            self.log.error('Verification tests failed with errors;'
                           ' repair aborted!')
return 1
# Rebuild local cache in full, which also verifies each cache.
self.log.info('Rebuilding and verifying caches: %s.', conf.maps)
return Update().UpdateMaps(conf=conf, incremental=False)
class Status(Command):
"""Show current cache status.
Show the last update time of each configured cache, and other
metrics, optionally in a machine-readable format.
"""
def __init__(self):
super(Status, self).__init__()
self.parser.add_option('--epoch',
action='store_true',
help='show timestamps in UNIX epoch time',
dest='epoch',
default=False)
self.parser.add_option('--template',
action='store',
help='Set format for output',
metavar='FORMAT',
dest='template',
default='NSS map: %(map)s\n%(key)s: %(value)s')
self.parser.add_option('--automount-template',
action='store',
help='Set format for automount output',
metavar='FORMAT',
dest='automount_template',
default=('NSS map: %(map)s\nAutomount map: '
'%(automount)s\n%(key)s: %(value)s'))
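        # Illustrative use of the templates above; the %-format keys come
        # from the metadata dicts built by GetSingleMapMetadata below:
        #
        #   nsscache status --template '%(map)s %(key)s=%(value)s'
        #
        # prints one line per timestamp, e.g. 'passwd last-modify-timestamp=...'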
def Run(self, conf, args):
"""Run the Status command.
See Command.Run() for full documentation on the Run() method.
Args:
conf: nss_cache.config.Config object
args: list of arguments to be parsed by this command
Returns:
zero on success, nonzero on error
"""
try:
(options, args) = self.parser.parse_args(args)
except SystemExit as e:
# See app.NssCacheApp.Run()
return e.code
if options.maps:
self.log.info('Setting configured maps to %s', options.maps)
conf.maps = options.maps
for map_name in conf.maps:
# Hardcoded to support the two-tier structure of automount maps
if map_name == config.MAP_AUTOMOUNT:
value_list = self.GetAutomountMapMetadata(conf,
epoch=options.epoch)
self.log.debug('Value list: %r', value_list)
for value_dict in value_list:
self.log.debug('Value dict: %r', value_dict)
output = options.automount_template % value_dict
print(output)
else:
for value_dict in self.GetSingleMapMetadata(
map_name, conf, epoch=options.epoch):
self.log.debug('Value dict: %r', value_dict)
output = options.template % value_dict
print(output)
return os.EX_OK
def GetSingleMapMetadata(self,
map_name,
conf,
automount_mountpoint=None,
epoch=False):
"""Return metadata from map specified.
Args:
map_name: name of map to extract data from
conf: a config.Config object
automount_mountpoint: information necessary for automount maps
epoch: return times as an integer epoch (time_t) instead of a
human readable name
Returns:
a list of dicts of metadata key/value pairs
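            For example (illustrative, with epoch=True):
                [{'key': 'last-modify-timestamp', 'map': 'passwd', 'value': 1},
                 {'key': 'last-update-timestamp', 'map': 'passwd', 'value': 2}]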
"""
cache_options = conf.options[map_name].cache
updater = map_updater.MapUpdater(map_name, conf.timestamp_dir,
cache_options, automount_mountpoint)
modify_dict = {'key': 'last-modify-timestamp', 'map': map_name}
update_dict = {'key': 'last-update-timestamp', 'map': map_name}
if map_name == config.MAP_AUTOMOUNT:
# have to find out *which* automount map from a cache object!
cache = cache_factory.Create(
cache_options,
config.MAP_AUTOMOUNT,
automount_mountpoint=automount_mountpoint)
automount = cache.GetMapLocation()
modify_dict['automount'] = automount
update_dict['automount'] = automount
last_modify_timestamp = updater.GetModifyTimestamp() or 0
last_update_timestamp = updater.GetUpdateTimestamp() or 0
if not epoch:
# If we are displaying the time as a string, do so in localtime. This is
# the only place such a conversion is appropriate.
if last_modify_timestamp:
last_modify_timestamp = time.asctime(
time.localtime(last_modify_timestamp))
else:
last_modify_timestamp = 'Unknown'
if last_update_timestamp:
last_update_timestamp = time.asctime(
time.localtime(last_update_timestamp))
else:
last_update_timestamp = 'Unknown'
modify_dict['value'] = last_modify_timestamp
update_dict['value'] = last_update_timestamp
return [modify_dict, update_dict]
def GetAutomountMapMetadata(self, conf, epoch=False):
"""Return status of automount master map and all listed automount maps.
We retrieve the automount master map, and build a list of dicts which
are used by the caller to print the status output.
Args:
conf: a config.Config object
epoch: return times as an integer epoch (time_t) instead of a
human readable name
Returns:
a list of dicts of metadata key/value pairs
"""
map_name = config.MAP_AUTOMOUNT
cache_options = conf.options[map_name].cache
value_list = []
# get the value_dict for the master map, note that automount_mountpoint=None
# defaults to the master map!
values = self.GetSingleMapMetadata(map_name,
conf,
automount_mountpoint=None,
epoch=epoch)
value_list.extend(values)
# now get the contents of the master map, and get the status for each map
# we find
cache = cache_factory.Create(cache_options,
config.MAP_AUTOMOUNT,
automount_mountpoint=None)
master_map = cache.GetMap()
for map_entry in master_map:
values = self.GetSingleMapMetadata(
map_name, conf, automount_mountpoint=map_entry.key, epoch=epoch)
value_list.extend(values)
return value_list
nsscache-version-0.42/nss_cache/command_test.py000066400000000000000000000737771402531134600217120ustar00rootroot00000000000000# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Unit tests for nss_cache/command.py."""
__author__ = ('jaq@google.com (Jamie Wilkinson)',
'vasilios@google.com (Vasilios Hoffman)')
import grp
import os
import pwd
import shutil
from io import StringIO
import sys
import tempfile
import time
import unittest
from mox3 import mox
from nss_cache import command
from nss_cache import config
from nss_cache import error
from nss_cache import lock
from nss_cache import nss
from nss_cache.caches import caches
from nss_cache.caches import cache_factory
from nss_cache.maps import automount
from nss_cache.maps import passwd
from nss_cache.sources import source
from nss_cache.sources import source_factory
from nss_cache.update import updater
from nss_cache.update import files_updater
from nss_cache.update import map_updater
class TestCommand(mox.MoxTestBase):
"""Unit tests for the Command class."""
def testRunCommand(self):
c = command.Command()
self.assertRaises(NotImplementedError, c.Run, [], {})
@unittest.skip('badly mocked')
def testLock(self):
self.mox.StubOutClassWithMocks(lock, 'PidFile')
mock_lock = lock.PidFile(filename=None)
mock_lock.Lock(force=False).AndReturn('LOCK')
mock_lock.Lock(force=False).AndReturn('MORLOCK')
mock_lock.Locked().AndReturn(True)
mock_lock.Unlock()
self.mox.ReplayAll()
c = command.Command()
# First test that we create a lock and lock it.
self.assertEqual('LOCK', c._Lock())
# Then we test that we lock the existing one a second time.
self.assertEqual('MORLOCK', c._Lock())
@unittest.skip('badly mocked')
def testForceLock(self):
self.mox.StubOutClassWithMocks(lock, 'PidFile')
mock_lock = lock.PidFile(filename=None)
mock_lock.Lock(force=True).AndReturn('LOCK')
mock_lock.Locked().AndReturn(True)
mock_lock.Unlock()
self.mox.ReplayAll()
c = command.Command()
self.assertEqual('LOCK', c._Lock(force=True))
@unittest.skip('badly mocked')
def testUnlock(self):
self.mox.StubOutClassWithMocks(lock, 'PidFile')
mock_lock = lock.PidFile(filename=None)
mock_lock.Lock(force=False).AndReturn(True)
mock_lock.Locked().AndReturn(True)
mock_lock.Unlock()
mock_lock.Locked().AndReturn(False) # destructor
self.mox.ReplayAll()
c = command.Command()
c._Lock()
c._Unlock()
def testCommandHelp(self):
c = command.Command()
self.assertNotEqual(None, c)
self.assertEqual(None, c.Help())
def testDummyCommand(self):
class Dummy(command.Command):
"""Dummy docstring for dummy command."""
def Run(self):
return 0
c = Dummy()
self.assertTrue(isinstance(c, command.Command))
self.assertNotEqual(None, c.Help())
class TestUpdateCommand(mox.MoxTestBase):
"""Unit tests for the Update command class."""
def setUp(self):
super(TestUpdateCommand, self).setUp()
self.workdir = tempfile.mkdtemp()
class DummyConfig(object):
pass
self.conf = DummyConfig()
self.conf.options = {
config.MAP_PASSWORD: config.MapOptions(),
config.MAP_AUTOMOUNT: config.MapOptions()
}
self.conf.options[config.MAP_PASSWORD].cache = {
'name': 'dummy',
'dir': self.workdir
}
self.conf.options[config.MAP_PASSWORD].source = {'name': 'dummy'}
self.conf.options[config.MAP_AUTOMOUNT].cache = {
'name': 'dummy',
'dir': self.workdir
}
self.conf.options[config.MAP_AUTOMOUNT].source = {'name': 'dummy'}
self.conf.timestamp_dir = self.workdir
self.conf.lockfile = None
def tearDown(self):
super(TestUpdateCommand, self).tearDown()
shutil.rmtree(self.workdir)
def testConstructor(self):
c = command.Update()
self.assertNotEqual(None, c)
def testHelp(self):
c = command.Update()
self.assertNotEqual(None, c.Help())
def testRunWithNoParameters(self):
c = command.Update()
self.mox.StubOutWithMock(c, 'UpdateMaps')
c.UpdateMaps(self.conf,
incremental=True,
force_lock=False,
force_write=False).AndReturn(0)
self.mox.ReplayAll()
self.assertEqual(0, c.Run(self.conf, []))
def testRunWithBadParameters(self):
c = command.Update()
# Trap stderr so the unit test runs clean,
# since unit test status is printed on stderr.
dev_null = StringIO()
stderr = sys.stderr
sys.stderr = dev_null
self.assertEqual(2, c.Run(None, ['--invalid']))
sys.stderr = stderr
def testRunWithFlags(self):
c = command.Update()
self.mox.StubOutWithMock(c, 'UpdateMaps')
c.UpdateMaps(self.conf,
incremental=False,
force_lock=True,
force_write=True).AndReturn(0)
self.mox.ReplayAll()
self.assertEqual(
0,
c.Run(self.conf, [
'-m', config.MAP_PASSWORD, '-f', '--force-write', '--force-lock'
]))
self.assertEqual(['passwd'], self.conf.maps)
def testUpdateSingleMaps(self):
self.mox.StubOutClassWithMocks(lock, 'PidFile')
lock_mock = lock.PidFile(filename=None)
lock_mock.Lock(force=False).AndReturn(True)
lock_mock.Locked().AndReturn(True)
lock_mock.Unlock()
self.conf.maps = [config.MAP_PASSWORD]
self.conf.cache = 'dummy'
modify_stamp = 1
map_entry = passwd.PasswdMapEntry({'name': 'foo', 'uid': 10, 'gid': 10})
passwd_map = passwd.PasswdMap([map_entry])
passwd_map.SetModifyTimestamp(modify_stamp)
source_mock = self.mox.CreateMock(source.Source)
source_mock.GetMap(config.MAP_PASSWORD,
location=None).AndReturn(passwd_map)
self.mox.StubOutWithMock(source_factory, 'Create')
source_factory.Create(self.conf.options[
config.MAP_PASSWORD].source).AndReturn(source_mock)
cache_mock = self.mox.CreateMock(caches.Cache)
cache_mock.WriteMap(map_data=passwd_map).AndReturn(0)
self.mox.StubOutWithMock(cache_factory, 'Create')
cache_factory.Create(self.conf.options[config.MAP_PASSWORD].cache,
config.MAP_PASSWORD).AndReturn(cache_mock)
self.mox.ReplayAll()
c = command.Update()
self.assertEqual(
0, c.UpdateMaps(self.conf, incremental=True, force_write=False))
def testUpdateAutomounts(self):
self.mox.StubOutClassWithMocks(lock, 'PidFile')
lock_mock = lock.PidFile(filename=None)
lock_mock.Lock(force=False).AndReturn(True)
lock_mock.Locked().AndReturn(True)
lock_mock.Unlock()
self.conf.maps = [config.MAP_AUTOMOUNT]
self.conf.cache = 'dummy'
modify_stamp = 1
map_entry = automount.AutomountMapEntry()
map_entry.key = '/home'
map_entry.location = 'foo'
automount_map = automount.AutomountMap([map_entry])
automount_map.SetModifyTimestamp(modify_stamp)
source_mock = self.mox.CreateMock(source.Source)
source_mock.GetAutomountMasterMap().AndReturn(automount_map)
source_mock.GetMap(config.MAP_AUTOMOUNT,
location='foo').AndReturn(automount_map)
self.mox.StubOutWithMock(source_factory, 'Create')
source_factory.Create(self.conf.options[
config.MAP_PASSWORD].source).AndReturn(source_mock)
cache_mock = self.mox.CreateMock(caches.Cache)
cache_mock.GetMapLocation().AndReturn('home')
cache_mock.WriteMap(map_data=automount_map).AndReturn(0)
cache_mock.WriteMap(map_data=automount_map).AndReturn(0)
self.mox.StubOutWithMock(cache_factory, 'Create')
cache_factory.Create(self.conf.options[config.MAP_AUTOMOUNT].cache,
config.MAP_AUTOMOUNT,
automount_mountpoint='/home').AndReturn(cache_mock)
cache_factory.Create(self.conf.options[config.MAP_AUTOMOUNT].cache,
config.MAP_AUTOMOUNT,
automount_mountpoint=None).AndReturn(cache_mock)
self.mox.ReplayAll()
c = command.Update()
self.assertEqual(
0, c.UpdateMaps(self.conf, incremental=True, force_write=False))
def testUpdateMapsTrapsPermissionDenied(self):
self.mox.StubOutWithMock(map_updater.MapUpdater, 'UpdateFromSource')
map_updater.MapUpdater.UpdateFromSource(mox.IgnoreArg(),
incremental=True,
force_write=False).AndRaise(
error.PermissionDenied)
self.mox.StubOutClassWithMocks(lock, 'PidFile')
lock_mock = lock.PidFile(filename=None)
lock_mock.Lock(force=False).AndReturn(True)
lock_mock.Locked().AndReturn(True)
lock_mock.Unlock()
self.conf.maps = [config.MAP_PASSWORD]
self.conf.cache = 'dummy'
modify_stamp = 1
map_entry = passwd.PasswdMapEntry({'name': 'foo', 'uid': 10, 'gid': 10})
passwd_map = passwd.PasswdMap([map_entry])
passwd_map.SetModifyTimestamp(modify_stamp)
source_mock = self.mox.CreateMock(source.Source)
self.mox.StubOutWithMock(source_factory, 'Create')
source_factory.Create(self.conf.options[
config.MAP_PASSWORD].source).AndReturn(source_mock)
cache_mock = self.mox.CreateMock(caches.Cache)
self.mox.StubOutWithMock(cache_factory, 'Create')
self.mox.ReplayAll()
c = command.Update()
self.assertEqual(
1, c.UpdateMaps(self.conf, incremental=True, force_write=False))
def testUpdateMapsCanForceLock(self):
self.mox.StubOutClassWithMocks(lock, 'PidFile')
lock_mock = lock.PidFile(filename=None)
lock_mock.Lock(force=True).AndReturn(False)
lock_mock.Locked().AndReturn(True)
lock_mock.Unlock()
self.mox.ReplayAll()
c = command.Update()
self.assertEqual(c.UpdateMaps(self.conf, False, force_lock=True),
c.ERR_LOCK)
def testSleep(self):
self.mox.StubOutWithMock(time, 'sleep')
time.sleep(1)
c = command.Update()
self.mox.StubOutWithMock(c, 'UpdateMaps')
c.UpdateMaps(self.conf,
incremental=True,
force_lock=mox.IgnoreArg(),
force_write=mox.IgnoreArg()).AndReturn(0)
self.mox.ReplayAll()
c.Run(self.conf, ['-s', '1'])
def testForceWriteFlag(self):
c = command.Update()
(options, _) = c.parser.parse_args([])
self.assertEqual(False, options.force_write)
(options, _) = c.parser.parse_args(['--force-write'])
self.assertEqual(True, options.force_write)
def testForceLockFlag(self):
c = command.Update()
(options, _) = c.parser.parse_args([])
self.assertEqual(False, options.force_lock)
(options, _) = c.parser.parse_args(['--force-lock'])
self.assertEqual(True, options.force_lock)
def testForceWriteFlagCallsUpdateMapsWithForceWriteTrue(self):
c = command.Update()
self.mox.StubOutWithMock(c, 'UpdateMaps')
c.UpdateMaps(self.conf,
incremental=mox.IgnoreArg(),
force_lock=mox.IgnoreArg(),
force_write=True).AndReturn(0)
self.mox.ReplayAll()
self.assertEqual(0, c.Run(self.conf, ['--force-write']))
def testForceLockFlagCallsUpdateMapsWithForceLockTrue(self):
c = command.Update()
self.mox.StubOutWithMock(c, 'UpdateMaps')
c.UpdateMaps(self.conf,
incremental=mox.IgnoreArg(),
force_lock=True,
force_write=mox.IgnoreArg()).AndReturn(0)
self.mox.ReplayAll()
self.assertEqual(0, c.Run(self.conf, ['--force-lock']))
def testUpdateMapsWithBadMapName(self):
c = command.Update()
self.mox.StubOutWithMock(c, '_Lock')
c._Lock(force=False, path=None).AndReturn(True)
self.mox.ReplayAll()
# Create an invalid map name.
self.assertEqual(
1, c.Run(self.conf, ['-m', config.MAP_PASSWORD + 'invalid']))
class TestVerifyCommand(mox.MoxTestBase):
def setUp(self):
super(TestVerifyCommand, self).setUp()
class DummyConfig(object):
pass
class DummySource(source.Source):
name = 'dummy'
def Verify(self):
return 0
        # Instead of a DummyCache, we will override cache_factory.Create so
        # we can return a mock cache object.
self.original_caches_create = cache_factory.Create
self.original_sources_create = source_factory.Create
        # Add dummy source to the set of source implementations.
source_factory.RegisterImplementation(DummySource)
# Create a config with a section for a passwd map.
self.conf = DummyConfig()
self.conf.options = {config.MAP_PASSWORD: config.MapOptions()}
self.conf.options[config.MAP_PASSWORD].cache = {'name': 'dummy'}
self.conf.options[config.MAP_PASSWORD].source = {'name': 'dummy'}
self.original_verify_configuration = config.VerifyConfiguration
self.original_getmap = nss.GetMap
self.original_getpwall = pwd.getpwall
self.original_getgrall = grp.getgrall
# Setup maps used by VerifyMap testing.
big_map = passwd.PasswdMap()
map_entry1 = passwd.PasswdMapEntry()
map_entry1.name = 'foo'
map_entry1.uid = 10
map_entry1.gid = 10
big_map.Add(map_entry1)
map_entry2 = passwd.PasswdMapEntry()
map_entry2.name = 'bar'
map_entry2.uid = 20
map_entry2.gid = 20
big_map.Add(map_entry2)
small_map = passwd.PasswdMap()
small_map.Add(map_entry1)
self.big_map = big_map
self.small_map = small_map
def tearDown(self):
super(TestVerifyCommand, self).tearDown()
config.VerifyConfiguration = self.original_verify_configuration
cache_factory.Create = self.original_caches_create
nss.getmap = self.original_getmap
source_factory.Create = self.original_sources_create
pwd.getpwall = self.original_getpwall
grp.getgrall = self.original_getgrall
def testConstructor(self):
c = command.Verify()
self.assertTrue(isinstance(c, command.Verify))
def testHelp(self):
c = command.Verify()
self.assertNotEqual(None, c.Help())
def testRunWithNoParameters(self):
def FakeVerifyConfiguration(conf):
"""Assert that we call VerifyConfiguration correctly."""
self.assertEqual(conf, self.conf)
return (0, 0)
def FakeVerifyMaps(conf):
"""Assert that VerifyMaps is called with a config object."""
self.assertEqual(conf, self.conf)
return 0
config.VerifyConfiguration = FakeVerifyConfiguration
c = command.Verify()
c.VerifyMaps = FakeVerifyMaps
self.conf.maps = []
self.assertEqual(1, c.Run(self.conf, []))
def testRunWithBadParameters(self):
c = command.Verify()
# Trap stderr so the unit test runs clean,
# since unit test status is printed on stderr.
dev_null = StringIO()
stderr = sys.stderr
sys.stderr = dev_null
self.assertEqual(2, c.Run(None, ['--invalid']))
sys.stderr = stderr
def testRunWithParameters(self):
def FakeVerifyConfiguration(conf):
"""Assert that we call VerifyConfiguration correctly."""
self.assertEqual(conf, self.conf)
return (0, 0)
def FakeVerifyMaps(conf):
"""Assert that VerifyMaps is called with a config object."""
self.assertEqual(conf, self.conf)
return 0
config.VerifyConfiguration = FakeVerifyConfiguration
c = command.Verify()
c.VerifyMaps = FakeVerifyMaps
self.assertEqual(0, c.Run(self.conf, ['-m', config.MAP_PASSWORD]))
def testVerifyMapsSucceedsOnGoodMaps(self):
cache_mock = self.mox.CreateMock(caches.Cache)
cache_mock.GetMap().AndReturn(self.small_map)
self.mox.StubOutWithMock(cache_factory, 'Create')
cache_factory.Create(self.conf.options[config.MAP_PASSWORD].cache,
config.MAP_PASSWORD).AndReturn(cache_mock)
self.conf.maps = [config.MAP_PASSWORD]
self.mox.StubOutWithMock(nss, 'GetMap')
nss.GetMap(config.MAP_PASSWORD).AndReturn(self.big_map)
self.mox.ReplayAll()
c = command.Verify()
self.assertEqual(0, c.VerifyMaps(self.conf))
def testVerifyMapsBad(self):
cache_mock = self.mox.CreateMock(caches.Cache)
cache_mock.GetMap().AndReturn(self.big_map)
self.mox.StubOutWithMock(cache_factory, 'Create')
cache_factory.Create(self.conf.options[config.MAP_PASSWORD].cache,
config.MAP_PASSWORD).AndReturn(cache_mock)
self.conf.maps = [config.MAP_PASSWORD]
self.mox.StubOutWithMock(nss, 'GetMap')
nss.GetMap(config.MAP_PASSWORD).AndReturn(self.small_map)
self.mox.ReplayAll()
c = command.Verify()
self.assertEqual(1, c.VerifyMaps(self.conf))
def testVerifyMapsException(self):
cache_mock = self.mox.CreateMock(caches.Cache)
cache_mock.GetMap().AndRaise(error.CacheNotFound)
self.mox.StubOutWithMock(cache_factory, 'Create')
cache_factory.Create(self.conf.options[config.MAP_PASSWORD].cache,
config.MAP_PASSWORD).AndReturn(cache_mock)
self.conf.maps = [config.MAP_PASSWORD]
self.mox.StubOutWithMock(nss, 'GetMap')
nss.GetMap(config.MAP_PASSWORD).AndReturn(self.small_map)
self.mox.ReplayAll()
c = command.Verify()
self.assertEqual(1, c.VerifyMaps(self.conf))
def testVerifyMapsSkipsNetgroups(self):
self.mox.StubOutWithMock(cache_factory, 'Create')
self.conf.maps = [config.MAP_NETGROUP]
self.mox.StubOutWithMock(nss, 'GetMap')
self.mox.ReplayAll()
c = command.Verify()
self.assertEqual(0, c.VerifyMaps(self.conf))
def testVerifySourcesGood(self):
source_mock = self.mox.CreateMock(source.Source)
source_mock.Verify().AndReturn(0)
self.mox.StubOutWithMock(source_factory, 'Create')
source_factory.Create(mox.IgnoreArg()).AndReturn(source_mock)
self.conf.maps = [config.MAP_PASSWORD]
self.mox.ReplayAll()
self.assertEqual(0, command.Verify().VerifySources(self.conf))
def testVerifySourcesBad(self):
self.conf.maps = []
self.assertEqual(1, command.Verify().VerifySources(self.conf))
source_mock = self.mox.CreateMock(source.Source)
source_mock.Verify().AndReturn(1)
self.mox.StubOutWithMock(source_factory, 'Create')
source_factory.Create(
self.conf.options[config.MAP_PASSWORD].cache).AndReturn(source_mock)
self.conf.maps = [config.MAP_PASSWORD]
self.mox.ReplayAll()
self.assertEqual(1, command.Verify().VerifySources(self.conf))
def testVerifySourcesTrapsSourceUnavailable(self):
self.conf.maps = []
self.assertEqual(1, command.Verify().VerifySources(self.conf))
def FakeCreate(conf):
"""Stub routine returning a pmock to test VerifySources."""
self.assertEqual(conf,
self.conf.options[config.MAP_PASSWORD].source)
raise error.SourceUnavailable
old_source_base_create = source_factory.Create
source_factory.Create = FakeCreate
self.conf.maps = [config.MAP_PASSWORD]
self.assertEqual(1, command.Verify().VerifySources(self.conf))
source_factory.Create = old_source_base_create
class TestRepairCommand(unittest.TestCase):
def setUp(self):
class DummyConfig(object):
pass
class DummySource(source.Source):
name = 'dummy'
def Verify(self):
return 0
        # Add dummy source to the set of source implementations
source_factory.RegisterImplementation(DummySource)
self.conf = DummyConfig()
self.conf.options = {config.MAP_PASSWORD: config.MapOptions()}
self.conf.options[config.MAP_PASSWORD].cache = {'name': 'dummy'}
self.conf.options[config.MAP_PASSWORD].source = {'name': 'dummy'}
self.original_verify_configuration = config.VerifyConfiguration
def tearDown(self):
config.VerifyConfiguration = self.original_verify_configuration
def testCreate(self):
c = command.Repair()
self.assertTrue(isinstance(c, command.Repair))
def testHelp(self):
c = command.Repair()
self.assertNotEqual(None, c.Help())
def testRunWithNoParameters(self):
c = command.Repair()
def FakeVerifyConfiguration(conf):
"""Assert that we call VerifyConfiguration correctly."""
self.assertEqual(conf, self.conf)
return (0, 1)
config.VerifyConfiguration = FakeVerifyConfiguration
self.conf.maps = []
self.assertEqual(1, c.Run(self.conf, []))
def testRunWithBadParameters(self):
c = command.Repair()
# Trap stderr so the unit test runs clean,
# since unit test status is printed on stderr.
dev_null = StringIO()
stderr = sys.stderr
sys.stderr = dev_null
self.assertEqual(2, c.Run(None, ['--invalid']))
sys.stderr = stderr
def testRunWithParameters(self):
def FakeVerifyConfiguration(conf):
"""Assert that we call VerifyConfiguration correctly."""
self.assertEqual(conf, self.conf)
return (0, 1)
config.VerifyConfiguration = FakeVerifyConfiguration
c = command.Repair()
self.assertEqual(1, c.Run(self.conf, ['-m', config.MAP_PASSWORD]))
class TestHelpCommand(unittest.TestCase):
def setUp(self):
self.stdout = sys.stdout
sys.stdout = StringIO()
def tearDown(self):
sys.stdout = self.stdout
def testHelp(self):
c = command.Help()
self.assertNotEqual(None, c.Help())
def testRunWithNoParameters(self):
c = command.Help()
self.assertEqual(0, c.Run(None, []))
def testRunHelpHelp(self):
c = command.Help()
self.assertEqual(0, c.Run(None, ['help']))
class TestStatusCommand(mox.MoxTestBase):
def setUp(self):
super(TestStatusCommand, self).setUp()
class DummyConfig(object):
pass
class DummySource(source.Source):
name = 'dummy'
def Verify(self):
return 0
# stub out parts of update.MapUpdater
class DummyUpdater(map_updater.MapUpdater):
def GetModifyTimestamp(self):
return 1
def GetUpdateTimestamp(self):
return 2
        # Add dummy source to the set of source implementations
source_factory.RegisterImplementation(DummySource)
self.conf = DummyConfig()
self.conf.timestamp_dir = 'TEST_DIR'
self.conf.options = {
config.MAP_PASSWORD: config.MapOptions(),
config.MAP_AUTOMOUNT: config.MapOptions()
}
self.conf.options[config.MAP_PASSWORD].cache = {'name': 'dummy'}
self.conf.options[config.MAP_PASSWORD].source = {'name': 'dummy'}
self.conf.options[config.MAP_AUTOMOUNT].cache = {'name': 'dummy'}
self.conf.options[config.MAP_AUTOMOUNT].source = {'name': 'dummy'}
self.original_verify_configuration = config.VerifyConfiguration
self.original_create = cache_factory.Create
self.original_updater = map_updater.MapUpdater
# stub this out for all tests
map_updater.MapUpdater = DummyUpdater
def tearDown(self):
super(TestStatusCommand, self).tearDown()
config.VerifyConfiguration = self.original_verify_configuration
cache_factory.Create = self.original_create
map_updater.MapUpdater = self.original_updater
def testHelp(self):
c = command.Status()
self.assertNotEqual(None, c.Help())
def testRunWithNoParameters(self):
c = command.Status()
self.conf.maps = []
self.assertEqual(0, c.Run(self.conf, []))
def testRunWithBadParameters(self):
c = command.Status()
# Trap stderr so the unit test runs clean,
# since unit test status is printed on stderr.
dev_null = StringIO()
stderr = sys.stderr
sys.stderr = dev_null
self.assertEqual(2, c.Run(None, ['--invalid']))
sys.stderr = stderr
def testEpochFormatParameter(self):
c = command.Status()
(options, args) = c.parser.parse_args([])
self.assertEqual(False, options.epoch)
self.assertEqual([], args)
def testObeysMapsFlag(self):
stdout_buffer = StringIO()
old_stdout = sys.stdout
sys.stdout = stdout_buffer
c = command.Status()
self.assertEqual(0, c.Run(self.conf, ['-m', 'passwd']))
sys.stdout = old_stdout
self.assertNotEqual(0, len(stdout_buffer.getvalue()))
self.assertFalse(stdout_buffer.getvalue().find('group') >= 0)
def testGetSingleMapMetadata(self):
# test both automount and non-automount maps.
# cache mock is returned by FakeCreate() for automount maps
cache_mock = self.mox.CreateMock(caches.Cache)
cache_mock.GetMapLocation().AndReturn('/etc/auto.master')
self.mox.StubOutWithMock(cache_factory, 'Create')
cache_factory.Create(
self.conf.options[config.MAP_AUTOMOUNT].cache,
config.MAP_AUTOMOUNT,
automount_mountpoint='automount_mountpoint').AndReturn(cache_mock)
self.mox.ReplayAll()
c = command.Status()
values = c.GetSingleMapMetadata(config.MAP_PASSWORD, self.conf)
self.assertTrue('map' in values[0])
self.assertTrue('key' in values[0])
self.assertTrue('value' in values[0])
values = c.GetSingleMapMetadata(
config.MAP_AUTOMOUNT,
self.conf,
automount_mountpoint='automount_mountpoint')
self.assertTrue('map' in values[0])
self.assertTrue('key' in values[0])
self.assertTrue('value' in values[0])
self.assertTrue('automount' in values[0])
def testGetSingleMapMetadataTimestampEpoch(self):
c = command.Status()
values = c.GetSingleMapMetadata(config.MAP_PASSWORD,
self.conf,
epoch=True)
self.assertTrue('map' in values[0])
self.assertTrue('key' in values[0])
self.assertTrue('value' in values[0])
        # values below are returned by DummyUpdater
self.assertEqual(1, values[0]['value'])
self.assertEqual(2, values[1]['value'])
def testGetSingleMapMetadataTimestampEpochFalse(self):
# set the timezone so we get a consistent return value
os.environ['TZ'] = 'MST'
time.tzset()
c = command.Status()
values = c.GetSingleMapMetadata(config.MAP_PASSWORD,
self.conf,
epoch=False)
self.assertEqual('Wed Dec 31 17:00:02 1969', values[1]['value'])
def testGetAutomountMapMetadata(self):
# need to stub out GetSingleMapMetadata (tested above) and then
# stub out cache_factory.Create to return a cache mock that spits
# out an iterable map for the function to use.
# stub out GetSingleMapMetadata
class DummyStatus(command.Status):
def GetSingleMapMetadata(self,
unused_map_name,
unused_conf,
automount_mountpoint=None,
epoch=False):
return {
'map': 'map_name',
'last-modify-timestamp': 'foo',
'last-update-timestamp': 'bar'
}
# the master map to loop over
master_map = automount.AutomountMap()
master_map.Add(
automount.AutomountMapEntry({
'key': '/home',
'location': '/etc/auto.home'
}))
master_map.Add(
automount.AutomountMapEntry({
'key': '/auto',
'location': '/etc/auto.auto'
}))
# mock out a cache to return the master map
cache_mock = self.mox.CreateMock(caches.Cache)
cache_mock.GetMap().AndReturn(master_map)
self.mox.StubOutWithMock(cache_factory, 'Create')
cache_factory.Create(self.conf.options[config.MAP_AUTOMOUNT].cache,
config.MAP_AUTOMOUNT,
automount_mountpoint=None).AndReturn(cache_mock)
self.mox.ReplayAll()
c = DummyStatus()
value_list = c.GetAutomountMapMetadata(self.conf)
self.assertEqual(9, len(value_list))
if __name__ == '__main__':
unittest.main()
nsscache-version-0.42/nss_cache/config.py000066400000000000000000000261111402531134600204560ustar00rootroot00000000000000# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Configuration classes for nss_cache module.
These classes perform command line and file-based configuration loading
and parsing for the nss_cache module.
"""
__author__ = 'vasilios@google.com (Vasilios Hoffman)'
from configparser import ConfigParser
import logging
import re
# known nss map types.
MAP_PASSWORD = 'passwd'
MAP_GROUP = 'group'
MAP_SHADOW = 'shadow'
MAP_NETGROUP = 'netgroup'
MAP_AUTOMOUNT = 'automount'
MAP_SSHKEY = 'sshkey'
# accepted commands.
CMD_HELP = 'help'
CMD_REPAIR = 'repair'
CMD_STATUS = 'status'
CMD_UPDATE = 'update'
CMD_VERIFY = 'verify'
# default file locations
FILE_NSSWITCH = '/etc/nsswitch.conf'
# update method types
UPDATER_FILE = 'file'
UPDATER_MAP = 'map'
class Config(object):
"""Data container for runtime configuration information.
Global information such as the command, configured maps, etc, are
loaded into this object. Source and cache configuration
information is also stored here.
However since each map can be configured against a different
source and cache implementation we have to store per-map
configuration information. This is done via a Config().options
dictionary with the map name as the key and a MapOptions object as
the value.
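    For example (illustrative), after LoadConfig() has run:
        conf.options['passwd'].source  # e.g. {'name': 'ldap', ...}
        conf.options['passwd'].cache   # e.g. {'name': 'files', ...}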
"""
# default config file.
NSSCACHE_CONFIG = '/etc/nsscache.conf'
# known config file option names
OPT_SOURCE = 'source'
OPT_CACHE = 'cache'
OPT_MAPS = 'maps'
OPT_LOCKFILE = 'lockfile'
OPT_TIMESTAMP_DIR = 'timestamp_dir'
def __init__(self, env):
"""Initialize defaults for data we hold.
Args:
env: dictionary of environment variables (typically os.environ)
"""
# override constants based on ENV vars
if 'NSSCACHE_CONFIG' in env:
self.config_file = env['NSSCACHE_CONFIG']
else:
self.config_file = self.NSSCACHE_CONFIG
# default values
self.command = None
self.help_command = None
self.maps = []
self.options = {}
self.lockfile = None
self.timestamp_dir = None
self.log = logging.getLogger(__name__)
def __repr__(self):
"""String representation of this object."""
# self.options is of variable length so we are forced to do
# some fugly concatenation here to print our config in a
# readable fashion.
        string = (('<Config: command=%r, help_command=%r, maps=%r, '
                   'lockfile=%r, timestamp_dir=%r') %
                  (self.command, self.help_command, self.maps, self.lockfile,
                   self.timestamp_dir))
        for key in self.options:
            string = '%s, %s=%r' % (string, key, self.options[key])
        return '%s>' % string
class MapOptions(object):
"""Data container for individual maps.
Each map is configured against a source and cache. The dictionaries
used by the source and cache implementations are stored here.
"""
def __init__(self):
"""Initialize default values."""
self.cache = {}
self.source = {}
def __repr__(self):
"""String representation of this object."""
        return '<MapOptions: cache=%r, source=%r>' % (self.cache, self.source)
#
# Configuration itself is done through module-level methods. These
# methods are below.
#
def LoadConfig(configuration):
"""Load the on-disk configuration file and merge it into config.
Args:
configuration: a config.Config object
Raises:
error.NoConfigFound: no configuration file was found
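    A minimal configuration file (illustrative) looks like:
        [DEFAULT]
        source = ldap
        cache = files
        maps = passwd, group
        timestamp_dir = /var/lib/nsscache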
"""
parser = ConfigParser()
# load config file
configuration.log.debug('Attempting to parse configuration file: %s',
configuration.config_file)
parser.read(configuration.config_file)
# these are required, and used as defaults for each section
default = 'DEFAULT'
default_source = FixValue(parser.get(default, Config.OPT_SOURCE))
default_cache = FixValue(parser.get(default, Config.OPT_CACHE))
# this is also required, but global only
# TODO(v): make this default to /var/lib/nsscache before next release
configuration.timestamp_dir = FixValue(
parser.get(default, Config.OPT_TIMESTAMP_DIR))
# optional defaults
if parser.has_option(default, Config.OPT_LOCKFILE):
configuration.lockfile = FixValue(
parser.get(default, Config.OPT_LOCKFILE))
if not configuration.maps:
# command line did not override
maplist = FixValue(parser.get(default, Config.OPT_MAPS))
# special case for empty string, or split(',') will return a
# non-empty list
if maplist:
configuration.maps = [m.strip() for m in maplist.split(',')]
else:
configuration.maps = []
# build per-map source and cache dictionaries and store
# them in MapOptions() objects.
for map_name in configuration.maps:
map_options = MapOptions()
source = default_source
cache = default_cache
# override source and cache if necessary
if parser.has_section(map_name):
if parser.has_option(map_name, Config.OPT_SOURCE):
source = FixValue(parser.get(map_name, Config.OPT_SOURCE))
if parser.has_option(map_name, Config.OPT_CACHE):
cache = FixValue(parser.get(map_name, Config.OPT_CACHE))
# load source and cache default options
map_options.source = Options(parser.items(default), source)
map_options.cache = Options(parser.items(default), cache)
# overide with any section-specific options
if parser.has_section(map_name):
options = Options(parser.items(map_name), source)
map_options.source.update(options)
options = Options(parser.items(map_name), cache)
map_options.cache.update(options)
# used to instantiate the specific cache/source
map_options.source['name'] = source
map_options.cache['name'] = cache
# save final MapOptions() in the parent config object
configuration.options[map_name] = map_options
configuration.log.info('Configured maps are: %s',
', '.join(configuration.maps))
configuration.log.debug('loaded configuration: %r', configuration)
def Options(items, name):
"""Returns a dict of options specific to an implementation.
This is used to retrieve a dict of options for a given
implementation. We look for configuration options in the form of
name_option and ignore the rest.
Args:
items: [('key1', 'value1'), ('key2, 'value2'), ...]
name: 'foo'
Returns:
dictionary of option:value pairs
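    For example (illustrative, following the name_option convention
    described above):
        Options([('ldap_uri', 'TEST_URI'), ('cache', 'bar')], 'ldap')
        # returns {'uri': 'TEST_URI'}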
"""
options = {}
option_re = re.compile(r'^%s_(.+)' % name)
for item in items:
match = option_re.match(item[0])
if match:
options[match.group(1)] = FixValue(item[1])
return options
def FixValue(value):
"""Helper function to fix values loaded from a config file.
    Currently we strip surrounding quotes and convert numeric strings to
    ints or floats for configuration parameters expecting numerical data.
Args:
value: value to be converted
Returns:
fixed value
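    For example (illustrative):
        FixValue('"ldap"')  # 'ldap'  (quotes stripped)
        FixValue('1.23')    # 1.23    (float)
        FixValue('1')       # 1       (int)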
"""
# Strip quotes if necessary.
if ((value.startswith('"') and value.endswith('"')) or
(value.startswith('\'') and value.endswith('\''))):
value = value[1:-1]
    # Convert to a number if necessary. Python converts between floats and
    # ints on demand, but won't attempt string conversion automagically,
    # so we try int() first and fall back to float().
    #
    # Caveat: '1' becomes the int 1 and '1.5' the float 1.5; if a specific
    # numeric type is needed explicitly the caller will have to cast. This
    # is simplest.
try:
value = int(value)
except ValueError:
try:
value = float(value)
except ValueError:
return value
return value
def ParseNSSwitchConf(nsswitch_filename):
"""Parse /etc/nsswitch.conf and return the sources for each map.
Args:
nsswitch_filename: Full path to an nsswitch.conf to parse. See manpage
nsswitch.conf(5) for full details on the format expected.
Returns:
a dictionary keyed by map names and containing a list of sources
for each map.
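    For example (illustrative), a file containing the line
    'passwd: files db' yields {'passwd': ['files', 'db']}.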
"""
with open(nsswitch_filename, 'r') as nsswitch_file:
nsswitch = {}
map_re = re.compile(r'^([a-z]+): *(.*)$')
for line in nsswitch_file:
match = map_re.match(line)
if match:
sources = match.group(2).split()
nsswitch[match.group(1)] = sources
return nsswitch
def VerifyConfiguration(conf, nsswitch_filename=FILE_NSSWITCH):
"""Verify that the system configuration matches the nsscache configuration.
Checks that NSS configuration has the cache listed for each map that
is configured in the nsscache configuration, i.e. that the system is
configured to use the maps we are building.
Args:
conf: a Configuration
nsswitch_filename: optionally the name of the file to parse
Returns:
(warnings, errors) a tuple counting the number of warnings and
errors detected
"""
(warnings, errors) = (0, 0)
if not conf.maps:
logging.error('No maps are configured.')
errors += 1
# Verify that at least one supported module is configured in nsswitch.conf.
nsswitch = ParseNSSwitchConf(nsswitch_filename)
for configured_map in conf.maps:
if configured_map == 'sshkey':
continue
if conf.options[configured_map].cache['name'] == 'nssdb':
nss_module_name = 'db'
if conf.options[configured_map].cache['name'] == 'files':
nss_module_name = 'files'
if ('cache_filename_suffix' in conf.options[configured_map].cache
and
conf.options[configured_map].cache['cache_filename_suffix']
== 'cache'):
# We are configured for libnss-cache for this map.
nss_module_name = 'cache'
else:
# TODO(jaq): default due to hysterical raisins
nss_module_name = 'db'
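        # Illustrative summary of the branches above (configured cache name
        # -> NSS module expected in nsswitch.conf):
        #   nssdb                           -> 'db'
        #   files                           -> 'files'
        #   files + cache_filename_suffix   -> 'cache' (libnss-cache)
        #   anything else                   -> 'db' (legacy default)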
if nss_module_name not in nsswitch[configured_map]:
logging.warning(('nsscache is configured to build maps for %r, '
'but NSS is not configured (in %r) to use it'),
configured_map, nsswitch_filename)
warnings += 1
return (warnings, errors)
nsscache-version-0.42/nss_cache/config_test.py000066400000000000000000000337061402531134600215250ustar00rootroot00000000000000# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Unit tests for nss_cache/config.py."""
__author__ = 'vasilios@google.com (Vasilios Hoffman)'
import os
import shutil
import tempfile
import unittest
from nss_cache import config
class TestConfig(unittest.TestCase):
"""Unit tests for config.Config()."""
def testConfigInit(self):
env = {'NSSCACHE_CONFIG': 'test.conf'}
conf = config.Config(env)
self.assertEqual(conf.config_file,
env['NSSCACHE_CONFIG'],
msg='Failed to override NSSCACHE_CONFIG.')
class TestMapOptions(unittest.TestCase):
"""Unit tests for config.MapOptions()."""
def testMapOptionsInit(self):
mapconfig = config.MapOptions()
self.assertTrue(isinstance(mapconfig.cache, dict))
self.assertTrue(isinstance(mapconfig.source, dict))
class TestClassMethods(unittest.TestCase):
"""Unit tests for class-level methods in config.py."""
def setUp(self):
# create a directory with a writeable copy of nsscache.conf in it
self.workdir = tempfile.mkdtemp()
conf_filename = 'nsscache.conf'
self.conf_filename = os.path.join(self.workdir, conf_filename)
shutil.copy(conf_filename, self.conf_filename)
os.chmod(self.conf_filename, 0o640)
# prepare a config object with this config
self.conf = config.Config({})
self.conf.config_file = self.conf_filename
def tearDown(self):
shutil.rmtree(self.workdir)
def testLoadConfigSingleMap(self):
conf_file = open(self.conf_filename, 'w')
conf_file.write('[DEFAULT]\n'
'source = foo\n'
'cache = foo\n'
'maps = foo\n'
'timestamp_dir = foo\n')
conf_file.close()
config.LoadConfig(self.conf)
self.assertEqual(['foo'], self.conf.maps)
def testLoadConfigTwoMaps(self):
conf_file = open(self.conf_filename, 'w')
conf_file.write('[DEFAULT]\n'
'source = foo\n'
'cache = foo\n'
'maps = foo, bar\n'
'timestamp_dir = foo\n')
conf_file.close()
config.LoadConfig(self.conf)
self.assertEqual(['foo', 'bar'], self.conf.maps)
def testLoadConfigMapsWhitespace(self):
conf_file = open(self.conf_filename, 'w')
conf_file.write('[DEFAULT]\n'
'source = foo\n'
'cache = foo\n'
'maps = foo, bar , baz\n'
'timestamp_dir = foo\n')
conf_file.close()
config.LoadConfig(self.conf)
self.assertEqual(['foo', 'bar', 'baz'], self.conf.maps)
def testLoadConfigExample(self):
"""Test that we parse and load the example config.
Note that this also tests MapOptions() creation and our overriding
of defaults in LoadConfig.
This requires that nsscache.conf exists in the top of the source tree.
Changes to the configuration options may break this test.
"""
conf = self.conf
config.LoadConfig(conf)
passwd = conf.options['passwd']
group = conf.options['group']
shadow = conf.options['shadow']
automount = conf.options['automount']
self.assertTrue(isinstance(passwd, config.MapOptions))
self.assertTrue(isinstance(group, config.MapOptions))
self.assertTrue(isinstance(shadow, config.MapOptions))
self.assertTrue(isinstance(automount, config.MapOptions))
self.assertEqual(passwd.source['name'], 'ldap')
self.assertEqual(group.source['name'], 'ldap')
self.assertEqual(shadow.source['name'], 'ldap')
self.assertEqual(automount.source['name'], 'ldap')
self.assertEqual(passwd.cache['name'], 'files')
self.assertEqual(group.cache['name'], 'files')
self.assertEqual(shadow.cache['name'], 'files')
self.assertEqual(automount.cache['name'], 'files')
self.assertEqual(passwd.source['base'], 'ou=people,dc=example,dc=com')
self.assertEqual(passwd.source['filter'], '(objectclass=posixAccount)')
self.assertEqual(group.source['base'], 'ou=group,dc=example,dc=com')
self.assertEqual(group.source['filter'], '(objectclass=posixGroup)')
def testLoadConfigOptionalDefaults(self):
conf_file = open(self.conf_filename, 'w')
conf_file.write('[DEFAULT]\n'
'source = foo\n'
'cache = foo\n'
'maps = foo, bar , baz\n'
'lockfile = foo\n'
'timestamp_dir = foo\n')
conf_file.close()
config.LoadConfig(self.conf)
self.assertEqual(self.conf.lockfile, 'foo')
def testLoadConfigStripQuotesFromStrings(self):
conf_file = open(self.conf_filename, 'w')
conf_file.write('[DEFAULT]\n'
'source = "ldap"\n' # needs to be ldap due to magic
'cache = \'b\'ar\'\n'
'maps = quux\n'
'timestamp_dir = foo\n'
'ldap_tls_require_cert = \'blah\'\n'
'[quux]\n'
'ldap_klingon = "qep\'a\' wa\'maH loS\'DIch"\n')
conf_file.close()
config.LoadConfig(self.conf)
self.assertEqual('ldap', self.conf.options['quux'].source['name'])
self.assertEqual('b\'ar', self.conf.options['quux'].cache['name'])
self.assertEqual('blah',
self.conf.options['quux'].source['tls_require_cert'])
self.assertEqual('qep\'a\' wa\'maH loS\'DIch',
self.conf.options['quux'].source['klingon'])
def testLoadConfigConvertsNumbers(self):
conf_file = open(self.conf_filename, 'w')
conf_file.write('[DEFAULT]\n'
'source = foo\n'
'cache = foo\n'
'maps = foo\n'
'timestamp_dir = foo\n'
'foo_string = test\n'
'foo_float = 1.23\n'
'foo_int = 1\n')
conf_file.close()
config.LoadConfig(self.conf)
foo_dict = self.conf.options['foo'].source
self.assertTrue(isinstance(foo_dict['string'], str))
self.assertTrue(isinstance(foo_dict['float'], float))
self.assertTrue(isinstance(foo_dict['int'], int))
self.assertEqual(foo_dict['string'], 'test')
self.assertEqual(foo_dict['float'], 1.23)
self.assertEqual(foo_dict['int'], 1)
def testOptions(self):
# check the empty case.
options = config.Options([], 'foo')
self.assertEqual(options, {})
# create a list like from ConfigParser.items()
items = [('maps', 'foo, bar, foobar'), ('nssdb_dir', '/path/to/dir'),
('ldap_uri', 'TEST_URI'), ('source', 'foo'), ('cache', 'bar'),
('ldap_base', 'TEST_BASE'), ('ldap_filter', 'TEST_FILTER')]
options = config.Options(items, 'ldap')
self.assertTrue('uri' in options)
self.assertTrue('base' in options)
self.assertTrue('filter' in options)
self.assertEqual(options['uri'], 'TEST_URI')
self.assertEqual(options['base'], 'TEST_BASE')
self.assertEqual(options['filter'], 'TEST_FILTER')
def testParseNSSwitchConf(self):
nsswitch_filename = os.path.join(self.workdir, 'nsswitch.conf')
nsswitch_file = open(nsswitch_filename, 'w')
nsswitch_file.write('passwd: files db\n')
nsswitch_file.write('group: files db\n')
nsswitch_file.write('shadow: files db\n')
nsswitch_file.close()
expected_switch = {
'passwd': ['files', 'db'],
'group': ['files', 'db'],
'shadow': ['files', 'db']
}
self.assertEqual(expected_switch,
config.ParseNSSwitchConf(nsswitch_filename))
os.unlink(nsswitch_filename)
def testVerifyConfiguration(self):
conf_file = open(self.conf_filename, 'w')
conf_file.write('[DEFAULT]\n'
'source = foo\n'
'cache = foo\n'
'maps = passwd, group, shadow\n'
'timestamp_dir = foo\n')
conf_file.close()
config.LoadConfig(self.conf)
nsswitch_filename = os.path.join(self.workdir, 'nsswitch.conf')
nsswitch_file = open(nsswitch_filename, 'w')
nsswitch_file.write('passwd: files db\n')
nsswitch_file.write('group: files db\n')
nsswitch_file.write('shadow: files db\n')
nsswitch_file.close()
self.assertEqual((0, 0),
config.VerifyConfiguration(self.conf,
nsswitch_filename))
os.unlink(nsswitch_filename)
def testVerifyConfigurationWithCache(self):
conf_file = open(self.conf_filename, 'w')
conf_file.write('[DEFAULT]\n'
'source = foo\n'
'cache = files\n'
'maps = passwd, group, shadow\n'
'timestamp_dir = foo\n'
'files_cache_filename_suffix = cache')
conf_file.close()
config.LoadConfig(self.conf)
nsswitch_filename = os.path.join(self.workdir, 'nsswitch.conf')
nsswitch_file = open(nsswitch_filename, 'w')
nsswitch_file.write('passwd: cache\n')
nsswitch_file.write('group: cache\n')
nsswitch_file.write('shadow: cache\n')
nsswitch_file.close()
self.assertEqual((0, 0),
config.VerifyConfiguration(self.conf,
nsswitch_filename))
os.unlink(nsswitch_filename)
def testVerifyConfigurationWithFiles(self):
conf_file = open(self.conf_filename, 'w')
conf_file.write('[DEFAULT]\n'
'source = foo\n'
'cache = files\n'
'maps = passwd, group, shadow\n'
'timestamp_dir = foo\n')
conf_file.close()
config.LoadConfig(self.conf)
nsswitch_filename = os.path.join(self.workdir, 'nsswitch.conf')
nsswitch_file = open(nsswitch_filename, 'w')
nsswitch_file.write('passwd: files\n')
nsswitch_file.write('group: files\n')
nsswitch_file.write('shadow: files\n')
nsswitch_file.close()
self.assertEqual((0, 0),
config.VerifyConfiguration(self.conf,
nsswitch_filename))
os.unlink(nsswitch_filename)
def testVerifyBadConfigurationWithCache(self):
conf_file = open(self.conf_filename, 'w')
conf_file.write('[DEFAULT]\n'
'source = foo\n'
'cache = files\n'
'maps = passwd, group, shadow\n'
'timestamp_dir = foo\n'
'files_cache_filename_suffix = cache')
conf_file.close()
config.LoadConfig(self.conf)
nsswitch_filename = os.path.join(self.workdir, 'nsswitch.conf')
nsswitch_file = open(nsswitch_filename, 'w')
nsswitch_file.write('passwd: files\n')
nsswitch_file.write('group: files\n')
nsswitch_file.write('shadow: files\n')
nsswitch_file.close()
self.assertEqual((3, 0),
config.VerifyConfiguration(self.conf,
nsswitch_filename))
os.unlink(nsswitch_filename)
def testVerifyBadConfigurationIncrementsWarningCount(self):
conf_file = open(self.conf_filename, 'w')
conf_file.write('[DEFAULT]\n'
'source = foo\n'
'cache = foo\n'
'maps = passwd, group, shadow\n'
'timestamp_dir = foo\n')
conf_file.close()
config.LoadConfig(self.conf)
nsswitch_filename = os.path.join(self.workdir, 'nsswitch.conf')
nsswitch_file = open(nsswitch_filename, 'w')
nsswitch_file.write('passwd: files ldap\n')
nsswitch_file.write('group: files db\n')
nsswitch_file.write('shadow: files db\n')
nsswitch_file.close()
self.assertEqual((1, 0),
config.VerifyConfiguration(self.conf,
nsswitch_filename))
os.unlink(nsswitch_filename)
def testVerifyNoMapConfigurationIsError(self):
conf_file = open(self.conf_filename, 'w')
conf_file.write('[DEFAULT]\n'
'source = foo\n'
'cache = foo\n'
'maps = \n'
'timestamp_dir = foo\n')
conf_file.close()
config.LoadConfig(self.conf)
nsswitch_filename = os.path.join(self.workdir, 'nsswitch.conf')
nsswitch_file = open(nsswitch_filename, 'w')
nsswitch_file.write('passwd: files ldap\n')
nsswitch_file.close()
self.assertEqual((0, 1),
config.VerifyConfiguration(self.conf,
nsswitch_filename))
os.unlink(nsswitch_filename)
if __name__ == '__main__':
unittest.main()
nsscache-version-0.42/nss_cache/error.py000066400000000000000000000036471402531134600203530ustar00rootroot00000000000000# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Exception classes for nss_cache module."""
__author__ = 'vasilios@google.com (Vasilios Hoffman)'
class Error(Exception):
"""Base exception class for nss_cache."""
pass
class CacheNotFound(Error):
"""Raised when a local cache is missing."""
pass
class CacheInvalid(Error):
"""Raised when a cache is invalid."""
pass
class CommandParseError(Error):
"""Raised when the command line fails to parse correctly."""
pass
class ConfigurationError(Error):
"""Raised when there is a problem with configuration values."""
pass
class EmptyMap(Error):
"""Raised when an empty map is discovered and one is not expected."""
pass
class NoConfigFound(Error):
"""Raised when no configuration file is loaded."""
pass
class PermissionDenied(Error):
"""Raised when nss_cache cannot access a resource."""
pass
class UnsupportedMap(Error):
"""Raised when trying to use an unsupported map type."""
pass
class InvalidMap(Error):
"""Raised when an invalid map is encountered."""
pass
class SourceUnavailable(Error):
"""Raised when a source is unavailable."""
pass
class InvalidMerge(Error):
"""An invalid merge was attempted."""
nsscache-version-0.42/nss_cache/error_test.py000066400000000000000000000066001402531134600214020ustar00rootroot00000000000000# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Unit tests for nss_cache/error.py."""
__author__ = 'vasilios@google.com (Vasilios Hoffman)'
import unittest
from nss_cache import error
class TestError(unittest.TestCase):
"""Unit tests for error.py."""
def testError(self):
"""We can throw an error.Error."""
class Ooops(object):
"""Raises error.Error."""
def __init__(self):
raise error.Error
self.assertRaises(error.Error, Ooops)
def testCacheNotFound(self):
"""We can throw an error.CacheNotFound."""
class Ooops(object):
"""Raises error.CacheNotFound."""
def __init__(self):
raise error.CacheNotFound
self.assertRaises(error.CacheNotFound, Ooops)
def testCommandParseError(self):
"""We can throw an error.CommandParseError."""
class Ooops(object):
"""Raises error.CommandParseError."""
def __init__(self):
raise error.CommandParseError
self.assertRaises(error.CommandParseError, Ooops)
def testConfigurationError(self):
"""We can throw an error.ConfigurationError."""
class Ooops(object):
"""Raises error.ConfigurationError."""
def __init__(self):
raise error.ConfigurationError
self.assertRaises(error.ConfigurationError, Ooops)
def testEmptyMap(self):
"""error.EmptyMap is raisable."""
def Kaboom():
raise error.EmptyMap
self.assertRaises(error.EmptyMap, Kaboom)
def testNoConfigFound(self):
"""We can throw an error.NoConfigFound."""
class Ooops(object):
"""Raises error.NoConfigFound."""
def __init__(self):
raise error.NoConfigFound
self.assertRaises(error.NoConfigFound, Ooops)
def testPermissionDenied(self):
"""error.PermissionDenied is raisable."""
def Kaboom():
raise error.PermissionDenied
self.assertRaises(error.PermissionDenied, Kaboom)
def testUnsupportedMap(self):
"""We can throw an error.UnsupportedMap."""
class Ooops(object):
"""Raises error.UnsupportedMap."""
def __init__(self):
raise error.UnsupportedMap
self.assertRaises(error.UnsupportedMap, Ooops)
def testSourceUnavailable(self):
"""We can throw an error.SourceUnavailable."""
class Ooops(object):
"""Raises error.SourceUnavailable."""
def __init__(self):
raise error.SourceUnavailable
self.assertRaises(error.SourceUnavailable, Ooops)
if __name__ == '__main__':
unittest.main()
nsscache-version-0.42/nss_cache/lock.py000066400000000000000000000174331402531134600201500ustar00rootroot00000000000000# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Lock management for nss_cache module."""
__author__ = 'vasilios@google.com (Vasilios Hoffman)'
import errno
import fcntl
import logging
import os
import re
import signal
import stat
import sys
# It would be interesting to subclass mutex, but we don't need the
# queueing functionality.
class PidFile(object):
"""Interprocess locking via fcntl and a pid file.
We use fcntl to manage locks between processes, as the kernel will
release the lock when the process dies no matter what, so it works
quite well.
We store the pid in the file we use so that 3rd party programs,
primarily small shell scripts, can easily see who has (or had) the
    lock via the stored pid. We don't clean up the pid file on exit
    because most programs will have to check whether the program is
    still running anyway.
We can forcibly take a lock by deleting the file and re-creating
it. When we do so, we check if the pid in the file is running and
send it a SIGTERM *if and only if* it has a commandline with
'nsscache' somewhere in the string.
We try to kill the process to avoid it completing after us and
overwriting any changes. We check for 'nsscache' to avoid killing
a re-used PID. We are not paranoid, we send the SIGTERM and
assume it dies.
WARNING: Use over NFS with *extreme* caution. fcntl locking can
be configured to work, but your mileage can and will vary.
"""
STATE_DIR = '/var/run'
PROC_DIR = '/proc'
PROG_NAME = 'nsscache'
def __init__(self, filename=None, pid=None):
"""Initialize the PidFile object."""
self._locked = False
self._file = None
self.filename = filename
self.pid = pid
# Setup logging.
self.log = logging.getLogger(__name__)
if self.pid is None:
self.pid = os.getpid()
# If no filename is given, default to the basename we were
# invoked with.
if self.filename is None:
basename = os.path.basename(sys.argv[0])
if not basename:
# We were invoked from a python interpreter with
# bad arguments, or otherwise loaded without sys.argv
# being set.
                self.log.critical('Cannot determine lock file name!')
raise TypeError('missing required argument: filename')
self.filename = '%s/%s' % (self.STATE_DIR, basename)
self.log.debug('using %s for lock file', self.filename)
def __del__(self):
"""Release our pid file on object destruction."""
if self.Locked():
self.Unlock()
def _Open(self, filename=None):
"""Create our file and store the file object."""
if filename is None:
filename = self.filename
# We want to create this file if it doesn't exist, but 'w'
# will truncate, so we use 'a+' and seek. We don't truncate
# the file because we haven't tested if it is locked by
        # another program yet; that check is done later by the fcntl module.
self._file = open(filename, 'a+')
self._file.seek(0)
# Set permissions.
os.chmod(filename,
stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
def Lock(self, force=False):
"""Open our pid file and lock it.
Args:
force: optional flag to override the lock.
Returns:
True if successful
False otherwise
"""
if self._file is None:
# Open the file and trap permission denied.
try:
self._Open()
except IOError as e:
if e.errno == errno.EACCES:
self.log.warning('Permission denied opening lock file: %s',
self.filename)
return False
raise
# Try to get the lock.
return_val = False
try:
fcntl.lockf(self._file, fcntl.LOCK_EX | fcntl.LOCK_NB)
return_val = True
except IOError as e:
if e.errno in [errno.EACCES, errno.EAGAIN]:
# Catch the error raised when the file is locked.
if not force:
self.log.debug('%s already locked!', self.filename)
return False
else:
# Otherwise re-raise it.
raise
# Check if we need to forcibly re-try the lock.
if not return_val and force:
self.log.debug('retrying lock.')
# Try to kill the process with the lock.
self.SendTerm()
# Clear the lock.
self.ClearLock()
# Try to lock only once more -- else we might recurse forever!
return self.Lock(force=False)
# Store the pid.
self._file.truncate()
self._file.write('%s\n' % self.pid)
self._file.flush()
self.log.debug('successfully locked %s', self.filename)
self._locked = True
return return_val
def SendTerm(self):
"""Send a SIGTERM to the process in the pidfile.
We only send a SIGTERM if such a process exists and it has a
commandline including the string 'nsscache'.
"""
# Grab the pid
pid_content = self._file.read()
try:
pid = int(pid_content.strip())
        except (AttributeError, ValueError):
self.log.warning(
'Not sending TERM, could not parse pid file content: %r',
pid_content)
return
        self.log.debug('retrieved pid %d', pid)
# Reset the filehandle just in case.
self._file.seek(0)
# By reading cmdline out of /proc we establish:
# a) if a process with that pid exists.
# b) what the command line is, to see if it included 'nsscache'.
proc_path = '%s/%i/cmdline' % (self.PROC_DIR, pid)
try:
proc_file = open(proc_path, 'r')
except IOError as e:
if e.errno == errno.ENOENT:
self.log.debug('process does not exist, skipping signal.')
return
raise
cmdline = proc_file.read()
proc_file.close()
# See if it matches our program name regex.
cmd_re = re.compile(r'.*%s' % self.PROG_NAME)
if not cmd_re.match(cmdline):
self.log.debug('process is running but not %s, skipping signal',
self.PROG_NAME)
return
# Send a SIGTERM.
self.log.debug('sending SIGTERM to %i', pid)
os.kill(pid, signal.SIGTERM)
# We are not paranoid about success, so we're done!
return
def ClearLock(self):
"""Delete the pid file to remove any locks on it."""
self.log.debug('clearing old pid file: %s', self.filename)
self._file.close()
self._file = None
os.remove(self.filename)
def Locked(self):
"""Return True if locked, False if not."""
return self._locked
def Unlock(self):
"""Release our pid file."""
fcntl.lockf(self._file, fcntl.LOCK_UN)
self._locked = False
nsscache-version-0.42/nss_cache/lock_test.py000066400000000000000000000221151402531134600212000ustar00rootroot00000000000000# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Unit tests for nss_cache/lock.py."""
__author__ = 'vasilios@google.com (Vasilios Hoffman)'
import builtins
import errno
import fcntl
import os
import re
import shutil
import signal
import stat
import sys
import tempfile
import unittest
from mox3 import mox
from nss_cache import lock
class TestPidFile(mox.MoxTestBase):
"""Unit tests for PidFile class in lock.py."""
# Note that we do not test whether fcntl actually works as expected.
# That is outside the scope of unit tests and I'm not going to fork
# a child to test this, at least not now.
#
# Rest assured, it works as expected and fcntl throws an exception if
# another process has the lock.
#
# We also do not test if os.kill works as expected :)
def setUp(self):
super(TestPidFile, self).setUp()
self.workdir = tempfile.mkdtemp()
self.filename = '%s/%s' % (self.workdir, 'pidfile')
def tearDown(self):
shutil.rmtree(self.workdir)
super(TestPidFile, self).tearDown()
def testInit(self):
locker = lock.PidFile()
pid = os.getpid()
filename = os.path.basename(sys.argv[0])
filename = '%s/%s' % (locker.STATE_DIR, filename)
self.assertTrue(isinstance(locker, lock.PidFile))
self.assertEqual(locker.pid, pid)
self.assertEqual(locker.filename, filename)
self.assertEqual(locker._locked, False)
self.assertEqual(locker._file, None)
# also check the case where argv[0] is empty (interactively loaded)
full_path = sys.argv[0]
sys.argv[0] = ''
self.assertRaises(TypeError, lock.PidFile)
sys.argv[0] = full_path
def testHandleArgumentsProperly(self):
filename = 'TEST'
pid = 10
locker = lock.PidFile(filename=filename, pid=pid)
self.assertEqual(locker.filename, filename)
self.assertEqual(locker.pid, pid)
def testDestructorUnlocks(self):
yes = lock.PidFile()
self.mox.StubOutWithMock(yes, 'Locked')
self.mox.StubOutWithMock(yes, 'Unlock')
yes.Locked().AndReturn(True)
yes.Unlock()
no = lock.PidFile()
self.mox.StubOutWithMock(no, 'Locked')
no.Locked().AndReturn(False)
self.mox.ReplayAll()
# test the case where locked returns True.
yes.__del__()
# test the case where self.Locked() returns False.
no.__del__()
def testOpenCreatesAppropriateFileWithPerms(self):
locker = lock.PidFile(filename=self.filename)
locker._Open()
self.assertTrue(os.path.exists(self.filename))
file_mode = os.stat(self.filename)[stat.ST_MODE]
correct_mode = (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR |
stat.S_IRGRP | stat.S_IROTH)
self.assertEqual(file_mode, correct_mode)
os.remove(self.filename)
def testLockCreatesPidfiles(self):
locker = lock.PidFile()
self.mox.StubOutWithMock(locker, '_Open')
locker._Open().AndRaise(NotImplementedError)
self.mox.ReplayAll()
self.assertRaises(NotImplementedError, locker.Lock)
# Note that testing when self._file is not None is covered below.
def testLockLocksWithFcntl(self):
locker = lock.PidFile(pid='PID')
self.mox.StubOutWithMock(locker, '_file', use_mock_anything=True)
locker._file.truncate()
locker._file.write('PID\n')
locker._file.flush()
self.mox.StubOutWithMock(fcntl, 'lockf')
fcntl.lockf(locker._file, fcntl.LOCK_EX | fcntl.LOCK_NB)
self.mox.ReplayAll()
locker.Lock()
self.assertTrue(locker._locked)
# force __del__ to skip Unlock()
locker._locked = False
def testLockStoresPid(self):
locker = lock.PidFile(filename=self.filename, pid='PID')
locker.Lock()
pid_file = open(self.filename, 'r')
self.assertEqual(pid_file.read(), 'PID\n')
pid_file.close()
os.remove(self.filename)
def testLockTrapsPermissionDeniedOnly(self):
locker = lock.PidFile()
self.mox.StubOutWithMock(locker, '_Open')
locker._Open().AndRaise(IOError(errno.EACCES, ''))
locker._Open().AndRaise(IOError(errno.EIO, ''))
self.mox.ReplayAll()
self.assertEqual(False, locker.Lock())
self.assertRaises(IOError, locker.Lock)
def testForceLockTerminatesAndClearsLock(self):
locker = lock.PidFile(pid='PID')
self.mox.StubOutWithMock(locker, 'SendTerm')
locker.SendTerm()
self.mox.StubOutWithMock(locker, 'ClearLock')
locker.ClearLock()
self.mox.StubOutWithMock(locker, '_file')
self.mox.StubOutWithMock(fcntl, 'lockf')
fcntl.lockf(locker._file, fcntl.LOCK_EX | fcntl.LOCK_NB).AndRaise(
IOError(errno.EAGAIN, ''))
fcntl.lockf(locker._file, fcntl.LOCK_EX | fcntl.LOCK_NB).AndRaise(
IOError(errno.EAGAIN, ''))
self.mox.ReplayAll()
# This is a little weird due to recursion.
# The first time through lockf throws an error and we retry the lock.
# The 2nd time through we should fail, because lockf will still throw
# an error, so we expect False back and the above mock objects
# invoked.
self.assertFalse(locker.Lock(force=True))
def testSendTermMatchesCommandAndSendsTerm(self):
locker = lock.PidFile()
self.mox.StubOutWithMock(locker, '_file', use_mock_anything=True)
locker._file.read().AndReturn('1234')
locker._file.seek(0)
# Mock used in place of an re.compile() pattern -- expects the contents
# of our proc_file!
mock_re = self.mox.CreateMockAnything()
mock_re.match('TEST').AndReturn(True)
self.mox.StubOutWithMock(re, 'compile')
re.compile(r'.*nsscache').AndReturn(mock_re)
self.mox.StubOutWithMock(os, 'kill')
os.kill(1234, signal.SIGTERM)
# Create a file we open() in SendTerm().
proc_dir = '%s/1234' % self.workdir
proc_filename = '%s/cmdline' % proc_dir
os.mkdir(proc_dir)
proc_file = open(proc_filename, 'w')
proc_file.write('TEST')
proc_file.flush()
proc_file.close()
locker.PROC_DIR = self.workdir
self.mox.ReplayAll()
locker.SendTerm()
os.remove(proc_filename)
os.rmdir(proc_dir)
def testSendTermNoPid(self):
locker = lock.PidFile()
self.mox.StubOutWithMock(locker, '_file', use_mock_anything=True)
locker._file.read().AndReturn('\n')
        locker.PROC_DIR = self.workdir
self.mox.ReplayAll()
locker.SendTerm()
def testSendTermNonePid(self):
locker = lock.PidFile()
self.mox.StubOutWithMock(locker, '_file', use_mock_anything=True)
locker._file.read().AndReturn(None)
        locker.PROC_DIR = self.workdir
self.mox.ReplayAll()
locker.SendTerm()
def testSendTermTrapsENOENT(self):
locker = lock.PidFile()
self.mox.StubOutWithMock(locker, '_file', use_mock_anything=True)
locker._file.read().AndReturn('1234\n')
locker._file.seek(0)
        locker.PROC_DIR = self.workdir
self.mox.StubOutWithMock(builtins, 'open', use_mock_anything=True)
builtins.open(mox.IgnoreArg(), 'r').AndRaise(IOError(errno.ENOENT, ''))
self.mox.ReplayAll()
# self.workdir/1234/cmdline should not exist :)
self.assertFalse(os.path.exists('%s/1234/cmdline' % self.workdir))
locker.SendTerm()
def testClearLockRemovesPidFile(self):
# Create a pid file.
pidfile = open(self.filename, 'w')
pidfile.write('foo')
pidfile.flush()
locker = lock.PidFile(filename=self.filename)
# Cheat instead of calling open.
locker._file = pidfile
locker.ClearLock()
self.assertFalse(os.path.exists(self.filename))
def testLockedPredicate(self):
locker = lock.PidFile()
locker._locked = True
self.assertTrue(locker.Locked())
locker._locked = False
self.assertFalse(locker.Locked())
def testUnlockReleasesFcntlLock(self):
locker = lock.PidFile()
locker._file = 'FILE_OBJECT'
self.mox.StubOutWithMock(fcntl, 'lockf')
fcntl.lockf('FILE_OBJECT', fcntl.LOCK_UN)
self.mox.ReplayAll()
locker.Unlock()
self.assertFalse(locker._locked)
if __name__ == '__main__':
unittest.main()
nsscache-version-0.42/nss_cache/maps/000077500000000000000000000000001402531134600175765ustar00rootroot00000000000000nsscache-version-0.42/nss_cache/maps/__init__.py000066400000000000000000000000001402531134600216750ustar00rootroot00000000000000nsscache-version-0.42/nss_cache/maps/automount.py000066400000000000000000000040461402531134600222070ustar00rootroot00000000000000# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""An implementation of an automount map for nsscache.
AutomountMap: An implementation of NSS automount maps based on the Map
class.
AutomountMapEntry: An automount map entry based on the MapEntry class.
"""
__author__ = 'vasilios@google.com (Vasilios Hoffman)'
from nss_cache.maps import maps
class AutomountMap(maps.Map):
"""This class represents an NSS automount map.
Map data is stored as a list of MapEntry objects, see the abstract
class Map.
"""
def __init__(self, iterable=None):
"""Construct a AutomountMap object using optional iterable."""
super(AutomountMap, self).__init__(iterable)
def Add(self, entry):
"""Add a new object, verify it is a AutomountMapEntry object."""
if not isinstance(entry, AutomountMapEntry):
raise TypeError('Entry is not an AutomountMapEntry: %r' % entry)
return super(AutomountMap, self).Add(entry)
class AutomountMapEntry(maps.MapEntry):
"""This class represents NSS automount map entries."""
__slots__ = ('key', 'location', 'options')
_KEY = 'key'
_ATTRS = ('key', 'location', 'options')
def __init__(self, data=None):
"""Construct a AutomountMapEntry."""
self.key = None
self.location = None
self.options = None
super(AutomountMapEntry, self).__init__(data)
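# A minimal usage sketch (illustrative; values borrowed from the unit tests),
# mirroring an automount map line such as "foo -tcp nfsserver:/mah/stuff":
#
#     entry = AutomountMapEntry({'key': 'foo', 'options': '-tcp',
#                                'location': 'nfsserver:/mah/stuff'})
#     amap = AutomountMap([entry])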
nsscache-version-0.42/nss_cache/maps/automount_test.py000066400000000000000000000111721402531134600232440ustar00rootroot00000000000000# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Unit tests for automount.py.
We only test what is overridden in the automount subclasses; most
functionality lives in maps.py and is tested in passwd_test.py, since a
subclass is required to test the abstract class functionality.
"""
__author__ = 'vasilios@google.com (Vasilios Hoffman)'
import unittest
from nss_cache.maps import automount
from nss_cache.maps import passwd
class TestAutomountMap(unittest.TestCase):
"""Tests for the AutomountMap class."""
def __init__(self, obj):
"""Set some default avalible data for testing."""
super(TestAutomountMap, self).__init__(obj)
self._good_entry = automount.AutomountMapEntry()
self._good_entry.key = 'foo'
self._good_entry.options = '-tcp'
self._good_entry.location = 'nfsserver:/mah/stuff'
def testInit(self):
"""Construct an empty or seeded AutomountMap."""
self.assertEqual(automount.AutomountMap,
type(automount.AutomountMap()),
msg='failed to create an empty AutomountMap')
amap = automount.AutomountMap([self._good_entry])
self.assertEqual(self._good_entry,
amap.PopItem(),
msg='failed to seed AutomountMap with list')
self.assertRaises(TypeError, automount.AutomountMap, ['string'])
def testAdd(self):
"""Add throws an error for objects it can't verify."""
amap = automount.AutomountMap()
entry = self._good_entry
self.assertTrue(amap.Add(entry), msg='failed to append new entry.')
self.assertEqual(1, len(amap), msg='unexpected size for Map.')
ret_entry = amap.PopItem()
self.assertEqual(ret_entry, entry, msg='failed to pop correct entry.')
pentry = passwd.PasswdMapEntry()
pentry.name = 'foo'
pentry.uid = 10
pentry.gid = 10
self.assertRaises(TypeError, amap.Add, pentry)
class TestAutomountMapEntry(unittest.TestCase):
"""Tests for the AutomountMapEntry class."""
def testInit(self):
"""Construct an empty and seeded AutomountMapEntry."""
self.assertTrue(automount.AutomountMapEntry(),
msg='Could not create empty AutomountMapEntry')
seed = {'key': 'foo', 'location': '/dev/sda1'}
entry = automount.AutomountMapEntry(seed)
self.assertTrue(entry.Verify(),
msg='Could not verify seeded AutomountMapEntry')
self.assertEqual(entry.key,
'foo',
msg='Entry returned wrong value for name')
self.assertEqual(entry.options,
None,
msg='Entry returned wrong value for options')
self.assertEqual(entry.location,
'/dev/sda1',
msg='Entry returned wrong value for location')
def testAttributes(self):
"""Test that we can get and set all expected attributes."""
entry = automount.AutomountMapEntry()
entry.key = 'foo'
self.assertEqual(entry.key, 'foo', msg='Could not set attribute: key')
entry.options = 'noatime'
self.assertEqual(entry.options,
'noatime',
msg='Could not set attribute: options')
entry.location = '/dev/ipod'
self.assertEqual(entry.location,
'/dev/ipod',
msg='Could not set attribute: location')
def testVerify(self):
"""Test that the object can verify it's attributes and itself."""
entry = automount.AutomountMapEntry()
# Empty object should bomb
self.assertFalse(entry.Verify())
def testKey(self):
"""Key() should return the value of the 'key' attribute."""
entry = automount.AutomountMapEntry()
entry.key = 'foo'
self.assertEqual(entry.Key(), entry.key)
if __name__ == '__main__':
unittest.main()
nsscache-version-0.42/nss_cache/maps/group.py000066400000000000000000000045421402531134600213110ustar00rootroot00000000000000# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""An implementation of a group map for nsscache.
GroupMap: An implementation of NSS group maps based on the Map
class.
GroupMapEntry: A group map entry based on the MapEntry class.
"""
__author__ = 'vasilios@google.com (Vasilios Hoffman)'
from nss_cache.maps import maps
class GroupMap(maps.Map):
"""This class represents an NSS group map.
Map data is stored as a list of MapEntry objects, see the abstract
class Map.
"""
def __init__(self, iterable=None):
"""Construct a GroupMap object using optional iterable."""
super(GroupMap, self).__init__(iterable)
def Add(self, entry):
"""Add a new object, verify it is a GroupMapEntry object."""
if not isinstance(entry, GroupMapEntry):
raise TypeError
return super(GroupMap, self).Add(entry)
class GroupMapEntry(maps.MapEntry):
"""This class represents NSS group map entries."""
# Using slots saves us over 2x memory on large maps.
__slots__ = ('name', 'passwd', 'gid', 'members', 'groupmembers')
_KEY = 'name'
_ATTRS = ('name', 'passwd', 'gid', 'members', 'groupmembers')
def __init__(self, data=None):
"""Construct a GroupMapEntry, setting reasonable defaults."""
self.name = None
self.passwd = None
self.gid = None
self.members = None
self.groupmembers = None
super(GroupMapEntry, self).__init__(data)
# Seed data with defaults if needed
if self.passwd is None:
self.passwd = 'x'
if self.members is None:
self.members = []
if self.groupmembers is None:
self.groupmembers = []
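# A minimal usage sketch (illustrative values): fields left unset pick up the
# defaults seeded above, matching /etc/group semantics.
#
#     entry = GroupMapEntry({'name': 'staff', 'gid': 50})
#     assert entry.Verify() and entry.passwd == 'x' and entry.members == []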
nsscache-version-0.42/nss_cache/maps/group_test.py000066400000000000000000000113021402531134600223400ustar00rootroot00000000000000# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Unit tests for group.py.
We only test what is overridden in the group subclasses; most
functionality lives in maps.py and is tested in passwd_test.py, since a
subclass is required to test the abstract class functionality.
"""
__author__ = 'vasilios@google.com (Vasilios Hoffman)'
import unittest
from nss_cache.maps import group
from nss_cache.maps import passwd
class TestGroupMap(unittest.TestCase):
"""Tests for the GroupMap class."""
def __init__(self, obj):
"""Set some default avalible data for testing."""
super(TestGroupMap, self).__init__(obj)
self._good_entry = group.GroupMapEntry()
self._good_entry.name = 'foo'
self._good_entry.passwd = 'x'
self._good_entry.gid = 10
self._good_entry.members = ['foo', 'bar']
def testInit(self):
"""Construct an empty or seeded GroupMap."""
self.assertEqual(group.GroupMap,
type(group.GroupMap()),
msg='failed to create an empty GroupMap')
gmap = group.GroupMap([self._good_entry])
self.assertEqual(self._good_entry,
gmap.PopItem(),
msg='failed to seed GroupMap with list')
self.assertRaises(TypeError, group.GroupMap, ['string'])
def testAdd(self):
"""Add throws an error for objects it can't verify."""
gmap = group.GroupMap()
entry = self._good_entry
self.assertTrue(gmap.Add(entry), msg='failed to append new entry.')
self.assertEqual(1, len(gmap), msg='unexpected size for Map.')
ret_entry = gmap.PopItem()
self.assertEqual(ret_entry, entry, msg='failed to pop correct entry.')
pentry = passwd.PasswdMapEntry()
pentry.name = 'foo'
pentry.uid = 10
pentry.gid = 10
self.assertRaises(TypeError, gmap.Add, pentry)
class TestGroupMapEntry(unittest.TestCase):
"""Tests for the GroupMapEntry class."""
def testInit(self):
"""Construct an empty and seeded GroupMapEntry."""
self.assertTrue(group.GroupMapEntry(),
msg='Could not create empty GroupMapEntry')
seed = {'name': 'foo', 'gid': 10}
entry = group.GroupMapEntry(seed)
self.assertTrue(entry.Verify(),
                        msg='Could not verify seeded GroupMapEntry')
self.assertEqual(entry.name,
'foo',
msg='Entry returned wrong value for name')
self.assertEqual(entry.passwd,
'x',
msg='Entry returned wrong value for passwd')
self.assertEqual(entry.gid,
10,
msg='Entry returned wrong value for gid')
self.assertEqual(entry.members, [],
msg='Entry returned wrong value for members')
def testAttributes(self):
"""Test that we can get and set all expected attributes."""
entry = group.GroupMapEntry()
entry.name = 'foo'
self.assertEqual(entry.name, 'foo', msg='Could not set attribute: name')
entry.passwd = 'x'
self.assertEqual(entry.passwd,
'x',
msg='Could not set attribute: passwd')
entry.gid = 10
self.assertEqual(entry.gid, 10, msg='Could not set attribute: gid')
members = ['foo', 'bar']
entry.members = members
self.assertEqual(entry.members,
members,
msg='Could not set attribute: members')
def testVerify(self):
"""Test that the object can verify it's attributes and itself."""
entry = group.GroupMapEntry()
# Empty object should bomb
self.assertFalse(entry.Verify())
def testKey(self):
"""Key() should return the value of the 'name' attribute."""
entry = group.GroupMapEntry()
entry.name = 'foo'
self.assertEqual(entry.Key(), entry.name)
if __name__ == '__main__':
unittest.main()
nsscache-version-0.42/nss_cache/maps/maps.py000066400000000000000000000267451402531134600211260ustar00rootroot00000000000000# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Base class of maps for nsscache.
Map: Abstract class representing a basic NSS map.
MapEntry: Abstract class representing an entry in an NSS map.
"""
__author__ = 'vasilios@google.com (Vasilios Hoffman)'
import logging
from nss_cache import error
class Map(object):
"""Abstract class representing a basic NSS map.
Map data is stored internally as a dict of MapEntry objects, with
the key being the unique value provided by MapEntry.Key().
MapEntry.Key() is implemented by returning the attribute value for
some attribute which is expected to be unique, e.g. the name of a
user or the name of a group.
This allows for a fast implementation of __contains__() although
it restricts Map objects from holding two MapEntry objects with
the same keys (e.g. no two entries for root allowed). This is
considered an acceptable restriction as posix semantics imply that
entries are unique in each map with respect to certain attributes.
A Map also stores two timestamps; a "last update timestamp" which
is set every time an update/merge operation occurs on a map, and a
"last modification timestamp", which stores the last time that
fresh data was merged into the map.
N.B. Changing the MapEntry().Key() after adding to a Map() will
corrupt the index...so don't do it.
Attributes:
log: A logging.Logger instance used for output.
"""
def __init__(self, iterable=None, modify_time=None, update_time=None):
"""Construct a Map object.
Args:
          iterable: A tuple or list that can be iterated over and added to the
            Map, defaults to None.
          modify_time: An optional modify time for this Map, defaults to None.
          update_time: An optional update time for this Map, defaults to None.
Raises:
TypeError: If the objects in the iterable are of the wrong type.
"""
if self.__class__ is Map:
raise TypeError('Map is an abstract class.')
self._data = {}
# The index preserves the order that entries are returned from the source
# (e.g. the LDAP server.) It is not a set as sets are unordered.
self._index = []
self._last_modification_timestamp = modify_time
self._last_update_timestamp = update_time
self.log = logging.getLogger(__name__)
# Seed with iterable, should raise TypeError for bad items.
if iterable is not None:
for item in iterable:
self.Add(item)
def __contains__(self, other):
"""Deep compare on a MapEntry."""
key = other.Key()
if key in self._data:
possibility = self._data[key]
if other == possibility:
return True
return False
def __iter__(self):
"""Iterate over the MapEntry objects in this map.
Actually this is a generator posing as an iterator so we can use
the index to emit values in the original order.
"""
for index_key in self._index:
yield self._data[index_key]
def __len__(self):
"""Returns the number of items in the map."""
return len(self._data)
def __repr__(self):
return '<%s: %r>' % (self.__class__.__name__, self._data)
def Add(self, entry):
"""Add a MapEntry object to the Map and verify it (overwrites).
Args:
entry: A maps.MapEntry instance.
Returns:
A boolean indicating the add is successful when True.
Raises:
TypeError: The object passed is not the right type.
"""
# Correct type?
if not isinstance(entry, MapEntry):
raise TypeError('Not instance of MapEntry')
# Entry okay?
if not entry.Verify():
self.log.info('refusing to add entry, verify failed')
return False
# Add to index if not already there.
if entry.Key() not in self._data:
self._index.append(entry.Key())
else:
self.log.warning(
'duplicate key detected when adding to map: %r, overwritten',
entry.Key())
self._data[entry.Key()] = entry
return True
def Exists(self, entry):
"""Deep comparison of a MapEntry to the MapEntry instances in the Map.
Args:
entry: A maps.MapEntry instance.
Returns:
A boolean indicating the object is present when True.
"""
if entry in self:
return True
return False
def Merge(self, other):
"""Update this Map based on another Map.
Walk over other and for each entry, Add() it if it doesn't
exist -- this will update changed entries as well as adding
new ones.
Args:
other: A maps.Map instance.
Returns:
True if anything was added or modified, False if
nothing changed.
Raises:
TypeError: Merging differently typed Maps.
InvalidMerge: Attempt to Merge an older map into a newer one.
"""
if type(self) != type(other):
raise TypeError(
'Attempt to Merge() differently typed Maps: %r != %r' %
(type(self), type(other)))
if other.GetModifyTimestamp() and self.GetModifyTimestamp():
if other.GetModifyTimestamp() < self.GetModifyTimestamp():
raise error.InvalidMerge(
'Attempt to Merge a map with an older modify time into a newer one: '
'other: %s, self: %s' %
(other.GetModifyTimestamp(), self.GetModifyTimestamp()))
if other.GetUpdateTimestamp() and self.GetUpdateTimestamp():
if other.GetUpdateTimestamp() < self.GetUpdateTimestamp():
raise error.InvalidMerge(
'Attempt to Merge a map with an older update time into a newer one: '
'other: %s, self: %s' %
(other.GetUpdateTimestamp(), self.GetUpdateTimestamp()))
self.log.info('merging from a map of %d entries', len(other))
merge_count = 0
for their_entry in other:
if their_entry not in self:
# Add() will overwrite similar entries if they exist.
if self.Add(their_entry):
merge_count += 1
self.log.info('%d of %d entries were new or modified', merge_count,
len(other))
if merge_count > 0:
self.SetModifyTimestamp(other.GetModifyTimestamp())
# set last update timestamp
self.SetUpdateTimestamp(other.GetUpdateTimestamp())
return merge_count > 0
def PopItem(self):
"""Return a MapEntry object, throw KeyError if none exist.
Returns:
A maps.MapEntry from within maps.Map internal dict.
Raises:
KeyError if there is nothing to return
"""
try:
            # Pop items off the start of the index, preserving original order.
index_key = self._index.pop(0)
except IndexError:
raise KeyError # Callers expect a KeyError rather than IndexError
return self._data.pop(index_key) # Throws the KeyError if empty.
def SetModifyTimestamp(self, value):
"""Set the last modify timestamp of this map.
Args:
value: An integer containing the number of seconds since epoch, or None.
Raises:
TypeError: The argument is not an int or None.
"""
if value is None or isinstance(value, int):
self._last_modification_timestamp = value
else:
raise TypeError('timestamp can only be int or None, not %r' % value)
def GetModifyTimestamp(self):
"""Return last modification timestamp of this map.
Returns:
Either an int containing seconds since epoch, or None.
"""
return self._last_modification_timestamp
def SetUpdateTimestamp(self, value):
"""Set the last update timestamp of this map.
Args:
value: An int containing seconds since epoch, or None.
Raises:
TypeError: The argument is not an int or None.
"""
if value is None or isinstance(value, int):
self._last_update_timestamp = value
else:
            raise TypeError('timestamp can only be int or None, not %r' % value)
def GetUpdateTimestamp(self):
"""Return last update timestamp of this map.
Returns:
An int containing seconds since epoch, or None.
"""
return self._last_update_timestamp
class MapEntry(object):
"""Abstract class for representing an entry in an NSS map.
    We expect to be contained in Map objects and provide a unique identifier
    via Key() so that Map objects can properly index us. See the Map class for
more details.
Attributes:
log: A logging.Logger instance used for output.
"""
# Using slots saves us over 2x memory on large maps.
__slots__ = ('_KEY', '_ATTRS', 'log')
# Overridden in the derived classes
_KEY: str
    _ATTRS: tuple
def __init__(self, data=None, _KEY=None, _ATTRS=None):
"""This is an abstract class.
Args:
data: An optional dict of attribute, value pairs to populate with.
Raises:
TypeError: Bad argument, or attempt to instantiate abstract class.
"""
if self.__class__ is MapEntry:
raise TypeError('MapEntry is an abstract class.')
        self.log = logging.getLogger(__name__)
        # Initialize from dict, if passed.
        if data is not None:
            for key in data:
                setattr(self, key, data[key])
def __eq__(self, other):
"""Deep comparison of two MapEntry objects."""
if type(self) != type(other):
return False
for key in self._ATTRS:
if getattr(self, key) != getattr(other, key, None):
return False
return True
def __repr__(self):
"""String representation."""
rep = ''
for key in self._ATTRS:
rep = '%r:%r %s' % (key, getattr(self, key), rep)
return '<%s : %r>' % (self.__class__.__name__, rep.rstrip())
def Key(self):
"""Return unique identifier for this MapEntry object.
Returns:
A str which contains the name of the attribute to be used as an index
value for a maps.MapEntry instance in a maps.Map.
"""
return getattr(self, self._KEY)
def Verify(self):
"""We can properly index this instance into a Map.
Returns:
True if the value in the attribute named by self._KEY for this class
is not None. False otherwise.
"""
return getattr(self, self._KEY) is not None
nsscache-version-0.42/nss_cache/maps/maps_test.py000066400000000000000000000044311402531134600221510ustar00rootroot00000000000000# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Unit test for base.py.
Since these are abstract classes, the bulk of the functionality in
base.py is specifically tested in passwd_test.py instead.
"""
__author__ = ('jaq@google.com (Jamie Wilkinson)',
'vasilios@google.com (Vasilios Hoffman)')
import time
import unittest
from nss_cache.maps import maps
class TestMap(unittest.TestCase):
"""Tests for the Map class."""
def testIsAbstract(self):
"""Creating a Map should raise a TypeError."""
self.assertRaises(TypeError, maps.Map)
def testModifyTimestamp(self):
class StubMap(maps.Map):
pass
foo = StubMap()
now = int(time.time())
foo.SetModifyTimestamp(now)
self.assertEqual(now, foo.GetModifyTimestamp())
self.assertRaises(TypeError, foo.SetModifyTimestamp, 1.1)
foo.SetModifyTimestamp(None)
self.assertEqual(None, foo.GetModifyTimestamp())
def testUpdateTimestamp(self):
class StubMap(maps.Map):
pass
foo = StubMap()
now = int(time.time())
foo.SetUpdateTimestamp(now)
self.assertEqual(now, foo.GetUpdateTimestamp())
self.assertRaises(TypeError, foo.SetUpdateTimestamp, 1.1)
foo.SetUpdateTimestamp(None)
self.assertEqual(None, foo.GetUpdateTimestamp())
class TestMapEntry(unittest.TestCase):
"""Tests for the MapEntry class."""
def testIsAbstract(self):
"""Creating a MapEntry should raise a TypeError."""
self.assertRaises(TypeError, maps.MapEntry)
if __name__ == '__main__':
unittest.main()
nsscache-version-0.42/nss_cache/maps/netgroup.py000066400000000000000000000060711402531134600220170ustar00rootroot00000000000000# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""An implementation of a netgroup map for nsscache.
NetgroupMap: An implementation of NSS netgroup maps based on the Map
class.
NetgroupMapEntry: A netgroup map entry based on the MapEntry class.
Netgroup maps are somewhat different than the "typical"
passwd/group/shadow maps. Instead of each entry having a fixed set of
fields, each entry has an arbitrarily long list containing an arbitrary
mix of other netgroup names or (host, user, domain) triples.
Given the choice between a more complex design or just sticking a list
of strings into each MapEntry class... the latter was chosen due to
its combination of simplicity and effectiveness.
No provision is made in these classes to prevent infinite reference
loops, e.g. a NetgroupMapEntry naming itself as a member, or
unresolvable references. No dereferencing is ever done in these
classes, and datastores such as /etc/netgroup actually allow for those
and similar cases.
"""
__author__ = 'vasilios@google.com (Vasilios Hoffman)'
from nss_cache.maps import maps
class NetgroupMap(maps.Map):
"""This class represents an NSS netgroup map.
Map data is stored as a list of MapEntry objects, see the abstract
class Map.
"""
def __init__(self, iterable=None):
"""Construct a NetgroupMap object using optional iterable."""
super(NetgroupMap, self).__init__(iterable)
def Add(self, entry):
"""Add a new object, verify it is a NetgroupMapEntry object."""
if not isinstance(entry, NetgroupMapEntry):
raise TypeError
return super(NetgroupMap, self).Add(entry)
class NetgroupMapEntry(maps.MapEntry):
"""This class represents NSS netgroup map entries.
    The entries attribute is a list containing an arbitrary mix of either
strings which are netgroup names, or tuples mapping to (host, user,
domain) as per the definition of netgroups. A None item in the
tuple is the equivalent of a null pointer from getnetgrent(),
specifically a wildcard.
"""
__slots__ = ('name', 'entries')
_KEY = 'name'
_ATTRS = ('name', 'entries')
def __init__(self, data=None):
"""Construct a NetgroupMapEntry."""
self.name = None
self.entries = None
super(NetgroupMapEntry, self).__init__(data)
# Seed data with defaults if needed
if self.entries is None:
self.entries = ''
nsscache-version-0.42/nss_cache/maps/netgroup_test.py000066400000000000000000000105041402531134600230520ustar00rootroot00000000000000# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Unit tests for netgroup.py.
We only test what is overridden in the netgroup subclasses; most
functionality lives in maps.py and is tested in passwd_test.py, since a
subclass is required to test the abstract class functionality.
"""
__author__ = 'vasilios@google.com (Vasilios Hoffman)'
import unittest
from nss_cache.maps import netgroup
from nss_cache.maps import passwd
class TestNetgroupMap(unittest.TestCase):
"""Tests for the NetgroupMap class."""
def __init__(self, obj):
"""Set some default avalible data for testing."""
super(TestNetgroupMap, self).__init__(obj)
self._good_entry = netgroup.NetgroupMapEntry()
self._good_entry.name = 'foo'
self._good_entry.entries = [('-', 'bob', None), 'othernetgroup']
def testInit(self):
"""Construct an empty or seeded NetgroupMap."""
self.assertEqual(netgroup.NetgroupMap,
type(netgroup.NetgroupMap()),
msg='failed to create an empty NetgroupMap')
nmap = netgroup.NetgroupMap([self._good_entry])
self.assertEqual(self._good_entry,
nmap.PopItem(),
msg='failed to seed NetgroupMap with list')
self.assertRaises(TypeError, netgroup.NetgroupMap, ['string'])
def testAdd(self):
"""Add throws an error for objects it can't verify."""
nmap = netgroup.NetgroupMap()
entry = self._good_entry
self.assertTrue(nmap.Add(entry), msg='failed to append new entry.')
self.assertEqual(1, len(nmap), msg='unexpected size for Map.')
ret_entry = nmap.PopItem()
self.assertEqual(ret_entry, entry, msg='failed to pop correct entry.')
pentry = passwd.PasswdMapEntry()
pentry.name = 'foo'
pentry.uid = 10
pentry.gid = 10
self.assertRaises(TypeError, nmap.Add, pentry)
class TestNetgroupMapEntry(unittest.TestCase):
"""Tests for the NetgroupMapEntry class."""
def testInit(self):
"""Construct an empty and seeded NetgroupMapEntry."""
self.assertTrue(netgroup.NetgroupMapEntry(),
msg='Could not create empty NetgroupMapEntry')
entries = ['bar', ('baz', '-', None)]
seed = {'name': 'foo', 'entries': entries}
entry = netgroup.NetgroupMapEntry(seed)
self.assertTrue(entry.Verify(),
msg='Could not verify seeded NetgroupMapEntry')
self.assertEqual(entry.name,
'foo',
msg='Entry returned wrong value for name')
self.assertEqual(entry.entries,
entries,
msg='Entry returned wrong value for entries')
def testAttributes(self):
"""Test that we can get and set all expected attributes."""
entry = netgroup.NetgroupMapEntry()
entry.name = 'foo'
self.assertEqual(entry.name, 'foo', msg='Could not set attribute: name')
entries = ['foo', '(-,bar,)']
entry.entries = entries
self.assertEqual(entry.entries,
entries,
msg='Could not set attribute: entries')
def testVerify(self):
"""Test that the object can verify it's attributes and itself."""
entry = netgroup.NetgroupMapEntry()
# Empty object should bomb
self.assertFalse(entry.Verify())
def testKey(self):
"""Key() should return the value of the 'name' attribute."""
entry = netgroup.NetgroupMapEntry()
entry.name = 'foo'
self.assertEqual(entry.Key(), entry.name)
if __name__ == '__main__':
unittest.main()
nsscache-version-0.42/nss_cache/maps/passwd.py000066400000000000000000000050071402531134600214530ustar00rootroot00000000000000# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""An implementation of a passwd map for nsscache.
PasswdMap: An implementation of NSS passwd maps based on the Map
class.
PasswdMapEntry: A passwd map entry based on the MapEntry class.
"""
__author__ = 'vasilios@google.com (Vasilios Hoffman)'
from nss_cache.maps import maps
class PasswdMap(maps.Map):
"""This class represents an NSS passwd map.
Map data is stored as a list of MapEntry objects, see the abstract
class Map.
"""
def Add(self, entry):
"""Add a new object, verify it is a PasswdMapEntry instance.
Args:
entry: A PasswdMapEntry instance.
Returns:
True if added successfully, False otherwise.
Raises:
TypeError: The argument is of the wrong type.
"""
if not isinstance(entry, PasswdMapEntry):
raise TypeError
return super(PasswdMap, self).Add(entry)
class PasswdMapEntry(maps.MapEntry):
"""This class represents NSS passwd map entries."""
# Using slots saves us over 2x memory on large maps.
__slots__ = ('name', 'uid', 'gid', 'passwd', 'gecos', 'dir', 'shell')
_KEY = 'name'
_ATTRS = ('name', 'uid', 'gid', 'passwd', 'gecos', 'dir', 'shell')
def __init__(self, data=None):
"""Construct a PasswdMapEntry, setting reasonable defaults."""
self.name = None
self.uid = None
self.gid = None
self.passwd = None
self.gecos = None
self.dir = None
self.shell = None
super(PasswdMapEntry, self).__init__(data)
# Seed data with defaults if still empty
if self.passwd is None:
self.passwd = 'x'
if self.gecos is None:
self.gecos = ''
if self.dir is None:
self.dir = ''
if self.shell is None:
self.shell = ''
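# A minimal usage sketch (illustrative values): once the key attribute
# ('name') is set the entry verifies, and fields left unset keep the
# defaults seeded above.
#
#     entry = PasswdMapEntry({'name': 'alice', 'uid': 1000, 'gid': 1000})
#     assert entry.Verify() and entry.passwd == 'x'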
nsscache-version-0.42/nss_cache/maps/passwd_test.py000066400000000000000000000255301402531134600225150ustar00rootroot00000000000000# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Unit tests for passwd.py."""
__author__ = 'vasilios@google.com (Vasilios Hoffman)'
import time
import unittest
from nss_cache import error
from nss_cache.maps import group
from nss_cache.maps import passwd
class TestPasswdMap(unittest.TestCase):
"""Tests for the PasswdMap class."""
def setUp(self):
"""Set some default avalible data for testing."""
self._good_entry = passwd.PasswdMapEntry()
self._good_entry.name = 'foo'
self._good_entry.passwd = 'x'
self._good_entry.uid = 10
self._good_entry.gid = 10
self._good_entry.gecos = 'How Now Brown Cow'
self._good_entry.dir = '/home/foo'
self._good_entry.shell = '/bin/bash'
def testInit(self):
"""Construct an empty or seeded PasswdMap."""
self.assertEqual(passwd.PasswdMap,
type(passwd.PasswdMap()),
                         msg='failed to create empty PasswdMap')
pmap = passwd.PasswdMap([self._good_entry])
self.assertEqual(self._good_entry,
pmap.PopItem(),
msg='failed to seed PasswdMap with list')
self.assertRaises(TypeError, passwd.PasswdMap, ['string'])
def testAdd(self):
"""Add raises exceptions for objects it can't add or verify."""
pmap = passwd.PasswdMap()
entry = self._good_entry
self.assertTrue(pmap.Add(entry), msg='failed to add new entry.')
self.assertEqual(1, len(pmap), msg='unexpected size for Map.')
ret_entry = pmap.PopItem()
self.assertEqual(ret_entry, entry, msg='failed to pop existing entry.')
gentry = group.GroupMapEntry()
gentry.name = 'foo'
gentry.gid = 10
self.assertRaises(TypeError, pmap.Add, gentry)
def testContains(self):
"""Verify __contains__ works, and does a deep compare."""
pentry_good = self._good_entry
pentry_like_good = passwd.PasswdMapEntry()
pentry_like_good.name = 'foo' # same Key(), but rest of attributes differ
pentry_bad = passwd.PasswdMapEntry()
pentry_bad.name = 'bar'
pmap = passwd.PasswdMap([pentry_good])
self.assertTrue(pentry_good in pmap, msg='expected entry to be in map')
self.assertFalse(pentry_bad in pmap,
msg='did not expect entry to be in map')
self.assertFalse(pentry_like_good in pmap,
msg='__contains__ not doing a deep compare')
def testIterate(self):
"""Check that we can iterate over PasswdMap."""
pmap = passwd.PasswdMap()
pmap.Add(self._good_entry)
ret_entries = []
for entry in pmap:
ret_entries.append(entry)
self.assertEqual(len(ret_entries), 1, msg='iterated over wrong count')
self.assertEqual(ret_entries[0],
self._good_entry,
msg='got the wrong entry back')
def testLen(self):
"""Verify we have correctly overridden __len__ in MapEntry."""
pmap = passwd.PasswdMap()
self.assertEqual(len(pmap), 0, msg='expected len(pmap) to be 0')
pmap.Add(self._good_entry)
self.assertEqual(len(pmap), 1, msg='expected len(pmap) to be 1')
def testExists(self):
"""Verify Exists() checks for presence of MapEntry objects."""
pmap = passwd.PasswdMap()
entry = self._good_entry
self.assertFalse(pmap.Exists(entry))
pmap.Add(entry)
self.assertTrue(pmap.Exists(entry))
def testMerge(self):
"""Verify Merge() throws the right exceptions and correctly merges."""
# Setup some MapEntry objects with distinct Key()s
pentry1 = self._good_entry
pentry2 = passwd.PasswdMapEntry()
pentry2.name = 'john'
pentry3 = passwd.PasswdMapEntry()
pentry3.name = 'jane'
# Setup some Map objects
pmap_big = passwd.PasswdMap([pentry1, pentry2])
pmap_small = passwd.PasswdMap([pentry3])
# Merge small into big
self.assertTrue(pmap_big.Merge(pmap_small),
msg='Merging small into big failed!')
self.assertTrue(pmap_big.Exists(pentry1),
msg='pentry1 not found in Map')
self.assertTrue(pmap_big.Exists(pentry2),
                        msg='pentry2 not found in Map')
self.assertTrue(pmap_big.Exists(pentry3),
                        msg='pentry3 not found in Map')
# A second merge should do nothing
self.assertFalse(pmap_big.Merge(pmap_small),
msg='Re-merging small into big succeeded.')
# An empty merge should do nothing
self.assertFalse(pmap_big.Merge(passwd.PasswdMap()),
msg='Empty Merge should have done nothing.')
        # Merging a GroupMap should throw a TypeError
gmap = group.GroupMap()
self.assertRaises(TypeError, pmap_big.Merge, gmap)
        # Merging an older map should throw an InvalidMerge
old_map = passwd.PasswdMap(modify_time=1)
new_map = passwd.PasswdMap(modify_time=2)
self.assertRaises(error.InvalidMerge, new_map.Merge, old_map)
old_map = passwd.PasswdMap(update_time=1)
new_map = passwd.PasswdMap(update_time=2)
self.assertRaises(error.InvalidMerge, new_map.Merge, old_map)
def testPopItem(self):
"""Verify you can retrieve MapEntry with PopItem."""
pmap = passwd.PasswdMap([self._good_entry])
self.assertEqual(pmap.PopItem(), self._good_entry)
def testLastModificationTimestamp(self):
"""Test setting/getting of timestamps on maps."""
m = passwd.PasswdMap()
# we only work in whole-second resolution
now = int(time.time())
m.SetModifyTimestamp(now)
self.assertEqual(now, m._last_modification_timestamp)
ts = m.GetModifyTimestamp()
self.assertEqual(now, ts)
class TestPasswdMapEntry(unittest.TestCase):
"""Tests for the PasswdMapEntry class."""
def testInit(self):
"""Construct empty and seeded PasswdMapEntry."""
entry = passwd.PasswdMapEntry()
self.assertEqual(type(entry),
passwd.PasswdMapEntry,
msg='Could not create empty PasswdMapEntry')
seed = {
'name': 'foo',
'passwd': 'x',
'uid': 10,
'gid': 10,
'gecos': '',
'dir': '',
'shell': ''
}
entry = passwd.PasswdMapEntry(seed)
self.assertTrue(entry.Verify(),
msg='Could not verify seeded PasswdMapEntry')
self.assertEqual(entry.name,
'foo',
msg='Entry returned wrong value for name')
self.assertEqual(entry.passwd,
'x',
msg='Entry returned wrong value for passwd')
self.assertEqual(entry.uid,
10,
msg='Entry returned wrong value for uid')
self.assertEqual(entry.gid,
10,
msg='Entry returned wrong value for gid')
self.assertEqual(entry.gecos,
'',
msg='Entry returned wrong value for gecos')
self.assertEqual(entry.dir,
'',
msg='Entry returned wrong value for dir')
self.assertEqual(entry.shell,
'',
msg='Entry returned wrong value for shell')
def testAttributes(self):
"""Test that we can get and set all expected attributes."""
entry = passwd.PasswdMapEntry()
entry.name = 'foo'
self.assertEqual(entry.name, 'foo', msg='Could not set attribute: name')
entry.passwd = 'x'
self.assertEqual(entry.passwd,
'x',
msg='Could not set attribute: passwd')
entry.uid = 10
self.assertEqual(entry.uid, 10, msg='Could not set attribute: uid')
entry.gid = 10
self.assertEqual(entry.gid, 10, msg='Could not set attribute: gid')
entry.gecos = 'How Now Brown Cow'
self.assertEqual(entry.gecos,
'How Now Brown Cow',
msg='Could not set attribute: gecos')
entry.dir = '/home/foo'
self.assertEqual(entry.dir,
'/home/foo',
msg='Could not set attribute: dir')
entry.shell = '/bin/bash'
self.assertEqual(entry.shell,
'/bin/bash',
msg='Could not set attribute: shell')
def testEq(self):
"""Verify we are doing a deep compare in __eq__."""
# Setup some things to compare
entry_good = passwd.PasswdMapEntry({
'name': 'foo',
'uid': 10,
'gid': 10
})
entry_same_as_good = passwd.PasswdMapEntry({
'name': 'foo',
'uid': 10,
'gid': 10
})
entry_like_good = passwd.PasswdMapEntry()
entry_like_good.name = 'foo' # same Key(), but rest of attributes differ
entry_bad = passwd.PasswdMapEntry()
entry_bad.name = 'bar'
self.assertEqual(entry_good,
entry_good,
msg='entry_good not equal to itself')
self.assertEqual(entry_good,
entry_same_as_good,
msg='__eq__ not doing deep compare')
self.assertNotEqual(entry_good,
entry_like_good,
msg='__eq__ not doing deep compare')
self.assertNotEqual(entry_good, entry_bad, msg='unexpected equality')
def testVerify(self):
"""Test that the object can verify it's attributes and itself."""
entry = passwd.PasswdMapEntry()
# By leaving the _KEY attribute ('name') unset, Verify() should bomb.
self.assertFalse(entry.Verify())
def testKey(self):
"""Key() should return the value of the 'name' attribute."""
entry = passwd.PasswdMapEntry()
entry.name = 'foo'
self.assertEqual(entry.Key(), entry.name)
if __name__ == '__main__':
unittest.main()
nsscache-version-0.42/nss_cache/maps/shadow.py000066400000000000000000000045401402531134600214400ustar00rootroot00000000000000# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""An implementation of a shadow map for nsscache.
ShadowMap: An implementation of NSS shadow maps based on the Map
class.
ShadowMapEntry: A shadow map entry based on the MapEntry class.
"""
__author__ = 'vasilios@google.com (Vasilios Hoffman)'
from nss_cache.maps import maps
class ShadowMap(maps.Map):
"""This class represents an NSS shadow map.
Map data is stored as a list of MapEntry objects, see the abstract
class Map.
"""
def __init__(self, iterable=None):
"""Construct a ShadowMap object using optional iterable."""
super(ShadowMap, self).__init__(iterable)
def Add(self, entry):
"""Add a new object, verify it is a ShadowMapEntry object."""
if not isinstance(entry, ShadowMapEntry):
raise TypeError
return super(ShadowMap, self).Add(entry)
class ShadowMapEntry(maps.MapEntry):
"""This class represents NSS shadow map entries."""
__slots__ = ('name', 'passwd', 'lstchg', 'min', 'max', 'warn', 'inact',
'expire', 'flag')
_KEY = 'name'
_ATTRS = ('name', 'passwd', 'lstchg', 'min', 'max', 'warn', 'inact',
'expire', 'flag')
def __init__(self, data=None):
"""Construct a ShadowMapEntry, setting reasonable defaults."""
self.name = None
self.passwd = None
self.lstchg = None
self.min = None
self.max = None
self.warn = None
self.inact = None
self.expire = None
self.flag = None
super(ShadowMapEntry, self).__init__(data)
# Seed data with defaults if needed
if self.passwd is None:
self.passwd = '!!'
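# Default-value sketch (mirrored in shadow_test.py): constructing
# ShadowMapEntry({'name': 'foo'}) verifies successfully and yields
# passwd == '!!' with all the aging fields left as None.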
nsscache-version-0.42/nss_cache/maps/shadow_test.py000066400000000000000000000137131402531134600225010ustar00rootroot00000000000000# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Unit tests for shadow.py.
We only test what is overridden in the shadow subclasses, most
functionality is in base.py and tested in passwd_test.py since a
subclass is required to test the abstract class functionality.
"""
__author__ = 'vasilios@google.com (Vasilios Hoffman)'
import unittest
from nss_cache.maps import passwd
from nss_cache.maps import shadow
class TestShadowMap(unittest.TestCase):
"""Tests for the ShadowMap class."""
def __init__(self, obj):
"""Set some default avalible data for testing."""
super(TestShadowMap, self).__init__(obj)
self._good_entry = shadow.ShadowMapEntry()
self._good_entry.name = 'foo'
self._good_entry.lstchg = None
self._good_entry.min = None
self._good_entry.max = None
self._good_entry.warn = None
self._good_entry.inact = None
self._good_entry.expire = None
self._good_entry.flag = None
def testInit(self):
"""Construct an empty or seeded ShadowMap."""
self.assertEqual(shadow.ShadowMap,
type(shadow.ShadowMap()),
msg='failed to create empty ShadowMap')
smap = shadow.ShadowMap([self._good_entry])
self.assertEqual(self._good_entry,
smap.PopItem(),
msg='failed to seed ShadowMap with list')
self.assertRaises(TypeError, shadow.ShadowMap, ['string'])
def testAdd(self):
"""Add throws an error for objects it can't verify."""
smap = shadow.ShadowMap()
entry = self._good_entry
self.assertTrue(smap.Add(entry), msg='failed to append new entry.')
self.assertEqual(1, len(smap), msg='unexpected size for Map.')
ret_entry = smap.PopItem()
self.assertEqual(ret_entry, entry, msg='failed to pop existing entry.')
pentry = passwd.PasswdMapEntry()
pentry.name = 'foo'
pentry.uid = 10
pentry.gid = 10
self.assertRaises(TypeError, smap.Add, pentry)
class TestShadowMapEntry(unittest.TestCase):
"""Tests for the ShadowMapEntry class."""
def testInit(self):
"""Construct empty and seeded ShadowMapEntry."""
self.assertTrue(shadow.ShadowMapEntry(),
msg='Could not create empty ShadowMapEntry')
seed = {'name': 'foo'}
entry = shadow.ShadowMapEntry(seed)
self.assertTrue(entry.Verify(),
msg='Could not verify seeded ShadowMapEntry')
self.assertEqual(entry.name,
'foo',
msg='Entry returned wrong value for name')
self.assertEqual(entry.passwd,
'!!',
msg='Entry returned wrong value for passwd')
self.assertEqual(entry.lstchg,
None,
msg='Entry returned wrong value for lstchg')
self.assertEqual(entry.min,
None,
msg='Entry returned wrong value for min')
self.assertEqual(entry.max,
None,
msg='Entry returned wrong value for max')
self.assertEqual(entry.warn,
None,
msg='Entry returned wrong value for warn')
self.assertEqual(entry.inact,
None,
msg='Entry returned wrong value for inact')
self.assertEqual(entry.expire,
None,
msg='Entry returned wrong value for expire')
self.assertEqual(entry.flag,
None,
msg='Entry returned wrong value for flag')
def testAttributes(self):
"""Test that we can get and set all expected attributes."""
entry = shadow.ShadowMapEntry()
entry.name = 'foo'
self.assertEqual(entry.name, 'foo', msg='Could not set attribute: name')
entry.passwd = 'seekret'
self.assertEqual(entry.passwd,
'seekret',
msg='Could not set attribute: passwd')
entry.lstchg = 0
self.assertEqual(entry.lstchg, 0, msg='Could not set attribute: lstchg')
entry.min = 0
self.assertEqual(entry.min, 0, msg='Could not set attribute: min')
entry.max = 0
self.assertEqual(entry.max, 0, msg='Could not set attribute: max')
entry.warn = 0
self.assertEqual(entry.warn, 0, msg='Could not set attribute: warn')
entry.inact = 0
self.assertEqual(entry.inact, 0, msg='Could not set attribute: inact')
entry.expire = 0
self.assertEqual(entry.expire, 0, msg='Could not set attribute: expire')
entry.flag = 0
self.assertEqual(entry.flag, 0, msg='Could not set attribute: flag')
def testVerify(self):
"""Test that the object can verify it's attributes and itself."""
entry = shadow.ShadowMapEntry()
# Empty object should bomb
self.assertFalse(entry.Verify())
def testKey(self):
"""Key() should return the value of the 'name' attribute."""
entry = shadow.ShadowMapEntry()
entry.name = 'foo'
self.assertEqual(entry.Key(), entry.name)
if __name__ == '__main__':
unittest.main()
nsscache-version-0.42/nss_cache/maps/sshkey.py000066400000000000000000000041771402531134600214670ustar00rootroot00000000000000# Copyright 2014 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""An implementation of a sshkey map for nsscache.
SshkeyMap: An implementation of NSS sshkey maps based on the Map
class.
SshkeyMapEntry: A sshkey map entry based on the MapEntry class.
"""
__author__ = 'mimianddaniel@gmail.com'
from nss_cache.maps import maps
class SshkeyMap(maps.Map):
"""This class represents an NSS sshkey map.
Map data is stored as a list of MapEntry objects, see the abstract
class Map.
"""
def Add(self, entry):
"""Add a new object, verify it is a SshkeyMapEntry instance.
Args:
entry: A SshkeyMapEntry instance.
Returns:
True if added successfully, False otherwise.
Raises:
TypeError: The argument is of the wrong type.
"""
if not isinstance(entry, SshkeyMapEntry):
raise TypeError
return super(SshkeyMap, self).Add(entry)
class SshkeyMapEntry(maps.MapEntry):
"""This class represents NSS sshkey map entries."""
# Using slots saves us over 2x memory on large maps.
__slots__ = ('name', 'sshkey')
_KEY = 'name'
_ATTRS = ('name', 'sshkey')
def __init__(self, data=None):
"""Construct a SshkeyMapEntry, setting reasonable defaults."""
self.name = None
self.sshkey = None
super(SshkeyMapEntry, self).__init__(data)
# Seed data with defaults if still empty
if self.sshkey is None:
self.sshkey = ''
nsscache-version-0.42/nss_cache/nss.py000066400000000000000000000100541402531134600200130ustar00rootroot00000000000000# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""NSS utility library."""
__author__ = 'vasilios@google.com (Vasilios Hoffman)'
import pwd
import grp
import logging
import subprocess
from nss_cache import config
from nss_cache import error
from nss_cache.maps import group
from nss_cache.maps import passwd
from nss_cache.maps import shadow
# TODO(v): this should be a config option someday, but it's as standard
# as libc so at the moment we'll leave it be for simplicity.
GETENT = '/usr/bin/getent'
def GetMap(map_name):
"""Retrieves a Map of type map_name via nss calls."""
if map_name == config.MAP_PASSWORD:
return GetPasswdMap()
elif map_name == config.MAP_GROUP:
return GetGroupMap()
elif map_name == config.MAP_SHADOW:
return GetShadowMap()
raise error.UnsupportedMap
def GetPasswdMap():
"""Returns a PasswdMap built from nss calls."""
passwd_map = passwd.PasswdMap()
for nss_entry in pwd.getpwall():
map_entry = passwd.PasswdMapEntry()
map_entry.name = nss_entry[0]
map_entry.passwd = nss_entry[1]
map_entry.uid = nss_entry[2]
map_entry.gid = nss_entry[3]
map_entry.gecos = nss_entry[4]
map_entry.dir = nss_entry[5]
map_entry.shell = nss_entry[6]
passwd_map.Add(map_entry)
return passwd_map
def GetGroupMap():
"""Returns a GroupMap built from nss calls."""
group_map = group.GroupMap()
for nss_entry in grp.getgrall():
map_entry = group.GroupMapEntry()
map_entry.name = nss_entry[0]
map_entry.passwd = nss_entry[1]
map_entry.gid = nss_entry[2]
map_entry.members = nss_entry[3]
if not map_entry.members:
map_entry.members = ['']
group_map.Add(map_entry)
return group_map
def GetShadowMap():
"""Returns a ShadowMap built from nss calls."""
getent = _SpawnGetent(config.MAP_SHADOW)
(getent_stdout, getent_stderr) = getent.communicate()
# The following is going to be map-specific each time, so no point in
# making more methods.
shadow_map = shadow.ShadowMap()
for line in getent_stdout.split():
line = line.decode('utf-8')
nss_entry = line.strip().split(':')
map_entry = shadow.ShadowMapEntry()
map_entry.name = nss_entry[0]
map_entry.passwd = nss_entry[1]
if nss_entry[2] != '':
map_entry.lstchg = int(nss_entry[2])
if nss_entry[3] != '':
map_entry.min = int(nss_entry[3])
if nss_entry[4] != '':
map_entry.max = int(nss_entry[4])
if nss_entry[5] != '':
map_entry.warn = int(nss_entry[5])
if nss_entry[6] != '':
map_entry.inact = int(nss_entry[6])
if nss_entry[7] != '':
map_entry.expire = int(nss_entry[7])
if nss_entry[8] != '':
map_entry.flag = int(nss_entry[8])
shadow_map.Add(map_entry)
if getent_stderr:
logging.debug('captured error %s', getent_stderr)
retval = getent.returncode
if retval != 0:
logging.warning('%s returned error code: %d', GETENT, retval)
return shadow_map
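# Illustrative input for GetShadowMap() above: each line of getent output
# is colon-separated, e.g. (taken from the unit tests)
#   foo:!!::::::::
# which parses to name='foo', passwd='!!' and all numeric fields unset.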
def _SpawnGetent(map_name):
"""Run 'getent map' in a subprocess for reading NSS data."""
getent = subprocess.Popen([GETENT, map_name],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
return getent
nsscache-version-0.42/nss_cache/nss_test.py000066400000000000000000000114341402531134600210550ustar00rootroot00000000000000# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Unit tests for nss_cache/command.py."""
__author__ = 'vasilios@google.com (Vasilios Hoffman)'
import grp
import pwd
import unittest
from mox3 import mox
from nss_cache import config
from nss_cache import error
from nss_cache import nss
from nss_cache.maps import group
from nss_cache.maps import passwd
from nss_cache.maps import shadow
class TestNSS(mox.MoxTestBase):
"""Tests for the NSS library."""
def testGetMap(self):
"""that GetMap is calling the right GetFooMap routines."""
self.mox.StubOutWithMock(nss, 'GetPasswdMap')
nss.GetPasswdMap().AndReturn('TEST_PASSWORD')
self.mox.StubOutWithMock(nss, 'GetGroupMap')
nss.GetGroupMap().AndReturn('TEST_GROUP')
self.mox.StubOutWithMock(nss, 'GetShadowMap')
nss.GetShadowMap().AndReturn('TEST_SHADOW')
self.mox.ReplayAll()
self.assertEqual('TEST_PASSWORD', nss.GetMap(config.MAP_PASSWORD))
self.assertEqual('TEST_GROUP', nss.GetMap(config.MAP_GROUP))
self.assertEqual('TEST_SHADOW', nss.GetMap(config.MAP_SHADOW))
def testGetMapException(self):
"""GetMap throws error.UnsupportedMap for unsupported maps."""
self.assertRaises(error.UnsupportedMap, nss.GetMap, 'ohio')
def testGetPasswdMap(self):
"""Verify we build a correct password map from nss calls."""
foo = ('foo', 'x', 10, 10, 'foo bar', '/home/foo', '/bin/shell')
bar = ('bar', 'x', 20, 20, 'foo bar', '/home/monkeyboy', '/bin/shell')
self.mox.StubOutWithMock(pwd, 'getpwall')
pwd.getpwall().AndReturn([foo, bar])
entry1 = passwd.PasswdMapEntry()
entry1.name = 'foo'
entry1.uid = 10
entry1.gid = 10
entry1.gecos = 'foo bar'
entry1.dir = '/home/foo'
entry1.shell = '/bin/shell'
entry2 = passwd.PasswdMapEntry()
entry2.name = 'bar'
entry2.uid = 20
entry2.gid = 20
entry2.gecos = 'foo bar'
entry2.dir = '/home/monkeyboy'
entry2.shell = '/bin/shell'
self.mox.ReplayAll()
password_map = nss.GetPasswdMap()
self.assertTrue(isinstance(password_map, passwd.PasswdMap))
self.assertEqual(len(password_map), 2)
self.assertTrue(password_map.Exists(entry1))
self.assertTrue(password_map.Exists(entry2))
def testGetGroupMap(self):
"""Verify we build a correct group map from nss calls."""
foo = ('foo', '*', 10, [])
bar = ('bar', '*', 20, ['foo', 'bar'])
self.mox.StubOutWithMock(grp, 'getgrall')
grp.getgrall().AndReturn([foo, bar])
entry1 = group.GroupMapEntry()
entry1.name = 'foo'
entry1.passwd = '*'
entry1.gid = 10
entry1.members = ['']
entry2 = group.GroupMapEntry()
entry2.name = 'bar'
entry2.passwd = '*'
entry2.gid = 20
entry2.members = ['foo', 'bar']
self.mox.ReplayAll()
group_map = nss.GetGroupMap()
self.assertTrue(isinstance(group_map, group.GroupMap))
self.assertEqual(len(group_map), 2)
self.assertTrue(group_map.Exists(entry1))
self.assertTrue(group_map.Exists(entry2))
def testGetShadowMap(self):
"""Verify we build a correct shadow map from nss calls."""
line1 = b'foo:!!::::::::'
line2 = b'bar:!!::::::::'
lines = [line1, line2]
mock_getent = self.mox.CreateMockAnything()
mock_getent.communicate().AndReturn([b'\n'.join(lines), b''])
mock_getent.returncode = 0
entry1 = shadow.ShadowMapEntry()
entry1.name = 'foo'
entry2 = shadow.ShadowMapEntry()
entry2.name = 'bar'
self.mox.StubOutWithMock(nss, '_SpawnGetent')
nss._SpawnGetent(config.MAP_SHADOW).AndReturn(mock_getent)
self.mox.ReplayAll()
shadow_map = nss.GetShadowMap()
self.assertTrue(isinstance(shadow_map, shadow.ShadowMap))
self.assertEqual(len(shadow_map), 2)
self.assertTrue(shadow_map.Exists(entry1))
self.assertTrue(shadow_map.Exists(entry2))
if __name__ == '__main__':
unittest.main()
nsscache-version-0.42/nss_cache/sources/000077500000000000000000000000001402531134600203215ustar00rootroot00000000000000nsscache-version-0.42/nss_cache/sources/__init__.py000066400000000000000000000000001402531134600224200ustar00rootroot00000000000000nsscache-version-0.42/nss_cache/sources/consulsource.py000066400000000000000000000160051402531134600234210ustar00rootroot00000000000000"""An implementation of a consul data source for nsscache."""
__author__ = 'hexedpackets@gmail.com (William Huba)'
import base64
import collections
import logging
import json
from nss_cache.maps import group
from nss_cache.maps import passwd
from nss_cache.maps import shadow
from nss_cache.sources import httpsource
def RegisterImplementation(registration_callback):
registration_callback(ConsulFilesSource)
class ConsulFilesSource(httpsource.HttpFilesSource):
"""Source for data fetched via Consul."""
# Consul defaults
DATACENTER = 'dc1'
TOKEN = ''
# for registration
name = 'consul'
def _SetDefaults(self, configuration):
"""Set defaults if necessary."""
super(ConsulFilesSource, self)._SetDefaults(configuration)
if 'token' not in configuration:
configuration['token'] = self.TOKEN
if 'datacenter' not in configuration:
configuration['datacenter'] = self.DATACENTER
for url in ['passwd_url', 'group_url', 'shadow_url']:
configuration[url] = '{}?recurse&token={}&dc={}'.format(
configuration[url], configuration['token'],
configuration['datacenter'])
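# Sketch of the URL rewrite above, with values from the unit tests:
# a passwd_url of 'PASSWD_URL' with token 'TEST_TOKEN' and datacenter
# 'TEST_DATACENTER' becomes
# 'PASSWD_URL?recurse&token=TEST_TOKEN&dc=TEST_DATACENTER'.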
def GetPasswdMap(self, since=None):
"""Return the passwd map from this source.
Args:
since: Get data only changed since this timestamp (inclusive) or None
for all data.
Returns:
instance of passwd.PasswdMap
"""
return PasswdUpdateGetter().GetUpdates(self, self.conf['passwd_url'],
since)
def GetGroupMap(self, since=None):
"""Return the group map from this source.
Args:
since: Get data only changed since this timestamp (inclusive) or None
for all data.
Returns:
instance of group.GroupMap
"""
return GroupUpdateGetter().GetUpdates(self, self.conf['group_url'],
since)
def GetShadowMap(self, since=None):
"""Return the shadow map from this source.
Args:
since: Get data only changed since this timestamp (inclusive) or None
for all data.
Returns:
instance of shadow.ShadowMap
"""
return ShadowUpdateGetter().GetUpdates(self, self.conf['shadow_url'],
since)
class PasswdUpdateGetter(httpsource.UpdateGetter):
"""Get passwd updates."""
def GetParser(self):
"""Returns a MapParser to parse FilesPasswd cache."""
return ConsulPasswdMapParser()
def CreateMap(self):
"""Returns a new PasswdMap instance to have PasswdMapEntries added to
it."""
return passwd.PasswdMap()
class GroupUpdateGetter(httpsource.UpdateGetter):
"""Get group updates."""
def GetParser(self):
"""Returns a MapParser to parse FilesGroup cache."""
return ConsulGroupMapParser()
def CreateMap(self):
"""Returns a new GroupMap instance to have GroupMapEntries added to
it."""
return group.GroupMap()
class ShadowUpdateGetter(httpsource.UpdateGetter):
"""Get shadow updates."""
def GetParser(self):
"""Returns a MapParser to parse FilesShadow cache."""
return ConsulShadowMapParser()
def CreateMap(self):
"""Returns a new ShadowMap instance to have ShadowMapEntries added to
it."""
return shadow.ShadowMap()
class ConsulMapParser(object):
"""A base class for parsing nss_files module cache."""
def __init__(self):
self.log = logging.getLogger(__name__)
def GetMap(self, cache_info, data):
"""Returns a map from a cache.
Args:
cache_info: file-like object containing the cache.
data: a Map to populate.
Returns:
A child of Map containing the cache data.
"""
entries = collections.defaultdict(dict)
for line in json.loads(cache_info.read()):
key = line.get('Key', '').split('/')
value = line.get('Value', '')
if not value or not key:
continue
value = base64.b64decode(value)
name = str(key[-2])
entry_piece = key[-1]
entries[name][entry_piece] = value
for name, entry in list(entries.items()):
map_entry = self._ReadEntry(name, entry)
if map_entry is None:
self.log.warning(
'Could not create entry from line %r in cache, skipping',
entry)
continue
if not data.Add(map_entry):
self.log.warning(
'Could not add entry %r read from line %r in cache',
map_entry, entry)
return data
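# Illustrative input for GetMap() above, in the Consul KV export format
# exercised by the unit tests; each 'Value' is base64 ('MTA=' is '10'):
#   [{"Key": "org/users/foo/uid", "Value": "MTA="},
#    {"Key": "org/users/foo/gid", "Value": "MTA="}]
# This collects to entries == {'foo': {'uid': b'10', 'gid': b'10'}}
# before each per-map _ReadEntry() converts the values.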
class ConsulPasswdMapParser(ConsulMapParser):
"""Class for parsing nss_files module passwd cache."""
def _ReadEntry(self, name, entry):
"""Return a PasswdMapEntry from a record in the target cache."""
map_entry = passwd.PasswdMapEntry()
# maps expect strict typing, so convert to int as appropriate.
map_entry.name = name
map_entry.passwd = entry.get('passwd', 'x')
try:
map_entry.uid = int(entry['uid'])
map_entry.gid = int(entry['gid'])
except (ValueError, KeyError):
return None
map_entry.gecos = entry.get('comment', '')
map_entry.dir = entry.get('home', '/home/{}'.format(name))
map_entry.shell = entry.get('shell', '/bin/bash')
return map_entry
class ConsulGroupMapParser(ConsulMapParser):
"""Class for parsing a nss_files module group cache."""
def _ReadEntry(self, name, entry):
"""Return a GroupMapEntry from a record in the target cache."""
map_entry = group.GroupMapEntry()
# map entries expect strict typing, so convert as appropriate
map_entry.name = name
map_entry.passwd = entry.get('passwd', 'x')
try:
map_entry.gid = int(entry['gid'])
except (ValueError, KeyError):
return None
try:
members = entry.get('members', '').split('\n')
except (ValueError, TypeError):
members = ['']
map_entry.members = members
return map_entry
class ConsulShadowMapParser(ConsulMapParser):
"""Class for parsing nss_files module shadow cache."""
def _ReadEntry(self, name, entry):
"""Return a ShadowMapEntry from a record in the target cache."""
map_entry = shadow.ShadowMapEntry()
# maps expect strict typing, so convert to int as appropriate.
map_entry.name = name
map_entry.passwd = entry.get('passwd', '*')
if isinstance(map_entry.passwd, bytes):
map_entry.passwd = map_entry.passwd.decode('ascii')
for attr in ['lstchg', 'min', 'max', 'warn', 'inact', 'expire']:
try:
setattr(map_entry, attr, int(entry[attr]))
except (ValueError, KeyError):
continue
return map_entry
nsscache-version-0.42/nss_cache/sources/consulsource_test.py000066400000000000000000000151351402531134600244630ustar00rootroot00000000000000"""An implementation of a mock consul data source for nsscache."""
__author__ = 'hexedpackets@gmail.com (William Huba)'
import unittest
from io import StringIO
from nss_cache.maps import group
from nss_cache.maps import passwd
from nss_cache.maps import shadow
from nss_cache.sources import consulsource
class TestConsulSource(unittest.TestCase):
def setUp(self):
"""Initialize a basic config dict."""
super(TestConsulSource, self).setUp()
self.config = {
'passwd_url': 'PASSWD_URL',
'group_url': 'GROUP_URL',
'datacenter': 'TEST_DATACENTER',
'token': 'TEST_TOKEN',
}
def testDefaultConfiguration(self):
source = consulsource.ConsulFilesSource({})
self.assertEqual(source.conf['datacenter'],
consulsource.ConsulFilesSource.DATACENTER)
self.assertEqual(source.conf['token'],
consulsource.ConsulFilesSource.TOKEN)
def testOverrideDefaultConfiguration(self):
source = consulsource.ConsulFilesSource(self.config)
self.assertEqual(source.conf['datacenter'], 'TEST_DATACENTER')
self.assertEqual(source.conf['token'], 'TEST_TOKEN')
self.assertEqual(
source.conf['passwd_url'],
'PASSWD_URL?recurse&token=TEST_TOKEN&dc=TEST_DATACENTER')
self.assertEqual(
source.conf['group_url'],
'GROUP_URL?recurse&token=TEST_TOKEN&dc=TEST_DATACENTER')
class TestPasswdMapParser(unittest.TestCase):
def setUp(self):
"""Set some default avalible data for testing."""
self.good_entry = passwd.PasswdMapEntry()
self.good_entry.name = 'foo'
self.good_entry.passwd = 'x'
self.good_entry.uid = 10
self.good_entry.gid = 10
self.good_entry.gecos = b'How Now Brown Cow'
self.good_entry.dir = b'/home/foo'
self.good_entry.shell = b'/bin/bash'
self.parser = consulsource.ConsulPasswdMapParser()
def testGetMap(self):
passwd_map = passwd.PasswdMap()
cache_info = StringIO('''[
{"Key": "org/users/foo/uid", "Value": "MTA="},
{"Key": "org/users/foo/gid", "Value": "MTA="},
{"Key": "org/users/foo/home", "Value": "L2hvbWUvZm9v"},
{"Key": "org/users/foo/shell", "Value": "L2Jpbi9iYXNo"},
{"Key": "org/users/foo/comment", "Value": "SG93IE5vdyBCcm93biBDb3c="},
{"Key": "org/users/foo/subkey/irrelevant_key", "Value": "YmFjb24="}
]''')
self.parser.GetMap(cache_info, passwd_map)
self.assertEqual(self.good_entry, passwd_map.PopItem())
def testReadEntry(self):
data = {
'uid': '10',
'gid': '10',
'comment': b'How Now Brown Cow',
'shell': b'/bin/bash',
'home': b'/home/foo',
'passwd': 'x'
}
entry = self.parser._ReadEntry('foo', data)
self.assertEqual(self.good_entry, entry)
def testDefaultEntryValues(self):
data = {'uid': '10', 'gid': '10'}
entry = self.parser._ReadEntry('foo', data)
self.assertEqual(entry.shell, '/bin/bash')
self.assertEqual(entry.dir, '/home/foo')
self.assertEqual(entry.gecos, '')
self.assertEqual(entry.passwd, 'x')
def testInvalidEntry(self):
data = {'irrelevant_key': 'bacon'}
entry = self.parser._ReadEntry('foo', data)
self.assertEqual(entry, None)
class TestConsulGroupMapParser(unittest.TestCase):
def setUp(self):
self.good_entry = group.GroupMapEntry()
self.good_entry.name = 'foo'
self.good_entry.passwd = 'x'
self.good_entry.gid = 10
self.good_entry.members = ['foo', 'bar']
self.parser = consulsource.ConsulGroupMapParser()
@unittest.skip('broken')
def testGetMap(self):
group_map = group.GroupMap()
cache_info = StringIO('''[
{"Key": "org/groups/foo/gid", "Value": "MTA="},
{"Key": "org/groups/foo/members", "Value": "Zm9vCmJhcg=="},
{"Key": "org/groups/foo/subkey/irrelevant_key", "Value": "YmFjb24="}
]''')
self.parser.GetMap(cache_info, group_map)
self.assertEqual(self.good_entry, group_map.PopItem())
def testReadEntry(self):
data = {'passwd': 'x', 'gid': '10', 'members': 'foo\nbar'}
entry = self.parser._ReadEntry('foo', data)
self.assertEqual(self.good_entry, entry)
def testDefaultPasswd(self):
data = {'gid': '10', 'members': 'foo\nbar'}
entry = self.parser._ReadEntry('foo', data)
self.assertEqual(self.good_entry, entry)
def testNoMembers(self):
data = {'gid': '10', 'members': ''}
entry = self.parser._ReadEntry('foo', data)
self.assertEqual(entry.members, [''])
def testInvalidEntry(self):
data = {'irrelevant_key': 'bacon'}
entry = self.parser._ReadEntry('foo', data)
self.assertEqual(entry, None)
class TestConsulShadowMapParser(unittest.TestCase):
def setUp(self):
self.good_entry = shadow.ShadowMapEntry()
self.good_entry.name = 'foo'
self.good_entry.passwd = '*'
self.good_entry.lstchg = 17246
self.good_entry.min = 0
self.good_entry.max = 99999
self.good_entry.warn = 7
self.parser = consulsource.ConsulShadowMapParser()
def testGetMap(self):
shadow_map = shadow.ShadowMap()
cache_info = StringIO('''[
{"Key": "org/groups/foo/passwd", "Value": "Kg=="},
{"Key": "org/groups/foo/lstchg", "Value": "MTcyNDY="},
{"Key": "org/groups/foo/min", "Value": "MA=="},
{"Key": "org/groups/foo/max", "Value": "OTk5OTk="},
{"Key": "org/groups/foo/warn", "Value": "Nw=="}
]''')
self.parser.GetMap(cache_info, shadow_map)
self.assertEqual(self.good_entry, shadow_map.PopItem())
def testReadEntry(self):
data = {
'passwd': '*',
'lstchg': 17246,
'min': 0,
'max': 99999,
'warn': 7
}
entry = self.parser._ReadEntry('foo', data)
self.assertEqual(self.good_entry, entry)
def testDefaultPasswd(self):
data = {'lstchg': 17246, 'min': 0, 'max': 99999, 'warn': 7}
entry = self.parser._ReadEntry('foo', data)
self.assertEqual(self.good_entry, entry)
if __name__ == '__main__':
unittest.main()
nsscache-version-0.42/nss_cache/sources/httpsource.py000066400000000000000000000330411402531134600230740ustar00rootroot00000000000000# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""An implementation of an http data source for nsscache."""
__author__ = ('blaedd@google.com (David MacKinnon)',)
import bz2
import calendar
import logging
import os
import pycurl
import time
from urllib.parse import urljoin
from nss_cache import error
from nss_cache.maps import automount
from nss_cache.maps import group
from nss_cache.maps import netgroup
from nss_cache.maps import passwd
from nss_cache.maps import shadow
from nss_cache.maps import sshkey
from nss_cache.sources import source
from nss_cache.util import file_formats
from nss_cache.util import curl
def RegisterImplementation(registration_callback):
registration_callback(HttpFilesSource)
class HttpFilesSource(source.Source):
"""Source for data fetched via HTTP."""
# HTTP defaults
PASSWD_URL = ''
SHADOW_URL = ''
GROUP_URL = ''
AUTOMOUNT_BASE_URL = ''
NETGROUP_URL = ''
SSHKEY_URL = ''
RETRY_DELAY = 5
RETRY_MAX = 3
TLS_CACERTFILE = '/etc/ssl/certs/ca-certificates.crt'
# for registration
name = 'http'
def __init__(self, conf, conn=None):
"""Initialise the HTTP Data Source.
Args:
conf: config.Config instance
conn: pycurl Curl object
"""
super(HttpFilesSource, self).__init__(conf)
self._SetDefaults(conf)
if not conn:
conn = pycurl.Curl()
conn.setopt(pycurl.NOPROGRESS, 1)
conn.setopt(pycurl.NOSIGNAL, 1)
# Don't hang on to connections from broken servers indefinitely.
conn.setopt(pycurl.TIMEOUT, 60)
conn.setopt(pycurl.USERAGENT, 'nsscache')
if self.conf['http_proxy']:
conn.setopt(pycurl.PROXY, self.conf['http_proxy'])
self.conn = conn
def _SetDefaults(self, configuration):
"""Set defaults if necessary."""
if 'automount_base_url' not in configuration:
configuration['automount_base_url'] = self.AUTOMOUNT_BASE_URL
if 'passwd_url' not in configuration:
configuration['passwd_url'] = self.PASSWD_URL
if 'shadow_url' not in configuration:
configuration['shadow_url'] = self.SHADOW_URL
if 'group_url' not in configuration:
configuration['group_url'] = self.GROUP_URL
if 'netgroup_url' not in configuration:
configuration['netgroup_url'] = self.NETGROUP_URL
if 'sshkey_url' not in configuration:
configuration['sshkey_url'] = self.SSHKEY_URL
if 'retry_delay' not in configuration:
configuration['retry_delay'] = self.RETRY_DELAY
if 'retry_max' not in configuration:
configuration['retry_max'] = self.RETRY_MAX
if 'tls_cacertfile' not in configuration:
configuration['tls_cacertfile'] = self.TLS_CACERTFILE
if 'http_proxy' not in configuration:
configuration['http_proxy'] = None
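# Illustrative configuration sketch; the URL values are hypothetical,
# the remaining keys mirror the class defaults above:
#   conf = {'passwd_url': 'https://example.com/passwd',
#           'group_url': 'https://example.com/group',
#           'shadow_url': 'https://example.com/shadow',
#           'retry_delay': 5, 'retry_max': 3,
#           'tls_cacertfile': '/etc/ssl/certs/ca-certificates.crt',
#           'http_proxy': None}
#   source = HttpFilesSource(conf)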
def GetPasswdMap(self, since=None):
"""Return the passwd map from this source.
Args:
since: Get data only changed since this timestamp (inclusive) or None
for all data.
Returns:
instance of passwd.PasswdMap
"""
return PasswdUpdateGetter().GetUpdates(self, self.conf['passwd_url'],
since)
def GetShadowMap(self, since=None):
"""Return the shadow map from this source.
Args:
since: Get data only changed since this timestamp (inclusive) or None
for all data.
Returns:
instance of shadow.ShadowMap
"""
return ShadowUpdateGetter().GetUpdates(self, self.conf['shadow_url'],
since)
def GetGroupMap(self, since=None):
"""Return the group map from this source.
Args:
since: Get data only changed since this timestamp (inclusive) or None
for all data.
Returns:
instance of group.GroupMap
"""
return GroupUpdateGetter().GetUpdates(self, self.conf['group_url'],
since)
def GetNetgroupMap(self, since=None):
"""Return the netgroup map from this source.
Args:
since: Get data only changed since this timestamp (inclusive) or None
for all data.
Returns:
instance of netgroup.NetgroupMap
"""
return NetgroupUpdateGetter().GetUpdates(self,
self.conf['netgroup_url'],
since)
def GetAutomountMap(self, since=None, location=None):
"""Return an automount map from this source.
Note that automount maps are stored in multiple locations, thus we expect
a caller to provide a location. We also follow the automount spec and
set our search scope to be 'one'.
Args:
location: Currently a string containing our search source, later we
may support hostname and additional parameters.
since: Get data only changed since this timestamp (inclusive) or None
for all data.
Returns:
instance of AutomountMap
Raises:
EmptyMap:
"""
if location is None:
self.log.error(
'A location is required to retrieve an automount map!')
raise error.EmptyMap
automount_url = urljoin(self.conf['automount_base_url'], location)
return AutomountUpdateGetter().GetUpdates(self, automount_url, since)
def GetAutomountMasterMap(self):
"""Return the autmount master map from this source.
Returns:
an instance of automount.AutomountMap
"""
master_map = self.GetAutomountMap(location='auto.master')
for map_entry in master_map:
map_entry.location = os.path.split(map_entry.location)[1]
self.log.debug('master map has: %s', map_entry.location)
return master_map
def GetSshkeyMap(self, since=None):
"""Return the sshkey map from this source.
Args:
since: Get data only changed since this timestamp (inclusive) or None
for all data.
Returns:
instance of sshkey.SshkeyMap
"""
return SshkeyUpdateGetter().GetUpdates(self, self.conf['sshkey_url'],
since)
class UpdateGetter(object):
"""Base class that gets updates over http."""
def __init__(self):
self.log = logging.getLogger(__name__)
def FromTimestampToHttp(self, ts):
"""Converts internal nss_cache timestamp to HTTP timestamp.
Args:
ts: number of seconds since epoch
Returns:
HTTP format timestamp string
"""
ts = time.gmtime(ts)
return time.strftime('%a, %d %b %Y %H:%M:%S GMT', ts)
def FromHttpToTimestamp(self, http_ts_string):
"""Converts HTTP timestamp string to internal nss_cache timestamp.
Args:
http_ts_string: HTTP format timestamp string
Returns:
number of seconds since epoch
"""
t = time.strptime(http_ts_string, '%a, %d %b %Y %H:%M:%S GMT')
return int(calendar.timegm(t))
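# Round-trip sketch of the two converters above (values taken from the
# unit tests):
#   FromTimestampToHttp(1259641025) == 'Tue, 01 Dec 2009 04:17:05 GMT'
#   FromHttpToTimestamp('Tue, 01 Dec 2009 04:17:05 GMT') == 1259641025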
def GetUpdates(self, source, url, since):
"""Get updates from a source.
Args:
source: A data source
url: url to the data we want
since: a timestamp representing the last change (None to force-get)
Returns:
A Map containing the updates, or an empty list if the data is
unmodified (HTTP 304).
Raises:
ValueError: an object in the source map is malformed
ConfigurationError:
"""
proto = url.split(':')[0]
# Newer libcurl lets you disable protocols there. Unfortunately
# it's not available in dapper or hardy.
if proto not in ('http', 'https'):
raise error.ConfigurationError('Unsupported protocol %s' % proto)
conn = source.conn
conn.setopt(pycurl.OPT_FILETIME, 1)
conn.setopt(pycurl.ENCODING, 'bzip2, gzip')
if since is not None:
conn.setopt(pycurl.TIMEVALUE, int(since))
conn.setopt(pycurl.TIMECONDITION, pycurl.TIMECONDITION_IFMODSINCE)
retry_count = 0
resp_code = 500
while retry_count < source.conf['retry_max']:
try:
source.log.debug('fetching %s', url)
(resp_code, headers, body) = curl.CurlFetch(url, conn, self.log)
self.log.debug('response code: %s', resp_code)
finally:
if resp_code < 400:
# Not modified-since
if resp_code == 304:
return []
if resp_code == 200:
break
retry_count += 1
self.log.warning('Failed connection: attempt #%s.', retry_count)
if retry_count == source.conf['retry_max']:
self.log.debug('max retries hit')
raise error.SourceUnavailable('Max retries exceeded.')
time.sleep(source.conf['retry_delay'])
headers = headers.split('\r\n')
last_modified = conn.getinfo(pycurl.INFO_FILETIME)
self.log.debug('last modified: %s', last_modified)
if last_modified == -1:
for header in headers:
if header.lower().startswith('last-modified'):
self.log.debug('%s', header)
http_ts_string = header[header.find(':') + 1:].strip()
last_modified = self.FromHttpToTimestamp(http_ts_string)
break
else:
http_ts_string = ''
else:
http_ts_string = self.FromTimestampToHttp(last_modified)
self.log.debug('Last-modified is: %s', http_ts_string)
# curl (on Ubuntu hardy at least) will handle gzip, but not bzip2
try:
response = bz2.decompress(body)
self.log.debug('bzip encoding found')
except IOError:
response = body
data_map = self.GetMap(cache_info=response)
if http_ts_string:
http_ts = self.FromHttpToTimestamp(http_ts_string)
self.log.debug('setting last modified to: %s', http_ts)
data_map.SetModifyTimestamp(http_ts)
return data_map
def GetParser(self):
"""Return the appropriate parser.
Must be implemented by child class.
"""
raise NotImplementedError
def GetMap(self, cache_info):
"""Creates a Map from the cache_info data.
Args:
cache_info: file-like object containing the data to parse
Returns:
A child of Map containing the cache data.
"""
return self.GetParser().GetMap(cache_info, self.CreateMap())
class AutomountUpdateGetter(UpdateGetter):
"""Get automount updates."""
def GetParser(self):
"""Returns a MapParser to parse FilesAutomount cache."""
return file_formats.FilesAutomountMapParser()
def CreateMap(self):
"""Returns a new AutomountMap instance."""
return automount.AutomountMap()
class PasswdUpdateGetter(UpdateGetter):
"""Get passwd updates."""
def GetParser(self):
"""Returns a MapParser to parse FilesPasswd cache."""
return file_formats.FilesPasswdMapParser()
def CreateMap(self):
"""Returns a new PasswdMap instance to have PasswdMapEntries added to
it."""
return passwd.PasswdMap()
class ShadowUpdateGetter(UpdateGetter):
"""Get shadow updates."""
def GetParser(self):
"""Returns a MapParser to parse FilesShadow cache."""
return file_formats.FilesShadowMapParser()
def CreateMap(self):
"""Returns a new ShadowMap instance to have ShadowMapEntries added to
it."""
return shadow.ShadowMap()
class GroupUpdateGetter(UpdateGetter):
"""Get group updates."""
def GetParser(self):
"""Returns a MapParser to parse FilesGroup cache."""
return file_formats.FilesGroupMapParser()
def CreateMap(self):
"""Returns a new GroupMap instance to have GroupMapEntries added to
it."""
return group.GroupMap()
class NetgroupUpdateGetter(UpdateGetter):
"""Get netgroup updates."""
def GetParser(self):
"""Returns a MapParser to parse FilesNetgroup cache."""
return file_formats.FilesNetgroupMapParser()
def CreateMap(self):
"""Returns a new NetgroupMap instance to have GroupMapEntries added to
it."""
return netgroup.NetgroupMap()
class SshkeyUpdateGetter(UpdateGetter):
"""Get sshkey updates."""
def GetParser(self):
"""Returns a MapParser to parse FilesSshkey cache."""
return file_formats.FilesSshkeyMapParser()
def CreateMap(self):
"""Returns a new SshkeyMap instance to have SshkeyMapEntries added to
it."""
return sshkey.SshkeyMap()
nsscache-version-0.42/nss_cache/sources/httpsource_test.py000066400000000000000000000277221402531134600241440ustar00rootroot00000000000000# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""An implementation of a mock http data source for nsscache."""
__author__ = 'blaedd@google.com (David MacKinnon)'
import time
import unittest
import pycurl
from mox3 import mox
from nss_cache import error
from nss_cache.maps import automount
from nss_cache.maps import group
from nss_cache.maps import netgroup
from nss_cache.maps import passwd
from nss_cache.maps import shadow
from nss_cache.maps import sshkey
from nss_cache.sources import httpsource
from nss_cache.util import file_formats
class TestHttpSource(unittest.TestCase):
def setUp(self):
"""Initialize a basic config dict."""
super(TestHttpSource, self).setUp()
self.config = {
'passwd_url': 'PASSWD_URL',
'shadow_url': 'SHADOW_URL',
'group_url': 'GROUP_URL',
'sshkey_url': 'SSHKEY_URL',
'retry_delay': 'TEST_RETRY_DELAY',
'retry_max': 'TEST_RETRY_MAX',
'tls_cacertfile': 'TEST_TLS_CACERTFILE',
'http_proxy': 'HTTP_PROXY',
}
def testDefaultConfiguration(self):
source = httpsource.HttpFilesSource({})
self.assertEqual(source.conf['passwd_url'],
httpsource.HttpFilesSource.PASSWD_URL)
self.assertEqual(source.conf['shadow_url'],
httpsource.HttpFilesSource.SHADOW_URL)
self.assertEqual(source.conf['group_url'],
httpsource.HttpFilesSource.GROUP_URL)
self.assertEqual(source.conf['sshkey_url'],
httpsource.HttpFilesSource.SSHKEY_URL)
self.assertEqual(source.conf['retry_max'],
httpsource.HttpFilesSource.RETRY_MAX)
self.assertEqual(source.conf['retry_delay'],
httpsource.HttpFilesSource.RETRY_DELAY)
self.assertEqual(source.conf['tls_cacertfile'],
httpsource.HttpFilesSource.TLS_CACERTFILE)
self.assertEqual(source.conf['http_proxy'], None)
def testOverrideDefaultConfiguration(self):
source = httpsource.HttpFilesSource(self.config)
self.assertEqual(source.conf['passwd_url'], 'PASSWD_URL')
self.assertEqual(source.conf['group_url'], 'GROUP_URL')
self.assertEqual(source.conf['shadow_url'], 'SHADOW_URL')
self.assertEqual(source.conf['sshkey_url'], 'SSHKEY_URL')
self.assertEqual(source.conf['retry_delay'], 'TEST_RETRY_DELAY')
self.assertEqual(source.conf['retry_max'], 'TEST_RETRY_MAX')
self.assertEqual(source.conf['tls_cacertfile'], 'TEST_TLS_CACERTFILE')
self.assertEqual(source.conf['http_proxy'], 'HTTP_PROXY')
class TestHttpUpdateGetter(mox.MoxTestBase):
def testFromTimestampToHttp(self):
ts = 1259641025
expected_http_ts = 'Tue, 01 Dec 2009 04:17:05 GMT'
self.assertEqual(expected_http_ts,
httpsource.UpdateGetter().FromTimestampToHttp(ts))
def testFromHttpToTimestamp(self):
expected_ts = 1259641025
http_ts = 'Tue, 01 Dec 2009 04:17:05 GMT'
self.assertEqual(expected_ts,
httpsource.UpdateGetter().FromHttpToTimestamp(http_ts))
def testAcceptHttpProtocol(self):
mock_conn = self.mox.CreateMockAnything()
mock_conn.setopt(mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes()
mock_conn.perform()
# We use code 304 since it basically shortcuts to the end of the method.
mock_conn.getinfo(pycurl.RESPONSE_CODE).AndReturn(304)
self.mox.StubOutWithMock(pycurl, 'Curl')
pycurl.Curl().AndReturn(mock_conn)
self.mox.ReplayAll()
config = {}
source = httpsource.HttpFilesSource(config)
result = httpsource.UpdateGetter().GetUpdates(source, 'http://TEST_URL',
None)
self.assertEqual([], result)
def testAcceptHttpsProtocol(self):
mock_conn = self.mox.CreateMockAnything()
mock_conn.setopt(mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes()
mock_conn.perform()
# We use code 304 since it basically shortcuts to the end of the method.
mock_conn.getinfo(pycurl.RESPONSE_CODE).AndReturn(304)
self.mox.StubOutWithMock(pycurl, 'Curl')
pycurl.Curl().AndReturn(mock_conn)
self.mox.ReplayAll()
config = {}
source = httpsource.HttpFilesSource(config)
result = httpsource.UpdateGetter().GetUpdates(source,
'https://TEST_URL', None)
self.assertEqual([], result)
def testRaiseConfigurationErrorOnUnsupportedProtocol(self):
# connection should never be used in this case.
mock_conn = self.mox.CreateMockAnything()
mock_conn.setopt(mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes()
self.mox.StubOutWithMock(pycurl, 'Curl')
pycurl.Curl().AndReturn(mock_conn)
self.mox.ReplayAll()
source = httpsource.HttpFilesSource({})
self.assertRaises(error.ConfigurationError,
httpsource.UpdateGetter().GetUpdates, source,
'ftp://test_url', None)
def testNoUpdatesForTemporaryFailure(self):
mock_conn = self.mox.CreateMockAnything()
mock_conn.setopt(mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes()
mock_conn.perform()
mock_conn.getinfo(pycurl.RESPONSE_CODE).AndReturn(304)
self.mox.StubOutWithMock(pycurl, 'Curl')
pycurl.Curl().AndReturn(mock_conn)
self.mox.ReplayAll()
config = {}
source = httpsource.HttpFilesSource(config)
result = httpsource.UpdateGetter().GetUpdates(source,
'https://TEST_URL', 37)
self.assertEqual(result, [])
def testGetUpdatesIfTimestampNotMatch(self):
ts = 1259641025
mock_conn = self.mox.CreateMockAnything()
mock_conn.setopt(mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes()
mock_conn.perform()
mock_conn.getinfo(pycurl.RESPONSE_CODE).AndReturn(200)
mock_conn.getinfo(pycurl.INFO_FILETIME).AndReturn(ts)
self.mox.StubOutWithMock(pycurl, 'Curl')
pycurl.Curl().AndReturn(mock_conn)
mock_map = self.mox.CreateMockAnything()
mock_map.SetModifyTimestamp(ts)
getter = httpsource.UpdateGetter()
self.mox.StubOutWithMock(getter, 'GetMap')
getter.GetMap(cache_info=mox.IgnoreArg()).AndReturn(mock_map)
self.mox.ReplayAll()
config = {}
source = httpsource.HttpFilesSource(config)
result = getter.GetUpdates(source, 'https://TEST_URL', 1)
self.assertEqual(mock_map, result)
def testGetUpdatesWithoutTimestamp(self):
mock_conn = self.mox.CreateMockAnything()
mock_conn.setopt(mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes()
mock_conn.perform()
mock_conn.getinfo(pycurl.RESPONSE_CODE).AndReturn(200)
mock_conn.getinfo(pycurl.INFO_FILETIME).AndReturn(-1)
self.mox.StubOutWithMock(pycurl, 'Curl')
pycurl.Curl().AndReturn(mock_conn)
mock_map = self.mox.CreateMockAnything()
getter = httpsource.UpdateGetter()
self.mox.StubOutWithMock(getter, 'GetMap')
getter.GetMap(cache_info=mox.IgnoreArg()).AndReturn(mock_map)
self.mox.ReplayAll()
config = {}
source = httpsource.HttpFilesSource(config)
result = getter.GetUpdates(source, 'https://TEST_URL', 1)
self.assertEqual(mock_map, result)
def testRetryOnErrorCodeResponse(self):
config = {'retry_delay': 5, 'retry_max': 3}
mock_conn = self.mox.CreateMockAnything()
mock_conn.setopt(mox.IgnoreArg(), mox.IgnoreArg()).MultipleTimes()
mock_conn.perform().MultipleTimes()
mock_conn.getinfo(pycurl.RESPONSE_CODE).MultipleTimes().AndReturn(400)
self.mox.StubOutWithMock(time, 'sleep')
time.sleep(5)
time.sleep(5)
self.mox.StubOutWithMock(pycurl, 'Curl')
pycurl.Curl().AndReturn(mock_conn)
self.mox.ReplayAll()
source = httpsource.HttpFilesSource(config)
self.assertRaises(error.SourceUnavailable,
httpsource.UpdateGetter().GetUpdates,
source,
url='https://TEST_URL',
since=None)
class TestPasswdUpdateGetter(unittest.TestCase):
def setUp(self):
super(TestPasswdUpdateGetter, self).setUp()
self.updater = httpsource.PasswdUpdateGetter()
def testGetParser(self):
parser = self.updater.GetParser()
self.assertTrue(
isinstance(parser, file_formats.FilesPasswdMapParser))
def testCreateMap(self):
self.assertTrue(isinstance(self.updater.CreateMap(), passwd.PasswdMap))
class TestShadowUpdateGetter(unittest.TestCase):
def setUp(self):
super(TestShadowUpdateGetter, self).setUp()
self.updater = httpsource.ShadowUpdateGetter()
def testGetParser(self):
parser = self.updater.GetParser()
self.assertTrue(
isinstance(parser, file_formats.FilesShadowMapParser))
def testCreateMap(self):
self.assertTrue(isinstance(self.updater.CreateMap(), shadow.ShadowMap))
class TestGroupUpdateGetter(unittest.TestCase):
def setUp(self):
super(TestGroupUpdateGetter, self).setUp()
self.updater = httpsource.GroupUpdateGetter()
def testGetParser(self):
parser = self.updater.GetParser()
self.assertTrue(
isinstance(parser, file_formats.FilesGroupMapParser))
def testCreateMap(self):
self.assertTrue(isinstance(self.updater.CreateMap(), group.GroupMap))
class TestNetgroupUpdateGetter(unittest.TestCase):
def setUp(self):
super(TestNetgroupUpdateGetter, self).setUp()
self.updater = httpsource.NetgroupUpdateGetter()
def testGetParser(self):
parser = self.updater.GetParser()
self.assertTrue(
isinstance(parser, file_formats.FilesNetgroupMapParser))
def testCreateMap(self):
self.assertTrue(
isinstance(self.updater.CreateMap(), netgroup.NetgroupMap))
class TestAutomountUpdateGetter(unittest.TestCase):
def setUp(self):
super(TestAutomountUpdateGetter, self).setUp()
self.updater = httpsource.AutomountUpdateGetter()
def testGetParser(self):
parser = self.updater.GetParser()
self.assertTrue(
isinstance(parser, file_formats.FilesAutomountMapParser))
def testCreateMap(self):
self.assertTrue(
isinstance(self.updater.CreateMap(), automount.AutomountMap))
class TestSshkeyUpdateGetter(unittest.TestCase):
def setUp(self):
super(TestSshkeyUpdateGetter, self).setUp()
self.updater = httpsource.SshkeyUpdateGetter()
def testGetParser(self):
parser = self.updater.GetParser()
self.assertTrue(
isinstance(parser, file_formats.FilesSshkeyMapParser))
def testCreateMap(self):
self.assertTrue(isinstance(self.updater.CreateMap(), sshkey.SshkeyMap))
if __name__ == '__main__':
unittest.main()
nsscache-version-0.42/nss_cache/sources/ldapsource.py000066400000000000000000001265231402531134600230450ustar00rootroot00000000000000# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""An implementation of an ldap data source for nsscache."""
__author__ = ('jaq@google.com (Jamie Wilkinson)',
'vasilios@google.com (Vasilios Hoffman)')
import calendar
import logging
import time
import ldap
import ldap.sasl
import re
from binascii import b2a_hex
from urllib.parse import quote
from distutils.version import StrictVersion
from nss_cache import error
from nss_cache.maps import automount
from nss_cache.maps import group
from nss_cache.maps import netgroup
from nss_cache.maps import passwd
from nss_cache.maps import shadow
from nss_cache.maps import sshkey
from nss_cache.sources import source
IS_LDAP24_OR_NEWER = StrictVersion(ldap.__version__) >= StrictVersion('2.4')
# ldap.LDAP_CONTROL_PAGE_OID is unavailable on some systems, so we define it here
LDAP_CONTROL_PAGE_OID = '1.2.840.113556.1.4.319'
def RegisterImplementation(registration_callback):
registration_callback(LdapSource)
def makeSimplePagedResultsControl(page_size):
# The API for this is different on older versions of python-ldap, so we need
# to handle this case.
if IS_LDAP24_OR_NEWER:
return ldap.controls.SimplePagedResultsControl(True,
size=page_size,
cookie='')
else:
return ldap.controls.SimplePagedResultsControl(LDAP_CONTROL_PAGE_OID,
True, (page_size, ''))
def getCookieFromControl(pctrl):
if IS_LDAP24_OR_NEWER:
return pctrl.cookie
else:
return pctrl.controlValue[1]
def setCookieOnControl(control, cookie, page_size):
if IS_LDAP24_OR_NEWER:
control.cookie = cookie
else:
control.controlValue = (page_size, cookie)
return cookie
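# Minimal paged-search sketch showing how the three helpers above combine
# with python-ldap; `conn`, `base` and `filterstr` are hypothetical:
#
#   control = makeSimplePagedResultsControl(1000)
#   while True:
#       msgid = conn.search_ext(base, ldap.SCOPE_SUBTREE, filterstr,
#                               serverctrls=[control])
#       rtype, rdata, rmsgid, serverctrls = conn.result3(msgid)
#       pctrls = [c for c in serverctrls
#                 if c.controlType == LDAP_CONTROL_PAGE_OID]
#       cookie = getCookieFromControl(pctrls[0]) if pctrls else ''
#       if not cookie:
#           break  # no more pages
#       setCookieOnControl(control, cookie, 1000)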
def sidToStr(sid):
"""Converts an objectSid hexadecimal string returned from the LDAP query to
the objectSid string version in format of
S-1-5-21-1270288957-3800934213-3019856503-500 For more information about
the objectSid binary structure:
https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-dtyp/78eb9013-1c3a-4970-ad1f-2b1dad588a25
https://devblogs.microsoft.com/oldnewthing/?p=40253
This function was based from:
https://ldap3.readthedocs.io/_modules/ldap3/protocol/formatters/formatters.html#format_sid
"""
try:
if sid.startswith(b'S-1') or sid.startswith('S-1'):
return sid
except Exception:
pass
try:
if str is not bytes:
revision = int(sid[0])
sub_authorities = int(sid[1])
identifier_authority = int.from_bytes(sid[2:8], byteorder='big')
if identifier_authority >= 2**32:
identifier_authority = hex(identifier_authority)
sub_authority = '-' + '-'.join([
str(
int.from_bytes(sid[8 + (i * 4):12 + (i * 4)],
byteorder='little'))
for i in range(sub_authorities)
])
else:
revision = int(b2a_hex(sid[0]))
sub_authorities = int(b2a_hex(sid[1]))
identifier_authority = int(b2a_hex(sid[2:8]), 16)
if identifier_authority >= 2**32:
identifier_authority = hex(identifier_authority)
sub_authority = '-' + '-'.join([
str(int(b2a_hex(sid[11 + (i * 4):7 + (i * 4):-1]), 16))
for i in range(sub_authorities)
])
objectSid = 'S-' + str(revision) + '-' + str(
identifier_authority) + sub_authority
return objectSid
except Exception:
pass
return sid
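# Illustrative example: the well-known BUILTIN\Administrators SID. The
# binary layout is revision (1 byte), sub-authority count (1 byte),
# identifier authority (6 bytes, big-endian), then each sub-authority as
# 4 bytes little-endian:
#   sidToStr(b'\x01\x02\x00\x00\x00\x00\x00\x05'
#            b'\x20\x00\x00\x00\x20\x02\x00\x00') == 'S-1-5-32-544'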
class LdapSource(source.Source):
"""Source for data in LDAP.
After initialisation, one can search the data source for 'objects'
under a particular part of the LDAP tree, with some filter, and have it
return only some set of attributes.
'objects' in this sense means some structured blob of data, not a Python
object.
"""
# ldap defaults
BIND_DN = ''
BIND_PASSWORD = ''
RETRY_DELAY = 5
RETRY_MAX = 3
SCOPE = 'one'
TIMELIMIT = -1
TLS_REQUIRE_CERT = 'demand' # one of never, hard, demand, allow, try
# for registration
name = 'ldap'
# Page size for paged LDAP requests
# Value chosen based on default Active Directory MaxPageSize
PAGE_SIZE = 1000
def __init__(self, conf, conn=None):
"""Initialise the LDAP Data Source.
Args:
conf: config.Config instance
conn: An instance of ldap.LDAPObject that'll be used as the connection.
"""
super(LdapSource, self).__init__(conf)
self._dn_requested = False # dn is a special-cased attribute
self._SetDefaults(conf)
self._conf = conf
self.ldap_controls = makeSimplePagedResultsControl(self.PAGE_SIZE)
# Used by _ReSearch:
self._last_search_params = None
if conn is None:
# ReconnectLDAPObject should handle interrupted ldap transactions.
# also, ugh
rlo = ldap.ldapobject.ReconnectLDAPObject
self.conn = rlo(uri=conf['uri'],
retry_max=conf['retry_max'],
retry_delay=conf['retry_delay'])
if conf['tls_starttls'] == 1:
self.conn.start_tls_s()
if 'ldap_debug' in conf:
self.conn.set_option(ldap.OPT_DEBUG_LEVEL, conf['ldap_debug'])
else:
self.conn = conn
# TODO(v): We should bind on-demand instead.
# (although binding here makes it easier to simulate a dropped network)
self.Bind(conf)
def _SetDefaults(self, configuration):
"""Set defaults if necessary."""
# LDAPI URLs must be url escaped socket filenames; rewrite if necessary.
if 'uri' in configuration:
if configuration['uri'].startswith('ldapi://'):
configuration['uri'] = 'ldapi://' + quote(
configuration['uri'][8:], '')
if 'bind_dn' not in configuration:
configuration['bind_dn'] = self.BIND_DN
if 'bind_password' not in configuration:
configuration['bind_password'] = self.BIND_PASSWORD
if 'retry_delay' not in configuration:
configuration['retry_delay'] = self.RETRY_DELAY
if 'retry_max' not in configuration:
configuration['retry_max'] = self.RETRY_MAX
if 'scope' not in configuration:
configuration['scope'] = self.SCOPE
if 'timelimit' not in configuration:
configuration['timelimit'] = self.TIMELIMIT
# TODO(jaq): XXX EVIL. ldap client libraries change behaviour if we use
# polling, and it's nasty. So don't let the user poll.
if configuration['timelimit'] == 0:
configuration['timelimit'] = -1
if 'tls_require_cert' not in configuration:
configuration['tls_require_cert'] = self.TLS_REQUIRE_CERT
if 'tls_starttls' not in configuration:
configuration['tls_starttls'] = 0
# Translate tls_require into appropriate constant, if necessary.
if configuration['tls_require_cert'] == 'never':
configuration['tls_require_cert'] = ldap.OPT_X_TLS_NEVER
elif configuration['tls_require_cert'] == 'hard':
configuration['tls_require_cert'] = ldap.OPT_X_TLS_HARD
elif configuration['tls_require_cert'] == 'demand':
configuration['tls_require_cert'] = ldap.OPT_X_TLS_DEMAND
elif configuration['tls_require_cert'] == 'allow':
configuration['tls_require_cert'] = ldap.OPT_X_TLS_ALLOW
elif configuration['tls_require_cert'] == 'try':
configuration['tls_require_cert'] = ldap.OPT_X_TLS_TRY
if 'sasl_authzid' not in configuration:
configuration['sasl_authzid'] = ''
# Should we issue STARTTLS?
if configuration['tls_starttls'] in (1, '1', 'on', 'yes', 'true'):
configuration['tls_starttls'] = 1
else:
configuration['tls_starttls'] = 0
# Setting global ldap defaults.
ldap.set_option(ldap.OPT_X_TLS_REQUIRE_CERT,
configuration['tls_require_cert'])
ldap.set_option(ldap.OPT_REFERRALS, 0)
if 'tls_cacertdir' in configuration:
ldap.set_option(ldap.OPT_X_TLS_CACERTDIR,
configuration['tls_cacertdir'])
if 'tls_cacertfile' in configuration:
ldap.set_option(ldap.OPT_X_TLS_CACERTFILE,
configuration['tls_cacertfile'])
if 'tls_certfile' in configuration:
ldap.set_option(ldap.OPT_X_TLS_CERTFILE,
configuration['tls_certfile'])
if 'tls_keyfile' in configuration:
ldap.set_option(ldap.OPT_X_TLS_KEYFILE,
configuration['tls_keyfile'])
ldap.version = ldap.VERSION3 # this is hard-coded, we only support V3
def _SetCookie(self, cookie):
return setCookieOnControl(self.ldap_controls, cookie, self.PAGE_SIZE)
def Bind(self, configuration):
"""Bind to LDAP, retrying if necessary."""
# If the server is unavailable, we are going to find out now, as this
# actually initiates the network connection.
retry_count = 0
while retry_count < configuration['retry_max']:
self.log.debug('opening ldap connection and binding to %s',
configuration['uri'])
try:
if 'use_sasl' in configuration and configuration['use_sasl']:
if ('sasl_mech' in configuration and
configuration['sasl_mech'] and
configuration['sasl_mech'].lower() == 'gssapi'):
sasl = ldap.sasl.gssapi(configuration['sasl_authzid'])
# TODO: Add other sasl mechs
else:
raise error.ConfigurationError(
'SASL mechanism not supported')
self.conn.sasl_interactive_bind_s('', sasl)
else:
self.conn.simple_bind_s(who=configuration['bind_dn'],
cred=str(
configuration['bind_password']))
break
except ldap.SERVER_DOWN as e:
retry_count += 1
self.log.warning('Failed LDAP connection: attempt #%s.',
retry_count)
self.log.debug('ldap error is %r', e)
if retry_count == configuration['retry_max']:
self.log.debug('max retries hit')
raise error.SourceUnavailable(e)
self.log.debug('sleeping %d seconds',
configuration['retry_delay'])
time.sleep(configuration['retry_delay'])
def _ReSearch(self):
"""Performs self.Search again with the previously used parameters.
Returns:
self.Search result.
"""
self.Search(*self._last_search_params)
def Search(self, search_base, search_filter, search_scope, attrs):
"""Search the data source.
The search is asynchronous; data should be retrieved by iterating over
the source object itself (see __iter__() below).
Args:
search_base: the base of the tree being searched
search_filter: a filter on the objects to be returned
search_scope: the scope of the search from ldap.SCOPE_*
attrs: a list of attributes to be returned
Returns:
nothing.
"""
self._last_search_params = (search_base, search_filter, search_scope,
attrs)
self.log.debug('searching for base=%r, filter=%r, scope=%r, attrs=%r',
search_base, search_filter, search_scope, attrs)
if 'dn' in attrs: # special cased attribute
self._dn_requested = True
self.message_id = self.conn.search_ext(base=search_base,
filterstr=search_filter,
scope=search_scope,
attrlist=attrs,
serverctrls=[self.ldap_controls])
def __iter__(self):
"""Iterate over the data from the last search.
Probably not threadsafe.
Yields:
Search results from the prior call to self.Search()
"""
# Acquire data to yield:
while True:
result_type, data = None, None
timeout_retries = 0
while timeout_retries < int(self._conf['retry_max']):
try:
result_type, data, _, serverctrls = self.conn.result3(
self.message_id, all=0, timeout=self.conf['timelimit'])
# we need to filter out AD referrals
if data and not data[0][0]:
continue
# Paged requests return a new cookie in serverctrls at the end of a page,
# so we search for the cookie and perform another search if needed.
if len(serverctrls) > 0:
# Search for appropriate control
simple_paged_results_controls = [
control for control in serverctrls
if control.controlType == LDAP_CONTROL_PAGE_OID
]
if simple_paged_results_controls:
# We only expect one control; just take the first in the list.
cookie = getCookieFromControl(
simple_paged_results_controls[0])
if len(cookie) > 0:
# If cookie is non-empty, call search_ext and result3 again
self._SetCookie(cookie)
self._ReSearch()
result_type, data, _, serverctrls = self.conn.result3(
self.message_id,
all=0,
timeout=self.conf['timelimit'])
# else: An empty cookie means we are done.
# break loop once result3 doesn't time out and reset cookie
setCookieOnControl(self.ldap_controls, '', self.PAGE_SIZE)
break
except ldap.SIZELIMIT_EXCEEDED:
self.log.warning(
'LDAP server size limit exceeded; using page size {0}.'.
format(self.PAGE_SIZE))
return
except ldap.NO_SUCH_OBJECT:
self.log.debug('Returning due to ldap.NO_SUCH_OBJECT')
return
except ldap.TIMELIMIT_EXCEEDED:
timeout_retries += 1
self.log.warning('Timeout on LDAP results, attempt #%s.',
timeout_retries)
if timeout_retries >= self._conf['retry_max']:
self.log.debug('max retries hit, returning')
return
self.log.debug('sleeping %d seconds',
self._conf['retry_delay'])
time.sleep(self.conf['retry_delay'])
if result_type == ldap.RES_SEARCH_RESULT:
self.log.debug('Returning due to RES_SEARCH_RESULT')
return
if result_type != ldap.RES_SEARCH_ENTRY:
self.log.info('Unknown result type %r, ignoring.', result_type)
if not data:
self.log.debug('Returning due to len(data) == 0')
return
for record in data:
# If the dn is requested, return it along with the payload,
# otherwise ignore it.
for key in record[1]:
for i in range(len(record[1][key])):
if isinstance(record[1][key][i],
bytes) and key != 'objectSid':
value = record[1][key][i].decode('utf-8')
record[1][key][i] = value
if self._dn_requested:
merged_records = {'dn': record[0]}
merged_records.update(record[1])
yield merged_records
else:
yield record[1]
def GetSshkeyMap(self, since=None):
"""Return the sshkey map from this source.
Args:
since: Get data only changed since this timestamp (inclusive) or None
for all data.
Returns:
instance of maps.SshkeyMap
"""
return SshkeyUpdateGetter(self.conf).GetUpdates(
source=self,
search_base=self.conf['base'],
search_filter=self.conf['filter'],
search_scope=self.conf['scope'],
since=since)
def GetPasswdMap(self, since=None):
"""Return the passwd map from this source.
Args:
since: Get data only changed since this timestamp (inclusive) or None
for all data.
Returns:
instance of maps.PasswdMap
"""
return PasswdUpdateGetter(self.conf).GetUpdates(
source=self,
search_base=self.conf['base'],
search_filter=self.conf['filter'],
search_scope=self.conf['scope'],
since=since)
def GetGroupMap(self, since=None):
"""Return the group map from this source.
Args:
since: Get data only changed since this timestamp (inclusive) or None
for all data.
Returns:
instance of maps.GroupMap
"""
return GroupUpdateGetter(self.conf).GetUpdates(
source=self,
search_base=self.conf['base'],
search_filter=self.conf['filter'],
search_scope=self.conf['scope'],
since=since)
def GetShadowMap(self, since=None):
"""Return the shadow map from this source.
Args:
since: Get data only changed since this timestamp (inclusive) or None
for all data.
Returns:
instance of ShadowMap
"""
return ShadowUpdateGetter(self.conf).GetUpdates(
source=self,
search_base=self.conf['base'],
search_filter=self.conf['filter'],
search_scope=self.conf['scope'],
since=since)
def GetNetgroupMap(self, since=None):
"""Return the netgroup map from this source.
Args:
since: Get data only changed since this timestamp (inclusive) or None
for all data.
Returns:
instance of NetgroupMap
"""
return NetgroupUpdateGetter(self.conf).GetUpdates(
source=self,
search_base=self.conf['base'],
search_filter=self.conf['filter'],
search_scope=self.conf['scope'],
since=since)
def GetAutomountMap(self, since=None, location=None):
"""Return an automount map from this source.
        Note that automount maps are stored in multiple locations, thus we expect
a caller to provide a location. We also follow the automount spec and
set our search scope to be 'one'.
Args:
since: Get data only changed since this timestamp (inclusive) or None
for all data.
location: Currently a string containing our search base, later we
may support hostname and additional parameters.
Returns:
instance of AutomountMap
"""
if location is None:
self.log.error(
'A location is required to retrieve an automount map!')
raise error.EmptyMap
autofs_filter = '(objectclass=automount)'
return AutomountUpdateGetter(self.conf).GetUpdates(
source=self,
search_base=location,
search_filter=autofs_filter,
search_scope='one',
since=since)
def GetAutomountMasterMap(self):
"""Return the autmount master map from this source.
The automount master map is a special-case map which points to a dynamic
list of additional maps. We currently support only the schema outlined at
        http://docs.sun.com/source/806-4251-10/mapping.htm commonly used by Linux
automount clients, namely ou=auto.master and objectclass=automount entries.
Returns:
an instance of maps.AutomountMap
"""
search_base = self.conf['base']
search_scope = ldap.SCOPE_SUBTREE
# auto.master is stored under ou=auto.master with objectclass=automountMap
search_filter = '(&(objectclass=automountMap)(ou=auto.master))'
self.log.debug('retrieving automount master map.')
self.Search(search_base=search_base,
search_filter=search_filter,
search_scope=search_scope,
attrs=['dn'])
search_base = None
for obj in self:
# the dn of the matched object is our search base
search_base = obj['dn']
if search_base is None:
self.log.critical('Could not find automount master map!')
raise error.EmptyMap
self.log.debug('found ou=auto.master at %s', search_base)
master_map = self.GetAutomountMap(location=search_base)
# fix our location attribute to contain the data we
# expect returned to us later, namely the new search base(s)
for map_entry in master_map:
# we currently ignore hostname and just look for the dn which will
# be the search_base for this map. third field, colon delimited.
map_entry.location = map_entry.location.split(':')[2]
            # and strip the space separated options
map_entry.location = map_entry.location.split(' ')[0]
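            # e.g. (hypothetical entry) 'ldap:ldap-server:ou=auto.home,dc=example,dc=com -rw'
            #   -> 'ou=auto.home,dc=example,dc=com -rw'
            #   -> 'ou=auto.home,dc=example,dc=com'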
            self.log.debug('master map has: %s', map_entry.location)
return master_map
def Verify(self, since=None):
"""Verify that this source is contactable and can be queried for
data."""
if since is None:
# one minute in the future
since = int(time.time() + 60)
try:
results = self.GetPasswdMap(since=since)
except KeyError:
# AD groups don't have all attributes of AD users
results = self.GetGroupMap(since=since)
return len(results)
class UpdateGetter(object):
"""Base class that gets updates from LDAP."""
def __init__(self, conf):
super(UpdateGetter, self).__init__()
self.conf = conf
def FromLdapToTimestamp(self, ldap_ts_string):
"""Transforms a LDAP timestamp into the nss_cache internal timestamp.
Args:
ldap_ts_string: An LDAP timestamp string in the format %Y%m%d%H%M%SZ
Returns:
number of seconds since epoch.
"""
if isinstance(ldap_ts_string, bytes):
ldap_ts_string = ldap_ts_string.decode('utf-8')
try:
if self.conf.get('ad'):
# AD timestamp has different format
t = time.strptime(ldap_ts_string, '%Y%m%d%H%M%S.0Z')
else:
t = time.strptime(ldap_ts_string, '%Y%m%d%H%M%SZ')
except ValueError:
# Some systems add a decimal component; try to filter it:
            m = re.match(r'([0-9]*)(\.[0-9]*)?(Z)', ldap_ts_string)
if m:
ldap_ts_string = m.group(1) + m.group(3)
if self.conf.get('ad'):
t = time.strptime(ldap_ts_string, '%Y%m%d%H%M%S.0Z')
else:
t = time.strptime(ldap_ts_string, '%Y%m%d%H%M%SZ')
return int(calendar.timegm(t))
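    # Worked example: '20070227012807Z' (the modifyTimestamp used in the unit
    # tests below) parses as 2007-02-27 01:28:07 UTC, which calendar.timegm()
    # converts to 1172539687 seconds since the epoch.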
def FromTimestampToLdap(self, ts):
"""Transforms nss_cache internal timestamp into a LDAP timestamp.
Args:
ts: number of seconds since epoch
Returns:
LDAP format timestamp string.
"""
if self.conf.get('ad'):
t = time.strftime('%Y%m%d%H%M%S.0Z', time.gmtime(ts))
else:
t = time.strftime('%Y%m%d%H%M%SZ', time.gmtime(ts))
return t
def GetUpdates(self, source, search_base, search_filter, search_scope,
since):
"""Get updates from a source.
Args:
source: a data source
search_base: the LDAP base of the tree
search_filter: the LDAP object filter
search_scope: the LDAP scope filter, one of 'base', 'one', or 'sub'.
since: a timestamp to get updates since (None for 'get everything')
Returns:
a tuple containing the map of updates and a maximum timestamp
Raises:
error.ConfigurationError: scope is invalid
ValueError: an object in the source map is malformed
"""
if self.conf.get('ad'):
# AD attribute for modifyTimestamp is whenChanged
self.attrs.append('whenChanged')
else:
self.attrs.append('modifyTimestamp')
if since is not None:
ts = self.FromTimestampToLdap(since)
            # Since OpenLDAP disallows a "greater than" match on
            # modifyTimestamp, we have to increment by one second.
if self.conf.get('ad'):
                # Use slicing, not rstrip('.0Z'): rstrip strips a character
                # set and would mangle timestamps whose seconds end in 0.
                ts = int(ts[:-len('.0Z')]) + 1
ts = '%s.0Z' % ts
search_filter = ('(&%s(whenChanged>=%s))' % (search_filter, ts))
else:
ts = int(ts.rstrip('Z')) + 1
ts = '%sZ' % ts
search_filter = ('(&%s(modifyTimestamp>=%s))' %
(search_filter, ts))
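        # Worked example (non-AD): since=1172539687 serialises to
        # '20070227012807Z', is bumped to '20070227012808Z', and a base
        # filter of, say, '(objectClass=posixAccount)' becomes
        # '(&(objectClass=posixAccount)(modifyTimestamp>=20070227012808Z))'.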
if search_scope == 'base':
search_scope = ldap.SCOPE_BASE
elif search_scope in ['one', 'onelevel']:
search_scope = ldap.SCOPE_ONELEVEL
elif search_scope in ['sub', 'subtree']:
search_scope = ldap.SCOPE_SUBTREE
else:
raise error.ConfigurationError('Invalid scope: %s' % search_scope)
source.Search(search_base=search_base,
search_filter=search_filter,
search_scope=search_scope,
attrs=self.attrs)
# Don't initialize with since, because we really want to get the
# latest timestamp read, and if somehow a larger 'since' slips through
# the checks in main(), we'd better catch it here.
max_ts = None
data_map = self.CreateMap()
for obj in source:
for field in self.essential_fields:
if field not in obj:
                    logging.warning('invalid object passed: %r not in %r',
                                    field, obj)
                    raise ValueError('Invalid object passed: %r' % obj)
if self.conf.get('ad'):
obj_ts = self.FromLdapToTimestamp(obj['whenChanged'][0])
else:
                try:
                    obj_ts = self.FromLdapToTimestamp(obj['modifyTimestamp'][0])
                except KeyError:
                    # some servers return the attribute as 'modifyTimeStamp'
                    obj_ts = self.FromLdapToTimestamp(obj['modifyTimeStamp'][0])
if max_ts is None or obj_ts > max_ts:
max_ts = obj_ts
try:
if not data_map.Add(self.Transform(obj)):
logging.info('could not add obj: %r', obj)
except AttributeError as e:
logging.warning('error %r, discarding malformed obj: %r',
str(e), obj)
# Perform some post processing on the data_map.
self.PostProcess(data_map, source, search_filter, search_scope)
data_map.SetModifyTimestamp(max_ts)
return data_map
def PostProcess(self, data_map, source, search_filter, search_scope):
"""Perform some post-process of the data."""
pass
class PasswdUpdateGetter(UpdateGetter):
"""Get passwd updates."""
def __init__(self, conf):
super(PasswdUpdateGetter, self).__init__(conf)
if self.conf.get('ad'):
# attributes of AD user to be returned
self.attrs = [
'sAMAccountName', 'objectSid', 'displayName',
'unixHomeDirectory', 'pwdLastSet', 'loginShell'
]
self.essential_fields = ['sAMAccountName', 'objectSid']
else:
self.attrs = [
'uid', 'uidNumber', 'gidNumber', 'gecos', 'cn', 'homeDirectory',
'loginShell', 'fullName'
]
if 'uidattr' in self.conf:
self.attrs.append(self.conf['uidattr'])
if 'uidregex' in self.conf:
self.uidregex = re.compile(self.conf['uidregex'])
self.essential_fields = ['uid', 'uidNumber', 'gidNumber']
if self.conf.get('use_rid'):
self.attrs.append('sambaSID')
self.essential_fields.append('sambaSID')
self.log = logging.getLogger(self.__class__.__name__)
def CreateMap(self):
"""Returns a new PasswdMap instance to have PasswdMapEntries added to
it."""
return passwd.PasswdMap()
def Transform(self, obj):
"""Transforms a LDAP posixAccount data structure into a
PasswdMapEntry."""
pw = passwd.PasswdMapEntry()
if self.conf.get('ad'):
if 'displayName' in obj:
pw.gecos = obj['displayName'][0]
elif 'gecos' in obj:
pw.gecos = obj['gecos'][0]
elif 'cn' in obj:
pw.gecos = obj['cn'][0]
elif 'fullName' in obj:
pw.gecos = obj['fullName'][0]
else:
raise ValueError('Neither gecos nor cn found')
pw.gecos = pw.gecos.replace('\n', '')
if self.conf.get('ad'):
pw.name = obj['sAMAccountName'][0]
elif 'uidattr' in self.conf:
pw.name = obj[self.conf['uidattr']][0]
else:
pw.name = obj['uid'][0]
if hasattr(self, 'uidregex'):
pw.name = ''.join([x for x in self.uidregex.findall(pw.name)])
if 'override_shell' in self.conf:
pw.shell = self.conf['override_shell']
elif 'loginShell' in obj:
pw.shell = obj['loginShell'][0]
else:
pw.shell = ''
if self.conf.get('ad'):
            # use the user's RID for uid and gid to match
            # the corresponding group with the same name
pw.uid = int(sidToStr(obj['objectSid'][0]).split('-')[-1])
pw.gid = int(sidToStr(obj['objectSid'][0]).split('-')[-1])
elif self.conf.get('use_rid'):
            # use the user's RID for uid and gid to match
            # the corresponding group with the same name
pw.uid = int(sidToStr(obj['sambaSID'][0]).split('-')[-1])
pw.gid = int(sidToStr(obj['sambaSID'][0]).split('-')[-1])
else:
pw.uid = int(obj['uidNumber'][0])
pw.gid = int(obj['gidNumber'][0])
if 'offset' in self.conf:
# map uid and gid to higher number
# to avoid conflict with local accounts
pw.uid = int(pw.uid + self.conf['offset'])
pw.gid = int(pw.gid + self.conf['offset'])
if self.conf.get('home_dir'):
pw.dir = '/home/%s' % pw.name
elif 'unixHomeDirectory' in obj:
pw.dir = obj['unixHomeDirectory'][0]
elif 'homeDirectory' in obj:
pw.dir = obj['homeDirectory'][0]
else:
pw.dir = ''
# hack
pw.passwd = 'x'
return pw
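# Example: the posixAccount used in the unit tests below (uid 'test',
# uidNumber/gidNumber 1000, cn 'Testguy McTest', homeDirectory '/home/test',
# loginShell '/bin/sh') transforms into the passwd(5) equivalent of
#   test:x:1000:1000:Testguy McTest:/home/test:/bin/sh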
class GroupUpdateGetter(UpdateGetter):
"""Get group updates."""
def __init__(self, conf):
super(GroupUpdateGetter, self).__init__(conf)
        # TODO: Merge multiple rfc2307bis[_alt] options into a single option.
if self.conf.get('ad'):
# attributes of AD group to be returned
self.attrs = ['sAMAccountName', 'member', 'objectSid']
self.essential_fields = ['sAMAccountName', 'objectSid']
else:
if conf.get('rfc2307bis'):
self.attrs = ['cn', 'gidNumber', 'member', 'uid']
elif conf.get('rfc2307bis_alt'):
self.attrs = ['cn', 'gidNumber', 'uniqueMember', 'uid']
else:
self.attrs = ['cn', 'gidNumber', 'memberUid', 'uid']
if 'groupregex' in conf:
self.groupregex = re.compile(self.conf['groupregex'])
self.essential_fields = ['cn']
if conf.get('use_rid'):
self.attrs.append('sambaSID')
self.essential_fields.append('sambaSID')
self.log = logging.getLogger(__name__)
def CreateMap(self):
"""Return a GroupMap instance."""
return group.GroupMap()
def Transform(self, obj):
"""Transforms a LDAP posixGroup object into a group(5) entry."""
gr = group.GroupMapEntry()
if self.conf.get('ad'):
gr.name = obj['sAMAccountName'][0]
# hack to map the users as the corresponding group with the same name
elif 'uid' in obj:
gr.name = obj['uid'][0]
else:
gr.name = obj['cn'][0]
# group passwords are deferred to gshadow
gr.passwd = '*'
base = self.conf.get("base")
members = []
group_members = []
if 'memberUid' in obj:
            if hasattr(self, 'groupregex'):
                # findall() operates on a single string, so apply the regex
                # to each member individually, not to the whole list at once.
                for member in obj['memberUid']:
                    members.append(''.join(self.groupregex.findall(member)))
            else:
                members.extend(obj['memberUid'])
elif 'member' in obj:
for member_dn in obj['member']:
member_uid = member_dn.split(',')[0].split('=')[1]
# Note that there is not currently a way to consistently distinguish
# a group from a person
group_members.append(member_uid)
if hasattr(self, 'groupregex'):
members.append(''.join(
[x for x in self.groupregex.findall(member_uid)]))
else:
members.append(member_uid)
        elif 'uniqueMember' in obj:
            # uniqueMember values are DNs; they are resolved to uids in
            # PostProcess() below.
            members.extend(obj['uniqueMember'])
members.sort()
if self.conf.get('ad'):
gr.gid = int(sidToStr(obj['objectSid'][0]).split('-')[-1])
elif self.conf.get('use_rid'):
gr.gid = int(sidToStr(obj['sambaSID'][0]).split('-')[-1])
else:
gr.gid = int(obj['gidNumber'][0])
if 'offset' in self.conf:
gr.gid = int(gr.gid + self.conf['offset'])
gr.members = members
gr.groupmembers = group_members
return gr
def PostProcess(self, data_map, source, search_filter, search_scope):
"""Perform some post-process of the data."""
if 'uniqueMember' in self.attrs:
for gr in data_map:
uidmembers = []
for member in gr.members:
source.Search(search_base=member,
search_filter='(objectClass=*)',
search_scope=ldap.SCOPE_BASE,
attrs=['uid'])
for obj in source:
if 'uid' in obj:
uidmembers.extend(obj['uid'])
del gr.members[:]
gr.members.extend(uidmembers)
_group_map = {i.name: i for i in data_map}
        def _expand_members(obj, visited=None):
            """Expand all subgroups recursively."""
            if visited is None:
                visited = []
            for member_name in obj.groupmembers:
if member_name in _group_map and member_name not in visited:
gmember = _group_map[member_name]
for member in gmember.members:
if member not in obj.members:
obj.members.append(member)
for submember_name in gmember.groupmembers:
if submember_name in _group_map and submember_name not in visited:
visited.append(submember_name)
_expand_members(_group_map[submember_name], visited)
if self.conf.get("nested_groups"):
self.log.info("Expanding nested groups")
for gr in data_map:
_expand_members(gr, [gr.name])
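# Nested-group example, mirroring testGetGroupNested below: 'testgroup' has
# three user members plus the subgroup 'child' (three further users); with
# nested_groups enabled, _expand_members() folds child's members into
# testgroup, giving it seven members in total, while 'child' keeps three.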
class ShadowUpdateGetter(UpdateGetter):
"""Get Shadow updates from the LDAP Source."""
def __init__(self, conf):
super(ShadowUpdateGetter, self).__init__(conf)
self.attrs = [
'uid', 'shadowLastChange', 'shadowMin', 'shadowMax',
'shadowWarning', 'shadowInactive', 'shadowExpire', 'shadowFlag',
'userPassword'
]
if self.conf.get('ad'):
# attributes of AD user to be returned for shadow
self.attrs.extend(('sAMAccountName', 'pwdLastSet'))
self.essential_fields = ['sAMAccountName', 'pwdLastSet']
else:
if 'uidattr' in self.conf:
self.attrs.append(self.conf['uidattr'])
if 'uidregex' in self.conf:
self.uidregex = re.compile(self.conf['uidregex'])
self.essential_fields = ['uid']
self.log = logging.getLogger(self.__class__.__name__)
def CreateMap(self):
"""Return a ShadowMap instance."""
return shadow.ShadowMap()
def Transform(self, obj):
"""Transforms an LDAP shadowAccont object into a shadow(5) entry."""
shadow_ent = shadow.ShadowMapEntry()
if self.conf.get('ad'):
shadow_ent.name = obj['sAMAccountName'][0]
elif 'uidattr' in self.conf:
shadow_ent.name = obj[self.conf['uidattr']][0]
else:
shadow_ent.name = obj['uid'][0]
if hasattr(self, 'uidregex'):
shadow_ent.name = ''.join(
                [x for x in self.uidregex.findall(shadow_ent.name)])
# TODO(jaq): does nss_ldap check the contents of the userPassword
# attribute?
shadow_ent.passwd = '*'
if self.conf.get('ad'):
            # Time attributes of AD objects use the interval date/time format:
            # the number of 100-nanosecond intervals since January 1, 1601.
            # To convert, take pwdLastSet in seconds, subtract the difference
            # between 1970-01-01 and 1601-01-01 in seconds, which is
            # 11644473600, then divide by 86400 to get the days since
            # Jan 1, 1970 that the password was changed.
shadow_ent.lstchg = int(
(int(obj['pwdLastSet'][0]) / 10000000 - 11644473600) / 86400)
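            # Worked example: pwdLastSet '132161071270000000' (used in the
            # unit tests below) is 13216107127 seconds since 1601; subtracting
            # 11644473600 gives 1571633527 seconds since the Unix epoch, and
            # dividing by 86400 yields 18190 days (2019-10-21).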
elif 'shadowLastChange' in obj:
shadow_ent.lstchg = int(obj['shadowLastChange'][0])
if 'shadowMin' in obj:
shadow_ent.min = int(obj['shadowMin'][0])
if 'shadowMax' in obj:
shadow_ent.max = int(obj['shadowMax'][0])
if 'shadowWarning' in obj:
shadow_ent.warn = int(obj['shadowWarning'][0])
if 'shadowInactive' in obj:
shadow_ent.inact = int(obj['shadowInactive'][0])
if 'shadowExpire' in obj:
shadow_ent.expire = int(obj['shadowExpire'][0])
if 'shadowFlag' in obj:
shadow_ent.flag = int(obj['shadowFlag'][0])
if shadow_ent.flag is None:
shadow_ent.flag = 0
if 'userPassword' in obj:
passwd = obj['userPassword'][0]
if passwd[:7].lower() == '{crypt}':
shadow_ent.passwd = passwd[7:]
else:
logging.info('Ignored password that was not in crypt format')
return shadow_ent
class NetgroupUpdateGetter(UpdateGetter):
"""Get netgroup updates."""
def __init__(self, conf):
super(NetgroupUpdateGetter, self).__init__(conf)
self.attrs = ['cn', 'memberNisNetgroup', 'nisNetgroupTriple']
self.essential_fields = ['cn']
def CreateMap(self):
"""Return a NetgroupMap instance."""
return netgroup.NetgroupMap()
def Transform(self, obj):
"""Transforms an LDAP nisNetgroup object into a netgroup(5) entry."""
netgroup_ent = netgroup.NetgroupMapEntry()
netgroup_ent.name = obj['cn'][0]
entries = set()
if 'memberNisNetgroup' in obj:
entries.update(obj['memberNisNetgroup'])
if 'nisNetgroupTriple' in obj:
entries.update(obj['nisNetgroupTriple'])
# final data is stored as a string in the object
netgroup_ent.entries = ' '.join(sorted(entries))
return netgroup_ent
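# Example (hypothetical attribute values): a nisNetgroup carrying
# nisNetgroupTriple ['(-,alice,)', '(-,bob,)'] and memberNisNetgroup
# ['admins'] yields entries == '(-,alice,) (-,bob,) admins'.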
class AutomountUpdateGetter(UpdateGetter):
"""Get specific automount maps."""
def __init__(self, conf):
super(AutomountUpdateGetter, self).__init__(conf)
self.attrs = ['cn', 'automountInformation']
self.essential_fields = ['cn']
def CreateMap(self):
"""Return a AutomountMap instance."""
return automount.AutomountMap()
def Transform(self, obj):
"""Transforms an LDAP automount object into an autofs(5) entry."""
automount_ent = automount.AutomountMapEntry()
automount_ent.key = obj['cn'][0]
automount_information = obj['automountInformation'][0]
if automount_information.startswith('ldap'):
            # we are creating an automount master map, pointing to other maps in LDAP
automount_ent.location = automount_information
else:
# we are creating normal automount maps, with filesystems and options
automount_ent.options = automount_information.split(' ')[0]
automount_ent.location = automount_information.split(' ')[1]
return automount_ent
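# Example (hypothetical values): automountInformation
# '-fstype=nfs,rw fileserver:/export/home' splits into options
# '-fstype=nfs,rw' and location 'fileserver:/export/home', while
# 'ldap:ou=auto.home,dc=example,dc=com' is kept whole as a master-map
# location.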
class SshkeyUpdateGetter(UpdateGetter):
"""Fetches SSH keys."""
def __init__(self, conf):
super(SshkeyUpdateGetter, self).__init__(conf)
self.attrs = ['uid', 'sshPublicKey']
if 'uidattr' in self.conf:
self.attrs.append(self.conf['uidattr'])
if 'uidregex' in self.conf:
self.uidregex = re.compile(self.conf['uidregex'])
self.essential_fields = ['uid']
def CreateMap(self):
"""Returns a new SshkeyMap instance to have SshkeyMapEntries added to
it."""
return sshkey.SshkeyMap()
def Transform(self, obj):
"""Transforms a LDAP posixAccount data structure into a
SshkeyMapEntry."""
skey = sshkey.SshkeyMapEntry()
if 'uidattr' in self.conf:
skey.name = obj[self.conf['uidattr']][0]
else:
skey.name = obj['uid'][0]
if hasattr(self, 'uidregex'):
            skey.name = ''.join([x for x in self.uidregex.findall(skey.name)])
if 'sshPublicKey' in obj:
skey.sshkey = obj['sshPublicKey']
else:
skey.sshkey = ''
return skey
nsscache-version-0.42/nss_cache/sources/ldapsource_test.py000066400000000000000000001662171402531134600241100ustar00rootroot00000000000000# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""An implementation of a mock ldap data source for nsscache."""
__author__ = ('jaq@google.com (Jamie Wilkinson)',
'vasilios@google.com (Vasilios Hoffman)')
import time
import unittest
import ldap
from mox3 import mox
from nss_cache import error
from nss_cache.maps import automount
from nss_cache.maps import group
from nss_cache.maps import passwd
from nss_cache.maps import shadow
from nss_cache.sources import ldapsource
TEST_RETRY_MAX = 1
TEST_RETRY_DELAY = 0
TEST_URI = 'TEST_URI'
class TestLdapSource(mox.MoxTestBase):
def setUp(self):
"""Initialize a basic config dict."""
super(TestLdapSource, self).setUp()
self.config = {
'uri': 'TEST_URI',
'base': 'TEST_BASE',
'filter': 'TEST_FILTER',
'bind_dn': 'TEST_BIND_DN',
'bind_password': 'TEST_BIND_PASSWORD',
'retry_delay': TEST_RETRY_DELAY,
'retry_max': TEST_RETRY_MAX,
'timelimit': 'TEST_TIMELIMIT',
'tls_require_cert': 0,
'tls_cacertdir': 'TEST_TLS_CACERTDIR',
'tls_cacertfile': 'TEST_TLS_CACERTFILE',
}
def compareSPRC(self, expected_value=''):
def comparator(param):
if not isinstance(param, list):
return False
sprc = param[0]
if not isinstance(sprc, ldap.controls.SimplePagedResultsControl):
return False
cookie = ldapsource.getCookieFromControl(sprc)
return cookie == expected_value
return comparator
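    # compareSPRC() builds a predicate for mox.Func(): it accepts the
    # serverctrls argument only if it is a list whose first element is a
    # SimplePagedResultsControl carrying the expected paging cookie.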
def testDefaultConfiguration(self):
config = {'uri': 'ldap://foo'}
mock_rlo = self.mox.CreateMock(ldap.ldapobject.ReconnectLDAPObject)
mock_rlo.simple_bind_s(cred='', who='')
self.mox.StubOutWithMock(ldap, 'ldapobject')
ldap.ldapobject.ReconnectLDAPObject(uri='ldap://foo',
retry_max=3,
retry_delay=5).AndReturn(mock_rlo)
self.mox.ReplayAll()
source = ldapsource.LdapSource(config)
self.assertEqual(source.conf['bind_dn'], ldapsource.LdapSource.BIND_DN)
self.assertEqual(source.conf['bind_password'],
ldapsource.LdapSource.BIND_PASSWORD)
self.assertEqual(source.conf['retry_max'],
ldapsource.LdapSource.RETRY_MAX)
self.assertEqual(source.conf['retry_delay'],
ldapsource.LdapSource.RETRY_DELAY)
self.assertEqual(source.conf['scope'], ldapsource.LdapSource.SCOPE)
self.assertEqual(source.conf['timelimit'],
ldapsource.LdapSource.TIMELIMIT)
self.assertEqual(source.conf['tls_require_cert'], ldap.OPT_X_TLS_DEMAND)
def testOverrideDefaultConfiguration(self):
config = dict(self.config)
config['scope'] = ldap.SCOPE_BASE
mock_rlo = self.mox.CreateMock(ldap.ldapobject.ReconnectLDAPObject)
mock_rlo.simple_bind_s(cred='TEST_BIND_PASSWORD', who='TEST_BIND_DN')
self.mox.StubOutWithMock(ldap, 'ldapobject')
ldap.ldapobject.ReconnectLDAPObject(retry_max=TEST_RETRY_MAX,
retry_delay=TEST_RETRY_DELAY,
uri='TEST_URI').AndReturn(mock_rlo)
self.mox.ReplayAll()
source = ldapsource.LdapSource(config)
self.assertEqual(source.conf['scope'], ldap.SCOPE_BASE)
self.assertEqual(source.conf['bind_dn'], 'TEST_BIND_DN')
self.assertEqual(source.conf['bind_password'], 'TEST_BIND_PASSWORD')
self.assertEqual(source.conf['retry_delay'], TEST_RETRY_DELAY)
self.assertEqual(source.conf['retry_max'], TEST_RETRY_MAX)
self.assertEqual(source.conf['timelimit'], 'TEST_TIMELIMIT')
self.assertEqual(source.conf['tls_require_cert'], 0)
self.assertEqual(source.conf['tls_cacertdir'], 'TEST_TLS_CACERTDIR')
self.assertEqual(source.conf['tls_cacertfile'], 'TEST_TLS_CACERTFILE')
def testDebugLevelSet(self):
config = dict(self.config)
config['ldap_debug'] = 3
mock_rlo = self.mox.CreateMock(ldap.ldapobject.ReconnectLDAPObject)
mock_rlo.set_option(ldap.OPT_DEBUG_LEVEL, 3)
mock_rlo.simple_bind_s(cred='TEST_BIND_PASSWORD', who='TEST_BIND_DN')
self.mox.StubOutWithMock(ldap, 'ldapobject')
ldap.ldapobject.ReconnectLDAPObject(retry_max=TEST_RETRY_MAX,
retry_delay=TEST_RETRY_DELAY,
uri='TEST_URI').AndReturn(mock_rlo)
self.mox.ReplayAll()
source = ldapsource.LdapSource(config)
def testTrapServerDownAndRetry(self):
config = dict(self.config)
config['bind_dn'] = ''
config['bind_password'] = ''
config['retry_delay'] = 5
config['retry_max'] = 3
mock_rlo = self.mox.CreateMock(ldap.ldapobject.ReconnectLDAPObject)
mock_rlo.simple_bind_s(cred='', who='').MultipleTimes().AndRaise(
ldap.SERVER_DOWN)
self.mox.StubOutWithMock(ldap, 'ldapobject')
ldap.ldapobject.ReconnectLDAPObject(
uri='TEST_URI', retry_max=3,
retry_delay=5).MultipleTimes().AndReturn(mock_rlo)
self.mox.StubOutWithMock(time, 'sleep')
time.sleep(5)
time.sleep(5)
self.mox.ReplayAll()
self.assertRaises(error.SourceUnavailable, ldapsource.LdapSource,
config)
def testIterationOverLdapDataSource(self):
config = dict(self.config)
mock_rlo = self.mox.CreateMock(ldap.ldapobject.ReconnectLDAPObject)
mock_rlo.simple_bind_s(cred='TEST_BIND_PASSWORD', who='TEST_BIND_DN')
mock_rlo.search_ext(base=config['base'],
filterstr='TEST_FILTER',
scope='TEST_SCOPE',
attrlist='TEST_ATTRLIST',
serverctrls=mox.Func(
self.compareSPRC())).AndReturn('TEST_RES')
dataset = [('dn', {'uid': [0]})]
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_ENTRY, dataset, None, []))
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_RESULT, None, None, []))
self.mox.StubOutWithMock(ldap, 'ldapobject')
ldap.ldapobject.ReconnectLDAPObject(
uri='TEST_URI',
retry_max=TEST_RETRY_MAX,
retry_delay=TEST_RETRY_DELAY).AndReturn(mock_rlo)
self.mox.ReplayAll()
source = ldapsource.LdapSource(config)
source.Search(search_base=config['base'],
search_filter='TEST_FILTER',
search_scope='TEST_SCOPE',
attrs='TEST_ATTRLIST')
count = 0
for r in source:
self.assertEqual(dataset[0][1], r)
count += 1
self.assertEqual(1, count)
def testIterationTimeout(self):
config = dict(self.config)
config['retry_delay'] = 5
config['retry_max'] = 3
mock_rlo = self.mox.CreateMock(ldap.ldapobject.ReconnectLDAPObject)
mock_rlo.simple_bind_s(cred='TEST_BIND_PASSWORD', who='TEST_BIND_DN')
mock_rlo.search_ext(base=config['base'],
filterstr='TEST_FILTER',
scope='TEST_SCOPE',
attrlist='TEST_ATTRLIST',
serverctrls=mox.Func(
self.compareSPRC())).AndReturn('TEST_RES')
dataset = [('dn', {'uid': [0]})]
mock_rlo.result3('TEST_RES', all=0,
timeout='TEST_TIMELIMIT').MultipleTimes().AndRaise(
ldap.TIMELIMIT_EXCEEDED)
self.mox.StubOutWithMock(time, 'sleep')
time.sleep(5)
time.sleep(5)
self.mox.StubOutWithMock(ldap, 'ldapobject')
ldap.ldapobject.ReconnectLDAPObject(uri='TEST_URI',
retry_max=3,
retry_delay=5).AndReturn(mock_rlo)
self.mox.ReplayAll()
source = ldapsource.LdapSource(config)
source.Search(search_base=config['base'],
search_filter='TEST_FILTER',
search_scope='TEST_SCOPE',
attrs='TEST_ATTRLIST')
count = 0
for r in source:
count += 1
self.assertEqual(0, count)
def testGetPasswdMap(self):
test_posix_account = ('cn=test,ou=People,dc=example,dc=com', {
'sambaSID': ['S-1-5-21-2127521184-1604012920-1887927527-72713'],
'uidNumber': ['1000'],
'gidNumber': ['1000'],
'uid': ['test'],
'cn': ['Testguy McTest'],
'homeDirectory': ['/home/test'],
'loginShell': ['/bin/sh'],
'userPassword': ['p4ssw0rd'],
'modifyTimestamp': ['20070227012807Z']
})
config = dict(self.config)
attrlist = [
'uid', 'uidNumber', 'gidNumber', 'gecos', 'cn', 'homeDirectory',
'fullName', 'loginShell', 'modifyTimestamp'
]
mock_rlo = self.mox.CreateMock(ldap.ldapobject.ReconnectLDAPObject)
mock_rlo.simple_bind_s(cred='TEST_BIND_PASSWORD', who='TEST_BIND_DN')
mock_rlo.search_ext(base='TEST_BASE',
filterstr='TEST_FILTER',
scope=ldap.SCOPE_ONELEVEL,
attrlist=mox.SameElementsAs(attrlist),
serverctrls=mox.Func(
self.compareSPRC())).AndReturn('TEST_RES')
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_ENTRY, [test_posix_account], None, []))
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_RESULT, None, None, []))
self.mox.StubOutWithMock(ldap, 'ldapobject')
ldap.ldapobject.ReconnectLDAPObject(
uri='TEST_URI',
retry_max=TEST_RETRY_MAX,
retry_delay=TEST_RETRY_DELAY).AndReturn(mock_rlo)
self.mox.ReplayAll()
source = ldapsource.LdapSource(config)
data = source.GetPasswdMap()
self.assertEqual(1, len(data))
first = data.PopItem()
self.assertEqual('test', first.name)
def testGetPasswdMapWithUidAttr(self):
test_posix_account = ('cn=test,ou=People,dc=example,dc=com', {
'sambaSID': ['S-1-5-21-2127521184-1604012920-1887927527-72713'],
'uidNumber': [1000],
'gidNumber': [1000],
'uid': ['test'],
'name': ['test'],
'cn': ['Testguy McTest'],
'homeDirectory': ['/home/test'],
'loginShell': ['/bin/sh'],
'userPassword': ['p4ssw0rd'],
'modifyTimestamp': ['20070227012807Z']
})
config = dict(self.config)
config['uidattr'] = 'name'
attrlist = [
'uid', 'uidNumber', 'gidNumber', 'gecos', 'cn', 'homeDirectory',
'fullName', 'name', 'loginShell', 'modifyTimestamp'
]
mock_rlo = self.mox.CreateMock(ldap.ldapobject.ReconnectLDAPObject)
mock_rlo.simple_bind_s(cred='TEST_BIND_PASSWORD', who='TEST_BIND_DN')
mock_rlo.search_ext(base='TEST_BASE',
filterstr='TEST_FILTER',
scope=ldap.SCOPE_ONELEVEL,
attrlist=mox.SameElementsAs(attrlist),
serverctrls=mox.Func(
self.compareSPRC())).AndReturn('TEST_RES')
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_ENTRY, [test_posix_account], None, []))
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_RESULT, None, None, []))
self.mox.StubOutWithMock(ldap, 'ldapobject')
ldap.ldapobject.ReconnectLDAPObject(
uri='TEST_URI',
retry_max=TEST_RETRY_MAX,
retry_delay=TEST_RETRY_DELAY).AndReturn(mock_rlo)
self.mox.ReplayAll()
source = ldapsource.LdapSource(config)
data = source.GetPasswdMap()
self.assertEqual(1, len(data))
first = data.PopItem()
self.assertEqual('test', first.name)
def testGetPasswdMapWithShellOverride(self):
test_posix_account = ('cn=test,ou=People,dc=example,dc=com', {
'sambaSID': ['S-1-5-21-2127521184-1604012920-1887927527-72713'],
'uidNumber': [1000],
'gidNumber': [1000],
'uid': ['test'],
'cn': ['Testguy McTest'],
'homeDirectory': ['/home/test'],
'loginShell': ['/bin/sh'],
'userPassword': ['p4ssw0rd'],
'modifyTimestamp': ['20070227012807Z']
})
config = dict(self.config)
config['override_shell'] = '/bin/false'
attrlist = [
'uid', 'uidNumber', 'gidNumber', 'gecos', 'cn', 'homeDirectory',
'fullName', 'loginShell', 'modifyTimestamp'
]
mock_rlo = self.mox.CreateMock(ldap.ldapobject.ReconnectLDAPObject)
mock_rlo.simple_bind_s(cred='TEST_BIND_PASSWORD', who='TEST_BIND_DN')
mock_rlo.search_ext(base='TEST_BASE',
filterstr='TEST_FILTER',
scope=ldap.SCOPE_ONELEVEL,
attrlist=mox.SameElementsAs(attrlist),
serverctrls=mox.Func(
self.compareSPRC())).AndReturn('TEST_RES')
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_ENTRY, [test_posix_account], None, []))
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_RESULT, None, None, []))
self.mox.StubOutWithMock(ldap, 'ldapobject')
ldap.ldapobject.ReconnectLDAPObject(
uri='TEST_URI',
retry_max=TEST_RETRY_MAX,
retry_delay=TEST_RETRY_DELAY).AndReturn(mock_rlo)
self.mox.ReplayAll()
source = ldapsource.LdapSource(config)
data = source.GetPasswdMap()
self.assertEqual(1, len(data))
first = data.PopItem()
self.assertEqual('/bin/false', first.shell)
def testGetPasswdMapWithUseRid(self):
test_posix_account = ('cn=test,ou=People,dc=example,dc=com', {
'sambaSID': ['S-1-5-21-2127521184-1604012920-1887927527-72713'],
'uidNumber': [1000],
'gidNumber': [1000],
'uid': ['test'],
'cn': ['Testguy McTest'],
'homeDirectory': ['/home/test'],
'loginShell': ['/bin/sh'],
'userPassword': ['p4ssw0rd'],
'modifyTimestamp': ['20070227012807Z']
})
config = dict(self.config)
config['use_rid'] = '1'
attrlist = [
'uid', 'uidNumber', 'gidNumber', 'gecos', 'cn', 'homeDirectory',
'fullName', 'sambaSID', 'loginShell', 'modifyTimestamp'
]
mock_rlo = self.mox.CreateMock(ldap.ldapobject.ReconnectLDAPObject)
mock_rlo.simple_bind_s(cred='TEST_BIND_PASSWORD', who='TEST_BIND_DN')
mock_rlo.search_ext(base='TEST_BASE',
filterstr='TEST_FILTER',
scope=ldap.SCOPE_ONELEVEL,
attrlist=mox.SameElementsAs(attrlist),
serverctrls=mox.Func(
self.compareSPRC())).AndReturn('TEST_RES')
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_ENTRY, [test_posix_account], None, []))
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_RESULT, None, None, []))
self.mox.StubOutWithMock(ldap, 'ldapobject')
ldap.ldapobject.ReconnectLDAPObject(
uri='TEST_URI',
retry_max=TEST_RETRY_MAX,
retry_delay=TEST_RETRY_DELAY).AndReturn(mock_rlo)
self.mox.ReplayAll()
source = ldapsource.LdapSource(config)
data = source.GetPasswdMap()
self.assertEqual(1, len(data))
first = data.PopItem()
self.assertEqual('test', first.name)
def testGetPasswdMapAD(self):
test_posix_account = ('cn=test,ou=People,dc=example,dc=com', {
'objectSid': [
b'\x01\x05\x00\x00\x00\x00\x00\x05\x15\x00\x00\x00\xa0e\xcf~xK\x9b_\xe7|\x87p\t\x1c\x01\x00'
],
'sAMAccountName': ['test'],
'displayName': ['Testguy McTest'],
'unixHomeDirectory': ['/home/test'],
'loginShell': ['/bin/sh'],
'pwdLastSet': ['132161071270000000'],
'whenChanged': ['20070227012807.0Z']
})
config = dict(self.config)
config['ad'] = '1'
attrlist = [
'sAMAccountName', 'pwdLastSet', 'loginShell', 'objectSid',
'displayName', 'whenChanged', 'unixHomeDirectory'
]
mock_rlo = self.mox.CreateMock(ldap.ldapobject.ReconnectLDAPObject)
mock_rlo.simple_bind_s(cred='TEST_BIND_PASSWORD', who='TEST_BIND_DN')
mock_rlo.search_ext(base='TEST_BASE',
filterstr='TEST_FILTER',
scope=ldap.SCOPE_ONELEVEL,
attrlist=mox.SameElementsAs(attrlist),
serverctrls=mox.Func(
self.compareSPRC())).AndReturn('TEST_RES')
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_ENTRY, [test_posix_account], None, []))
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_RESULT, None, None, []))
self.mox.StubOutWithMock(ldap, 'ldapobject')
ldap.ldapobject.ReconnectLDAPObject(
uri='TEST_URI',
retry_max=TEST_RETRY_MAX,
retry_delay=TEST_RETRY_DELAY).AndReturn(mock_rlo)
self.mox.ReplayAll()
source = ldapsource.LdapSource(config)
data = source.GetPasswdMap()
self.assertEqual(1, len(data))
first = data.PopItem()
self.assertEqual('test', first.name)
    def testGetPasswdMapADWithOffset(self):
test_posix_account = ('cn=test,ou=People,dc=example,dc=com', {
'objectSid': [
b'\x01\x05\x00\x00\x00\x00\x00\x05\x15\x00\x00\x00\xa0e\xcf~xK\x9b_\xe7|\x87p\t\x1c\x01\x00'
],
'sAMAccountName': ['test'],
'displayName': ['Testguy McTest'],
'unixHomeDirectory': ['/home/test'],
'loginShell': ['/bin/sh'],
'pwdLastSet': ['132161071270000000'],
'whenChanged': ['20070227012807.0Z']
})
config = dict(self.config)
config['ad'] = '1'
config['offset'] = 10000
attrlist = [
'sAMAccountName', 'pwdLastSet', 'loginShell', 'objectSid',
'displayName', 'whenChanged', 'unixHomeDirectory'
]
mock_rlo = self.mox.CreateMock(ldap.ldapobject.ReconnectLDAPObject)
mock_rlo.simple_bind_s(cred='TEST_BIND_PASSWORD', who='TEST_BIND_DN')
mock_rlo.search_ext(base='TEST_BASE',
filterstr='TEST_FILTER',
scope=ldap.SCOPE_ONELEVEL,
attrlist=mox.SameElementsAs(attrlist),
serverctrls=mox.Func(
self.compareSPRC())).AndReturn('TEST_RES')
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_ENTRY, [test_posix_account], None, []))
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_RESULT, None, None, []))
self.mox.StubOutWithMock(ldap, 'ldapobject')
ldap.ldapobject.ReconnectLDAPObject(
uri='TEST_URI',
retry_max=TEST_RETRY_MAX,
retry_delay=TEST_RETRY_DELAY).AndReturn(mock_rlo)
self.mox.ReplayAll()
source = ldapsource.LdapSource(config)
data = source.GetPasswdMap()
self.assertEqual(1, len(data))
first = data.PopItem()
self.assertEqual('test', first.name)
def testGetGroupMap(self):
test_posix_group = ('cn=test,ou=Group,dc=example,dc=com', {
'sambaSID': ['S-1-5-21-2127521184-1604012920-1887927527-72713'],
'gidNumber': [1000],
'cn': ['testgroup'],
'memberUid': ['testguy', 'fooguy', 'barguy'],
'modifyTimestamp': ['20070227012807Z']
})
config = dict(self.config)
attrlist = ['cn', 'uid', 'gidNumber', 'memberUid', 'modifyTimestamp']
mock_rlo = self.mox.CreateMock(ldap.ldapobject.ReconnectLDAPObject)
mock_rlo.simple_bind_s(cred='TEST_BIND_PASSWORD', who='TEST_BIND_DN')
mock_rlo.search_ext(base='TEST_BASE',
filterstr='TEST_FILTER',
scope=ldap.SCOPE_ONELEVEL,
attrlist=mox.SameElementsAs(attrlist),
serverctrls=mox.Func(
self.compareSPRC())).AndReturn('TEST_RES')
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_ENTRY, [test_posix_group], None, []))
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_RESULT, None, None, []))
self.mox.StubOutWithMock(ldap, 'ldapobject')
ldap.ldapobject.ReconnectLDAPObject(
uri='TEST_URI',
retry_max=TEST_RETRY_MAX,
retry_delay=TEST_RETRY_DELAY).AndReturn(mock_rlo)
self.mox.ReplayAll()
source = ldapsource.LdapSource(config)
data = source.GetGroupMap()
self.assertEqual(1, len(data))
ent = data.PopItem()
self.assertEqual('testgroup', ent.name)
def testGetGroupMapWithUseRid(self):
test_posix_group = ('cn=test,ou=Group,dc=example,dc=com', {
'sambaSID': ['S-1-5-21-2127521184-1604012920-1887927527-72713'],
'gidNumber': [1000],
'cn': ['testgroup'],
'memberUid': ['testguy', 'fooguy', 'barguy'],
'modifyTimestamp': ['20070227012807Z']
})
config = dict(self.config)
config['use_rid'] = '1'
attrlist = [
'cn', 'uid', 'gidNumber', 'memberUid', 'sambaSID', 'modifyTimestamp'
]
mock_rlo = self.mox.CreateMock(ldap.ldapobject.ReconnectLDAPObject)
mock_rlo.simple_bind_s(cred='TEST_BIND_PASSWORD', who='TEST_BIND_DN')
mock_rlo.search_ext(base='TEST_BASE',
filterstr='TEST_FILTER',
scope=ldap.SCOPE_ONELEVEL,
attrlist=mox.SameElementsAs(attrlist),
serverctrls=mox.Func(
self.compareSPRC())).AndReturn('TEST_RES')
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_ENTRY, [test_posix_group], None, []))
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_RESULT, None, None, []))
self.mox.StubOutWithMock(ldap, 'ldapobject')
ldap.ldapobject.ReconnectLDAPObject(
uri='TEST_URI',
retry_max=TEST_RETRY_MAX,
retry_delay=TEST_RETRY_DELAY).AndReturn(mock_rlo)
self.mox.ReplayAll()
source = ldapsource.LdapSource(config)
data = source.GetGroupMap()
self.assertEqual(1, len(data))
ent = data.PopItem()
self.assertEqual('testgroup', ent.name)
def testGetGroupMapAsUser(self):
test_posix_group = ('cn=test,ou=People,dc=example,dc=com', {
'sambaSID': ['S-1-5-21-2127521184-1604012920-1887927527-72713'],
'uidNumber': [1000],
'gidNumber': [1000],
'uid': ['test'],
'cn': ['Testguy McTest'],
'homeDirectory': ['/home/test'],
'loginShell': ['/bin/sh'],
'userPassword': ['p4ssw0rd'],
'modifyTimestamp': ['20070227012807Z']
})
config = dict(self.config)
config['use_rid'] = '1'
attrlist = [
'cn', 'uid', 'gidNumber', 'memberUid', 'sambaSID', 'modifyTimestamp'
]
mock_rlo = self.mox.CreateMock(ldap.ldapobject.ReconnectLDAPObject)
mock_rlo.simple_bind_s(cred='TEST_BIND_PASSWORD', who='TEST_BIND_DN')
mock_rlo.search_ext(base='TEST_BASE',
filterstr='TEST_FILTER',
scope=ldap.SCOPE_ONELEVEL,
attrlist=mox.SameElementsAs(attrlist),
serverctrls=mox.Func(
self.compareSPRC())).AndReturn('TEST_RES')
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_ENTRY, [test_posix_group], None, []))
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_RESULT, None, None, []))
self.mox.StubOutWithMock(ldap, 'ldapobject')
ldap.ldapobject.ReconnectLDAPObject(
uri='TEST_URI',
retry_max=TEST_RETRY_MAX,
retry_delay=TEST_RETRY_DELAY).AndReturn(mock_rlo)
self.mox.ReplayAll()
source = ldapsource.LdapSource(config)
data = source.GetGroupMap()
self.assertEqual(1, len(data))
ent = data.PopItem()
self.assertEqual('test', ent.name)
def testGetGroupMapAD(self):
test_posix_group = ('cn=test,ou=Group,dc=example,dc=com', {
'objectSid': [
b'\x01\x05\x00\x00\x00\x00\x00\x05\x15\x00\x00\x00\xa0e\xcf~xK\x9b_\xe7|\x87p\t\x1c\x01\x00'
],
'sAMAccountName': ['testgroup'],
'cn': ['testgroup'],
'member': [
'cn=testguy,ou=People,dc=example,dc=com',
'cn=fooguy,ou=People,dc=example,dc=com',
'cn=barguy,ou=People,dc=example,dc=com'
],
'whenChanged': ['20070227012807.0Z']
})
config = dict(self.config)
config['ad'] = '1'
attrlist = ['sAMAccountName', 'objectSid', 'member', 'whenChanged']
mock_rlo = self.mox.CreateMock(ldap.ldapobject.ReconnectLDAPObject)
mock_rlo.simple_bind_s(cred='TEST_BIND_PASSWORD', who='TEST_BIND_DN')
mock_rlo.search_ext(base='TEST_BASE',
filterstr='TEST_FILTER',
scope=ldap.SCOPE_ONELEVEL,
attrlist=mox.SameElementsAs(attrlist),
serverctrls=mox.Func(
self.compareSPRC())).AndReturn('TEST_RES')
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_ENTRY, [test_posix_group], None, []))
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_RESULT, None, None, []))
self.mox.StubOutWithMock(ldap, 'ldapobject')
ldap.ldapobject.ReconnectLDAPObject(
uri='TEST_URI',
retry_max=TEST_RETRY_MAX,
retry_delay=TEST_RETRY_DELAY).AndReturn(mock_rlo)
self.mox.ReplayAll()
source = ldapsource.LdapSource(config)
data = source.GetGroupMap()
self.assertEqual(1, len(data))
ent = data.PopItem()
self.assertEqual('testgroup', ent.name)
def testGetGroupMapBis(self):
test_posix_group = ('cn=test,ou=Group,dc=example,dc=com', {
'sambaSID': ['S-1-5-21-2127521184-1604012920-1887927527-72713'],
'gidNumber': [1000],
'cn': ['testgroup'],
'member': [
'cn=testguy,ou=People,dc=example,dc=com',
'cn=fooguy,ou=People,dc=example,dc=com',
'cn=barguy,ou=People,dc=example,dc=com'
],
'modifyTimestamp': ['20070227012807Z']
})
config = dict(self.config)
config['rfc2307bis'] = 1
attrlist = ['cn', 'uid', 'gidNumber', 'member', 'modifyTimestamp']
mock_rlo = self.mox.CreateMock(ldap.ldapobject.ReconnectLDAPObject)
mock_rlo.simple_bind_s(cred='TEST_BIND_PASSWORD', who='TEST_BIND_DN')
mock_rlo.search_ext(base='TEST_BASE',
filterstr='TEST_FILTER',
scope=ldap.SCOPE_ONELEVEL,
attrlist=mox.SameElementsAs(attrlist),
serverctrls=mox.Func(
self.compareSPRC())).AndReturn('TEST_RES')
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_ENTRY, [test_posix_group], None, []))
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_RESULT, None, None, []))
self.mox.StubOutWithMock(ldap, 'ldapobject')
ldap.ldapobject.ReconnectLDAPObject(
uri='TEST_URI',
retry_max=TEST_RETRY_MAX,
retry_delay=TEST_RETRY_DELAY).AndReturn(mock_rlo)
self.mox.ReplayAll()
source = ldapsource.LdapSource(config)
data = source.GetGroupMap()
self.assertEqual(1, len(data))
ent = data.PopItem()
self.assertEqual('testgroup', ent.name)
self.assertEqual(3, len(ent.members))
def testGetGroupNestedNotConfigured(self):
test_posix_group = ('cn=test,ou=Group,dc=example,dc=com', {
'sambaSID': ['S-1-5-21-2127521184-1604012920-1887927527-72713'],
'gidNumber': [1000],
'cn': ['testgroup'],
'member': [
'cn=testguy,ou=People,dc=example,dc=com',
'cn=fooguy,ou=People,dc=example,dc=com',
'cn=barguy,ou=People,dc=example,dc=com',
'cn=child,ou=Group,dc=example,dc=com'
],
'modifyTimestamp': ['20070227012807Z']
})
test_child_group = ('cn=child,ou=Group,dc=example,dc=com', {
'sambaSID': ['S-1-5-21-2127521184-1604012920-1887927527-72714'],
'gidNumber': [1001],
'cn': ['child'],
'member': [
'cn=newperson,ou=People,dc=example,dc=com',
'cn=fooperson,ou=People,dc=example,dc=com',
'cn=barperson,ou=People,dc=example,dc=com'
],
'modifyTimestamp': ['20070227012807Z']
})
config = dict(self.config)
config['rfc2307bis'] = 1
attrlist = ['cn', 'uid', 'gidNumber', 'member', 'modifyTimestamp']
mock_rlo = self.mox.CreateMock(ldap.ldapobject.ReconnectLDAPObject)
mock_rlo.simple_bind_s(cred='TEST_BIND_PASSWORD', who='TEST_BIND_DN')
mock_rlo.search_ext(base='TEST_BASE',
filterstr='TEST_FILTER',
scope=ldap.SCOPE_ONELEVEL,
attrlist=mox.SameElementsAs(attrlist),
serverctrls=mox.Func(
self.compareSPRC())).AndReturn('TEST_RES')
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_ENTRY, [test_posix_group,
test_child_group], None, []))
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_RESULT, None, None, []))
self.mox.StubOutWithMock(ldap, 'ldapobject')
ldap.ldapobject.ReconnectLDAPObject(
uri='TEST_URI',
retry_max=TEST_RETRY_MAX,
retry_delay=TEST_RETRY_DELAY).AndReturn(mock_rlo)
self.mox.ReplayAll()
source = ldapsource.LdapSource(config)
data = source.GetGroupMap()
self.assertEqual(2, len(data))
datadict = {i.name: i for i in data}
self.assertIn("child", datadict)
self.assertIn("testgroup", datadict)
self.assertEqual(len(datadict["testgroup"].members), 4)
self.assertEqual(len(datadict["child"].members), 3)
self.assertNotIn("newperson", datadict["testgroup"].members)
def testGetGroupNested(self):
test_posix_group = ('cn=test,ou=Group,dc=example,dc=com', {
'sambaSID': ['S-1-5-21-2127521184-1604012920-1887927527-72713'],
'gidNumber': [1000],
'cn': ['testgroup'],
'member': [
'cn=testguy,ou=People,dc=example,dc=com',
'cn=fooguy,ou=People,dc=example,dc=com',
'cn=barguy,ou=People,dc=example,dc=com',
'cn=child,ou=Group,dc=example,dc=com'
],
'modifyTimestamp': ['20070227012807Z']
})
test_child_group = ('cn=child,ou=Group,dc=example,dc=com', {
'sambaSID': ['S-1-5-21-2127521184-1604012920-1887927527-72714'],
'gidNumber': [1001],
'cn': ['child'],
'member': [
'cn=newperson,ou=People,dc=example,dc=com',
'cn=fooperson,ou=People,dc=example,dc=com',
'cn=barperson,ou=People,dc=example,dc=com'
],
'modifyTimestamp': ['20070227012807Z']
})
config = dict(self.config)
config['rfc2307bis'] = 1
config["nested_groups"] = 1
config['use_rid'] = 1
attrlist = [
'cn', 'uid', 'gidNumber', 'member', 'sambaSID', 'modifyTimestamp'
]
mock_rlo = self.mox.CreateMock(ldap.ldapobject.ReconnectLDAPObject)
mock_rlo.simple_bind_s(cred='TEST_BIND_PASSWORD', who='TEST_BIND_DN')
mock_rlo.search_ext(base='TEST_BASE',
filterstr='TEST_FILTER',
scope=ldap.SCOPE_ONELEVEL,
attrlist=mox.SameElementsAs(attrlist),
serverctrls=mox.Func(
self.compareSPRC())).AndReturn('TEST_RES')
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_ENTRY, [test_posix_group,
test_child_group], None, []))
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_RESULT, None, None, []))
self.mox.StubOutWithMock(ldap, 'ldapobject')
ldap.ldapobject.ReconnectLDAPObject(
uri='TEST_URI',
retry_max=TEST_RETRY_MAX,
retry_delay=TEST_RETRY_DELAY).AndReturn(mock_rlo)
self.mox.ReplayAll()
source = ldapsource.LdapSource(config)
data = source.GetGroupMap()
self.assertEqual(2, len(data))
datadict = {i.name: i for i in data}
self.assertIn("child", datadict)
self.assertIn("testgroup", datadict)
self.assertEqual(len(datadict["testgroup"].members), 7)
self.assertEqual(len(datadict["child"].members), 3)
self.assertIn("newperson", datadict["testgroup"].members)
def testGetGroupLoop(self):
test_posix_group = ('cn=test,ou=Group,dc=example,dc=com', {
'sambaSID': ['S-1-5-21-2127521184-1604012920-1887927527-72713'],
'gidNumber': [1000],
'cn': ['testgroup'],
'member': [
'cn=testguy,ou=People,dc=example,dc=com',
'cn=fooguy,ou=People,dc=example,dc=com',
'cn=barguy,ou=People,dc=example,dc=com',
'cn=child,ou=Group,dc=example,dc=com'
],
'modifyTimestamp': ['20070227012807Z']
})
test_child_group = ('cn=child,ou=Group,dc=example,dc=com', {
'sambaSID': ['S-1-5-21-2127521184-1604012920-1887927527-72714'],
'gidNumber': [1001],
'cn': ['child'],
'member': [
'cn=newperson,ou=People,dc=example,dc=com',
'cn=fooperson,ou=People,dc=example,dc=com',
'cn=barperson,ou=People,dc=example,dc=com'
],
'modifyTimestamp': ['20070227012807Z']
})
test_loop_group = ('cn=loop,ou=Group,dc=example,dc=com', {
'sambaSID': ['S-1-5-21-2127521184-1604012920-1887927527-72715'],
'gidNumber': [1002],
'cn': ['loop'],
'member': [
'cn=loopperson,ou=People,dc=example,dc=com',
'cn=testgroup,ou=Group,dc=example,dc=com'
],
'modifyTimestamp': ['20070227012807Z']
})
config = dict(self.config)
config['rfc2307bis'] = 1
config["nested_groups"] = 1
config['use_rid'] = 1
attrlist = [
'cn', 'uid', 'gidNumber', 'member', 'sambaSID', 'modifyTimestamp'
]
mock_rlo = self.mox.CreateMock(ldap.ldapobject.ReconnectLDAPObject)
mock_rlo.simple_bind_s(cred='TEST_BIND_PASSWORD', who='TEST_BIND_DN')
mock_rlo.search_ext(base='TEST_BASE',
filterstr='TEST_FILTER',
scope=ldap.SCOPE_ONELEVEL,
attrlist=mox.SameElementsAs(attrlist),
serverctrls=mox.Func(
self.compareSPRC())).AndReturn('TEST_RES')
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_ENTRY,
[test_posix_group, test_child_group, test_loop_group], None, []))
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_RESULT, None, None, []))
self.mox.StubOutWithMock(ldap, 'ldapobject')
ldap.ldapobject.ReconnectLDAPObject(
uri='TEST_URI',
retry_max=TEST_RETRY_MAX,
retry_delay=TEST_RETRY_DELAY).AndReturn(mock_rlo)
self.mox.ReplayAll()
source = ldapsource.LdapSource(config)
data = source.GetGroupMap()
self.assertEqual(3, len(data))
datadict = {i.name: i for i in data}
self.assertIn("child", datadict)
self.assertIn("testgroup", datadict)
self.assertEqual(len(datadict["testgroup"].members), 7)
self.assertEqual(len(datadict["child"].members), 3)
self.assertIn("newperson", datadict["testgroup"].members)
def testGetGroupMapBisAlt(self):
test_posix_group = ('cn=test,ou=Group,dc=example,dc=com', {
'sambaSID': ['S-1-5-21-2127521184-1604012920-1887927527-72713'],
'gidNumber': [1000],
'cn': ['testgroup'],
'uniqueMember': ['cn=testguy,ou=People,dc=example,dc=com'],
'modifyTimestamp': ['20070227012807Z']
})
dn_user = 'cn=testguy,ou=People,dc=example,dc=com'
test_posix_account = (dn_user, {
'sambaSID': ['S-1-5-21-2562418665-3218585558-1813906818-1576'],
'uidNumber': [1000],
'gidNumber': [1000],
'uid': ['test'],
'cn': ['testguy'],
'homeDirectory': ['/home/test'],
'loginShell': ['/bin/sh'],
'userPassword': ['p4ssw0rd'],
'modifyTimestamp': ['20070227012807Z']
})
config = dict(self.config)
config['rfc2307bis_alt'] = 1
config['use_rid'] = 1
attrlist = [
'cn', 'gidNumber', 'uniqueMember', 'uid', 'sambaSID',
'modifyTimestamp'
]
uidattr = ['uid']
mock_rlo = self.mox.CreateMock(ldap.ldapobject.ReconnectLDAPObject)
mock_rlo.simple_bind_s(cred='TEST_BIND_PASSWORD', who='TEST_BIND_DN')
mock_rlo.search_ext(base='TEST_BASE',
filterstr='TEST_FILTER',
scope=ldap.SCOPE_ONELEVEL,
attrlist=mox.SameElementsAs(attrlist),
serverctrls=mox.Func(
self.compareSPRC())).AndReturn('TEST_RES')
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_ENTRY, [test_posix_group], None, []))
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_RESULT, None, None, []))
mock_rlo.search_ext(base=dn_user,
filterstr='(objectClass=*)',
scope=ldap.SCOPE_BASE,
attrlist=mox.SameElementsAs(uidattr),
serverctrls=mox.Func(
self.compareSPRC())).AndReturn('TEST_RES')
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_ENTRY, [test_posix_account], None, []))
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_RESULT, None, None, []))
self.mox.StubOutWithMock(ldap, 'ldapobject')
ldap.ldapobject.ReconnectLDAPObject(
uri='TEST_URI',
retry_max=TEST_RETRY_MAX,
retry_delay=TEST_RETRY_DELAY).AndReturn(mock_rlo)
self.mox.ReplayAll()
source = ldapsource.LdapSource(config)
data = source.GetGroupMap()
self.assertEqual(1, len(data))
ent = data.PopItem()
self.assertEqual('testgroup', ent.name)
self.assertEqual(1, len(ent.members))
def testGetShadowMap(self):
test_shadow = ('cn=test,ou=People,dc=example,dc=com', {
'uid': ['test'],
'shadowLastChange': ['11296'],
'shadowMax': ['99999'],
'shadowWarning': ['7'],
'shadowInactive': ['-1'],
'shadowExpire': ['-1'],
'shadowFlag': ['134537556'],
'modifyTimestamp': ['20070227012807Z'],
'userPassword': ['{CRYPT}p4ssw0rd']
})
config = dict(self.config)
attrlist = [
'uid', 'shadowLastChange', 'shadowMin', 'shadowMax',
'shadowWarning', 'shadowInactive', 'shadowExpire', 'shadowFlag',
'userPassword', 'modifyTimestamp'
]
mock_rlo = self.mox.CreateMock(ldap.ldapobject.ReconnectLDAPObject)
mock_rlo.simple_bind_s(cred='TEST_BIND_PASSWORD', who='TEST_BIND_DN')
mock_rlo.search_ext(base='TEST_BASE',
filterstr='TEST_FILTER',
scope=ldap.SCOPE_ONELEVEL,
attrlist=mox.SameElementsAs(attrlist),
serverctrls=mox.Func(
self.compareSPRC())).AndReturn('TEST_RES')
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_ENTRY, [test_shadow], None, []))
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_RESULT, None, None, []))
self.mox.StubOutWithMock(ldap, 'ldapobject')
ldap.ldapobject.ReconnectLDAPObject(
uri='TEST_URI',
retry_max=TEST_RETRY_MAX,
retry_delay=TEST_RETRY_DELAY).AndReturn(mock_rlo)
self.mox.ReplayAll()
source = ldapsource.LdapSource(config)
data = source.GetShadowMap()
self.assertEqual(1, len(data))
ent = data.PopItem()
self.assertEqual('test', ent.name)
self.assertEqual('p4ssw0rd', ent.passwd)
def testGetShadowMapWithUidAttr(self):
test_shadow = ('cn=test,ou=People,dc=example,dc=com', {
'uid': ['test'],
'name': ['test'],
'shadowLastChange': ['11296'],
'shadowMax': ['99999'],
'shadowWarning': ['7'],
'shadowInactive': ['-1'],
'shadowExpire': ['-1'],
'shadowFlag': ['134537556'],
'modifyTimestamp': ['20070227012807Z'],
'userPassword': ['{CRYPT}p4ssw0rd']
})
config = dict(self.config)
config['uidattr'] = 'name'
attrlist = [
'uid', 'shadowLastChange', 'shadowMin', 'shadowMax', 'name',
'shadowWarning', 'shadowInactive', 'shadowExpire', 'shadowFlag',
'userPassword', 'modifyTimestamp'
]
mock_rlo = self.mox.CreateMock(ldap.ldapobject.ReconnectLDAPObject)
mock_rlo.simple_bind_s(cred='TEST_BIND_PASSWORD', who='TEST_BIND_DN')
mock_rlo.search_ext(base='TEST_BASE',
filterstr='TEST_FILTER',
scope=ldap.SCOPE_ONELEVEL,
attrlist=mox.SameElementsAs(attrlist),
serverctrls=mox.Func(
self.compareSPRC())).AndReturn('TEST_RES')
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_ENTRY, [test_shadow], None, []))
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_RESULT, None, None, []))
self.mox.StubOutWithMock(ldap, 'ldapobject')
ldap.ldapobject.ReconnectLDAPObject(
uri='TEST_URI',
retry_max=TEST_RETRY_MAX,
retry_delay=TEST_RETRY_DELAY).AndReturn(mock_rlo)
self.mox.ReplayAll()
source = ldapsource.LdapSource(config)
data = source.GetShadowMap()
self.assertEqual(1, len(data))
ent = data.PopItem()
self.assertEqual('test', ent.name)
self.assertEqual('p4ssw0rd', ent.passwd)
def testGetNetgroupMap(self):
test_posix_netgroup = ('cn=test,ou=netgroup,dc=example,dc=com', {
'cn': ['test'],
'memberNisNetgroup': ['admins'],
'nisNetgroupTriple': ['(-,hax0r,)'],
'modifyTimestamp': ['20070227012807Z'],
})
config = dict(self.config)
attrlist = [
'cn', 'memberNisNetgroup', 'nisNetgroupTriple', 'modifyTimestamp'
]
mock_rlo = self.mox.CreateMock(ldap.ldapobject.ReconnectLDAPObject)
mock_rlo.simple_bind_s(cred='TEST_BIND_PASSWORD', who='TEST_BIND_DN')
mock_rlo.search_ext(base='TEST_BASE',
filterstr='TEST_FILTER',
scope=ldap.SCOPE_ONELEVEL,
attrlist=mox.SameElementsAs(attrlist),
serverctrls=mox.Func(
self.compareSPRC())).AndReturn('TEST_RES')
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_ENTRY, [test_posix_netgroup], None, []))
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_RESULT, None, None, []))
self.mox.StubOutWithMock(ldap, 'ldapobject')
ldap.ldapobject.ReconnectLDAPObject(
uri='TEST_URI',
retry_max=TEST_RETRY_MAX,
retry_delay=TEST_RETRY_DELAY).AndReturn(mock_rlo)
self.mox.ReplayAll()
source = ldapsource.LdapSource(config)
data = source.GetNetgroupMap()
self.assertEqual(1, len(data))
ent = data.PopItem()
self.assertEqual('test', ent.name)
self.assertEqual('(-,hax0r,) admins', ent.entries)
def testGetNetgroupMapWithDupes(self):
test_posix_netgroup = ('cn=test,ou=netgroup,dc=example,dc=com', {
'cn': ['test'],
'memberNisNetgroup': ['(-,hax0r,)'],
'nisNetgroupTriple': ['(-,hax0r,)'],
'modifyTimestamp': ['20070227012807Z'],
})
config = dict(self.config)
attrlist = [
'cn', 'memberNisNetgroup', 'nisNetgroupTriple', 'modifyTimestamp'
]
mock_rlo = self.mox.CreateMock(ldap.ldapobject.ReconnectLDAPObject)
mock_rlo.simple_bind_s(cred='TEST_BIND_PASSWORD', who='TEST_BIND_DN')
mock_rlo.search_ext(base='TEST_BASE',
filterstr='TEST_FILTER',
scope=ldap.SCOPE_ONELEVEL,
attrlist=mox.SameElementsAs(attrlist),
serverctrls=mox.Func(
self.compareSPRC())).AndReturn('TEST_RES')
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_ENTRY, [test_posix_netgroup], None, []))
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_RESULT, None, None, []))
self.mox.StubOutWithMock(ldap, 'ldapobject')
ldap.ldapobject.ReconnectLDAPObject(
uri='TEST_URI',
retry_max=TEST_RETRY_MAX,
retry_delay=TEST_RETRY_DELAY).AndReturn(mock_rlo)
self.mox.ReplayAll()
source = ldapsource.LdapSource(config)
data = source.GetNetgroupMap()
self.assertEqual(1, len(data))
ent = data.PopItem()
self.assertEqual('test', ent.name)
self.assertEqual('(-,hax0r,)', ent.entries)
def testGetAutomountMap(self):
test_automount = (
'cn=user,ou=auto.home,ou=automounts,dc=example,dc=com', {
'cn': ['user'],
'automountInformation': ['-tcp,rw home:/home/user'],
'modifyTimestamp': ['20070227012807Z'],
})
config = dict(self.config)
attrlist = ['cn', 'automountInformation', 'modifyTimestamp']
filterstr = '(objectclass=automount)'
mock_rlo = self.mox.CreateMock(ldap.ldapobject.ReconnectLDAPObject)
mock_rlo.simple_bind_s(cred='TEST_BIND_PASSWORD', who='TEST_BIND_DN')
mock_rlo.search_ext(base='TEST_BASE',
filterstr=filterstr,
scope=ldap.SCOPE_ONELEVEL,
attrlist=mox.SameElementsAs(attrlist),
serverctrls=mox.Func(
self.compareSPRC())).AndReturn('TEST_RES')
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_ENTRY, [test_automount], None, []))
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_RESULT, None, None, []))
self.mox.StubOutWithMock(ldap, 'ldapobject')
ldap.ldapobject.ReconnectLDAPObject(
uri='TEST_URI',
retry_max=TEST_RETRY_MAX,
retry_delay=TEST_RETRY_DELAY).AndReturn(mock_rlo)
self.mox.ReplayAll()
source = ldapsource.LdapSource(config)
data = source.GetAutomountMap(location='TEST_BASE')
self.assertEqual(1, len(data))
ent = data.PopItem()
self.assertEqual('user', ent.key)
self.assertEqual('-tcp,rw', ent.options)
self.assertEqual('home:/home/user', ent.location)
def testGetAutomountMasterMap(self):
test_master_ou = ('ou=auto.master,ou=automounts,dc=example,dc=com', {
'ou': ['auto.master']
})
test_automount = (
'cn=/home,ou=auto.master,ou=automounts,dc=example,dc=com', {
'cn': ['/home'],
'automountInformation': [
'ldap:ldap:ou=auto.home,'
'ou=automounts,dc=example,'
'dc=com'
],
'modifyTimestamp': ['20070227012807Z']
})
config = dict(self.config)
# first search for the dn of ou=auto.master
attrlist = ['dn']
filterstr = '(&(objectclass=automountMap)(ou=auto.master))'
scope = ldap.SCOPE_SUBTREE
mock_rlo = self.mox.CreateMock(ldap.ldapobject.ReconnectLDAPObject)
mock_rlo.simple_bind_s(cred='TEST_BIND_PASSWORD', who='TEST_BIND_DN')
mock_rlo.search_ext(base='TEST_BASE',
filterstr=filterstr,
scope=scope,
attrlist=mox.SameElementsAs(attrlist),
serverctrls=mox.Func(
self.compareSPRC())).AndReturn('TEST_RES')
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_ENTRY, [test_master_ou], None, []))
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_RESULT, None, None, []))
# then search for the entries under ou=auto.master
attrlist = ['cn', 'automountInformation', 'modifyTimestamp']
filterstr = '(objectclass=automount)'
scope = ldap.SCOPE_ONELEVEL
base = 'ou=auto.master,ou=automounts,dc=example,dc=com'
mock_rlo.search_ext(base=base,
filterstr=filterstr,
scope=scope,
attrlist=mox.SameElementsAs(attrlist),
serverctrls=mox.Func(
self.compareSPRC())).AndReturn('TEST_RES')
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_ENTRY, [test_automount], None, []))
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_RESULT, None, None, []))
self.mox.StubOutWithMock(ldap, 'ldapobject')
ldap.ldapobject.ReconnectLDAPObject(
uri='TEST_URI',
retry_max=TEST_RETRY_MAX,
retry_delay=TEST_RETRY_DELAY).AndReturn(mock_rlo)
self.mox.ReplayAll()
source = ldapsource.LdapSource(config)
data = source.GetAutomountMasterMap()
self.assertEqual(1, len(data))
ent = data.PopItem()
self.assertEqual('/home', ent.key)
self.assertEqual('ou=auto.home,ou=automounts,dc=example,dc=com',
ent.location)
self.assertEqual(None, ent.options)
def testVerify(self):
attrlist = [
'uid', 'uidNumber', 'gidNumber', 'gecos', 'cn', 'homeDirectory',
'fullName', 'loginShell', 'modifyTimestamp'
]
filterstr = '(&TEST_FILTER(modifyTimestamp>=19700101000001Z))'
mock_rlo = self.mox.CreateMock(ldap.ldapobject.ReconnectLDAPObject)
mock_rlo.simple_bind_s(cred='TEST_BIND_PASSWORD', who='TEST_BIND_DN')
mock_rlo.search_ext(base='TEST_BASE',
filterstr=filterstr,
scope=ldap.SCOPE_ONELEVEL,
attrlist=mox.SameElementsAs(attrlist),
serverctrls=mox.Func(
self.compareSPRC())).AndReturn('TEST_RES')
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_RESULT, None, None, []))
self.mox.StubOutWithMock(ldap, 'ldapobject')
ldap.ldapobject.ReconnectLDAPObject(
uri='TEST_URI',
retry_max=TEST_RETRY_MAX,
retry_delay=TEST_RETRY_DELAY).AndReturn(mock_rlo)
self.mox.ReplayAll()
source = ldapsource.LdapSource(self.config)
self.assertEqual(0, source.Verify(0))
def testVerifyRID(self):
attrlist = [
'uid', 'uidNumber', 'gidNumber', 'gecos', 'cn', 'homeDirectory',
'fullName', 'loginShell', 'modifyTimestamp', 'sambaSID'
]
filterstr = '(&TEST_FILTER(modifyTimestamp>=19700101000001Z))'
mock_rlo = self.mox.CreateMock(ldap.ldapobject.ReconnectLDAPObject)
mock_rlo.simple_bind_s(cred='TEST_BIND_PASSWORD', who='TEST_BIND_DN')
mock_rlo.search_ext(base='TEST_BASE',
filterstr=filterstr,
scope=ldap.SCOPE_ONELEVEL,
attrlist=mox.SameElementsAs(attrlist),
serverctrls=mox.Func(
self.compareSPRC())).AndReturn('TEST_RES')
mock_rlo.result3('TEST_RES', all=0, timeout='TEST_TIMELIMIT').AndReturn(
(ldap.RES_SEARCH_RESULT, None, None, []))
self.mox.StubOutWithMock(ldap, 'ldapobject')
ldap.ldapobject.ReconnectLDAPObject(
uri='TEST_URI',
retry_max=TEST_RETRY_MAX,
retry_delay=TEST_RETRY_DELAY).AndReturn(mock_rlo)
config = dict(self.config)
config['use_rid'] = 1
self.mox.ReplayAll()
source = ldapsource.LdapSource(config)
self.assertEqual(0, source.Verify(0))
class TestUpdateGetter(unittest.TestCase):
def setUp(self):
"""Create a dummy source object."""
super(TestUpdateGetter, self).setUp()
class DummySource(list):
"""Dummy Source class for this test.
Inherits from list as Sources are iterables.
"""
def Search(self, search_base, search_filter, search_scope, attrs):
pass
self.source = DummySource()
def testFromTimestampToLdap(self):
ts = 1259641025
expected_ldap_ts = '20091201041705Z'
self.assertEqual(expected_ldap_ts,
ldapsource.UpdateGetter({}).FromTimestampToLdap(ts))
def testFromLdapToTimestamp(self):
expected_ts = 1259641025
ldap_ts = '20091201041705Z'
self.assertEqual(
expected_ts,
ldapsource.UpdateGetter({}).FromLdapToTimestamp(ldap_ts))
def testPasswdEmptySourceGetUpdates(self):
"""Test that getUpdates on the PasswdUpdateGetter works."""
getter = ldapsource.PasswdUpdateGetter({})
data = getter.GetUpdates(self.source, 'TEST_BASE', 'TEST_FILTER',
'base', None)
self.assertEqual(passwd.PasswdMap, type(data))
def testGroupEmptySourceGetUpdates(self):
"""Test that getUpdates on the GroupUpdateGetter works."""
getter = ldapsource.GroupUpdateGetter({})
data = getter.GetUpdates(self.source, 'TEST_BASE', 'TEST_FILTER',
'base', None)
self.assertEqual(group.GroupMap, type(data))
def testShadowEmptySourceGetUpdates(self):
"""Test that getUpdates on the ShadowUpdateGetter works."""
getter = ldapsource.ShadowUpdateGetter({})
data = getter.GetUpdates(self.source, 'TEST_BASE', 'TEST_FILTER',
'base', None)
self.assertEqual(shadow.ShadowMap, type(data))
def testAutomountEmptySourceGetsUpdates(self):
"""Test that getUpdates on the AutomountUpdateGetter works."""
getter = ldapsource.AutomountUpdateGetter({})
data = getter.GetUpdates(self.source, 'TEST_BASE', 'TEST_FILTER',
'base', None)
self.assertEqual(automount.AutomountMap, type(data))
def testBadScopeException(self):
"""Test that a bad scope raises a config.ConfigurationError."""
# One of the getters is sufficient, they all inherit the
# exception-raising code.
getter = ldapsource.PasswdUpdateGetter({})
self.assertRaises(error.ConfigurationError, getter.GetUpdates,
self.source, 'TEST_BASE', 'TEST_FILTER', 'BAD_SCOPE',
None)
if __name__ == '__main__':
unittest.main()
nsscache-version-0.42/nss_cache/sources/s3source.py
"""An implementation of an S3 data source for nsscache."""
__author__ = 'alexey.pikin@gmail.com'
import base64
import collections
import logging
import json
import datetime
import boto3
from botocore.exceptions import ClientError
from nss_cache.maps import group
from nss_cache.maps import passwd
from nss_cache.maps import shadow
from nss_cache.sources import source
from nss_cache import error
def RegisterImplementation(registration_callback):
registration_callback(S3FilesSource)
class S3FilesSource(source.Source):
"""Source for data fetched from S3."""
# S3 defaults
BUCKET = ''
PASSWD_OBJECT = ''
GROUP_OBJECT = ''
SHADOW_OBJECT = ''
# for registration
name = 's3'
def __init__(self, conf):
"""Initialise the S3FilesSource object.
Args:
conf: A dictionary of key/value pairs.
Raises:
RuntimeError: object wasn't initialised with a dict
"""
super(S3FilesSource, self).__init__(conf)
self._SetDefaults(conf)
self.s3_client = None
def _GetClient(self):
if self.s3_client is None:
self.s3_client = boto3.client('s3')
return self.s3_client
def _SetDefaults(self, configuration):
"""Set defaults if necessary."""
if 'bucket' not in configuration:
configuration['bucket'] = self.BUCKET
if 'passwd_object' not in configuration:
configuration['passwd_object'] = self.PASSWD_OBJECT
if 'group_object' not in configuration:
configuration['group_object'] = self.GROUP_OBJECT
if 'shadow_object' not in configuration:
configuration['shadow_object'] = self.SHADOW_OBJECT
def GetPasswdMap(self, since=None):
"""Return the passwd map from this source.
Args:
since: Get data only changed since this timestamp (inclusive) or None
for all data.
Returns:
instance of passwd.PasswdMap
"""
return PasswdUpdateGetter().GetUpdates(self._GetClient(),
self.conf['bucket'],
self.conf['passwd_object'],
since)
def GetGroupMap(self, since=None):
"""Return the group map from this source.
Args:
since: Get data only changed since this timestamp (inclusive) or None
for all data.
Returns:
instance of group.GroupMap
"""
return GroupUpdateGetter().GetUpdates(self._GetClient(),
self.conf['bucket'],
self.conf['group_object'], since)
def GetShadowMap(self, since=None):
"""Return the shadow map from this source.
Args:
since: Get data only changed since this timestamp (inclusive) or None
for all data.
Returns:
instance of shadow.ShadowMap
"""
return ShadowUpdateGetter().GetUpdates(self._GetClient(),
self.conf['bucket'],
self.conf['shadow_object'],
since)
class S3UpdateGetter(object):
"""Base class that gets updates from s3."""
def __init__(self):
self.log = logging.getLogger(__name__)
def FromTimestampToDateTime(self, ts):
"""Converts internal nss_cache timestamp to datetime object.
Args:
ts: number of seconds since epoch
Returns:
datetime object
"""
return datetime.datetime.utcfromtimestamp(ts)
def FromDateTimeToTimestamp(self, datetime_obj):
"""Converts datetime object to internal nss_cache timestamp.
Args:
datetime_obj: datetime object to convert
Returns:
number of seconds since epoch
"""
dt = datetime_obj.replace(tzinfo=None)
return int((dt - datetime.datetime(1970, 1, 1)).total_seconds())
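# Illustrative round trip, using the timestamp exercised by the ldapsource
# tests above (not values from this module):
#   FromTimestampToDateTime(1259641025) -> datetime(2009, 12, 1, 4, 17, 5)
#   FromDateTimeToTimestamp(datetime(2009, 12, 1, 4, 17, 5)) -> 1259641025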
def GetUpdates(self, s3_client, bucket, obj, since):
"""Get updates from a source.
Args:
s3_client: initialized s3 client
bucket: s3 bucket
obj: object with the data
since: a timestamp representing the last change (None to force-get)
Returns:
A Map containing the updates with its modify timestamp set, or an
empty list if the object has not changed since 'since'
Raises:
ValueError: an object in the source map is malformed
error.SourceUnavailable: the object could not be retrieved from S3
"""
try:
if since is not None:
response = s3_client.get_object(
Bucket=bucket,
IfModifiedSince=self.FromTimestampToDateTime(since),
Key=obj)
else:
response = s3_client.get_object(Bucket=bucket, Key=obj)
body = response['Body']
last_modified_ts = self.FromDateTimeToTimestamp(
response['LastModified'])
except ClientError as e:
error_code = int(e.response['Error']['Code'])
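# HTTP 304 Not Modified: the object has not changed since 'since',
# so there is nothing to update.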
if error_code == 304:
return []
self.log.error('error getting S3 object ({}): {}'.format(obj, e))
raise error.SourceUnavailable('unable to download object from S3')
data_map = self.GetMap(cache_info=body)
data_map.SetModifyTimestamp(last_modified_ts)
return data_map
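# A minimal sketch of how a getter is driven (the bucket and key names
# here are assumptions for illustration only):
#   getter = PasswdUpdateGetter()
#   pwd_map = getter.GetUpdates(boto3.client('s3'), 'example-bucket',
#                               'passwd.json', since=None)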
def GetParser(self):
"""Return the appropriate parser.
Must be implemented by child class.
"""
raise NotImplementedError
def GetMap(self, cache_info):
"""Creates a Map from the cache_info data.
Args:
cache_info: file-like object containing the data to parse
Returns:
A child of Map containing the cache data.
"""
return self.GetParser().GetMap(cache_info, self.CreateMap())
class PasswdUpdateGetter(S3UpdateGetter):
"""Get passwd updates."""
def GetParser(self):
"""Returns a MapParser to parse FilesPasswd cache."""
return S3PasswdMapParser()
def CreateMap(self):
"""Returns a new PasswdMap instance to have PasswdMapEntries added to
it."""
return passwd.PasswdMap()
class GroupUpdateGetter(S3UpdateGetter):
"""Get group updates."""
def GetParser(self):
"""Returns a MapParser to parse FilesGroup cache."""
return S3GroupMapParser()
def CreateMap(self):
"""Returns a new GroupMap instance to have GroupMapEntries added to
it."""
return group.GroupMap()
class ShadowUpdateGetter(S3UpdateGetter):
"""Get shadow updates."""
def GetParser(self):
"""Returns a MapParser to parse FilesShadow cache."""
return S3ShadowMapParser()
def CreateMap(self):
"""Returns a new ShadowMap instance to have ShadowMapEntries added to
it."""
return shadow.ShadowMap()
class S3MapParser(object):
"""A base class for parsing nss_files module cache."""
def __init__(self):
self.log = logging.getLogger(__name__)
def GetMap(self, cache_info, data):
"""Returns a map from a cache.
Args:
cache_info: file-like object containing the cache.
data: a Map to populate.
Returns:
A child of Map containing the cache data.
"""
for obj in json.loads(cache_info.read()):
key = obj.get('Key', '')
value = obj.get('Value', '')
if not value or not key:
continue
map_entry = self._ReadEntry(key, value)
if map_entry is None:
self.log.warning(
'Could not create entry from line %r in cache, skipping',
value)
continue
if not data.Add(map_entry):
self.log.warning(
'Could not add entry %r read from line %r in cache',
map_entry, value)
return data
class S3PasswdMapParser(S3MapParser):
"""Class for parsing nss_files module passwd cache."""
def _ReadEntry(self, name, entry):
"""Return a PasswdMapEntry from a record in the target cache."""
map_entry = passwd.PasswdMapEntry()
# maps expect strict typing, so convert to int as appropriate.
map_entry.name = name
map_entry.passwd = entry.get('passwd', 'x')
try:
map_entry.uid = int(entry['uid'])
map_entry.gid = int(entry['gid'])
except (ValueError, KeyError):
return None
map_entry.gecos = entry.get('comment', '')
map_entry.dir = entry.get('home', '/home/{}'.format(name))
map_entry.shell = entry.get('shell', '/bin/bash')
return map_entry
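# For example, _ReadEntry('foo', {'uid': '10', 'gid': '10'}) yields an entry
# with the defaults shell '/bin/bash', home '/home/foo' and passwd 'x'
# (see testDefaultEntryValues in the tests below).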
class S3GroupMapParser(S3MapParser):
"""Class for parsing a nss_files module group cache."""
def _ReadEntry(self, name, entry):
"""Return a GroupMapEntry from a record in the target cache."""
map_entry = group.GroupMapEntry()
# map entries expect strict typing, so convert as appropriate
map_entry.name = name
map_entry.passwd = entry.get('passwd', 'x')
try:
map_entry.gid = int(entry['gid'])
except (ValueError, KeyError):
return None
try:
members = entry.get('members', '').split('\n')
except (ValueError, TypeError):
members = ['']
map_entry.members = members
return map_entry
class S3ShadowMapParser(S3MapParser):
"""Class for parsing nss_files module shadow cache."""
def _ReadEntry(self, name, entry):
"""Return a ShadowMapEntry from a record in the target cache."""
map_entry = shadow.ShadowMapEntry()
# maps expect strict typing, so convert to int as appropriate.
map_entry.name = name
map_entry.passwd = entry.get('passwd', '*')
for attr in ['lstchg', 'min', 'max', 'warn', 'inact', 'expire']:
try:
setattr(map_entry, attr, int(entry[attr]))
except (ValueError, KeyError):
continue
return map_entry
nsscache-version-0.42/nss_cache/sources/s3source_test.py
"""An implementation of a mock S3 data source for nsscache."""
__author__ = 'alexey.pikin@gmail.com'
import unittest
from io import StringIO
from nss_cache.maps import group
from nss_cache.maps import passwd
from nss_cache.maps import shadow
from nss_cache.sources import s3source
class TestS3Source(unittest.TestCase):
def setUp(self):
"""Initialize a basic config dict."""
super(TestS3Source, self).setUp()
self.config = {
'passwd_object': 'PASSWD_OBJ',
'group_object': 'GROUP_OBJ',
'bucket': 'TEST_BUCKET'
}
def testDefaultConfiguration(self):
source = s3source.S3FilesSource({})
self.assertEqual(source.conf['bucket'], s3source.S3FilesSource.BUCKET)
self.assertEqual(source.conf['passwd_object'],
s3source.S3FilesSource.PASSWD_OBJECT)
def testOverrideDefaultConfiguration(self):
source = s3source.S3FilesSource(self.config)
self.assertEqual(source.conf['bucket'], 'TEST_BUCKET')
self.assertEqual(source.conf['passwd_object'], 'PASSWD_OBJ')
self.assertEqual(source.conf['group_object'], 'GROUP_OBJ')
class TestPasswdMapParser(unittest.TestCase):
def setUp(self):
"""Set some default avalible data for testing."""
self.good_entry = passwd.PasswdMapEntry()
self.good_entry.name = 'foo'
self.good_entry.passwd = 'x'
self.good_entry.uid = 10
self.good_entry.gid = 10
self.good_entry.gecos = 'How Now Brown Cow'
self.good_entry.dir = '/home/foo'
self.good_entry.shell = '/bin/bash'
self.parser = s3source.S3PasswdMapParser()
def testGetMap(self):
passwd_map = passwd.PasswdMap()
cache_info = StringIO('''[
{ "Key": "foo",
"Value": {
"uid": 10, "gid": 10, "home": "/home/foo",
"shell": "/bin/bash", "comment": "How Now Brown Cow",
"irrelevant_key":"bacon"
}
}
]''')
self.parser.GetMap(cache_info, passwd_map)
self.assertEqual(self.good_entry, passwd_map.PopItem())
def testReadEntry(self):
data = {
'uid': '10',
'gid': '10',
'comment': 'How Now Brown Cow',
'shell': '/bin/bash',
'home': '/home/foo',
'passwd': 'x'
}
entry = self.parser._ReadEntry('foo', data)
self.assertEqual(self.good_entry, entry)
def testDefaultEntryValues(self):
data = {'uid': '10', 'gid': '10'}
entry = self.parser._ReadEntry('foo', data)
self.assertEqual(entry.shell, '/bin/bash')
self.assertEqual(entry.dir, '/home/foo')
self.assertEqual(entry.gecos, '')
self.assertEqual(entry.passwd, 'x')
def testInvalidEntry(self):
data = {'irrelevant_key': 'bacon'}
entry = self.parser._ReadEntry('foo', data)
self.assertEqual(entry, None)
class TestS3GroupMapParser(unittest.TestCase):
def setUp(self):
self.good_entry = group.GroupMapEntry()
self.good_entry.name = 'foo'
self.good_entry.passwd = 'x'
self.good_entry.gid = 10
self.good_entry.members = ['foo', 'bar']
self.parser = s3source.S3GroupMapParser()
def testGetMap(self):
group_map = group.GroupMap()
cache_info = StringIO('''[
{ "Key": "foo",
"Value": {
"gid": 10,
"members": "foo\\nbar",
"irrelevant_key": "bacon"
}
}
]''')
self.parser.GetMap(cache_info, group_map)
self.assertEqual(self.good_entry, group_map.PopItem())
def testReadEntry(self):
data = {'passwd': 'x', 'gid': '10', 'members': 'foo\nbar'}
entry = self.parser._ReadEntry('foo', data)
self.assertEqual(self.good_entry, entry)
def testDefaultPasswd(self):
data = {'gid': '10', 'members': 'foo\nbar'}
entry = self.parser._ReadEntry('foo', data)
self.assertEqual(self.good_entry, entry)
def testNoMembers(self):
data = {'gid': '10', 'members': ''}
entry = self.parser._ReadEntry('foo', data)
self.assertEqual(entry.members, [''])
def testInvalidEntry(self):
data = {'irrelevant_key': 'bacon'}
entry = self.parser._ReadEntry('foo', data)
self.assertEqual(entry, None)
class TestS3ShadowMapParser(unittest.TestCase):
def setUp(self):
self.good_entry = shadow.ShadowMapEntry()
self.good_entry.name = 'foo'
self.good_entry.passwd = '*'
self.good_entry.lstchg = 17246
self.good_entry.min = 0
self.good_entry.max = 99999
self.good_entry.warn = 7
self.parser = s3source.S3ShadowMapParser()
def testGetMap(self):
shadow_map = shadow.ShadowMap()
cache_info = StringIO('''[
{ "Key": "foo",
"Value": {
"passwd": "*", "lstchg": 17246, "min": 0,
"max": 99999, "warn": 7
}
}
]''')
self.parser.GetMap(cache_info, shadow_map)
self.assertEqual(self.good_entry, shadow_map.PopItem())
def testReadEntry(self):
data = {
'passwd': '*',
'lstchg': 17246,
'min': 0,
'max': 99999,
'warn': 7
}
entry = self.parser._ReadEntry('foo', data)
self.assertEqual(self.good_entry, entry)
def testDefaultPasswd(self):
data = {'lstchg': 17246, 'min': 0, 'max': 99999, 'warn': 7}
entry = self.parser._ReadEntry('foo', data)
self.assertEqual(self.good_entry, entry)
if __name__ == '__main__':
unittest.main()
nsscache-version-0.42/nss_cache/sources/source.py
# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Base class of data source object for nss_cache."""
__author__ = ('jaq@google.com (Jamie Wilkinson)',
'vasilios@google.com (Vasilios Hoffman)')
import logging
from nss_cache import config
from nss_cache import error
class Source(object):
"""Abstract base class for map data sources."""
UPDATER = None
def __init__(self, conf):
"""Initialise the Source object.
Args:
conf: A dictionary of key/value pairs.
Raises:
RuntimeError: object wasn't initialised with a dict
"""
if not isinstance(conf, dict):
raise RuntimeError('Source constructor not passed a dictionary')
self.conf = conf
# create a logger for our children
self.log = logging.getLogger(__name__)
def GetMap(self, map_name, since=None, location=None):
"""Get a specific map from this source.
Args:
map_name: A string representation of the map you want
since: optional timestamp for incremental query
location: optional field used by automounts to indicate a specific map
Returns:
A Map child class for the map requested.
Raises:
UnsupportedMap: for unknown source maps
"""
if map_name == config.MAP_PASSWORD:
return self.GetPasswdMap(since)
elif map_name == config.MAP_SSHKEY:
return self.GetSshkeyMap(since)
elif map_name == config.MAP_GROUP:
return self.GetGroupMap(since)
elif map_name == config.MAP_SHADOW:
return self.GetShadowMap(since)
elif map_name == config.MAP_NETGROUP:
return self.GetNetgroupMap(since)
elif map_name == config.MAP_AUTOMOUNT:
return self.GetAutomountMap(since, location=location)
raise error.UnsupportedMap('Source can not fetch %s' % map_name)
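# Illustrative dispatch on a concrete Source subclass, using the map-name
# constants from nss_cache.config:
#   src.GetMap(config.MAP_PASSWORD)   # -> a passwd.PasswdMap
#   src.GetMap(config.MAP_AUTOMOUNT,
#              location='ou=auto.auto,ou=automounts,dc=example,dc=com')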
def GetAutomountMap(self, since=None, location=None):
"""Get an automount map from this source."""
raise NotImplementedError
def GetAutomountMasterMap(self):
"""Get an automount map from this source."""
raise NotImplementedError
def Verify(self):
"""Perform verification of the source availability.
Attempt to open/connect or otherwise use the data source, and
report if there are any problems.
"""
raise NotImplementedError
class FileSource(object):
"""Abstract base class for file data sources."""
def __init__(self, conf):
"""Initialise the Source object.
Args:
conf: A dictionary of key/value pairs.
Raises:
RuntimeError: object wasn't initialised with a dict
"""
if not isinstance(conf, dict):
raise RuntimeError('Source constructor not passed a dictionary')
self.conf = conf
# create a logger for our children
self.log = logging.getLogger(__name__)
def GetFile(self, map_name, dst_file, current_file, location=None):
"""Retrieve a file from this source.
Args:
map_name: A string representation of the map whose file you want
dst_file: Temporary filename to write to.
current_file: Path to the current cache.
location: optional field used by automounts to indicate a specific map
Returns:
path to new file
Raises:
UnsupportedMap: for unknown source maps
"""
if map_name == config.MAP_PASSWORD:
return self.GetPasswdFile(dst_file, current_file)
elif map_name == config.MAP_GROUP:
return self.GetGroupFile(dst_file, current_file)
elif map_name == config.MAP_SHADOW:
return self.GetShadowFile(dst_file, current_file)
elif map_name == config.MAP_NETGROUP:
return self.GetNetgroupFile(dst_file, current_file)
elif map_name == config.MAP_AUTOMOUNT:
return self.GetAutomountFile(dst_file,
current_file,
location=location)
raise error.UnsupportedMap('Source can not fetch %s' % map_name)
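# Illustrative call on a concrete FileSource subclass (both paths here are
# assumptions): fetch a fresh passwd file while letting the source diff
# against the current cache:
#   new_path = file_source.GetFile(config.MAP_PASSWORD, '/tmp/passwd.new',
#                                  current_file='/etc/passwd.cache')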
nsscache-version-0.42/nss_cache/sources/source_factory.py
# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Factory for data source implementations."""
__author__ = ('jaq@google.com (Jamie Wilkinson)',
'vasilios@google.com (Vasilios Hoffman)')
_source_implementations = {}
def RegisterImplementation(source):
"""Register a Source implementation with the factory method.
Sources being registered are expected to have a name attribute,
unique to themselves.
Child modules are expected to call this method in the file-level
scope.
Args:
source: A class type that is a subclass of Source
Returns:
Nothing
Raises:
RuntimeError: no 'name' entry in this source.
"""
global _source_implementations
if 'name' not in source.__dict__:
raise RuntimeError("'name' not defined in Source %r" % (source,))
_source_implementations[source.name] = source
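# A minimal sketch of the expected registration pattern (this mirrors the
# DummySource used in source_factory_test.py below):
#   class DummySource(source.Source):
#       name = 'dummy'
#   RegisterImplementation(DummySource)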
# Discover all the known implementations of sources.
try:
from nss_cache.sources import httpsource
httpsource.RegisterImplementation(RegisterImplementation)
except ImportError:
pass
try:
from nss_cache.sources import ldapsource
ldapsource.RegisterImplementation(RegisterImplementation)
except ImportError:
pass
try:
from nss_cache.sources import consulsource
consulsource.RegisterImplementation(RegisterImplementation)
except ImportError:
pass
try:
from nss_cache.sources import s3source
s3source.RegisterImplementation(RegisterImplementation)
except ImportError:
pass
def Create(conf):
"""Source creation factory method.
Args:
conf: a dictionary of configuration key/value pairs, including one
required attribute 'name'.
Returns:
A Source instance.
Raises:
RuntimeError: no sources are registered with RegisterImplementation
"""
global _source_implementations
if not _source_implementations:
raise RuntimeError('no source implementations exist')
source_name = conf['name']
if source_name not in list(_source_implementations.keys()):
raise RuntimeError('source not implemented: %r' % (source_name,))
return _source_implementations[source_name](conf)
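# Illustrative use (the bucket value is an assumption): with the s3 source
# registered above and boto3 importable,
#   src = Create({'name': 's3', 'bucket': 'example-bucket'})
# returns an S3FilesSource configured for that bucket.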
nsscache-version-0.42/nss_cache/sources/source_factory_test.py
# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Unit tests for sources/source.py."""
__author__ = 'jaq@google.com (Jamie Wilkinson)'
import unittest
from nss_cache.sources import source
from nss_cache.sources import source_factory
class TestSourceFactory(unittest.TestCase):
"""Unit tests for the source factory."""
def testRegister(self):
number_of_sources = len(source_factory._source_implementations)
class DummySource(source.Source):
name = 'dummy'
source_factory.RegisterImplementation(DummySource)
self.assertEqual(number_of_sources + 1,
len(source_factory._source_implementations))
self.assertEqual(DummySource,
source_factory._source_implementations['dummy'])
def testRegisterWithoutName(self):
class DummySource(source.Source):
pass
self.assertRaises(RuntimeError, source_factory.RegisterImplementation,
DummySource)
def testCreateWithNoImplementations(self):
source_factory._source_implementations = {}
self.assertRaises(RuntimeError, source_factory.Create, {})
def testCreate(self):
class DummySource(source.Source):
name = 'dummy'
source_factory.RegisterImplementation(DummySource)
dummy_config = {'name': 'dummy'}
dummy_source = source_factory.Create(dummy_config)
self.assertEqual(DummySource, type(dummy_source))
if __name__ == '__main__':
unittest.main()
nsscache-version-0.42/nss_cache/sources/source_test.py
# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Unit tests for sources/source.py."""
__author__ = 'jaq@google.com (Jamie Wilkinson)'
import unittest
from nss_cache.sources import source
class TestSource(unittest.TestCase):
"""Unit tests for the Source class."""
def testCreateNoConfig(self):
config = []
self.assertRaises(RuntimeError, source.Source, config)
self.assertRaises(RuntimeError, source.Source, None)
config = 'foo'
self.assertRaises(RuntimeError, source.Source, config)
def testVerify(self):
s = source.Source({})
self.assertRaises(NotImplementedError, s.Verify)
if __name__ == '__main__':
unittest.main()
nsscache-version-0.42/nss_cache/update/
nsscache-version-0.42/nss_cache/update/__init__.py
nsscache-version-0.42/nss_cache/update/files_updater.py
# Copyright 2010 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Update class, used for manipulating source and cache data.
These update classes are based around file synchronization rather than
map synchronization.
These classes contains all the business logic for updating cache objects.
They also contain the code for reading, writing, and updating timestamps.
"""
__author__ = (
'jaq@google.com (Jamie Wilkinson)',
'vasilios@google.com (V Hoffman)',
'blaedd@google.com (David MacKinnon)',
)
import errno
import os
import tempfile
import time
from nss_cache import error
from nss_cache.caches import cache_factory
from nss_cache.update import updater
class FileMapUpdater(updater.Updater):
"""Updates simple map files like passwd, group, shadow, and netgroup."""
def UpdateCacheFromSource(self,
cache,
source,
incremental=False,
force_write=False,
location=None):
"""Update a single cache file, from a given source.
Args:
cache: A nss_cache.caches.Cache object.
source: A nss_cache.sources.Source object.
incremental: We ignore this.
force_write: A boolean flag forcing empty map updates when False,
defaults to False.
location: The optional location in the source of this map used by
automount to specify which automount map to get, defaults to None.
Returns:
An int indicating the success of an update (0 == good, fail otherwise).
"""
return_val = 0
cache_filename = cache.GetCacheFilename()
if cache_filename is not None:
new_file_fd, new_file = tempfile.mkstemp(
dir=os.path.dirname(cache_filename),
prefix=os.path.basename(cache_filename),
suffix='.nsscache.tmp')
else:
raise error.CacheInvalid('Cache has no filename.')
self.log.debug('temp source filename: %s', new_file)
try:
# Writes the source to new_file.
# Current file is passed in to allow the source to do partial diffs.
# TODO(jaq): refactor this to pass in the whole cache, so that the source
# can decide how to reduce downloads, c.f. last-modify-timestamp for ldap.
source.GetFile(self.map_name,
new_file,
current_file=cache.GetCacheFilename(),
location=location)
os.lseek(new_file_fd, 0, os.SEEK_SET)
# TODO(jaq): this sucks.
source_cache = cache_factory.Create(self.cache_options,
self.map_name)
source_map = source_cache.GetMap(new_file)
# Update the cache from the new file.
return_val += self._FullUpdateFromFile(cache, source_map,
force_write)
finally:
try:
os.unlink(new_file)
except OSError as e:
# If we're using zsync source, it already renames the file for us.
if e.errno != errno.ENOENT:
raise
return return_val
def _FullUpdateFromFile(self, cache, source_map, force_write=False):
"""Write a new map into the provided cache (overwrites).
Args:
cache: A nss_cache.caches.Cache object.
source_map: The map whose contents replace the cache, and which is
used for verification.
force_write: A boolean flag forcing empty map updates when False,
defaults to False.
Returns:
0 if successful, non-zero indicating number of failures otherwise.
Raises:
EmptyMap: Update is an empty map, not raised if force_write=True.
InvalidMap:
"""
return_val = 0
for entry in source_map:
if not entry.Verify():
raise error.InvalidMap('Map is not valid. Aborting')
if len(source_map) == 0 and not force_write:
raise error.EmptyMap(
'Source map empty during full update, aborting. '
'Use --force-write to override.')
return_val += cache.WriteMap(map_data=source_map)
# We did an update, write our timestamps unless there is an error.
if return_val == 0:
mtime = os.stat(cache.GetCacheFilename()).st_mtime
self.log.debug('Cache filename %s has mtime %d',
cache.GetCacheFilename(), mtime)
self.WriteModifyTimestamp(mtime)
self.WriteUpdateTimestamp()
return return_val
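# A minimal sketch of driving this updater (mirrors the unit tests in
# files_updater_test.py; both directory paths are assumptions):
#   updater = FileMapUpdater(config.MAP_PASSWORD, '/var/lib/nsscache',
#                            {'name': 'files', 'dir': '/etc'})
#   updater.UpdateCacheFromSource(cache, source, force_write=False)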
class FileAutomountUpdater(updater.Updater):
"""Update an automount map.
Automount maps are a unique case. They are not a single set of map entries,
they are a set of sets. Updating automount maps require fetching the list
of maps and updating each map as well as the list of maps.
This class is written to re-use the individual update code in the
FileMapUpdater class.
"""
# automount-specific options
OPT_LOCAL_MASTER = 'local_automount_master'
def __init__(self,
map_name,
timestamp_dir,
cache_options,
automount_mountpoint=None):
"""Initialize automount-specific updater options.
Args:
map_name: A string representing the type of the map we are an Updater for.
timestamp_dir: A string with the directory containing our timestamp files.
cache_options: A dict containing the options for any caches we create.
automount_mountpoint: An optional string containing automount path info.
"""
updater.Updater.__init__(self, map_name, timestamp_dir, cache_options,
automount_mountpoint)
self.local_master = False
if self.OPT_LOCAL_MASTER in cache_options:
if cache_options[self.OPT_LOCAL_MASTER] == 'yes':
self.local_master = True
def UpdateFromSource(self, source, incremental=False, force_write=False):
"""Update the automount master map, and every map it points to.
We fetch a full copy of the master map every time, and then use the
FileMapUpdater to write each map the master map points to, as well
as the master map itself.
During this process, the master map will be modified. It starts
out pointing to other maps in the source, but when written it needs
to point to other maps in the cache instead. For example, using ldap we
store this data in ldap:
map_entry.key = /auto
map_entry.location = ou=auto.auto,ou=automounts,dc=example,dc=com
We need to go back to ldap get the map in ou=auto.auto, but when it comes
time to write the master map to (for example) a file, we need to write
out the /etc/auto.master file with:
map_entry.key = /auto
map_entry.location = /etc/auto.auto
This is annoying :) Since the keys are fixed, namely /auto is a mountpoint
that isn't going to change format, we expect each Cache implementation that
supports automount maps to support a GetMapLocation() method which returns
the correct cache location from the key.
Args:
source: An nss_cache.sources.Source object.
incremental: Not used by this class
force_write: A boolean flag forcing empty map updates when False,
defaults to False.
Returns:
An int indicating success of update (0 == good, fail otherwise).
"""
return_val = 0
try:
if not self.local_master:
self.log.info('Retrieving automount master map.')
master_file = source.GetAutomountMasterFile(
os.path.join(self.cache_options['dir'], 'auto.master'))
master_cache = cache_factory.Create(self.cache_options,
self.map_name, None)
master_map = master_cache.GetMap()
except error.CacheNotFound:
return 1
if self.local_master:
self.log.info('Using local master map to determine maps to update.')
# we need the local map to determine which of the other maps to update
cache = cache_factory.Create(self.cache_options,
self.map_name,
automount_mountpoint=None)
try:
local_master = cache.GetMap()
except error.CacheNotFound:
self.log.warning('Local master map specified but no map found! '
'No maps will update.')
return return_val + 1
# update specific maps, e.g. auto.home and auto.auto
for map_entry in master_map:
source_location = os.path.basename(map_entry.location)
mountpoint = map_entry.key # e.g. /auto mountpoint
self.log.debug('Looking at mountpoint %s', mountpoint)
# create the cache to update
cache = cache_factory.Create(self.cache_options,
self.map_name,
automount_mountpoint=mountpoint)
# update the master map with the location of the map in the cache
# e.g. /etc/auto.auto replaces ou=auto.auto
map_entry.location = cache.GetMapLocation()
self.log.debug('Map location: %s', map_entry.location)
# if configured to use the local master map, skip any not defined there
if self.local_master:
if map_entry not in local_master:
self.log.info('Skipping entry %s, not in map %s', map_entry,
local_master)
continue
self.log.info('Updating mountpoint %s', map_entry.key)
# update this map (e.g. /etc/auto.auto)
update_obj = FileMapUpdater(self.map_name,
self.timestamp_dir,
self.cache_options,
automount_mountpoint=mountpoint)
return_val += update_obj.UpdateCacheFromSource(
cache, source, False, force_write, source_location)
# with sub-maps updated, write modified master map to disk if configured to
if not self.local_master:
# automount_mountpoint=None defaults to master
cache = cache_factory.Create(self.cache_options,
self.map_name,
automount_mountpoint=None)
update_obj = FileMapUpdater(self.map_name, self.timestamp_dir,
self.cache_options)
return_val += update_obj.FullUpdateFromMap(cache, master_file)
return return_val
nsscache-version-0.42/nss_cache/update/files_updater_test.py
"""Unit tests for nss_cache/files_updater.py."""
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Unit tests for nss_cache/files_updater.py."""
__author__ = ('vasilios@google.com (V Hoffman)',
'jaq@google.com (Jamie Wilkinson)',
'blaedd@google.com (David MacKinnon)')
import os
import shutil
import tempfile
import time
import unittest
from mox3 import mox
from nss_cache import config
from nss_cache import error
from nss_cache.caches import cache_factory
from nss_cache.caches import files
from nss_cache.maps import automount
from nss_cache.maps import passwd
from nss_cache.sources import source
from nss_cache.update import files_updater
class SingleFileUpdaterTest(mox.MoxTestBase):
"""Unit tests for FileMapUpdater."""
def setUp(self):
super(SingleFileUpdaterTest, self).setUp()
self.workdir = tempfile.mkdtemp()
self.workdir2 = tempfile.mkdtemp()
def tearDown(self):
super(SingleFileUpdaterTest, self).tearDown()
shutil.rmtree(self.workdir)
shutil.rmtree(self.workdir2)
@unittest.skip('timestamp is not propagated correctly')
def testFullUpdate(self):
original_modify_stamp = 1
new_modify_stamp = 2
# Construct a fake source.
def GetFile(map_name, dst_file, current_file, location):
print(("GetFile: %s" % dst_file))
f = open(dst_file, 'w')
f.write('root:x:0:0:root:/root:/bin/bash\n')
f.close()
os.utime(dst_file, (1, 2))
os.system("ls -al %s" % dst_file)
return dst_file
dst_file = mox.Value()
source_mock = self.mox.CreateMock(source.FileSource)
source_mock.GetFile(config.MAP_PASSWORD,
mox.Remember(dst_file),
current_file=mox.IgnoreArg(),
location=mox.IgnoreArg()).WithSideEffects(
GetFile).AndReturn(dst_file)
# Construct the cache.
cache = files.FilesPasswdMapHandler({'dir': self.workdir2})
map_entry = passwd.PasswdMapEntry({'name': 'foo', 'uid': 10, 'gid': 10})
password_map = passwd.PasswdMap()
password_map.SetModifyTimestamp(new_modify_stamp)
password_map.Add(map_entry)
cache.Write(password_map)
updater = files_updater.FileMapUpdater(config.MAP_PASSWORD,
self.workdir, {
'name': 'files',
'dir': self.workdir2
})
updater.WriteModifyTimestamp(original_modify_stamp)
self.mox.ReplayAll()
self.assertEqual(
0,
updater.UpdateCacheFromSource(cache,
source_mock,
force_write=False,
location=None))
self.assertEqual(new_modify_stamp, updater.GetModifyTimestamp())
self.assertNotEqual(None, updater.GetUpdateTimestamp())
@unittest.skip('source map empty during full update')
def testFullUpdateOnEmptyCache(self):
"""A full update as above, but the initial cache is empty."""
original_modify_stamp = 1
new_modify_stamp = 2
# Construct an updater
self.updater = files_updater.FileMapUpdater(config.MAP_PASSWORD,
self.workdir, {
'name': 'files',
'dir': self.workdir2
})
self.updater.WriteModifyTimestamp(original_modify_stamp)
# Construct a cache
cache = files.FilesPasswdMapHandler({'dir': self.workdir2})
def GetFileEffects(map_name, dst_file, current_file, location):
f = open(dst_file, 'w')
f.write('root:x:0:0:root:/root:/bin/bash\n')
f.close()
os.utime(dst_file, (1, 2))
return dst_file
source_mock = self.mox.CreateMock(source.FileSource)
source_mock.GetFile(config.MAP_PASSWORD,
mox.IgnoreArg(),
mox.IgnoreArg(),
location=None).WithSideEffects(GetFileEffects)
#source_mock = MockSource()
self.assertEqual(
0,
self.updater.UpdateCacheFromSource(cache,
source_mock,
force_write=False,
location=None))
self.assertEqual(new_modify_stamp, self.updater.GetModifyTimestamp())
self.assertNotEqual(None, self.updater.GetUpdateTimestamp())
def testFullUpdateOnEmptySource(self):
"""A full update as above, but instead, the initial source is empty."""
original_modify_stamp = 1
new_modify_stamp = 2
# Construct an updater
self.updater = files_updater.FileMapUpdater(config.MAP_PASSWORD,
self.workdir, {
'name': 'files',
'dir': self.workdir2
})
self.updater.WriteModifyTimestamp(original_modify_stamp)
# Construct a cache
cache = files.FilesPasswdMapHandler({'dir': self.workdir2})
map_entry = passwd.PasswdMapEntry({'name': 'foo', 'uid': 10, 'gid': 10})
password_map = passwd.PasswdMap()
password_map.SetModifyTimestamp(new_modify_stamp)
password_map.Add(map_entry)
cache.Write(password_map)
source_mock = self.mox.CreateMock(source.FileSource)
source_mock.GetFile(config.MAP_PASSWORD,
mox.IgnoreArg(),
current_file=mox.IgnoreArg(),
location=None).AndReturn(None)
self.mox.ReplayAll()
self.assertRaises(error.EmptyMap,
self.updater.UpdateCacheFromSource,
cache,
source_mock,
force_write=False,
location=None)
self.assertNotEqual(new_modify_stamp, self.updater.GetModifyTimestamp())
self.assertEqual(None, self.updater.GetUpdateTimestamp())
@unittest.skip('disabled')
def testFullUpdateOnEmptySourceForceWrite(self):
"""A full update as above, but instead, the initial source is empty."""
original_modify_stamp = time.gmtime(1)
new_modify_stamp = time.gmtime(2)
# Construct an updater
self.updater = files_updater.FileMapUpdater(config.MAP_PASSWORD,
self.workdir, {
'name': 'files',
'dir': self.workdir2
})
self.updater.WriteModifyTimestamp(original_modify_stamp)
# Construct a cache
cache = files.FilesPasswdMapHandler({'dir': self.workdir2})
map_entry = passwd.PasswdMapEntry({'name': 'foo', 'uid': 10, 'gid': 10})
password_map = passwd.PasswdMap()
password_map.SetModifyTimestamp(new_modify_stamp)
password_map.Add(map_entry)
cache.Write(password_map)
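# NOTE: this skipped test still uses the legacy pmock API, which is not
# imported in this module.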
class MockSource(pmock.Mock):
def GetFile(self, map_name, dst_file, current_file, location=None):
assert location is None
assert map_name == config.MAP_PASSWORD
f = open(dst_file, 'w')
f.write('')
f.close()
os.utime(dst_file, (1, 2))
return dst_file
source_mock = MockSource()
self.assertEqual(
0,
self.updater.UpdateCacheFromSource(cache,
source_mock,
force_write=True,
location=None))
self.assertEqual(new_modify_stamp, self.updater.GetModifyTimestamp())
self.assertNotEqual(None, self.updater.GetUpdateTimestamp())
@unittest.skip('disabled')
class AutomountUpdaterTest(mox.MoxTestBase):
"""Unit tests for FileAutomountUpdater class."""
def setUp(self):
super(AutomountUpdaterTest, self).setUp()
self.workdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.workdir)
super(AutomountUpdaterTest, self).tearDown()
def testInit(self):
"""An automount object correctly sets map-specific attributes."""
updater = files_updater.FileAutomountUpdater(config.MAP_AUTOMOUNT,
self.workdir, {})
self.assertEqual(updater.local_master, False)
conf = {files_updater.FileAutomountUpdater.OPT_LOCAL_MASTER: 'yes'}
updater = files_updater.FileAutomountUpdater(config.MAP_AUTOMOUNT,
self.workdir, conf)
self.assertEqual(updater.local_master, True)
conf = {files_updater.FileAutomountUpdater.OPT_LOCAL_MASTER: 'no'}
updater = files_updater.FileAutomountUpdater(config.MAP_AUTOMOUNT,
self.workdir, conf)
self.assertEqual(updater.local_master, False)
def testUpdate(self):
"""An update gets a master map and updates each entry."""
map_entry1 = automount.AutomountMapEntry()
map_entry2 = automount.AutomountMapEntry()
map_entry1.key = '/home'
map_entry2.key = '/auto'
map_entry1.location = 'ou=auto.home,ou=automounts'
map_entry2.location = 'ou=auto.auto,ou=automounts'
master_map = automount.AutomountMap([map_entry1, map_entry2])
source_mock = self.mox.CreateMock(zsyncsource.ZSyncSource)
source_mock.GetAutomountMasterFile(
mox.IgnoreArg()).AndReturn(master_map)
# the auto.home cache
cache_mock1 = self.mox.CreateMock(files.FilesCache)
cache_mock1.GetCacheFilename().AndReturn(None)
cache_mock1.GetMapLocation().AndReturn('/etc/auto.home')
# the auto.auto cache
cache_mock2 = self.mox.CreateMock(files.FilesCache)
cache_mock2.GetMapLocation().AndReturn('/etc/auto.auto')
cache_mock2.GetCacheFilename().AndReturn(None)
# the auto.master cache
cache_mock3 = self.mox.CreateMock(files.FilesCache)
cache_mock3.GetMap().AndReturn(master_map)
self.mox.StubOutWithMock(cache_factory, 'Create')
cache_factory.Create(mox.IgnoreArg(), mox.IgnoreArg(),
None).AndReturn(cache_mock3)
cache_factory.Create(
mox.IgnoreArg(), mox.IgnoreArg(),
automount_mountpoint='/auto').AndReturn(cache_mock2)
cache_factory.Create(
mox.IgnoreArg(), mox.IgnoreArg(),
automount_mountpoint='/home').AndReturn(cache_mock1)
self.mox.ReplayAll()
options = {'name': 'files', 'dir': self.workdir}
updater = files_updater.FileAutomountUpdater(config.MAP_AUTOMOUNT,
self.workdir, options)
updater.UpdateFromSource(source_mock)
self.assertEqual(map_entry1.location, '/etc/auto.home')
self.assertEqual(map_entry2.location, '/etc/auto.auto')
def testUpdateNoMaster(self):
"""An update skips updating the master map, and approprate sub maps."""
source_entry1 = automount.AutomountMapEntry()
source_entry2 = automount.AutomountMapEntry()
source_entry1.key = '/home'
source_entry2.key = '/auto'
source_entry1.location = 'ou=auto.home,ou=automounts'
source_entry2.location = 'ou=auto.auto,ou=automounts'
source_master = automount.AutomountMap([source_entry1, source_entry2])
local_entry1 = automount.AutomountMapEntry()
local_entry2 = automount.AutomountMapEntry()
local_entry1.key = '/home'
local_entry2.key = '/auto'
local_entry1.location = '/etc/auto.home'
local_entry2.location = '/etc/auto.null'
local_master = automount.AutomountMap([local_entry1, local_entry2])
source_mock = self.mock()
invocation = source_mock.expects(pmock.at_least_once())
invocation._CalledUpdateCacheFromSource()
# we should get called inside the DummyUpdater, too.
# the auto.home cache
cache_mock1 = self.mock()
# GetMapLocation() is called, and set to the master map map_entry
invocation = cache_mock1.expects(pmock.at_least_once()).GetMapLocation()
invocation.will(pmock.return_value('/etc/auto.home'))
# we should get called inside the DummyUpdater
cache_mock1.expects(
pmock.at_least_once())._CalledUpdateCacheFromSource()
# the auto.auto cache
cache_mock2 = self.mock()
# GetMapLocation() is called, and set to the master map map_entry
invocation = cache_mock2.expects(pmock.at_least_once()).GetMapLocation()
invocation.will(pmock.return_value('/etc/auto.auto'))
invocation = cache_mock2.expects(
pmock.at_least_once())._CalledUpdateCacheFromSource()
# the auto.master cache, which should not be written to
cache_mock3 = self.mock()
invocation = cache_mock3.expects(pmock.once())
invocation = invocation.method('GetMap')
invocation.will(pmock.return_value(local_master))
invocation = cache_mock3.expects(pmock.once())
invocation = invocation.method('GetMap')
invocation.will(pmock.return_value(local_master))
cache_mocks = {
'/home': cache_mock1,
'/auto': cache_mock2,
None: cache_mock3
}
# Create needs to return our mock_caches
def DummyCreate(unused_cache_options,
unused_map_name,
automount_mountpoint=None):
# the order of the master_map iterable is not predictable, so we use the
# automount_mountpoint as the key to return the right one.
return cache_mocks[automount_mountpoint]
original_create = cache_factory.Create
cache_factory.Create = DummyCreate
skip = files_updater.FileAutomountUpdater.OPT_LOCAL_MASTER
options = {skip: 'yes', 'dir': self.workdir}
updater = files_updater.FileAutomountUpdater(config.MAP_AUTOMOUNT,
self.workdir, options)
updater.UpdateFromSource(source_mock)
cache_factory.Create = original_create
def testUpdateCatchesMissingMaster(self):
"""Gracefully handle a missing local master map."""
# use an empty master map from the source, to avoid mocking out already
# tested code
source_mock = self.mock()
cache_mock = self.mock()
# raise error on GetMap()
invocation = cache_mock.expects(pmock.once()).GetMap()
invocation.will(pmock.raise_exception(error.CacheNotFound))
# Create needs to return our mock_cache
def DummyCreate(unused_cache_options,
unused_map_name,
automount_mountpoint=None):
# only a single mock cache is used in this test, so return it unconditionally.
return cache_mock
original_create = cache_factory.Create
cache_factory.Create = DummyCreate
skip = files_updater.FileAutomountUpdater.OPT_LOCAL_MASTER
options = {skip: 'yes', 'dir': self.workdir}
updater = files_updater.FileAutomountUpdater(config.MAP_AUTOMOUNT,
self.workdir, options)
return_value = updater.UpdateFromSource(source_mock)
self.assertEqual(return_value, 1)
cache_factory.Create = original_create
if __name__ == '__main__':
unittest.main()
nsscache-version-0.42/nss_cache/update/map_updater.py000066400000000000000000000271161402531134600230020ustar00rootroot00000000000000# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Update class, used for manipulating source and cache data.
These classes contain all the business logic for updating cache objects.
They also contain the code for reading, writing, and updating timestamps.
MapUpdater: Class used for all single map caches.
AutomountUpdater: Class used for updating automount map caches.
"""
__author__ = ('vasilios@google.com (V Hoffman)',
'jaq@google.com (Jamie Wilkinson)')
from nss_cache import error
from nss_cache.caches import cache_factory
from nss_cache.update import updater
class MapUpdater(updater.Updater):
"""Updates simple maps like passwd, group, shadow, and netgroup."""
def UpdateCacheFromSource(self,
cache,
source,
incremental=False,
force_write=False,
location=None):
"""Update a single cache, from a given source.
Args:
cache: A nss_cache.caches.Cache object.
source: A nss_cache.sources.Source object.
incremental: A boolean flag indicating that an incremental update
should be performed if True.
force_write: A boolean flag forcing empty map updates if True.
location: The optional location in the source of this map used by
automount to specify which automount map to get, defaults to None.
Returns:
An int indicating the success of an update (0 == good, fail otherwise).
"""
return_val = 0
incremental = incremental and self.can_do_incremental
timestamp = self.GetModifyTimestamp()
if timestamp is None and incremental is True:
self.log.info(
'Missing previous timestamp, defaulting to a full sync.')
incremental = False
if incremental:
source_map = source.GetMap(self.map_name,
since=timestamp,
location=location)
try:
return_val += self._IncrementalUpdateFromMap(cache, source_map)
except (error.CacheNotFound, error.EmptyMap):
self.log.warning(
'Local cache is invalid, faulting to a full sync.')
incremental = False
# We don't use an if/else, because we give the incremental a chance to
# fail through to a full sync.
if not incremental:
source_map = source.GetMap(self.map_name, location=location)
return_val += self.FullUpdateFromMap(cache, source_map, force_write)
return return_val
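# Illustrative sketch of a typical call into this method (the timestamp
# directory and the pre-built source object are hypothetical, not part of
# this module):
#
#   updater = MapUpdater(config.MAP_PASSWORD, '/var/lib/nsscache', {})
#   cache = cache_factory.Create({'name': 'files'}, config.MAP_PASSWORD)
#   status = updater.UpdateCacheFromSource(cache, source, incremental=True)
#   # status == 0 indicates success; timestamps are written as a side effect.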
def _IncrementalUpdateFromMap(self, cache, new_map):
"""Merge a given map into the provided cache.
Args:
cache: A nss_cache.caches.Cache object.
new_map: A nss_cache.maps.Map object.
Returns:
An int indicating the success of an update (0 == good, fail otherwise).
Raises:
EmptyMap: We're trying to merge into cache with an emtpy map.
"""
return_val = 0
if len(new_map) == 0:
self.log.info('Empty map on incremental update, skipping')
return 0
self.log.debug('loading cache map, may be slow for large maps.')
cache_map = cache.GetMap()
if len(cache_map) == 0:
raise error.EmptyMap
if cache_map.Merge(new_map):
return_val += cache.WriteMap(map_data=cache_map)
if return_val == 0:
self.WriteModifyTimestamp(new_map.GetModifyTimestamp())
else:
self.WriteModifyTimestamp(new_map.GetModifyTimestamp())
self.log.info('Nothing new merged, returning')
# We did an update, even if nothing was written, so write our
# update timestamp unless there is an error.
if return_val == 0:
self.WriteUpdateTimestamp()
return return_val
def FullUpdateFromMap(self, cache, new_map, force_write=False):
"""Write a new map into the provided cache (overwrites).
Args:
cache: A nss_cache.caches.Cache object.
new_map: A nss_cache.maps.Map object.
force_write: A boolean indicating empty maps are okay to write, defaults
to False which means do not write them.
Returns:
0 if succesful, non-zero indicating number of failures otherwise.
Raises:
EmptyMap: Update is an empty map, not raised if force_write=True.
"""
return_val = 0
if len(new_map) == 0 and not force_write:
raise error.EmptyMap(
'Source map empty during full update, aborting. '
'Use --force-write to override.')
return_val = cache.WriteMap(map_data=new_map)
# We did an update, write our timestamps unless there is an error.
if return_val == 0:
self.WriteModifyTimestamp(new_map.GetModifyTimestamp())
self.WriteUpdateTimestamp()
return return_val
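# Sketch of the force_write contract above (a hedged example, not a test):
# writing an empty source map raises error.EmptyMap unless the caller opts in.
#
#   updater.FullUpdateFromMap(cache, empty_map)                    # raises EmptyMap
#   updater.FullUpdateFromMap(cache, empty_map, force_write=True)  # writes anyway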
class AutomountUpdater(updater.Updater):
"""Update an automount map.
Automount maps are a unique case. They are not a single set of map entries,
they are a set of sets. Updating automount maps requires fetching the list
of maps and updating each map as well as the list of maps.
This class is written to re-use the individual update code in the
MapUpdater class.
"""
# automount-specific options
OPT_LOCAL_MASTER = 'local_automount_master'
def __init__(self,
map_name,
timestamp_dir,
cache_options,
automount_mountpoint=None):
"""Initialize automount-specific updater options.
Args:
map_name: A string representing the type of the map we are an Updater for.
timestamp_dir: A string with the directory containing our timestamp files.
cache_options: A dict containing the options for any caches we create.
automount_mountpoint: An optional string containing automount path info.
"""
super(AutomountUpdater,
self).__init__(map_name, timestamp_dir, cache_options,
automount_mountpoint)
self.local_master = False
if self.OPT_LOCAL_MASTER in cache_options:
if cache_options[self.OPT_LOCAL_MASTER] == 'yes':
self.local_master = True
def UpdateFromSource(self, source, incremental=True, force_write=False):
"""Update the automount master map, and every map it points to.
We fetch a full copy of the master map every time, and then use the
MapUpdater to write each map the master map points to, as well
as the master map itself.
During this process, the master map will be modified. It starts
out pointing to other maps in the source, but when written it needs
to point to other maps in the cache instead. For example, an LDAP source
stores this data as:
map_entry.key = /auto
map_entry.location = ou=auto.auto,ou=automounts,dc=example,dc=com
We need to go back to LDAP to get the map in ou=auto.auto, but when it comes
time to write the master map to (for example) a file, we need to write
out the /etc/auto.master file with:
map_entry.key = /auto
map_entry.location = /etc/auto.auto
This is annoying :) Since the keys are fixed, namely /auto is a mountpoint
that isn't going to change format, we expect each Cache implementation that
supports automount maps to support a GetMapLocation() method which returns
the correct cache location from the key.
Args:
source: An nss_cache.sources.Source object.
incremental: A boolean flag indicating that an incremental update
should be performed when True, defaults to True.
force_write: A boolean flag forcing empty map updates when True,
defaults to False.
Returns:
An int indicating success of update (0 == good, fail otherwise).
"""
return_val = 0
self.log.info('Retrieving automount master map.')
master_map = source.GetAutomountMasterMap()
if self.local_master:
self.log.info('Using local master map to determine maps to update.')
# we need the local map to determine which of the other maps to update
cache = cache_factory.Create(self.cache_options,
self.map_name,
automount_mountpoint=None)
try:
local_master = cache.GetMap()
except error.CacheNotFound:
self.log.warning('Local master map specified but no map found! '
'No maps will update.')
return return_val + 1
# update specific maps, e.g. auto.home and auto.auto
for map_entry in master_map:
source_location = map_entry.location # e.g. ou=auto.auto in ldap
automount_mountpoint = map_entry.key # e.g. /auto mountpoint
self.log.debug('looking at %s mount.', automount_mountpoint)
# create the cache to update
cache = cache_factory.Create(
self.cache_options,
self.map_name,
automount_mountpoint=automount_mountpoint)
# update the master map with the location of the map in the cache
# e.g. /etc/auto.auto replaces ou=auto.auto
map_entry.location = cache.GetMapLocation()
# if configured to use the local master map, skip any not defined there
if self.local_master:
if map_entry not in local_master:
self.log.debug('skipping %s, not in %s', map_entry,
local_master)
continue
self.log.info('Updating %s mount.', map_entry.key)
# update this map (e.g. /etc/auto.auto)
update_obj = MapUpdater(self.map_name,
self.timestamp_dir,
self.cache_options,
automount_mountpoint=automount_mountpoint)
return_val += update_obj.UpdateCacheFromSource(
cache, source, incremental, force_write, source_location)
# with sub-maps updated, write modified master map to disk if configured to
if not self.local_master:
# automount_mountpoint=None defaults to master
cache = cache_factory.Create(self.cache_options,
self.map_name,
automount_mountpoint=None)
update_obj = MapUpdater(self.map_name, self.timestamp_dir,
self.cache_options)
return_val += update_obj.FullUpdateFromMap(cache, master_map)
return return_val
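# Illustrative sketch of the master map rewrite performed above (the DN and
# paths are hypothetical): a source entry such as
#
#   map_entry.key = '/auto'
#   map_entry.location = 'ou=auto.auto,ou=automounts,dc=example,dc=com'
#
# has its location replaced by cache.GetMapLocation(), so the master map
# written to disk instead reads:
#
#   /auto /etc/auto.auto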
nsscache-version-0.42/nss_cache/update/map_updater_test.py000066400000000000000000000376661402531134600240540ustar00rootroot00000000000000# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Unit tests for nss_cache/map_updater.py."""
__author__ = ('vasilios@google.com (V Hoffman)',
'jaq@google.com (Jamie Wilkinson)')
import os
import shutil
import tempfile
import unittest
from mox3 import mox
from nss_cache.caches import caches
from nss_cache.caches import files
from nss_cache.sources import source
from nss_cache.caches import cache_factory
from nss_cache import config
from nss_cache import error
from nss_cache.maps import automount
from nss_cache.maps import passwd
from nss_cache.update import map_updater
class SingleMapUpdaterTest(mox.MoxTestBase):
"""Unit tests for FileMapUpdater class."""
def setUp(self):
super(SingleMapUpdaterTest, self).setUp()
self.workdir = tempfile.mkdtemp()
self.workdir2 = tempfile.mkdtemp()
def tearDown(self):
super(SingleMapUpdaterTest, self).tearDown()
shutil.rmtree(self.workdir)
shutil.rmtree(self.workdir2)
def testFullUpdate(self):
"""A full update reads the source, writes to cache, and updates
times."""
original_modify_stamp = 1
new_modify_stamp = 2
updater = map_updater.MapUpdater(config.MAP_PASSWORD, self.workdir, {})
updater.WriteModifyTimestamp(original_modify_stamp)
map_entry = passwd.PasswdMapEntry({'name': 'foo', 'uid': 10, 'gid': 10})
password_map = passwd.PasswdMap([map_entry])
password_map.SetModifyTimestamp(new_modify_stamp)
cache_mock = self.mox.CreateMock(files.FilesCache)
cache_mock.WriteMap(map_data=password_map).AndReturn(0)
source_mock = self.mox.CreateMock(source.Source)
source_mock.GetMap(config.MAP_PASSWORD,
location=None).AndReturn(password_map)
self.mox.ReplayAll()
self.assertEqual(
0,
updater.UpdateCacheFromSource(cache_mock, source_mock, False, False,
None))
self.assertEqual(updater.GetModifyTimestamp(), new_modify_stamp)
self.assertNotEqual(updater.GetUpdateTimestamp(), None)
def testIncrementalUpdate(self):
"""An incremental update reads a partial map and merges it."""
# Unlike in a full update, we create a cache map and a source map, and
# let it merge them. If it goes to write the merged map, we're good.
# Also check that timestamps were updated, as in testFullUpdate above.
def compare_function(map_object):
return len(map_object) == 2
original_modify_stamp = 1
new_modify_stamp = 2
updater = map_updater.MapUpdater(config.MAP_PASSWORD,
self.workdir, {},
can_do_incremental=True)
updater.WriteModifyTimestamp(original_modify_stamp)
cache_map_entry = passwd.PasswdMapEntry({
'name': 'bar',
'uid': 20,
'gid': 20
})
cache_map = passwd.PasswdMap([cache_map_entry])
cache_map.SetModifyTimestamp(original_modify_stamp)
cache_mock = self.mox.CreateMock(caches.Cache)
cache_mock.GetMap().AndReturn(cache_map)
cache_mock.WriteMap(map_data=mox.Func(compare_function)).AndReturn(0)
source_map_entry = passwd.PasswdMapEntry({
'name': 'foo',
'uid': 10,
'gid': 10
})
source_map = passwd.PasswdMap([source_map_entry])
source_map.SetModifyTimestamp(new_modify_stamp)
source_mock = self.mox.CreateMock(source.Source)
source_mock.GetMap(config.MAP_PASSWORD,
location=None,
since=original_modify_stamp).AndReturn(source_map)
self.mox.ReplayAll()
self.assertEqual(
0,
updater.UpdateCacheFromSource(cache_mock,
source_mock,
incremental=True,
force_write=False,
location=None))
self.assertEqual(updater.GetModifyTimestamp(), new_modify_stamp)
self.assertNotEqual(updater.GetUpdateTimestamp(), None)
def testFullUpdateOnMissingCache(self):
"""We fault to a full update if our cache is missing."""
original_modify_stamp = 1
updater = map_updater.MapUpdater(config.MAP_PASSWORD, self.workdir, {})
updater.WriteModifyTimestamp(original_modify_stamp)
source_mock = self.mox.CreateMock(source.Source)
# Try incremental first.
source_mock.GetMap(config.MAP_PASSWORD,
location=None,
since=original_modify_stamp).AndReturn('first map')
# Try full second.
source_mock.GetMap(config.MAP_PASSWORD,
location=None).AndReturn('second map')
updater = map_updater.MapUpdater(config.MAP_PASSWORD,
self.workdir, {},
can_do_incremental=True)
self.mox.StubOutWithMock(updater, 'GetModifyTimestamp')
updater.GetModifyTimestamp().AndReturn(original_modify_stamp)
self.mox.StubOutWithMock(updater, '_IncrementalUpdateFromMap')
# force a cache not found on incremental
updater._IncrementalUpdateFromMap('cache', 'first map').AndRaise(
error.CacheNotFound)
self.mox.StubOutWithMock(updater, 'FullUpdateFromMap')
updater.FullUpdateFromMap(mox.IgnoreArg(), 'second map',
False).AndReturn(0)
self.mox.ReplayAll()
self.assertEqual(
0,
updater.UpdateCacheFromSource('cache',
source_mock,
incremental=True,
force_write=False,
location=None))
def testFullUpdateOnMissingTimestamp(self):
"""We fault to a full update if our modify timestamp is missing."""
updater = map_updater.MapUpdater(config.MAP_PASSWORD, self.workdir, {})
# We do not call WriteModifyTimestamp() so we force a full sync.
source_mock = self.mox.CreateMock(source.Source)
source_mock.GetMap(config.MAP_PASSWORD,
location=None).AndReturn('second map')
updater = map_updater.MapUpdater(config.MAP_PASSWORD, self.workdir, {})
self.mox.StubOutWithMock(updater, 'FullUpdateFromMap')
updater.FullUpdateFromMap(mox.IgnoreArg(), 'second map',
False).AndReturn(0)
self.mox.ReplayAll()
self.assertEqual(
0,
updater.UpdateCacheFromSource('cache', source_mock, True, False,
None))
class MapAutomountUpdaterTest(mox.MoxTestBase):
"""Unit tests for AutomountUpdater class."""
def setUp(self):
super(MapAutomountUpdaterTest, self).setUp()
self.workdir = tempfile.mkdtemp()
def tearDown(self):
super(MapAutomountUpdaterTest, self).tearDown()
os.rmdir(self.workdir)
def testInit(self):
"""An automount object correctly sets map-specific attributes."""
updater = map_updater.AutomountUpdater(config.MAP_AUTOMOUNT,
self.workdir, {})
self.assertEqual(updater.local_master, False)
conf = {map_updater.AutomountUpdater.OPT_LOCAL_MASTER: 'yes'}
updater = map_updater.AutomountUpdater(config.MAP_AUTOMOUNT,
self.workdir, conf)
self.assertEqual(updater.local_master, True)
conf = {map_updater.AutomountUpdater.OPT_LOCAL_MASTER: 'no'}
updater = map_updater.AutomountUpdater(config.MAP_AUTOMOUNT,
self.workdir, conf)
self.assertEqual(updater.local_master, False)
def testUpdate(self):
"""An update gets a master map and updates each entry."""
map_entry1 = automount.AutomountMapEntry()
map_entry2 = automount.AutomountMapEntry()
map_entry1.key = '/home'
map_entry2.key = '/auto'
map_entry1.location = 'ou=auto.home,ou=automounts'
map_entry2.location = 'ou=auto.auto,ou=automounts'
master_map = automount.AutomountMap([map_entry1, map_entry2])
source_mock = self.mox.CreateMock(source.Source)
# return the master map
source_mock.GetAutomountMasterMap().AndReturn(master_map)
# the auto.home cache
cache_home = self.mox.CreateMock(caches.Cache)
# GetMapLocation() is called, and set to the master map map_entry
cache_home.GetMapLocation().AndReturn('/etc/auto.home')
# the auto.auto cache
cache_auto = self.mox.CreateMock(caches.Cache)
# GetMapLocation() is called, and set to the master map map_entry
cache_auto.GetMapLocation().AndReturn('/etc/auto.auto')
# the auto.master cache
cache_master = self.mox.CreateMock(caches.Cache)
self.mox.StubOutWithMock(cache_factory, 'Create')
cache_factory.Create(mox.IgnoreArg(),
'automount',
automount_mountpoint='/home').AndReturn(cache_home)
cache_factory.Create(mox.IgnoreArg(),
'automount',
automount_mountpoint='/auto').AndReturn(cache_auto)
cache_factory.Create(mox.IgnoreArg(),
'automount',
automount_mountpoint=None).AndReturn(cache_master)
updater = map_updater.AutomountUpdater(config.MAP_AUTOMOUNT,
self.workdir, {})
self.mox.StubOutClassWithMocks(map_updater, 'MapUpdater')
updater_home = map_updater.MapUpdater(config.MAP_AUTOMOUNT,
self.workdir, {},
automount_mountpoint='/home')
updater_home.UpdateCacheFromSource(
cache_home, source_mock, True, False,
'ou=auto.home,ou=automounts').AndReturn(0)
updater_auto = map_updater.MapUpdater(config.MAP_AUTOMOUNT,
self.workdir, {},
automount_mountpoint='/auto')
updater_auto.UpdateCacheFromSource(
cache_auto, source_mock, True, False,
'ou=auto.auto,ou=automounts').AndReturn(0)
updater_master = map_updater.MapUpdater(config.MAP_AUTOMOUNT,
self.workdir, {})
updater_master.FullUpdateFromMap(cache_master, master_map).AndReturn(0)
self.mox.ReplayAll()
updater.UpdateFromSource(source_mock)
self.assertEqual(map_entry1.location, '/etc/auto.home')
self.assertEqual(map_entry2.location, '/etc/auto.auto')
def testUpdateNoMaster(self):
"""An update skips updating the master map, and approprate sub maps."""
source_entry1 = automount.AutomountMapEntry()
source_entry2 = automount.AutomountMapEntry()
source_entry1.key = '/home'
source_entry2.key = '/auto'
source_entry1.location = 'ou=auto.home,ou=automounts'
source_entry2.location = 'ou=auto.auto,ou=automounts'
source_master = automount.AutomountMap([source_entry1, source_entry2])
local_entry1 = automount.AutomountMapEntry()
local_entry2 = automount.AutomountMapEntry()
local_entry1.key = '/home'
local_entry2.key = '/auto'
local_entry1.location = '/etc/auto.home'
local_entry2.location = '/etc/auto.null'
local_master = automount.AutomountMap([local_entry1, local_entry2])
source_mock = self.mox.CreateMock(source.Source)
# return the source master map
source_mock.GetAutomountMasterMap().AndReturn(source_master)
# the auto.home cache
cache_home = self.mox.CreateMock(caches.Cache)
# GetMapLocation() is called, and set to the master map map_entry
cache_home.GetMapLocation().AndReturn('/etc/auto.home')
# the auto.auto cache
cache_auto = self.mox.CreateMock(caches.Cache)
# GetMapLocation() is called, and set to the master map map_entry
cache_auto.GetMapLocation().AndReturn('/etc/auto.auto')
# the auto.master cache, which should not be written to
cache_master = self.mox.CreateMock(caches.Cache)
cache_master.GetMap().AndReturn(local_master)
self.mox.StubOutWithMock(cache_factory, 'Create')
cache_factory.Create(mox.IgnoreArg(),
mox.IgnoreArg(),
automount_mountpoint=None).AndReturn(cache_master)
cache_factory.Create(mox.IgnoreArg(),
mox.IgnoreArg(),
automount_mountpoint='/home').AndReturn(cache_home)
cache_factory.Create(mox.IgnoreArg(),
mox.IgnoreArg(),
automount_mountpoint='/auto').AndReturn(cache_auto)
skip = map_updater.AutomountUpdater.OPT_LOCAL_MASTER
updater = map_updater.AutomountUpdater(config.MAP_AUTOMOUNT,
self.workdir, {skip: 'yes'})
self.mox.StubOutClassWithMocks(map_updater, 'MapUpdater')
updater_home = map_updater.MapUpdater(config.MAP_AUTOMOUNT,
self.workdir,
{'local_automount_master': 'yes'},
automount_mountpoint='/home')
updater_home.UpdateCacheFromSource(
cache_home, source_mock, True, False,
'ou=auto.home,ou=automounts').AndReturn(0)
self.mox.ReplayAll()
updater.UpdateFromSource(source_mock)
class AutomountUpdaterMoxTest(mox.MoxTestBase):
def setUp(self):
super(AutomountUpdaterMoxTest, self).setUp()
self.workdir = tempfile.mkdtemp()
def tearDown(self):
super(AutomountUpdaterMoxTest, self).tearDown()
shutil.rmtree(self.workdir)
def testUpdateCatchesMissingMaster(self):
"""Gracefully handle a missing local master maps."""
# use an empty master map from the source, to avoid mocking out already
# tested code
master_map = automount.AutomountMap()
source_mock = self.mox.CreateMockAnything()
source_mock.GetAutomountMasterMap().AndReturn(master_map)
cache_mock = self.mox.CreateMock(caches.Cache)
# raise error on GetMap()
cache_mock.GetMap().AndRaise(error.CacheNotFound)
skip = map_updater.AutomountUpdater.OPT_LOCAL_MASTER
cache_options = {skip: 'yes'}
self.mox.StubOutWithMock(cache_factory, 'Create')
cache_factory.Create(cache_options,
'automount',
automount_mountpoint=None).AndReturn(cache_mock)
self.mox.ReplayAll()
updater = map_updater.AutomountUpdater(config.MAP_AUTOMOUNT,
self.workdir, cache_options)
return_value = updater.UpdateFromSource(source_mock)
self.assertEqual(return_value, 1)
if __name__ == '__main__':
unittest.main()
nsscache-version-0.42/nss_cache/update/updater.py000066400000000000000000000253441402531134600221460ustar00rootroot00000000000000# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Update class, used for manipulating source and cache data.
These classes contain all the business logic for updating cache objects.
They also contain the code for reading, writing, and updating timestamps.
Updater: Base class with setup and timestamp code.
MapUpdater: Class used for all single map caches (see map_updater.py).
AutomountUpdater: Class used for updating automount map caches (see map_updater.py).
"""
__author__ = ('vasilios@google.com (V Hoffman)',
'jaq@google.com (Jamie Wilkinson)')
import calendar
import errno
import logging
import os
import stat
import tempfile
import time
from nss_cache.caches import cache_factory
from nss_cache import error
class Updater(object):
"""Base class which holds the setup and timestamp logic.
This class holds all the timestamp manipulation used by child classes and
callers.
Attributes:
log: logging.Logger instance used for output.
map_name: A string representing the type of the map we are an Updater for.
timestamp_dir: A string with the directory containing our timestamp files.
cache_options: A dict containing the options for any caches we create.
modify_file: A string with our last modified timestamp filename.
update_file: A string with our last updated timestamp filename.
"""
def __init__(self,
map_name,
timestamp_dir,
cache_options,
automount_mountpoint=None,
can_do_incremental=False):
"""Construct an updater object.
Args:
map_name: A string representing the type of the map we are an Updater for.
timestamp_dir: A string with the directory containing our timestamp files.
cache_options: A dict containing the options for any caches we create.
automount_mountpoint: An optional string containing automount path info.
can_do_incremental: Indicates whether or not our source can provide
incremental updates at all.
"""
# Set up a logger
self.log = logging.getLogger(__name__)
# Used to fetch the right maps later on
self.map_name = map_name
# Used for tempfile writing
self.timestamp_dir = timestamp_dir
# Used to create cache(s)
self.cache_options = cache_options
self.can_do_incremental = can_do_incremental
# Calculate our timestamp files
if automount_mountpoint is None:
timestamp_prefix = '%s/timestamp-%s' % (timestamp_dir, map_name)
else:
# turn '/auto' into 'auto', and '/usr/local' into 'usr_local'
automount_mountpoint = automount_mountpoint.lstrip('/')
automount_mountpoint = automount_mountpoint.replace('/', '_')
timestamp_prefix = '%s/timestamp-%s-%s' % (timestamp_dir, map_name,
automount_mountpoint)
self.modify_file = '%s-modify' % timestamp_prefix
self.update_file = '%s-update' % timestamp_prefix
# Timestamp info is cached here
self.modify_time = None
self.update_time = None
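# For illustration (hypothetical values): with timestamp_dir set to
# '/var/lib/nsscache', map_name 'automount' and automount_mountpoint
# '/usr/local', the filenames computed above are
#   /var/lib/nsscache/timestamp-automount-usr_local-modify
#   /var/lib/nsscache/timestamp-automount-usr_local-update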
def _GetCurrentTime(self):
"""Helper method to get the current time, to assist test mocks."""
return int(time.time())
def _ReadTimestamp(self, filename):
"""Return a timestamp from a file.
The timestamp file format is a single line, containing a string in the
ISO-8601 format YYYY-MM-DDThh:mm:ssZ (i.e. UTC time). We do not support
all ISO-8601 formats for reasons of convenience in the code.
Timestamps internal to nss_cache deliberately do not carry milliseconds.
Args:
filename: A String naming the file to read from.
Returns:
An int with the number of seconds since epoch, or None if the timestamp
file doesn't exist or has errors.
"""
if not os.path.exists(filename):
return None
try:
timestamp_file = open(filename, 'r')
timestamp_string = timestamp_file.read().strip()
except IOError as e:
self.log.warning('error opening timestamp file: %s', e)
timestamp_string = None
else:
timestamp_file.close()
self.log.debug('read timestamp %s from file %r', timestamp_string,
filename)
if timestamp_string is not None:
try:
# Append UTC to force the timezone to parse the string in.
timestamp = int(
calendar.timegm(
time.strptime(timestamp_string + ' UTC',
'%Y-%m-%dT%H:%M:%SZ %Z')))
except ValueError as e:
self.log.error('cannot parse timestamp file %r: %s', filename,
e)
timestamp = None
else:
timestamp = None
now = self._GetCurrentTime()
if timestamp and timestamp > now:
self.log.warning('timestamp %r from %r is in the future, now is %r',
timestamp_string, filename, now)
if timestamp - now >= 60 * 60:
self.log.info('Resetting timestamp to now.')
timestamp = now
return timestamp
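# A minimal sketch of the round trip implemented by _ReadTimestamp and
# _WriteTimestamp below (values chosen for illustration):
#
#   time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime(1))
#   # -> '1970-01-01T00:00:01Z'
#   calendar.timegm(time.strptime('1970-01-01T00:00:01Z UTC',
#                                 '%Y-%m-%dT%H:%M:%SZ %Z'))
#   # -> 1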
def _WriteTimestamp(self, timestamp, filename):
"""Write a given timestamp out to a file, converting to the ISO-8601
format.
We convert internal timestamp format (epoch) to ISO-8601 format, i.e.
YYYY-MM-DDThh:mm:ssZ which is basically UTC time, then write it out to a
file.
Args:
timestamp: An int in nss_cache internal timestamp format, i.e. seconds
since epoch (time_t).
filename: A String naming the file to write to.
Returns:
A boolean indicating success of write.
"""
# Make sure self.timestamp_dir exists before calling tempfile.mkstemp
try:
os.makedirs(self.timestamp_dir)
except OSError as e:
if e.errno == errno.EEXIST and os.path.isdir(self.timestamp_dir):
pass # Directory already exists; squelch error
else:
raise
(filedesc, temp_filename) = tempfile.mkstemp(prefix='nsscache-update-',
dir=self.timestamp_dir)
time_string = time.strftime('%Y-%m-%dT%H:%M:%SZ',
time.gmtime(timestamp))
try:
os.write(filedesc, b'%s\n' % time_string.encode())
os.fsync(filedesc)
os.close(filedesc)
except OSError:
os.unlink(temp_filename)
self.log.warning('writing timestamp failed!')
return False
os.chmod(temp_filename,
stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
os.rename(temp_filename, filename)
self.log.debug('wrote timestamp %s to file %r', time_string, filename)
return True
def GetUpdateTimestamp(self):
"""Return the timestamp of the last cache update.
Returns:
An int with the number of seconds since epoch, or None if the timestamp
file doesn't exist or has errors.
"""
if self.update_time is None:
self.update_time = self._ReadTimestamp(self.update_file)
return self.update_time
def GetModifyTimestamp(self):
"""Return the timestamp of the last cache modification.
Args: None
Returns:
An int with the number of seconds since epoch, or None if the timestamp
file doesn't exist or has errors.
"""
if self.modify_time is None:
self.modify_time = self._ReadTimestamp(self.modify_file)
return self.modify_time
def WriteUpdateTimestamp(self, update_timestamp=None):
"""Convenience method for writing the last update timestamp.
Args:
update_timestamp: An int with the number of seconds since epoch,
defaulting to the current time if None.
Returns:
A boolean indicating success of the write.
"""
# blow away our cached value
self.update_time = None
# default to now
if update_timestamp is None:
update_timestamp = self._GetCurrentTime()
return self._WriteTimestamp(update_timestamp, self.update_file)
def WriteModifyTimestamp(self, timestamp):
"""Convenience method for writing the last modify timestamp.
Args:
timestamp: An int with the number of seconds since epoch.
If timestamp is None, performs no action.
Returns:
A boolean indicating success of the write.
"""
if timestamp is None:
return True
# blow away our cached value
self.modify_time = None
return self._WriteTimestamp(timestamp, self.modify_file)
def UpdateFromSource(self, source, incremental=True, force_write=False):
"""Update this map's cache from the source provided.
The MapUpdater expects to fetch a single map from the source
and write/merge it to disk. We create a cache to write to, and then call
UpdateCacheFromSource() with that cache.
Note that AutomountUpdater also calls UpdateCacheFromSource() for each
cache it is writing, hence the distinct separation.
Args:
source: A nss_cache.sources.Source object.
incremental: A boolean flag indicating that an incremental update should
be performed, defaults to True.
force_write: A boolean flag forcing empty map updates, defaults to False.
Returns:
An int indicating success of update (0 == good, fail otherwise).
"""
# Create the single cache we write to
cache = cache_factory.Create(self.cache_options, self.map_name)
return self.UpdateCacheFromSource(cache,
source,
incremental,
force_write,
location=None)
nsscache-version-0.42/nss_cache/update/updater_test.py000066400000000000000000000106241402531134600232000ustar00rootroot00000000000000# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Unit tests for nss_cache/update/base.py."""
__author__ = ('vasilios@google.com (V Hoffman)',
'jaq@google.com (Jamie Wilkinson)')
import os
import shutil
import tempfile
import time
import unittest
from mox3 import mox
from nss_cache import config
from nss_cache.update import updater
class TestUpdater(mox.MoxTestBase):
"""Unit tests for the Updater class."""
def setUp(self):
super(TestUpdater, self).setUp()
self.workdir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.workdir)
super(TestUpdater, self).tearDown()
def testTimestampDir(self):
"""We read and write timestamps to the specified directory."""
update_obj = updater.Updater(config.MAP_PASSWORD, self.workdir, {})
self.updater = update_obj
update_time = 1199149400
modify_time = 1199149200
update_obj.WriteUpdateTimestamp(update_time)
update_obj.WriteModifyTimestamp(modify_time)
update_stamp = update_obj.GetUpdateTimestamp()
modify_stamp = update_obj.GetModifyTimestamp()
self.assertEqual(
update_time,
update_stamp,
msg=('retrieved a different update time than we stored: '
'Expected: %r, observed: %r' % (update_time, update_stamp)))
self.assertEqual(
modify_time,
modify_stamp,
msg=('retrieved a different modify time than we stored: '
'Expected %r, observed: %r' % (modify_time, modify_stamp)))
def testWriteWhenTimestampIsNone(self):
update_obj = updater.Updater(config.MAP_PASSWORD, self.workdir, {})
self.assertEqual(True, update_obj.WriteUpdateTimestamp(None))
self.assertEqual(True, update_obj.WriteModifyTimestamp(None))
def testTimestampDefaultsToNone(self):
"""Missing or unreadable timestamps return None."""
update_obj = updater.Updater(config.MAP_PASSWORD, self.workdir, {})
self.updater = update_obj
update_stamp = update_obj.GetUpdateTimestamp()
modify_stamp = update_obj.GetModifyTimestamp()
self.assertEqual(None,
update_stamp,
msg='update time did not default to None')
self.assertEqual(None,
modify_stamp,
msg='modify time did not default to None')
# touch a file, make it unreadable
update_file = open(update_obj.update_file, 'w')
modify_file = open(update_obj.modify_file, 'w')
update_file.close()
modify_file.close()
os.chmod(update_obj.update_file, 0000)
os.chmod(update_obj.modify_file, 0000)
update_stamp = update_obj.GetUpdateTimestamp()
modify_stamp = update_obj.GetModifyTimestamp()
self.assertEqual(None,
update_stamp,
msg='unreadable update time did not default to None')
self.assertEqual(None,
modify_stamp,
msg='unreadable modify time did not default to None')
def testTimestampInTheFuture(self):
"""Timestamps in the future are turned into now."""
update_obj = updater.Updater(config.MAP_PASSWORD, self.workdir, {})
expected_time = 1
update_time = 3601
update_file = open(update_obj.update_file, 'w')
update_obj.WriteUpdateTimestamp(update_time)
update_file.close()
self.mox.StubOutWithMock(update_obj, '_GetCurrentTime')
update_obj._GetCurrentTime().AndReturn(expected_time)
self.mox.ReplayAll()
self.assertEqual(expected_time, update_obj.GetUpdateTimestamp())
if __name__ == '__main__':
unittest.main()
nsscache-version-0.42/nss_cache/util/000077500000000000000000000000001402531134600176135ustar00rootroot00000000000000nsscache-version-0.42/nss_cache/util/__init__.py000066400000000000000000000000001402531134600217120ustar00rootroot00000000000000nsscache-version-0.42/nss_cache/util/curl.py000066400000000000000000000053031402531134600211330ustar00rootroot00000000000000# Copyright 2010 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Minor curl methods."""
__author__ = 'blaedd@google.com (David MacKinnon)'
import logging
import pycurl
from io import BytesIO
from nss_cache import error
def CurlFetch(url, conn=None, logger=None):
if not logger:
logger = logging
if not conn:
conn = pycurl.Curl()
conn.setopt(pycurl.URL, url)
# pycurl write callbacks receive bytes under Python 3, so buffer as bytes.
conn.body = BytesIO()
conn.headers = BytesIO()
conn.setopt(pycurl.WRITEFUNCTION, conn.body.write)
conn.setopt(pycurl.HEADERFUNCTION, conn.headers.write)
try:
conn.perform()
except pycurl.error as e:
HandleCurlError(e, logger)
raise error.Error(e)
resp_code = conn.getinfo(pycurl.RESPONSE_CODE)
return (resp_code, conn.headers.getvalue(), conn.body.getvalue())
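# Hedged usage sketch (the URL is hypothetical): CurlFetch returns the HTTP
# response code plus the raw header and body buffers.
#
#   code, headers, body = CurlFetch('https://config.example.com/passwd.cache')
#   if code == 200:
#       parse(body)  # 'parse' is a placeholder for caller logic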
def HandleCurlError(e, logger=None):
"""Handle a curl exception.
See http://curl.haxx.se/libcurl/c/libcurl-errors.html for a list of codes.
Args:
e: pycurl.error
logger: logger object
Raises:
ConfigurationError:
PermissionDenied:
SourceUnavailable:
Error:
"""
if not logger:
logger = logging
# pycurl.error carries (code, message) in its args tuple; exceptions are
# not subscriptable under Python 3.
code = e.args[0]
msg = e.args[1]
# Config errors
if code in (pycurl.E_UNSUPPORTED_PROTOCOL, pycurl.E_URL_MALFORMAT,
pycurl.E_SSL_ENGINE_NOTFOUND, pycurl.E_SSL_ENGINE_SETFAILED,
pycurl.E_SSL_CACERT_BADFILE):
raise error.ConfigurationError(msg)
# Possibly transient errors, try again
if code in (pycurl.E_FAILED_INIT, pycurl.E_COULDNT_CONNECT,
pycurl.E_PARTIAL_FILE, pycurl.E_WRITE_ERROR,
pycurl.E_READ_ERROR, pycurl.E_OPERATION_TIMEOUTED,
pycurl.E_SSL_CONNECT_ERROR, pycurl.E_COULDNT_RESOLVE_PROXY,
pycurl.E_COULDNT_RESOLVE_HOST, pycurl.E_GOT_NOTHING):
logger.debug('Possibly transient error: %s', msg)
return
# SSL issues
if code in (pycurl.E_SSL_PEER_CERTIFICATE,):
raise error.SourceUnavailable(msg)
# Anything else
raise error.Error(msg)
nsscache-version-0.42/nss_cache/util/file_formats.py000066400000000000000000000144101402531134600226370ustar00rootroot00000000000000# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Parsing methods for file cache types."""
__author__ = ('jaq@google.com (Jamie Wilkinson)',
'vasilios@google.com (Vasilios Hoffman)')
import logging
from nss_cache.maps import automount
from nss_cache.maps import group
from nss_cache.maps import netgroup
from nss_cache.maps import passwd
from nss_cache.maps import shadow
from nss_cache.maps import sshkey
try:
SetType = set
except NameError:
import sets
SetType = sets.Set
class FilesMapParser(object):
"""A base class for parsing nss_files module cache."""
def __init__(self):
self.log = logging.getLogger(__name__)
def GetMap(self, cache_info, data):
"""Returns a map from a cache.
Args:
cache_info: file like object containing the cache.
data: a Map to populate.
Returns:
A child of Map containing the cache data.
"""
for line in cache_info:
line = line.rstrip('\n')
if not line or line[0] == '#':
continue
entry = self._ReadEntry(line)
if entry is None:
self.log.warning(
'Could not create entry from line %r in cache, skipping',
line)
continue
if not data.Add(entry):
self.log.warning(
'Could not add entry %r read from line %r in cache', entry,
line)
return data
class FilesSshkeyMapParser(FilesMapParser):
"""Class for parsing nss_files module sshkey cache."""
def _ReadEntry(self, entry):
"""Return a SshkeyMapEntry from a record in the target cache."""
entry = entry.split(':')
map_entry = sshkey.SshkeyMapEntry()
# sshkey entries are plain strings, so no type conversion is needed.
map_entry.name = entry[0]
map_entry.sshkey = entry[1]
return map_entry
class FilesPasswdMapParser(FilesMapParser):
"""Class for parsing nss_files module passwd cache."""
def _ReadEntry(self, entry):
"""Return a PasswdMapEntry from a record in the target cache."""
entry = entry.split(':')
map_entry = passwd.PasswdMapEntry()
# maps expect strict typing, so convert to int as appropriate.
map_entry.name = entry[0]
map_entry.passwd = entry[1]
map_entry.uid = int(entry[2])
map_entry.gid = int(entry[3])
map_entry.gecos = entry[4]
map_entry.dir = entry[5]
map_entry.shell = entry[6]
return map_entry
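# For example (values from the unit tests), the line
# 'root:x:0:0:Rootsy:/root:/bin/bash' parses to a PasswdMapEntry with
# name='root', passwd='x', uid=0, gid=0, gecos='Rootsy', dir='/root'
# and shell='/bin/bash'.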
class FilesGroupMapParser(FilesMapParser):
"""Class for parsing a nss_files module group cache."""
def _ReadEntry(self, line):
"""Return a GroupMapEntry from a record in the target cache."""
line = line.split(':')
map_entry = group.GroupMapEntry()
# map entries expect strict typing, so convert as appropriate
map_entry.name = line[0]
map_entry.passwd = line[1]
map_entry.gid = int(line[2])
map_entry.members = line[3].split(',')
return map_entry
class FilesShadowMapParser(FilesMapParser):
"""Class for parsing a nss_files module shadow cache."""
def _ReadEntry(self, line):
"""Return a ShadowMapEntry from a record in the target cache."""
line = line.split(':')
map_entry = shadow.ShadowMapEntry()
# map entries expect strict typing, so convert as appropriate
map_entry.name = line[0]
map_entry.passwd = line[1]
if line[2]:
map_entry.lstchg = int(line[2])
if line[3]:
map_entry.min = int(line[3])
if line[4]:
map_entry.max = int(line[4])
if line[5]:
map_entry.warn = int(line[5])
if line[6]:
map_entry.inact = int(line[6])
if line[7]:
map_entry.expire = int(line[7])
if line[8]:
map_entry.flag = int(line[8])
return map_entry
class FilesNetgroupMapParser(FilesMapParser):
"""Class for parsing a nss_files module netgroup cache."""
def _ReadEntry(self, line):
"""Return a NetgroupMapEntry from a record in the target cache."""
map_entry = netgroup.NetgroupMapEntry()
# the first word is our name, but since the whole line is space delimited
# avoid .split(' ') since groups can have thousands of members.
index = line.find(' ')
if index == -1:
if line:
# empty group is OK, as long as the line isn't blank
map_entry.name = line
return map_entry
raise RuntimeError('Failed to parse entry: %s' % line)
map_entry.name = line[0:index]
# the rest is our entries, and for better or for worse this preserves extra
# leading spaces
map_entry.entries = line[index + 1:]
return map_entry
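# For example (from the unit tests), the line
# 'administrators unix_admins noc_monkeys (-,zero_cool,)' parses to
# name='administrators' with entries='unix_admins noc_monkeys (-,zero_cool,)',
# while a bare 'administrators' line yields entries=''.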
class FilesAutomountMapParser(FilesMapParser):
"""Class for parsing a nss_files module automount cache."""
def _ReadEntry(self, line):
"""Return an AutomountMapEntry from a record in the target cache.
Args:
line: A string from a file cache.
Returns:
An AutomountMapEntry if the line is successfully parsed, None otherwise.
"""
line = line.split()
map_entry = automount.AutomountMapEntry()
try:
map_entry.key = line[0]
if len(line) > 2:
map_entry.options = line[1]
map_entry.location = line[2]
else:
map_entry.location = line[1]
except IndexError:
return None
return map_entry
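# For example (from the unit tests),
# 'scratch -tcp,rw,intr,bg fileserver:/scratch' parses to key='scratch',
# options='-tcp,rw,intr,bg', location='fileserver:/scratch', while a
# two-field line leaves options=None.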
nsscache-version-0.42/nss_cache/util/file_formats_test.py000066400000000000000000000113221402531134600236750ustar00rootroot00000000000000# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Unit tests for nss_cache/util/file_formats.py."""
__author__ = ('jaq@google.com (Jamie Wilkinson)',
'vasilios@google.com (Vasilios Hoffman)')
import unittest
from nss_cache.util import file_formats
class TestFilesUtils(unittest.TestCase):
def testReadPasswdEntry(self):
"""We correctly parse a typical entry in /etc/passwd format."""
parser = file_formats.FilesPasswdMapParser()
file_entry = 'root:x:0:0:Rootsy:/root:/bin/bash'
map_entry = parser._ReadEntry(file_entry)
self.assertEqual(map_entry.name, 'root')
self.assertEqual(map_entry.passwd, 'x')
self.assertEqual(map_entry.uid, 0)
self.assertEqual(map_entry.gid, 0)
self.assertEqual(map_entry.gecos, 'Rootsy')
self.assertEqual(map_entry.dir, '/root')
self.assertEqual(map_entry.shell, '/bin/bash')
def testReadGroupEntry(self):
"""We correctly parse a typical entry in /etc/group format."""
parser = file_formats.FilesGroupMapParser()
file_entry = 'root:x:0:zero_cool,acid_burn'
map_entry = parser._ReadEntry(file_entry)
self.assertEqual(map_entry.name, 'root')
self.assertEqual(map_entry.passwd, 'x')
self.assertEqual(map_entry.gid, 0)
self.assertEqual(map_entry.members, ['zero_cool', 'acid_burn'])
def testReadShadowEntry(self):
"""We correctly parse a typical entry in /etc/shadow format."""
parser = file_formats.FilesShadowMapParser()
file_entry = 'root:$1$zomgmd5support:::::::'
map_entry = parser._ReadEntry(file_entry)
self.assertEqual(map_entry.name, 'root')
self.assertEqual(map_entry.passwd, '$1$zomgmd5support')
self.assertEqual(map_entry.lstchg, None)
self.assertEqual(map_entry.min, None)
self.assertEqual(map_entry.max, None)
self.assertEqual(map_entry.warn, None)
self.assertEqual(map_entry.inact, None)
self.assertEqual(map_entry.expire, None)
self.assertEqual(map_entry.flag, None)
def testReadNetgroupEntry(self):
"""We correctly parse a typical entry in /etc/netgroup format."""
parser = file_formats.FilesNetgroupMapParser()
file_entry = 'administrators unix_admins noc_monkeys (-,zero_cool,)'
map_entry = parser._ReadEntry(file_entry)
self.assertEqual(map_entry.name, 'administrators')
self.assertEqual(map_entry.entries,
'unix_admins noc_monkeys (-,zero_cool,)')
def testReadEmptyNetgroupEntry(self):
"""We correctly parse a memberless netgroup entry."""
parser = file_formats.FilesNetgroupMapParser()
file_entry = 'administrators'
map_entry = parser._ReadEntry(file_entry)
self.assertEqual(map_entry.name, 'administrators')
self.assertEqual(map_entry.entries, '')
def testReadAutomountEntry(self):
"""We correctly parse a typical entry in /etc/auto.* format."""
parser = file_formats.FilesAutomountMapParser()
file_entry = 'scratch -tcp,rw,intr,bg fileserver:/scratch'
map_entry = parser._ReadEntry(file_entry)
self.assertEqual(map_entry.key, 'scratch')
self.assertEqual(map_entry.options, '-tcp,rw,intr,bg')
self.assertEqual(map_entry.location, 'fileserver:/scratch')
def testReadAutomountEntryWithExtraWhitespace(self):
"""Extra whitespace doesn't break the parsing."""
parser = file_formats.FilesAutomountMapParser()
file_entry = 'scratch fileserver:/scratch'
map_entry = parser._ReadEntry(file_entry)
self.assertEqual(map_entry.key, 'scratch')
self.assertEqual(map_entry.options, None)
self.assertEqual(map_entry.location, 'fileserver:/scratch')
def testReadBadAutomountEntry(self):
"""Cope with empty data."""
parser = file_formats.FilesAutomountMapParser()
file_entry = ''
map_entry = parser._ReadEntry(file_entry)
self.assertEqual(None, map_entry)
if __name__ == '__main__':
unittest.main()
nsscache-version-0.42/nss_cache/util/timestamps.py000066400000000000000000000077451402531134600223700ustar00rootroot00000000000000# Copyright 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Timestamp handling routines."""
__author__ = 'jaq@google.com (Jamie Wilkinson)'
import logging
import os.path
import tempfile
import time
import stat
def ReadTimestamp(filename):
"""Return a timestamp from a file.
The timestamp file format is a single line, containing a string in the
ISO-8601 format YYYY-MM-DDThh:mm:ssZ (i.e. UTC time). We do not support
all ISO-8601 formats for reasons of convenience in the code.
Timestamps internal to nss_cache deliberately do not carry milliseconds.
Args:
filename: A String naming the file to read from.
Returns:
A time.struct_time, or None if the timestamp file doesn't
exist or has errors.
"""
if not os.path.exists(filename):
return None
try:
timestamp_file = open(filename, 'r')
timestamp_string = timestamp_file.read().strip()
except IOError as e:
logging.warning('error opening timestamp file: %s', e)
timestamp_string = None
else:
timestamp_file.close()
logging.debug('read timestamp %s from file %r', timestamp_string, filename)
if timestamp_string is not None:
try:
# Append UTC to force the timezone to parse the string in.
timestamp = time.strptime(timestamp_string + ' UTC',
'%Y-%m-%dT%H:%M:%SZ %Z')
except ValueError as e:
logging.error('cannot parse timestamp file %r: %s', filename, e)
timestamp = None
else:
timestamp = None
logging.debug('Timestamp is: %r', timestamp)
now = time.gmtime()
logging.debug(' Now is: %r', now)
if timestamp and timestamp > now:
logging.warning('timestamp %r (%r) from %r is in the future, now is %r',
timestamp_string, time.mktime(timestamp), filename,
time.mktime(now))
if time.mktime(timestamp) - time.mktime(now) >= 60 * 60:
logging.info('Resetting timestamp to now.')
timestamp = now
return timestamp
def WriteTimestamp(timestamp, filename):
"""Write a given timestamp out to a file, converting to the ISO-8601
format.
We convert internal timestamp format (epoch) to ISO-8601 format, i.e.
YYYY-MM-DDThh:mm:ssZ which is basically UTC time, then write it out to a
file.
Args:
timestamp: A struct time.struct_time or time tuple.
filename: A String naming the file to write to.
Returns:
A boolean indicating success of write.
"""
# TODO(jaq): hack
if timestamp is None:
return True
timestamp_dir = os.path.dirname(filename)
(filedesc, temp_filename) = tempfile.mkstemp(prefix='nsscache-update-',
dir=timestamp_dir)
time_string = time.strftime('%Y-%m-%dT%H:%M:%SZ', timestamp)
try:
os.write(filedesc, b'%s\n' % time_string.encode())
os.fsync(filedesc)
os.close(filedesc)
except OSError:
os.unlink(temp_filename)
logging.warning('writing timestamp failed!')
return False
os.chmod(temp_filename,
stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IROTH)
os.rename(temp_filename, filename)
logging.debug('wrote timestamp %s to file %r', time_string, filename)
return True
nsscache-version-0.42/nss_cache/util/timestamps_test.py
# Copyright 2011 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Unit tests for nss_cache/util/timestamps.py."""
__author__ = 'jaq@google.com (Jamie Wilkinson)'
import os
import shutil
import tempfile
import time
import unittest
from mox3 import mox
from nss_cache.util import timestamps
class TestTimestamps(mox.MoxTestBase):
def setUp(self):
super(TestTimestamps, self).setUp()
self.workdir = tempfile.mkdtemp()
def tearDown(self):
super(TestTimestamps, self).tearDown()
shutil.rmtree(self.workdir)
def testReadTimestamp(self):
ts_filename = os.path.join(self.workdir, 'tsr')
ts_file = open(ts_filename, 'w')
ts_file.write('1970-01-01T00:00:01Z\n')
ts_file.close()
ts = timestamps.ReadTimestamp(ts_filename)
self.assertEqual(time.gmtime(1), ts)
def testReadTimestampNonEpoch(self):
# TZ=UTC date -d @1306428781
# Thu May 26 16:53:01 UTC 2011
ts_filename = os.path.join(self.workdir, 'tsr')
ts_file = open(ts_filename, 'w')
ts_file.write('2011-05-26T16:53:01Z\n')
ts_file.close()
ts = timestamps.ReadTimestamp(ts_filename)
self.assertEqual(time.gmtime(1306428781), ts)
def testReadTimestampInFuture(self):
ts_filename = os.path.join(self.workdir, 'tsr')
ts_file = open(ts_filename, 'w')
ts_file.write('2011-05-26T16:02:00Z')
ts_file.close()
now = time.gmtime(1)
self.mox.StubOutWithMock(time, 'gmtime')
time.gmtime().AndReturn(now)
self.mox.ReplayAll()
ts = timestamps.ReadTimestamp(ts_filename)
self.assertEqual(now, ts)
def testWriteTimestamp(self):
ts_filename = os.path.join(self.workdir, 'tsw')
good_ts = time.gmtime(1)
timestamps.WriteTimestamp(good_ts, ts_filename)
self.assertEqual(good_ts, timestamps.ReadTimestamp(ts_filename))
ts_file = open(ts_filename, 'r')
self.assertEqual('1970-01-01T00:00:01Z\n', ts_file.read())
ts_file.close()
if __name__ == '__main__':
unittest.main()
nsscache-version-0.42/nsscache
#!/usr/bin/python3
#
# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Executable frontend to nss_cache."""
__author__ = ('jaq@google.com (Jamie Wilkinson)',
'vasilios@google.com (Vasilios Hoffman)')
import os
import sys
import time
from nss_cache import app
if __name__ == '__main__':
nsscache_app = app.NssCacheApp()
start_time = time.process_time()
return_value = nsscache_app.Run(sys.argv[1:], os.environ)
end_time = time.process_time()
nsscache_app.log.info('Exiting nsscache with value %d runtime %f',
return_value, end_time - start_time)
sys.exit(return_value)
nsscache-version-0.42/nsscache.1
.TH NSSCACHE 1 2021-03-20 "nsscache 0.42" "User Commands"
.SH NAME
nsscache \- synchronise a local NSS cache with an upstream data source
.SH SYNOPSIS
.B nsscache
[\fIglobal options\fR] \fIcommand \fR[\fIcommand options\fR]
.SH DESCRIPTION
.B nsscache
synchronises a local NSS cache against a remote data source.
This approach allows the administrator to separate the network from
the NSS lookup codepath, improving speed and reliability of name
services.
.SH OPTIONS
Global options alter general program behaviour:
.TP
\fB\-v\fR, \fB\-\-verbose\fR
enable verbose output
.TP
\fB\-d\fR, \fB\-\-debug\fR
enable debugging output
.TP
\fB\-c\fR \fIFILE\fR, \fB\-\-config\-file\fR=\fIFILE\fR
read configuration from FILE
.TP
\fB\-\-version\fR
show program's version number and exit
.TP
\fB\-h\fR, \fB\-\-help\fR
show this help message and exit
.SH COMMANDS
.SS update
Performs an update of the configured caches from the configured sources.
.TP
\fB\-f\fR, \fB\-\-full\fR
force a full update from the data source
.TP
\fB\-\-force\fR
force the update, overriding any safeguards and checks that would
otherwise prevent the update from occurring. For example, empty
results from the data source are normally ignored as bogus; this option
instructs the program to override that check and use the empty map
.TP
\fB\-m\fR \fIMAPS\fR, \fB\-\-map\fR=\fIMAPS\fR
NSS map to operate on, can be supplied multiple times
.TP
\fB\-h\fR, \fB\-\-help\fR
show help for the
.B update
command
.SS verify
Perform verification of the built caches and validation of the
system NSS configuration.
.TP
\fB\-m\fR \fIMAPS\fR, \fB\-\-map\fR=\fIMAPS\fR
NSS map to operate on, can be supplied multiple times
.TP
\fB\-h\fR, \fB\-\-help\fR
show help for the
.B verify
command
.SS status
Show the last update time of each configured cache, and other
metrics, optionally in a machine-readable format.
.TP
\fB\-m\fR \fIMAPS\fR, \fB\-\-map\fR=\fIMAPS\fR
NSS map to operate on, can be supplied multiple times
.TP
\fB\-h\fR, \fB\-\-help\fR
show help for the
.B status
command
.SS repair
Verify that the configuration is correct, that the source is
reachable, then perform a full synchronisation of the cache.
.TP
\fB\-m\fR \fIMAPS\fR, \fB\-\-map\fR=\fIMAPS\fR
NSS map to operate on, can be supplied multiple times
.TP
\fB\-h\fR, \fB\-\-help\fR
show help for the
.B repair
command
.SS help
Shows online help for each command.
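.SH EXAMPLES
A few illustrative invocations, assuming the default configuration file;
adjust maps and options to suit your site:
.PP
.nf
    # incremental update of all configured maps
    nsscache update

    # force a full, verbose update of only the passwd and group maps
    nsscache -v update --full -m passwd -m group

    # verify the built caches and the system NSS configuration
    nsscache verify
.fi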
.SH "SEE ALSO"
.TP
\fInsscache.conf\fP(5)
.TP
\fInsswitch.conf\fP(5)
.SH FILES
.TP
\fI\|/etc/nsscache.conf\|\fP
The system-wide configuration file
.TP
\fI\|/etc/nsswitch.conf\|\fP
The system name service switch configuration file
.SH AUTHOR
Written by Jamie Wilkinson (jaq@google.com) and Vasilios Hoffman (vasilios@google.com).
.TP
The source code lives at https://github.com/google/nsscache
.SH COPYRIGHT
Copyright \(co 2007 Google, Inc.
.br
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
nsscache-version-0.42/nsscache.conf
# Example /etc/nsscache.conf - configuration for nsscache
#
# nsscache loads a config file from the environment variable NSSCACHE_CONFIG
#
# By default this is /etc/nsscache.conf
#
# Commented values are overrideable defaults, uncommented values
# require you to set them.
[DEFAULT]
# Default NSS data source module name
source = ldap
# Default NSS data cache module name; 'files' is compatible with the
# libnss-cache NSS module. 'nssdb' is deprecated, and should not be used for
# new installations.
cache = files
# NSS maps to be cached
maps = passwd, group, shadow, netgroup, automount
# Directory to store our update/modify timestamps
timestamp_dir = /var/lib/nsscache
# Lockfile to use for update/repair operations
#lockfile = /var/run/nsscache
# Defaults for specific modules; prefaced with "modulename_"
##
# ldap module defaults.
#
# Enable to connect to Active Directory. If enabled (set to 1),
# default Active Directory attributes will be used for mapping.
# Leave disabled if connecting to openldap.
#ldap_ad = 1
# LDAP URI to query for NSS data
ldap_uri = ldaps://ldap
# Base for LDAP searches
ldap_base = ou=people,dc=example,dc=com
# Default LDAP search filter for maps
ldap_filter = (objectclass=posixAccount)
# Default LDAP search scope
#ldap_scope = one
# Default LDAP BIND DN, empty string is an anonymous bind
#ldap_bind_dn = ""
# Default LDAP password, empty DN and empty password is used for
# anonymous binds
#ldap_bind_password = ""
# Default timelimit for LDAP queries, in seconds.
# The query will block for this number of seconds, or indefinitely if negative.
#ldap_timelimit = -1
# Default number of retry attempts
#ldap_retry_max = 3
# Default delay in between retry attempts
#ldap_retry_delay = 5
# Default setting for requiring tls certificates, one of:
# never, hard, demand, allow, try
#ldap_tls_require_cert = 'demand'
# Default directory for trusted CAs
#ldap_tls_cacertdir = '/usr/share/ssl'
# Default filename for trusted CAs
#ldap_tls_cacertfile = '/usr/share/ssl/cert.pem'
# If you wish to use mTLS, set these to the paths of the TLS certificate and key.
#ldap_tls_certfile = ''
#ldap_tls_keyfile = ''
# Should we issue STARTTLS?
#ldap_tls_starttls = 1
# Default uid-like attribute
#ldap_uidattr = 'uid'
# If connecting to openldap, uidNumber and gidNumber
# will be used for mapping. If enabled (set to 1),
# the relative identifier (RID) will be used instead.
# Consider using this for Samba4 AD.
#ldap_use_rid = 0
# Default Offset option to map uidNumber and gidNumber to higher number.
#ldap_offset = 10000
# A Python regex to extract uid components from the uid-like attribute.
# All matching groups are concatenated without spaces.
# For example: '(.*)@example.com' would return a uid to the left of
# the @example.com domain. Default is no regex.
#ldap_uidregex = ''
# A Python regex to extract group member components from the member or
# memberOf attributes. All matching groups are concatenated without spaces.
# For example: '(.*)@example.com' would return a member without
# the @example.com domain. Default is no regex.
#ldap_groupregex = ''
# Replace all users' shells with the specified one.
# Enable for Active Directory since the loginShell
# attribute is not present by default.
#ldap_override_shell='/bin/bash'
# Set directory for all users in passwd under /home.
#ldap_home_dir = 1
# Default uses rfc2307 schema. If rfc2307bis (groups stored as a list of DNs
# in 'member' attr), set this to 1
#ldap_rfc2307bis = 0
# Default uses rfc2307 schema. If rfc2307bis_alt (groups stored as a list of DNs
# in 'uniqueMember' attr), set this to 1
#ldap_rfc2307bis_alt = 0
# Debug logging
#ldap_debug = 3
# SASL
# Use SASL for authentication
#ldap_use_sasl = False
# SASL mechanism. Only 'gssapi' is supported now
#ldap_sasl_mech = 'gssapi'
#ldap_sasl_authzid = ''
##
# nssdb module defaults
# Directory to store nssdb databases. Current libnss_db code requires
# the path below
nssdb_dir = /var/lib/misc
# Path to `makedb', supplied by the nss_db module
#nssdb_makedb = /usr/bin/makedb
##
# files module defaults
# Directory to store the plain text files
files_dir = /etc
# Suffix used on the files module database files
files_cache_filename_suffix = cache
###
# Optional per-map sections, if present they will override the above
# defaults. The examples below show you some common values to override
#
# [passwd]
#
# ldap_base = ou=people,dc=example,dc=com
[group]
ldap_base = ou=group,dc=example,dc=com
ldap_filter = (objectclass=posixGroup)
# If ldap_nested_groups is enabled, any groups that are members of other
# groups will be expanded recursively.
# Note: This will only work with full updates. Incremental updates will not
# propagate changes in child groups to their parents.
# ldap_nested_groups = 1
[shadow]
ldap_filter = (objectclass=shadowAccount)
[netgroup]
ldap_base = ou=netgroup,dc=example,dc=com
ldap_filter = (objectclass=nisNetgroup)
files_cache_filename_suffix =
[automount]
ldap_base = ou=automounts,dc=example,dc=com
files_cache_filename_suffix =
cache = files
# Files module has an option that lets you leave the local master map alone
# (e.g. /etc/auto.master) so that maps can be enabled/disabled locally.
#
# This also causes nsscache to limit automount updates to only the maps which
# are defined both in the local master map (/etc/auto.master) and in the source
# master map -- versus pulling local copies of all maps defined in the source,
# regardless. Effectively this makes for local control of which automount maps
# are used and updated.
#
# files_local_automount_master = no
##
## SSH Keys stored in LDAP
##
# For SSH keys stored in LDAP under the sshPublicKey attribute.
# sshd_config should contain a config option for AuthorizedKeysCommand that
# runs a script like:
#
# awk -F: -v name="$1" '$0 ~ name { print $2 }' /etc/sshkey.cache | \
# tr -d "[']" | \
# sed -e 's/, /\n/g'
#
# A featureful example is in examples/authorized-keys-command.py
#[sshkey]
#
#ldap_base = ou=people,dc=yourdomain,dc=com
[suffix]
prefix = ""
suffix = ""
nsscache-version-0.42/nsscache.conf.5
.TH NSSCACHE.CONF 5 2021-03-20 "nsscache 0.42" "File formats"
.SH NAME
nsscache.conf - NSS local cache synchroniser configuration file
.SH SYNOPSIS
.B /etc/nsscache.conf
.SH DESCRIPTION
.B nsscache
synchronises a local NSS cache, and other databases, against a remote
data source. This approach allows the administrator to separate the
network from the NSS lookup codepath, improving speed and reliability
of name services.
The nsscache configuration file consists of one DEFAULT section,
followed by zero or more map-specific configuration sections. The
file format is similar to that of ".ini" files.
The DEFAULT section must provide at least one
\fBsource\fP
keyword, specifying the data source to use, one
\fBcache\fP
keyword, specifying the means in which the cache data will be stored
locally, one
\fBmaps\fP
keyword, specifying which NSS maps should be cached, and one
\fBtimestamp_dir\fP
keyword, specifying the location of the timestamps used for
incremental updates.
Additional global defaults, such as LDAP search parameters, or the
filesystem location of the cache, may also be included in the DEFAULT
section.
Additional sections may be included that allow per-map overrides to
configuration options. For example, one might specify their global
LDAP search base as
\fBou=People\fP
but want to override that for the
\fIgroup\fP
mapping as
\fBou=Groups\fP
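A minimal sketch of such an override, using illustrative values:
.PP
.nf
    [DEFAULT]
    ldap_base = ou=People,dc=example,dc=com

    [group]
    ldap_base = ou=Groups,dc=example,dc=com
.fi
.PP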
Apart from the \fIsource\fP, \fIcache\fP, and \fImaps\fP configuration
options, all options are prefixed by the name of the module that they
configure.
A complete list of configuration options follows.
.SH DEFAULT-only OPTIONS
.TP
\fBsource\fP
Specifies the source to use to retrieve NSS data from.
Valid Options:
.I ldap, s3
.TP
.B cache
Specifies the cache method to use to store the data, which will be
queried by the NSS itself.
Valid options:
.I files
Store in a plain text file, similar in format to
.I /etc/passwd.
If the files-module option
.I files_cache_filename_suffix
is also set to
.B cache
then not only will the files be created with a \fB.cache\fP suffix, but also an index file will be written alongside, for use with the
.I nss-cache
NSS module. (See https://github.com/google/libnss-cache.)
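For illustration, with
.B files_dir
set to /etc, the passwd map would be stored as /etc/passwd.cache, alongside
index files such as /etc/passwd.cache.ixname and /etc/passwd.cache.ixuid.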
.I nssdb
Store in a Berkeley DB file, for use with the
.I nss_db
NSS module. Please note that this option is deprecated in favour of the
.I files
cache method.
.TP
.B maps
Specifies the names of the maps that will be queried and cached by
.I nsscache
Valid options:
.I passwd
.I group
.I shadow
.I netgroup
.I automount
.I sshkey
.TP
.B timestamp_dir
Specifies the directory where update and modify timestamps are stored.
.SH ldap SOURCE OPTIONS
These options configure the behaviour of the
.I ldap
source.
.TP
.B ldap_ad
Set to 1 if connecting to Active Directory. If enabled, default Active Directory
attributes will be used for mapping. Leave disabled if connecting to
.I openldap.
.TP
.B ldap_uri
The LDAP URI to connect to.
.TP
.B ldap_base
The base to perform LDAP searches under.
.TP
.B ldap_filter
The search filter to use when querying.
.TP
.B ldap_scope
The search scope to use. Defaults to
.I one
Valid options:
.I sub[tree]
.I one[level]
.I base
.TP
.B ldap_bind_dn
The bind DN to use when connecting to LDAP. Empty string is an
anonymous bind. Defaults to the empty string.
.TP
.B ldap_bind_password
The bind password to use when connecting to LDAP. Empty string is
used for anonymous binds. Defaults to the empty string.
.TP
.B ldap_timelimit
Timelimit in seconds for search results to return. \-1 means no limit.
Defaults to \-1.
.TP
.B ldap_retry_max
Number of retries on soft failures before giving up. Defaults to 3.
.TP
.B ldap_retry_delay
Delay in seconds between retries. Defaults to 5.
.TP
.B ldap_tls_require_cert
Sets expectations for SSL certificates, using TLS. One
of 'never', 'hard', 'demand', 'allow', or 'try' ('demand'
is the default). See \fBldap.conf\fP(5) for more information.
.TP
.B ldap_tls_cacertdir
Directory for trusted CA certificates. By default, the system's
default CA certificate directory will be used.
.TP
.B ldap_tls_cacertfile
Filename containing trusted CA certificates.
.TP
.B ldap_tls_certfile
Filename of an optional LDAP client certificate. If specified,
\fBldap_tls_keyfile\fP must also be specified.
.TP
.B ldap_tls_keyfile
Filename of an optional LDAP client key. Only plaintext (unencrypted) keys are
currently supported. If specified, \fBldap_tls_certfile\fP must also be
specified.
.TP
.B ldap_tls_starttls
Set to 1 to enable STARTTLS. Leave absent to disable.
.TP
.B ldap_uidattr
The uid-like attribute in your directory. Defaults to uid.
.TP
.B ldap_use_rid
If enabled (set to 1), the relative identifier (RID) will be used for mapping.
By default \fBuidNumber\fP and \fBgidNumber\fP will be mapped when connecting to OpenLDAP with a POSIX-like schema.
When using Samba4 AD, these attributes won't exist.
Leave disabled for default.
It has no effect if the option \fBldap_ad\fP is enabled.
.TP
.B ldap_offset
Offset added to uidNumber and gidNumber to map them into a higher range.
This can be useful to avoid conflicts with already existing uidNumber and gidNumber values.
.TP
.B ldap_uidregex
A Python regex to extract uid components from the uid-like attribute.
All matching groups are concatenated without spaces.
For example: '(.*)@example.com' would return a uid to the left of
the @example.com domain. Default is no regex.
.TP
.B ldap_groupregex
A Python regex to extract group member components from the member or
memberOf attributes. All matching groups are concatenated without spaces.
For example: '(.*)@example.com' would return a member without
the @example.com domain. Default is no regex.
.TP
.B ldap_nested_groups
To enable expansion of nested groups, set this to 1. Note that this only
applies during a full sync, and incremental synchronization should not be used
if this is set.
.TP
.B ldap_override_shell
If specified, set every user's login shell to the given one. May be
useful on bastion hosts or to ensure uniformity. Enable this for
Active Directory, since the loginShell attribute is not present by default.
.TP
.B ldap_home_dir
Set a home directory for all users in passwd. If enabled (set to 1),
all users will have their home directory in
.I /home.
.TP
.B ldap_rfc2307bis
Default uses rfc2307 schema. If rfc2307bis (groups stored as a list of DNs
in 'member' attr), set this to 1.
.TP
.B ldap_debug
Sets the debug level for the underlying C library. Defaults to no logging.
.SH s3 SOURCE OPTIONS
These options configure the behaviour of the
.I s3
source.
.TP
.B s3_bucket
AWS S3 bucket containing
.I passwd, group, shadow
objects.
.B boto3
python package must be installed to use this source type.
It is highly recommended to use the s3 source only with an AWS IAM role,
attached to the EC2 instance, that grants read-only access to the bucket;
accordingly, no access key or secret configuration options are provided.
Credentials may still be supplied via ~/.aws/config and ~/.aws/credentials,
since the boto3 library reads those files itself.
.TP
.B s3_passwd_object
Object containing
.B passwd
array of records in json format. E.g.
.I [{"Value": {"gid": 10000, "uid": 10000}, "Key": "user1"}].
Valid attributes:
.I "comment", "home", "shell", "passwd", "gid", "uid"
.TP
.B s3_group_object
Object containing
.B group
array of records in json format. E.g.
.I [{"Value": {"gid": 20000, "members": "user1\\nuser2\\nuser3"}, "Key": "group1"}].
Valid attributes:
.I "gid", "members"
Members should be a sequence of usernames separated by \\n (see the example above)
.TP
.B s3_shadow_object
Object containing
.B shadow
array of records in json format. E.g.
.I [{"Value": {"passwd": "*"}, "Key": "user1"}].
Valid attributes:
.I "passwd", "lstchg", "min", "max", "warn", "inact", "expire"
.SH nssdb CACHE OPTIONS
These options configure the behaviour of the
.I nssdb
cache.
.TP
.B nssdb_dir
Directory to store the Berkeley DB databases. Defaults to the current
directory. Note that
.B nss_db
hardcodes the path to
.I /var/lib/misc
on Debian systems, and
.I /var/db
on Red Hat systems.
.TP
.B nssdb_makedb
Path to the \fBmakedb\fP(1) command, which is used by the nssdb cache code
to ensure that the Berkeley DB version created by the module matches
that expected by the \fBnss_db\fP NSS module.
.SH files CACHE OPTIONS
These options configure the behaviour of the
.I files
cache.
.TP
.B files_dir
Directory location to store the plain text files in. Defaults to the
current directory.
.TP
.B files_cache_filename_suffix
A suffix appended to the cache filename to differentiate it from, say,
system NSS databases. Defaults to '.cache'.
.TP
.B files_local_automount_master
A yes/no field only used for automount maps. A 'yes' value will cause nsscache
to update the auto.master file with the master map from the source. A 'no'
value will cause nsscache to leave auto.master alone, allowing the system to
manage this file in other ways. When set to 'no', nsscache will only update
other automount maps defined both locally and in the source. Defaults to 'yes'.
.TP
.B prefix
A regular expression to capture a prefix, or mount point.
.TP
.B suffix
A regular expression to modify the prefix.
.SH EXAMPLE
A typical example might look like this:
[DEFAULT]
source = ldap
cache = nssdb
maps = passwd, group, shadow
ldap_uri = ldap://ldap.example.com
ldap_base = ou=People,dc=example,dc=com
ldap_filter = (objectclass=posixAccount)
nssdb_dir = /var/lib/misc
[group]
ldap_base = ou=Group,dc=example,dc=com
ldap_filter = (objectclass=posixGroup)
ldap_nested_groups = 1
[shadow]
ldap_filter = (objectclass=posixAccount)
And a complementary \fI\|/etc/nsswitch.conf\|\fP might look like this:
passwd: files db
group: files db
shadow: files db
.SH FILES
.TP
\fI\|/etc/nsscache.conf\|\fP
The system-wide nsscache configuration file
.SH "SEE ALSO"
.TP
\fInsscache\fP(1)
.TP
\fInsswitch.conf\fP(5)
The system name service switch configuration file
.TP
\fIldap.conf\fP(5)
Details on LDAP configuration options exposed by the LDAP client libraries.
.SH AUTHOR
Written by Jamie Wilkinson (jaq@google.com) and Vasilios Hoffman (vasilios@google.com).
.TP
The source code lives at https://github.com/google/nsscache
.SH COPYRIGHT
Copyright \(co 2007 Google, Inc.
.br
This is free software; see the source for copying conditions. There is NO
warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
nsscache-version-0.42/nsscache.cron
# /etc/cron.d/nsscache: crontab entries for the nsscache package
#
# Example crontab for nsscache.
# Replace the %% text with real values before deploying.
SHELL=/bin/sh
PATH=/usr/bin
MAILTO=""
NSSCACHE=/usr/bin/nsscache
# disable /etc/ldap.conf defaults like the 2 minute timeout.
LDAPNOINIT=1
# update the cache every 15 minutes
%MINUTE15%-59/15 * * * * root $NSSCACHE -v update --sleep %SECONDS%
# perform a full update once a day, at a time chosen during package
# configuration (between 2AM and 5AM)
%MINUTE% %HOUR% * * * root $NSSCACHE -v update --full
nsscache-version-0.42/nsscache.sh
#!/usr/bin/env bash
_nsscache ()
{
local cur prev options commands update_options other_options maps
COMPREPLY=()
cur="${COMP_WORDS[COMP_CWORD]}"
prev="${COMP_WORDS[COMP_CWORD-1]}"
options='-v --verbose -d --debug -c --config-file --version -h --help'
commands='update verify status repair help'
update_options='-f --full --force -m --map -h --help'
other_options='-m --map -h --help'
maps="passwd group shadow"
case "${COMP_CWORD}" in
1)
COMPREPLY=( $(compgen -W "${options} ${commands}" -- "${cur}" ))
;;
2)
case "${prev}" in
update)
COMPREPLY=( $( compgen -W "${update_options}" -- "${cur}" ))
return 0
;;
verify|status|repair)
COMPREPLY=( $( compgen -W "${other_options}" -- "${cur}" ))
return 0
;;
-c|--config-file)
COMPREPLY=( $( compgen -o plusdirs -f -- "${cur}" ))
return 0
;;
-h|--help|--version|help)
return 0
;;
-v|--verbose|-d|--debug )
COMPREPLY=( $( compgen -W "${commands}" -- "${cur}" ))
return 0
;;
esac
;;
3)
case "${prev}" in
update)
COMPREPLY=( $( compgen -W "${update_options}" -- "${cur}" ))
return 0
;;
verify|status|repair)
COMPREPLY=( $( compgen -W "${other_options}" -- "${cur}" ))
return 0
;;
-m|--map)
COMPREPLY=( $( compgen -W "${maps}" -- "${cur}" ))
return 0
;;
-f|--full|--force)
COMPREPLY=()
return 0
;;
*)
COMPREPLY=( $( compgen -W "${commands}" -- "${cur}" ))
return 0
;;
esac
;;
4)
case "${prev}" in
update)
COMPREPLY=( $( compgen -W "${update_options}" -- "${cur}" ))
return 0
;;
verify|status|repair)
COMPREPLY=( $( compgen -W "${other_options}" -- "${cur}" ))
return 0
;;
-m|--map)
COMPREPLY=( $( compgen -W "${maps}" -- "${cur}" ))
return 0
;;
-f|--full|--force)
COMPREPLY=()
return 0
;;
*)
COMPREPLY=( $( compgen -W "${commands}" -- "${cur}" ))
return 0
;;
esac
;;
5)
case "${prev}" in
-m|--map)
COMPREPLY=( $( compgen -W "${maps}" -- "${cur}" ))
return 0
;;
esac
;;
*)
COMPREPLY=()
return 0
;;
esac
}
complete -o filenames -F _nsscache nsscache
# ex: filetype=sh
nsscache-version-0.42/nsscache.spec
Summary: Asynchronously synchronise local NSS databases with remote directory services
Name: nsscache
Version: 0.8.3
Release: 1
License: GPLv2
Group: System Environment/Base
Packager: Oliver Hookins
URL: http://code.google.com/p/nsscache/
Source: http://nsscache.googlecode.com/files/%{name}-%{version}.tar.gz
Requires: python, python-ldap
BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n)
BuildArchitectures: noarch
BuildRequires: python, python-ldap
%description
nsscache is a Python library and a commandline frontend to that library that
synchronises a local NSS cache against a remote directory service, such as
LDAP.
%prep
%setup -q
%build
CFLAGS="%{optflags}" %{__python} setup.py build
%install
%{__rm} -rf %{buildroot}
%{__python} setup.py install --root="%{buildroot}" --prefix="%{_prefix}"
%clean
%{__rm} -rf %{buildroot}
%files
%defattr(-, root, root, 0755)
%config /etc/nsscache.conf
%exclude /usr/bin/runtests.*
/usr/bin/nsscache
/usr/lib/python2.6/site-packages/nss_cache/
%changelog
* Tue Jan 06 2009 Oliver Hookins - 0.8.3-1
- Initial packaging
nsscache-version-0.42/release.sh
#!/bin/bash -e
if [ -z "$1" ]; then
CURRENT_VERSION=$(PYTHONPATH=. python3 -c 'import nss_cache; print(nss_cache.__version__)')
a=( ${CURRENT_VERSION//./ } )
(( a[${#a[@]}-1] += 1 ))
NEW_VERSION=$(IFS=.; echo "${a[*]}")
else
NEW_VERSION=$1
fi
echo Minting $NEW_VERSION
DATE=$(date +%Y-%m-%d)
sed -i "1c\.TH NSSCACHE 1 $DATE \"nsscache $NEW_VERSION\" \"User Commands\"" nsscache.1
sed -i "1c\.TH NSSCACHE.CONF 5 $DATE \"nsscache $NEW_VERSION\" \"File formats\"" nsscache.conf.5
sed -i "s/__version__ = '.*'/__version__ = '$NEW_VERSION'/" nss_cache/__init__.py
git commit -a -m "Mint version $NEW_VERSION"
git tag -s "version/$NEW_VERSION" -m "version/$NEW_VERSION"
nsscache-version-0.42/requirements.txt
pytest
boto3
pycurl==7.43.0.6
python3-ldap
python-ldap
bsddb3
mox3
nsscache-version-0.42/rpm/postinst.sh
if [ -f /etc/nsscache.conf.rpmsave ]; then
cp -a /etc/nsscache.conf /etc/nsscache.conf.rpmnew
mv -f /etc/nsscache.conf.rpmsave /etc/nsscache.conf
fi
nsscache-version-0.42/rpm/preinst.sh
if [ -f /etc/nsscache.conf ]; then
mv /etc/nsscache.conf /etc/nsscache.conf.rpmsave
fi
nsscache-version-0.42/setup.cfg
[bdist_rpm]
release = 1
doc_files = COPYING
THANKS
nsscache.cron
requires = python-pycurl
python3-ldap
pre-install = rpm/preinst.sh
post-install = rpm/postinst.sh
[aliases]
test=pytest
[yapf]
based_on_style = google
[pylint]
nsscache-version-0.42/setup.py
#!/usr/bin/python
#
# Copyright 2007 Google Inc.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
"""Distutils setup for nsscache tool and nss_cache package."""
__author__ = 'jaq@google.com (Jamie Wilkinson)'
from setuptools import setup, find_packages
import nss_cache
setup(
name='nsscache',
version=nss_cache.__version__,
author='Jamie Wilkinson',
author_email='jaq@google.com',
url='https://github.com/google/nsscache',
description='nsscache tool and library',
license='GPL',
long_description=
"""nsscache is a Python library and a commandline frontend to that library
that synchronises a local NSS cache against a remote directory service, such
as LDAP.""",
classifiers=[
'Development Status :: 4 - Beta', 'Environment :: Console',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: GPL', 'Operating System :: POSIX',
'Programming Language :: Python', 'Topic :: System'
],
packages=[
'nss_cache', 'nss_cache.caches', 'nss_cache.maps', 'nss_cache.util',
'nss_cache.update', 'nss_cache.sources'
],
scripts=['nsscache'],
data_files=[('config', ['nsscache.conf'])],
python_requires='~=3.4',
setup_requires=['pytest-runner'],
tests_require=['pytest', 'mox3', 'pytest-cov', 'python-coveralls'],
extras_require={
'bdb': ['bsddb3'],
'ldap': ['python3-ldap', 'python-ldap'],
'http': ['pycurl'],
's3': ['boto3'],
'consul': ['pycurl'],
},
)
nsscache-version-0.42/tests/default.ldif
dn: dc=example,dc=com
dc: example
objectClass: dcObject
objectClass: organization
o: Example, Inc.
dn: ou=people,dc=example,dc=com
objectclass: top
objectclass: organizationalUnit
ou: people
dn: ou=group,dc=example,dc=com
objectclass: top
objectclass: organizationalUnit
ou: groups
dn: uid=jaq,ou=people,dc=example,dc=com
objectClass: top
objectClass: account
objectClass: posixAccount
objectClass: shadowAccount
cn: Jamie Wilkinson
uid: jaq
userPassword: {CRYPT}e1y7ep455\//0rD
homeDirectory: /home/jaq
uidNumber: 37
gidNumber: 31337
loginShell: /bin/zsh
shadowLastChange: 0
shadowMax: 0
shadowWarning: 0
dn: cn=hax0rs,ou=group,dc=example,dc=com
objectClass: posixGroup
cn: hax0rs
gidNumber: 31337
memberUid: jaq
nsscache-version-0.42/tests/nsscache.conf
# Example /etc/nsscache.conf - configuration for nsscache
#
# nsscache loads a config file from the environment variable NSSCACHE_CONFIG
#
# By default this is /etc/nsscache.conf
#
# Commented values are overrideable defaults, uncommented values
# require you to set them.
[DEFAULT]
# Default NSS data source module name
source = ldap
# Default NSS data cache module name; 'files' is compatible with the
# libnss-cache NSS module. 'nssdb' is deprecated, and should not be used for
# new installations.
cache = files
# NSS maps to be cached
maps = passwd, group, shadow
# Directory to store our update/modify timestamps
timestamp_dir = /var/lib/nsscache
# Lockfile to use for update/repair operations
lockfile = /var/run/nsscache
# Defaults for specific modules; prefaced with "modulename_"
##
# ldap module defaults.
#
# Enable to connect to Active Directory.
# Leave disabled if connecting to openldap or slapd
ldap_ad = 1
# LDAP URI to query for NSS data
ldap_uri = ldaps://local.domain
# Default LDAP search scope
ldap_scope = sub
# Default LDAP BIND DN, empty string is an anonymous bind
ldap_bind_dn = administrator@local.domain
# Default LDAP password, empty DN and empty password is used for
# anonymous binds
ldap_bind_password = 4dm1n_s3cr36_v3ry_c0mpl3x
# Default setting for requiring tls certificates, one of:
# never, hard, demand, allow, try
ldap_tls_require_cert = 'never'
# Default directory for trusted CAs
ldap_tls_cacertdir = '/etc/ssl/certs/'
# Default filename for trusted CAs
ldap_tls_cacertfile = '/etc/ssl/certs/ad.pem'
# Replace all users' shells with the specified one.
ldap_override_shell = '/bin/bash'
# Set directory for all users in passwd under /home.
ldap_home_dir = 1
# Debug logging
ldap_debug = 3
# Directory to store nssdb databases. Current libnss_db code requires
# the path below
nssdb_dir = /var/lib/misc
##
# files module defaults
# Directory to store the plain text files
files_dir = /etc
# Suffix used on the files module database files
files_cache_filename_suffix = cache
###
# Optional per-map sections, if present they will override the above
# defaults. The examples below show you some common values to override
#
[passwd]
ldap_base = DC=local,DC=domain
ldap_filter = (&(objectCategory=User)(memberOf=CN=Admins,CN=Users,DC=local,DC=domain))
[group]
ldap_base = DC=local,DC=domain
ldap_filter = (|(&(objectCategory=Group)(CN=Admins))(&(objectCategory=User)(memberOf=CN=Admins,CN=Users,DC=local,DC=domain)))
[shadow]
ldap_base = DC=local,DC=domain
ldap_filter = (&(objectCategory=User)(memberOf=CN=Admins,CN=Users,DC=local,DC=domain))
[suffix]
prefix = ""
suffix = ""
nsscache-version-0.42/tests/samba.sh
#!/bin/bash -eux
export DEBIAN_FRONTEND=noninteractive
apt-get update
PACKAGES=(
'samba'
'samba-dsdb-modules'
'samba-vfs-modules'
'winbind'
'heimdal-clients'
)
# Install needed packages
for package in "${PACKAGES[@]}"; do
apt-get -y install "$package"
done
# Samba must not be running during the provisioning
service smbd stop
service nmbd stop
service winbind stop
service samba-ad-dc stop
# Domain provision
rm -fr /etc/samba/smb.conf
/usr/bin/samba-tool domain provision --realm=LOCAL.DOMAIN --domain=LOCAL --server-role=dc --dns-backend=SAMBA_INTERNAL --adminpass='4dm1n_s3cr36_v3ry_c0mpl3x' --use-rfc2307 -d 1
# Start samba-ad-dc service only
rm -fr /etc/systemd/system/samba-ad-dc.service
service samba-ad-dc start
# Add users and groups
/usr/bin/samba-tool user create user1 --use-username-as-cn --surname=Test1 --given-name=User1 --random-password
/usr/bin/samba-tool user create user2 --use-username-as-cn --surname=Test2 --given-name=User2 --random-password
/usr/bin/samba-tool user create user3 --use-username-as-cn --surname=Test3 --given-name=User3 --random-password
/usr/bin/samba-tool user create user4 --use-username-as-cn --surname=Test4 --given-name=User4 --random-password
/usr/bin/samba-tool user create user5 --use-username-as-cn --surname=Test5 --given-name=User5 --random-password
# Add some groups
/usr/bin/samba-tool group add IT
/usr/bin/samba-tool group add Admins
/usr/bin/samba-tool group add Devs
/usr/bin/samba-tool group add DevOps
# Create members
/usr/bin/samba-tool group addmembers IT Admins,Devs,DevOps,user1
/usr/bin/samba-tool group addmembers Admins user2,user3
/usr/bin/samba-tool group addmembers Devs user4
/usr/bin/samba-tool group addmembers DevOps user5
# Add AD certificate
echo -n | openssl s_client -connect localhost:636 | sed -ne '/-BEGIN CERTIFICATE-/,/-END CERTIFICATE-/p' > /usr/local/share/ca-certificates/ad.crt
update-ca-certificates
# Add cache to nsswitch
cat > '/etc/nsswitch.conf' << EOF
passwd: files cache
group: files cache
shadow: files cache
gshadow: files
hosts: files dns
networks: files
protocols: db files
services: db files
ethers: db files
rpc: db files
netgroup: nis
EOF
nsscache-version-0.42/tests/slapd-nsscache.conf.tmpl
# $Id: //depot/ops/src/nsscache/nsscache.conf.ldap#4 $
#
# See /usr/share/doc/nsscache/examples/nsscache.conf for
# detailed information about configuration file formats, defaults,
# and options.
[DEFAULT]
source = @source@
cache = @cache@
maps = passwd, group, shadow
lockfile = @workdir@/lock
ldap_uri = ldapi://@workdir@/ldapi
ldap_base = ou=people,dc=example,dc=com
ldap_filter = (objectclass=posixAccount)
files_cache_filename_suffix = cache
nssdb_dir = @workdir@/nssdb
files_dir = @workdir@/files
timestamp_dir = @workdir@/ldap-timestamps-@cache@
[group]
ldap_base = ou=group,dc=example,dc=com
ldap_filter = (objectclass=posixGroup)
[shadow]
ldap_filter = (objectclass=shadowAccount)
nsscache-version-0.42/tests/slapd-regtest
#!/bin/bash
set -x
SLAPADD=/usr/sbin/slapadd
SLAPD=/usr/sbin/slapd
if [[ -z ${WORKDIR-} ]]; then
WORKDIR=$(mktemp -d -t nsscache.regtest.XXXXXX)
ARTIFACTS=${WORKDIR}
fi
slapd_apparmor_bkp="${WORKDIR}/slapd_profile.bkp"
slapd_apparmor_override="/etc/apparmor.d/local/usr.sbin.slapd"
slapd_apparmor="/etc/apparmor.d/usr.sbin.slapd"
cleanup() {
if [[ -f "$slapd_apparmor_bkp" ]]; then
sudo mv "$slapd_apparmor_bkp" "$slapd_apparmor_override"
sudo apparmor_parser -r -T -W "$slapd_apparmor"
fi
if [[ -e "$WORKDIR/slapd.pid" ]]; then
kill -TERM $(cat $WORKDIR/slapd.pid)
fi
if [[ -z ${ADTTMP-} ]]; then
rm -rf $WORKDIR
fi
}
trap cleanup 0 INT QUIT ABRT PIPE TERM
TESTDIR=$(dirname -- "$0")
apparmor_enabled() {
if [ -x /usr/sbin/aa-status ]; then
sudo /usr/sbin/aa-status --enabled && apparmor_enabled="0" || apparmor_enabled="1"
else
apparmor_enabled="1"
fi
return "$apparmor_enabled"
}
override_apparmor() {
# backup existing override
cp -af "$slapd_apparmor_override" "$slapd_apparmor_bkp"
# the test suite brings up a test slapd server running
# off /tmp/.
echo "${WORKDIR}/ rw," | sudo tee "$slapd_apparmor_override"
echo "${WORKDIR}/** rwk," | sudo tee -a "$slapd_apparmor_override"
echo "${ARTIFACTS}/ rw," | sudo tee -a "$slapd_apparmor_override"
echo "${ARTIFACTS}/** rwk," | sudo tee -a "$slapd_apparmor_override"
sudo apparmor_parser -r -T -W "$slapd_apparmor"
}
setup_slapd() {
set -e
mkdir -p $WORKDIR/ldap
sed -e "s!@workdir@!$WORKDIR!" \
< ${TESTDIR}/slapd.conf.tmpl > $ARTIFACTS/slapd.conf
$SLAPADD -d -1 -f $ARTIFACTS/slapd.conf -b dc=example,dc=com -l ${TESTDIR}/default.ldif
$SLAPD -h ldapi://${WORKDIR//\//%2F}%2Fldapi -f $ARTIFACTS/slapd.conf &
slappid=$!
attempts=0
until ldapsearch -x -H ldapi://${WORKDIR//\//%2F}%2Fldapi -b "dc=example,dc=com" '(objectclass=*)'; do
attempts=$(($attempts + 1))
if [[ $attempts -gt 10 ]]; then
echo "failed to connect to slapd in 60 attempts"
exit 1
fi
sleep 0.1
done
set +e
}
run_nsscache() {
source=$1
cache=$2
config_orig="${TESTDIR}/slapd-nsscache.conf.tmpl"
config=$(mktemp -p ${ARTIFACTS} nsscache.${source}.conf.XXXXXX)
sed -e "s!@cache@!$cache!" \
-e "s!@source@!$source!" \
-e "s!@workdir@!$WORKDIR!" \
< $config_orig > $config
mkdir $WORKDIR/$cache
mkdir $WORKDIR/ldap-timestamps-$cache
nsscache status
nsscache -d -c "${config}" update --full
r=$?
if [[ $r -ne 0 ]]; then
echo FAILED: $r
fi
test_${cache}
nsscache -d -c "${config}" status
}
test_nssdb() {
ls -alR $WORKDIR/nssdb
grep jaq $WORKDIR/nssdb/passwd.db
db_dump -da $WORKDIR/nssdb/passwd.db | grep jaq
db_dump -da $WORKDIR/nssdb/shadow.db | grep jaq
db_dump -da $WORKDIR/nssdb/group.db | grep jaq
[[ $(stat -c%A $WORKDIR/nssdb/shadow.db) == "-rw-r-----" ]] || exit 1
}
test_files() {
ls -alR $WORKDIR
set -e
grep jaq $WORKDIR/files/passwd.cache
grep jaq $WORKDIR/files/passwd.cache.ixname
grep 37 $WORKDIR/files/passwd.cache.ixuid
grep hax0rs $WORKDIR/files/group.cache
grep hax0rs $WORKDIR/files/group.cache.ixname
grep 31337 $WORKDIR/files/group.cache.ixgid
grep jaq $WORKDIR/files/shadow.cache
grep jaq $WORKDIR/files/shadow.cache.ixname
[[ $(stat -c%A $WORKDIR/files/shadow.cache) == "-rw-r-----" ]] || exit 1
[[ $(stat -c%A $WORKDIR/files/shadow.cache.ixname) == "-rw-r-----" ]] || exit 1
}
check () {
which nsscache
if [[ $? -ne 0 ]]; then
(
cd ${TESTDIR}/..
pip3 install --target="${WORKDIR}" .
)
export PATH=$PATH:${WORKDIR}/bin
fi
set -e
nsscache --version
set +e
}
check
if apparmor_enabled; then
override_apparmor
fi
setup_slapd
run_nsscache ldap nssdb
run_nsscache ldap files
echo OK
nsscache-version-0.42/tests/slapd.conf.tmpl
include /etc/ldap/schema/core.schema
include /etc/ldap/schema/cosine.schema
include /etc/ldap/schema/nis.schema
loglevel -1
pidfile @workdir@/slapd.pid
moduleload back_bdb.la
database bdb
suffix "dc=example,dc=com"
directory @workdir@/ldap