spf13-afero-18d690e/ (commit 399bb34ad9fd8a252ad1d8bfaef96279b66dc774)
spf13-afero-18d690e/.editorconfig
root = true
[*]
charset = utf-8
end_of_line = lf
indent_size = 4
indent_style = space
insert_final_newline = true
trim_trailing_whitespace = true
[*.go]
indent_style = tab
[{*.yml,*.yaml}]
indent_size = 2
spf13-afero-18d690e/.github/.editorconfig
[{*.yml,*.yaml}]
indent_size = 2
spf13-afero-18d690e/.github/dependabot.yaml
version: 2
updates:
- package-ecosystem: gomod
directory: /
schedule:
interval: daily
- package-ecosystem: github-actions
directory: /
schedule:
interval: daily
spf13-afero-18d690e/.github/workflows/analysis-scorecard.yaml
name: OpenSSF Scorecard
on:
branch_protection_rule:
push:
branches: [master]
schedule:
- cron: "30 0 * * 5"
permissions:
contents: read
jobs:
analyze:
name: Analyze
runs-on: ubuntu-latest
permissions:
actions: read
contents: read
id-token: write
security-events: write
steps:
- name: Checkout repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
with:
persist-credentials: false
- name: Run analysis
uses: ossf/scorecard-action@05b42c624433fc40578a4040d5cf5e36ddca8cde # v2.4.2
with:
results_file: results.sarif
results_format: sarif
publish_results: true
- name: Upload results as artifact
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
with:
name: OpenSSF Scorecard results
path: results.sarif
retention-days: 5
- name: Upload results to GitHub Security tab
uses: github/codeql-action/upload-sarif@f1f6e5f6af878fb37288ce1c627459e94dbf7d01 # v3.30.1
with:
sarif_file: results.sarif
spf13-afero-18d690e/.github/workflows/ci.yaml
name: CI
on:
push:
branches: [master]
pull_request:
jobs:
test:
name: Test
runs-on: ${{ matrix.os }}
strategy:
fail-fast: false
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
go: [stable, oldstable, "1.23", "1.24", "1.25"]
steps:
- name: Checkout repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Set up Go
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
with:
go-version: ${{ matrix.go }}
- name: Test
run: go test -race -v ./...
- name: Test gcsfs
run: go test -race -v ./...
working-directory: ./gcsfs
- name: Test sftpfs
run: go test -race -v ./...
working-directory: ./sftpfs
lint:
name: Lint
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Set up Go
uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0
with:
go-version: "1.25"
- name: Lint
uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0
with:
version: v2.4.0
dependency-review:
name: Dependency review
runs-on: ubuntu-latest
if: github.event_name == 'pull_request'
steps:
- name: Checkout repository
uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
- name: Dependency Review
uses: actions/dependency-review-action@595b5aeba73380359d98a5e087f648dbb0edce1b # v4.7.3
spf13-afero-18d690e/.gitignore
sftpfs/file1
sftpfs/test/
spf13-afero-18d690e/.golangci.yaml
version: "2"
run:
timeout: 10m
linters:
enable:
- govet
- ineffassign
- misspell
- nolintlint
# - revive
- staticcheck
- unused
disable:
- errcheck
# - staticcheck
settings:
misspell:
locale: US
nolintlint:
allow-unused: false # report any unused nolint directives
require-specific: false # don't require nolint directives to be specific about which linter is being skipped
exclusions:
paths:
- gcsfs/internal/stiface
formatters:
enable:
- gci
- gofmt
- gofumpt
- goimports
- golines
settings:
gci:
sections:
- standard
- default
- localmodule
exclusions:
paths:
- gcsfs/internal/stiface
spf13-afero-18d690e/LICENSE.txt
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
spf13-afero-18d690e/README.md
[CI](https://github.com/spf13/afero/actions?query=workflow%3ACI)
[GoDoc](https://pkg.go.dev/mod/github.com/spf13/afero)
[Go Report Card](https://goreportcard.com/report/github.com/spf13/afero)
# Afero: The Universal Filesystem Abstraction for Go
Afero is a powerful and extensible filesystem abstraction system for Go. It provides a single, unified API for interacting with diverse filesystems—including the local disk, memory, archives, and network storage.
Afero acts as a drop-in replacement for the standard `os` package, enabling you to write modular code that is agnostic to the underlying storage, dramatically simplifying testing, and allowing for sophisticated architectural patterns through filesystem composition.
## Why Afero?
Afero elevates filesystem interaction beyond simple file reading and writing, offering solutions for testability, flexibility, and advanced architecture.
🔑 **Key Features:**
* **Universal API:** Write your code once. Run it against the local OS, in-memory storage, ZIP/TAR archives, or remote systems (SFTP, GCS).
* **Ultimate Testability:** Utilize `MemMapFs`, a fully concurrent-safe, read/write in-memory filesystem. Write fast, isolated, and reliable unit tests without touching the physical disk or worrying about cleanup.
* **Powerful Composition:** Afero's hidden superpower. Layer filesystems on top of each other to create sophisticated behaviors:
* **Sandboxing:** Use `CopyOnWriteFs` to create temporary scratch spaces that isolate changes from the base filesystem.
* **Caching:** Use `CacheOnReadFs` to automatically layer a fast cache (like memory) over a slow backend (like a network drive).
* **Security Jails:** Use `BasePathFs` to restrict application access to a specific subdirectory (chroot).
* **`os` Package Compatibility:** Afero mirrors the functions in the standard `os` package, making adoption and refactoring seamless.
* **`io/fs` Compatibility:** Fully compatible with the Go standard library's `io/fs` interfaces.
## Installation
```bash
go get github.com/spf13/afero
```
```go
import "github.com/spf13/afero"
```
## Quick Start: The Power of Abstraction
The core of Afero is the `afero.Fs` interface. By designing your functions to accept this interface rather than calling `os.*` functions directly, your code instantly becomes more flexible and testable.
### 1. Refactor Your Code
Change functions that rely on the `os` package to accept `afero.Fs`.
```go
// Before: Coupled to the OS and difficult to test
// func ProcessConfiguration(path string) error {
// data, err := os.ReadFile(path)
// ...
// }
import "github.com/spf13/afero"
// After: Decoupled, flexible, and testable
func ProcessConfiguration(fs afero.Fs, path string) error {
// Use Afero utility functions which mirror os/ioutil
data, err := afero.ReadFile(fs, path)
// ... process the data
return err
}
```
### 2. Usage in Production
In your production environment, inject the `OsFs` backend, which wraps the standard operating system calls.
```go
func main() {
// Use the real OS filesystem
AppFs := afero.NewOsFs()
ProcessConfiguration(AppFs, "/etc/myapp.conf")
}
```
### 3. Usage in Testing
In your tests, inject `MemMapFs`. This provides a blazing-fast, isolated, in-memory filesystem that requires no disk I/O and no cleanup.
```go
func TestProcessConfiguration(t *testing.T) {
// Use the in-memory filesystem
AppFs := afero.NewMemMapFs()
// Pre-populate the memory filesystem for the test
configPath := "/test/config.json"
afero.WriteFile(AppFs, configPath, []byte(`{"feature": true}`), 0644)
// Run the test entirely in memory
err := ProcessConfiguration(AppFs, configPath)
if err != nil {
t.Fatal(err)
}
}
```
## Afero's Superpower: Composition
Afero's most unique feature is its ability to combine filesystems. This allows you to build complex behaviors out of simple components, keeping your application logic clean.
### Example 1: Sandboxing with Copy-on-Write
Create a temporary environment where an application can "modify" system files without affecting the actual disk.
```go
// 1. The base layer is the real OS, made read-only for safety.
baseFs := afero.NewReadOnlyFs(afero.NewOsFs())
// 2. The overlay layer is a temporary in-memory filesystem for changes.
overlayFs := afero.NewMemMapFs()
// 3. Combine them. Reads fall through to the base; writes only hit the overlay.
sandboxFs := afero.NewCopyOnWriteFs(baseFs, overlayFs)
// The application can now "modify" /etc/hosts, but the changes are isolated in memory.
afero.WriteFile(sandboxFs, "/etc/hosts", []byte("127.0.0.1 sandboxed-app"), 0644)
// The real /etc/hosts on disk is untouched.
```
### Example 2: Caching a Slow Filesystem
Improve performance by layering a fast cache (like memory) over a slow backend (like a network drive or cloud storage).
```go
import "time"
// Assume 'remoteFs' is a slow backend (e.g., SFTP or GCS)
var remoteFs afero.Fs
// 'cacheFs' is a fast in-memory backend
cacheFs := afero.NewMemMapFs()
// Create the caching layer. Cache items for 5 minutes upon first read.
cachedFs := afero.NewCacheOnReadFs(remoteFs, cacheFs, 5*time.Minute)
// The first read is slow (fetches from remote, then caches)
data1, _ := afero.ReadFile(cachedFs, "data.json")
// The second read is instant (serves from memory cache)
data2, _ := afero.ReadFile(cachedFs, "data.json")
```
### Example 3: Security Jails (chroot)
Restrict an application component's access to a specific subdirectory.
```go
osFs := afero.NewOsFs()
// Create a filesystem rooted at /home/user/public
// The application cannot access anything above this directory.
jailedFs := afero.NewBasePathFs(osFs, "/home/user/public")
// To the application, this is reading "/"
// In reality, it's reading "/home/user/public/"
dirInfo, err := afero.ReadDir(jailedFs, "/")
// Attempts to access parent directories fail
_, err = jailedFs.Open("../secrets.txt") // Returns an error
```
## Real-World Use Cases
### Build Cloud-Agnostic Applications
Write applications that seamlessly work with different storage backends:
```go
type DocumentProcessor struct {
fs afero.Fs
}
func NewDocumentProcessor(fs afero.Fs) *DocumentProcessor {
return &DocumentProcessor{fs: fs}
}
func (p *DocumentProcessor) Process(inputPath, outputPath string) error {
// This code works whether fs is local disk, cloud storage, or memory
content, err := afero.ReadFile(p.fs, inputPath)
if err != nil {
return err
}
processed := processContent(content)
return afero.WriteFile(p.fs, outputPath, processed, 0644)
}
// Use with local filesystem
processor := NewDocumentProcessor(afero.NewOsFs())
// Use with Google Cloud Storage
processor := NewDocumentProcessor(gcsFS)
// Use with in-memory filesystem for testing
processor := NewDocumentProcessor(afero.NewMemMapFs())
```
### Treating Archives as Filesystems
Read files directly from `.zip` or `.tar` archives without unpacking them to disk first.
```go
import (
"archive/zip"
"github.com/spf13/afero/zipfs"
)
// Assume 'zipReader' is a *zip.Reader initialized from a file or memory
var zipReader *zip.Reader
// Create a read-only ZipFs
archiveFS := zipfs.New(zipReader)
// Read a file from within the archive using the standard Afero API
content, err := afero.ReadFile(archiveFS, "/docs/readme.md")
```
### Serving Any Filesystem over HTTP
Use `HttpFs` to expose any Afero filesystem—even one created dynamically in memory—through a standard Go web server.
```go
import (
"net/http"
"github.com/spf13/afero"
)
func main() {
memFS := afero.NewMemMapFs()
afero.WriteFile(memFS, "index.html", []byte("<h1>Hello from Memory!</h1>"), 0644)
// Wrap the memory filesystem to make it compatible with http.FileServer.
httpFS := afero.NewHttpFs(memFS)
http.Handle("/", http.FileServer(httpFS.Dir("/")))
http.ListenAndServe(":8080", nil)
}
```
### Testing Made Simple
One of Afero's greatest strengths is making filesystem-dependent code easily testable:
```go
func SaveUserData(fs afero.Fs, userID string, data []byte) error {
filename := fmt.Sprintf("users/%s.json", userID)
return afero.WriteFile(fs, filename, data, 0644)
}
func TestSaveUserData(t *testing.T) {
// Create a clean, fast, in-memory filesystem for testing
testFS := afero.NewMemMapFs()
userData := []byte(`{"name": "John", "email": "john@example.com"}`)
err := SaveUserData(testFS, "123", userData)
if err != nil {
t.Fatalf("SaveUserData failed: %v", err)
}
// Verify the file was saved correctly
saved, err := afero.ReadFile(testFS, "users/123.json")
if err != nil {
t.Fatalf("Failed to read saved file: %v", err)
}
if string(saved) != string(userData) {
t.Errorf("Data mismatch: got %s, want %s", saved, userData)
}
}
```
**Benefits of testing with Afero:**
- ⚡ **Fast** - No disk I/O, tests run in memory
- 🔄 **Reliable** - Each test starts with a clean slate
- 🧹 **No cleanup** - Memory is automatically freed
- 🔒 **Safe** - Can't accidentally modify real files
- 🏃 **Parallel** - Tests can run concurrently without conflicts
## Backend Reference
| Type | Backend | Constructor | Description | Status |
| :--- | :--- | :--- | :--- | :--- |
| **Core** | **OsFs** | `afero.NewOsFs()` | Interacts with the real operating system filesystem. Use in production. | ✅ Official |
| | **MemMapFs** | `afero.NewMemMapFs()` | A fast, atomic, concurrent-safe, in-memory filesystem. Ideal for testing. | ✅ Official |
| **Composition** | **CopyOnWriteFs**| `afero.NewCopyOnWriteFs(base, overlay)` | A read-only base with a writable overlay. Ideal for sandboxing. | ✅ Official |
| | **CacheOnReadFs**| `afero.NewCacheOnReadFs(base, cache, ttl)` | Lazily caches files from a slow base into a fast layer on first read. | ✅ Official |
| | **BasePathFs** | `afero.NewBasePathFs(source, path)` | Restricts operations to a subdirectory (chroot/jail). | ✅ Official |
| | **ReadOnlyFs** | `afero.NewReadOnlyFs(source)` | Provides a read-only view, preventing any modifications. | ✅ Official |
| | **RegexpFs** | `afero.NewRegexpFs(source, regexp)` | Filters a filesystem, only showing files that match a regex. | ✅ Official |
| **Utility** | **HttpFs** | `afero.NewHttpFs(source)` | Wraps any Afero filesystem to be served via `http.FileServer`. | ✅ Official |
| **Archives** | **ZipFs** | `zipfs.New(zipReader)` | Read-only access to files within a ZIP archive. | ✅ Official |
| | **TarFs** | `tarfs.New(tarReader)` | Read-only access to files within a TAR archive. | ✅ Official |
| **Network** | **GcsFs** | `gcsfs.NewGcsFs(...)` | Google Cloud Storage backend. | ⚡ Experimental |
| | **SftpFs** | `sftpfs.New(...)` | SFTP backend. | ⚡ Experimental |
| **3rd Party Cloud** | **S3Fs** | [`fclairamb/afero-s3`](https://github.com/fclairamb/afero-s3) | Production-ready S3 backend built on official AWS SDK. | 🔹 3rd Party |
| | **MinioFs** | [`cpyun/afero-minio`](https://github.com/cpyun/afero-minio) | MinIO object storage backend with S3 compatibility. | 🔹 3rd Party |
| | **DriveFs** | [`fclairamb/afero-gdrive`](https://github.com/fclairamb/afero-gdrive) | Google Drive backend with streaming support. | 🔹 3rd Party |
| | **DropboxFs** | [`fclairamb/afero-dropbox`](https://github.com/fclairamb/afero-dropbox) | Dropbox backend with streaming support. | 🔹 3rd Party |
| **3rd Party Specialized** | **GitFs** | [`tobiash/go-gitfs`](https://github.com/tobiash/go-gitfs) | Git repository filesystem (read-only, Afero compatible). | 🔹 3rd Party |
| | **DockerFs** | [`unmango/aferox`](https://github.com/unmango/aferox) | Docker container filesystem access. | 🔹 3rd Party |
| | **GitHubFs** | [`unmango/aferox`](https://github.com/unmango/aferox) | GitHub repository and releases filesystem. | 🔹 3rd Party |
| | **FilterFs** | [`unmango/aferox`](https://github.com/unmango/aferox) | Filesystem filtering with predicates. | 🔹 3rd Party |
| | **IgnoreFs** | [`unmango/aferox`](https://github.com/unmango/aferox) | .gitignore-aware filtering filesystem. | 🔹 3rd Party |
| | **FUSEFs** | [`JakWai01/sile-fystem`](https://github.com/JakWai01/sile-fystem) | Generic FUSE implementation using any Afero backend. | 🔹 3rd Party |
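`RegexpFs` from the table above has no dedicated example elsewhere in this README, so here is a minimal sketch (the pattern and file names are purely illustrative):
```go
import (
	"regexp"

	"github.com/spf13/afero"
)

// Only file names matching the regexp are visible or creatable.
txtOnly := afero.NewRegexpFs(afero.NewMemMapFs(), regexp.MustCompile(`\.txt$`))

// A matching name works as usual.
_ = afero.WriteFile(txtOnly, "notes.txt", []byte("ok"), 0644)

// A non-matching name is rejected by the filter.
_, err := txtOnly.Create("image.png") // returns an error
```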
## Afero vs. `io/fs` (Go 1.16+)
Go 1.16 introduced the `io/fs` package, which provides a standard abstraction for **read-only** filesystems.
Afero complements `io/fs` by focusing on different needs:
* **Use `io/fs` when:** You only need to read files and want to conform strictly to the standard library interfaces.
* **Use Afero when:**
* Your application needs to **create, write, modify, or delete** files.
* You need to test complex read/write interactions (e.g., renaming, concurrent writes).
* You need advanced compositional features (Copy-on-Write, Caching, etc.).
Afero is fully compatible with `io/fs`. You can wrap any Afero filesystem to satisfy the `fs.FS` interface using `afero.NewIOFS`:
```go
import "io/fs"
// Create an Afero filesystem (writable)
var myAferoFs afero.Fs = afero.NewMemMapFs()
// Convert it to a standard library fs.FS (read-only view)
var myIoFs fs.FS = afero.NewIOFS(myAferoFs)
```
## Third-Party Backends & Ecosystem
The Afero community has developed numerous backends and tools that extend the library's capabilities. Below are curated, well-maintained options organized by maturity and reliability.
### Featured Community Backends
These are mature, reliable backends that we can confidently recommend for production use:
#### **Amazon S3** - [`fclairamb/afero-s3`](https://github.com/fclairamb/afero-s3)
Production-ready S3 backend built on the official AWS SDK for Go.
```go
import "github.com/fclairamb/afero-s3"
s3fs := s3.NewFs(bucket, session)
```
#### **MinIO** - [`cpyun/afero-minio`](https://github.com/cpyun/afero-minio)
MinIO object storage backend providing S3-compatible object storage with deduplication and optimization features.
```go
import "github.com/cpyun/afero-minio"
minioFs := miniofs.NewMinioFs(ctx, "minio://endpoint/bucket")
```
### Community & Specialized Backends
#### Cloud Storage
- **Google Drive** - [`fclairamb/afero-gdrive`](https://github.com/fclairamb/afero-gdrive)
Streaming support; no write-seeking or POSIX permissions; no file-listing cache
- **Dropbox** - [`fclairamb/afero-dropbox`](https://github.com/fclairamb/afero-dropbox)
Streaming support; no write-seeking or POSIX permissions
#### Version Control Systems
- **Git Repositories** - [`tobiash/go-gitfs`](https://github.com/tobiash/go-gitfs)
Read-only filesystem abstraction for Git repositories. Works with bare repositories and provides filesystem view of any git reference. Uses go-git for repository access.
#### Container and Remote Systems
- **Docker Containers** - [`unmango/aferox`](https://github.com/unmango/aferox)
Access Docker container filesystems as if they were local filesystems
- **GitHub API** - [`unmango/aferox`](https://github.com/unmango/aferox)
Turn GitHub repositories, releases, and assets into browsable filesystems
#### FUSE Integration
- **Generic FUSE** - [`JakWai01/sile-fystem`](https://github.com/JakWai01/sile-fystem)
Mount any Afero filesystem as a FUSE filesystem, allowing any Afero backend to be used as a real mounted filesystem
#### Specialized Filesystems
- **FAT32 Support** - [`aligator/GoFAT`](https://github.com/aligator/GoFAT)
Pure Go FAT filesystem implementation (currently read-only)
### Interface Adapters & Utilities
**Cross-Interface Compatibility:**
- [`jfontan/go-billy-desfacer`](https://github.com/jfontan/go-billy-desfacer) - Adapter between Afero and go-billy interfaces (for go-git compatibility)
- [`Maldris/go-billy-afero`](https://github.com/Maldris/go-billy-afero) - Alternative wrapper for using Afero with go-billy
- [`c4milo/afero2billy`](https://github.com/c4milo/afero2billy) - Another Afero to billy filesystem adapter
**Working Directory Management:**
- [`carolynvs/aferox`](https://github.com/carolynvs/aferox) - Working directory-aware filesystem wrapper
**Advanced Filtering:**
- [`unmango/aferox`](https://github.com/unmango/aferox) includes multiple specialized filesystems:
- **FilterFs** - Predicate-based file filtering
- **IgnoreFs** - .gitignore-aware filtering
- **WriterFs** - Dump writes to io.Writer for debugging
#### Developer Tools & Utilities
**nhatthm Utility Suite** - Essential tools for Afero development:
- [`nhatthm/aferocopy`](https://github.com/nhatthm/aferocopy) - Copy files between any Afero filesystems
- [`nhatthm/aferomock`](https://github.com/nhatthm/aferomock) - Mocking toolkit for testing
- [`nhatthm/aferoassert`](https://github.com/nhatthm/aferoassert) - Assertion helpers for filesystem testing
### Ecosystem Showcase
**Windows Virtual Drives** - [`balazsgrill/potatodrive`](https://github.com/balazsgrill/potatodrive)
Mount any Afero filesystem as a Windows drive letter. Brilliant demonstration of Afero's power!
### Modern Asset Embedding (Go 1.16+)
Instead of third-party tools, use Go's native `//go:embed` with Afero:
```go
import (
"embed"
"github.com/spf13/afero"
)
//go:embed assets/*
var assetsFS embed.FS
func main() {
// Convert embedded files to Afero filesystem
fs := afero.FromIOFS(assetsFS)
// Use like any other Afero filesystem
content, _ := afero.ReadFile(fs, "assets/config.json")
}
```
## Contributing
We welcome contributions! The project is mature, but we are actively looking for contributors to help implement and stabilize network/cloud backends.
* 🔥 **Microsoft Azure Blob Storage**
* 🔒 **Modern Encryption Backend** - Built on secure, contemporary crypto (not legacy EncFS)
* 🐙 **Canonical go-git Adapter** - Unified solution for Git integration
* 📡 **SSH/SCP Backend** - Secure remote file operations
* Stabilization of existing experimental backends (GCS, SFTP)
To contribute:
1. Fork the repository
2. Create your feature branch (`git checkout -b my-new-feature`)
3. Commit your changes (`git commit -am 'Add some feature'`)
4. Push to the branch (`git push origin my-new-feature`)
5. Create a new Pull Request
## 📄 License
Afero is released under the Apache 2.0 license. See [LICENSE.txt](https://github.com/spf13/afero/blob/master/LICENSE.txt) for details.
## 🔗 Additional Resources
- [📖 Full API Documentation](https://pkg.go.dev/github.com/spf13/afero)
- [🎯 Examples Repository](https://github.com/spf13/afero/tree/master/examples)
- [📋 Release Notes](https://github.com/spf13/afero/releases)
- [❓ GitHub Discussions](https://github.com/spf13/afero/discussions)
---
*Afero comes from the Latin roots Ad-Facere, meaning "to make" or "to do" - fitting for a library that empowers you to make and do amazing things with filesystems.*
spf13-afero-18d690e/afero.go
// Copyright © 2014 Steve Francia.
// Copyright 2013 tsuru authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package afero provides types and methods for interacting with the filesystem,
// as an abstraction layer.
// Afero also provides a few implementations that are mostly interoperable. One that
// uses the operating system filesystem, one that uses memory to store files
// (cross platform) and an interface that should be implemented if you want to
// provide your own filesystem.
package afero
import (
"errors"
"io"
"os"
"time"
)
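// Afero is a convenience wrapper that embeds an Fs and exposes the package's
// utility helpers (e.g. ReadFile, WriteFile, TempDir) as methods on a single value.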
type Afero struct {
Fs
}
// File represents a file in the filesystem.
type File interface {
io.Closer
io.Reader
io.ReaderAt
io.Seeker
io.Writer
io.WriterAt
Name() string
Readdir(count int) ([]os.FileInfo, error)
Readdirnames(n int) ([]string, error)
Stat() (os.FileInfo, error)
Sync() error
Truncate(size int64) error
WriteString(s string) (ret int, err error)
}
// Fs is the filesystem interface.
//
// Any simulated or real filesystem should implement this interface.
type Fs interface {
// Create creates a file in the filesystem, returning the file and an
// error, if any happens.
Create(name string) (File, error)
// Mkdir creates a directory in the filesystem, returning an error if any
// happens.
Mkdir(name string, perm os.FileMode) error
// MkdirAll creates a directory path and all parents that do not exist
// yet.
MkdirAll(path string, perm os.FileMode) error
// Open opens a file, returning it or an error, if any happens.
Open(name string) (File, error)
// OpenFile opens a file using the given flags and the given mode.
OpenFile(name string, flag int, perm os.FileMode) (File, error)
// Remove removes a file identified by name, returning an error, if any
// happens.
Remove(name string) error
// RemoveAll removes a directory path and any children it contains. It
// does not fail if the path does not exist (returns nil).
RemoveAll(path string) error
// Rename renames a file.
Rename(oldname, newname string) error
// Stat returns a FileInfo describing the named file, or an error, if any
// happens.
Stat(name string) (os.FileInfo, error)
// The name of this FileSystem
Name() string
// Chmod changes the mode of the named file to mode.
Chmod(name string, mode os.FileMode) error
// Chown changes the uid and gid of the named file.
Chown(name string, uid, gid int) error
// Chtimes changes the access and modification times of the named file
Chtimes(name string, atime time.Time, mtime time.Time) error
}
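// Illustrative sketch (not part of the interface contract): any value
// implementing Fs can be swapped in wherever the interface is accepted, e.g.
//
//	var fsys Fs = NewMemMapFs()
//	f, _ := fsys.Create("hello.txt")
//	f.WriteString("hi")
//	f.Close()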
var (
ErrFileClosed = errors.New("File is closed")
ErrOutOfRange = errors.New("out of range")
ErrTooLarge = errors.New("too large")
ErrFileNotFound = os.ErrNotExist
ErrFileExists = os.ErrExist
ErrDestinationExists = os.ErrExist
)
spf13-afero-18d690e/afero_test.go
// Copyright © 2014 Steve Francia.
// Copyright 2009 The Go Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"bytes"
"fmt"
"io"
iofs "io/fs"
"os"
"path/filepath"
"runtime"
"strings"
"syscall"
"testing"
)
var (
testName = "test.txt"
Fss = []Fs{&MemMapFs{}, &OsFs{}}
)
var testRegistry map[Fs][]string = make(map[Fs][]string)
func testDir(fs Fs) string {
name, err := TempDir(fs, "", "afero")
if err != nil {
panic(fmt.Sprint("unable to work with test dir", err))
}
testRegistry[fs] = append(testRegistry[fs], name)
return name
}
func tmpFile(fs Fs) File {
x, err := TempFile(fs, "", "afero")
if err != nil {
panic(fmt.Sprint("unable to work with temp file", err))
}
testRegistry[fs] = append(testRegistry[fs], x.Name())
return x
}
// Read with length 0 should not return EOF.
func TestRead0(t *testing.T) {
for _, fs := range Fss {
f := tmpFile(fs)
defer f.Close()
f.WriteString(
"Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum.",
)
var b []byte
// b := make([]byte, 0)
n, err := f.Read(b)
if n != 0 || err != nil {
t.Errorf("%v: Read(0) = %d, %v, want 0, nil", fs.Name(), n, err)
}
f.Seek(0, 0)
b = make([]byte, 100)
n, err = f.Read(b)
if n <= 0 || err != nil {
t.Errorf("%v: Read(100) = %d, %v, want >0, nil", fs.Name(), n, err)
}
}
}
func TestOpenFile(t *testing.T) {
defer removeAllTestFiles(t)
for _, fs := range Fss {
tmp := testDir(fs)
path := filepath.Join(tmp, testName)
f, err := fs.OpenFile(path, os.O_RDWR|os.O_CREATE, 0o600)
if err != nil {
t.Error(fs.Name(), "OpenFile (O_CREATE) failed:", err)
continue
}
io.WriteString(f, "initial")
f.Close()
f, err = fs.OpenFile(path, os.O_WRONLY|os.O_APPEND, 0o600)
if err != nil {
t.Error(fs.Name(), "OpenFile (O_APPEND) failed:", err)
continue
}
io.WriteString(f, "|append")
f.Close()
f, _ = fs.OpenFile(path, os.O_RDONLY, 0o600)
contents, _ := io.ReadAll(f)
expectedContents := "initial|append"
if string(contents) != expectedContents {
t.Errorf(
"%v: appending, expected '%v', got: '%v'",
fs.Name(),
expectedContents,
string(contents),
)
}
f.Close()
f, err = fs.OpenFile(path, os.O_RDWR|os.O_TRUNC, 0o600)
if err != nil {
t.Error(fs.Name(), "OpenFile (O_TRUNC) failed:", err)
continue
}
contents, _ = io.ReadAll(f)
if string(contents) != "" {
t.Errorf("%v: expected truncated file, got: '%v'", fs.Name(), string(contents))
}
f.Close()
}
}
func TestCreate(t *testing.T) {
defer removeAllTestFiles(t)
for _, fs := range Fss {
tmp := testDir(fs)
path := filepath.Join(tmp, testName)
f, err := fs.Create(path)
if err != nil {
t.Error(fs.Name(), "Create failed:", err)
f.Close()
continue
}
io.WriteString(f, "initial")
f.Close()
f, err = fs.Create(path)
if err != nil {
t.Error(fs.Name(), "Create failed:", err)
f.Close()
continue
}
secondContent := "second create"
io.WriteString(f, secondContent)
f.Close()
f, err = fs.Open(path)
if err != nil {
t.Error(fs.Name(), "Open failed:", err)
f.Close()
continue
}
buf, err := ReadAll(f)
if err != nil {
t.Error(fs.Name(), "ReadAll failed:", err)
f.Close()
continue
}
if string(buf) != secondContent {
t.Error(
fs.Name(),
"Content should be",
"\""+secondContent+"\" but is \""+string(buf)+"\"",
)
f.Close()
continue
}
f.Close()
}
}
func TestMemFileRead(t *testing.T) {
f := tmpFile(new(MemMapFs))
// f := MemFileCreate("testfile")
f.WriteString("abcd")
f.Seek(0, 0)
b := make([]byte, 8)
n, err := f.Read(b)
if n != 4 {
t.Errorf("didn't read all bytes: %v %v %v", n, err, b)
}
if err != nil {
t.Errorf("err is not nil: %v %v %v", n, err, b)
}
n, err = f.Read(b)
if n != 0 {
t.Errorf("read more bytes: %v %v %v", n, err, b)
}
if err != io.EOF {
t.Errorf("error is not EOF: %v %v %v", n, err, b)
}
}
func TestRename(t *testing.T) {
defer removeAllTestFiles(t)
for _, fs := range Fss {
tDir := testDir(fs)
from := filepath.Join(tDir, "/renamefrom")
to := filepath.Join(tDir, "/renameto")
exists := filepath.Join(tDir, "/renameexists")
file, err := fs.Create(from)
if err != nil {
t.Fatalf("%s: open %q failed: %v", fs.Name(), to, err)
}
if err = file.Close(); err != nil {
t.Errorf("%s: close %q failed: %v", fs.Name(), to, err)
}
file, err = fs.Create(exists)
if err != nil {
t.Fatalf("%s: open %q failed: %v", fs.Name(), to, err)
}
if err = file.Close(); err != nil {
t.Errorf("%s: close %q failed: %v", fs.Name(), to, err)
}
err = fs.Rename(from, to)
if err != nil {
t.Fatalf("%s: rename %q, %q failed: %v", fs.Name(), to, from, err)
}
file, err = fs.Create(from)
if err != nil {
t.Fatalf("%s: open %q failed: %v", fs.Name(), to, err)
}
if err = file.Close(); err != nil {
t.Errorf("%s: close %q failed: %v", fs.Name(), to, err)
}
err = fs.Rename(from, exists)
if err != nil {
t.Errorf("%s: rename %q, %q failed: %v", fs.Name(), exists, from, err)
}
names, err := readDirNames(fs, tDir)
if err != nil {
t.Errorf("%s: readDirNames error: %v", fs.Name(), err)
}
found := false
for _, e := range names {
if e == "renamefrom" {
t.Error("File is still called renamefrom")
}
if e == "renameto" {
found = true
}
}
if !found {
t.Error("File was not renamed to renameto")
}
_, err = fs.Stat(to)
if err != nil {
t.Errorf("%s: stat %q failed: %v", fs.Name(), to, err)
}
}
}
func TestRemove(t *testing.T) {
for _, fs := range Fss {
x, err := TempFile(fs, "", "afero")
if err != nil {
t.Error(fmt.Sprint("unable to work with temp file", err))
}
path := x.Name()
x.Close()
tDir := filepath.Dir(path)
err = fs.Remove(path)
if err != nil {
t.Errorf("%v: Remove() failed: %v", fs.Name(), err)
continue
}
_, err = fs.Stat(path)
if !os.IsNotExist(err) {
t.Errorf("%v: Remove() didn't remove file", fs.Name())
continue
}
// Deleting non-existent file should raise error
err = fs.Remove(path)
if !os.IsNotExist(err) {
t.Errorf("%v: Remove() didn't raise error for non-existent file", fs.Name())
}
f, err := fs.Open(tDir)
if err != nil {
t.Error("TestDir should still exist:", err)
}
names, err := f.Readdirnames(-1)
if err != nil {
t.Error("Readdirnames failed:", err)
}
for _, e := range names {
if e == testName {
t.Error("File was not removed from parent directory")
}
}
}
}
func TestTruncate(t *testing.T) {
defer removeAllTestFiles(t)
for _, fs := range Fss {
f := tmpFile(fs)
defer f.Close()
checkSize(t, f, 0)
f.Write([]byte("hello, world\n"))
checkSize(t, f, 13)
f.Truncate(10)
checkSize(t, f, 10)
f.Truncate(1024)
checkSize(t, f, 1024)
f.Truncate(0)
checkSize(t, f, 0)
_, err := f.Write([]byte("surprise!"))
if err == nil {
checkSize(t, f, 13+9) // wrote at offset past where hello, world was.
}
}
}
func TestSeek(t *testing.T) {
defer removeAllTestFiles(t)
for _, fs := range Fss {
f := tmpFile(fs)
defer f.Close()
const data = "hello, world\n"
io.WriteString(f, data)
type test struct {
in int64
whence int
out int64
}
tests := []test{
{0, 1, int64(len(data))},
{0, 0, 0},
{5, 0, 5},
{0, 2, int64(len(data))},
{0, 0, 0},
{-1, 2, int64(len(data)) - 1},
{1 << 33, 0, 1 << 33},
{1 << 33, 2, 1<<33 + int64(len(data))},
}
for i, tt := range tests {
off, err := f.Seek(tt.in, tt.whence)
if off != tt.out || err != nil {
if e, ok := err.(*os.PathError); ok && e.Err == syscall.EINVAL && tt.out > 1<<32 {
// Reiserfs rejects the big seeks.
// http://code.google.com/p/go/issues/detail?id=91
break
}
t.Errorf(
"#%d: Seek(%v, %v) = %v, %v want %v, nil",
i,
tt.in,
tt.whence,
off,
err,
tt.out,
)
}
}
}
}
func TestReadAt(t *testing.T) {
defer removeAllTestFiles(t)
for _, fs := range Fss {
f := tmpFile(fs)
defer f.Close()
const data = "hello, world\n"
io.WriteString(f, data)
b := make([]byte, 5)
n, err := f.ReadAt(b, 7)
if err != nil || n != len(b) {
t.Fatalf("ReadAt 7: %d, %v", n, err)
}
if string(b) != "world" {
t.Fatalf("ReadAt 7: have %q want %q", string(b), "world")
}
}
}
func TestWriteAt(t *testing.T) {
defer removeAllTestFiles(t)
for _, fs := range Fss {
f := tmpFile(fs)
defer f.Close()
const data = "hello, world\n"
io.WriteString(f, data)
n, err := f.WriteAt([]byte("WORLD"), 7)
if err != nil || n != 5 {
t.Fatalf("WriteAt 7: %d, %v", n, err)
}
f2, err := fs.Open(f.Name())
if err != nil {
t.Fatalf("%v: ReadFile %s: %v", fs.Name(), f.Name(), err)
}
defer f2.Close()
buf := new(bytes.Buffer)
buf.ReadFrom(f2)
b := buf.Bytes()
if string(b) != "hello, WORLD\n" {
t.Fatalf("after write: have %q want %q", string(b), "hello, WORLD\n")
}
}
}
func setupTestDir(t *testing.T, fs Fs) string {
path := testDir(fs)
return setupTestFiles(t, fs, path)
}
func setupTestDirRoot(t *testing.T, fs Fs) string {
path := testDir(fs)
setupTestFiles(t, fs, path)
return path
}
func setupTestDirReusePath(t *testing.T, fs Fs, path string) string {
testRegistry[fs] = append(testRegistry[fs], path)
return setupTestFiles(t, fs, path)
}
func setupTestFiles(t *testing.T, fs Fs, path string) string {
testSubDir := filepath.Join(path, "more", "subdirectories", "for", "testing", "we")
err := fs.MkdirAll(testSubDir, 0o700)
if err != nil && !os.IsExist(err) {
t.Fatal(err)
}
f, err := fs.Create(filepath.Join(testSubDir, "testfile1"))
if err != nil {
t.Fatal(err)
}
f.WriteString("Testfile 1 content")
f.Close()
f, err = fs.Create(filepath.Join(testSubDir, "testfile2"))
if err != nil {
t.Fatal(err)
}
f.WriteString("Testfile 2 content")
f.Close()
f, err = fs.Create(filepath.Join(testSubDir, "testfile3"))
if err != nil {
t.Fatal(err)
}
f.WriteString("Testfile 3 content")
f.Close()
f, err = fs.Create(filepath.Join(testSubDir, "testfile4"))
if err != nil {
t.Fatal(err)
}
f.WriteString("Testfile 4 content")
f.Close()
return testSubDir
}
func TestReaddirnames(t *testing.T) {
defer removeAllTestFiles(t)
for _, fs := range Fss {
testSubDir := setupTestDir(t, fs)
tDir := filepath.Dir(testSubDir)
root, err := fs.Open(tDir)
if err != nil {
t.Fatal(fs.Name(), tDir, err)
}
defer root.Close()
namesRoot, err := root.Readdirnames(-1)
if err != nil {
t.Fatal(fs.Name(), namesRoot, err)
}
sub, err := fs.Open(testSubDir)
if err != nil {
t.Fatal(err)
}
defer sub.Close()
namesSub, err := sub.Readdirnames(-1)
if err != nil {
t.Fatal(fs.Name(), namesSub, err)
}
findNames(fs, t, tDir, testSubDir, namesRoot, namesSub)
}
}
func TestReaddirSimple(t *testing.T) {
defer removeAllTestFiles(t)
for _, fs := range Fss {
testSubDir := setupTestDir(t, fs)
tDir := filepath.Dir(testSubDir)
root, err := fs.Open(tDir)
if err != nil {
t.Fatal(err)
}
defer root.Close()
rootInfo, err := root.Readdir(1)
if err != nil {
t.Log(myFileInfo(rootInfo))
t.Error(err)
}
rootInfo, err = root.Readdir(5)
if err != io.EOF {
t.Log(myFileInfo(rootInfo))
t.Error(err)
}
sub, err := fs.Open(testSubDir)
if err != nil {
t.Fatal(err)
}
defer sub.Close()
subInfo, err := sub.Readdir(5)
if err != nil {
t.Log(myFileInfo(subInfo))
t.Error(err)
}
}
}
func TestReaddir(t *testing.T) {
defer removeAllTestFiles(t)
const nums = 6
for num := 0; num < nums; num++ {
outputs := make([]string, len(Fss))
infos := make([]string, len(Fss))
for i, fs := range Fss {
testSubDir := setupTestDir(t, fs)
root, err := fs.Open(testSubDir)
if err != nil {
t.Fatal(err)
}
infosn := make([]string, nums)
for j := 0; j < nums; j++ {
info, err := root.Readdir(num)
outputs[i] += fmt.Sprintf("%v Error: %v\n", myFileInfo(info), err)
s := fmt.Sprintln(len(info), err)
infosn[j] = s
infos[i] += s
}
root.Close()
// Also check fs.ReadDirFile interface if implemented
if _, ok := root.(iofs.ReadDirFile); ok {
root, err = fs.Open(testSubDir)
if err != nil {
t.Fatal(err)
}
defer root.Close()
for j := 0; j < nums; j++ {
dirEntries, err := root.(iofs.ReadDirFile).ReadDir(num)
s := fmt.Sprintln(len(dirEntries), err)
if s != infosn[j] {
t.Fatalf("%s: %s != %s", fs.Name(), s, infosn[j])
}
}
}
}
fail := false
for i, o := range infos {
if i == 0 {
continue
}
if o != infos[i-1] {
fail = true
break
}
}
if fail {
t.Log("Readdir outputs not equal for Readdir(", num, ")")
for i, o := range outputs {
t.Log(Fss[i].Name())
t.Log(o)
}
t.Fail()
}
}
}
// https://github.com/spf13/afero/issues/169
func TestReaddirRegularFile(t *testing.T) {
defer removeAllTestFiles(t)
for _, fs := range Fss {
f := tmpFile(fs)
defer f.Close()
_, err := f.Readdirnames(-1)
if err == nil {
t.Fatal("Expected error")
}
_, err = f.Readdir(-1)
if err == nil {
t.Fatal("Expected error")
}
}
}
type myFileInfo []os.FileInfo
func (m myFileInfo) String() string {
out := "Fileinfos:\n"
for _, e := range m {
out += " " + e.Name() + "\n"
}
return out
}
func TestReaddirAll(t *testing.T) {
defer removeAllTestFiles(t)
for _, fs := range Fss {
testSubDir := setupTestDir(t, fs)
tDir := filepath.Dir(testSubDir)
root, err := fs.Open(tDir)
if err != nil {
t.Fatal(err)
}
defer root.Close()
rootInfo, err := root.Readdir(-1)
if err != nil {
t.Fatal(err)
}
namesRoot := []string{}
for _, e := range rootInfo {
namesRoot = append(namesRoot, e.Name())
}
sub, err := fs.Open(testSubDir)
if err != nil {
t.Fatal(err)
}
defer sub.Close()
subInfo, err := sub.Readdir(-1)
if err != nil {
t.Fatal(err)
}
namesSub := []string{}
for _, e := range subInfo {
namesSub = append(namesSub, e.Name())
}
findNames(fs, t, tDir, testSubDir, namesRoot, namesSub)
}
}
func findNames(fs Fs, t *testing.T, tDir, testSubDir string, root, sub []string) {
var foundRoot bool
for _, e := range root {
f, err := fs.Open(filepath.Join(tDir, e))
if err != nil {
t.Error("Open", filepath.Join(tDir, e), ":", err)
}
defer f.Close()
if equal(e, "we") {
foundRoot = true
}
}
if !foundRoot {
t.Logf("Names root: %v", root)
t.Logf("Names sub: %v", sub)
t.Error("Didn't find subdirectory we")
}
var found1, found2 bool
for _, e := range sub {
f, err := fs.Open(filepath.Join(testSubDir, e))
if err != nil {
t.Error("Open", filepath.Join(testSubDir, e), ":", err)
}
defer f.Close()
if equal(e, "testfile1") {
found1 = true
}
if equal(e, "testfile2") {
found2 = true
}
}
if !found1 {
t.Logf("Names root: %v", root)
t.Logf("Names sub: %v", sub)
t.Error("Didn't find testfile1")
}
if !found2 {
t.Logf("Names root: %v", root)
t.Logf("Names sub: %v", sub)
t.Error("Didn't find testfile2")
}
}
func removeAllTestFiles(t *testing.T) {
for fs, list := range testRegistry {
for _, path := range list {
if err := fs.RemoveAll(path); err != nil {
t.Error(fs.Name(), err)
}
}
}
testRegistry = make(map[Fs][]string)
}
func equal(name1, name2 string) (r bool) {
switch runtime.GOOS {
case "windows":
r = strings.EqualFold(name1, name2)
default:
r = name1 == name2
}
return
}
func checkSize(t *testing.T, f File, size int64) {
dir, err := f.Stat()
if err != nil {
t.Fatalf("Stat %q (looking for size %d): %s", f.Name(), size, err)
}
if dir.Size() != size {
t.Errorf("Stat %q: size %d want %d", f.Name(), dir.Size(), size)
}
}
spf13-afero-18d690e/appveyor.yml
# This currently does nothing. We have moved to GitHub Actions, but this is kept
# until spf13 has disabled this project in AppVeyor.
version: '{build}'
clone_folder: C:\gopath\src\github.com\spf13\afero
environment:
GOPATH: C:\gopath
build_script:
- cmd: >-
go version
spf13-afero-18d690e/basepath.go
package afero
import (
"io/fs"
"os"
"path/filepath"
"runtime"
"strings"
"time"
)
var (
_ Lstater = (*BasePathFs)(nil)
_ fs.ReadDirFile = (*BasePathFile)(nil)
)
// The BasePathFs restricts all operations to a given path within an Fs.
// The given file name to the operations on this Fs will be prepended with
// the base path before calling the base Fs.
// Any file name (after filepath.Clean()) outside this base path will be
// treated as a non-existent file.
//
// Note that it does not clean the error messages on return, so you may
// reveal the real path on errors.
type BasePathFs struct {
source Fs
path string
}
type BasePathFile struct {
File
path string
}
func (f *BasePathFile) Name() string {
sourcename := f.File.Name()
return strings.TrimPrefix(sourcename, filepath.Clean(f.path))
}
func (f *BasePathFile) ReadDir(n int) ([]fs.DirEntry, error) {
if rdf, ok := f.File.(fs.ReadDirFile); ok {
return rdf.ReadDir(n)
}
return readDirFile{f.File}.ReadDir(n)
}
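// NewBasePathFs returns a new BasePathFs that restricts all operations on the
// source Fs to the given base path.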
func NewBasePathFs(source Fs, path string) Fs {
return &BasePathFs{source: source, path: path}
}
// RealPath returns the given file name with the base path prepended. For a
// file outside the base path it returns the given file name and an error.
func (b *BasePathFs) RealPath(name string) (path string, err error) {
if err := validateBasePathName(name); err != nil {
return name, err
}
bpath := filepath.Clean(b.path)
path = filepath.Clean(filepath.Join(bpath, name))
if !strings.HasPrefix(path, bpath) {
return name, os.ErrNotExist
}
return path, nil
}
func validateBasePathName(name string) error {
if runtime.GOOS != "windows" {
// Not much to do here;
// the virtual file paths all look absolute on *nix.
return nil
}
// On Windows a common mistake would be to provide an absolute OS path
// We could strip out the base part, but that would not be very portable.
if filepath.IsAbs(name) {
return os.ErrNotExist
}
return nil
}
func (b *BasePathFs) Chtimes(name string, atime, mtime time.Time) (err error) {
if name, err = b.RealPath(name); err != nil {
return &os.PathError{Op: "chtimes", Path: name, Err: err}
}
return b.source.Chtimes(name, atime, mtime)
}
func (b *BasePathFs) Chmod(name string, mode os.FileMode) (err error) {
if name, err = b.RealPath(name); err != nil {
return &os.PathError{Op: "chmod", Path: name, Err: err}
}
return b.source.Chmod(name, mode)
}
func (b *BasePathFs) Chown(name string, uid, gid int) (err error) {
if name, err = b.RealPath(name); err != nil {
return &os.PathError{Op: "chown", Path: name, Err: err}
}
return b.source.Chown(name, uid, gid)
}
func (b *BasePathFs) Name() string {
return "BasePathFs"
}
func (b *BasePathFs) Stat(name string) (fi os.FileInfo, err error) {
if name, err = b.RealPath(name); err != nil {
return nil, &os.PathError{Op: "stat", Path: name, Err: err}
}
return b.source.Stat(name)
}
func (b *BasePathFs) Rename(oldname, newname string) (err error) {
if oldname, err = b.RealPath(oldname); err != nil {
return &os.PathError{Op: "rename", Path: oldname, Err: err}
}
if newname, err = b.RealPath(newname); err != nil {
return &os.PathError{Op: "rename", Path: newname, Err: err}
}
return b.source.Rename(oldname, newname)
}
func (b *BasePathFs) RemoveAll(name string) (err error) {
if name, err = b.RealPath(name); err != nil {
return &os.PathError{Op: "remove_all", Path: name, Err: err}
}
return b.source.RemoveAll(name)
}
func (b *BasePathFs) Remove(name string) (err error) {
if name, err = b.RealPath(name); err != nil {
return &os.PathError{Op: "remove", Path: name, Err: err}
}
return b.source.Remove(name)
}
func (b *BasePathFs) OpenFile(name string, flag int, mode os.FileMode) (f File, err error) {
if name, err = b.RealPath(name); err != nil {
return nil, &os.PathError{Op: "openfile", Path: name, Err: err}
}
sourcef, err := b.source.OpenFile(name, flag, mode)
if err != nil {
return nil, err
}
return &BasePathFile{sourcef, b.path}, nil
}
func (b *BasePathFs) Open(name string) (f File, err error) {
if name, err = b.RealPath(name); err != nil {
return nil, &os.PathError{Op: "open", Path: name, Err: err}
}
sourcef, err := b.source.Open(name)
if err != nil {
return nil, err
}
return &BasePathFile{File: sourcef, path: b.path}, nil
}
func (b *BasePathFs) Mkdir(name string, mode os.FileMode) (err error) {
if name, err = b.RealPath(name); err != nil {
return &os.PathError{Op: "mkdir", Path: name, Err: err}
}
return b.source.Mkdir(name, mode)
}
func (b *BasePathFs) MkdirAll(name string, mode os.FileMode) (err error) {
if name, err = b.RealPath(name); err != nil {
return &os.PathError{Op: "mkdir", Path: name, Err: err}
}
return b.source.MkdirAll(name, mode)
}
func (b *BasePathFs) Create(name string) (f File, err error) {
if name, err = b.RealPath(name); err != nil {
return nil, &os.PathError{Op: "create", Path: name, Err: err}
}
sourcef, err := b.source.Create(name)
if err != nil {
return nil, err
}
return &BasePathFile{File: sourcef, path: b.path}, nil
}
func (b *BasePathFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
name, err := b.RealPath(name)
if err != nil {
return nil, false, &os.PathError{Op: "lstat", Path: name, Err: err}
}
if lstater, ok := b.source.(Lstater); ok {
return lstater.LstatIfPossible(name)
}
fi, err := b.source.Stat(name)
return fi, false, err
}
func (b *BasePathFs) SymlinkIfPossible(oldname, newname string) error {
oldname, err := b.RealPath(oldname)
if err != nil {
return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: err}
}
newname, err = b.RealPath(newname)
if err != nil {
return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: err}
}
if linker, ok := b.source.(Linker); ok {
return linker.SymlinkIfPossible(oldname, newname)
}
return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink}
}
func (b *BasePathFs) ReadlinkIfPossible(name string) (string, error) {
name, err := b.RealPath(name)
if err != nil {
return "", &os.PathError{Op: "readlink", Path: name, Err: err}
}
if reader, ok := b.source.(LinkReader); ok {
return reader.ReadlinkIfPossible(name)
}
return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink}
}
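// Illustrative usage sketch (not part of the library; the directory below is an
// assumption made up for the example). From a consumer's point of view, every
// path handed to the returned Fs is resolved beneath the chosen base:
//
//	import "github.com/spf13/afero"
//
//	func newJailedFs() afero.Fs {
//		// bp.Open("/cfg.json") reads /var/app/data/cfg.json on the real disk.
//		return afero.NewBasePathFs(afero.NewOsFs(), "/var/app/data")
//	}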
spf13-afero-18d690e/basepath_test.go 0000664 0000000 0000000 00000011203 15057601571 0017263 0 ustar 00root root 0000000 0000000 package afero
import (
"os"
"path/filepath"
"runtime"
"testing"
)
func TestBasePath(t *testing.T) {
baseFs := &MemMapFs{}
baseFs.MkdirAll("/base/path/tmp", 0o777)
bp := NewBasePathFs(baseFs, "/base/path")
if _, err := bp.Create("/tmp/foo"); err != nil {
t.Errorf("Failed to set real path")
}
if fh, err := bp.Create("../tmp/bar"); err == nil {
t.Errorf("succeeded in creating %s ...", fh.Name())
}
}
func TestBasePathRoot(t *testing.T) {
baseFs := &MemMapFs{}
baseFs.MkdirAll("/base/path/foo/baz", 0o777)
baseFs.MkdirAll("/base/path/boo/", 0o777)
bp := NewBasePathFs(baseFs, "/base/path")
rd, err := ReadDir(bp, string(os.PathSeparator))
if len(rd) != 2 {
t.Errorf("base path doesn't respect root")
}
if err != nil {
t.Error(err)
}
}
func TestRealPath(t *testing.T) {
fs := NewOsFs()
baseDir, err := TempDir(fs, "", "base")
if err != nil {
t.Fatal("error creating tempDir", err)
}
defer fs.RemoveAll(baseDir)
anotherDir, err := TempDir(fs, "", "another")
if err != nil {
t.Fatal("error creating tempDir", err)
}
defer fs.RemoveAll(anotherDir)
bp := NewBasePathFs(fs, baseDir).(*BasePathFs)
subDir := filepath.Join(baseDir, "s1")
realPath, err := bp.RealPath("/s1")
if err != nil {
t.Errorf("Got error %s", err)
}
if realPath != subDir {
t.Errorf("Expected \n%s got \n%s", subDir, realPath)
}
if runtime.GOOS == "windows" {
_, err = bp.RealPath(anotherDir)
if err != os.ErrNotExist {
t.Errorf("Expected os.ErrNotExist")
}
} else {
		// on *nix we have no way of just looking at the path and telling that anotherDir
// is not inside the base file system.
// The user will receive an os.ErrNotExist later.
surrealPath, err := bp.RealPath(anotherDir)
if err != nil {
t.Errorf("Got error %s", err)
}
expected := filepath.Join(baseDir, anotherDir)
if surrealPath != expected {
t.Errorf("Expected \n%s got \n%s", expected, surrealPath)
}
}
}
func TestNestedBasePaths(t *testing.T) {
type dirSpec struct {
Dir1, Dir2, Dir3 string
}
dirSpecs := []dirSpec{
{Dir1: "/", Dir2: "/", Dir3: "/"},
{Dir1: "/", Dir2: "/path2", Dir3: "/"},
{Dir1: "/path1/dir", Dir2: "/path2/dir/", Dir3: "/path3/dir"},
{Dir1: "C:/path1", Dir2: "path2/dir", Dir3: "/path3/dir/"},
}
for _, ds := range dirSpecs {
memFs := NewMemMapFs()
level1Fs := NewBasePathFs(memFs, ds.Dir1)
level2Fs := NewBasePathFs(level1Fs, ds.Dir2)
level3Fs := NewBasePathFs(level2Fs, ds.Dir3)
type spec struct {
BaseFs Fs
FileName string
}
specs := []spec{
{BaseFs: level3Fs, FileName: "f.txt"},
{BaseFs: level2Fs, FileName: "f.txt"},
{BaseFs: level1Fs, FileName: "f.txt"},
}
for _, s := range specs {
if err := s.BaseFs.MkdirAll(s.FileName, 0o755); err != nil {
t.Errorf("Got error %s", err.Error())
}
if _, err := s.BaseFs.Stat(s.FileName); err != nil {
t.Errorf("Got error %s", err.Error())
}
switch s.BaseFs {
case level3Fs:
pathToExist := filepath.Join(ds.Dir3, s.FileName)
if _, err := level2Fs.Stat(pathToExist); err != nil {
t.Errorf("Got error %s (path %s)", err.Error(), pathToExist)
}
case level2Fs:
pathToExist := filepath.Join(ds.Dir2, ds.Dir3, s.FileName)
if _, err := level1Fs.Stat(pathToExist); err != nil {
t.Errorf("Got error %s (path %s)", err.Error(), pathToExist)
}
}
}
}
}
func TestBasePathOpenFile(t *testing.T) {
baseFs := &MemMapFs{}
baseFs.MkdirAll("/base/path/tmp", 0o777)
bp := NewBasePathFs(baseFs, "/base/path")
f, err := bp.OpenFile("/tmp/file.txt", os.O_CREATE, 0o600)
if err != nil {
t.Fatalf("failed to open file: %v", err)
}
if filepath.Dir(f.Name()) != filepath.Clean("/tmp") {
t.Fatalf("realpath leaked: %s", f.Name())
}
}
func TestBasePathCreate(t *testing.T) {
baseFs := &MemMapFs{}
baseFs.MkdirAll("/base/path/tmp", 0o777)
bp := NewBasePathFs(baseFs, "/base/path")
f, err := bp.Create("/tmp/file.txt")
if err != nil {
t.Fatalf("failed to create file: %v", err)
}
if filepath.Dir(f.Name()) != filepath.Clean("/tmp") {
t.Fatalf("realpath leaked: %s", f.Name())
}
}
func TestBasePathTempFile(t *testing.T) {
baseFs := &MemMapFs{}
baseFs.MkdirAll("/base/path/tmp", 0o777)
bp := NewBasePathFs(baseFs, "/base/path")
tDir, err := TempDir(bp, "/tmp", "")
if err != nil {
t.Fatalf("Failed to TempDir: %v", err)
}
if filepath.Dir(tDir) != filepath.Clean("/tmp") {
t.Fatalf("Tempdir realpath leaked: %s", tDir)
}
tempFile, err := TempFile(bp, tDir, "")
if err != nil {
t.Fatalf("Failed to TempFile: %v", err)
}
defer tempFile.Close()
if expected, actual := tDir, filepath.Dir(tempFile.Name()); expected != actual {
t.Fatalf("TempFile realpath leaked: expected %s, got %s", expected, actual)
}
}
spf13-afero-18d690e/cacheOnReadFs.go 0000664 0000000 0000000 00000016507 15057601571 0017076 0 ustar 00root root 0000000 0000000 package afero
import (
"os"
"syscall"
"time"
)
// If the cache duration is 0, cache time will be unlimited, i.e. once
// a file is in the layer, the base will never be read again for this file.
//
// For cache times greater than 0, the modification time of a file is
// checked. Note that a lot of file system implementations only allow a
// resolution of a second for timestamps... or as the godoc for os.Chtimes()
// states: "The underlying filesystem may truncate or round the values to a
// less precise time unit."
//
// This caching union will forward all write calls also to the base file
// system first. To prevent writing to the base Fs, wrap it in a read-only
// filter - Note: this will also make the overlay read-only; for writing files
// in the overlay, use the overlay Fs directly, not via the union Fs.
type CacheOnReadFs struct {
base Fs
layer Fs
cacheTime time.Duration
}
func NewCacheOnReadFs(base Fs, layer Fs, cacheTime time.Duration) Fs {
return &CacheOnReadFs{base: base, layer: layer, cacheTime: cacheTime}
}
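// Illustrative usage sketch (not part of the library; the path below is made up
// for the example). From a consumer's point of view, a slow or remote base can
// be fronted by an in-memory layer like this:
//
//	import (
//		"time"
//
//		"github.com/spf13/afero"
//	)
//
//	func newCachedFs() afero.Fs {
//		base := afero.NewOsFs()      // source of truth
//		layer := afero.NewMemMapFs() // fast in-memory cache
//		// Files read through the union are copied into the layer and served
//		// from there for up to one minute before the base is consulted again.
//		return afero.NewCacheOnReadFs(base, layer, time.Minute)
//	}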
type cacheState int
const (
// not present in the overlay, unknown if it exists in the base:
cacheMiss cacheState = iota
// present in the overlay and in base, base file is newer:
cacheStale
	// present in the overlay - with cacheTime == 0 it may exist in the base,
	// with cacheTime > 0 it exists in the base and is the same age or newer in the
// overlay
cacheHit
// happens if someone writes directly to the overlay without
// going through this union
cacheLocal
)
func (u *CacheOnReadFs) cacheStatus(name string) (state cacheState, fi os.FileInfo, err error) {
var lfi, bfi os.FileInfo
lfi, err = u.layer.Stat(name)
if err == nil {
if u.cacheTime == 0 {
return cacheHit, lfi, nil
}
if lfi.ModTime().Add(u.cacheTime).Before(time.Now()) {
bfi, err = u.base.Stat(name)
if err != nil {
return cacheLocal, lfi, nil
}
if bfi.ModTime().After(lfi.ModTime()) {
return cacheStale, bfi, nil
}
}
return cacheHit, lfi, nil
}
if err == syscall.ENOENT || os.IsNotExist(err) {
return cacheMiss, nil, nil
}
return cacheMiss, nil, err
}
func (u *CacheOnReadFs) copyToLayer(name string) error {
return copyToLayer(u.base, u.layer, name)
}
func (u *CacheOnReadFs) copyFileToLayer(name string, flag int, perm os.FileMode) error {
return copyFileToLayer(u.base, u.layer, name, flag, perm)
}
func (u *CacheOnReadFs) Chtimes(name string, atime, mtime time.Time) error {
st, _, err := u.cacheStatus(name)
if err != nil {
return err
}
switch st {
case cacheLocal:
case cacheHit:
err = u.base.Chtimes(name, atime, mtime)
case cacheStale, cacheMiss:
if err := u.copyToLayer(name); err != nil {
return err
}
err = u.base.Chtimes(name, atime, mtime)
}
if err != nil {
return err
}
return u.layer.Chtimes(name, atime, mtime)
}
func (u *CacheOnReadFs) Chmod(name string, mode os.FileMode) error {
st, _, err := u.cacheStatus(name)
if err != nil {
return err
}
switch st {
case cacheLocal:
case cacheHit:
err = u.base.Chmod(name, mode)
case cacheStale, cacheMiss:
if err := u.copyToLayer(name); err != nil {
return err
}
err = u.base.Chmod(name, mode)
}
if err != nil {
return err
}
return u.layer.Chmod(name, mode)
}
func (u *CacheOnReadFs) Chown(name string, uid, gid int) error {
st, _, err := u.cacheStatus(name)
if err != nil {
return err
}
switch st {
case cacheLocal:
case cacheHit:
err = u.base.Chown(name, uid, gid)
case cacheStale, cacheMiss:
if err := u.copyToLayer(name); err != nil {
return err
}
err = u.base.Chown(name, uid, gid)
}
if err != nil {
return err
}
return u.layer.Chown(name, uid, gid)
}
func (u *CacheOnReadFs) Stat(name string) (os.FileInfo, error) {
st, fi, err := u.cacheStatus(name)
if err != nil {
return nil, err
}
switch st {
case cacheMiss:
return u.base.Stat(name)
	default: // cacheStale carries the base os.FileInfo; cacheHit and cacheLocal carry the layer's
return fi, nil
}
}
func (u *CacheOnReadFs) Rename(oldname, newname string) error {
st, _, err := u.cacheStatus(oldname)
if err != nil {
return err
}
switch st {
case cacheLocal:
case cacheHit:
err = u.base.Rename(oldname, newname)
case cacheStale, cacheMiss:
if err := u.copyToLayer(oldname); err != nil {
return err
}
err = u.base.Rename(oldname, newname)
}
if err != nil {
return err
}
return u.layer.Rename(oldname, newname)
}
func (u *CacheOnReadFs) Remove(name string) error {
st, _, err := u.cacheStatus(name)
if err != nil {
return err
}
switch st {
case cacheLocal:
case cacheHit, cacheStale, cacheMiss:
err = u.base.Remove(name)
}
if err != nil {
return err
}
return u.layer.Remove(name)
}
func (u *CacheOnReadFs) RemoveAll(name string) error {
st, _, err := u.cacheStatus(name)
if err != nil {
return err
}
switch st {
case cacheLocal:
case cacheHit, cacheStale, cacheMiss:
err = u.base.RemoveAll(name)
}
if err != nil {
return err
}
return u.layer.RemoveAll(name)
}
func (u *CacheOnReadFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
st, _, err := u.cacheStatus(name)
if err != nil {
return nil, err
}
switch st {
case cacheLocal, cacheHit:
default:
if err := u.copyFileToLayer(name, flag, perm); err != nil {
return nil, err
}
}
if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
bfi, err := u.base.OpenFile(name, flag, perm)
if err != nil {
return nil, err
}
lfi, err := u.layer.OpenFile(name, flag, perm)
if err != nil {
bfi.Close() // oops, what if O_TRUNC was set and file opening in the layer failed...?
return nil, err
}
return &UnionFile{Base: bfi, Layer: lfi}, nil
}
return u.layer.OpenFile(name, flag, perm)
}
func (u *CacheOnReadFs) Open(name string) (File, error) {
st, fi, err := u.cacheStatus(name)
if err != nil {
return nil, err
}
switch st {
case cacheLocal:
return u.layer.Open(name)
case cacheMiss:
bfi, err := u.base.Stat(name)
if err != nil {
return nil, err
}
if bfi.IsDir() {
return u.base.Open(name)
}
if err := u.copyToLayer(name); err != nil {
return nil, err
}
return u.layer.Open(name)
case cacheStale:
if !fi.IsDir() {
if err := u.copyToLayer(name); err != nil {
return nil, err
}
return u.layer.Open(name)
}
case cacheHit:
if !fi.IsDir() {
return u.layer.Open(name)
}
}
// the dirs from cacheHit, cacheStale fall down here:
bfile, _ := u.base.Open(name)
lfile, err := u.layer.Open(name)
if err != nil && bfile == nil {
return nil, err
}
return &UnionFile{Base: bfile, Layer: lfile}, nil
}
func (u *CacheOnReadFs) Mkdir(name string, perm os.FileMode) error {
err := u.base.Mkdir(name, perm)
if err != nil {
return err
}
return u.layer.MkdirAll(name, perm) // yes, MkdirAll... we cannot assume it exists in the cache
}
func (u *CacheOnReadFs) Name() string {
return "CacheOnReadFs"
}
func (u *CacheOnReadFs) MkdirAll(name string, perm os.FileMode) error {
err := u.base.MkdirAll(name, perm)
if err != nil {
return err
}
return u.layer.MkdirAll(name, perm)
}
func (u *CacheOnReadFs) Create(name string) (File, error) {
bfh, err := u.base.Create(name)
if err != nil {
return nil, err
}
lfh, err := u.layer.Create(name)
if err != nil {
		// oops, see comment about O_TRUNC above, should we remove? then we have to
// remember if the file did not exist before
bfh.Close()
return nil, err
}
return &UnionFile{Base: bfh, Layer: lfh}, nil
}
spf13-afero-18d690e/composite_test.go 0000664 0000000 0000000 00000026036 15057601571 0017510 0 ustar 00root root 0000000 0000000 package afero
import (
"bytes"
"fmt"
"io"
"os"
"testing"
"time"
)
var tempDirs []string
func NewTempOsBaseFs(t *testing.T) Fs {
name, err := TempDir(NewOsFs(), "", "")
if err != nil {
t.Error("error creating tempDir", err)
}
tempDirs = append(tempDirs, name)
return NewBasePathFs(NewOsFs(), name)
}
func CleanupTempDirs(t *testing.T) {
osfs := NewOsFs()
type ev struct {
path string
e error
}
errs := []ev{}
for _, x := range tempDirs {
err := osfs.RemoveAll(x)
if err != nil {
errs = append(errs, ev{path: x, e: err})
}
}
for _, e := range errs {
fmt.Println("error removing tempDir", e.path, e.e)
}
if len(errs) > 0 {
t.Error("error cleaning up tempDirs")
}
tempDirs = []string{}
}
func TestUnionCreateExisting(t *testing.T) {
base := &MemMapFs{}
roBase := &ReadOnlyFs{source: base}
ufs := NewCopyOnWriteFs(roBase, &MemMapFs{})
base.MkdirAll("/home/test", 0o777)
fh, _ := base.Create("/home/test/file.txt")
fh.WriteString("This is a test")
fh.Close()
fh, err := ufs.OpenFile("/home/test/file.txt", os.O_RDWR, 0o666)
if err != nil {
t.Errorf("Failed to open file r/w: %s", err)
}
_, err = fh.Write([]byte("####"))
if err != nil {
t.Errorf("Failed to write file: %s", err)
}
fh.Seek(0, 0)
data, err := io.ReadAll(fh)
if err != nil {
t.Errorf("Failed to read file: %s", err)
}
if string(data) != "#### is a test" {
t.Errorf("Got wrong data")
}
fh.Close()
fh, _ = base.Open("/home/test/file.txt")
data, _ = io.ReadAll(fh)
if string(data) != "This is a test" {
t.Errorf("Got wrong data in base file")
}
fh.Close()
fh, err = ufs.Create("/home/test/file.txt")
switch err {
case nil:
if fi, _ := fh.Stat(); fi.Size() != 0 {
t.Errorf("Create did not truncate file")
}
fh.Close()
default:
t.Errorf("Create failed on existing file")
}
}
func TestUnionMergeReaddir(t *testing.T) {
base := &MemMapFs{}
roBase := &ReadOnlyFs{source: base}
ufs := &CopyOnWriteFs{base: roBase, layer: &MemMapFs{}}
base.MkdirAll("/home/test", 0o777)
fh, _ := base.Create("/home/test/file.txt")
fh.WriteString("This is a test")
fh.Close()
fh, _ = ufs.Create("/home/test/file2.txt")
fh.WriteString("This is a test")
fh.Close()
fh, _ = ufs.Open("/home/test")
files, err := fh.Readdirnames(-1)
if err != nil {
t.Errorf("Readdirnames failed")
}
if len(files) != 2 {
t.Errorf("Got wrong number of files: %v", files)
}
}
func TestExistingDirectoryCollisionReaddir(t *testing.T) {
base := &MemMapFs{}
roBase := &ReadOnlyFs{source: base}
overlay := &MemMapFs{}
ufs := &CopyOnWriteFs{base: roBase, layer: overlay}
base.MkdirAll("/home/test", 0o777)
fh, _ := base.Create("/home/test/file.txt")
fh.WriteString("This is a test")
fh.Close()
overlay.MkdirAll("home/test", 0o777)
fh, _ = overlay.Create("/home/test/file2.txt")
fh.WriteString("This is a test")
fh.Close()
fh, _ = ufs.Create("/home/test/file3.txt")
fh.WriteString("This is a test")
fh.Close()
fh, _ = ufs.Open("/home/test")
files, err := fh.Readdirnames(-1)
if err != nil {
t.Errorf("Readdirnames failed")
}
if len(files) != 3 {
t.Errorf("Got wrong number of files in union: %v", files)
}
fh, _ = overlay.Open("/home/test")
files, err = fh.Readdirnames(-1)
if err != nil {
t.Errorf("Readdirnames failed")
}
if len(files) != 2 {
t.Errorf("Got wrong number of files in overlay: %v", files)
}
}
func TestNestedDirBaseReaddir(t *testing.T) {
base := &MemMapFs{}
roBase := &ReadOnlyFs{source: base}
overlay := &MemMapFs{}
ufs := &CopyOnWriteFs{base: roBase, layer: overlay}
base.MkdirAll("/home/test/foo/bar", 0o777)
fh, _ := base.Create("/home/test/file.txt")
fh.WriteString("This is a test")
fh.Close()
fh, _ = base.Create("/home/test/foo/file2.txt")
fh.WriteString("This is a test")
fh.Close()
fh, _ = base.Create("/home/test/foo/bar/file3.txt")
fh.WriteString("This is a test")
fh.Close()
overlay.MkdirAll("/", 0o777)
// Opening something only in the base
fh, _ = ufs.Open("/home/test/foo")
list, err := fh.Readdir(-1)
if err != nil {
t.Errorf("Readdir failed %s", err)
}
if len(list) != 2 {
for _, x := range list {
fmt.Println(x.Name())
}
t.Errorf("Got wrong number of files in union: %v", len(list))
}
}
func TestNestedDirOverlayReaddir(t *testing.T) {
base := &MemMapFs{}
roBase := &ReadOnlyFs{source: base}
overlay := &MemMapFs{}
ufs := &CopyOnWriteFs{base: roBase, layer: overlay}
base.MkdirAll("/", 0o777)
overlay.MkdirAll("/home/test/foo/bar", 0o777)
fh, _ := overlay.Create("/home/test/file.txt")
fh.WriteString("This is a test")
fh.Close()
fh, _ = overlay.Create("/home/test/foo/file2.txt")
fh.WriteString("This is a test")
fh.Close()
fh, _ = overlay.Create("/home/test/foo/bar/file3.txt")
fh.WriteString("This is a test")
fh.Close()
// Opening nested dir only in the overlay
fh, _ = ufs.Open("/home/test/foo")
list, err := fh.Readdir(-1)
if err != nil {
t.Errorf("Readdir failed %s", err)
}
if len(list) != 2 {
for _, x := range list {
fmt.Println(x.Name())
}
t.Errorf("Got wrong number of files in union: %v", len(list))
}
}
func TestNestedDirOverlayOsFsReaddir(t *testing.T) {
defer CleanupTempDirs(t)
base := NewTempOsBaseFs(t)
roBase := &ReadOnlyFs{source: base}
overlay := NewTempOsBaseFs(t)
ufs := &CopyOnWriteFs{base: roBase, layer: overlay}
base.MkdirAll("/", 0o777)
overlay.MkdirAll("/home/test/foo/bar", 0o777)
fh, _ := overlay.Create("/home/test/file.txt")
fh.WriteString("This is a test")
fh.Close()
fh, _ = overlay.Create("/home/test/foo/file2.txt")
fh.WriteString("This is a test")
fh.Close()
fh, _ = overlay.Create("/home/test/foo/bar/file3.txt")
fh.WriteString("This is a test")
fh.Close()
// Opening nested dir only in the overlay
fh, _ = ufs.Open("/home/test/foo")
list, err := fh.Readdir(-1)
fh.Close()
if err != nil {
t.Errorf("Readdir failed %s", err)
}
if len(list) != 2 {
for _, x := range list {
fmt.Println(x.Name())
}
t.Errorf("Got wrong number of files in union: %v", len(list))
}
}
func TestCopyOnWriteFsWithOsFs(t *testing.T) {
defer CleanupTempDirs(t)
base := NewTempOsBaseFs(t)
roBase := &ReadOnlyFs{source: base}
overlay := NewTempOsBaseFs(t)
ufs := &CopyOnWriteFs{base: roBase, layer: overlay}
base.MkdirAll("/home/test", 0o777)
fh, _ := base.Create("/home/test/file.txt")
fh.WriteString("This is a test")
fh.Close()
overlay.MkdirAll("home/test", 0o777)
fh, _ = overlay.Create("/home/test/file2.txt")
fh.WriteString("This is a test")
fh.Close()
fh, _ = ufs.Create("/home/test/file3.txt")
fh.WriteString("This is a test")
fh.Close()
fh, _ = ufs.Open("/home/test")
files, err := fh.Readdirnames(-1)
fh.Close()
if err != nil {
t.Errorf("Readdirnames failed")
}
if len(files) != 3 {
t.Errorf("Got wrong number of files in union: %v", files)
}
fh, _ = overlay.Open("/home/test")
files, err = fh.Readdirnames(-1)
fh.Close()
if err != nil {
t.Errorf("Readdirnames failed")
}
if len(files) != 2 {
t.Errorf("Got wrong number of files in overlay: %v", files)
}
}
func TestUnionCacheWrite(t *testing.T) {
base := &MemMapFs{}
layer := &MemMapFs{}
ufs := NewCacheOnReadFs(base, layer, 0)
base.Mkdir("/data", 0o777)
fh, err := ufs.Create("/data/file.txt")
if err != nil {
t.Errorf("Failed to create file")
}
_, err = fh.Write([]byte("This is a test"))
if err != nil {
t.Errorf("Failed to write file")
}
fh.Seek(0, io.SeekStart)
buf := make([]byte, 4)
_, _ = fh.Read(buf)
fh.Write([]byte(" IS A"))
fh.Close()
baseData, _ := ReadFile(base, "/data/file.txt")
layerData, _ := ReadFile(layer, "/data/file.txt")
if string(baseData) != string(layerData) {
t.Errorf("Different data: %s <=> %s", baseData, layerData)
}
}
func TestUnionCacheExpire(t *testing.T) {
base := &MemMapFs{}
layer := &MemMapFs{}
ufs := &CacheOnReadFs{base: base, layer: layer, cacheTime: 1 * time.Second}
base.Mkdir("/data", 0o777)
fh, err := ufs.Create("/data/file.txt")
if err != nil {
t.Errorf("Failed to create file")
}
_, err = fh.Write([]byte("This is a test"))
if err != nil {
t.Errorf("Failed to write file")
}
fh.Close()
fh, _ = base.Create("/data/file.txt")
// sleep some time, so we really get a different time.Now() on write...
time.Sleep(2 * time.Second)
fh.WriteString("Another test")
fh.Close()
data, _ := ReadFile(ufs, "/data/file.txt")
if string(data) != "Another test" {
t.Errorf("cache time failed: <%s>", data)
}
}
func TestCacheOnReadFsNotInLayer(t *testing.T) {
base := NewMemMapFs()
layer := NewMemMapFs()
fs := NewCacheOnReadFs(base, layer, 0)
fh, err := base.Create("/file.txt")
if err != nil {
t.Fatal("unable to create file: ", err)
}
txt := []byte("This is a test")
fh.Write(txt)
fh.Close()
fh, err = fs.Open("/file.txt")
if err != nil {
t.Fatal("could not open file: ", err)
}
b, err := ReadAll(fh)
fh.Close()
if err != nil {
t.Fatal("could not read file: ", err)
} else if !bytes.Equal(txt, b) {
t.Fatalf("wanted file text %q, got %q", txt, b)
}
fh, err = layer.Open("/file.txt")
if err != nil {
t.Fatal("could not open file from layer: ", err)
}
fh.Close()
}
// #194
func TestUnionFileReaddirEmpty(t *testing.T) {
osFs := NewOsFs()
base := NewMemMapFs()
overlay := NewMemMapFs()
ufs := &CopyOnWriteFs{base: base, layer: overlay}
mem := NewMemMapFs()
// The OS file will return io.EOF on end of directory.
for _, fs := range []Fs{osFs, ufs, mem} {
baseDir, err := TempDir(fs, "", "empty-dir")
if err != nil {
t.Fatal(err)
}
f, err := fs.Open(baseDir)
if err != nil {
t.Fatal(err)
}
names, err := f.Readdirnames(1)
if err != io.EOF {
t.Fatal(err)
}
if len(names) != 0 {
t.Fatal("should be empty")
}
f.Close()
fs.RemoveAll(baseDir)
}
}
// #197
func TestUnionFileReaddirDuplicateEmpty(t *testing.T) {
base := NewMemMapFs()
dir, err := TempDir(base, "", "empty-dir")
if err != nil {
t.Fatal(err)
}
// Overlay shares same empty directory as base
overlay := NewMemMapFs()
err = overlay.Mkdir(dir, 0o700)
if err != nil {
t.Fatal(err)
}
ufs := &CopyOnWriteFs{base: base, layer: overlay}
f, err := ufs.Open(dir)
if err != nil {
t.Fatal(err)
}
defer f.Close()
names, err := f.Readdirnames(0)
if err == io.EOF {
t.Errorf("unexpected io.EOF error")
}
if len(names) != 0 {
t.Fatal("should be empty")
}
}
func TestUnionFileReaddirAskForTooMany(t *testing.T) {
base := &MemMapFs{}
overlay := &MemMapFs{}
const testFiles = 5
for i := 0; i < testFiles; i++ {
WriteFile(base, fmt.Sprintf("file%d.txt", i), []byte("afero"), 0o777)
}
ufs := &CopyOnWriteFs{base: base, layer: overlay}
f, err := ufs.Open("")
if err != nil {
t.Fatal(err)
}
defer f.Close()
// Read part of all files
wantNames := 3
names, err := f.Readdirnames(wantNames)
if err != nil {
t.Fatal(err)
}
if len(names) != wantNames {
t.Fatalf("got %d names %v, want %d", len(names), names, wantNames)
}
// Try to read more files than remaining
wantNames = testFiles - len(names)
names, err = f.Readdirnames(wantNames + 1)
if err != nil {
t.Fatal(err)
}
if len(names) != wantNames {
t.Fatalf("got %d names %v, want %d", len(names), names, wantNames)
}
// End of directory
_, err = f.Readdirnames(3)
if err != io.EOF {
t.Fatal(err)
}
}
spf13-afero-18d690e/const_bsds.go 0000664 0000000 0000000 00000001446 15057601571 0016606 0 ustar 00root root 0000000 0000000 // Copyright © 2016 Steve Francia .
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build aix || darwin || openbsd || freebsd || netbsd || dragonfly || zos
// +build aix darwin openbsd freebsd netbsd dragonfly zos
package afero
import (
"syscall"
)
const BADFD = syscall.EBADF
spf13-afero-18d690e/const_win_unix.go 0000664 0000000 0000000 00000001464 15057601571 0017513 0 ustar 00root root 0000000 0000000 // Copyright © 2016 Steve Francia .
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !darwin && !openbsd && !freebsd && !dragonfly && !netbsd && !aix && !zos
// +build !darwin,!openbsd,!freebsd,!dragonfly,!netbsd,!aix,!zos
package afero
import (
"syscall"
)
const BADFD = syscall.EBADFD
spf13-afero-18d690e/copyOnWriteFs.go 0000664 0000000 0000000 00000017150 15057601571 0017217 0 ustar 00root root 0000000 0000000 package afero
import (
"fmt"
"os"
"path/filepath"
"syscall"
"time"
)
var _ Lstater = (*CopyOnWriteFs)(nil)
// The CopyOnWriteFs is a union filesystem: a read only base file system with
// a possibly writeable layer on top. Changes to the file system will only
// be made in the overlay: Changing an existing file in the base layer which
// is not present in the overlay will copy the file to the overlay ("changing"
// includes also calls to e.g. Chtimes(), Chmod() and Chown()).
//
// Reading directories is currently only supported via Open(), not OpenFile().
type CopyOnWriteFs struct {
base Fs
layer Fs
}
func NewCopyOnWriteFs(base Fs, layer Fs) Fs {
return &CopyOnWriteFs{base: base, layer: layer}
}
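// Illustrative usage sketch (not part of the library; paths are assumptions made
// up for the example). A read-only base combined with a writeable in-memory
// overlay keeps every modification out of the base:
//
//	import "github.com/spf13/afero"
//
//	func newScratchFs() afero.Fs {
//		base := afero.NewReadOnlyFs(afero.NewOsFs())
//		overlay := afero.NewMemMapFs()
//		ufs := afero.NewCopyOnWriteFs(base, overlay)
//		// Both calls only touch the overlay; the OS file system stays untouched.
//		_ = ufs.MkdirAll("/scratch", 0o755)
//		_ = afero.WriteFile(ufs, "/scratch/notes.txt", []byte("draft"), 0o644)
//		return ufs
//	}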
// Returns true if the file is not in the overlay
func (u *CopyOnWriteFs) isBaseFile(name string) (bool, error) {
if _, err := u.layer.Stat(name); err == nil {
return false, nil
}
_, err := u.base.Stat(name)
if err != nil {
if oerr, ok := err.(*os.PathError); ok {
if oerr.Err == os.ErrNotExist || oerr.Err == syscall.ENOENT ||
oerr.Err == syscall.ENOTDIR {
return false, nil
}
}
if err == syscall.ENOENT {
return false, nil
}
}
return true, err
}
func (u *CopyOnWriteFs) copyToLayer(name string) error {
return copyToLayer(u.base, u.layer, name)
}
func (u *CopyOnWriteFs) Chtimes(name string, atime, mtime time.Time) error {
b, err := u.isBaseFile(name)
if err != nil {
return err
}
if b {
if err := u.copyToLayer(name); err != nil {
return err
}
}
return u.layer.Chtimes(name, atime, mtime)
}
func (u *CopyOnWriteFs) Chmod(name string, mode os.FileMode) error {
b, err := u.isBaseFile(name)
if err != nil {
return err
}
if b {
if err := u.copyToLayer(name); err != nil {
return err
}
}
return u.layer.Chmod(name, mode)
}
func (u *CopyOnWriteFs) Chown(name string, uid, gid int) error {
b, err := u.isBaseFile(name)
if err != nil {
return err
}
if b {
if err := u.copyToLayer(name); err != nil {
return err
}
}
return u.layer.Chown(name, uid, gid)
}
func (u *CopyOnWriteFs) Stat(name string) (os.FileInfo, error) {
fi, err := u.layer.Stat(name)
if err != nil {
isNotExist := u.isNotExist(err)
if isNotExist {
return u.base.Stat(name)
}
return nil, err
}
return fi, nil
}
func (u *CopyOnWriteFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
llayer, ok1 := u.layer.(Lstater)
lbase, ok2 := u.base.(Lstater)
if ok1 {
fi, b, err := llayer.LstatIfPossible(name)
if err == nil {
return fi, b, nil
}
if !u.isNotExist(err) {
return nil, b, err
}
}
if ok2 {
fi, b, err := lbase.LstatIfPossible(name)
if err == nil {
return fi, b, nil
}
if !u.isNotExist(err) {
return nil, b, err
}
}
fi, err := u.Stat(name)
return fi, false, err
}
func (u *CopyOnWriteFs) SymlinkIfPossible(oldname, newname string) error {
if slayer, ok := u.layer.(Linker); ok {
return slayer.SymlinkIfPossible(oldname, newname)
}
return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink}
}
func (u *CopyOnWriteFs) ReadlinkIfPossible(name string) (string, error) {
if rlayer, ok := u.layer.(LinkReader); ok {
return rlayer.ReadlinkIfPossible(name)
}
if rbase, ok := u.base.(LinkReader); ok {
return rbase.ReadlinkIfPossible(name)
}
return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink}
}
func (u *CopyOnWriteFs) isNotExist(err error) bool {
if e, ok := err.(*os.PathError); ok {
err = e.Err
}
if err == os.ErrNotExist || err == syscall.ENOENT || err == syscall.ENOTDIR {
return true
}
return false
}
// Renaming files present only in the base layer is not permitted
func (u *CopyOnWriteFs) Rename(oldname, newname string) error {
b, err := u.isBaseFile(oldname)
if err != nil {
return err
}
if b {
return syscall.EPERM
}
return u.layer.Rename(oldname, newname)
}
// Removing files present only in the base layer is not permitted. If
// a file is present in the base layer and the overlay, only the overlay
// will be removed.
func (u *CopyOnWriteFs) Remove(name string) error {
err := u.layer.Remove(name)
switch err {
case syscall.ENOENT:
_, err = u.base.Stat(name)
if err == nil {
return syscall.EPERM
}
return syscall.ENOENT
default:
return err
}
}
func (u *CopyOnWriteFs) RemoveAll(name string) error {
err := u.layer.RemoveAll(name)
switch err {
case syscall.ENOENT:
_, err = u.base.Stat(name)
if err == nil {
return syscall.EPERM
}
return syscall.ENOENT
default:
return err
}
}
func (u *CopyOnWriteFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
b, err := u.isBaseFile(name)
if err != nil {
return nil, err
}
if flag&(os.O_WRONLY|os.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
if b {
if err = u.copyToLayer(name); err != nil {
return nil, err
}
return u.layer.OpenFile(name, flag, perm)
}
dir := filepath.Dir(name)
isaDir, err := IsDir(u.base, dir)
if err != nil && !os.IsNotExist(err) {
return nil, err
}
if isaDir {
if err = u.layer.MkdirAll(dir, 0o777); err != nil {
return nil, err
}
return u.layer.OpenFile(name, flag, perm)
}
isaDir, err = IsDir(u.layer, dir)
if err != nil {
return nil, err
}
if isaDir {
return u.layer.OpenFile(name, flag, perm)
}
return nil, &os.PathError{
Op: "open",
Path: name,
Err: syscall.ENOTDIR,
} // ...or os.ErrNotExist?
}
if b {
return u.base.OpenFile(name, flag, perm)
}
return u.layer.OpenFile(name, flag, perm)
}
// This function handles the 9 different possibilities caused
// by the union, which are the combinations of the following...
//
// layer: doesn't exist, exists as a file, and exists as a directory
// base: doesn't exist, exists as a file, and exists as a directory
func (u *CopyOnWriteFs) Open(name string) (File, error) {
// Since the overlay overrides the base we check that first
b, err := u.isBaseFile(name)
if err != nil {
return nil, err
}
// If overlay doesn't exist, return the base (base state irrelevant)
if b {
return u.base.Open(name)
}
// If overlay is a file, return it (base state irrelevant)
dir, err := IsDir(u.layer, name)
if err != nil {
return nil, err
}
if !dir {
return u.layer.Open(name)
}
// Overlay is a directory, base state now matters.
// Base state has 3 states to check but 2 outcomes:
// A. It's a file or non-readable in the base (return just the overlay)
// B. It's an accessible directory in the base (return a UnionFile)
// If base is file or nonreadable, return overlay
dir, err = IsDir(u.base, name)
if !dir || err != nil {
return u.layer.Open(name)
}
// Both base & layer are directories
// Return union file (if opens are without error)
bfile, bErr := u.base.Open(name)
lfile, lErr := u.layer.Open(name)
// If either have errors at this point something is very wrong. Return nil and the errors
if bErr != nil || lErr != nil {
return nil, fmt.Errorf("BaseErr: %v\nOverlayErr: %v", bErr, lErr)
}
return &UnionFile{Base: bfile, Layer: lfile}, nil
}
func (u *CopyOnWriteFs) Mkdir(name string, perm os.FileMode) error {
dir, err := IsDir(u.base, name)
if err != nil {
return u.layer.MkdirAll(name, perm)
}
if dir {
return ErrFileExists
}
return u.layer.MkdirAll(name, perm)
}
func (u *CopyOnWriteFs) Name() string {
return "CopyOnWriteFs"
}
func (u *CopyOnWriteFs) MkdirAll(name string, perm os.FileMode) error {
dir, err := IsDir(u.base, name)
if err != nil {
return u.layer.MkdirAll(name, perm)
}
if dir {
// This is in line with how os.MkdirAll behaves.
return nil
}
return u.layer.MkdirAll(name, perm)
}
func (u *CopyOnWriteFs) Create(name string) (File, error) {
return u.OpenFile(name, os.O_CREATE|os.O_TRUNC|os.O_RDWR, 0o666)
}
spf13-afero-18d690e/copyOnWriteFs_test.go 0000664 0000000 0000000 00000002533 15057601571 0020255 0 ustar 00root root 0000000 0000000 package afero
import (
"os"
"path/filepath"
"testing"
)
func TestCopyOnWrite(t *testing.T) {
osFs := NewOsFs()
writeDir, err := TempDir(osFs, "", "copy-on-write-test")
if err != nil {
t.Fatal("error creating tempDir", err)
}
defer osFs.RemoveAll(writeDir)
compositeFs := NewCopyOnWriteFs(NewReadOnlyFs(NewOsFs()), osFs)
dir := filepath.Join(writeDir, "some/path")
err = compositeFs.MkdirAll(dir, 0o744)
if err != nil {
t.Fatal(err)
}
_, err = compositeFs.Create(filepath.Join(dir, "newfile"))
if err != nil {
t.Fatal(err)
}
// https://github.com/spf13/afero/issues/189
// We want the composite file system to behave like the OS file system
// on Mkdir and MkdirAll
for _, fs := range []Fs{osFs, compositeFs} {
err = fs.Mkdir(dir, 0o744)
if err == nil || !os.IsExist(err) {
t.Errorf("Mkdir: Got %q for %T", err, fs)
}
// MkdirAll does not return an error when the directory already exists
err = fs.MkdirAll(dir, 0o744)
if err != nil {
t.Errorf("MkdirAll: Got %q for %T", err, fs)
}
}
}
func TestCopyOnWriteFileInMemMapBase(t *testing.T) {
base := &MemMapFs{}
layer := &MemMapFs{}
if err := WriteFile(base, "base.txt", []byte("base"), 0o755); err != nil {
t.Fatalf("Failed to write file: %s", err)
}
ufs := NewCopyOnWriteFs(base, layer)
_, err := ufs.Stat("base.txt")
if err != nil {
t.Fatal(err)
}
}
spf13-afero-18d690e/gcsfs/ 0000775 0000000 0000000 00000000000 15057601571 0015216 5 ustar 00root root 0000000 0000000 spf13-afero-18d690e/gcsfs/errors.go 0000664 0000000 0000000 00000002222 15057601571 0017057 0 ustar 00root root 0000000 0000000 // Copyright © 2021 Vasily Ovchinnikov .
//
// The code in this file is derived from afero fork github.com/Zatte/afero by Mikael Rapp
// licensed under Apache License 2.0.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gcsfs
import (
"errors"
"syscall"
"cloud.google.com/go/storage"
)
var (
ErrNoBucketInName = errors.New("no bucket name found in the name")
ErrFileClosed = errors.New("file is closed")
ErrOutOfRange = errors.New("out of range")
ErrObjectDoesNotExist = storage.ErrObjectNotExist
ErrEmptyObjectName = errors.New("storage: object name is empty")
ErrFileNotFound = syscall.ENOENT
)
spf13-afero-18d690e/gcsfs/file.go 0000664 0000000 0000000 00000015045 15057601571 0016471 0 ustar 00root root 0000000 0000000 // Copyright © 2021 Vasily Ovchinnikov .
//
// The code in this file is derived from afero fork github.com/Zatte/afero by Mikael Rapp
// licensed under Apache License 2.0.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gcsfs
import (
"context"
"fmt"
"io"
"log"
"os"
"path/filepath"
"sort"
"syscall"
"cloud.google.com/go/storage"
"google.golang.org/api/iterator"
"github.com/spf13/afero/gcsfs/internal/stiface"
)
// GcsFs is the Afero version adapted for GCS
type GcsFile struct {
openFlags int
fhOffset int64 // File handle specific offset
closed bool
ReadDirIt stiface.ObjectIterator
resource *gcsFileResource
}
func NewGcsFile(
ctx context.Context,
fs *Fs,
obj stiface.ObjectHandle,
openFlags int,
// Unused: there is no use to the file mode in GCloud just yet - but we keep it here, just in case we need it
fileMode os.FileMode,
name string,
) *GcsFile {
return &GcsFile{
openFlags: openFlags,
fhOffset: 0,
closed: false,
ReadDirIt: nil,
resource: &gcsFileResource{
ctx: ctx,
fs: fs,
obj: obj,
name: name,
fileMode: fileMode,
currentGcsSize: 0,
offset: 0,
reader: nil,
writer: nil,
},
}
}
func NewGcsFileFromOldFH(
openFlags int,
fileMode os.FileMode,
oldFile *gcsFileResource,
) *GcsFile {
res := &GcsFile{
openFlags: openFlags,
fhOffset: 0,
closed: false,
ReadDirIt: nil,
resource: oldFile,
}
res.resource.fileMode = fileMode
return res
}
func (o *GcsFile) Close() error {
if o.closed {
// the afero spec expects the call to Close on a closed file to return an error
return ErrFileClosed
}
o.closed = true
return o.resource.Close()
}
func (o *GcsFile) Seek(newOffset int64, whence int) (int64, error) {
if o.closed {
return 0, ErrFileClosed
}
	// Since this is an expensive operation, let's make sure we need it
if (whence == 0 && newOffset == o.fhOffset) || (whence == 1 && newOffset == 0) {
return o.fhOffset, nil
}
log.Printf(
"WARNING: Seek behavior triggered, highly inefficent. Offset before seek is at %d\n",
o.fhOffset,
)
	// Force the readers/writers to be reopened (at the correct offset)
err := o.Sync()
if err != nil {
return 0, err
}
stat, err := o.Stat()
if err != nil {
		return 0, err
}
switch whence {
case 0:
o.fhOffset = newOffset
case 1:
o.fhOffset += newOffset
case 2:
o.fhOffset = stat.Size() + newOffset
}
return o.fhOffset, nil
}
func (o *GcsFile) Read(p []byte) (n int, err error) {
return o.ReadAt(p, o.fhOffset)
}
func (o *GcsFile) ReadAt(p []byte, off int64) (n int, err error) {
if o.closed {
return 0, ErrFileClosed
}
read, err := o.resource.ReadAt(p, off)
o.fhOffset += int64(read)
return read, err
}
func (o *GcsFile) Write(p []byte) (n int, err error) {
return o.WriteAt(p, o.fhOffset)
}
func (o *GcsFile) WriteAt(b []byte, off int64) (n int, err error) {
if o.closed {
return 0, ErrFileClosed
}
	if o.openFlags&(os.O_WRONLY|os.O_RDWR) == 0 {
		return 0, fmt.Errorf("file is opened as read only")
}
_, err = o.resource.obj.Attrs(o.resource.ctx)
if err != nil {
if err == storage.ErrObjectNotExist {
if o.openFlags&os.O_CREATE == 0 {
return 0, ErrFileNotFound
}
} else {
return 0, fmt.Errorf("error getting file attributes: %v", err)
}
}
written, err := o.resource.WriteAt(b, off)
o.fhOffset += int64(written)
return written, err
}
func (o *GcsFile) Name() string {
return filepath.FromSlash(o.resource.name)
}
func (o *GcsFile) readdirImpl(count int) ([]*FileInfo, error) {
err := o.Sync()
if err != nil {
return nil, err
}
var ownInfo os.FileInfo
ownInfo, err = o.Stat()
if err != nil {
return nil, err
}
if !ownInfo.IsDir() {
return nil, syscall.ENOTDIR
}
path := o.resource.fs.ensureTrailingSeparator(o.resource.name)
if o.ReadDirIt == nil {
// log.Printf("Querying path : %s\n", path)
bucketName, bucketPath := o.resource.fs.splitName(path)
o.ReadDirIt = o.resource.fs.client.Bucket(bucketName).Objects(
o.resource.ctx, &storage.Query{Delimiter: o.resource.fs.separator, Prefix: bucketPath, Versions: false})
}
var res []*FileInfo
for {
object, err := o.ReadDirIt.Next()
if err == iterator.Done {
// reset the iterator
o.ReadDirIt = nil
if len(res) > 0 || count <= 0 {
return res, nil
}
return res, io.EOF
}
if err != nil {
return res, err
}
tmp := newFileInfoFromAttrs(object, o.resource.fileMode)
if tmp.Name() == "" {
			// neither object.Name nor object.Prefix was present - so let's skip this unknown thing
continue
}
if object.Name == "" && object.Prefix == "" {
continue
}
if tmp.Name() == ownInfo.Name() {
			// skip the entry for the directory itself, which the listing also returns
continue
}
res = append(res, tmp)
// This would interrupt the iteration, once we reach the count.
// But it would then have files coming before folders - that's not what we want to have exactly,
// since it makes the results unpredictable. Hence, we iterate all the objects and then do
// the cut-off in a higher level method
//if count > 0 && len(res) >= count {
// break
//}
}
// return res, nil
}
func (o *GcsFile) Readdir(count int) ([]os.FileInfo, error) {
fi, err := o.readdirImpl(count)
if len(fi) > 0 {
sort.Sort(ByName(fi))
}
	if count > 0 && count < len(fi) {
fi = fi[:count]
}
var res []os.FileInfo
for _, f := range fi {
res = append(res, f)
}
return res, err
}
func (o *GcsFile) Readdirnames(n int) ([]string, error) {
fi, err := o.Readdir(n)
if err != nil && err != io.EOF {
return nil, err
}
names := make([]string, len(fi))
for i, f := range fi {
names[i] = f.Name()
}
return names, err
}
func (o *GcsFile) Stat() (os.FileInfo, error) {
err := o.Sync()
if err != nil {
return nil, err
}
return newFileInfo(o.resource.name, o.resource.fs, o.resource.fileMode)
}
func (o *GcsFile) Sync() error {
return o.resource.maybeCloseIo()
}
func (o *GcsFile) Truncate(wantedSize int64) error {
if o.closed {
return ErrFileClosed
}
if o.openFlags == os.O_RDONLY {
return fmt.Errorf("file was opened as read only")
}
return o.resource.Truncate(wantedSize)
}
func (o *GcsFile) WriteString(s string) (ret int, err error) {
return o.Write([]byte(s))
}
spf13-afero-18d690e/gcsfs/file_info.go 0000664 0000000 0000000 00000006642 15057601571 0017507 0 ustar 00root root 0000000 0000000 // Copyright © 2021 Vasily Ovchinnikov .
//
// The code in this file is derived from afero fork github.com/Zatte/afero by Mikael Rapp
// licensed under Apache License 2.0.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gcsfs
import (
"errors"
"os"
"path/filepath"
"strings"
"time"
"cloud.google.com/go/storage"
)
const (
folderSize = 42
)
type FileInfo struct {
name string
size int64
updated time.Time
isDir bool
fileMode os.FileMode
}
func newFileInfo(name string, fs *Fs, fileMode os.FileMode) (*FileInfo, error) {
res := &FileInfo{
name: name,
size: folderSize,
updated: time.Time{},
isDir: false,
fileMode: fileMode,
}
obj, err := fs.getObj(name)
if err != nil {
return nil, err
}
objAttrs, err := obj.Attrs(fs.ctx)
if err != nil {
if err.Error() == ErrEmptyObjectName.Error() {
// It's a root folder here, we return right away
res.name = fs.ensureTrailingSeparator(res.name)
res.isDir = true
return res, nil
} else if errors.Is(err, ErrObjectDoesNotExist) {
// Folders do not actually "exist" in GCloud, so we have to check, if something exists with
// such a prefix
bucketName, bucketPath := fs.splitName(name)
it := fs.client.Bucket(bucketName).Objects(
fs.ctx, &storage.Query{Delimiter: fs.separator, Prefix: bucketPath, Versions: false})
if _, err = it.Next(); err == nil {
res.name = fs.ensureTrailingSeparator(res.name)
res.isDir = true
return res, nil
}
return nil, ErrFileNotFound
}
return nil, err
}
res.size = objAttrs.Size
res.updated = objAttrs.Updated
return res, nil
}
func newFileInfoFromAttrs(objAttrs *storage.ObjectAttrs, fileMode os.FileMode) *FileInfo {
res := &FileInfo{
name: objAttrs.Name,
size: objAttrs.Size,
updated: objAttrs.Updated,
isDir: false,
fileMode: fileMode,
}
if res.name == "" {
if objAttrs.Prefix != "" {
// It's a virtual folder! It does not have a name, but prefix - this is how GCS API
// deals with them at the moment
res.name = objAttrs.Prefix
res.size = folderSize
res.isDir = true
}
}
return res
}
func (fi *FileInfo) Name() string {
return filepath.Base(filepath.FromSlash(fi.name))
}
func (fi *FileInfo) Size() int64 {
return fi.size
}
func (fi *FileInfo) Mode() os.FileMode {
if fi.IsDir() {
return os.ModeDir | fi.fileMode
}
return fi.fileMode
}
func (fi *FileInfo) ModTime() time.Time {
return fi.updated
}
func (fi *FileInfo) IsDir() bool {
return fi.isDir
}
func (fi *FileInfo) Sys() interface{} {
return nil
}
type ByName []*FileInfo
func (a ByName) Len() int { return len(a) }
func (a ByName) Swap(i, j int) {
	a[i], a[j] = a[j], a[i]
}
func (a ByName) Less(i, j int) bool { return strings.Compare(a[i].Name(), a[j].Name()) == -1 }
spf13-afero-18d690e/gcsfs/file_resource.go 0000664 0000000 0000000 00000015025 15057601571 0020376 0 ustar 00root root 0000000 0000000 // Copyright © 2021 Vasily Ovchinnikov .
//
// The code in this file is derived from afero fork github.com/Zatte/afero by Mikael Rapp
// licensed under Apache License 2.0.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gcsfs
import (
"bytes"
"context"
"fmt"
"io"
"os"
"syscall"
"github.com/spf13/afero/gcsfs/internal/stiface"
)
const (
maxWriteSize = 10000
)
// gcsFileResource represents a singleton version of each GCS object;
// Google cloud storage allows users to open multiple writers(!) to the same
// underlying resource; once a writer is closed, the written stream is committed. We are doing
// some magic where we read and write to the same file, which requires synchronization
// of the underlying resource.
type gcsFileResource struct {
ctx context.Context
fs *Fs
obj stiface.ObjectHandle
name string
fileMode os.FileMode
currentGcsSize int64
offset int64
reader io.ReadCloser
writer io.WriteCloser
closed bool
}
func (o *gcsFileResource) Close() error {
o.closed = true
// TODO rawGcsObjectsMap ?
return o.maybeCloseIo()
}
func (o *gcsFileResource) maybeCloseIo() error {
if err := o.maybeCloseReader(); err != nil {
return fmt.Errorf("error closing reader: %v", err)
}
if err := o.maybeCloseWriter(); err != nil {
return fmt.Errorf("error closing writer: %v", err)
}
return nil
}
func (o *gcsFileResource) maybeCloseReader() error {
if o.reader == nil {
return nil
}
if err := o.reader.Close(); err != nil {
return err
}
o.reader = nil
return nil
}
func (o *gcsFileResource) maybeCloseWriter() error {
if o.writer == nil {
return nil
}
	// In cases of partial writes (e.g. to the middle of a file stream), we need to
	// append any remaining data from the original file before we close the writer
	// (and thereby commit the result).
	// For small writes it could be more efficient
	// to keep the original reader open, but that is left for another iteration.
if o.currentGcsSize > o.offset {
currentFile, err := o.obj.NewRangeReader(o.ctx, o.offset, -1)
if err != nil {
return fmt.Errorf(
"couldn't simulate a partial write; the closing (and thus"+
" the whole file write) is NOT commited to GCS. %v", err)
}
if currentFile != nil && currentFile.Remain() > 0 {
if _, err := io.Copy(o.writer, currentFile); err != nil {
return fmt.Errorf("error writing: %v", err)
}
}
}
if err := o.writer.Close(); err != nil {
return err
}
o.writer = nil
return nil
}
func (o *gcsFileResource) ReadAt(p []byte, off int64) (n int, err error) {
if cap(p) == 0 {
return 0, nil
}
	// Assume that if the reader is open, it is at the correct offset -
	// a good performance assumption that we must ensure holds
if off == o.offset && o.reader != nil {
n, err = o.reader.Read(p)
o.offset += int64(n)
return n, err
}
	// we have to check whether it's a folder; a folder will not have open readers or writers,
	// so this check should not be invoked excessively and cause too much of a performance drop
if o.reader == nil && o.writer == nil {
var info *FileInfo
info, err = newFileInfo(o.name, o.fs, o.fileMode)
if err != nil {
return 0, err
}
if info.IsDir() {
// trying to read a directory must return this
return 0, syscall.EISDIR
}
}
// If any writers have written anything; commit it first so we can read it back.
if err = o.maybeCloseIo(); err != nil {
return 0, err
}
// Then read at the correct offset.
r, err := o.obj.NewRangeReader(o.ctx, off, -1)
if err != nil {
return 0, err
}
o.reader = r
o.offset = off
read, err := o.reader.Read(p)
o.offset += int64(read)
return read, err
}
func (o *gcsFileResource) WriteAt(b []byte, off int64) (n int, err error) {
// If the writer is opened and at the correct offset we're good!
if off == o.offset && o.writer != nil {
n, err = o.writer.Write(b)
o.offset += int64(n)
return n, err
}
// Ensure readers must be re-opened and that if a writer is active at another
// offset it is first committed before we do a "seek" below
if err = o.maybeCloseIo(); err != nil {
return 0, err
}
w := o.obj.NewWriter(o.ctx)
	// TRIGGER WARNING: This can seem like a hack, but it works thanks
	// to GCS strong consistency. We will open and write to the same file; only when the
	// writer is closed will the content be committed to GCS.
// The general idea is this:
// Objectv1[:offset] -> Objectv2
// newData1 -> Objectv2
// Objectv1[offset+len(newData1):] -> Objectv2
// Objectv2.Close
//
	// It will, however, require a download and upload of the original file, but that
	// can't be avoided if we are to support seek-write operations on GCS.
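	// Illustrative walk-through (example data assumed, not taken from the codebase):
	// given an existing object containing "HelloWorld", WriteAt([]byte("XY"), 5)
	// first copies the original bytes up to the offset ("Hello") into the new
	// writer, then writes "XY"; when the writer is later closed, maybeCloseWriter
	// appends the remaining original bytes ("rld"), committing "HelloXYrld".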
objAttrs, err := o.obj.Attrs(o.ctx)
if err != nil {
if off > 0 {
return 0, err // WriteAt to a non existing file
}
o.currentGcsSize = 0
} else {
o.currentGcsSize = objAttrs.Size
}
if off > o.currentGcsSize {
return 0, ErrOutOfRange
}
if off > 0 {
var r stiface.Reader
r, err = o.obj.NewReader(o.ctx)
if err != nil {
return 0, err
}
if _, err = io.CopyN(w, r, off); err != nil {
return 0, err
}
if err = r.Close(); err != nil {
return 0, err
}
}
o.writer = w
o.offset = off
written, err := o.writer.Write(b)
o.offset += int64(written)
return written, err
}
func min(x, y int) int {
if x < y {
return x
}
return y
}
func (o *gcsFileResource) Truncate(wantedSize int64) error {
if wantedSize < 0 {
return ErrOutOfRange
}
if err := o.maybeCloseIo(); err != nil {
return err
}
r, err := o.obj.NewRangeReader(o.ctx, 0, wantedSize)
if err != nil {
return err
}
w := o.obj.NewWriter(o.ctx)
written, err := io.Copy(w, r)
if err != nil {
return err
}
for written < wantedSize {
// Bulk up padding writes
paddingBytes := bytes.Repeat([]byte(" "), min(maxWriteSize, int(wantedSize-written)))
n := 0
if n, err = w.Write(paddingBytes); err != nil {
return err
}
written += int64(n)
}
if err = r.Close(); err != nil {
return fmt.Errorf("error closing reader: %v", err)
}
if err = w.Close(); err != nil {
return fmt.Errorf("error closing writer: %v", err)
}
return nil
}
spf13-afero-18d690e/gcsfs/fs.go 0000664 0000000 0000000 00000023022 15057601571 0016154 0 ustar 00root root 0000000 0000000 // Copyright © 2021 Vasily Ovchinnikov .
//
// The code in this file is derived from afero fork github.com/Zatte/afero by Mikael Rapp
// licensed under Apache License 2.0.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gcsfs
import (
"context"
"errors"
"os"
"path/filepath"
"strings"
"syscall"
"time"
"github.com/spf13/afero/gcsfs/internal/stiface"
)
const (
defaultFileMode = 0o755
gsPrefix = "gs://"
)
// Fs is an Fs implementation that uses functions provided by Google Cloud Storage
type Fs struct {
ctx context.Context
client stiface.Client
separator string
buckets map[string]stiface.BucketHandle
rawGcsObjects map[string]*GcsFile
autoRemoveEmptyFolders bool // trigger for creating "virtual folders" (not required by GCSs)
}
func NewGcsFs(ctx context.Context, client stiface.Client) *Fs {
return NewGcsFsWithSeparator(ctx, client, "/")
}
func NewGcsFsWithSeparator(ctx context.Context, client stiface.Client, folderSep string) *Fs {
return &Fs{
ctx: ctx,
client: client,
separator: folderSep,
rawGcsObjects: make(map[string]*GcsFile),
autoRemoveEmptyFolders: true,
}
}
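// Illustrative construction sketch (assumptions: a *storage.Client obtained from
// cloud.google.com/go/storage and the stiface adapter used by the package-level
// constructors; the bucket and object names are made up for the example):
//
//	client, err := storage.NewClient(ctx)
//	if err != nil {
//		// handle the error
//	}
//	fs := NewGcsFs(ctx, stiface.AdaptClient(client))
//	// Object names start with the bucket, e.g. "my-bucket/path/to/object.txt".
//	f, err := fs.Open("my-bucket/path/to/object.txt")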
// normSeparators will normalize all "\\" and "/" to the provided separator
func (fs *Fs) normSeparators(s string) string {
return strings.Replace(strings.Replace(s, "\\", fs.separator, -1), "/", fs.separator, -1)
}
func (fs *Fs) ensureTrailingSeparator(s string) string {
if len(s) > 0 && !strings.HasSuffix(s, fs.separator) {
return s + fs.separator
}
return s
}
func (fs *Fs) ensureNoLeadingSeparator(s string) string {
if len(s) > 0 && strings.HasPrefix(s, fs.separator) {
s = s[len(fs.separator):]
}
return s
}
func ensureNoPrefix(s string) string {
if len(s) > 0 && strings.HasPrefix(s, gsPrefix) {
return s[len(gsPrefix):]
}
return s
}
func validateName(s string) error {
if len(s) == 0 {
return ErrNoBucketInName
}
return nil
}
// Splits provided name into bucket name and path
func (fs *Fs) splitName(name string) (bucketName string, path string) {
splitName := strings.Split(name, fs.separator)
return splitName[0], strings.Join(splitName[1:], fs.separator)
}
func (fs *Fs) getBucket(name string) (stiface.BucketHandle, error) {
bucket := fs.buckets[name]
if bucket == nil {
bucket = fs.client.Bucket(name)
_, err := bucket.Attrs(fs.ctx)
if err != nil {
return nil, err
}
}
return bucket, nil
}
func (fs *Fs) getObj(name string) (stiface.ObjectHandle, error) {
bucketName, path := fs.splitName(name)
bucket, err := fs.getBucket(bucketName)
if err != nil {
return nil, err
}
return bucket.Object(path), nil
}
func (fs *Fs) Name() string { return "GcsFs" }
func (fs *Fs) Create(name string) (*GcsFile, error) {
name = fs.ensureNoLeadingSeparator(fs.normSeparators(ensureNoPrefix(name)))
if err := validateName(name); err != nil {
return nil, err
}
if !fs.autoRemoveEmptyFolders {
baseDir := filepath.Base(name)
if stat, err := fs.Stat(baseDir); err != nil || !stat.IsDir() {
err = fs.MkdirAll(baseDir, 0)
if err != nil {
return nil, err
}
}
}
obj, err := fs.getObj(name)
if err != nil {
return nil, err
}
w := obj.NewWriter(fs.ctx)
err = w.Close()
if err != nil {
return nil, err
}
file := NewGcsFile(fs.ctx, fs, obj, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0, name)
fs.rawGcsObjects[name] = file
return file, nil
}
func (fs *Fs) Mkdir(name string, _ os.FileMode) error {
name = fs.ensureNoLeadingSeparator(
fs.ensureTrailingSeparator(fs.normSeparators(ensureNoPrefix(name))),
)
if err := validateName(name); err != nil {
return err
}
// folder creation logic has to additionally check for folder name presence
bucketName, path := fs.splitName(name)
if bucketName == "" {
return ErrNoBucketInName
}
if path == "" {
// the API would throw "googleapi: Error 400: No object name, required", but this one is more consistent
return ErrEmptyObjectName
}
obj, err := fs.getObj(name)
if err != nil {
return err
}
w := obj.NewWriter(fs.ctx)
return w.Close()
}
func (fs *Fs) MkdirAll(path string, perm os.FileMode) error {
path = fs.ensureNoLeadingSeparator(
fs.ensureTrailingSeparator(fs.normSeparators(ensureNoPrefix(path))),
)
if err := validateName(path); err != nil {
return err
}
// folder creation logic has to additionally check for folder name presence
bucketName, splitPath := fs.splitName(path)
if bucketName == "" {
return ErrNoBucketInName
}
if splitPath == "" {
// the API would throw "googleapi: Error 400: No object name, required", but this one is more consistent
return ErrEmptyObjectName
}
root := ""
folders := strings.Split(path, fs.separator)
for i, f := range folders {
if f == "" && i != 0 {
continue // it's the last item - it should be empty
}
// Don't force a delimiter prefix
if root != "" {
root = root + fs.separator + f
} else {
// we have to have at least bucket name + folder name to create successfully
root = f
continue
}
if err := fs.Mkdir(root, perm); err != nil {
return err
}
}
return nil
}
func (fs *Fs) Open(name string) (*GcsFile, error) {
return fs.OpenFile(name, os.O_RDONLY, 0)
}
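// OpenFile opens the named object honoring the passed flags: O_TRUNC deletes and recreates the
// object, O_APPEND seeks to its end, and O_CREATE writes an empty object, failing with EPERM if
// the object already exists.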
func (fs *Fs) OpenFile(name string, flag int, fileMode os.FileMode) (*GcsFile, error) {
var file *GcsFile
var err error
name = fs.ensureNoLeadingSeparator(fs.normSeparators(ensureNoPrefix(name)))
if err = validateName(name); err != nil {
return nil, err
}
f, found := fs.rawGcsObjects[name]
if found {
file = NewGcsFileFromOldFH(flag, fileMode, f.resource)
} else {
var obj stiface.ObjectHandle
obj, err = fs.getObj(name)
if err != nil {
return nil, err
}
file = NewGcsFile(fs.ctx, fs, obj, flag, fileMode, name)
}
if flag == os.O_RDONLY {
_, err = file.Stat()
if err != nil {
return nil, err
}
}
if flag&os.O_TRUNC != 0 {
err = file.resource.obj.Delete(fs.ctx)
if err != nil {
return nil, err
}
return fs.Create(name)
}
if flag&os.O_APPEND != 0 {
_, err = file.Seek(0, 2)
if err != nil {
return nil, err
}
}
if flag&os.O_CREATE != 0 {
_, err = file.Stat()
if err == nil { // the file actually exists
return nil, syscall.EPERM
}
_, err = file.WriteString("")
if err != nil {
return nil, err
}
}
return file, nil
}
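// Remove deletes a single object. A folder can only be removed when it is empty; otherwise
// syscall.ENOTEMPTY is returned.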
func (fs *Fs) Remove(name string) error {
name = fs.ensureNoLeadingSeparator(fs.normSeparators(ensureNoPrefix(name)))
if err := validateName(name); err != nil {
return err
}
obj, err := fs.getObj(name)
if err != nil {
return err
}
info, err := fs.Stat(name)
if err != nil {
return err
}
delete(fs.rawGcsObjects, name)
if info.IsDir() {
// it's a folder, we have to check its contents - it cannot be removed if not empty
var dir *GcsFile
dir, err = fs.Open(name)
if err != nil {
return err
}
var infos []os.FileInfo
infos, err = dir.Readdir(0)
if err != nil {
return err
}
if len(infos) > 0 {
return syscall.ENOTEMPTY
}
// it's an empty folder, we can continue
name = fs.ensureTrailingSeparator(name)
obj, err = fs.getObj(name)
if err != nil {
return err
}
return obj.Delete(fs.ctx)
}
return obj.Delete(fs.ctx)
}
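// RemoveAll removes the given path and, if it is a folder, all of its contents recursively.
// Removing a non-existent path is not an error.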
func (fs *Fs) RemoveAll(path string) error {
path = fs.ensureNoLeadingSeparator(fs.normSeparators(ensureNoPrefix(path)))
if err := validateName(path); err != nil {
return err
}
pathInfo, err := fs.Stat(path)
if errors.Is(err, ErrFileNotFound) {
// return early if file doesn't exist
return nil
}
if err != nil {
return err
}
if !pathInfo.IsDir() {
return fs.Remove(path)
}
var dir *GcsFile
dir, err = fs.Open(path)
if err != nil {
return err
}
var infos []os.FileInfo
infos, err = dir.Readdir(0)
if err != nil {
return err
}
for _, info := range infos {
nameToRemove := fs.normSeparators(info.Name())
err = fs.RemoveAll(path + fs.separator + nameToRemove)
if err != nil {
return err
}
}
return fs.Remove(path)
}
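// Rename copies the object to the new name and then deletes the original; GCS has no native
// rename operation, so the two steps are not atomic.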
func (fs *Fs) Rename(oldName, newName string) error {
oldName = fs.ensureNoLeadingSeparator(fs.normSeparators(ensureNoPrefix(oldName)))
if err := validateName(oldName); err != nil {
return err
}
newName = fs.ensureNoLeadingSeparator(fs.normSeparators(ensureNoPrefix(newName)))
if err := validateName(newName); err != nil {
return err
}
src, err := fs.getObj(oldName)
if err != nil {
return err
}
dst, err := fs.getObj(newName)
if err != nil {
return err
}
if _, err = dst.CopierFrom(src).Run(fs.ctx); err != nil {
return err
}
delete(fs.rawGcsObjects, oldName)
return src.Delete(fs.ctx)
}
func (fs *Fs) Stat(name string) (os.FileInfo, error) {
name = fs.ensureNoLeadingSeparator(fs.normSeparators(ensureNoPrefix(name)))
if err := validateName(name); err != nil {
return nil, err
}
return newFileInfo(name, fs, defaultFileMode)
}
func (fs *Fs) Chmod(_ string, _ os.FileMode) error {
return errors.New("method Chmod is not implemented in GCS")
}
func (fs *Fs) Chtimes(_ string, _, _ time.Time) error {
return errors.New(
"method Chtimes is not implemented. Create, Delete, Updated times are read only fields in GCS and set implicitly",
)
}
func (fs *Fs) Chown(_ string, _, _ int) error {
return errors.New("method Chown is not implemented for GCS")
}
spf13-afero-18d690e/gcsfs/gcs-fake-service-account.json 0000664 0000000 0000000 00000003755 15057601571 0022673 0 ustar 00root root 0000000 0000000 {
"type": "service_account",
"private_key_id": "abc",
"private_key": "-----BEGIN PRIVATE KEY-----\nMIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDY3E8o1NEFcjMM\nHW/5ZfFJw29/8NEqpViNjQIx95Xx5KDtJ+nWn9+OW0uqsSqKlKGhAdAo+Q6bjx2c\nuXVsXTu7XrZUY5Kltvj94DvUa1wjNXs606r/RxWTJ58bfdC+gLLxBfGnB6CwK0YQ\nxnfpjNbkUfVVzO0MQD7UP0Hl5ZcY0Puvxd/yHuONQn/rIAieTHH1pqgW+zrH/y3c\n59IGThC9PPtugI9ea8RSnVj3PWz1bX2UkCDpy9IRh9LzJLaYYX9RUd7++dULUlat\nAaXBh1U6emUDzhrIsgApjDVtimOPbmQWmX1S60mqQikRpVYZ8u+NDD+LNw+/Eovn\nxCj2Y3z1AgMBAAECggEAWDBzoqO1IvVXjBA2lqId10T6hXmN3j1ifyH+aAqK+FVl\nGjyWjDj0xWQcJ9ync7bQ6fSeTeNGzP0M6kzDU1+w6FgyZqwdmXWI2VmEizRjwk+/\n/uLQUcL7I55Dxn7KUoZs/rZPmQDxmGLoue60Gg6z3yLzVcKiDc7cnhzhdBgDc8vd\nQorNAlqGPRnm3EqKQ6VQp6fyQmCAxrr45kspRXNLddat3AMsuqImDkqGKBmF3Q1y\nxWGe81LphUiRqvqbyUlh6cdSZ8pLBpc9m0c3qWPKs9paqBIvgUPlvOZMqec6x4S6\nChbdkkTRLnbsRr0Yg/nDeEPlkhRBhasXpxpMUBgPywKBgQDs2axNkFjbU94uXvd5\nznUhDVxPFBuxyUHtsJNqW4p/ujLNimGet5E/YthCnQeC2P3Ym7c3fiz68amM6hiA\nOnW7HYPZ+jKFnefpAtjyOOs46AkftEg07T9XjwWNPt8+8l0DYawPoJgbM5iE0L2O\nx8TU1Vs4mXc+ql9F90GzI0x3VwKBgQDqZOOqWw3hTnNT07Ixqnmd3dugV9S7eW6o\nU9OoUgJB4rYTpG+yFqNqbRT8bkx37iKBMEReppqonOqGm4wtuRR6LSLlgcIU9Iwx\nyfH12UWqVmFSHsgZFqM/cK3wGev38h1WBIOx3/djKn7BdlKVh8kWyx6uC8bmV+E6\nOoK0vJD6kwKBgHAySOnROBZlqzkiKW8c+uU2VATtzJSydrWm0J4wUPJifNBa/hVW\ndcqmAzXC9xznt5AVa3wxHBOfyKaE+ig8CSsjNyNZ3vbmr0X04FoV1m91k2TeXNod\njMTobkPThaNm4eLJMN2SQJuaHGTGERWC0l3T18t+/zrDMDCPiSLX1NAvAoGBAN1T\nVLJYdjvIMxf1bm59VYcepbK7HLHFkRq6xMJMZbtG0ryraZjUzYvB4q4VjHk2UDiC\nlhx13tXWDZH7MJtABzjyg+AI7XWSEQs2cBXACos0M4Myc6lU+eL+iA+OuoUOhmrh\nqmT8YYGu76/IBWUSqWuvcpHPpwl7871i4Ga/I3qnAoGBANNkKAcMoeAbJQK7a/Rn\nwPEJB+dPgNDIaboAsh1nZhVhN5cvdvCWuEYgOGCPQLYQF0zmTLcM+sVxOYgfy8mV\nfbNgPgsP5xmu6dw2COBKdtozw0HrWSRjACd1N4yGu75+wPCcX/gQarcjRcXXZeEa\nNtBLSfcqPULqD+h7br9lEJio\n-----END PRIVATE KEY-----\n",
"client_email": "123-abc@developer.gserviceaccount.com",
"client_id": "123-abc.apps.googleusercontent.com",
"auth_uri": "https://accounts.google.com/o/oauth2/auth",
"token_uri": "http://localhost:8080/token"
}
spf13-afero-18d690e/gcsfs/gcs.go 0000664 0000000 0000000 00000007505 15057601571 0016330 0 ustar 00root root 0000000 0000000 // Copyright © 2021 Vasily Ovchinnikov .
//
// The code in this file is derived from the afero fork github.com/Zatte/afero by Mikael Rapp,
// licensed under Apache License 2.0.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gcsfs
import (
"context"
"os"
"time"
"cloud.google.com/go/storage"
"google.golang.org/api/option"
"github.com/spf13/afero"
"github.com/spf13/afero/gcsfs/internal/stiface"
)
type GcsFs struct {
source *Fs
}
// NewGcsFS creates a GCS file system, automatically instantiating and decorating the storage client.
// You can provide additional options to be passed to the client creation, as per the
// cloud.google.com/go/storage documentation.
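//
// A minimal usage sketch (the bucket and object names below are illustrative only;
// credentials are resolved via Application Default Credentials or, if set, the
// GOOGLE_APPLICATION_CREDENTIALS_JSON environment variable):
//
//     fs, err := NewGcsFS(context.Background())
//     if err != nil {
//         // handle the error
//     }
//     f, err := fs.Open("my-bucket/path/to/object.txt")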
func NewGcsFS(ctx context.Context, opts ...option.ClientOption) (afero.Fs, error) {
if json := os.Getenv("GOOGLE_APPLICATION_CREDENTIALS_JSON"); json != "" {
opts = append(opts, option.WithCredentialsJSON([]byte(json)))
}
client, err := storage.NewClient(ctx, opts...)
if err != nil {
return nil, err
}
return NewGcsFSFromClient(ctx, client)
}
// NewGcsFSWithSeparator is the same as NewGcsFS, but the file system will use the provided folder separator.
func NewGcsFSWithSeparator(
ctx context.Context,
folderSeparator string,
opts ...option.ClientOption,
) (afero.Fs, error) {
client, err := storage.NewClient(ctx, opts...)
if err != nil {
return nil, err
}
return NewGcsFSFromClientWithSeparator(ctx, client, folderSeparator)
}
// NewGcsFSFromClient creates a GCS file system from a given storage client
func NewGcsFSFromClient(ctx context.Context, client *storage.Client) (afero.Fs, error) {
c := stiface.AdaptClient(client)
return &GcsFs{NewGcsFs(ctx, c)}, nil
}
// NewGcsFSFromClientWithSeparator is the same as NewGcsFSFromClient, but the file system will use the provided folder separator.
func NewGcsFSFromClientWithSeparator(
ctx context.Context,
client *storage.Client,
folderSeparator string,
) (afero.Fs, error) {
c := stiface.AdaptClient(client)
return &GcsFs{NewGcsFsWithSeparator(ctx, c, folderSeparator)}, nil
}
// The methods below wrap the underlying Fs and convert some return types to afero interfaces.
func (fs *GcsFs) Name() string {
return fs.source.Name()
}
func (fs *GcsFs) Create(name string) (afero.File, error) {
return fs.source.Create(name)
}
func (fs *GcsFs) Mkdir(name string, perm os.FileMode) error {
return fs.source.Mkdir(name, perm)
}
func (fs *GcsFs) MkdirAll(path string, perm os.FileMode) error {
return fs.source.MkdirAll(path, perm)
}
func (fs *GcsFs) Open(name string) (afero.File, error) {
return fs.source.Open(name)
}
func (fs *GcsFs) OpenFile(name string, flag int, perm os.FileMode) (afero.File, error) {
return fs.source.OpenFile(name, flag, perm)
}
func (fs *GcsFs) Remove(name string) error {
return fs.source.Remove(name)
}
func (fs *GcsFs) RemoveAll(path string) error {
return fs.source.RemoveAll(path)
}
func (fs *GcsFs) Rename(oldname, newname string) error {
return fs.source.Rename(oldname, newname)
}
func (fs *GcsFs) Stat(name string) (os.FileInfo, error) {
return fs.source.Stat(name)
}
func (fs *GcsFs) Chmod(name string, mode os.FileMode) error {
return fs.source.Chmod(name, mode)
}
func (fs *GcsFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
return fs.source.Chtimes(name, atime, mtime)
}
func (fs *GcsFs) Chown(name string, uid, gid int) error {
return fs.source.Chown(name, uid, gid)
}
spf13-afero-18d690e/gcsfs/gcs_mocks.go 0000664 0000000 0000000 00000012601 15057601571 0017515 0 ustar 00root root 0000000 0000000 // Copyright © 2021 Vasily Ovchinnikov .
//
// A set of stiface-based mocks replicating GCS behavior, so that the tests do not require any
// internet connection or real buckets.
// It is **not** a comprehensive set of mocks covering anything and everything GCS-related, but
// rather one tailored to the current implementation - the tests written with these mocks are
// therefore more regression tests than anything else.
// If any GCS behavior changes and breaks the implementation, the implementation should first be
// adjusted by switching over to a real bucket - and then the mocks have to be adjusted to match
// the implementation.
package gcsfs
import (
"context"
"io"
"os"
"strings"
"cloud.google.com/go/storage"
"google.golang.org/api/iterator"
"github.com/spf13/afero"
"github.com/spf13/afero/gcsfs/internal/stiface"
)
// normSeparators sets filesystem separators to the one expected (and hard-coded) in the tests
func normSeparators(s string) string {
return strings.Replace(s, "\\", "/", -1)
}
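// clientMock implements the subset of stiface.Client used by the tests on top of an in-memory
// afero.MemMapFs, so reads and writes never touch a real bucket.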
type clientMock struct {
stiface.Client
fs afero.Fs
}
func newClientMock() *clientMock {
return &clientMock{fs: afero.NewMemMapFs()}
}
func (m *clientMock) Bucket(name string) stiface.BucketHandle {
return &bucketMock{bucketName: name, fs: m.fs}
}
type bucketMock struct {
stiface.BucketHandle
bucketName string
fs afero.Fs
}
func (m *bucketMock) Attrs(context.Context) (*storage.BucketAttrs, error) {
return &storage.BucketAttrs{}, nil
}
func (m *bucketMock) Object(name string) stiface.ObjectHandle {
return &objectMock{name: name, fs: m.fs}
}
func (m *bucketMock) Objects(_ context.Context, q *storage.Query) (it stiface.ObjectIterator) {
return &objectItMock{name: q.Prefix, fs: m.fs}
}
type objectMock struct {
stiface.ObjectHandle
name string
fs afero.Fs
}
func (o *objectMock) NewWriter(_ context.Context) stiface.Writer {
return &writerMock{name: o.name, fs: o.fs}
}
func (o *objectMock) NewRangeReader(
_ context.Context,
offset, length int64,
) (stiface.Reader, error) {
if o.name == "" {
return nil, ErrEmptyObjectName
}
file, err := o.fs.Open(o.name)
if err != nil {
return nil, err
}
if offset > 0 {
_, err = file.Seek(offset, io.SeekStart)
if err != nil {
return nil, err
}
}
res := &readerMock{file: file}
if length > -1 {
res.buf = make([]byte, length)
_, err = file.Read(res.buf)
if err != nil {
return nil, err
}
}
return res, nil
}
func (o *objectMock) Delete(_ context.Context) error {
if o.name == "" {
return ErrEmptyObjectName
}
return o.fs.Remove(o.name)
}
func (o *objectMock) Attrs(_ context.Context) (*storage.ObjectAttrs, error) {
if o.name == "" {
return nil, ErrEmptyObjectName
}
info, err := o.fs.Stat(o.name)
if err != nil {
pathError, ok := err.(*os.PathError)
if ok {
if pathError.Err == os.ErrNotExist {
return nil, storage.ErrObjectNotExist
}
}
return nil, err
}
res := &storage.ObjectAttrs{
Name: normSeparators(o.name),
Size: info.Size(),
Updated: info.ModTime(),
}
if info.IsDir() {
// we have to mock it here, because of FileInfo logic
return nil, storage.ErrObjectNotExist
}
return res, nil
}
type writerMock struct {
stiface.Writer
name string
fs afero.Fs
file afero.File
}
func (w *writerMock) Write(p []byte) (n int, err error) {
if w.name == "" {
return 0, ErrEmptyObjectName
}
if w.file == nil {
w.file, err = w.fs.Create(w.name)
if err != nil {
return 0, err
}
}
return w.file.Write(p)
}
func (w *writerMock) Close() error {
if w.name == "" {
return ErrEmptyObjectName
}
if w.file == nil {
var err error
if strings.HasSuffix(w.name, "/") {
err = w.fs.Mkdir(w.name, 0o755)
if err != nil {
return err
}
} else {
_, err = w.Write([]byte{})
if err != nil {
return err
}
}
}
if w.file != nil {
return w.file.Close()
}
return nil
}
type readerMock struct {
stiface.Reader
file afero.File
buf []byte
}
func (r *readerMock) Remain() int64 {
return 0
}
func (r *readerMock) Read(p []byte) (int, error) {
if r.buf != nil {
copy(p, r.buf)
return len(r.buf), nil
}
return r.file.Read(p)
}
func (r *readerMock) Close() error {
return r.file.Close()
}
type objectItMock struct {
stiface.ObjectIterator
name string
fs afero.Fs
dir afero.File
infos []*storage.ObjectAttrs
}
func (it *objectItMock) Next() (*storage.ObjectAttrs, error) {
var err error
if it.dir == nil {
it.dir, err = it.fs.Open(it.name)
if err != nil {
return nil, err
}
var isDir bool
isDir, err = afero.IsDir(it.fs, it.name)
if err != nil {
return nil, err
}
it.infos = []*storage.ObjectAttrs{}
if !isDir {
var info os.FileInfo
info, err = it.dir.Stat()
if err != nil {
return nil, err
}
it.infos = append(
it.infos,
&storage.ObjectAttrs{
Name: normSeparators(info.Name()),
Size: info.Size(),
Updated: info.ModTime(),
},
)
} else {
var fInfos []os.FileInfo
fInfos, err = it.dir.Readdir(0)
if err != nil {
return nil, err
}
if it.name != "" {
it.infos = append(it.infos, &storage.ObjectAttrs{
Prefix: normSeparators(it.name) + "/",
})
}
for _, info := range fInfos {
it.infos = append(it.infos, &storage.ObjectAttrs{Name: normSeparators(info.Name()), Size: info.Size(), Updated: info.ModTime()})
}
}
}
if len(it.infos) == 0 {
return nil, iterator.Done
}
res := it.infos[0]
it.infos = it.infos[1:]
return res, err
}
spf13-afero-18d690e/gcsfs/gcs_test.go 0000664 0000000 0000000 00000045371 15057601571 0017372 0 ustar 00root root 0000000 0000000 // Copyright © 2021 Vasily Ovchinnikov .
//
// Most of the tests are "derived" from Afero's own tarfs implementation.
// Write-oriented tests and/or checks have been added on top of that.
package gcsfs
import (
"context"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"reflect"
"strings"
"syscall"
"testing"
"cloud.google.com/go/storage"
"golang.org/x/oauth2/google"
"github.com/spf13/afero"
"github.com/spf13/afero/gcsfs/internal/stiface"
)
const (
testBytes = 8
dirSize = 42
)
var bucketName = "a-test-bucket"
var files = []struct {
name string
exists bool
isdir bool
size int64
content string
offset int64
contentAtOffset string
}{
{"sub", true, true, dirSize, "", 0, ""},
{"sub/testDir2", true, true, dirSize, "", 0, ""},
{"sub/testDir2/testFile", true, false, 8 * 1024, "c", 4 * 1024, "d"},
{"testFile", true, false, 12 * 1024, "a", 7 * 1024, "b"},
{"testDir1/testFile", true, false, 3 * 512, "b", 512, "c"},
{"", false, true, dirSize, "", 0, ""}, // special case
{"nonExisting", false, false, dirSize, "", 0, ""},
}
var dirs = []struct {
name string
children []string
}{
{
"",
[]string{"sub", "testDir1", "testFile"},
}, // in this case it will be prepended with bucket name
{"sub", []string{"testDir2"}},
{"sub/testDir2", []string{"testFile"}},
{"testDir1", []string{"testFile"}},
}
var gcsAfs *afero.Afero
func TestMain(m *testing.M) {
ctx := context.Background()
var err error
// the exit code is applied in a deferred closure so that it is read after m.Run and the other
// deferred cleanups still get a chance to execute before the process exits
var exitCode int
defer func() { os.Exit(exitCode) }()
defer func() {
err := recover()
if err != nil {
fmt.Print(err)
exitCode = 2
}
}()
// Check if any credentials are present. If not, a fake service account is used, taken from
// https://github.com/google/oauth2l/blob/master/integration/fixtures/fake-service-account.json
cred, err := google.FindDefaultCredentials(ctx)
if err != nil && !strings.HasPrefix(err.Error(), "google: could not find default credentials") {
panic(err)
}
if cred == nil {
var fakeCredentialsAbsPath string
fakeCredentialsAbsPath, err = filepath.Abs("gcs-fake-service-account.json")
if err != nil {
panic(err)
}
err = os.Setenv("GOOGLE_APPLICATION_CREDENTIALS", fakeCredentialsAbsPath)
if err != nil {
panic(err)
}
// reset it after the run
defer func() {
err = os.Unsetenv("GOOGLE_APPLICATION_CREDENTIALS")
if err != nil {
// it's worth printing it out explicitly, since it might have implications further down the road
fmt.Print("failed to clear fake GOOGLE_APPLICATION_CREDENTIALS", err)
}
}()
}
var c *storage.Client
c, err = storage.NewClient(ctx)
if err != nil {
panic(err)
}
client := stiface.AdaptClient(c)
// This block is mocking the client for the sake of isolated testing
mockClient := newClientMock()
mockClient.Client = client
gcsAfs = &afero.Afero{Fs: &GcsFs{NewGcsFs(ctx, mockClient)}}
// Uncomment to use the real, not mocked, client
// gcsAfs = &afero.Afero{Fs: &GcsFs{NewGcsFs(ctx, client)}}
exitCode = m.Run()
}
func createFiles(t *testing.T) {
t.Helper()
var err error
// the files have to be created first
for _, f := range files {
if !f.isdir && f.exists {
name := filepath.Join(bucketName, f.name)
var freshFile afero.File
freshFile, err = gcsAfs.Create(name)
if err != nil {
t.Fatalf("failed to create a file \"%s\": %s", f.name, err)
}
var written int
var totalWritten int64
for totalWritten < f.size {
if totalWritten < f.offset {
writeBuf := []byte(strings.Repeat(f.content, int(f.offset)))
written, err = freshFile.WriteAt(writeBuf, totalWritten)
} else {
writeBuf := []byte(strings.Repeat(f.contentAtOffset, int(f.size-f.offset)))
written, err = freshFile.WriteAt(writeBuf, totalWritten)
}
if err != nil {
t.Fatalf("failed to write a file \"%s\": %s", f.name, err)
}
totalWritten += int64(written)
}
err = freshFile.Close()
if err != nil {
t.Fatalf("failed to close a file \"%s\": %s", f.name, err)
}
}
}
}
func removeFiles(t *testing.T) {
t.Helper()
var err error
// remove the files that were created for the test
for _, f := range files {
if !f.isdir && f.exists {
name := filepath.Join(bucketName, f.name)
err = gcsAfs.Remove(name)
if err != nil && err == syscall.ENOENT {
t.Errorf("failed to remove file \"%s\": %s", f.name, err)
}
}
}
}
func TestGcsFsOpen(t *testing.T) {
createFiles(t)
defer removeFiles(t)
for _, f := range files {
nameBase := filepath.Join(bucketName, f.name)
names := []string{
nameBase,
string(os.PathSeparator) + nameBase,
}
if f.name == "" {
names = []string{f.name}
}
for _, name := range names {
file, err := gcsAfs.Open(name)
if (err == nil) != f.exists {
t.Errorf("%v exists = %v, but got err = %v", name, f.exists, err)
}
if !f.exists {
continue
}
if err != nil {
t.Fatalf("%v: %v", name, err)
}
if file.Name() != filepath.FromSlash(nameBase) {
t.Errorf("Name(), got %v, expected %v", file.Name(), filepath.FromSlash(nameBase))
}
s, err := file.Stat()
if err != nil {
t.Fatalf("stat %v: got error '%v'", file.Name(), err)
}
if isdir := s.IsDir(); isdir != f.isdir {
t.Errorf("%v directory, got: %v, expected: %v", file.Name(), isdir, f.isdir)
}
if size := s.Size(); size != f.size {
t.Errorf("%v size, got: %v, expected: %v", file.Name(), size, f.size)
}
}
}
}
func TestGcsRead(t *testing.T) {
createFiles(t)
defer removeFiles(t)
for _, f := range files {
if !f.exists {
continue
}
nameBase := filepath.Join(bucketName, f.name)
names := []string{
nameBase,
string(os.PathSeparator) + nameBase,
}
if f.name == "" {
names = []string{f.name}
}
for _, name := range names {
file, err := gcsAfs.Open(name)
if err != nil {
t.Fatalf("opening %v: %v", name, err)
}
buf := make([]byte, 8)
n, err := file.Read(buf)
if err != nil {
if f.isdir && (err != syscall.EISDIR) {
t.Errorf("%v got error %v, expected EISDIR", name, err)
} else if !f.isdir {
t.Errorf("%v: %v", name, err)
}
} else if n != 8 {
t.Errorf("%v: got %d read bytes, expected 8", name, n)
} else if string(buf) != strings.Repeat(f.content, testBytes) {
t.Errorf("%v: got <%s>, expected <%s>", f.name, f.content, string(buf))
}
}
}
}
func TestGcsReadAt(t *testing.T) {
createFiles(t)
defer removeFiles(t)
for _, f := range files {
if !f.exists {
continue
}
nameBase := filepath.Join(bucketName, f.name)
names := []string{
nameBase,
string(os.PathSeparator) + nameBase,
}
if f.name == "" {
names = []string{f.name}
}
for _, name := range names {
file, err := gcsAfs.Open(name)
if err != nil {
t.Fatalf("opening %v: %v", name, err)
}
buf := make([]byte, testBytes)
n, err := file.ReadAt(buf, f.offset-testBytes/2)
if err != nil {
if f.isdir && (err != syscall.EISDIR) {
t.Errorf("%v got error %v, expected EISDIR", name, err)
} else if !f.isdir {
t.Errorf("%v: %v", name, err)
}
} else if n != 8 {
t.Errorf("%v: got %d read bytes, expected 8", f.name, n)
} else if string(buf) != strings.Repeat(f.content, testBytes/2)+strings.Repeat(f.contentAtOffset, testBytes/2) {
t.Errorf("%v: got <%s>, expected <%s>", f.name, f.contentAtOffset, string(buf))
}
}
}
}
func TestGcsSeek(t *testing.T) {
createFiles(t)
defer removeFiles(t)
for _, f := range files {
if !f.exists {
continue
}
nameBase := filepath.Join(bucketName, f.name)
names := []string{
nameBase,
string(os.PathSeparator) + nameBase,
}
if f.name == "" {
names = []string{f.name}
}
for _, name := range names {
file, err := gcsAfs.Open(name)
if err != nil {
t.Fatalf("opening %v: %v", name, err)
}
tests := []struct {
offIn int64
whence int
offOut int64
}{
{0, io.SeekStart, 0},
{10, io.SeekStart, 10},
{1, io.SeekCurrent, 11},
{10, io.SeekCurrent, 21},
{0, io.SeekEnd, f.size},
{-1, io.SeekEnd, f.size - 1},
}
for _, s := range tests {
n, err := file.Seek(s.offIn, s.whence)
if err != nil {
if f.isdir && err == syscall.EISDIR {
continue
}
t.Errorf("%v: %v", name, err)
}
if n != s.offOut {
t.Errorf(
"%v: (off: %v, whence: %v): got %v, expected %v",
f.name,
s.offIn,
s.whence,
n,
s.offOut,
)
}
}
}
}
}
func TestGcsName(t *testing.T) {
createFiles(t)
defer removeFiles(t)
for _, f := range files {
if !f.exists {
continue
}
nameBase := filepath.Join(bucketName, f.name)
names := []string{
nameBase,
string(os.PathSeparator) + nameBase,
}
if f.name == "" {
names = []string{f.name}
}
for _, name := range names {
file, err := gcsAfs.Open(name)
if err != nil {
t.Fatalf("opening %v: %v", name, err)
}
n := file.Name()
if n != filepath.FromSlash(nameBase) {
t.Errorf("got: %v, expected: %v", n, filepath.FromSlash(nameBase))
}
}
}
}
func TestGcsClose(t *testing.T) {
createFiles(t)
defer removeFiles(t)
for _, f := range files {
if !f.exists {
continue
}
nameBase := filepath.Join(bucketName, f.name)
names := []string{
nameBase,
string(os.PathSeparator) + nameBase,
}
if f.name == "" {
names = []string{f.name}
}
for _, name := range names {
file, err := gcsAfs.Open(name)
if err != nil {
t.Fatalf("opening %v: %v", name, err)
}
err = file.Close()
if err != nil {
t.Errorf("%v: %v", name, err)
}
err = file.Close()
if err == nil {
t.Errorf("%v: closing twice should return an error", name)
}
buf := make([]byte, 8)
n, err := file.Read(buf)
if n != 0 || err == nil {
t.Errorf("%v: could read from a closed file", name)
}
n, err = file.ReadAt(buf, 256)
if n != 0 || err == nil {
t.Errorf("%v: could readAt from a closed file", name)
}
off, err := file.Seek(0, io.SeekStart)
if off != 0 || err == nil {
t.Errorf("%v: could seek from a closed file", name)
}
}
}
}
func TestGcsOpenFile(t *testing.T) {
createFiles(t)
defer removeFiles(t)
for _, f := range files {
nameBase := filepath.Join(bucketName, f.name)
names := []string{
nameBase,
string(os.PathSeparator) + nameBase,
}
if f.name == "" {
names = []string{f.name}
}
for _, name := range names {
file, err := gcsAfs.OpenFile(name, os.O_RDONLY, 0o400)
if !f.exists {
if (f.name != "" && !errors.Is(err, syscall.ENOENT)) ||
(f.name == "" && !errors.Is(err, ErrNoBucketInName)) {
t.Errorf("%v: got %v, expected%v", name, err, syscall.ENOENT)
}
continue
}
if err != nil {
t.Fatalf("%v: %v", name, err)
}
err = file.Close()
if err != nil {
t.Fatalf("failed to close a file \"%s\": %s", name, err)
}
_, err = gcsAfs.OpenFile(name, os.O_CREATE, 0o600)
if !errors.Is(err, syscall.EPERM) {
t.Errorf("%v: open for write: got %v, expected %v", name, err, syscall.EPERM)
}
}
}
}
func TestGcsFsStat(t *testing.T) {
createFiles(t)
defer removeFiles(t)
for _, f := range files {
nameBase := filepath.Join(bucketName, f.name)
names := []string{
nameBase,
string(os.PathSeparator) + nameBase,
}
if f.name == "" {
names = []string{f.name}
}
for _, name := range names {
fi, err := gcsAfs.Stat(name)
if !f.exists {
if (f.name != "" && !errors.Is(err, syscall.ENOENT)) ||
(f.name == "" && !errors.Is(err, ErrNoBucketInName)) {
t.Errorf("%v: got %v, expected%v", name, err, syscall.ENOENT)
}
continue
}
if err != nil {
t.Fatalf("stat %v: got error '%v'", name, err)
}
if isdir := fi.IsDir(); isdir != f.isdir {
t.Errorf("%v directory, got: %v, expected: %v", name, isdir, f.isdir)
}
if size := fi.Size(); size != f.size {
t.Errorf("%v size, got: %v, expected: %v", name, size, f.size)
}
}
}
}
func TestGcsReaddir(t *testing.T) {
createFiles(t)
defer removeFiles(t)
for _, d := range dirs {
nameBase := filepath.Join(bucketName, d.name)
names := []string{
nameBase,
string(os.PathSeparator) + nameBase,
}
for _, name := range names {
dir, err := gcsAfs.Open(name)
if err != nil {
t.Fatal(err)
}
fi, err := dir.Readdir(0)
if err != nil {
t.Fatal(err)
}
var fileNames []string
for _, f := range fi {
fileNames = append(fileNames, f.Name())
}
if !reflect.DeepEqual(fileNames, d.children) {
t.Errorf("%v: children, got '%v', expected '%v'", name, fileNames, d.children)
}
fi, err = dir.Readdir(1)
if err != nil {
t.Fatal(err)
}
fileNames = []string{}
for _, f := range fi {
fileNames = append(fileNames, f.Name())
}
if !reflect.DeepEqual(fileNames, d.children[0:1]) {
t.Errorf("%v: children, got '%v', expected '%v'", name, fileNames, d.children[0:1])
}
}
}
nameBase := filepath.Join(bucketName, "testFile")
names := []string{
nameBase,
string(os.PathSeparator) + nameBase,
}
for _, name := range names {
dir, err := gcsAfs.Open(name)
if err != nil {
t.Fatal(err)
}
_, err = dir.Readdir(-1)
if err != syscall.ENOTDIR {
t.Fatal("Expected error")
}
}
}
func TestGcsReaddirnames(t *testing.T) {
createFiles(t)
defer removeFiles(t)
for _, d := range dirs {
nameBase := filepath.Join(bucketName, d.name)
names := []string{
nameBase,
string(os.PathSeparator) + nameBase,
}
for _, name := range names {
dir, err := gcsAfs.Open(name)
if err != nil {
t.Fatal(err)
}
fileNames, err := dir.Readdirnames(0)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(fileNames, d.children) {
t.Errorf("%v: children, got '%v', expected '%v'", name, fileNames, d.children)
}
fileNames, err = dir.Readdirnames(1)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(fileNames, d.children[0:1]) {
t.Errorf("%v: children, got '%v', expected '%v'", name, fileNames, d.children[0:1])
}
}
}
nameBase := filepath.Join(bucketName, "testFile")
names := []string{
nameBase,
string(os.PathSeparator) + nameBase,
}
for _, name := range names {
dir, err := gcsAfs.Open(name)
if err != nil {
t.Fatal(err)
}
_, err = dir.Readdirnames(-1)
if err != syscall.ENOTDIR {
t.Fatal("Expected error")
}
}
}
func TestGcsGlob(t *testing.T) {
createFiles(t)
defer removeFiles(t)
for _, s := range []struct {
glob string
entries []string
}{
{filepath.FromSlash("*"), []string{filepath.FromSlash("sub"), filepath.FromSlash("testDir1"), filepath.FromSlash("testFile")}},
{filepath.FromSlash("sub/*"), []string{filepath.FromSlash("sub/testDir2")}},
{filepath.FromSlash("sub/testDir2/*"), []string{filepath.FromSlash("sub/testDir2/testFile")}},
{filepath.FromSlash("testDir1/*"), []string{filepath.FromSlash("testDir1/testFile")}},
} {
nameBase := filepath.Join(bucketName, s.glob)
prefixedGlobs := []string{
nameBase,
string(os.PathSeparator) + nameBase,
}
prefixedEntries := [][]string{{}, {}}
for _, entry := range s.entries {
prefixedEntries[0] = append(prefixedEntries[0], filepath.Join(bucketName, entry))
prefixedEntries[1] = append(
prefixedEntries[1],
string(os.PathSeparator)+filepath.Join(bucketName, entry),
)
}
for i, prefixedGlob := range prefixedGlobs {
entries, err := afero.Glob(gcsAfs.Fs, prefixedGlob)
if err != nil {
t.Error(err)
}
if reflect.DeepEqual(entries, prefixedEntries[i]) {
t.Logf("glob: %s: glob ok", prefixedGlob)
} else {
t.Errorf("glob: %s: got %#v, expected %#v", prefixedGlob, entries, prefixedEntries)
}
}
}
}
func TestGcsMkdir(t *testing.T) {
t.Run("empty", func(t *testing.T) {
emptyDirName := bucketName
err := gcsAfs.Mkdir(emptyDirName, 0o755)
if err == nil {
t.Fatal("did not fail upon creation of an empty folder")
}
})
t.Run("success", func(t *testing.T) {
dirName := filepath.Join(bucketName, "a-test-dir")
var err error
err = gcsAfs.Mkdir(dirName, 0o755)
if err != nil {
t.Fatal("failed to create a folder with error", err)
}
info, err := gcsAfs.Stat(dirName)
if err != nil {
t.Fatal("failed to get info", err)
}
if !info.IsDir() {
t.Fatalf("%s: not a dir", dirName)
}
if !info.Mode().IsDir() {
t.Errorf("%s: mode is not directory", dirName)
}
if info.Mode() != os.ModeDir|0o755 {
t.Errorf("%s: wrong permissions, expected drwxr-xr-x, got %s", dirName, info.Mode())
}
err = gcsAfs.Remove(dirName)
if err != nil {
t.Fatalf("could not delete the folder %s after the test with error: %s", dirName, err)
}
})
}
func TestGcsMkdirAll(t *testing.T) {
t.Run("empty", func(t *testing.T) {
emptyDirName := bucketName
err := gcsAfs.MkdirAll(emptyDirName, 0o755)
if err == nil {
t.Fatal("did not fail upon creation of an empty folder")
}
})
t.Run("success", func(t *testing.T) {
dirName := filepath.Join(bucketName, "a/b/c")
err := gcsAfs.MkdirAll(dirName, 0o755)
if err != nil {
t.Fatal(err)
}
info, err := gcsAfs.Stat(filepath.Join(bucketName, "a"))
if err != nil {
t.Fatal(err)
}
if !info.Mode().IsDir() {
t.Errorf("%s: mode is not directory", filepath.Join(bucketName, "a"))
}
if info.Mode() != os.ModeDir|0o755 {
t.Errorf(
"%s: wrong permissions, expected drwxr-xr-x, got %s",
filepath.Join(bucketName, "a"),
info.Mode(),
)
}
info, err = gcsAfs.Stat(filepath.Join(bucketName, "a/b"))
if err != nil {
t.Fatal(err)
}
if !info.Mode().IsDir() {
t.Errorf("%s: mode is not directory", filepath.Join(bucketName, "a/b"))
}
if info.Mode() != os.ModeDir|0o755 {
t.Errorf(
"%s: wrong permissions, expected drwxr-xr-x, got %s",
filepath.Join(bucketName, "a/b"),
info.Mode(),
)
}
info, err = gcsAfs.Stat(dirName)
if err != nil {
t.Fatal(err)
}
if !info.Mode().IsDir() {
t.Errorf("%s: mode is not directory", dirName)
}
if info.Mode() != os.ModeDir|0o755 {
t.Errorf("%s: wrong permissions, expected drwxr-xr-x, got %s", dirName, info.Mode())
}
err = gcsAfs.RemoveAll(filepath.Join(bucketName, "a"))
if err != nil {
t.Fatalf(
"failed to remove the folder %s with error: %s",
filepath.Join(bucketName, "a"),
err,
)
}
})
}
func TestGcsRemoveAll(t *testing.T) {
t.Run("non-existent", func(t *testing.T) {
err := gcsAfs.RemoveAll(filepath.Join(bucketName, "a"))
if err != nil {
t.Fatal("error should be nil when removing non-existent file")
}
})
t.Run("success", func(t *testing.T) {
aDir := filepath.Join(bucketName, "a")
bDir := filepath.Join(aDir, "b")
err := gcsAfs.MkdirAll(bDir, 0o755)
if err != nil {
t.Fatal(err)
}
_, err = gcsAfs.Stat(bDir)
if err != nil {
t.Fatal(err)
}
err = gcsAfs.RemoveAll(aDir)
if err != nil {
t.Fatalf("failed to remove the folder %s with error: %s", aDir, err)
}
_, err = gcsAfs.Stat(aDir)
if err == nil {
t.Fatalf("folder %s wasn't removed", aDir)
}
})
}
spf13-afero-18d690e/gcsfs/go.mod 0000664 0000000 0000000 00000005171 15057601571 0016330 0 ustar 00root root 0000000 0000000 module github.com/spf13/afero/gcsfs
go 1.23.0
replace github.com/spf13/afero => ../
require (
cloud.google.com/go/storage v1.51.0
github.com/spf13/afero v1.14.0
golang.org/x/oauth2 v0.28.0
google.golang.org/api v0.226.0
)
require (
cel.dev/expr v0.19.2 // indirect
cloud.google.com/go v0.118.3 // indirect
cloud.google.com/go/auth v0.15.0 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.7 // indirect
cloud.google.com/go/compute/metadata v0.6.0 // indirect
cloud.google.com/go/iam v1.4.1 // indirect
cloud.google.com/go/monitoring v1.24.0 // indirect
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0 // indirect
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0 // indirect
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 // indirect
github.com/envoyproxy/go-control-plane/envoy v1.32.4 // indirect
github.com/envoyproxy/protoc-gen-validate v1.2.1 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/google/s2a-go v0.1.9 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.5 // indirect
github.com/googleapis/gax-go/v2 v2.14.1 // indirect
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/detectors/gcp v1.34.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 // indirect
go.opentelemetry.io/otel v1.34.0 // indirect
go.opentelemetry.io/otel/metric v1.34.0 // indirect
go.opentelemetry.io/otel/sdk v1.34.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.34.0 // indirect
go.opentelemetry.io/otel/trace v1.34.0 // indirect
golang.org/x/crypto v0.36.0 // indirect
golang.org/x/net v0.37.0 // indirect
golang.org/x/sync v0.16.0 // indirect
golang.org/x/sys v0.31.0 // indirect
golang.org/x/text v0.28.0 // indirect
golang.org/x/time v0.11.0 // indirect
google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb // indirect
google.golang.org/grpc v1.71.0 // indirect
google.golang.org/protobuf v1.36.5 // indirect
)
spf13-afero-18d690e/gcsfs/go.sum 0000664 0000000 0000000 00000026077 15057601571 0016365 0 ustar 00root root 0000000 0000000 cel.dev/expr v0.19.2 h1:V354PbqIXr9IQdwy4SYA4xa0HXaWq1BUPAGzugBY5V4=
cel.dev/expr v0.19.2/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw=
cloud.google.com/go v0.118.3 h1:jsypSnrE/w4mJysioGdMBg4MiW/hHx/sArFpaBWHdME=
cloud.google.com/go v0.118.3/go.mod h1:Lhs3YLnBlwJ4KA6nuObNMZ/fCbOQBPuWKPoE0Wa/9Vc=
cloud.google.com/go/auth v0.15.0 h1:Ly0u4aA5vG/fsSsxu98qCQBemXtAtJf+95z9HK+cxps=
cloud.google.com/go/auth v0.15.0/go.mod h1:WJDGqZ1o9E9wKIL+IwStfyn/+s59zl4Bi+1KQNVXLZ8=
cloud.google.com/go/auth/oauth2adapt v0.2.7 h1:/Lc7xODdqcEw8IrZ9SvwnlLX6j9FHQM74z6cBk9Rw6M=
cloud.google.com/go/auth/oauth2adapt v0.2.7/go.mod h1:NTbTTzfvPl1Y3V1nPpOgl2w6d/FjO7NNUQaWSox6ZMc=
cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I=
cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg=
cloud.google.com/go/iam v1.4.1 h1:cFC25Nv+u5BkTR/BT1tXdoF2daiVbZ1RLx2eqfQ9RMM=
cloud.google.com/go/iam v1.4.1/go.mod h1:2vUEJpUG3Q9p2UdsyksaKpDzlwOrnMzS30isdReIcLM=
cloud.google.com/go/logging v1.13.0 h1:7j0HgAp0B94o1YRDqiqm26w4q1rDMH7XNRU34lJXHYc=
cloud.google.com/go/logging v1.13.0/go.mod h1:36CoKh6KA/M0PbhPKMq6/qety2DCAErbhXT62TuXALA=
cloud.google.com/go/longrunning v0.6.5 h1:sD+t8DO8j4HKW4QfouCklg7ZC1qC4uzVZt8iz3uTW+Q=
cloud.google.com/go/longrunning v0.6.5/go.mod h1:Et04XK+0TTLKa5IPYryKf5DkpwImy6TluQ1QTLwlKmY=
cloud.google.com/go/monitoring v1.24.0 h1:csSKiCJ+WVRgNkRzzz3BPoGjFhjPY23ZTcaenToJxMM=
cloud.google.com/go/monitoring v1.24.0/go.mod h1:Bd1PRK5bmQBQNnuGwHBfUamAV1ys9049oEPHnn4pcsc=
cloud.google.com/go/storage v1.51.0 h1:ZVZ11zCiD7b3k+cH5lQs/qcNaoSz3U9I0jgwVzqDlCw=
cloud.google.com/go/storage v1.51.0/go.mod h1:YEJfu/Ki3i5oHC/7jyTgsGZwdQ8P9hqMqvpi5kRKGgc=
cloud.google.com/go/trace v1.11.3 h1:c+I4YFjxRQjvAhRmSsmjpASUKq88chOX854ied0K/pE=
cloud.google.com/go/trace v1.11.3/go.mod h1:pt7zCYiDSQjC9Y2oqCsh9jF4GStB/hmjrYLsxRR27q8=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0 h1:3c8yed4lgqTt+oTQ+JNMDo+F4xprBf+O/il4ZC0nRLw=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0/go.mod h1:obipzmGjfSjam60XLwGfqUkJsfiheAl+TUjG+4yzyPM=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0 h1:fYE9p3esPxA/C0rQ0AHhP0drtPXDRhaWiwg1DPqO7IU=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.51.0/go.mod h1:BnBReJLvVYx2CS/UHOgVz2BXKXD9wsQPxZug20nZhd0=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.51.0 h1:OqVGm6Ei3x5+yZmSJG1Mh2NwHvpVmZ08CB5qJhT9Nuk=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/cloudmock v0.51.0/go.mod h1:SZiPHWGOOk3bl8tkevxkoiwPgsIl6CwrWcbwjfHZpdM=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0 h1:6/0iUd0xrnX7qt+mLNRwg5c0PGv8wpE8K90ryANQwMI=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.51.0/go.mod h1:otE2jQekW/PqXk1Awf5lmfokJx4uwuqcj1ab5SpGeW0=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42 h1:Om6kYQYDUk5wWbT0t0q6pvyM49i9XZAv9dDrkDA7gjk=
github.com/cncf/xds/go v0.0.0-20250121191232-2f005788dc42/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/envoyproxy/go-control-plane v0.13.4 h1:zEqyPVyku6IvWCFwux4x9RxkLOMUL+1vC9xUFv5l2/M=
github.com/envoyproxy/go-control-plane v0.13.4/go.mod h1:kDfuBlDVsSj2MjrLEtRWtHlsWIFcGyB2RMO44Dc5GZA=
github.com/envoyproxy/go-control-plane/envoy v1.32.4 h1:jb83lalDRZSpPWW2Z7Mck/8kXZ5CQAFYVjQcdVIr83A=
github.com/envoyproxy/go-control-plane/envoy v1.32.4/go.mod h1:Gzjc5k8JcJswLjAx1Zm+wSYE20UrLtt7JZMWiWQXQEw=
github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI=
github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4=
github.com/envoyproxy/protoc-gen-validate v1.2.1 h1:DEo3O99U8j4hBFwbJfrz9VtgcDfUKS7KJ7spH3d86P8=
github.com/envoyproxy/protoc-gen-validate v1.2.1/go.mod h1:d/C80l/jxXLdfEIhX1W2TmLfsJ31lvEjwamM4DxlWXU=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc=
github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0=
github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.3.5 h1:VgzTY2jogw3xt39CusEnFJWm7rlsq5yL5q9XdLOuP5g=
github.com/googleapis/enterprise-certificate-proxy v0.3.5/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q=
github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA=
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/contrib/detectors/gcp v1.34.0 h1:JRxssobiPg23otYU5SbWtQC//snGVIM3Tx6QRzlQBao=
go.opentelemetry.io/contrib/detectors/gcp v1.34.0/go.mod h1:cV4BMFcscUR/ckqLkbfQmF0PRsq8w/lMGzdbCSveBHo=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 h1:rgMkmiGfix9vFJDcDi1PK8WEQP4FLQwLDfhp5ZLpFeE=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0/go.mod h1:ijPqXp5P6IRRByFVVg9DY8P5HkxkHE5ARIa+86aXPf4=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 h1:CV7UdSGJt/Ao6Gp4CXckLxVRRsRgDHoI8XjbL3PDl8s=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0/go.mod h1:FRmFuRJfag1IZ2dPkHnEoSFVgTVPUd2qf5Vi69hLb8I=
go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY=
go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI=
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0 h1:WDdP9acbMYjbKIyJUhTvtzj601sVJOqgWdUxSdR/Ysc=
go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.29.0/go.mod h1:BLbf7zbNIONBLPwvFnwNHGj4zge8uTCM/UPIVW1Mq2I=
go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ=
go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE=
go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A=
go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU=
go.opentelemetry.io/otel/sdk/metric v1.34.0 h1:5CeK9ujjbFVL5c1PhLuStg1wxA7vQv7ce1EK0Gyvahk=
go.opentelemetry.io/otel/sdk/metric v1.34.0/go.mod h1:jQ/r8Ze28zRKoNRdkjCZxfs6YvBTG1+YIqyFVFYec5w=
go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k=
go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE=
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c=
golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/oauth2 v0.28.0 h1:CrgCKl8PPAVtLnU3c+EDw6x11699EWlsDeWNWKdIOkc=
golang.org/x/oauth2 v0.28.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
google.golang.org/api v0.226.0 h1:9A29y1XUD+YRXfnHkO66KggxHBZWg9LsTGqm7TkUvtQ=
google.golang.org/api v0.226.0/go.mod h1:WP/0Xm4LVvMOCldfvOISnWquSRWbG2kArDZcg+W2DbY=
google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb h1:ITgPrl429bc6+2ZraNSzMDk3I95nmQln2fuPstKwFDE=
google.golang.org/genproto v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:sAo5UzpjUwgFBCzupwhcLcxHVDK7vG5IqI30YnwX2eE=
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb h1:p31xT4yrYrSM/G4Sn2+TNUkVhFCbG9y8itM2S6Th950=
google.golang.org/genproto/googleapis/api v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:jbe3Bkdp+Dh2IrslsFCklNhweNTBgSYanP1UXhJDhKg=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb h1:TLPQVbx1GJ8VKZxz52VAxl1EBgKXXbTiU9Fc5fZeLn4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250303144028-a0af3efb3deb/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I=
google.golang.org/grpc v1.71.0 h1:kF77BGdPTQ4/JZWMlb9VpJ5pa25aqvVqogsxNHHdeBg=
google.golang.org/grpc v1.71.0/go.mod h1:H0GRtasmQOh9LkFoCPDu3ZrwUtD1YGE+b2vYBYd/8Ec=
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
spf13-afero-18d690e/gcsfs/internal/ 0000775 0000000 0000000 00000000000 15057601571 0017032 5 ustar 00root root 0000000 0000000 spf13-afero-18d690e/gcsfs/internal/stiface/ 0000775 0000000 0000000 00000000000 15057601571 0020450 5 ustar 00root root 0000000 0000000 spf13-afero-18d690e/gcsfs/internal/stiface/README.md 0000664 0000000 0000000 00000000532 15057601571 0021727 0 ustar 00root root 0000000 0000000 # Copy of [google-cloud-go-testing](https://github.com/googleapis/google-cloud-go-testing)
This is a temporary copy of the [google-cloud-go-testing](https://github.com/googleapis/google-cloud-go-testing) library.
The library is deprecated and the code was copied here to drop it as a dependency (allowing other library dependencies to be upgraded).
spf13-afero-18d690e/gcsfs/internal/stiface/adapters.go 0000664 0000000 0000000 00000011604 15057601571 0022604 0 ustar 00root root 0000000 0000000 // Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package stiface
import (
"context"
"cloud.google.com/go/storage"
)
// AdaptClient adapts a storage.Client so that it satisfies the Client
// interface.
func AdaptClient(c *storage.Client) Client {
return client{c}
}
type (
client struct{ *storage.Client }
bucketHandle struct{ *storage.BucketHandle }
objectHandle struct{ *storage.ObjectHandle }
bucketIterator struct{ *storage.BucketIterator }
objectIterator struct{ *storage.ObjectIterator }
reader struct{ *storage.Reader }
writer struct{ *storage.Writer }
copier struct{ *storage.Copier }
composer struct{ *storage.Composer }
aclHandle struct{ *storage.ACLHandle }
)
func (client) embedToIncludeNewMethods() {}
func (bucketHandle) embedToIncludeNewMethods() {}
func (objectHandle) embedToIncludeNewMethods() {}
func (bucketIterator) embedToIncludeNewMethods() {}
func (objectIterator) embedToIncludeNewMethods() {}
func (writer) embedToIncludeNewMethods() {}
func (reader) embedToIncludeNewMethods() {}
func (copier) embedToIncludeNewMethods() {}
func (composer) embedToIncludeNewMethods() {}
func (aclHandle) embedToIncludeNewMethods() {}
func (c client) Bucket(name string) BucketHandle {
return bucketHandle{c.Client.Bucket(name)}
}
func (c client) Buckets(ctx context.Context, projectID string) BucketIterator {
return bucketIterator{c.Client.Buckets(ctx, projectID)}
}
func (b bucketHandle) Object(name string) ObjectHandle {
return objectHandle{b.BucketHandle.Object(name)}
}
func (b bucketHandle) If(conds storage.BucketConditions) BucketHandle {
return bucketHandle{b.BucketHandle.If(conds)}
}
func (b bucketHandle) Objects(ctx context.Context, q *storage.Query) ObjectIterator {
return objectIterator{b.BucketHandle.Objects(ctx, q)}
}
func (b bucketHandle) DefaultObjectACL() ACLHandle {
return aclHandle{b.BucketHandle.DefaultObjectACL()}
}
func (b bucketHandle) ACL() ACLHandle {
return aclHandle{b.BucketHandle.ACL()}
}
func (b bucketHandle) UserProject(projectID string) BucketHandle {
return bucketHandle{b.BucketHandle.UserProject(projectID)}
}
func (bi bucketIterator) SetPrefix(s string) {
bi.BucketIterator.Prefix = s
}
func (o objectHandle) ACL() ACLHandle {
return aclHandle{o.ObjectHandle.ACL()}
}
func (o objectHandle) Generation(gen int64) ObjectHandle {
return objectHandle{o.ObjectHandle.Generation(gen)}
}
func (o objectHandle) If(conds storage.Conditions) ObjectHandle {
return objectHandle{o.ObjectHandle.If(conds)}
}
func (o objectHandle) Key(encryptionKey []byte) ObjectHandle {
return objectHandle{o.ObjectHandle.Key(encryptionKey)}
}
func (o objectHandle) ReadCompressed(compressed bool) ObjectHandle {
return objectHandle{o.ObjectHandle.ReadCompressed(compressed)}
}
func (o objectHandle) NewReader(ctx context.Context) (Reader, error) {
r, err := o.ObjectHandle.NewReader(ctx)
if err != nil {
return nil, err
}
return reader{r}, nil
}
func (o objectHandle) NewRangeReader(ctx context.Context, offset, length int64) (Reader, error) {
r, err := o.ObjectHandle.NewRangeReader(ctx, offset, length)
if err != nil {
return nil, err
}
return reader{r}, nil
}
func (o objectHandle) NewWriter(ctx context.Context) Writer {
return writer{o.ObjectHandle.NewWriter(ctx)}
}
func (o objectHandle) CopierFrom(src ObjectHandle) Copier {
return copier{o.ObjectHandle.CopierFrom(src.(objectHandle).ObjectHandle)}
}
func (o objectHandle) ComposerFrom(srcs ...ObjectHandle) Composer {
objs := make([]*storage.ObjectHandle, len(srcs))
for i, s := range srcs {
objs[i] = s.(objectHandle).ObjectHandle
}
return composer{o.ObjectHandle.ComposerFrom(objs...)}
}
func (w writer) ObjectAttrs() *storage.ObjectAttrs {
return &w.Writer.ObjectAttrs
}
func (w writer) SetChunkSize(s int) {
w.ChunkSize = s
}
func (w writer) SetProgressFunc(f func(int64)) {
w.ProgressFunc = f
}
func (w writer) SetCRC32C(c uint32) {
w.CRC32C = c
w.SendCRC32C = true
}
func (c copier) ObjectAttrs() *storage.ObjectAttrs {
return &c.Copier.ObjectAttrs
}
func (c copier) SetRewriteToken(t string) {
c.RewriteToken = t
}
func (c copier) SetProgressFunc(f func(copiedBytes, totalBytes uint64)) {
c.ProgressFunc = f
}
func (c copier) SetDestinationKMSKeyName(k string) {
c.DestinationKMSKeyName = k
}
func (c composer) ObjectAttrs() *storage.ObjectAttrs {
return &c.Composer.ObjectAttrs
}
spf13-afero-18d690e/gcsfs/internal/stiface/doc.go 0000664 0000000 0000000 00000002513 15057601571 0021545 0 ustar 00root root 0000000 0000000 // Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package stiface provides a set of interfaces for the types in
// cloud.google.com/go/storage. These can be used to create mocks or other test
// doubles. The package also provides adapters to enable the types of the
// storage package to implement these interfaces.
//
// We do not recommend using mocks for most testing. Please read
// https://testing.googleblog.com/2013/05/testing-on-toilet-dont-overuse-mocks.html.
//
// Note: This package is in alpha. Some backwards-incompatible changes may occur.
//
// You must embed these interfaces to implement them:
//
// type ClientMock struct {
// stiface.Client
// ...
// }
//
// This ensures that your implementations will not break when methods are added
// to the interfaces.
package stiface
spf13-afero-18d690e/gcsfs/internal/stiface/examples_test.go.bak 0000664 0000000 0000000 00000001771 15057601571 0024416 0 ustar 00root root 0000000 0000000 // Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package stiface_test
import (
"context"
"cloud.google.com/go/storage"
"github.com/spf13/afero/gcsfs/internal/stiface"
)
func Example_AdaptClient() {
ctx := context.Background()
c, err := storage.NewClient(ctx)
if err != nil {
// TODO: Handle error.
}
client := stiface.AdaptClient(c)
w := client.Bucket("my-bucket").Object("my-object").NewWriter(ctx)
w.ObjectAttrs().ContentType = "text/plain"
// TODO: Use w.
}
spf13-afero-18d690e/gcsfs/internal/stiface/interfaces.go 0000664 0000000 0000000 00000007013 15057601571 0023123 0 ustar 00root root 0000000 0000000 // Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package stiface
import (
"context"
"io"
"cloud.google.com/go/storage"
"google.golang.org/api/iterator"
)
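// Client mirrors the methods of *storage.Client that are used by this package.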
type Client interface {
Bucket(name string) BucketHandle
Buckets(ctx context.Context, projectID string) BucketIterator
Close() error
embedToIncludeNewMethods()
}
type ObjectHandle interface {
ACL() ACLHandle
Generation(int64) ObjectHandle
If(storage.Conditions) ObjectHandle
Key([]byte) ObjectHandle
ReadCompressed(bool) ObjectHandle
Attrs(context.Context) (*storage.ObjectAttrs, error)
Update(context.Context, storage.ObjectAttrsToUpdate) (*storage.ObjectAttrs, error)
NewReader(context.Context) (Reader, error)
NewRangeReader(context.Context, int64, int64) (Reader, error)
NewWriter(context.Context) Writer
Delete(context.Context) error
CopierFrom(ObjectHandle) Copier
ComposerFrom(...ObjectHandle) Composer
embedToIncludeNewMethods()
}
type BucketHandle interface {
Create(context.Context, string, *storage.BucketAttrs) error
Delete(context.Context) error
DefaultObjectACL() ACLHandle
Object(string) ObjectHandle
Attrs(context.Context) (*storage.BucketAttrs, error)
Update(context.Context, storage.BucketAttrsToUpdate) (*storage.BucketAttrs, error)
If(storage.BucketConditions) BucketHandle
Objects(context.Context, *storage.Query) ObjectIterator
ACL() ACLHandle
// IAM() *iam.Handle
UserProject(projectID string) BucketHandle
Notifications(context.Context) (map[string]*storage.Notification, error)
AddNotification(context.Context, *storage.Notification) (*storage.Notification, error)
DeleteNotification(context.Context, string) error
LockRetentionPolicy(context.Context) error
embedToIncludeNewMethods()
}
type ObjectIterator interface {
Next() (*storage.ObjectAttrs, error)
PageInfo() *iterator.PageInfo
embedToIncludeNewMethods()
}
type BucketIterator interface {
SetPrefix(string)
Next() (*storage.BucketAttrs, error)
PageInfo() *iterator.PageInfo
embedToIncludeNewMethods()
}
type ACLHandle interface {
Delete(context.Context, storage.ACLEntity) error
Set(context.Context, storage.ACLEntity, storage.ACLRole) error
List(context.Context) ([]storage.ACLRule, error)
embedToIncludeNewMethods()
}
type Reader interface {
io.ReadCloser
Size() int64
Remain() int64
ContentType() string
ContentEncoding() string
CacheControl() string
embedToIncludeNewMethods()
}
type Writer interface {
io.WriteCloser
ObjectAttrs() *storage.ObjectAttrs
SetChunkSize(int)
SetProgressFunc(func(int64))
SetCRC32C(uint32) // Sets both CRC32C and SendCRC32C.
CloseWithError(err error) error
Attrs() *storage.ObjectAttrs
embedToIncludeNewMethods()
}
type Copier interface {
ObjectAttrs() *storage.ObjectAttrs
SetRewriteToken(string)
SetProgressFunc(func(uint64, uint64))
SetDestinationKMSKeyName(string)
Run(context.Context) (*storage.ObjectAttrs, error)
embedToIncludeNewMethods()
}
type Composer interface {
ObjectAttrs() *storage.ObjectAttrs
Run(context.Context) (*storage.ObjectAttrs, error)
embedToIncludeNewMethods()
}
spf13-afero-18d690e/gcsfs/internal/stiface/stiface_test.go 0000664 0000000 0000000 00000011475 15057601571 0023464 0 ustar 00root root 0000000 0000000 // Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package stiface
import (
"bytes"
"context"
"fmt"
"io/ioutil"
"os"
"testing"
"cloud.google.com/go/storage"
)
func TestIntegration(t *testing.T) {
if testing.Short() {
t.Skip("integration tests skipped in short mode")
}
name := os.Getenv("STIFACE_BUCKET")
if name == "" {
t.Skip("missing STIFACE_BUCKET environment variable")
}
ctx := context.Background()
c, err := storage.NewClient(ctx)
if err != nil {
t.Fatal(err)
}
client := AdaptClient(c)
defer client.Close()
bkt := client.Bucket(name)
basicTests(t, name, bkt)
}
func basicTests(t *testing.T, bucketName string, bkt BucketHandle) {
ctx := context.Background()
attrs, err := bkt.Attrs(ctx)
if err != nil {
t.Fatal(err)
}
if got, want := attrs.Name, bucketName; got != want {
t.Errorf("name: got %v, want %v", got, want)
}
const contents = "hello, stiface"
obj := bkt.Object("stiface-test")
w := obj.NewWriter(ctx)
if _, err := fmt.Fprint(w, contents); err != nil {
t.Fatal(err)
}
if err := w.Close(); err != nil {
t.Fatal(err)
}
bytes := readObject(t, obj)
if got, want := string(bytes), contents; got != want {
t.Errorf("got %q, want %q", got, want)
}
if err := obj.Delete(ctx); err != nil {
t.Errorf("deleting: %v", err)
}
}
func readObject(t *testing.T, obj ObjectHandle) []byte {
r, err := obj.NewReader(context.Background())
if err != nil {
t.Fatalf("reading %v: %v", obj, err)
}
defer r.Close()
bytes, err := ioutil.ReadAll(r)
if err != nil {
t.Fatalf("reading %v: %v", obj, err)
}
return bytes
}
// This test demonstrates how to use this package to create a simple fake for the storage client.
func TestFake(t *testing.T) {
ctx := context.Background()
client := newFakeClient()
bkt := client.Bucket("my-bucket")
if err := bkt.Create(ctx, "my-project", nil); err != nil {
t.Fatal(err)
}
basicTests(t, "my-bucket", bkt)
}
type fakeClient struct {
Client
buckets map[string]*fakeBucket
}
type fakeBucket struct {
attrs *storage.BucketAttrs
objects map[string][]byte
}
func newFakeClient() Client {
return &fakeClient{buckets: map[string]*fakeBucket{}}
}
func (c *fakeClient) Bucket(name string) BucketHandle {
return fakeBucketHandle{c: c, name: name}
}
type fakeBucketHandle struct {
BucketHandle
c *fakeClient
name string
}
func (b fakeBucketHandle) Create(_ context.Context, _ string, attrs *storage.BucketAttrs) error {
if _, ok := b.c.buckets[b.name]; ok {
return fmt.Errorf("bucket %q already exists", b.name)
}
if attrs == nil {
attrs = &storage.BucketAttrs{}
}
attrs.Name = b.name
b.c.buckets[b.name] = &fakeBucket{attrs: attrs, objects: map[string][]byte{}}
return nil
}
func (b fakeBucketHandle) Attrs(context.Context) (*storage.BucketAttrs, error) {
bkt, ok := b.c.buckets[b.name]
if !ok {
return nil, fmt.Errorf("bucket %q does not exist", b.name)
}
return bkt.attrs, nil
}
func (b fakeBucketHandle) Object(name string) ObjectHandle {
return fakeObjectHandle{c: b.c, bucketName: b.name, name: name}
}
type fakeObjectHandle struct {
ObjectHandle
c *fakeClient
bucketName string
name string
}
func (o fakeObjectHandle) NewReader(context.Context) (Reader, error) {
bkt, ok := o.c.buckets[o.bucketName]
if !ok {
return nil, fmt.Errorf("bucket %q not found", o.bucketName)
}
contents, ok := bkt.objects[o.name]
if !ok {
return nil, fmt.Errorf("object %q not found in bucket %q", o.name, o.bucketName)
}
return fakeReader{r: bytes.NewReader(contents)}, nil
}
func (o fakeObjectHandle) Delete(context.Context) error {
bkt, ok := o.c.buckets[o.bucketName]
if !ok {
return fmt.Errorf("bucket %q not found", o.bucketName)
}
delete(bkt.objects, o.name)
return nil
}
type fakeReader struct {
Reader
r *bytes.Reader
}
func (r fakeReader) Read(buf []byte) (int, error) {
return r.r.Read(buf)
}
func (r fakeReader) Close() error {
return nil
}
func (o fakeObjectHandle) NewWriter(context.Context) Writer {
return &fakeWriter{obj: o}
}
type fakeWriter struct {
Writer
obj fakeObjectHandle
buf bytes.Buffer
}
func (w *fakeWriter) Write(data []byte) (int, error) {
return w.buf.Write(data)
}
func (w *fakeWriter) Close() error {
bkt, ok := w.obj.c.buckets[w.obj.bucketName]
if !ok {
return fmt.Errorf("bucket %q not found", w.obj.bucketName)
}
bkt.objects[w.obj.name] = w.buf.Bytes()
return nil
}
spf13-afero-18d690e/go.mod 0000664 0000000 0000000 00000000114 15057601571 0015213 0 ustar 00root root 0000000 0000000 module github.com/spf13/afero
go 1.23.0
require golang.org/x/text v0.28.0
spf13-afero-18d690e/go.sum 0000664 0000000 0000000 00000000233 15057601571 0015242 0 ustar 00root root 0000000 0000000 golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
spf13-afero-18d690e/httpFs.go 0000664 0000000 0000000 00000005250 15057601571 0015712 0 ustar 00root root 0000000 0000000 // Copyright © 2014 Steve Francia .
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"errors"
"net/http"
"os"
"path"
"path/filepath"
"strings"
"time"
)
type httpDir struct {
basePath string
fs HttpFs
}
func (d httpDir) Open(name string) (http.File, error) {
if filepath.Separator != '/' && strings.ContainsRune(name, filepath.Separator) ||
strings.Contains(name, "\x00") {
return nil, errors.New("http: invalid character in file path")
}
dir := string(d.basePath)
if dir == "" {
dir = "."
}
f, err := d.fs.Open(filepath.Join(dir, filepath.FromSlash(path.Clean("/"+name))))
if err != nil {
return nil, err
}
return f, nil
}
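// HttpFs wraps an afero.Fs so it can be served with net/http: Dir returns an
// http.FileSystem rooted at the given base path, and Open returns the file as
// an http.File when the underlying filesystem provides one.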
type HttpFs struct {
source Fs
}
func NewHttpFs(source Fs) *HttpFs {
return &HttpFs{source: source}
}
func (h HttpFs) Dir(s string) *httpDir {
return &httpDir{basePath: s, fs: h}
}
func (h HttpFs) Name() string { return "h HttpFs" }
func (h HttpFs) Create(name string) (File, error) {
return h.source.Create(name)
}
func (h HttpFs) Chmod(name string, mode os.FileMode) error {
return h.source.Chmod(name, mode)
}
func (h HttpFs) Chown(name string, uid, gid int) error {
return h.source.Chown(name, uid, gid)
}
func (h HttpFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
return h.source.Chtimes(name, atime, mtime)
}
func (h HttpFs) Mkdir(name string, perm os.FileMode) error {
return h.source.Mkdir(name, perm)
}
func (h HttpFs) MkdirAll(path string, perm os.FileMode) error {
return h.source.MkdirAll(path, perm)
}
func (h HttpFs) Open(name string) (http.File, error) {
f, err := h.source.Open(name)
if err == nil {
if httpfile, ok := f.(http.File); ok {
return httpfile, nil
}
}
return nil, err
}
func (h HttpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
return h.source.OpenFile(name, flag, perm)
}
func (h HttpFs) Remove(name string) error {
return h.source.Remove(name)
}
func (h HttpFs) RemoveAll(path string) error {
return h.source.RemoveAll(path)
}
func (h HttpFs) Rename(oldname, newname string) error {
return h.source.Rename(oldname, newname)
}
func (h HttpFs) Stat(name string) (os.FileInfo, error) {
return h.source.Stat(name)
}
spf13-afero-18d690e/internal/ 0000775 0000000 0000000 00000000000 15057601571 0015725 5 ustar 00root root 0000000 0000000 spf13-afero-18d690e/internal/common/ 0000775 0000000 0000000 00000000000 15057601571 0017215 5 ustar 00root root 0000000 0000000 spf13-afero-18d690e/internal/common/adapters.go 0000664 0000000 0000000 00000001676 15057601571 0021361 0 ustar 00root root 0000000 0000000 // Copyright © 2022 Steve Francia .
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package common
import "io/fs"
// FileInfoDirEntry provides an adapter from os.FileInfo to fs.DirEntry
type FileInfoDirEntry struct {
fs.FileInfo
}
var _ fs.DirEntry = FileInfoDirEntry{}
func (d FileInfoDirEntry) Type() fs.FileMode { return d.FileInfo.Mode().Type() }
func (d FileInfoDirEntry) Info() (fs.FileInfo, error) { return d.FileInfo, nil }
spf13-afero-18d690e/iofs.go 0000664 0000000 0000000 00000014621 15057601571 0015404 0 ustar 00root root 0000000 0000000 //go:build go1.16
// +build go1.16
package afero
import (
"io"
"io/fs"
"os"
"path"
"sort"
"time"
"github.com/spf13/afero/internal/common"
)
// IOFS adapts an afero.Fs to the standard library io/fs.FS interface.
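//
// A minimal usage sketch (the wrapped filesystem and path are placeholders):
//
//	fsys := afero.NewIOFS(afero.NewMemMapFs())
//	data, err := fs.ReadFile(fsys, "dir/config.txt")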
type IOFS struct {
Fs
}
func NewIOFS(fs Fs) IOFS {
return IOFS{Fs: fs}
}
var (
_ fs.FS = IOFS{}
_ fs.GlobFS = IOFS{}
_ fs.ReadDirFS = IOFS{}
_ fs.ReadFileFS = IOFS{}
_ fs.StatFS = IOFS{}
_ fs.SubFS = IOFS{}
)
func (iofs IOFS) Open(name string) (fs.File, error) {
const op = "open"
// by convention for fs.FS implementations we should perform this check
if !fs.ValidPath(name) {
return nil, iofs.wrapError(op, name, fs.ErrInvalid)
}
file, err := iofs.Fs.Open(name)
if err != nil {
return nil, iofs.wrapError(op, name, err)
}
// file should implement fs.ReadDirFile
if _, ok := file.(fs.ReadDirFile); !ok {
file = readDirFile{file}
}
return file, nil
}
func (iofs IOFS) Glob(pattern string) ([]string, error) {
const op = "glob"
// afero.Glob does not perform this check but it's required for implementations
if _, err := path.Match(pattern, ""); err != nil {
return nil, iofs.wrapError(op, pattern, err)
}
items, err := Glob(iofs.Fs, pattern)
if err != nil {
return nil, iofs.wrapError(op, pattern, err)
}
return items, nil
}
func (iofs IOFS) ReadDir(name string) ([]fs.DirEntry, error) {
f, err := iofs.Fs.Open(name)
if err != nil {
return nil, iofs.wrapError("readdir", name, err)
}
defer f.Close()
if rdf, ok := f.(fs.ReadDirFile); ok {
items, err := rdf.ReadDir(-1)
if err != nil {
return nil, iofs.wrapError("readdir", name, err)
}
sort.Slice(items, func(i, j int) bool { return items[i].Name() < items[j].Name() })
return items, nil
}
items, err := f.Readdir(-1)
if err != nil {
return nil, iofs.wrapError("readdir", name, err)
}
sort.Sort(byName(items))
ret := make([]fs.DirEntry, len(items))
for i := range items {
ret[i] = common.FileInfoDirEntry{FileInfo: items[i]}
}
return ret, nil
}
func (iofs IOFS) ReadFile(name string) ([]byte, error) {
const op = "readfile"
if !fs.ValidPath(name) {
return nil, iofs.wrapError(op, name, fs.ErrInvalid)
}
bytes, err := ReadFile(iofs.Fs, name)
if err != nil {
return nil, iofs.wrapError(op, name, err)
}
return bytes, nil
}
func (iofs IOFS) Sub(dir string) (fs.FS, error) { return IOFS{NewBasePathFs(iofs.Fs, dir)}, nil }
func (IOFS) wrapError(op, path string, err error) error {
if _, ok := err.(*fs.PathError); ok {
return err // don't need to wrap again
}
return &fs.PathError{
Op: op,
Path: path,
Err: err,
}
}
// readDirFile provides an adapter from afero.File to fs.ReadDirFile, needed for a correct Open
type readDirFile struct {
File
}
var _ fs.ReadDirFile = readDirFile{}
func (r readDirFile) ReadDir(n int) ([]fs.DirEntry, error) {
items, err := r.Readdir(n)
if err != nil {
return nil, err
}
ret := make([]fs.DirEntry, len(items))
for i := range items {
ret[i] = common.FileInfoDirEntry{FileInfo: items[i]}
}
return ret, nil
}
// FromIOFS adapts an io/fs.FS so it can be used as an afero.Fs.
// Note that io/fs.FS is read-only, so all mutating methods will return a *fs.PathError with fs.ErrPermission.
// To store modifications you may use afero.CopyOnWriteFs.
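//
// A minimal usage sketch (the fstest.MapFS contents are placeholders): wrap a
// read-only fs.FS and layer a writable MemMapFs on top to accept changes:
//
//	base := afero.FromIOFS{FS: fstest.MapFS{"hello.txt": {Data: []byte("hi")}}}
//	writable := afero.NewCopyOnWriteFs(base, afero.NewMemMapFs())
//	err := afero.WriteFile(writable, "hello.txt", []byte("changed"), 0o644)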
type FromIOFS struct {
fs.FS
}
var _ Fs = FromIOFS{}
func (f FromIOFS) Create(name string) (File, error) { return nil, notImplemented("create", name) }
func (f FromIOFS) Mkdir(
name string,
perm os.FileMode,
) error {
return notImplemented("mkdir", name)
}
func (f FromIOFS) MkdirAll(path string, perm os.FileMode) error {
return notImplemented("mkdirall", path)
}
func (f FromIOFS) Open(name string) (File, error) {
file, err := f.FS.Open(name)
if err != nil {
return nil, err
}
return fromIOFSFile{File: file, name: name}, nil
}
func (f FromIOFS) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
return f.Open(name)
}
func (f FromIOFS) Remove(name string) error {
return notImplemented("remove", name)
}
func (f FromIOFS) RemoveAll(path string) error {
return notImplemented("removeall", path)
}
func (f FromIOFS) Rename(oldname, newname string) error {
return notImplemented("rename", oldname)
}
func (f FromIOFS) Stat(name string) (os.FileInfo, error) { return fs.Stat(f.FS, name) }
func (f FromIOFS) Name() string { return "fromiofs" }
func (f FromIOFS) Chmod(name string, mode os.FileMode) error {
return notImplemented("chmod", name)
}
func (f FromIOFS) Chown(name string, uid, gid int) error {
return notImplemented("chown", name)
}
func (f FromIOFS) Chtimes(name string, atime time.Time, mtime time.Time) error {
return notImplemented("chtimes", name)
}
type fromIOFSFile struct {
fs.File
name string
}
func (f fromIOFSFile) ReadAt(p []byte, off int64) (n int, err error) {
readerAt, ok := f.File.(io.ReaderAt)
if !ok {
return -1, notImplemented("readat", f.name)
}
return readerAt.ReadAt(p, off)
}
func (f fromIOFSFile) Seek(offset int64, whence int) (int64, error) {
seeker, ok := f.File.(io.Seeker)
if !ok {
return -1, notImplemented("seek", f.name)
}
return seeker.Seek(offset, whence)
}
func (f fromIOFSFile) Write(p []byte) (n int, err error) {
return -1, notImplemented("write", f.name)
}
func (f fromIOFSFile) WriteAt(p []byte, off int64) (n int, err error) {
return -1, notImplemented("writeat", f.name)
}
func (f fromIOFSFile) Name() string { return f.name }
func (f fromIOFSFile) Readdir(count int) ([]os.FileInfo, error) {
rdfile, ok := f.File.(fs.ReadDirFile)
if !ok {
return nil, notImplemented("readdir", f.name)
}
entries, err := rdfile.ReadDir(count)
if err != nil {
return nil, err
}
ret := make([]os.FileInfo, len(entries))
for i := range entries {
ret[i], err = entries[i].Info()
if err != nil {
return nil, err
}
}
return ret, nil
}
func (f fromIOFSFile) Readdirnames(n int) ([]string, error) {
rdfile, ok := f.File.(fs.ReadDirFile)
if !ok {
return nil, notImplemented("readdir", f.name)
}
entries, err := rdfile.ReadDir(n)
if err != nil {
return nil, err
}
ret := make([]string, len(entries))
for i := range entries {
ret[i] = entries[i].Name()
}
return ret, nil
}
func (f fromIOFSFile) Sync() error { return nil }
func (f fromIOFSFile) Truncate(size int64) error {
return notImplemented("truncate", f.name)
}
func (f fromIOFSFile) WriteString(s string) (ret int, err error) {
return -1, notImplemented("writestring", f.name)
}
func notImplemented(op, path string) error {
return &fs.PathError{Op: op, Path: path, Err: fs.ErrPermission}
}
spf13-afero-18d690e/iofs_test.go 0000664 0000000 0000000 00000027376 15057601571 0016456 0 ustar 00root root 0000000 0000000 //go:build go1.16
// +build go1.16
package afero
import (
"bytes"
"errors"
"fmt"
"io"
"io/fs"
"math/rand"
"os"
"path/filepath"
"runtime"
"testing"
"testing/fstest"
"time"
"github.com/spf13/afero/internal/common"
)
func TestIOFS(t *testing.T) {
if runtime.GOOS == "windows" {
// TODO(bep): some of the "bad path" tests in fstest.TestFS fail on Windows
t.Skip("Skipping on Windows")
}
t.Parallel()
t.Run("use MemMapFs", func(t *testing.T) {
mmfs := NewMemMapFs()
err := mmfs.MkdirAll("dir1/dir2", os.ModePerm)
if err != nil {
t.Fatal("MkdirAll failed:", err)
}
f, err := mmfs.OpenFile("dir1/dir2/test.txt", os.O_RDWR|os.O_CREATE, os.ModePerm)
if err != nil {
t.Fatal("OpenFile (O_CREATE) failed:", err)
}
f.Close()
if err := fstest.TestFS(NewIOFS(mmfs), "dir1/dir2/test.txt"); err != nil {
t.Error(err)
}
})
t.Run("use OsFs", func(t *testing.T) {
osfs := NewBasePathFs(NewOsFs(), t.TempDir())
err := osfs.MkdirAll("dir1/dir2", os.ModePerm)
if err != nil {
t.Fatal("MkdirAll failed:", err)
}
f, err := osfs.OpenFile("dir1/dir2/test.txt", os.O_RDWR|os.O_CREATE, os.ModePerm)
if err != nil {
t.Fatal("OpenFile (O_CREATE) failed:", err)
}
f.Close()
if err := fstest.TestFS(NewIOFS(osfs), "dir1/dir2/test.txt"); err != nil {
t.Error(err)
}
})
}
func TestIOFSNativeDirEntryWhenPossible(t *testing.T) {
t.Parallel()
osfs := NewBasePathFs(NewOsFs(), t.TempDir())
err := osfs.MkdirAll("dir1/dir2", os.ModePerm)
if err != nil {
t.Fatal(err)
}
const numFiles = 10
var fileNumbers []int
for i := 0; i < numFiles; i++ {
fileNumbers = append(fileNumbers, i)
}
rand.Shuffle(len(fileNumbers), func(i, j int) {
fileNumbers[i], fileNumbers[j] = fileNumbers[j], fileNumbers[i]
})
for _, i := range fileNumbers {
f, err := osfs.Create(fmt.Sprintf("dir1/dir2/test%d.txt", i))
if err != nil {
t.Fatal(err)
}
f.Close()
}
dir2, err := osfs.Open("dir1/dir2")
if err != nil {
t.Fatal(err)
}
defer dir2.Close()
assertDirEntries := func(entries []fs.DirEntry, ordered bool) {
if len(entries) != numFiles {
t.Fatalf("expected %d, got %d", numFiles, len(entries))
}
for i, entry := range entries {
if _, ok := entry.(common.FileInfoDirEntry); ok {
t.Fatal("DirEntry not native")
}
if ordered && entry.Name() != fmt.Sprintf("test%d.txt", i) {
t.Fatalf("expected %s, got %s", fmt.Sprintf("test%d.txt", i), entry.Name())
}
}
}
dirEntries, err := dir2.(fs.ReadDirFile).ReadDir(-1)
if err != nil {
t.Fatal(err)
}
assertDirEntries(dirEntries, false)
iofs := NewIOFS(osfs)
dirEntries, err = iofs.ReadDir("dir1/dir2")
if err != nil {
t.Fatal(err)
}
assertDirEntries(dirEntries, true)
fileCount := 0
err = fs.WalkDir(iofs, "", func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
if !d.IsDir() {
fileCount++
}
if _, ok := d.(common.FileInfoDirEntry); ok {
t.Fatal("DirEntry not native")
}
return nil
})
if err != nil {
t.Fatal(err)
}
if fileCount != numFiles {
t.Fatalf("expected %d, got %d", numFiles, fileCount)
}
}
func TestFromIOFS(t *testing.T) {
t.Parallel()
fsys := fstest.MapFS{
"test.txt": {
Data: []byte("File in root"),
Mode: fs.ModePerm,
ModTime: time.Now(),
},
"dir1": {
Mode: fs.ModeDir | fs.ModePerm,
ModTime: time.Now(),
},
"dir1/dir2": {
Mode: fs.ModeDir | fs.ModePerm,
ModTime: time.Now(),
},
"dir1/dir2/hello.txt": {
Data: []byte("Hello world"),
Mode: fs.ModePerm,
ModTime: time.Now(),
},
}
fromIOFS := FromIOFS{fsys}
t.Run("Create", func(t *testing.T) {
_, err := fromIOFS.Create("test")
assertPermissionError(t, err)
})
t.Run("Mkdir", func(t *testing.T) {
err := fromIOFS.Mkdir("test", 0)
assertPermissionError(t, err)
})
t.Run("MkdirAll", func(t *testing.T) {
err := fromIOFS.MkdirAll("test", 0)
assertPermissionError(t, err)
})
t.Run("Open", func(t *testing.T) {
t.Run("non existing file", func(t *testing.T) {
_, err := fromIOFS.Open("nonexisting")
if !errors.Is(err, fs.ErrNotExist) {
t.Errorf("Expected error to be fs.ErrNotExist, got %[1]T (%[1]v)", err)
}
})
t.Run("directory", func(t *testing.T) {
dirFile, err := fromIOFS.Open("dir1")
if err != nil {
t.Errorf("dir1 open failed: %v", err)
return
}
defer dirFile.Close()
dirStat, err := dirFile.Stat()
if err != nil {
t.Errorf("dir1 stat failed: %v", err)
return
}
if !dirStat.IsDir() {
t.Errorf("dir1 stat told that it is not a directory")
return
}
})
t.Run("simple file", func(t *testing.T) {
file, err := fromIOFS.Open("test.txt")
if err != nil {
t.Errorf("test.txt open failed: %v", err)
return
}
defer file.Close()
fileStat, err := file.Stat()
if err != nil {
t.Errorf("test.txt stat failed: %v", err)
return
}
if fileStat.IsDir() {
t.Errorf("test.txt stat told that it is a directory")
return
}
})
})
t.Run("Remove", func(t *testing.T) {
err := fromIOFS.Remove("test")
assertPermissionError(t, err)
})
t.Run("Rename", func(t *testing.T) {
err := fromIOFS.Rename("test", "test2")
assertPermissionError(t, err)
})
t.Run("Stat", func(t *testing.T) {
t.Run("non existing file", func(t *testing.T) {
_, err := fromIOFS.Stat("nonexisting")
if !errors.Is(err, fs.ErrNotExist) {
t.Errorf("Expected error to be fs.ErrNotExist, got %[1]T (%[1]v)", err)
}
})
t.Run("directory", func(t *testing.T) {
stat, err := fromIOFS.Stat("dir1/dir2")
if err != nil {
t.Errorf("dir1/dir2 stat failed: %v", err)
return
}
if !stat.IsDir() {
t.Errorf("dir1/dir2 stat told that it is not a directory")
return
}
})
t.Run("file", func(t *testing.T) {
stat, err := fromIOFS.Stat("dir1/dir2/hello.txt")
if err != nil {
t.Errorf("dir1/dir2 stat failed: %v", err)
return
}
if stat.IsDir() {
t.Errorf("dir1/dir2/hello.txt stat told that it is a directory")
return
}
if lenFile := len(fsys["dir1/dir2/hello.txt"].Data); int64(lenFile) != stat.Size() {
t.Errorf(
"dir1/dir2/hello.txt stat told invalid size: expected %d, got %d",
lenFile,
stat.Size(),
)
return
}
})
})
t.Run("Chmod", func(t *testing.T) {
err := fromIOFS.Chmod("test", os.ModePerm)
assertPermissionError(t, err)
})
t.Run("Chown", func(t *testing.T) {
err := fromIOFS.Chown("test", 0, 0)
assertPermissionError(t, err)
})
t.Run("Chtimes", func(t *testing.T) {
err := fromIOFS.Chtimes("test", time.Now(), time.Now())
assertPermissionError(t, err)
})
}
func TestFromIOFS_File(t *testing.T) {
t.Parallel()
fsys := fstest.MapFS{
"test.txt": {
Data: []byte("File in root"),
Mode: fs.ModePerm,
ModTime: time.Now(),
},
"dir1": {
Mode: fs.ModeDir | fs.ModePerm,
ModTime: time.Now(),
},
"dir2": {
Mode: fs.ModeDir | fs.ModePerm,
ModTime: time.Now(),
},
}
fromIOFS := FromIOFS{fsys}
file, err := fromIOFS.Open("test.txt")
if err != nil {
t.Errorf("test.txt open failed: %v", err)
return
}
defer file.Close()
fileStat, err := file.Stat()
if err != nil {
t.Errorf("test.txt stat failed: %v", err)
return
}
if fileStat.IsDir() {
t.Errorf("test.txt stat told that it is a directory")
return
}
t.Run("ReadAt", func(t *testing.T) {
// MapFS files implements io.ReaderAt
b := make([]byte, 2)
_, err := file.ReadAt(b, 2)
if err != nil {
t.Errorf("ReadAt failed: %v", err)
return
}
if expectedData := fsys["test.txt"].Data[2:4]; !bytes.Equal(b, expectedData) {
t.Errorf("Unexpected content read: %s, expected %s", b, expectedData)
}
})
t.Run("Seek", func(t *testing.T) {
n, err := file.Seek(2, io.SeekStart)
if err != nil {
t.Errorf("Seek failed: %v", err)
return
}
if n != 2 {
t.Errorf("Seek returned unexpected value: %d, expected 2", n)
}
})
t.Run("Write", func(t *testing.T) {
_, err := file.Write(nil)
assertPermissionError(t, err)
})
t.Run("WriteAt", func(t *testing.T) {
_, err := file.WriteAt(nil, 0)
assertPermissionError(t, err)
})
t.Run("Name", func(t *testing.T) {
if name := file.Name(); name != "test.txt" {
t.Errorf("expected file.Name() == test.txt, got %s", name)
}
})
t.Run("Readdir", func(t *testing.T) {
t.Run("not directory", func(t *testing.T) {
_, err := file.Readdir(-1)
assertPermissionError(t, err)
})
t.Run("root directory", func(t *testing.T) {
root, err := fromIOFS.Open(".")
if err != nil {
t.Errorf("root open failed: %v", err)
return
}
defer root.Close()
items, err := root.Readdir(-1)
if err != nil {
t.Errorf("Readdir error: %v", err)
return
}
expectedItems := []struct {
Name string
IsDir bool
Size int64
}{
{Name: "dir1", IsDir: true, Size: 0},
{Name: "dir2", IsDir: true, Size: 0},
{Name: "test.txt", IsDir: false, Size: int64(len(fsys["test.txt"].Data))},
}
if len(expectedItems) != len(items) {
t.Errorf(
"Items count mismatch, expected %d, got %d",
len(expectedItems),
len(items),
)
return
}
for i, item := range items {
if item.Name() != expectedItems[i].Name {
t.Errorf(
"Item %d: expected name %s, got %s",
i,
expectedItems[i].Name,
item.Name(),
)
}
if item.IsDir() != expectedItems[i].IsDir {
t.Errorf(
"Item %d: expected IsDir %t, got %t",
i,
expectedItems[i].IsDir,
item.IsDir(),
)
}
if item.Size() != expectedItems[i].Size {
t.Errorf(
"Item %d: expected IsDir %d, got %d",
i,
expectedItems[i].Size,
item.Size(),
)
}
}
})
})
t.Run("Readdirnames", func(t *testing.T) {
t.Run("not directory", func(t *testing.T) {
_, err := file.Readdirnames(-1)
assertPermissionError(t, err)
})
t.Run("root directory", func(t *testing.T) {
root, err := fromIOFS.Open(".")
if err != nil {
t.Errorf("root open failed: %v", err)
return
}
defer root.Close()
items, err := root.Readdirnames(-1)
if err != nil {
t.Errorf("Readdirnames error: %v", err)
return
}
expectedItems := []string{"dir1", "dir2", "test.txt"}
if len(expectedItems) != len(items) {
t.Errorf(
"Items count mismatch, expected %d, got %d",
len(expectedItems),
len(items),
)
return
}
for i, item := range items {
if item != expectedItems[i] {
t.Errorf("Item %d: expected name %s, got %s", i, expectedItems[i], item)
}
}
})
})
t.Run("Truncate", func(t *testing.T) {
err := file.Truncate(1)
assertPermissionError(t, err)
})
t.Run("WriteString", func(t *testing.T) {
_, err := file.WriteString("a")
assertPermissionError(t, err)
})
}
func assertPermissionError(t *testing.T, err error) {
t.Helper()
var perr *fs.PathError
if !errors.As(err, &perr) {
t.Errorf("Expected *fs.PathError, got %[1]T (%[1]v)", err)
return
}
if perr.Err != fs.ErrPermission {
t.Errorf("Expected (*fs.PathError).Err == fs.ErrPermisson, got %[1]T (%[1]v)", err)
}
}
func BenchmarkWalkDir(b *testing.B) {
osfs := NewBasePathFs(NewOsFs(), b.TempDir())
createSomeFiles := func(dirname string) {
for i := 0; i < 10; i++ {
f, err := osfs.Create(filepath.Join(dirname, fmt.Sprintf("test%d.txt", i)))
if err != nil {
b.Fatal(err)
}
f.Close()
}
}
depth := 10
for level := depth; level > 0; level-- {
dirname := ""
for i := 0; i < level; i++ {
dirname = filepath.Join(dirname, fmt.Sprintf("dir%d", i))
err := osfs.MkdirAll(dirname, 0o755)
if err != nil && !os.IsExist(err) {
b.Fatal(err)
}
}
createSomeFiles(dirname)
}
iofs := NewIOFS(osfs)
b.ResetTimer()
for i := 0; i < b.N; i++ {
err := fs.WalkDir(iofs, "", func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
return nil
})
if err != nil {
b.Fatal(err)
}
}
}
spf13-afero-18d690e/ioutil.go 0000664 0000000 0000000 00000015733 15057601571 0015756 0 ustar 00root root 0000000 0000000 // Copyright ©2015 The Go Authors
// Copyright ©2015 Steve Francia
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"bytes"
"io"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
"sync"
"time"
)
// byName implements sort.Interface.
type byName []os.FileInfo
func (f byName) Len() int { return len(f) }
func (f byName) Less(i, j int) bool { return f[i].Name() < f[j].Name() }
func (f byName) Swap(i, j int) { f[i], f[j] = f[j], f[i] }
// ReadDir reads the directory named by dirname and returns
// a list of sorted directory entries.
func (a Afero) ReadDir(dirname string) ([]os.FileInfo, error) {
return ReadDir(a.Fs, dirname)
}
func ReadDir(fs Fs, dirname string) ([]os.FileInfo, error) {
f, err := fs.Open(dirname)
if err != nil {
return nil, err
}
list, err := f.Readdir(-1)
f.Close()
if err != nil {
return nil, err
}
sort.Sort(byName(list))
return list, nil
}
// ReadFile reads the file named by filename and returns the contents.
// A successful call returns err == nil, not err == EOF. Because ReadFile
// reads the whole file, it does not treat an EOF from Read as an error
// to be reported.
func (a Afero) ReadFile(filename string) ([]byte, error) {
return ReadFile(a.Fs, filename)
}
func ReadFile(fs Fs, filename string) ([]byte, error) {
f, err := fs.Open(filename)
if err != nil {
return nil, err
}
defer f.Close()
// It's a good but not certain bet that FileInfo will tell us exactly how much to
// read, so let's try it but be prepared for the answer to be wrong.
var n int64
if fi, err := f.Stat(); err == nil {
// Don't preallocate a huge buffer, just in case.
if size := fi.Size(); size < 1e9 {
n = size
}
}
// As initial capacity for readAll, use n + a little extra in case Size is zero,
// and to avoid another allocation after Read has filled the buffer. The readAll
// call will read into its allocated internal buffer cheaply. If the size was
// wrong, we'll either waste some space off the end or reallocate as needed, but
// in the overwhelmingly common case we'll get it just right.
return readAll(f, n+bytes.MinRead)
}
// readAll reads from r until an error or EOF and returns the data it read
// from the internal buffer allocated with a specified capacity.
func readAll(r io.Reader, capacity int64) (b []byte, err error) {
buf := bytes.NewBuffer(make([]byte, 0, capacity))
// If the buffer overflows, we will get bytes.ErrTooLarge.
// Return that as an error. Any other panic remains.
defer func() {
e := recover()
if e == nil {
return
}
if panicErr, ok := e.(error); ok && panicErr == bytes.ErrTooLarge {
err = panicErr
} else {
panic(e)
}
}()
_, err = buf.ReadFrom(r)
return buf.Bytes(), err
}
// ReadAll reads from r until an error or EOF and returns the data it read.
// A successful call returns err == nil, not err == EOF. Because ReadAll is
// defined to read from src until EOF, it does not treat an EOF from Read
// as an error to be reported.
func ReadAll(r io.Reader) ([]byte, error) {
return readAll(r, bytes.MinRead)
}
// WriteFile writes data to a file named by filename.
// If the file does not exist, WriteFile creates it with permissions perm;
// otherwise WriteFile truncates it before writing.
func (a Afero) WriteFile(filename string, data []byte, perm os.FileMode) error {
return WriteFile(a.Fs, filename, data, perm)
}
func WriteFile(fs Fs, filename string, data []byte, perm os.FileMode) error {
f, err := fs.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm)
if err != nil {
return err
}
n, err := f.Write(data)
if err == nil && n < len(data) {
err = io.ErrShortWrite
}
if err1 := f.Close(); err == nil {
err = err1
}
return err
}
// Random number state.
// We generate random temporary file names so that there's a good
// chance the file doesn't exist yet - keeps the number of tries in
// TempFile to a minimum.
var (
randNum uint32
randmu sync.Mutex
)
func reseed() uint32 {
return uint32(time.Now().UnixNano() + int64(os.Getpid()))
}
func nextRandom() string {
randmu.Lock()
r := randNum
if r == 0 {
r = reseed()
}
r = r*1664525 + 1013904223 // constants from Numerical Recipes
randNum = r
randmu.Unlock()
return strconv.Itoa(int(1e9 + r%1e9))[1:]
}
// TempFile creates a new temporary file in the directory dir,
// opens the file for reading and writing, and returns the resulting *os.File.
// The filename is generated by taking pattern and adding a random
// string to the end. If pattern includes a "*", the random string
// replaces the last "*".
// If dir is the empty string, TempFile uses the default directory
// for temporary files (see os.TempDir).
// Multiple programs calling TempFile simultaneously
// will not choose the same file. The caller can use f.Name()
// to find the pathname of the file. It is the caller's responsibility
// to remove the file when no longer needed.
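//
// A minimal usage sketch (the filesystem and pattern are placeholders):
//
//	f, err := afero.TempFile(afero.NewMemMapFs(), "", "example-*.txt")
//	if err == nil {
//		defer f.Close()
//	}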
func (a Afero) TempFile(dir, pattern string) (f File, err error) {
return TempFile(a.Fs, dir, pattern)
}
func TempFile(fs Fs, dir, pattern string) (f File, err error) {
if dir == "" {
dir = os.TempDir()
}
var prefix, suffix string
if pos := strings.LastIndex(pattern, "*"); pos != -1 {
prefix, suffix = pattern[:pos], pattern[pos+1:]
} else {
prefix = pattern
}
nconflict := 0
for i := 0; i < 10000; i++ {
name := filepath.Join(dir, prefix+nextRandom()+suffix)
f, err = fs.OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0o600)
if os.IsExist(err) {
if nconflict++; nconflict > 10 {
randmu.Lock()
randNum = reseed()
randmu.Unlock()
}
continue
}
break
}
return
}
// TempDir creates a new temporary directory in the directory dir
// with a name beginning with prefix and returns the path of the
// new directory. If dir is the empty string, TempDir uses the
// default directory for temporary files (see os.TempDir).
// Multiple programs calling TempDir simultaneously
// will not choose the same directory. It is the caller's responsibility
// to remove the directory when no longer needed.
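//
// A minimal usage sketch (the filesystem and prefix are placeholders):
//
//	dir, err := afero.TempDir(afero.NewMemMapFs(), "", "cache")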
func (a Afero) TempDir(dir, prefix string) (name string, err error) {
return TempDir(a.Fs, dir, prefix)
}
func TempDir(fs Fs, dir, prefix string) (name string, err error) {
if dir == "" {
dir = os.TempDir()
}
nconflict := 0
for i := 0; i < 10000; i++ {
try := filepath.Join(dir, prefix+nextRandom())
err = fs.Mkdir(try, 0o700)
if os.IsExist(err) {
if nconflict++; nconflict > 10 {
randmu.Lock()
randNum = reseed()
randmu.Unlock()
}
continue
}
if err == nil {
name = try
}
break
}
return
}
spf13-afero-18d690e/ioutil_test.go 0000664 0000000 0000000 00000010725 15057601571 0017011 0 ustar 00root root 0000000 0000000 // ©2015 The Go Authors
// Copyright ©2015 Steve Francia
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"path/filepath"
"strings"
"testing"
)
func checkSizePath(t *testing.T, path string, size int64) {
dir, err := testFS.Stat(path)
if err != nil {
t.Fatalf("Stat %q (looking for size %d): %s", path, size, err)
}
if dir.Size() != size {
t.Errorf("Stat %q: size %d want %d", path, dir.Size(), size)
}
}
func TestReadFile(t *testing.T) {
testFS = &MemMapFs{}
fsutil := &Afero{Fs: testFS}
testFS.Create("this_exists.go")
filename := "rumpelstilzchen"
_, err := fsutil.ReadFile(filename)
if err == nil {
t.Fatalf("ReadFile %s: error expected, none found", filename)
}
filename = "this_exists.go"
contents, err := fsutil.ReadFile(filename)
if err != nil {
t.Fatalf("ReadFile %s: %v", filename, err)
}
checkSizePath(t, filename, int64(len(contents)))
}
func TestWriteFile(t *testing.T) {
testFS = &MemMapFs{}
fsutil := &Afero{Fs: testFS}
f, err := fsutil.TempFile("", "ioutil-test")
if err != nil {
t.Fatal(err)
}
filename := f.Name()
data := "Programming today is a race between software engineers striving to " +
"build bigger and better idiot-proof programs, and the Universe trying " +
"to produce bigger and better idiots. So far, the Universe is winning."
if err := fsutil.WriteFile(filename, []byte(data), 0o644); err != nil {
t.Fatalf("WriteFile %s: %v", filename, err)
}
contents, err := fsutil.ReadFile(filename)
if err != nil {
t.Fatalf("ReadFile %s: %v", filename, err)
}
if string(contents) != data {
t.Fatalf("contents = %q\nexpected = %q", string(contents), data)
}
// cleanup
f.Close()
testFS.Remove(filename) // ignore error
}
func TestReadDir(t *testing.T) {
testFS = &MemMapFs{}
testFS.Mkdir("/i-am-a-dir", 0o777)
testFS.Create("/this_exists.go")
dirname := "rumpelstilzchen"
_, err := ReadDir(testFS, dirname)
if err == nil {
t.Fatalf("ReadDir %s: error expected, none found", dirname)
}
dirname = ".."
list, err := ReadDir(testFS, dirname)
if err != nil {
t.Fatalf("ReadDir %s: %v", dirname, err)
}
foundFile := false
foundSubDir := false
for _, dir := range list {
switch {
case !dir.IsDir() && dir.Name() == "this_exists.go":
foundFile = true
case dir.IsDir() && dir.Name() == "i-am-a-dir":
foundSubDir = true
}
}
if !foundFile {
t.Fatalf("ReadDir %s: this_exists.go file not found", dirname)
}
if !foundSubDir {
t.Fatalf("ReadDir %s: i-am-a-dir directory not found", dirname)
}
}
func TestTempFile(t *testing.T) {
type args struct {
dir string
pattern string
}
tests := map[string]struct {
args args
want func(*testing.T, string)
}{
"foo": { // simple file name
args: args{
dir: "",
pattern: "foo",
},
want: func(t *testing.T, base string) {
if !strings.HasPrefix(base, "foo") || len(base) <= len("foo") {
t.Errorf("TempFile() file = %s, invalid file name", base)
}
},
},
"foo.bar": { // file name w/ ext
args: args{
dir: "",
pattern: "foo.bar",
},
want: func(t *testing.T, base string) {
if !strings.HasPrefix(base, "foo.bar") || len(base) <= len("foo.bar") {
t.Errorf("TempFile() file = %v, invalid file name", base)
}
},
},
"foo-*.bar": { // file name with wild card
args: args{
dir: "",
pattern: "foo-*.bar",
},
want: func(t *testing.T, base string) {
//nolint: staticcheck
if !(strings.HasPrefix(base, "foo-") || strings.HasPrefix(base, "bar")) ||
len(base) <= len("foo-*.bar") {
t.Errorf("TempFile() file = %v, invalid file name", base)
}
},
},
}
for name, tt := range tests {
t.Run(name, func(t *testing.T) {
file, err := TempFile(NewMemMapFs(), tt.args.dir, tt.args.pattern)
if err != nil {
t.Errorf("TempFile() error = %v, none expected", err)
return
}
if file == nil {
t.Errorf("TempFile() file = %v, should not be nil", file)
return
}
tt.want(t, filepath.Base(file.Name()))
})
}
}
spf13-afero-18d690e/lstater.go 0000664 0000000 0000000 00000002016 15057601571 0016115 0 ustar 00root root 0000000 0000000 // Copyright © 2018 Steve Francia .
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"os"
)
// Lstater is an optional interface in Afero. It is only implemented by the
// filesystems that say so.
// It will call Lstat if the filesystem itself is, or it delegates to, the os filesystem.
// Else it will call Stat.
// In addition to the FileInfo, it will return a boolean telling whether Lstat was called or not.
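//
// A minimal caller sketch (fsys and name are placeholders): type-assert the
// filesystem and fall back to Stat when Lstater is not implemented:
//
//	if lfs, ok := fsys.(Lstater); ok {
//		fi, lstatCalled, err := lfs.LstatIfPossible(name)
//		// use fi, lstatCalled, err
//	}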
type Lstater interface {
LstatIfPossible(name string) (os.FileInfo, bool, error)
}
spf13-afero-18d690e/lstater_test.go 0000664 0000000 0000000 00000005760 15057601571 0017165 0 ustar 00root root 0000000 0000000 // Copyright ©2018 Steve Francia
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"os"
"path/filepath"
"testing"
)
func TestLstatIfPossible(t *testing.T) {
wd, _ := os.Getwd()
defer func() {
os.Chdir(wd)
}()
osFs := &OsFs{}
workDir, err := TempDir(osFs, "", "afero-lstate")
if err != nil {
t.Fatal(err)
}
defer func() {
osFs.RemoveAll(workDir)
}()
memWorkDir := "/lstate"
memFs := NewMemMapFs()
overlayFs1 := &CopyOnWriteFs{base: osFs, layer: memFs}
overlayFs2 := &CopyOnWriteFs{base: memFs, layer: osFs}
overlayFsMemOnly := &CopyOnWriteFs{base: memFs, layer: NewMemMapFs()}
basePathFs := &BasePathFs{source: osFs, path: workDir}
basePathFsMem := &BasePathFs{source: memFs, path: memWorkDir}
roFs := &ReadOnlyFs{source: osFs}
roFsMem := &ReadOnlyFs{source: memFs}
pathFileMem := filepath.Join(memWorkDir, "aferom.txt")
WriteFile(osFs, filepath.Join(workDir, "afero.txt"), []byte("Hi, Afero!"), 0o777)
WriteFile(memFs, filepath.Join(pathFileMem), []byte("Hi, Afero!"), 0o777)
os.Chdir(workDir)
if err := os.Symlink("afero.txt", "symafero.txt"); err != nil {
t.Fatal(err)
}
pathFile := filepath.Join(workDir, "afero.txt")
pathSymlink := filepath.Join(workDir, "symafero.txt")
checkLstat := func(l Lstater, name string, shouldLstat bool) os.FileInfo {
statFile, isLstat, err := l.LstatIfPossible(name)
if err != nil {
t.Fatalf("Lstat check failed: %s", err)
}
if isLstat != shouldLstat {
t.Fatalf("Lstat status was %t for %s", isLstat, name)
}
return statFile
}
testLstat := func(l Lstater, pathFile, pathSymlink string) {
shouldLstat := pathSymlink != ""
statRegular := checkLstat(l, pathFile, shouldLstat)
statSymlink := checkLstat(l, pathSymlink, shouldLstat)
if statRegular == nil || statSymlink == nil {
t.Fatal("got nil FileInfo")
}
symSym := statSymlink.Mode()&os.ModeSymlink == os.ModeSymlink
if symSym == (pathSymlink == "") {
t.Fatal("expected the FileInfo to describe the symlink")
}
_, _, err := l.LstatIfPossible("this-should-not-exist.txt")
if err == nil || !os.IsNotExist(err) {
t.Fatalf("expected file to not exist, got %s", err)
}
}
testLstat(osFs, pathFile, pathSymlink)
testLstat(overlayFs1, pathFile, pathSymlink)
testLstat(overlayFs2, pathFile, pathSymlink)
testLstat(basePathFs, "afero.txt", "symafero.txt")
testLstat(overlayFsMemOnly, pathFileMem, "")
testLstat(basePathFsMem, "aferom.txt", "")
testLstat(roFs, pathFile, pathSymlink)
testLstat(roFsMem, pathFileMem, "")
}
spf13-afero-18d690e/match.go 0000664 0000000 0000000 00000005465 15057601571 0015546 0 ustar 00root root 0000000 0000000 // Copyright © 2014 Steve Francia .
// Copyright 2009 The Go Authors. All rights reserved.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"path/filepath"
"sort"
"strings"
)
// Glob returns the names of all files matching pattern or nil
// if there is no matching file. The syntax of patterns is the same
// as in Match. The pattern may describe hierarchical names such as
// /usr/*/bin/ed (assuming the Separator is '/').
//
// Glob ignores file system errors such as I/O errors reading directories.
// The only possible returned error is ErrBadPattern, when pattern
// is malformed.
//
// This was adapted from (http://golang.org/pkg/path/filepath) and uses several
// built-ins from that package.
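//
// A minimal usage sketch (the filesystem and pattern are placeholders):
//
//	matches, err := afero.Glob(afero.NewOsFs(), "/usr/*/bin/ed")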
func Glob(fs Fs, pattern string) (matches []string, err error) {
if !hasMeta(pattern) {
// Lstat not supported by all filesystems.
if _, err = lstatIfPossible(fs, pattern); err != nil {
return nil, nil
}
return []string{pattern}, nil
}
dir, file := filepath.Split(pattern)
switch dir {
case "":
dir = "."
case string(filepath.Separator):
// nothing
default:
dir = dir[0 : len(dir)-1] // chop off trailing separator
}
if !hasMeta(dir) {
return glob(fs, dir, file, nil)
}
var m []string
m, err = Glob(fs, dir)
if err != nil {
return
}
for _, d := range m {
matches, err = glob(fs, d, file, matches)
if err != nil {
return
}
}
return
}
// glob searches for files matching pattern in the directory dir
// and appends them to matches. If the directory cannot be
// opened, it returns the existing matches. New matches are
// added in lexicographical order.
func glob(fs Fs, dir, pattern string, matches []string) (m []string, e error) {
m = matches
fi, err := fs.Stat(dir)
if err != nil {
return
}
if !fi.IsDir() {
return
}
d, err := fs.Open(dir)
if err != nil {
return
}
defer d.Close()
names, _ := d.Readdirnames(-1)
sort.Strings(names)
for _, n := range names {
matched, err := filepath.Match(pattern, n)
if err != nil {
return m, err
}
if matched {
m = append(m, filepath.Join(dir, n))
}
}
return
}
// hasMeta reports whether path contains any of the magic characters
// recognized by Match.
func hasMeta(path string) bool {
// TODO(niemeyer): Should other magic characters be added here?
return strings.ContainsAny(path, "*?[")
}
spf13-afero-18d690e/match_test.go 0000664 0000000 0000000 00000010302 15057601571 0016567 0 ustar 00root root 0000000 0000000 // Copyright © 2014 Steve Francia .
// Copyright 2009 The Go Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"os"
"path/filepath"
"runtime"
"testing"
)
// contains returns true if vector contains the string s.
func contains(vector []string, s string) bool {
for _, elem := range vector {
if elem == s {
return true
}
}
return false
}
func setupGlobDirRoot(t *testing.T, fs Fs) string {
path := testDir(fs)
setupGlobFiles(t, fs, path)
return path
}
func setupGlobDirReusePath(t *testing.T, fs Fs, path string) string {
testRegistry[fs] = append(testRegistry[fs], path)
return setupGlobFiles(t, fs, path)
}
func setupGlobFiles(t *testing.T, fs Fs, path string) string {
testSubDir := filepath.Join(path, "globs", "bobs")
err := fs.MkdirAll(testSubDir, 0o700)
if err != nil && !os.IsExist(err) {
t.Fatal(err)
}
f, err := fs.Create(filepath.Join(testSubDir, "/matcher"))
if err != nil {
t.Fatal(err)
}
f.WriteString("Testfile 1 content")
f.Close()
f, err = fs.Create(filepath.Join(testSubDir, "/../submatcher"))
if err != nil {
t.Fatal(err)
}
f.WriteString("Testfile 2 content")
f.Close()
f, err = fs.Create(filepath.Join(testSubDir, "/../../match"))
if err != nil {
t.Fatal(err)
}
f.WriteString("Testfile 3 content")
f.Close()
return testSubDir
}
func TestGlob(t *testing.T) {
defer removeAllTestFiles(t)
var testDir string
for i, fs := range Fss {
if i == 0 {
testDir = setupGlobDirRoot(t, fs)
} else {
setupGlobDirReusePath(t, fs, testDir)
}
}
globTests := []struct {
pattern, result string
}{
{testDir + "/globs/bobs/matcher", testDir + "/globs/bobs/matcher"},
{testDir + "/globs/*/mat?her", testDir + "/globs/bobs/matcher"},
{testDir + "/globs/bobs/../*", testDir + "/globs/submatcher"},
{testDir + "/match", testDir + "/match"},
}
for _, fs := range Fss {
for _, tt := range globTests {
pattern := tt.pattern
result := tt.result
if runtime.GOOS == "windows" {
pattern = filepath.Clean(pattern)
result = filepath.Clean(result)
}
matches, err := Glob(fs, pattern)
if err != nil {
t.Errorf("Glob error for %q: %s", pattern, err)
continue
}
if !contains(matches, result) {
t.Errorf("Glob(%#q) = %#v want %v", pattern, matches, result)
}
}
for _, pattern := range []string{"no_match", "../*/no_match"} {
matches, err := Glob(fs, pattern)
if err != nil {
t.Errorf("Glob error for %q: %s", pattern, err)
continue
}
if len(matches) != 0 {
t.Errorf("Glob(%#q) = %#v want []", pattern, matches)
}
}
}
}
func TestGlobSymlink(t *testing.T) {
defer removeAllTestFiles(t)
fs := &OsFs{}
testDir := setupGlobDirRoot(t, fs)
err := os.Symlink("target", filepath.Join(testDir, "symlink"))
if err != nil {
t.Skipf("skipping on %s", runtime.GOOS)
}
globSymlinkTests := []struct {
path, dest string
brokenLink bool
}{
{"test1", "link1", false},
{"test2", "link2", true},
}
for _, tt := range globSymlinkTests {
path := filepath.Join(testDir, tt.path)
dest := filepath.Join(testDir, tt.dest)
f, err := fs.Create(path)
if err != nil {
t.Fatal(err)
}
if err := f.Close(); err != nil {
t.Fatal(err)
}
err = os.Symlink(path, dest)
if err != nil {
t.Fatal(err)
}
if tt.brokenLink {
// Break the symlink.
fs.Remove(path)
}
matches, err := Glob(fs, dest)
if err != nil {
t.Errorf("GlobSymlink error for %q: %s", dest, err)
}
if !contains(matches, dest) {
t.Errorf("Glob(%#q) = %#v want %v", dest, matches, dest)
}
}
}
func TestGlobError(t *testing.T) {
for _, fs := range Fss {
_, err := Glob(fs, "[7]")
if err != nil {
t.Error("expected error for bad pattern; got none")
}
}
}
spf13-afero-18d690e/mem/ 0000775 0000000 0000000 00000000000 15057601571 0014667 5 ustar 00root root 0000000 0000000 spf13-afero-18d690e/mem/dir.go 0000664 0000000 0000000 00000001713 15057601571 0015776 0 ustar 00root root 0000000 0000000 // Copyright © 2014 Steve Francia .
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mem
type Dir interface {
Len() int
Names() []string
Files() []*FileData
Add(*FileData)
Remove(*FileData)
}
func RemoveFromMemDir(dir *FileData, f *FileData) {
dir.memDir.Remove(f)
}
func AddToMemDir(dir *FileData, f *FileData) {
dir.memDir.Add(f)
}
func InitializeDir(d *FileData) {
if d.memDir == nil {
d.dir = true
d.memDir = &DirMap{}
}
}
spf13-afero-18d690e/mem/dirmap.go 0000664 0000000 0000000 00000002525 15057601571 0016476 0 ustar 00root root 0000000 0000000 // Copyright © 2015 Steve Francia .
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mem
import "sort"
type DirMap map[string]*FileData
func (m DirMap) Len() int { return len(m) }
func (m DirMap) Add(f *FileData) { m[f.name] = f }
func (m DirMap) Remove(f *FileData) { delete(m, f.name) }
func (m DirMap) Files() (files []*FileData) {
for _, f := range m {
files = append(files, f)
}
sort.Sort(filesSorter(files))
return files
}
// implement sort.Interface for []*FileData
type filesSorter []*FileData
func (s filesSorter) Len() int { return len(s) }
func (s filesSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s filesSorter) Less(i, j int) bool { return s[i].name < s[j].name }
func (m DirMap) Names() (names []string) {
for x := range m {
names = append(names, x)
}
return names
}
spf13-afero-18d690e/mem/file.go 0000664 0000000 0000000 00000016757 15057601571 0016155 0 ustar 00root root 0000000 0000000 // Copyright © 2015 Steve Francia .
// Copyright 2013 tsuru authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package mem
import (
"bytes"
"errors"
"io"
"io/fs"
"os"
"path/filepath"
"sync"
"sync/atomic"
"time"
"github.com/spf13/afero/internal/common"
)
const FilePathSeparator = string(filepath.Separator)
var _ fs.ReadDirFile = &File{}
type File struct {
// atomic requires 64-bit alignment for struct field access
at int64
readDirCount int64
closed bool
readOnly bool
fileData *FileData
}
func NewFileHandle(data *FileData) *File {
return &File{fileData: data}
}
func NewReadOnlyFileHandle(data *FileData) *File {
return &File{fileData: data, readOnly: true}
}
func (f File) Data() *FileData {
return f.fileData
}
type FileData struct {
sync.Mutex
name string
data []byte
memDir Dir
dir bool
mode os.FileMode
modtime time.Time
uid int
gid int
}
func (d *FileData) Name() string {
d.Lock()
defer d.Unlock()
return d.name
}
func CreateFile(name string) *FileData {
return &FileData{name: name, mode: os.ModeTemporary, modtime: time.Now()}
}
func CreateDir(name string) *FileData {
return &FileData{name: name, memDir: &DirMap{}, dir: true, modtime: time.Now()}
}
func ChangeFileName(f *FileData, newname string) {
f.Lock()
f.name = newname
f.Unlock()
}
func SetMode(f *FileData, mode os.FileMode) {
f.Lock()
f.mode = mode
f.Unlock()
}
func SetModTime(f *FileData, mtime time.Time) {
f.Lock()
setModTime(f, mtime)
f.Unlock()
}
func setModTime(f *FileData, mtime time.Time) {
f.modtime = mtime
}
func SetUID(f *FileData, uid int) {
f.Lock()
f.uid = uid
f.Unlock()
}
func SetGID(f *FileData, gid int) {
f.Lock()
f.gid = gid
f.Unlock()
}
func GetFileInfo(f *FileData) *FileInfo {
return &FileInfo{f}
}
func (f *File) Open() error {
atomic.StoreInt64(&f.at, 0)
atomic.StoreInt64(&f.readDirCount, 0)
f.fileData.Lock()
f.closed = false
f.fileData.Unlock()
return nil
}
func (f *File) Close() error {
f.fileData.Lock()
f.closed = true
if !f.readOnly {
setModTime(f.fileData, time.Now())
}
f.fileData.Unlock()
return nil
}
func (f *File) Name() string {
return f.fileData.Name()
}
func (f *File) Stat() (os.FileInfo, error) {
return &FileInfo{f.fileData}, nil
}
func (f *File) Sync() error {
return nil
}
func (f *File) Readdir(count int) (res []os.FileInfo, err error) {
if !f.fileData.dir {
return nil, &os.PathError{
Op: "readdir",
Path: f.fileData.name,
Err: errors.New("not a dir"),
}
}
var outLength int64
f.fileData.Lock()
files := f.fileData.memDir.Files()[f.readDirCount:]
if count > 0 {
if len(files) < count {
outLength = int64(len(files))
} else {
outLength = int64(count)
}
if len(files) == 0 {
err = io.EOF
}
} else {
outLength = int64(len(files))
}
f.readDirCount += outLength
f.fileData.Unlock()
res = make([]os.FileInfo, outLength)
for i := range res {
res[i] = &FileInfo{files[i]}
}
return res, err
}
func (f *File) Readdirnames(n int) (names []string, err error) {
fi, err := f.Readdir(n)
names = make([]string, len(fi))
for i, f := range fi {
_, names[i] = filepath.Split(f.Name())
}
return names, err
}
// Implements fs.ReadDirFile
func (f *File) ReadDir(n int) ([]fs.DirEntry, error) {
fi, err := f.Readdir(n)
if err != nil {
return nil, err
}
di := make([]fs.DirEntry, len(fi))
for i, f := range fi {
di[i] = common.FileInfoDirEntry{FileInfo: f}
}
return di, nil
}
func (f *File) Read(b []byte) (n int, err error) {
f.fileData.Lock()
defer f.fileData.Unlock()
if f.closed {
return 0, ErrFileClosed
}
if len(b) > 0 && int(f.at) == len(f.fileData.data) {
return 0, io.EOF
}
if int(f.at) > len(f.fileData.data) {
return 0, io.ErrUnexpectedEOF
}
if len(f.fileData.data)-int(f.at) >= len(b) {
n = len(b)
} else {
n = len(f.fileData.data) - int(f.at)
}
copy(b, f.fileData.data[f.at:f.at+int64(n)])
atomic.AddInt64(&f.at, int64(n))
return
}
func (f *File) ReadAt(b []byte, off int64) (n int, err error) {
prev := atomic.LoadInt64(&f.at)
atomic.StoreInt64(&f.at, off)
n, err = f.Read(b)
atomic.StoreInt64(&f.at, prev)
return
}
func (f *File) Truncate(size int64) error {
if f.closed {
return ErrFileClosed
}
if f.readOnly {
return &os.PathError{
Op: "truncate",
Path: f.fileData.name,
Err: errors.New("file handle is read only"),
}
}
if size < 0 {
return ErrOutOfRange
}
f.fileData.Lock()
defer f.fileData.Unlock()
if size > int64(len(f.fileData.data)) {
diff := size - int64(len(f.fileData.data))
f.fileData.data = append(f.fileData.data, bytes.Repeat([]byte{0o0}, int(diff))...)
} else {
f.fileData.data = f.fileData.data[0:size]
}
setModTime(f.fileData, time.Now())
return nil
}
func (f *File) Seek(offset int64, whence int) (int64, error) {
if f.closed {
return 0, ErrFileClosed
}
switch whence {
case io.SeekStart:
atomic.StoreInt64(&f.at, offset)
case io.SeekCurrent:
atomic.AddInt64(&f.at, offset)
case io.SeekEnd:
atomic.StoreInt64(&f.at, int64(len(f.fileData.data))+offset)
}
return f.at, nil
}
func (f *File) Write(b []byte) (n int, err error) {
if f.closed {
return 0, ErrFileClosed
}
if f.readOnly {
return 0, &os.PathError{
Op: "write",
Path: f.fileData.name,
Err: errors.New("file handle is read only"),
}
}
n = len(b)
cur := atomic.LoadInt64(&f.at)
f.fileData.Lock()
defer f.fileData.Unlock()
diff := cur - int64(len(f.fileData.data))
var tail []byte
if n+int(cur) < len(f.fileData.data) {
tail = f.fileData.data[n+int(cur):]
}
if diff > 0 {
f.fileData.data = append(
f.fileData.data,
append(bytes.Repeat([]byte{0o0}, int(diff)), b...)...)
f.fileData.data = append(f.fileData.data, tail...)
} else {
f.fileData.data = append(f.fileData.data[:cur], b...)
f.fileData.data = append(f.fileData.data, tail...)
}
setModTime(f.fileData, time.Now())
atomic.AddInt64(&f.at, int64(n))
return
}
func (f *File) WriteAt(b []byte, off int64) (n int, err error) {
atomic.StoreInt64(&f.at, off)
return f.Write(b)
}
func (f *File) WriteString(s string) (ret int, err error) {
return f.Write([]byte(s))
}
func (f *File) Info() *FileInfo {
return &FileInfo{f.fileData}
}
type FileInfo struct {
*FileData
}
// Implements os.FileInfo
func (s *FileInfo) Name() string {
s.Lock()
_, name := filepath.Split(s.name)
s.Unlock()
return name
}
func (s *FileInfo) Mode() os.FileMode {
s.Lock()
defer s.Unlock()
return s.mode
}
func (s *FileInfo) ModTime() time.Time {
s.Lock()
defer s.Unlock()
return s.modtime
}
func (s *FileInfo) IsDir() bool {
s.Lock()
defer s.Unlock()
return s.dir
}
func (s *FileInfo) Sys() interface{} { return nil }
func (s *FileInfo) Size() int64 {
if s.IsDir() {
return int64(42)
}
s.Lock()
defer s.Unlock()
return int64(len(s.data))
}
var (
ErrFileClosed = errors.New("File is closed")
ErrOutOfRange = errors.New("out of range")
ErrTooLarge = errors.New("too large")
ErrFileNotFound = os.ErrNotExist
ErrFileExists = os.ErrExist
ErrDestinationExists = os.ErrExist
)
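// Illustrative usage sketch (added for this write-up; not part of the upstream
// afero source): several handles can share one *FileData. Writes go through a
// writable handle, while a read-only handle over the same data rejects writes
// but can still read what was written.
func exampleSharedFileData() (string, error) {
	data := CreateFile("/shared.txt")
	w := NewFileHandle(data)
	if _, err := w.Write([]byte("hello")); err != nil {
		return "", err
	}
	r := NewReadOnlyFileHandle(data)
	if _, err := r.Write([]byte("nope")); err == nil {
		return "", errors.New("expected the read-only handle to reject Write")
	}
	buf := make([]byte, 5)
	if _, err := r.Read(buf); err != nil && err != io.EOF {
		return "", err
	}
	return string(buf), nil // "hello"
}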
spf13-afero-18d690e/mem/file_test.go 0000664 0000000 0000000 00000012342 15057601571 0017176 0 ustar 00root root 0000000 0000000 package mem
import (
"bytes"
"io"
"testing"
"time"
)
func TestFileDataNameRace(t *testing.T) {
t.Parallel()
const someName = "someName"
const someOtherName = "someOtherName"
d := FileData{
name: someName,
}
if d.Name() != someName {
t.Errorf("Failed to read correct Name, was %v", d.Name())
}
ChangeFileName(&d, someOtherName)
if d.Name() != someOtherName {
t.Errorf("Failed to set Name, was %v", d.Name())
}
go func() {
ChangeFileName(&d, someName)
}()
if d.Name() != someName && d.Name() != someOtherName {
t.Errorf("Failed to read either Name, was %v", d.Name())
}
}
func TestFileDataModTimeRace(t *testing.T) {
t.Parallel()
someTime := time.Now()
someOtherTime := someTime.Add(1 * time.Minute)
d := FileData{
modtime: someTime,
}
s := FileInfo{
FileData: &d,
}
if s.ModTime() != someTime {
t.Errorf("Failed to read correct value, was %v", s.ModTime())
}
SetModTime(&d, someOtherTime)
if s.ModTime() != someOtherTime {
t.Errorf("Failed to set ModTime, was %v", s.ModTime())
}
go func() {
SetModTime(&d, someTime)
}()
if s.ModTime() != someTime && s.ModTime() != someOtherTime {
t.Errorf("Failed to read either modtime, was %v", s.ModTime())
}
}
func TestFileDataModeRace(t *testing.T) {
t.Parallel()
const someMode = 0o777
const someOtherMode = 0o660
d := FileData{
mode: someMode,
}
s := FileInfo{
FileData: &d,
}
if s.Mode() != someMode {
t.Errorf("Failed to read correct value, was %v", s.Mode())
}
SetMode(&d, someOtherMode)
if s.Mode() != someOtherMode {
t.Errorf("Failed to set Mode, was %v", s.Mode())
}
go func() {
SetMode(&d, someMode)
}()
if s.Mode() != someMode && s.Mode() != someOtherMode {
t.Errorf("Failed to read either mode, was %v", s.Mode())
}
}
// See https://github.com/spf13/afero/issues/286.
func TestFileWriteAt(t *testing.T) {
t.Parallel()
data := CreateFile("abc.txt")
f := NewFileHandle(data)
testData := []byte{1, 2, 3, 4, 5}
offset := len(testData)
// 5 zeros + testdata
_, err := f.WriteAt(testData, int64(offset))
if err != nil {
t.Fatal(err)
}
// 2 * testdata
_, err = f.WriteAt(testData, 0)
if err != nil {
t.Fatal(err)
}
// 3 * testdata
_, err = f.WriteAt(testData, int64(offset*2))
if err != nil {
t.Fatal(err)
}
// 3 * testdata + 5 zeros + testdata
_, err = f.WriteAt(testData, int64(offset*4))
if err != nil {
t.Fatal(err)
}
// 5 * testdata
_, err = f.WriteAt(testData, int64(offset*3))
if err != nil {
t.Fatal(err)
}
err = f.Close()
if err != nil {
t.Fatal(err)
}
expected := bytes.Repeat(testData, 5)
if !bytes.Equal(expected, data.data) {
t.Fatalf("expected: %v, got: %v", expected, data.data)
}
}
func TestFileDataIsDirRace(t *testing.T) {
t.Parallel()
d := FileData{
dir: true,
}
s := FileInfo{
FileData: &d,
}
if s.IsDir() != true {
t.Errorf("Failed to read correct value, was %v", s.IsDir())
}
go func() {
s.Lock()
d.dir = false
s.Unlock()
}()
// just logging the value to trigger a read:
t.Logf("Value is %v", s.IsDir())
}
func TestFileDataSizeRace(t *testing.T) {
t.Parallel()
const someData = "Hello"
const someOtherDataSize = "Hello World"
d := FileData{
data: []byte(someData),
dir: false,
}
s := FileInfo{
FileData: &d,
}
if s.Size() != int64(len(someData)) {
t.Errorf("Failed to read correct value, was %v", s.Size())
}
go func() {
s.Lock()
d.data = []byte(someOtherDataSize)
s.Unlock()
}()
// just logging the value to trigger a read:
t.Logf("Value is %v", s.Size())
// Testing the Dir size case
d.dir = true
if s.Size() != int64(42) {
t.Errorf("Failed to read correct value for dir, was %v", s.Size())
}
}
func TestFileReadAtSeekOffset(t *testing.T) {
t.Parallel()
fd := CreateFile("foo")
f := NewFileHandle(fd)
_, err := f.WriteString("TEST")
if err != nil {
t.Fatal(err)
}
offset, err := f.Seek(0, io.SeekStart)
if err != nil {
t.Fatal(err)
}
if offset != 0 {
t.Fail()
}
offsetBeforeReadAt, err := f.Seek(0, io.SeekCurrent)
if err != nil {
t.Fatal(err)
}
if offsetBeforeReadAt != 0 {
t.Fatal("expected 0")
}
b := make([]byte, 4)
n, err := f.ReadAt(b, 0)
if err != nil {
t.Fatal(err)
}
if n != 4 {
t.Fail()
}
if string(b) != "TEST" {
t.Fail()
}
offsetAfterReadAt, err := f.Seek(0, io.SeekCurrent)
if err != nil {
t.Fatal(err)
}
if offsetAfterReadAt != offsetBeforeReadAt {
t.Fatal("ReadAt should not affect offset")
}
err = f.Close()
if err != nil {
t.Fatal(err)
}
}
func TestFileWriteAndSeek(t *testing.T) {
fd := CreateFile("foo")
f := NewFileHandle(fd)
assert := func(expected bool, v ...interface{}) {
if !expected {
t.Helper()
t.Fatal(v...)
}
}
data4 := []byte{0, 1, 2, 3}
data20 := bytes.Repeat(data4, 5)
var off int64
for i := 0; i < 100; i++ {
// write 20 bytes
n, err := f.Write(data20)
assert(err == nil, err)
off += int64(n)
assert(n == len(data20), n)
assert(off == int64((i+1)*len(data20)), off)
// rewind to start and write 4 bytes there
cur, err := f.Seek(-off, io.SeekCurrent)
assert(err == nil, err)
assert(cur == 0, cur)
n, err = f.Write(data4)
assert(err == nil, err)
assert(n == len(data4), n)
// back at the end
cur, err = f.Seek(off-int64(n), io.SeekCurrent)
assert(err == nil, err)
assert(cur == off, cur, off)
}
}
spf13-afero-18d690e/memmap.go 0000664 0000000 0000000 00000023611 15057601571 0015717 0 ustar 00root root 0000000 0000000 // Copyright © 2014 Steve Francia .
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"fmt"
"io"
"log"
"os"
"path/filepath"
"sort"
"strings"
"sync"
"time"
"github.com/spf13/afero/mem"
)
const chmodBits = os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky // Only a subset of bits are allowed to be changed. Documented under os.Chmod()
type MemMapFs struct {
mu sync.RWMutex
data map[string]*mem.FileData
init sync.Once
}
func NewMemMapFs() Fs {
return &MemMapFs{}
}
func (m *MemMapFs) getData() map[string]*mem.FileData {
m.init.Do(func() {
m.data = make(map[string]*mem.FileData)
// Root should always exist, right?
// TODO: what about windows?
root := mem.CreateDir(FilePathSeparator)
mem.SetMode(root, os.ModeDir|0o755)
m.data[FilePathSeparator] = root
})
return m.data
}
func (*MemMapFs) Name() string { return "MemMapFS" }
func (m *MemMapFs) Create(name string) (File, error) {
name = normalizePath(name)
m.mu.Lock()
file := mem.CreateFile(name)
m.getData()[name] = file
m.registerWithParent(file, 0)
m.mu.Unlock()
return mem.NewFileHandle(file), nil
}
func (m *MemMapFs) unRegisterWithParent(fileName string) error {
f, err := m.lockfreeOpen(fileName)
if err != nil {
return err
}
parent := m.findParent(f)
if parent == nil {
log.Panic("parent of ", f.Name(), " is nil")
}
parent.Lock()
mem.RemoveFromMemDir(parent, f)
parent.Unlock()
return nil
}
func (m *MemMapFs) findParent(f *mem.FileData) *mem.FileData {
pdir, _ := filepath.Split(f.Name())
pdir = filepath.Clean(pdir)
pfile, err := m.lockfreeOpen(pdir)
if err != nil {
return nil
}
return pfile
}
func (m *MemMapFs) findDescendants(name string) []*mem.FileData {
fData := m.getData()
descendants := make([]*mem.FileData, 0, len(fData))
for p, dFile := range fData {
if strings.HasPrefix(p, name+FilePathSeparator) {
descendants = append(descendants, dFile)
}
}
sort.Slice(descendants, func(i, j int) bool {
cur := len(strings.Split(descendants[i].Name(), FilePathSeparator))
next := len(strings.Split(descendants[j].Name(), FilePathSeparator))
return cur < next
})
return descendants
}
func (m *MemMapFs) registerWithParent(f *mem.FileData, perm os.FileMode) {
if f == nil {
return
}
parent := m.findParent(f)
if parent == nil {
pdir := filepath.Dir(filepath.Clean(f.Name()))
err := m.lockfreeMkdir(pdir, perm)
if err != nil {
// log.Println("Mkdir error:", err)
return
}
parent, err = m.lockfreeOpen(pdir)
if err != nil {
// log.Println("Open after Mkdir error:", err)
return
}
}
parent.Lock()
mem.InitializeDir(parent)
mem.AddToMemDir(parent, f)
parent.Unlock()
}
func (m *MemMapFs) lockfreeMkdir(name string, perm os.FileMode) error {
name = normalizePath(name)
x, ok := m.getData()[name]
if ok {
// Only return ErrFileExists if it's a file, not a directory.
i := mem.FileInfo{FileData: x}
if !i.IsDir() {
return ErrFileExists
}
} else {
item := mem.CreateDir(name)
mem.SetMode(item, os.ModeDir|perm)
m.getData()[name] = item
m.registerWithParent(item, perm)
}
return nil
}
func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error {
perm &= chmodBits
name = normalizePath(name)
m.mu.RLock()
_, ok := m.getData()[name]
m.mu.RUnlock()
if ok {
return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists}
}
m.mu.Lock()
// Double check that it doesn't exist.
if _, ok := m.getData()[name]; ok {
m.mu.Unlock()
return &os.PathError{Op: "mkdir", Path: name, Err: ErrFileExists}
}
item := mem.CreateDir(name)
mem.SetMode(item, os.ModeDir|perm)
m.getData()[name] = item
m.registerWithParent(item, perm)
m.mu.Unlock()
return m.setFileMode(name, perm|os.ModeDir)
}
func (m *MemMapFs) MkdirAll(path string, perm os.FileMode) error {
err := m.Mkdir(path, perm)
if err != nil {
if err.(*os.PathError).Err == ErrFileExists {
return nil
}
return err
}
return nil
}
// Handle some relative paths
func normalizePath(path string) string {
path = filepath.Clean(path)
switch path {
case ".":
return FilePathSeparator
case "..":
return FilePathSeparator
default:
return path
}
}
func (m *MemMapFs) Open(name string) (File, error) {
f, err := m.open(name)
if f != nil {
return mem.NewReadOnlyFileHandle(f), err
}
return nil, err
}
func (m *MemMapFs) openWrite(name string) (File, error) {
f, err := m.open(name)
if f != nil {
return mem.NewFileHandle(f), err
}
return nil, err
}
func (m *MemMapFs) open(name string) (*mem.FileData, error) {
name = normalizePath(name)
m.mu.RLock()
f, ok := m.getData()[name]
m.mu.RUnlock()
if !ok {
return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileNotFound}
}
return f, nil
}
func (m *MemMapFs) lockfreeOpen(name string) (*mem.FileData, error) {
name = normalizePath(name)
f, ok := m.getData()[name]
if ok {
return f, nil
} else {
return nil, ErrFileNotFound
}
}
func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
perm &= chmodBits
chmod := false
file, err := m.openWrite(name)
if err == nil && (flag&os.O_EXCL > 0) {
return nil, &os.PathError{Op: "open", Path: name, Err: ErrFileExists}
}
if os.IsNotExist(err) && (flag&os.O_CREATE > 0) {
file, err = m.Create(name)
chmod = true
}
if err != nil {
return nil, err
}
if flag == os.O_RDONLY {
file = mem.NewReadOnlyFileHandle(file.(*mem.File).Data())
}
if flag&os.O_APPEND > 0 {
_, err = file.Seek(0, io.SeekEnd)
if err != nil {
file.Close()
return nil, err
}
}
if flag&os.O_TRUNC > 0 && flag&(os.O_RDWR|os.O_WRONLY) > 0 {
err = file.Truncate(0)
if err != nil {
file.Close()
return nil, err
}
}
if chmod {
return file, m.setFileMode(name, perm)
}
return file, nil
}
func (m *MemMapFs) Remove(name string) error {
name = normalizePath(name)
m.mu.Lock()
defer m.mu.Unlock()
if _, ok := m.getData()[name]; ok {
err := m.unRegisterWithParent(name)
if err != nil {
return &os.PathError{Op: "remove", Path: name, Err: err}
}
delete(m.getData(), name)
} else {
return &os.PathError{Op: "remove", Path: name, Err: os.ErrNotExist}
}
return nil
}
func (m *MemMapFs) RemoveAll(path string) error {
path = normalizePath(path)
m.mu.Lock()
m.unRegisterWithParent(path)
m.mu.Unlock()
m.mu.RLock()
defer m.mu.RUnlock()
for p := range m.getData() {
if p == path || strings.HasPrefix(p, path+FilePathSeparator) {
m.mu.RUnlock()
m.mu.Lock()
delete(m.getData(), p)
m.mu.Unlock()
m.mu.RLock()
}
}
return nil
}
func (m *MemMapFs) Rename(oldname, newname string) error {
oldname = normalizePath(oldname)
newname = normalizePath(newname)
if oldname == newname {
return nil
}
m.mu.RLock()
defer m.mu.RUnlock()
if _, ok := m.getData()[oldname]; ok {
m.mu.RUnlock()
m.mu.Lock()
err := m.unRegisterWithParent(oldname)
if err != nil {
return err
}
fileData := m.getData()[oldname]
mem.ChangeFileName(fileData, newname)
m.getData()[newname] = fileData
err = m.renameDescendants(oldname, newname)
if err != nil {
return err
}
delete(m.getData(), oldname)
m.registerWithParent(fileData, 0)
m.mu.Unlock()
m.mu.RLock()
} else {
return &os.PathError{Op: "rename", Path: oldname, Err: ErrFileNotFound}
}
return nil
}
func (m *MemMapFs) renameDescendants(oldname, newname string) error {
descendants := m.findDescendants(oldname)
removes := make([]string, 0, len(descendants))
for _, desc := range descendants {
descNewName := strings.Replace(desc.Name(), oldname, newname, 1)
err := m.unRegisterWithParent(desc.Name())
if err != nil {
return err
}
removes = append(removes, desc.Name())
mem.ChangeFileName(desc, descNewName)
m.getData()[descNewName] = desc
m.registerWithParent(desc, 0)
}
for _, r := range removes {
delete(m.getData(), r)
}
return nil
}
func (m *MemMapFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
fileInfo, err := m.Stat(name)
return fileInfo, false, err
}
func (m *MemMapFs) Stat(name string) (os.FileInfo, error) {
f, err := m.Open(name)
if err != nil {
return nil, err
}
fi := mem.GetFileInfo(f.(*mem.File).Data())
return fi, nil
}
func (m *MemMapFs) Chmod(name string, mode os.FileMode) error {
mode &= chmodBits
m.mu.RLock()
f, ok := m.getData()[name]
m.mu.RUnlock()
if !ok {
return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound}
}
prevOtherBits := mem.GetFileInfo(f).Mode() & ^chmodBits
mode = prevOtherBits | mode
return m.setFileMode(name, mode)
}
func (m *MemMapFs) setFileMode(name string, mode os.FileMode) error {
name = normalizePath(name)
m.mu.RLock()
f, ok := m.getData()[name]
m.mu.RUnlock()
if !ok {
return &os.PathError{Op: "chmod", Path: name, Err: ErrFileNotFound}
}
m.mu.Lock()
mem.SetMode(f, mode)
m.mu.Unlock()
return nil
}
func (m *MemMapFs) Chown(name string, uid, gid int) error {
name = normalizePath(name)
m.mu.RLock()
f, ok := m.getData()[name]
m.mu.RUnlock()
if !ok {
return &os.PathError{Op: "chown", Path: name, Err: ErrFileNotFound}
}
mem.SetUID(f, uid)
mem.SetGID(f, gid)
return nil
}
func (m *MemMapFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
name = normalizePath(name)
m.mu.RLock()
f, ok := m.getData()[name]
m.mu.RUnlock()
if !ok {
return &os.PathError{Op: "chtimes", Path: name, Err: ErrFileNotFound}
}
m.mu.Lock()
mem.SetModTime(f, mtime)
m.mu.Unlock()
return nil
}
func (m *MemMapFs) List() {
for _, x := range m.data {
y := mem.FileInfo{FileData: x}
fmt.Println(x.Name(), y.Size())
}
}
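// Illustrative usage sketch (added for this write-up; not part of the upstream
// afero source): MemMapFs is used like any other Fs. Parent directories are
// registered implicitly when a file is created, so Stat on the containing
// directory succeeds right after Create.
func exampleMemMapFsUsage() error {
	fs := NewMemMapFs()
	f, err := fs.Create("/notes/todo.txt")
	if err != nil {
		return err
	}
	if _, err := f.WriteString("remember the milk"); err != nil {
		return err
	}
	if err := f.Close(); err != nil {
		return err
	}
	if _, err := fs.Stat("/notes"); err != nil { // implicit parent directory
		return err
	}
	b, err := ReadFile(fs, "/notes/todo.txt")
	if err != nil {
		return err
	}
	_ = b // "remember the milk"
	return nil
}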
spf13-afero-18d690e/memmap_test.go 0000664 0000000 0000000 00000050321 15057601571 0016754 0 ustar 00root root 0000000 0000000 package afero
import (
"fmt"
"io"
"io/fs"
"os"
"path/filepath"
"runtime"
"strings"
"sync"
"testing"
"time"
)
func TestNormalizePath(t *testing.T) {
type test struct {
input string
expected string
}
data := []test{
{".", FilePathSeparator},
{"./", FilePathSeparator},
{"..", FilePathSeparator},
{"../", FilePathSeparator},
{"./..", FilePathSeparator},
{"./../", FilePathSeparator},
}
for i, d := range data {
cpath := normalizePath(d.input)
if d.expected != cpath {
t.Errorf("Test %d failed. Expected %q got %q", i, d.expected, cpath)
}
}
}
func TestPathErrors(t *testing.T) {
path := filepath.Join(".", "some", "path")
path2 := filepath.Join(".", "different", "path")
fs := NewMemMapFs()
perm := os.FileMode(0o755)
uid := 1000
gid := 1000
// relevant functions:
// func (m *MemMapFs) Chmod(name string, mode os.FileMode) error
// func (m *MemMapFs) Chtimes(name string, atime time.Time, mtime time.Time) error
// func (m *MemMapFs) Create(name string) (File, error)
// func (m *MemMapFs) Mkdir(name string, perm os.FileMode) error
// func (m *MemMapFs) MkdirAll(path string, perm os.FileMode) error
// func (m *MemMapFs) Open(name string) (File, error)
// func (m *MemMapFs) OpenFile(name string, flag int, perm os.FileMode) (File, error)
// func (m *MemMapFs) Remove(name string) error
// func (m *MemMapFs) Rename(oldname, newname string) error
// func (m *MemMapFs) Stat(name string) (os.FileInfo, error)
err := fs.Chmod(path, perm)
checkPathError(t, err, "Chmod")
err = fs.Chown(path, uid, gid)
checkPathError(t, err, "Chown")
err = fs.Chtimes(path, time.Now(), time.Now())
checkPathError(t, err, "Chtimes")
// fs.Create doesn't return an error
err = fs.Mkdir(path2, perm)
if err != nil {
t.Error(err)
}
err = fs.Mkdir(path2, perm)
checkPathError(t, err, "Mkdir")
err = fs.MkdirAll(path2, perm)
if err != nil {
t.Error("MkdirAll:", err)
}
_, err = fs.Open(path)
checkPathError(t, err, "Open")
_, err = fs.OpenFile(path, os.O_RDWR, perm)
checkPathError(t, err, "OpenFile")
err = fs.Remove(path)
checkPathError(t, err, "Remove")
err = fs.RemoveAll(path)
if err != nil {
t.Error("RemoveAll:", err)
}
err = fs.Rename(path, path2)
checkPathError(t, err, "Rename")
_, err = fs.Stat(path)
checkPathError(t, err, "Stat")
}
func checkPathError(t *testing.T, err error, op string) {
pathErr, ok := err.(*os.PathError)
if !ok {
t.Error(op+":", err, "is not an os.PathError")
return
}
_, ok = pathErr.Err.(*os.PathError)
if ok {
t.Error(op+":", err, "contains another os.PathError")
}
}
// Ensure os.O_EXCL is correctly handled.
func TestOpenFileExcl(t *testing.T) {
const fileName = "/myFileTest"
const fileMode = os.FileMode(0o765)
fs := NewMemMapFs()
// First creation should succeed.
f, err := fs.OpenFile(fileName, os.O_CREATE|os.O_EXCL, fileMode)
if err != nil {
t.Errorf("OpenFile Create Excl failed: %s", err)
return
}
f.Close()
// Second creation should fail.
_, err = fs.OpenFile(fileName, os.O_CREATE|os.O_EXCL, fileMode)
if err == nil {
t.Errorf("OpenFile Create Excl should have failed, but it didn't")
}
checkPathError(t, err, "Open")
}
// Ensure Permissions are set on OpenFile/Mkdir/MkdirAll
func TestPermSet(t *testing.T) {
const fileName = "/myFileTest"
const dirPath = "/myDirTest"
const dirPathAll = "/my/path/to/dir"
const fileMode = os.FileMode(0o765)
// directories will also have the directory bit set
const dirMode = fileMode | os.ModeDir
fs := NewMemMapFs()
// Test Openfile
f, err := fs.OpenFile(fileName, os.O_CREATE, fileMode)
if err != nil {
t.Errorf("OpenFile Create failed: %s", err)
return
}
f.Close()
s, err := fs.Stat(fileName)
if err != nil {
t.Errorf("Stat failed: %s", err)
return
}
if s.Mode().String() != fileMode.String() {
t.Errorf("Permissions Incorrect: %s != %s", s.Mode().String(), fileMode.String())
return
}
// Test Mkdir
err = fs.Mkdir(dirPath, dirMode)
if err != nil {
t.Errorf("MkDir Create failed: %s", err)
return
}
s, err = fs.Stat(dirPath)
if err != nil {
t.Errorf("Stat failed: %s", err)
return
}
// sets File
if s.Mode().String() != dirMode.String() {
t.Errorf("Permissions Incorrect: %s != %s", s.Mode().String(), dirMode.String())
return
}
// Test MkdirAll
err = fs.MkdirAll(dirPathAll, dirMode)
if err != nil {
t.Errorf("MkDir Create failed: %s", err)
return
}
s, err = fs.Stat(dirPathAll)
if err != nil {
t.Errorf("Stat failed: %s", err)
return
}
if s.Mode().String() != dirMode.String() {
t.Errorf("Permissions Incorrect: %s != %s", s.Mode().String(), dirMode.String())
return
}
}
// Fails if multiple file objects use the same file.at counter in MemMapFs
func TestMultipleOpenFiles(t *testing.T) {
defer removeAllTestFiles(t)
const fileName = "afero-demo2.txt"
data := make([][]byte, len(Fss))
for i, fs := range Fss {
dir := testDir(fs)
path := filepath.Join(dir, fileName)
fh1, err := fs.Create(path)
if err != nil {
t.Error("fs.Create failed: " + err.Error())
}
_, err = fh1.Write([]byte("test"))
if err != nil {
t.Error("fh.Write failed: " + err.Error())
}
_, err = fh1.Seek(0, io.SeekStart)
if err != nil {
t.Error(err)
}
fh2, err := fs.OpenFile(path, os.O_RDWR, 0o777)
if err != nil {
t.Error("fs.OpenFile failed: " + err.Error())
}
_, err = fh2.Seek(0, io.SeekEnd)
if err != nil {
t.Error(err)
}
_, err = fh2.Write([]byte("data"))
if err != nil {
t.Error(err)
}
err = fh2.Close()
if err != nil {
t.Error(err)
}
_, err = fh1.Write([]byte("data"))
if err != nil {
t.Error(err)
}
err = fh1.Close()
if err != nil {
t.Error(err)
}
// the file now should contain "datadata"
data[i], err = ReadFile(fs, path)
if err != nil {
t.Error(err)
}
}
for i, fs := range Fss {
if i == 0 {
continue
}
if string(data[0]) != string(data[i]) {
t.Errorf("%s and %s don't behave the same\n"+
"%s: \"%s\"\n%s: \"%s\"\n",
Fss[0].Name(), fs.Name(), Fss[0].Name(), data[0], fs.Name(), data[i])
}
}
}
// Test if file.Write() fails when opened as read only
func TestReadOnly(t *testing.T) {
defer removeAllTestFiles(t)
const fileName = "afero-demo.txt"
for _, fs := range Fss {
dir := testDir(fs)
path := filepath.Join(dir, fileName)
f, err := fs.Create(path)
if err != nil {
t.Error(fs.Name()+":", "fs.Create failed: "+err.Error())
}
_, err = f.Write([]byte("test"))
if err != nil {
t.Error(fs.Name()+":", "Write failed: "+err.Error())
}
f.Close()
f, err = fs.Open(path)
if err != nil {
t.Error("fs.Open failed: " + err.Error())
}
_, err = f.Write([]byte("data"))
if err == nil {
t.Error(fs.Name()+":", "No write error")
}
f.Close()
f, err = fs.OpenFile(path, os.O_RDONLY, 0o644)
if err != nil {
t.Error("fs.Open failed: " + err.Error())
}
_, err = f.Write([]byte("data"))
if err == nil {
t.Error(fs.Name()+":", "No write error")
}
f.Close()
}
}
func TestWriteCloseTime(t *testing.T) {
defer removeAllTestFiles(t)
const fileName = "afero-demo.txt"
for _, fs := range Fss {
dir := testDir(fs)
path := filepath.Join(dir, fileName)
f, err := fs.Create(path)
if err != nil {
t.Error(fs.Name()+":", "fs.Create failed: "+err.Error())
}
f.Close()
f, err = fs.Create(path)
if err != nil {
t.Error(fs.Name()+":", "fs.Create failed: "+err.Error())
}
fi, err := f.Stat()
if err != nil {
t.Error(fs.Name()+":", "Stat failed: "+err.Error())
}
timeBefore := fi.ModTime()
// Sorry for the delay, but we have to make sure time advances,
// even on non-Unix systems...
switch runtime.GOOS {
case "windows":
time.Sleep(2 * time.Second)
case "darwin":
time.Sleep(1 * time.Second)
default: // depending on the FS, this may work with < 1 second, on my old ext3 it does not
time.Sleep(1 * time.Second)
}
_, err = f.Write([]byte("test"))
if err != nil {
t.Error(fs.Name()+":", "Write failed: "+err.Error())
}
f.Close()
fi, err = fs.Stat(path)
if err != nil {
t.Error(fs.Name()+":", "fs.Stat failed: "+err.Error())
}
if fi.ModTime().Equal(timeBefore) {
t.Error(fs.Name()+":", "ModTime was not set on Close()")
}
}
}
// This test should be run with the race detector on:
// go test -race -v -timeout 10s -run TestRacingDeleteAndClose
func TestRacingDeleteAndClose(t *testing.T) {
fs := NewMemMapFs()
pathname := "testfile"
f, err := fs.Create(pathname)
if err != nil {
t.Fatal(err)
}
in := make(chan bool)
go func() {
<-in
f.Close()
}()
go func() {
<-in
fs.Remove(pathname)
}()
close(in)
}
// This test should be run with the race detector on:
// go test -run TestMemFsDataRace -race
func TestMemFsDataRace(t *testing.T) {
const dir = "test_dir"
fs := NewMemMapFs()
if err := fs.MkdirAll(dir, 0o777); err != nil {
t.Fatal(err)
}
const n = 1000
done := make(chan struct{})
go func() {
defer close(done)
for i := 0; i < n; i++ {
fname := filepath.Join(dir, fmt.Sprintf("%d.txt", i))
if err := WriteFile(fs, fname, []byte(""), 0o777); err != nil {
panic(err)
}
if err := fs.Remove(fname); err != nil {
panic(err)
}
}
}()
loop:
for {
select {
case <-done:
break loop
default:
_, err := ReadDir(fs, dir)
if err != nil {
t.Fatal(err)
}
}
}
}
// root is a directory
func TestMemFsRootDirMode(t *testing.T) {
t.Parallel()
fs := NewMemMapFs()
info, err := fs.Stat("/")
if err != nil {
t.Fatal(err)
}
if !info.IsDir() {
t.Error("should be a directory")
}
if !info.Mode().IsDir() {
t.Errorf("FileMode is not directory, is %s", info.Mode().String())
}
}
// MkdirAll creates intermediate directories with correct mode
func TestMemFsMkdirAllMode(t *testing.T) {
t.Parallel()
fs := NewMemMapFs()
err := fs.MkdirAll("/a/b/c", 0o755)
if err != nil {
t.Fatal(err)
}
info, err := fs.Stat("/a")
if err != nil {
t.Fatal(err)
}
if !info.Mode().IsDir() {
t.Error("/a: mode is not directory")
}
if !info.ModTime().After(time.Now().Add(-1 * time.Hour)) {
t.Errorf("/a: mod time not set, got %s", info.ModTime())
}
if info.Mode() != os.FileMode(os.ModeDir|0o755) {
t.Errorf("/a: wrong permissions, expected drwxr-xr-x, got %s", info.Mode())
}
info, err = fs.Stat("/a/b")
if err != nil {
t.Fatal(err)
}
if !info.Mode().IsDir() {
t.Error("/a/b: mode is not directory")
}
if info.Mode() != os.FileMode(os.ModeDir|0o755) {
t.Errorf("/a/b: wrong permissions, expected drwxr-xr-x, got %s", info.Mode())
}
if !info.ModTime().After(time.Now().Add(-1 * time.Hour)) {
t.Errorf("/a/b: mod time not set, got %s", info.ModTime())
}
info, err = fs.Stat("/a/b/c")
if err != nil {
t.Fatal(err)
}
if !info.Mode().IsDir() {
t.Error("/a/b/c: mode is not directory")
}
if info.Mode() != os.FileMode(os.ModeDir|0o755) {
t.Errorf("/a/b/c: wrong permissions, expected drwxr-xr-x, got %s", info.Mode())
}
if !info.ModTime().After(time.Now().Add(-1 * time.Hour)) {
t.Errorf("/a/b/c: mod time not set, got %s", info.ModTime())
}
}
// MkdirAll does not change permissions of already-existing directories
func TestMemFsMkdirAllNoClobber(t *testing.T) {
t.Parallel()
fs := NewMemMapFs()
err := fs.MkdirAll("/a/b/c", 0o755)
if err != nil {
t.Fatal(err)
}
info, err := fs.Stat("/a/b")
if err != nil {
t.Fatal(err)
}
if info.Mode() != os.FileMode(os.ModeDir|0o755) {
t.Errorf("/a/b: wrong permissions, expected drwxr-xr-x, got %s", info.Mode())
}
err = fs.MkdirAll("/a/b/c/d/e/f", 0o710)
// '/a/b' is unchanged
if err != nil {
t.Fatal(err)
}
info, err = fs.Stat("/a/b")
if err != nil {
t.Fatal(err)
}
if info.Mode() != os.FileMode(os.ModeDir|0o755) {
t.Errorf("/a/b: wrong permissions, expected drwxr-xr-x, got %s", info.Mode())
}
// new directories created with proper permissions
info, err = fs.Stat("/a/b/c/d")
if err != nil {
t.Fatal(err)
}
if info.Mode() != os.FileMode(os.ModeDir|0o710) {
t.Errorf("/a/b/c/d: wrong permissions, expected drwx--x---, got %s", info.Mode())
}
info, err = fs.Stat("/a/b/c/d/e")
if err != nil {
t.Fatal(err)
}
if info.Mode() != os.FileMode(os.ModeDir|0o710) {
t.Errorf("/a/b/c/d/e: wrong permissions, expected drwx--x---, got %s", info.Mode())
}
info, err = fs.Stat("/a/b/c/d/e/f")
if err != nil {
t.Fatal(err)
}
if info.Mode() != os.FileMode(os.ModeDir|0o710) {
t.Errorf("/a/b/c/d/e/f: wrong permissions, expected drwx--x---, got %s", info.Mode())
}
}
func TestMemFsDirMode(t *testing.T) {
fs := NewMemMapFs()
err := fs.Mkdir("/testDir1", 0o644)
if err != nil {
t.Error(err)
}
err = fs.MkdirAll("/sub/testDir2", 0o644)
if err != nil {
t.Error(err)
}
info, err := fs.Stat("/testDir1")
if err != nil {
t.Error(err)
}
if !info.IsDir() {
t.Error("should be a directory")
}
if !info.Mode().IsDir() {
t.Error("FileMode is not directory")
}
info, err = fs.Stat("/sub/testDir2")
if err != nil {
t.Error(err)
}
if !info.IsDir() {
t.Error("should be a directory")
}
if !info.Mode().IsDir() {
t.Error("FileMode is not directory")
}
}
func TestMemFsUnexpectedEOF(t *testing.T) {
t.Parallel()
fs := NewMemMapFs()
if err := WriteFile(fs, "file.txt", []byte("abc"), 0o777); err != nil {
t.Fatal(err)
}
f, err := fs.Open("file.txt")
if err != nil {
t.Fatal(err)
}
defer f.Close()
// Seek beyond the end.
_, err = f.Seek(512, 0)
if err != nil {
t.Fatal(err)
}
buff := make([]byte, 256)
_, err = io.ReadAtLeast(f, buff, 256)
if err != io.ErrUnexpectedEOF {
t.Fatal("Expected ErrUnexpectedEOF")
}
}
func TestMemFsChmod(t *testing.T) {
t.Parallel()
fs := NewMemMapFs()
const file = "hello"
if err := fs.Mkdir(file, 0o700); err != nil {
t.Fatal(err)
}
info, err := fs.Stat(file)
if err != nil {
t.Fatal(err)
}
if info.Mode().String() != "drwx------" {
t.Fatal("mkdir failed to create a directory: mode =", info.Mode())
}
err = fs.Chmod(file, 0)
if err != nil {
t.Error("Failed to run chmod:", err)
}
info, err = fs.Stat(file)
if err != nil {
t.Fatal(err)
}
if info.Mode().String() != "d---------" {
t.Error("chmod should not change file type. New mode =", info.Mode())
}
}
// can't use Mkdir to get around which permissions we're allowed to set
func TestMemFsMkdirModeIllegal(t *testing.T) {
t.Parallel()
fs := NewMemMapFs()
err := fs.Mkdir("/a", os.ModeSocket|0o755)
if err != nil {
t.Fatal(err)
}
info, err := fs.Stat("/a")
if err != nil {
t.Fatal(err)
}
if info.Mode() != os.FileMode(os.ModeDir|0o755) {
t.Fatalf("should not be able to use Mkdir to set illegal mode: %s", info.Mode().String())
}
}
// can't use OpenFile to get around which permissions we're allowed to set
func TestMemFsOpenFileModeIllegal(t *testing.T) {
t.Parallel()
fs := NewMemMapFs()
file, err := fs.OpenFile("/a", os.O_CREATE, os.ModeSymlink|0o644)
if err != nil {
t.Fatal(err)
}
defer file.Close()
info, err := fs.Stat("/a")
if err != nil {
t.Fatal(err)
}
if info.Mode() != os.FileMode(0o644) {
t.Fatalf("should not be able to use OpenFile to set illegal mode: %s", info.Mode().String())
}
}
// LstatIfPossible should always return false, since MemMapFs does not
// support symlinks.
func TestMemFsLstatIfPossible(t *testing.T) {
t.Parallel()
fs := NewMemMapFs()
// We assert that fs implements Lstater
fsAsserted, ok := fs.(Lstater)
if !ok {
t.Fatalf("The filesytem does not implement Lstater")
}
file, err := fs.OpenFile("/a.txt", os.O_CREATE, 0o644)
if err != nil {
t.Fatalf("Error when opening file: %v", err)
}
defer file.Close()
_, lstatCalled, err := fsAsserted.LstatIfPossible("/a.txt")
if err != nil {
t.Fatalf("Function returned err: %v", err)
}
if lstatCalled {
t.Fatalf("Function indicated lstat was called. This should never be true.")
}
}
func TestMemMapFsConcurrentMkdir(t *testing.T) {
const dir = "test_dir"
const n = 1000
mfs := NewMemMapFs().(*MemMapFs)
allFilePaths := make([]string, 0, n)
// run concurrency test
var wg sync.WaitGroup
for i := 0; i < n; i++ {
fp := filepath.Join(
dir,
fmt.Sprintf("%02d", n%10),
fmt.Sprintf("%d.txt", i),
)
allFilePaths = append(allFilePaths, fp)
wg.Add(1)
go func() {
defer wg.Done()
if err := mfs.MkdirAll(filepath.Dir(fp), 0o755); err != nil {
t.Error(err)
}
wt, err := mfs.Create(fp)
if err != nil {
t.Error(err)
}
defer func() {
if err := wt.Close(); err != nil {
t.Error(err)
}
}()
// write 30 bytes
for j := 0; j < 10; j++ {
_, err := wt.Write([]byte("000"))
if err != nil {
t.Error(err)
}
}
}()
}
wg.Wait()
// Test1: find all files by full path access
for _, fp := range allFilePaths {
info, err := mfs.Stat(fp)
if err != nil {
t.Error(err)
}
if info.Size() != 30 {
t.Errorf("file size should be 30, but got %d", info.Size())
}
}
// Test2: find all files by walk
foundFiles := make([]string, 0, n)
wErr := Walk(mfs, dir, func(path string, info fs.FileInfo, err error) error {
if err != nil {
t.Error(err)
}
if info.IsDir() {
return nil // skip dir
}
if strings.HasSuffix(info.Name(), ".txt") {
foundFiles = append(foundFiles, path)
}
return nil
})
if wErr != nil {
t.Error(wErr)
}
if len(foundFiles) != n {
t.Errorf("found %d files, but expect %d", len(foundFiles), n)
}
}
func TestMemFsRenameDir(t *testing.T) {
const srcPath = "/src"
const dstPath = "/dst"
const subDir = "dir"
const subFile = "file.txt"
fs := NewMemMapFs()
err := fs.MkdirAll(srcPath+FilePathSeparator+subDir, 0o777)
if err != nil {
t.Fatalf("MkDirAll failed: %s", err)
}
f, err := fs.Create(srcPath + FilePathSeparator + subFile)
if err != nil {
t.Fatalf("Create failed: %s", err)
}
if err = f.Close(); err != nil {
t.Fatalf("Close failed: %s", err)
}
err = fs.Rename(srcPath, dstPath)
if err != nil {
t.Fatalf("Rename failed: %s", err)
}
_, err = fs.Stat(srcPath + FilePathSeparator + subDir)
if err == nil {
t.Fatalf("SubDir still exists in the source dir")
}
_, err = fs.Stat(srcPath + FilePathSeparator + subFile)
if err == nil {
t.Fatalf("SubFile still exists in the source dir")
}
_, err = fs.Stat(dstPath + FilePathSeparator + subDir)
if err != nil {
t.Fatalf("SubDir stat in the destination dir: %s", err)
}
_, err = fs.Stat(dstPath + FilePathSeparator + subFile)
if err != nil {
t.Fatalf("SubFile stat in the destination dir: %s", err)
}
err = fs.Mkdir(srcPath, 0o777)
if err != nil {
t.Fatalf("Cannot recreate the source dir: %s", err)
}
err = fs.Mkdir(srcPath+FilePathSeparator+subDir, 0o777)
if err != nil {
t.Errorf("Cannot recreate the subdir in the source dir: %s", err)
}
}
func TestMemMapFsRename(t *testing.T) {
t.Parallel()
fs := &MemMapFs{}
tDir := testDir(fs)
rFrom := "/renamefrom"
rTo := "/renameto"
rExists := "/renameexists"
type test struct {
dirs []string
from string
to string
exists string
}
parts := strings.Split(tDir, "/")
root := "/"
if len(parts) > 1 {
root = filepath.Join("/", parts[1])
}
testData := make([]test, 0, len(parts))
i := len(parts)
for i > 0 {
prefix := strings.Join(parts[:i], "/")
suffix := strings.Join(parts[i:], "/")
testData = append(testData, test{
dirs: []string{
filepath.Join(prefix, rFrom, suffix),
filepath.Join(prefix, rExists, suffix),
},
from: filepath.Join(prefix, rFrom),
to: filepath.Join(prefix, rTo),
exists: filepath.Join(prefix, rExists),
})
i--
}
for _, data := range testData {
err := fs.RemoveAll(root)
if err != nil {
t.Fatalf("%s: RemoveAll %q failed: %v", fs.Name(), root, err)
}
for _, dir := range data.dirs {
err = fs.MkdirAll(dir, os.FileMode(0o775))
if err != nil {
t.Fatalf("%s: MkdirAll %q failed: %v", fs.Name(), dir, err)
}
}
dataCnt := len(fs.getData())
err = fs.Rename(data.from, data.to)
if err != nil {
t.Fatalf("%s: rename %q, %q failed: %v", fs.Name(), data.from, data.to, err)
}
err = fs.Mkdir(data.from, os.FileMode(0o775))
if err != nil {
t.Fatalf("%s: Mkdir %q failed: %v", fs.Name(), data.from, err)
}
err = fs.Rename(data.from, data.exists)
if err != nil {
t.Errorf("%s: rename %q, %q failed: %v", fs.Name(), data.from, data.exists, err)
}
for p := range fs.getData() {
if strings.Contains(p, data.from) {
t.Errorf("File was not renamed to renameto: %v", p)
}
}
_, err = fs.Stat(data.to)
if err != nil {
t.Errorf("%s: stat %q failed: %v", fs.Name(), data.to, err)
}
if dataCnt != len(fs.getData()) {
t.Errorf("invalid data len: expected %v, get %v", dataCnt, len(fs.getData()))
}
}
}
spf13-afero-18d690e/os.go 0000664 0000000 0000000 00000005557 15057601571 0015075 0 ustar 00root root 0000000 0000000 // Copyright © 2014 Steve Francia .
// Copyright 2013 tsuru authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"os"
"time"
)
var _ Lstater = (*OsFs)(nil)
// OsFs is a Fs implementation that uses functions provided by the os package.
//
// For details in any method, check the documentation of the os package
// (http://golang.org/pkg/os/).
type OsFs struct{}
func NewOsFs() Fs {
return &OsFs{}
}
func (OsFs) Name() string { return "OsFs" }
func (OsFs) Create(name string) (File, error) {
f, e := os.Create(name)
if f == nil {
// While this looks strange, we need to return an untyped nil here, not a nil
// value of type *os.File: a typed nil wrapped in the File interface would not compare equal to nil.
return nil, e
}
return f, e
}
func (OsFs) Mkdir(name string, perm os.FileMode) error {
return os.Mkdir(name, perm)
}
func (OsFs) MkdirAll(path string, perm os.FileMode) error {
return os.MkdirAll(path, perm)
}
func (OsFs) Open(name string) (File, error) {
f, e := os.Open(name)
if f == nil {
// While this looks strange, we need to return an untyped nil here, not a nil
// value of type *os.File: a typed nil wrapped in the File interface would not compare equal to nil.
return nil, e
}
return f, e
}
func (OsFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
f, e := os.OpenFile(name, flag, perm)
if f == nil {
// While this looks strange, we need to return an untyped nil here, not a nil
// value of type *os.File: a typed nil wrapped in the File interface would not compare equal to nil.
return nil, e
}
return f, e
}
func (OsFs) Remove(name string) error {
return os.Remove(name)
}
func (OsFs) RemoveAll(path string) error {
return os.RemoveAll(path)
}
func (OsFs) Rename(oldname, newname string) error {
return os.Rename(oldname, newname)
}
func (OsFs) Stat(name string) (os.FileInfo, error) {
return os.Stat(name)
}
func (OsFs) Chmod(name string, mode os.FileMode) error {
return os.Chmod(name, mode)
}
func (OsFs) Chown(name string, uid, gid int) error {
return os.Chown(name, uid, gid)
}
func (OsFs) Chtimes(name string, atime time.Time, mtime time.Time) error {
return os.Chtimes(name, atime, mtime)
}
func (OsFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
fi, err := os.Lstat(name)
return fi, true, err
}
func (OsFs) SymlinkIfPossible(oldname, newname string) error {
return os.Symlink(oldname, newname)
}
func (OsFs) ReadlinkIfPossible(name string) (string, error) {
return os.Readlink(name)
}
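// Illustrative usage sketch (added for this write-up; not part of the upstream
// afero source): because OsFs only delegates to the os package, code written
// against the Fs interface runs unchanged on OsFs in production and on
// MemMapFs in tests.
func exampleWriteThroughFs(fs Fs, name string, data []byte) error {
	f, err := fs.Create(name)
	if err != nil {
		return err
	}
	if _, err := f.Write(data); err != nil {
		f.Close()
		return err
	}
	return f.Close()
}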
spf13-afero-18d690e/path.go 0000664 0000000 0000000 00000005560 15057601571 0015402 0 ustar 00root root 0000000 0000000 // Copyright ©2015 The Go Authors
// Copyright ©2015 Steve Francia
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"os"
"path/filepath"
"sort"
)
// readDirNames reads the directory named by dirname and returns
// a sorted list of directory entries.
// adapted from https://golang.org/src/path/filepath/path.go
func readDirNames(fs Fs, dirname string) ([]string, error) {
f, err := fs.Open(dirname)
if err != nil {
return nil, err
}
names, err := f.Readdirnames(-1)
f.Close()
if err != nil {
return nil, err
}
sort.Strings(names)
return names, nil
}
// walk recursively descends path, calling walkFn
// adapted from https://golang.org/src/path/filepath/path.go
func walk(fs Fs, path string, info os.FileInfo, walkFn filepath.WalkFunc) error {
err := walkFn(path, info, nil)
if err != nil {
if info.IsDir() && err == filepath.SkipDir {
return nil
}
return err
}
if !info.IsDir() {
return nil
}
names, err := readDirNames(fs, path)
if err != nil {
return walkFn(path, info, err)
}
for _, name := range names {
filename := filepath.Join(path, name)
fileInfo, err := lstatIfPossible(fs, filename)
if err != nil {
if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir {
return err
}
} else {
err = walk(fs, filename, fileInfo, walkFn)
if err != nil {
if !fileInfo.IsDir() || err != filepath.SkipDir {
return err
}
}
}
}
return nil
}
// if the filesystem supports it, use Lstat, else use fs.Stat
func lstatIfPossible(fs Fs, path string) (os.FileInfo, error) {
if lfs, ok := fs.(Lstater); ok {
fi, _, err := lfs.LstatIfPossible(path)
return fi, err
}
return fs.Stat(path)
}
// Walk walks the file tree rooted at root, calling walkFn for each file or
// directory in the tree, including root. All errors that arise visiting files
// and directories are filtered by walkFn. The files are walked in lexical
// order, which makes the output deterministic but means that for very
// large directories Walk can be inefficient.
// Walk does not follow symbolic links.
func (a Afero) Walk(root string, walkFn filepath.WalkFunc) error {
return Walk(a.Fs, root, walkFn)
}
func Walk(fs Fs, root string, walkFn filepath.WalkFunc) error {
info, err := lstatIfPossible(fs, root)
if err != nil {
return walkFn(root, nil, err)
}
return walk(fs, root, info, walkFn)
}
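// Illustrative usage sketch (added for this write-up; not part of the upstream
// afero source): Walk visits entries in lexical order, so collecting paths
// from a MemMapFs yields a deterministic listing.
func exampleWalkCollect() ([]string, error) {
	fs := NewMemMapFs()
	for _, name := range []string{"/w/b.txt", "/w/a.txt", "/w/sub/c.txt"} {
		if err := WriteFile(fs, name, []byte("x"), 0o644); err != nil {
			return nil, err
		}
	}
	var visited []string
	err := Walk(fs, "/w", func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		// visits "/w", "/w/a.txt", "/w/b.txt", "/w/sub", "/w/sub/c.txt"
		visited = append(visited, path)
		return nil
	})
	return visited, err
}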
spf13-afero-18d690e/path_test.go 0000664 0000000 0000000 00000003111 15057601571 0016427 0 ustar 00root root 0000000 0000000 // Copyright © 2014 Steve Francia .
// Copyright 2009 The Go Authors. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"fmt"
"os"
"testing"
)
func TestWalk(t *testing.T) {
defer removeAllTestFiles(t)
var testDir string
for i, fs := range Fss {
if i == 0 {
testDir = setupTestDirRoot(t, fs)
} else {
setupTestDirReusePath(t, fs, testDir)
}
}
outputs := make([]string, len(Fss))
for i, fs := range Fss {
walkFn := func(path string, info os.FileInfo, err error) error {
if err != nil {
t.Error("walkFn err:", err)
}
var size int64
if !info.IsDir() {
size = info.Size()
}
outputs[i] += fmt.Sprintln(path, info.Name(), size, info.IsDir(), err)
return nil
}
err := Walk(fs, testDir, walkFn)
if err != nil {
t.Error(err)
}
}
fail := false
for i, o := range outputs {
if i == 0 {
continue
}
if o != outputs[i-1] {
fail = true
break
}
}
if fail {
t.Log("Walk outputs not equal!")
for i, o := range outputs {
t.Log(Fss[i].Name() + "\n" + o)
}
t.Fail()
}
}
spf13-afero-18d690e/readonlyfs.go 0000664 0000000 0000000 00000004110 15057601571 0016602 0 ustar 00root root 0000000 0000000 package afero
import (
"os"
"syscall"
"time"
)
var _ Lstater = (*ReadOnlyFs)(nil)
type ReadOnlyFs struct {
source Fs
}
func NewReadOnlyFs(source Fs) Fs {
return &ReadOnlyFs{source: source}
}
func (r *ReadOnlyFs) ReadDir(name string) ([]os.FileInfo, error) {
return ReadDir(r.source, name)
}
func (r *ReadOnlyFs) Chtimes(n string, a, m time.Time) error {
return syscall.EPERM
}
func (r *ReadOnlyFs) Chmod(n string, m os.FileMode) error {
return syscall.EPERM
}
func (r *ReadOnlyFs) Chown(n string, uid, gid int) error {
return syscall.EPERM
}
func (r *ReadOnlyFs) Name() string {
return "ReadOnlyFilter"
}
func (r *ReadOnlyFs) Stat(name string) (os.FileInfo, error) {
return r.source.Stat(name)
}
func (r *ReadOnlyFs) LstatIfPossible(name string) (os.FileInfo, bool, error) {
if lsf, ok := r.source.(Lstater); ok {
return lsf.LstatIfPossible(name)
}
fi, err := r.Stat(name)
return fi, false, err
}
func (r *ReadOnlyFs) SymlinkIfPossible(oldname, newname string) error {
return &os.LinkError{Op: "symlink", Old: oldname, New: newname, Err: ErrNoSymlink}
}
func (r *ReadOnlyFs) ReadlinkIfPossible(name string) (string, error) {
if srdr, ok := r.source.(LinkReader); ok {
return srdr.ReadlinkIfPossible(name)
}
return "", &os.PathError{Op: "readlink", Path: name, Err: ErrNoReadlink}
}
func (r *ReadOnlyFs) Rename(o, n string) error {
return syscall.EPERM
}
func (r *ReadOnlyFs) RemoveAll(p string) error {
return syscall.EPERM
}
func (r *ReadOnlyFs) Remove(n string) error {
return syscall.EPERM
}
func (r *ReadOnlyFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
if flag&(os.O_WRONLY|syscall.O_RDWR|os.O_APPEND|os.O_CREATE|os.O_TRUNC) != 0 {
return nil, syscall.EPERM
}
return r.source.OpenFile(name, flag, perm)
}
func (r *ReadOnlyFs) Open(n string) (File, error) {
return r.source.Open(n)
}
func (r *ReadOnlyFs) Mkdir(n string, p os.FileMode) error {
return syscall.EPERM
}
func (r *ReadOnlyFs) MkdirAll(n string, p os.FileMode) error {
return syscall.EPERM
}
func (r *ReadOnlyFs) Create(n string) (File, error) {
return nil, syscall.EPERM
}
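// Illustrative usage sketch (added for this write-up; not part of the upstream
// afero source): wrapping a writable Fs in NewReadOnlyFs lets reads through to
// the source while every mutating call is rejected with syscall.EPERM.
func exampleReadOnlyWrapper() error {
	base := NewMemMapFs()
	if err := WriteFile(base, "/cfg.yaml", []byte("a: 1"), 0o644); err != nil {
		return err
	}
	ro := NewReadOnlyFs(base)
	if _, err := ro.Open("/cfg.yaml"); err != nil { // reads pass through
		return err
	}
	if err := ro.Remove("/cfg.yaml"); err != syscall.EPERM {
		return err // expected syscall.EPERM from the read-only wrapper
	}
	return nil
}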
spf13-afero-18d690e/regexpfs.go 0000664 0000000 0000000 00000010421 15057601571 0016261 0 ustar 00root root 0000000 0000000 package afero
import (
"os"
"regexp"
"syscall"
"time"
)
// RegexpFs filters files (not directories) by regular expression. Only
// files matching the given regexp are allowed; all others get an ENOENT
// error ("no such file or directory").
type RegexpFs struct {
re *regexp.Regexp
source Fs
}
func NewRegexpFs(source Fs, re *regexp.Regexp) Fs {
return &RegexpFs{source: source, re: re}
}
type RegexpFile struct {
f File
re *regexp.Regexp
}
func (r *RegexpFs) matchesName(name string) error {
if r.re == nil {
return nil
}
if r.re.MatchString(name) {
return nil
}
return syscall.ENOENT
}
func (r *RegexpFs) dirOrMatches(name string) error {
dir, err := IsDir(r.source, name)
if err != nil {
return err
}
if dir {
return nil
}
return r.matchesName(name)
}
func (r *RegexpFs) Chtimes(name string, a, m time.Time) error {
if err := r.dirOrMatches(name); err != nil {
return err
}
return r.source.Chtimes(name, a, m)
}
func (r *RegexpFs) Chmod(name string, mode os.FileMode) error {
if err := r.dirOrMatches(name); err != nil {
return err
}
return r.source.Chmod(name, mode)
}
func (r *RegexpFs) Chown(name string, uid, gid int) error {
if err := r.dirOrMatches(name); err != nil {
return err
}
return r.source.Chown(name, uid, gid)
}
func (r *RegexpFs) Name() string {
return "RegexpFs"
}
func (r *RegexpFs) Stat(name string) (os.FileInfo, error) {
if err := r.dirOrMatches(name); err != nil {
return nil, err
}
return r.source.Stat(name)
}
func (r *RegexpFs) Rename(oldname, newname string) error {
dir, err := IsDir(r.source, oldname)
if err != nil {
return err
}
if dir {
return nil
}
if err := r.matchesName(oldname); err != nil {
return err
}
if err := r.matchesName(newname); err != nil {
return err
}
return r.source.Rename(oldname, newname)
}
func (r *RegexpFs) RemoveAll(p string) error {
dir, err := IsDir(r.source, p)
if err != nil {
return err
}
if !dir {
if err := r.matchesName(p); err != nil {
return err
}
}
return r.source.RemoveAll(p)
}
func (r *RegexpFs) Remove(name string) error {
if err := r.dirOrMatches(name); err != nil {
return err
}
return r.source.Remove(name)
}
func (r *RegexpFs) OpenFile(name string, flag int, perm os.FileMode) (File, error) {
if err := r.dirOrMatches(name); err != nil {
return nil, err
}
return r.source.OpenFile(name, flag, perm)
}
func (r *RegexpFs) Open(name string) (File, error) {
dir, err := IsDir(r.source, name)
if err != nil {
return nil, err
}
if !dir {
if err := r.matchesName(name); err != nil {
return nil, err
}
}
f, err := r.source.Open(name)
if err != nil {
return nil, err
}
return &RegexpFile{f: f, re: r.re}, nil
}
func (r *RegexpFs) Mkdir(n string, p os.FileMode) error {
return r.source.Mkdir(n, p)
}
func (r *RegexpFs) MkdirAll(n string, p os.FileMode) error {
return r.source.MkdirAll(n, p)
}
func (r *RegexpFs) Create(name string) (File, error) {
if err := r.matchesName(name); err != nil {
return nil, err
}
return r.source.Create(name)
}
func (f *RegexpFile) Close() error {
return f.f.Close()
}
func (f *RegexpFile) Read(s []byte) (int, error) {
return f.f.Read(s)
}
func (f *RegexpFile) ReadAt(s []byte, o int64) (int, error) {
return f.f.ReadAt(s, o)
}
func (f *RegexpFile) Seek(o int64, w int) (int64, error) {
return f.f.Seek(o, w)
}
func (f *RegexpFile) Write(s []byte) (int, error) {
return f.f.Write(s)
}
func (f *RegexpFile) WriteAt(s []byte, o int64) (int, error) {
return f.f.WriteAt(s, o)
}
func (f *RegexpFile) Name() string {
return f.f.Name()
}
func (f *RegexpFile) Readdir(c int) (fi []os.FileInfo, err error) {
var rfi []os.FileInfo
rfi, err = f.f.Readdir(c)
if err != nil {
return nil, err
}
for _, i := range rfi {
if i.IsDir() || f.re.MatchString(i.Name()) {
fi = append(fi, i)
}
}
return fi, nil
}
func (f *RegexpFile) Readdirnames(c int) (n []string, err error) {
fi, err := f.Readdir(c)
if err != nil {
return nil, err
}
for _, s := range fi {
n = append(n, s.Name())
}
return n, nil
}
func (f *RegexpFile) Stat() (os.FileInfo, error) {
return f.f.Stat()
}
func (f *RegexpFile) Sync() error {
return f.f.Sync()
}
func (f *RegexpFile) Truncate(s int64) error {
return f.f.Truncate(s)
}
func (f *RegexpFile) WriteString(s string) (int, error) {
return f.f.WriteString(s)
}
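// Illustrative usage sketch (added for this write-up; not part of the upstream
// afero source): RegexpFs hides files whose names do not match the pattern,
// while directories always pass through.
func exampleRegexpFilter() error {
	base := NewMemMapFs()
	for _, name := range []string{"/logs/app.log", "/logs/readme.md"} {
		if err := WriteFile(base, name, []byte("x"), 0o644); err != nil {
			return err
		}
	}
	fs := NewRegexpFs(base, regexp.MustCompile(`\.log$`))
	if _, err := fs.Stat("/logs/app.log"); err != nil { // matches, visible
		return err
	}
	if _, err := fs.Stat("/logs/readme.md"); err != syscall.ENOENT { // filtered out
		return err
	}
	return nil
}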
spf13-afero-18d690e/ro_regexp_test.go 0000664 0000000 0000000 00000004147 15057601571 0017477 0 ustar 00root root 0000000 0000000 package afero
import (
"regexp"
"testing"
)
func TestFilterReadOnly(t *testing.T) {
fs := &ReadOnlyFs{source: &MemMapFs{}}
_, err := fs.Create("/file.txt")
if err == nil {
t.Errorf("Did not fail to create file")
}
// t.Logf("ERR=%s", err)
}
func TestFilterReadonlyRemoveAndRead(t *testing.T) {
mfs := &MemMapFs{}
fh, _ := mfs.Create("/file.txt")
fh.Write([]byte("content here"))
fh.Close()
fs := NewReadOnlyFs(mfs)
err := fs.Remove("/file.txt")
if err == nil {
t.Errorf("Did not fail to remove file")
}
fh, err = fs.Open("/file.txt")
if err != nil {
t.Errorf("Failed to open file: %s", err)
}
buf := make([]byte, len("content here"))
_, err = fh.Read(buf)
fh.Close()
if string(buf) != "content here" {
t.Errorf("Failed to read file: %s", err)
}
err = mfs.Remove("/file.txt")
if err != nil {
t.Errorf("Failed to remove file")
}
fh, err = fs.Open("/file.txt")
if err == nil {
fh.Close()
t.Errorf("File still present")
}
}
func TestFilterRegexp(t *testing.T) {
fs := NewRegexpFs(&MemMapFs{}, regexp.MustCompile(`\.txt$`))
_, err := fs.Create("/file.html")
if err == nil {
t.Errorf("Did not fail to create file")
}
// t.Logf("ERR=%s", err)
}
func TestFilterRORegexpChain(t *testing.T) {
rofs := &ReadOnlyFs{source: &MemMapFs{}}
fs := &RegexpFs{re: regexp.MustCompile(`\.txt$`), source: rofs}
_, err := fs.Create("/file.txt")
if err == nil {
t.Errorf("Did not fail to create file")
}
// t.Logf("ERR=%s", err)
}
func TestFilterRegexReadDir(t *testing.T) {
mfs := &MemMapFs{}
fs1 := &RegexpFs{re: regexp.MustCompile(`\.txt$`), source: mfs}
fs := &RegexpFs{re: regexp.MustCompile(`^a`), source: fs1}
mfs.MkdirAll("/dir/sub", 0o777)
for _, name := range []string{"afile.txt", "afile.html", "bfile.txt"} {
for _, dir := range []string{"/dir/", "/dir/sub/"} {
fh, _ := mfs.Create(dir + name)
fh.Close()
}
}
files, _ := ReadDir(fs, "/dir")
if len(files) != 2 { // afile.txt, sub
t.Errorf("Got wrong number of files: %#v", files)
}
f, _ := fs.Open("/dir/sub")
names, _ := f.Readdirnames(-1)
if len(names) != 1 {
t.Errorf("Got wrong number of names: %v", names)
}
}
spf13-afero-18d690e/sftpfs/ 0000775 0000000 0000000 00000000000 15057601571 0015416 5 ustar 00root root 0000000 0000000 spf13-afero-18d690e/sftpfs/file.go 0000664 0000000 0000000 00000004420 15057601571 0016664 0 ustar 00root root 0000000 0000000 // Copyright © 2015 Jerry Jacobs .
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sftpfs
import (
"os"
"github.com/pkg/sftp"
)
type File struct {
client *sftp.Client
fd *sftp.File
}
func FileOpen(s *sftp.Client, name string) (*File, error) {
fd, err := s.Open(name)
if err != nil {
return &File{}, err
}
return &File{fd: fd, client: s}, nil
}
func FileCreate(s *sftp.Client, name string) (*File, error) {
fd, err := s.Create(name)
if err != nil {
return &File{}, err
}
return &File{fd: fd, client: s}, nil
}
func (f *File) Close() error {
return f.fd.Close()
}
func (f *File) Name() string {
return f.fd.Name()
}
func (f *File) Stat() (os.FileInfo, error) {
return f.fd.Stat()
}
func (f *File) Sync() error {
return nil
}
func (f *File) Truncate(size int64) error {
return f.fd.Truncate(size)
}
func (f *File) Read(b []byte) (n int, err error) {
return f.fd.Read(b)
}
func (f *File) ReadAt(b []byte, off int64) (n int, err error) {
return f.fd.ReadAt(b, off)
}
func (f *File) Readdir(count int) (res []os.FileInfo, err error) {
res, err = f.client.ReadDir(f.Name())
if err != nil {
return
}
if count > 0 {
if len(res) > count {
res = res[:count]
}
}
return
}
func (f *File) Readdirnames(n int) (names []string, err error) {
data, err := f.Readdir(n)
if err != nil {
return nil, err
}
for _, v := range data {
names = append(names, v.Name())
}
return
}
func (f *File) Seek(offset int64, whence int) (int64, error) {
return f.fd.Seek(offset, whence)
}
func (f *File) Write(b []byte) (n int, err error) {
return f.fd.Write(b)
}
// WriteAt delegates to the underlying sftp file's WriteAt, which is provided by
// the sftp version pinned in go.mod.
func (f *File) WriteAt(b []byte, off int64) (n int, err error) {
return f.fd.WriteAt(b, off)
}
func (f *File) WriteString(s string) (ret int, err error) {
return f.fd.Write([]byte(s))
}
spf13-afero-18d690e/sftpfs/go.mod 0000664 0000000 0000000 00000000475 15057601571 0016532 0 ustar 00root root 0000000 0000000 module github.com/spf13/afero/sftpfs
go 1.23.0
replace github.com/spf13/afero => ../
require (
github.com/pkg/sftp v1.13.8
github.com/spf13/afero v1.14.0
golang.org/x/crypto v0.36.0
)
require (
github.com/kr/fs v0.1.0 // indirect
golang.org/x/sys v0.31.0 // indirect
golang.org/x/text v0.28.0 // indirect
)
spf13-afero-18d690e/sftpfs/go.sum 0000664 0000000 0000000 00000017340 15057601571 0016556 0 ustar 00root root 0000000 0000000 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/pkg/sftp v1.13.8 h1:Xt7eJ/xqXv7s0VuzFw7JXhZj6Oc1zI6l4GK8KP9sFB0=
github.com/pkg/sftp v1.13.8/go.mod h1:DmvEkvKE2lshEeuo2JMp06yqcx9HVnR7e3zqQl42F3U=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
spf13-afero-18d690e/sftpfs/sftp.go 0000664 0000000 0000000 00000006706 15057601571 0016732 0 ustar 00root root 0000000 0000000 // Copyright © 2015 Jerry Jacobs .
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sftpfs
import (
"os"
"syscall"
"time"
"github.com/pkg/sftp"
"github.com/spf13/afero"
)
// Fs is an afero.Fs implementation that uses functions provided by the sftp package.
//
// For details on any method, check the documentation of the sftp package
// (github.com/pkg/sftp).
type Fs struct {
client *sftp.Client
}
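// New returns an afero.Fs backed by the given SFTP client.
//
// A minimal usage sketch (host, user and password are placeholder assumptions,
// mirroring the connection setup used in the tests):
//
//	sshcfg := &ssh.ClientConfig{
//		User:            "user",
//		Auth:            []ssh.AuthMethod{ssh.Password("password")},
//		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // do not use in production
//	}
//	sshc, err := ssh.Dial("tcp", "example.com:22", sshcfg)
//	// handle err
//	client, err := sftp.NewClient(sshc)
//	// handle err
//	var fs afero.Fs = sftpfs.New(client)
//	_, err = fs.Stat("/")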
func New(client *sftp.Client) afero.Fs {
return &Fs{client: client}
}
func (s Fs) Name() string { return "sftpfs" }
func (s Fs) Create(name string) (afero.File, error) {
return FileCreate(s.client, name)
}
func (s Fs) Mkdir(name string, perm os.FileMode) error {
err := s.client.Mkdir(name)
if err != nil {
return err
}
return s.client.Chmod(name, perm)
}
func (s Fs) MkdirAll(path string, perm os.FileMode) error {
// Fast path: if we can tell whether path is a directory or file, stop with success or error.
dir, err := s.Stat(path)
if err == nil {
if dir.IsDir() {
return nil
}
return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR}
}
// Slow path: make sure parent exists and then call Mkdir for path.
i := len(path)
for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator.
i--
}
j := i
for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element.
j--
}
if j > 1 {
// Create parent
err = s.MkdirAll(path[0:j-1], perm)
if err != nil {
return err
}
}
// Parent now exists; invoke Mkdir and use its result.
err = s.Mkdir(path, perm)
if err != nil {
// Handle arguments like "foo/." by
// double-checking that directory doesn't exist.
dir, err1 := s.Lstat(path)
if err1 == nil && dir.IsDir() {
return nil
}
return err
}
return nil
}
func (s Fs) Open(name string) (afero.File, error) {
return FileOpen(s.client, name)
}
// OpenFile calls the OpenFile method on the SFTP connection. The sftp package's
// OpenFile does not take a permission argument, so perm is applied with a separate
// Chmod call after the file has been opened.
func (s Fs) OpenFile(name string, flag int, perm os.FileMode) (afero.File, error) {
sshfsFile, err := s.client.OpenFile(name, flag)
if err != nil {
return nil, err
}
err = sshfsFile.Chmod(perm)
return &File{fd: sshfsFile}, err
}
func (s Fs) Remove(name string) error {
return s.client.Remove(name)
}
func (s Fs) RemoveAll(path string) error {
// TODO have a look at os.RemoveAll
// https://github.com/golang/go/blob/master/src/os/path.go#L66
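// Until then this is a no-op that always reports success.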
return nil
}
func (s Fs) Rename(oldname, newname string) error {
return s.client.Rename(oldname, newname)
}
func (s Fs) Stat(name string) (os.FileInfo, error) {
return s.client.Stat(name)
}
func (s Fs) Lstat(p string) (os.FileInfo, error) {
return s.client.Lstat(p)
}
func (s Fs) Chmod(name string, mode os.FileMode) error {
return s.client.Chmod(name, mode)
}
func (s Fs) Chown(name string, uid, gid int) error {
return s.client.Chown(name, uid, gid)
}
func (s Fs) Chtimes(name string, atime time.Time, mtime time.Time) error {
return s.client.Chtimes(name, atime, mtime)
}
spf13-afero-18d690e/sftpfs/sftp_test.go 0000664 0000000 0000000 00000016021 15057601571 0017760 0 ustar 00root root 0000000 0000000 // Copyright © 2015 Jerry Jacobs .
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package sftpfs
import (
_rand "crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"flag"
"fmt"
"io"
"log"
"net"
"os"
"testing"
"time"
"github.com/pkg/sftp"
"golang.org/x/crypto/ssh"
)
type SftpFsContext struct {
sshc *ssh.Client
sshcfg *ssh.ClientConfig
sftpc *sftp.Client
}
// TODO we only connect with a hardcoded user+pass for now;
// it should be possible to use $HOME/.ssh/id_rsa to log in to the stub sftp server.
func SftpConnect(user, password, host string) (*SftpFsContext, error) {
/*
pemBytes, err := ioutil.ReadFile(os.Getenv("HOME") + "/.ssh/id_rsa")
if err != nil {
return nil,err
}
signer, err := ssh.ParsePrivateKey(pemBytes)
if err != nil {
return nil,err
}
sshcfg := &ssh.ClientConfig{
User: user,
Auth: []ssh.AuthMethod{
ssh.Password(password),
ssh.PublicKeys(signer),
},
}
*/
sshcfg := &ssh.ClientConfig{
User: user,
Auth: []ssh.AuthMethod{
ssh.Password(password),
},
HostKeyCallback: ssh.InsecureIgnoreHostKey(),
}
sshc, err := ssh.Dial("tcp", host, sshcfg)
if err != nil {
return nil, err
}
sftpc, err := sftp.NewClient(sshc)
if err != nil {
return nil, err
}
ctx := &SftpFsContext{
sshc: sshc,
sshcfg: sshcfg,
sftpc: sftpc,
}
return ctx, nil
}
func (ctx *SftpFsContext) Disconnect() error {
ctx.sftpc.Close()
ctx.sshc.Close()
return nil
}
// TODO for some weird reason rootpath is "." when writing "file1" with the afero sftp backend
func RunSftpServer(rootpath string) {
var (
readOnly bool
debugLevelStr string
debugStderr bool
rootDir string
)
flag.BoolVar(&readOnly, "R", false, "read-only server")
flag.BoolVar(&debugStderr, "e", true, "debug to stderr")
flag.StringVar(&debugLevelStr, "l", "none", "debug level")
flag.StringVar(&rootDir, "root", rootpath, "root directory")
flag.Parse()
debugStream := io.Discard
// An SSH server is represented by a ServerConfig, which holds
// certificate details and handles authentication of ServerConns.
config := &ssh.ServerConfig{
PasswordCallback: func(c ssh.ConnMetadata, pass []byte) (*ssh.Permissions, error) {
// Should use constant-time compare (or better, salt+hash) in
// a production setting.
fmt.Fprintf(debugStream, "Login: %s\n", c.User())
if c.User() == "test" && string(pass) == "test" {
return nil, nil
}
return nil, fmt.Errorf("password rejected for %q", c.User())
},
}
privateBytes, err := os.ReadFile("./test/id_rsa")
if err != nil {
log.Fatal("Failed to load private key", err)
}
private, err := ssh.ParsePrivateKey(privateBytes)
if err != nil {
log.Fatal("Failed to parse private key", err)
}
config.AddHostKey(private)
// Once a ServerConfig has been configured, connections can be
// accepted.
listener, err := net.Listen("tcp", "0.0.0.0:2022")
if err != nil {
log.Fatal("failed to listen for connection", err)
}
nConn, err := listener.Accept()
if err != nil {
log.Fatal("failed to accept incoming connection", err)
}
// Before use, a handshake must be performed on the incoming
// net.Conn.
conn, chans, reqs, err := ssh.NewServerConn(nConn, config)
if err != nil {
log.Fatal("failed to handshake", err)
}
defer conn.Close()
// The incoming Request channel must be serviced.
go ssh.DiscardRequests(reqs)
// Service the incoming Channel channel.
for newChannel := range chans {
// Channels have a type, depending on the application level
// protocol intended. In the case of an SFTP session, this is "subsystem"
// with a payload string of "sftp"
fmt.Fprintf(debugStream, "Incoming channel: %s\n", newChannel.ChannelType())
if newChannel.ChannelType() != "session" {
newChannel.Reject(ssh.UnknownChannelType, "unknown channel type")
fmt.Fprintf(debugStream, "Unknown channel type: %s\n", newChannel.ChannelType())
continue
}
channel, requests, err := newChannel.Accept()
if err != nil {
log.Fatal("could not accept channel.", err)
}
fmt.Fprintf(debugStream, "Channel accepted\n")
// Sessions have out-of-band requests such as "shell",
// "pty-req" and "env". Here we handle only the
// "subsystem" request.
go func(in <-chan *ssh.Request) {
for req := range in {
fmt.Fprintf(debugStream, "Request: %v\n", req.Type)
ok := false
switch req.Type {
case "subsystem":
fmt.Fprintf(debugStream, "Subsystem: %s\n", req.Payload[4:])
if string(req.Payload[4:]) == "sftp" {
ok = true
}
}
fmt.Fprintf(debugStream, " - accepted: %v\n", ok)
req.Reply(ok, nil)
}
}(requests)
server, err := sftp.NewServer(channel, sftp.WithDebug(debugStream))
if err != nil {
log.Fatal(err)
}
_ = server.Serve()
return
}
}
// MakeSSHKeyPair make a pair of public and private keys for SSH access.
// Public key is encoded in the format for inclusion in an OpenSSH authorized_keys file.
// Private Key generated is PEM encoded
func MakeSSHKeyPair(bits int, pubKeyPath, privateKeyPath string) error {
privateKey, err := rsa.GenerateKey(_rand.Reader, bits)
if err != nil {
return err
}
// generate and write private key as PEM
privateKeyFile, err := os.Create(privateKeyPath)
if err != nil {
return err
}
defer privateKeyFile.Close()
privateKeyPEM := &pem.Block{
Type: "RSA PRIVATE KEY",
Bytes: x509.MarshalPKCS1PrivateKey(privateKey),
}
if err := pem.Encode(privateKeyFile, privateKeyPEM); err != nil {
return err
}
// generate and write public key
pub, err := ssh.NewPublicKey(&privateKey.PublicKey)
if err != nil {
return err
}
return os.WriteFile(pubKeyPath, ssh.MarshalAuthorizedKey(pub), 0o655)
}
func TestSftpCreate(t *testing.T) {
os.Mkdir("./test", 0o777)
MakeSSHKeyPair(1024, "./test/id_rsa.pub", "./test/id_rsa")
go RunSftpServer("./test/")
time.Sleep(5 * time.Second)
ctx, err := SftpConnect("test", "test", "localhost:2022")
if err != nil {
t.Fatal(err)
}
defer ctx.Disconnect()
fs := New(ctx.sftpc)
fs.MkdirAll("test/dir1/dir2/dir3", os.FileMode(0o777))
fs.Mkdir("test/foo", os.FileMode(0o000))
fs.Chmod("test/foo", os.FileMode(0o700))
fs.Mkdir("test/bar", os.FileMode(0o777))
file, err := fs.Create("file1")
if err != nil {
t.Error(err)
}
defer file.Close()
file.Write([]byte("hello "))
file.WriteString("world!\n")
f1, err := fs.Open("file1")
if err != nil {
t.Fatalf("open: %v", err)
}
defer f1.Close()
b := make([]byte, 100)
_, _ = f1.Read(b)
fmt.Println(string(b))
fmt.Println("done")
// TODO check here if "hello world!\n" is in buffer b
}
spf13-afero-18d690e/symlink.go 0000664 0000000 0000000 00000003765 15057601571 0016141 0 ustar 00root root 0000000 0000000 // Copyright © 2018 Steve Francia .
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"errors"
)
// Symlinker is an optional interface in Afero. It is only implemented by
// filesystems that declare support for it.
// It indicates support for three symlink-related interfaces that mirror the
// behavior of the corresponding os functions:
// - Lstat
// - Symlink
// - Readlink
type Symlinker interface {
Lstater
Linker
LinkReader
}
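// A minimal usage sketch (an assumption, not prescribed by this package): callers
// type-assert an Fs before relying on symlink support.
//
//	var fs Fs = NewOsFs()
//	if s, ok := fs.(Symlinker); ok {
//		if err := s.SymlinkIfPossible("/tmp/target", "/tmp/link"); err != nil {
//			// err may wrap ErrNoSymlink on filesystems without symlink support
//		}
//	}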
// Linker is an optional interface in Afero. It is only implemented by
// filesystems that declare support for it.
// It will call Symlink if the filesystem itself is, or delegates to, the os
// filesystem, or if the filesystem otherwise supports symlinks.
type Linker interface {
SymlinkIfPossible(oldname, newname string) error
}
// ErrNoSymlink is the error that will be wrapped in an os.LinkError if a file system
// does not support symlinks, either directly or through its delegated filesystem,
// as expressed by support for the Linker interface.
var ErrNoSymlink = errors.New("symlink not supported")
// LinkReader is an optional interface in Afero. It is only implemented by
// filesystems that declare support for it.
type LinkReader interface {
ReadlinkIfPossible(name string) (string, error)
}
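// A minimal usage sketch (an assumption): reading a link target through the
// optional interface.
//
//	if r, ok := fs.(LinkReader); ok {
//		target, err := r.ReadlinkIfPossible("/tmp/link")
//		// use target; err may wrap ErrNoReadlink
//	}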
// ErrNoReadlink is the error that will be wrapped in an os.PathError if a file system
// does not support the readlink operation, either directly or through its delegated
// filesystem, as expressed by support for the LinkReader interface.
var ErrNoReadlink = errors.New("readlink not supported")
spf13-afero-18d690e/symlink_test.go 0000664 0000000 0000000 00000012060 15057601571 0017164 0 ustar 00root root 0000000 0000000 package afero
import (
"os"
"path/filepath"
"strings"
"testing"
)
func TestSymlinkIfPossible(t *testing.T) {
wd, _ := os.Getwd()
defer func() {
os.Chdir(wd)
}()
osFs := &OsFs{}
workDir, err := TempDir(osFs, "", "afero-symlink")
if err != nil {
t.Fatal(err)
}
defer func() {
osFs.RemoveAll(workDir)
}()
memWorkDir := "/sym"
memFs := NewMemMapFs()
overlayFs1 := &CopyOnWriteFs{base: osFs, layer: memFs}
overlayFs2 := &CopyOnWriteFs{base: memFs, layer: osFs}
overlayFsMemOnly := &CopyOnWriteFs{base: memFs, layer: NewMemMapFs()}
basePathFs := &BasePathFs{source: osFs, path: workDir}
basePathFsMem := &BasePathFs{source: memFs, path: memWorkDir}
roFs := &ReadOnlyFs{source: osFs}
roFsMem := &ReadOnlyFs{source: memFs}
pathFileMem := filepath.Join(memWorkDir, "aferom.txt")
osPath := filepath.Join(workDir, "afero.txt")
WriteFile(osFs, osPath, []byte("Hi, Afero!"), 0o777)
WriteFile(memFs, filepath.Join(pathFileMem), []byte("Hi, Afero!"), 0o777)
testLink := func(l Linker, source, destination string, output *string) {
if fs, ok := l.(Fs); ok {
dir := filepath.Dir(destination)
if dir != "" {
fs.MkdirAll(dir, 0o777)
}
}
err := l.SymlinkIfPossible(source, destination)
if (err == nil) && (output != nil) {
t.Fatalf("Error creating symlink, succeeded when expecting error %v", *output)
} else if (err != nil) && (output == nil) {
t.Fatalf("Error creating symlink, expected success, got %v", err)
} else if err != nil && err.Error() != *output && !strings.HasSuffix(err.Error(), *output) {
t.Fatalf("Error creating symlink, expected error '%v', instead got output '%v'", *output, err)
} else {
// test passed, if expecting a successful link, check the link with lstat if able
if output == nil {
if lst, ok := l.(Lstater); ok {
_, ok, err := lst.LstatIfPossible(destination)
if !ok {
if err != nil {
t.Fatalf("Error calling lstat on file after successful link, got: %v", err)
} else {
t.Fatalf("Error calling lstat on file after successful link, result didn't use lstat (not link)")
}
return
}
}
}
}
}
notSupported := ErrNoSymlink.Error()
testLink(osFs, osPath, filepath.Join(workDir, "os/link.txt"), nil)
testLink(overlayFs1, osPath, filepath.Join(workDir, "overlay/link1.txt"), &notSupported)
testLink(overlayFs2, pathFileMem, filepath.Join(workDir, "overlay2/link2.txt"), nil)
testLink(
overlayFsMemOnly,
pathFileMem,
filepath.Join(memWorkDir, "overlay3/link.txt"),
&notSupported,
)
testLink(basePathFs, "afero.txt", "basepath/link.txt", nil)
testLink(basePathFsMem, pathFileMem, "link/file.txt", &notSupported)
testLink(roFs, osPath, filepath.Join(workDir, "ro/link.txt"), &notSupported)
testLink(roFsMem, pathFileMem, filepath.Join(memWorkDir, "ro/link.txt"), &notSupported)
}
func TestReadlinkIfPossible(t *testing.T) {
wd, _ := os.Getwd()
defer func() {
os.Chdir(wd)
}()
osFs := &OsFs{}
workDir, err := TempDir(osFs, "", "afero-readlink")
if err != nil {
t.Fatal(err)
}
defer func() {
osFs.RemoveAll(workDir)
}()
memWorkDir := "/read"
memFs := NewMemMapFs()
overlayFs1 := &CopyOnWriteFs{base: osFs, layer: memFs}
overlayFs2 := &CopyOnWriteFs{base: memFs, layer: osFs}
overlayFsMemOnly := &CopyOnWriteFs{base: memFs, layer: NewMemMapFs()}
basePathFs := &BasePathFs{source: osFs, path: workDir}
basePathFsMem := &BasePathFs{source: memFs, path: memWorkDir}
roFs := &ReadOnlyFs{source: osFs}
roFsMem := &ReadOnlyFs{source: memFs}
pathFileMem := filepath.Join(memWorkDir, "aferom.txt")
osPath := filepath.Join(workDir, "afero.txt")
WriteFile(osFs, osPath, []byte("Hi, Afero!"), 0o777)
WriteFile(memFs, filepath.Join(pathFileMem), []byte("Hi, Afero!"), 0o777)
createLink := func(l Linker, source, destination string) error {
if fs, ok := l.(Fs); ok {
dir := filepath.Dir(destination)
if dir != "" {
fs.MkdirAll(dir, 0o777)
}
}
return l.SymlinkIfPossible(source, destination)
}
testRead := func(r LinkReader, name string, output *string) {
_, err := r.ReadlinkIfPossible(name)
if (err != nil) && (output == nil) {
t.Fatalf("Error reading link, expected success, got error: %v", err)
} else if (err == nil) && (output != nil) {
t.Fatalf("Error reading link, succeeded when expecting error: %v", *output)
} else if err != nil && err.Error() != *output && !strings.HasSuffix(err.Error(), *output) {
t.Fatalf("Error reading link, expected error '%v', instead received '%v'", *output, err)
}
}
notSupported := ErrNoReadlink.Error()
err = createLink(osFs, osPath, filepath.Join(workDir, "os/link.txt"))
if err != nil {
t.Fatal("Error creating test link: ", err)
}
testRead(osFs, filepath.Join(workDir, "os/link.txt"), nil)
testRead(overlayFs1, filepath.Join(workDir, "os/link.txt"), nil)
testRead(overlayFs2, filepath.Join(workDir, "os/link.txt"), nil)
testRead(overlayFsMemOnly, pathFileMem, &notSupported)
testRead(basePathFs, "os/link.txt", nil)
testRead(basePathFsMem, pathFileMem, &notSupported)
testRead(roFs, filepath.Join(workDir, "os/link.txt"), nil)
testRead(roFsMem, pathFileMem, &notSupported)
}
spf13-afero-18d690e/tarfs/ 0000775 0000000 0000000 00000000000 15057601571 0015230 5 ustar 00root root 0000000 0000000 spf13-afero-18d690e/tarfs/file.go 0000664 0000000 0000000 00000004703 15057601571 0016502 0 ustar 00root root 0000000 0000000 package tarfs
import (
"archive/tar"
"bytes"
"os"
"path/filepath"
"sort"
"syscall"
"github.com/spf13/afero"
)
type File struct {
h *tar.Header
data *bytes.Reader
closed bool
fs *Fs
}
func (f *File) Close() error {
if f.closed {
return afero.ErrFileClosed
}
f.closed = true
f.h = nil
f.data = nil
f.fs = nil
return nil
}
func (f *File) Read(p []byte) (n int, err error) {
if f.closed {
return 0, afero.ErrFileClosed
}
if f.h.Typeflag == tar.TypeDir {
return 0, syscall.EISDIR
}
return f.data.Read(p)
}
func (f *File) ReadAt(p []byte, off int64) (n int, err error) {
if f.closed {
return 0, afero.ErrFileClosed
}
if f.h.Typeflag == tar.TypeDir {
return 0, syscall.EISDIR
}
return f.data.ReadAt(p, off)
}
func (f *File) Seek(offset int64, whence int) (int64, error) {
if f.closed {
return 0, afero.ErrFileClosed
}
if f.h.Typeflag == tar.TypeDir {
return 0, syscall.EISDIR
}
return f.data.Seek(offset, whence)
}
func (f *File) Write(p []byte) (n int, err error) { return 0, syscall.EROFS }
func (f *File) WriteAt(p []byte, off int64) (n int, err error) { return 0, syscall.EROFS }
func (f *File) Name() string {
return filepath.Join(splitpath(f.h.Name))
}
func (f *File) getDirectoryNames() ([]string, error) {
d, ok := f.fs.files[f.Name()]
if !ok {
return nil, &os.PathError{Op: "readdir", Path: f.Name(), Err: syscall.ENOENT}
}
var names []string
for n := range d {
names = append(names, n)
}
sort.Strings(names)
return names, nil
}
func (f *File) Readdir(count int) ([]os.FileInfo, error) {
if f.closed {
return nil, afero.ErrFileClosed
}
if !f.h.FileInfo().IsDir() {
return nil, syscall.ENOTDIR
}
names, err := f.getDirectoryNames()
if err != nil {
return nil, err
}
d := f.fs.files[f.Name()]
var fi []os.FileInfo
for _, n := range names {
if n == "" {
continue
}
f := d[n]
fi = append(fi, f.h.FileInfo())
if count > 0 && len(fi) >= count {
break
}
}
return fi, nil
}
func (f *File) Readdirnames(n int) ([]string, error) {
fi, err := f.Readdir(n)
if err != nil {
return nil, err
}
var names []string
for _, f := range fi {
names = append(names, f.Name())
}
return names, nil
}
func (f *File) Stat() (os.FileInfo, error) { return f.h.FileInfo(), nil }
func (f *File) Sync() error { return nil }
func (f *File) Truncate(size int64) error { return syscall.EROFS }
func (f *File) WriteString(s string) (ret int, err error) { return 0, syscall.EROFS }
spf13-afero-18d690e/tarfs/fs.go 0000664 0000000 0000000 00000006057 15057601571 0016177 0 ustar 00root root 0000000 0000000 // package tarfs implements a read-only in-memory representation of a tar archive
package tarfs
import (
"archive/tar"
"bytes"
"io"
"os"
"path/filepath"
"syscall"
"time"
"github.com/spf13/afero"
)
type Fs struct {
files map[string]map[string]*File
}
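// splitpath normalizes name to an absolute, cleaned path and splits it into its
// directory and file components; with slash separators, "sub/testDir2/testFile"
// becomes ("/sub/testDir2", "testFile") and "/" becomes ("/", "").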
func splitpath(name string) (dir, file string) {
name = filepath.ToSlash(name)
if len(name) == 0 || name[0] != '/' {
name = "/" + name
}
name = filepath.Clean(name)
dir, file = filepath.Split(name)
dir = filepath.Clean(dir)
return
}
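// New builds an in-memory, read-only filesystem from the contents read off t.
//
// A minimal usage sketch (an assumption, mirroring the tests; the archive and
// file names are placeholders):
//
//	f, err := os.Open("archive.tar")
//	// handle err
//	defer f.Close()
//	tfs := tarfs.New(tar.NewReader(f))
//	afs := &afero.Afero{Fs: tfs}
//	data, err := afs.ReadFile("/some/file")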
func New(t *tar.Reader) *Fs {
fs := &Fs{files: make(map[string]map[string]*File)}
for {
hdr, err := t.Next()
if err == io.EOF {
break
}
if err != nil {
return nil
}
d, f := splitpath(hdr.Name)
if _, ok := fs.files[d]; !ok {
fs.files[d] = make(map[string]*File)
}
var buf bytes.Buffer
size, err := buf.ReadFrom(t)
if err != nil {
panic("tarfs: reading from tar:" + err.Error())
}
if size != hdr.Size {
panic("tarfs: size mismatch")
}
file := &File{
h: hdr,
data: bytes.NewReader(buf.Bytes()),
fs: fs,
}
fs.files[d][f] = file
}
if fs.files[afero.FilePathSeparator] == nil {
fs.files[afero.FilePathSeparator] = make(map[string]*File)
}
// Add a pseudoroot
fs.files[afero.FilePathSeparator][""] = &File{
h: &tar.Header{
Name: afero.FilePathSeparator,
Typeflag: tar.TypeDir,
Size: 0,
},
data: bytes.NewReader(nil),
fs: fs,
}
return fs
}
func (fs *Fs) Open(name string) (afero.File, error) {
d, f := splitpath(name)
if _, ok := fs.files[d]; !ok {
return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT}
}
file, ok := fs.files[d][f]
if !ok {
return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT}
}
nf := *file
return &nf, nil
}
func (fs *Fs) Name() string { return "tarfs" }
func (fs *Fs) Create(name string) (afero.File, error) { return nil, syscall.EROFS }
func (fs *Fs) Mkdir(name string, perm os.FileMode) error { return syscall.EROFS }
func (fs *Fs) MkdirAll(path string, perm os.FileMode) error { return syscall.EROFS }
func (fs *Fs) OpenFile(name string, flag int, perm os.FileMode) (afero.File, error) {
if flag != os.O_RDONLY {
return nil, &os.PathError{Op: "open", Path: name, Err: syscall.EPERM}
}
return fs.Open(name)
}
func (fs *Fs) Remove(name string) error { return syscall.EROFS }
func (fs *Fs) RemoveAll(path string) error { return syscall.EROFS }
func (fs *Fs) Rename(oldname string, newname string) error { return syscall.EROFS }
func (fs *Fs) Stat(name string) (os.FileInfo, error) {
d, f := splitpath(name)
if _, ok := fs.files[d]; !ok {
return nil, &os.PathError{Op: "stat", Path: name, Err: syscall.ENOENT}
}
file, ok := fs.files[d][f]
if !ok {
return nil, &os.PathError{Op: "stat", Path: name, Err: syscall.ENOENT}
}
return file.h.FileInfo(), nil
}
func (fs *Fs) Chmod(name string, mode os.FileMode) error { return syscall.EROFS }
func (fs *Fs) Chown(name string, uid, gid int) error { return syscall.EROFS }
func (fs *Fs) Chtimes(name string, atime time.Time, mtime time.Time) error { return syscall.EROFS }
spf13-afero-18d690e/tarfs/tarfs_test.go 0000664 0000000 0000000 00000021145 15057601571 0017740 0 ustar 00root root 0000000 0000000 // Most of the tests are stolen from the zipfs implementation
package tarfs
import (
"archive/tar"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"reflect"
"strings"
"syscall"
"testing"
"github.com/spf13/afero"
)
var files = []struct {
name string
exists bool
isdir bool
size int64
content string
contentAt4k string
}{
{"/", true, true, 0, "", ""},
{"/sub", true, true, 0, "", ""},
{"/sub/testDir2", true, true, 0, "", ""},
{"/sub/testDir2/testFile", true, false, 8192, "cccccccc", "ccccdddd"},
{"/testFile", true, false, 8192, "aaaaaaaa", "aaaabbbb"},
{"/testDir1/testFile", true, false, 8192, "bbbbbbbb", "bbbbcccc"},
{"/nonExisting", false, false, 0, "", ""},
}
var dirs = []struct {
name string
children []string
}{
{"/", []string{"sub", "testDir1", "testFile"}},
{"/sub", []string{"testDir2"}},
{"/sub/testDir2", []string{"testFile"}},
{"/testDir1", []string{"testFile"}},
}
var afs *afero.Afero
func TestMain(m *testing.M) {
tf, err := os.Open("testdata/t.tar")
if err != nil {
fmt.Print(err)
os.Exit(1)
}
tfs := New(tar.NewReader(tf))
afs = &afero.Afero{Fs: tfs}
// Check that an empty reader does not panic.
_ = New(tar.NewReader(strings.NewReader("")))
os.Exit(m.Run())
}
func TestFsOpen(t *testing.T) {
for _, f := range files {
file, err := afs.Open(f.name)
if (err == nil) != f.exists {
t.Errorf("%v exists = %v, but got err = %v", f.name, f.exists, err)
}
if !f.exists {
continue
}
if err != nil {
t.Fatalf("%v: %v", f.name, err)
}
if file.Name() != filepath.FromSlash(f.name) {
t.Errorf("Name(), got %v, expected %v", file.Name(), filepath.FromSlash(f.name))
}
s, err := file.Stat()
if err != nil {
t.Fatalf("stat %v: got error '%v'", file.Name(), err)
}
if isdir := s.IsDir(); isdir != f.isdir {
t.Errorf("%v directory, got: %v, expected: %v", file.Name(), isdir, f.isdir)
}
if size := s.Size(); size != f.size {
t.Errorf("%v size, got: %v, expected: %v", file.Name(), size, f.size)
}
}
}
func TestRead(t *testing.T) {
for _, f := range files {
if !f.exists {
continue
}
file, err := afs.Open(f.name)
if err != nil {
t.Fatalf("opening %v: %v", f.name, err)
}
buf := make([]byte, 8)
n, err := file.Read(buf)
if err != nil {
if f.isdir && (err != syscall.EISDIR) {
t.Errorf("%v got error %v, expected EISDIR", f.name, err)
} else if !f.isdir {
t.Errorf("%v: %v", f.name, err)
}
} else if n != 8 {
t.Errorf("%v: got %d read bytes, expected 8", f.name, n)
} else if string(buf) != f.content {
t.Errorf("%v: got <%s>, expected <%s>", f.name, f.content, string(buf))
}
}
}
func TestReadAt(t *testing.T) {
for _, f := range files {
if !f.exists {
continue
}
file, err := afs.Open(f.name)
if err != nil {
t.Fatalf("opening %v: %v", f.name, err)
}
buf := make([]byte, 8)
n, err := file.ReadAt(buf, 4092)
if err != nil {
if f.isdir && (err != syscall.EISDIR) {
t.Errorf("%v got error %v, expected EISDIR", f.name, err)
} else if !f.isdir {
t.Errorf("%v: %v", f.name, err)
}
} else if n != 8 {
t.Errorf("%v: got %d read bytes, expected 8", f.name, n)
} else if string(buf) != f.contentAt4k {
t.Errorf("%v: got <%s>, expected <%s>", f.name, f.contentAt4k, string(buf))
}
}
}
func TestSeek(t *testing.T) {
for _, f := range files {
if !f.exists {
continue
}
file, err := afs.Open(f.name)
if err != nil {
t.Fatalf("opening %v: %v", f.name, err)
}
tests := []struct {
offin int64
whence int
offout int64
}{
{0, io.SeekStart, 0},
{10, io.SeekStart, 10},
{1, io.SeekCurrent, 11},
{10, io.SeekCurrent, 21},
{0, io.SeekEnd, f.size},
{-1, io.SeekEnd, f.size - 1},
}
for _, s := range tests {
n, err := file.Seek(s.offin, s.whence)
if err != nil {
if f.isdir && err == syscall.EISDIR {
continue
}
t.Errorf("%v: %v", f.name, err)
}
if n != s.offout {
t.Errorf(
"%v: (off: %v, whence: %v): got %v, expected %v",
f.name,
s.offin,
s.whence,
n,
s.offout,
)
}
}
}
}
func TestName(t *testing.T) {
for _, f := range files {
if !f.exists {
continue
}
file, err := afs.Open(f.name)
if err != nil {
t.Fatalf("opening %v: %v", f.name, err)
}
n := file.Name()
if n != filepath.FromSlash(f.name) {
t.Errorf("got: %v, expected: %v", n, filepath.FromSlash(f.name))
}
}
}
func TestClose(t *testing.T) {
for _, f := range files {
if !f.exists {
continue
}
file, err := afs.Open(f.name)
if err != nil {
t.Fatalf("opening %v: %v", f.name, err)
}
err = file.Close()
if err != nil {
t.Errorf("%v: %v", f.name, err)
}
err = file.Close()
if err == nil {
t.Errorf("%v: closing twice should return an error", f.name)
}
buf := make([]byte, 8)
n, err := file.Read(buf)
if n != 0 || err == nil {
t.Errorf("%v: could read from a closed file", f.name)
}
n, err = file.ReadAt(buf, 256)
if n != 0 || err == nil {
t.Errorf("%v: could readAt from a closed file", f.name)
}
off, err := file.Seek(0, io.SeekStart)
if off != 0 || err == nil {
t.Errorf("%v: could seek from a closed file", f.name)
}
}
}
func TestOpenFile(t *testing.T) {
for _, f := range files {
file, err := afs.OpenFile(f.name, os.O_RDONLY, 0o400)
if !f.exists {
if !errors.Is(err, syscall.ENOENT) {
t.Errorf("%v: got %v, expected%v", f.name, err, syscall.ENOENT)
}
continue
}
if err != nil {
t.Fatalf("%v: %v", f.name, err)
}
file.Close()
_, err = afs.OpenFile(f.name, os.O_CREATE, 0o600)
if !errors.Is(err, syscall.EPERM) {
t.Errorf("%v: open for write: got %v, expected %v", f.name, err, syscall.EPERM)
}
}
}
func TestFsStat(t *testing.T) {
for _, f := range files {
fi, err := afs.Stat(f.name)
if !f.exists {
if !errors.Is(err, syscall.ENOENT) {
t.Errorf("%v: got %v, expected%v", f.name, err, syscall.ENOENT)
}
continue
}
if err != nil {
t.Fatalf("stat %v: got error '%v'", f.name, err)
}
if isdir := fi.IsDir(); isdir != f.isdir {
t.Errorf("%v directory, got: %v, expected: %v", f.name, isdir, f.isdir)
}
if size := fi.Size(); size != f.size {
t.Errorf("%v size, got: %v, expected: %v", f.name, size, f.size)
}
}
}
func TestReaddir(t *testing.T) {
for _, d := range dirs {
dir, err := afs.Open(d.name)
if err != nil {
t.Fatal(err)
}
fi, err := dir.Readdir(0)
if err != nil {
t.Fatal(err)
}
var names []string
for _, f := range fi {
names = append(names, f.Name())
}
if !reflect.DeepEqual(names, d.children) {
t.Errorf("%v: children, got '%v', expected '%v'", d.name, names, d.children)
}
fi, err = dir.Readdir(1)
if err != nil {
t.Fatal(err)
}
names = []string{}
for _, f := range fi {
names = append(names, f.Name())
}
if !reflect.DeepEqual(names, d.children[0:1]) {
t.Errorf("%v: children, got '%v', expected '%v'", d.name, names, d.children[0:1])
}
}
dir, err := afs.Open("/testFile")
if err != nil {
t.Fatal(err)
}
_, err = dir.Readdir(-1)
if err != syscall.ENOTDIR {
t.Fatal("Expected error")
}
}
func TestReaddirnames(t *testing.T) {
for _, d := range dirs {
dir, err := afs.Open(d.name)
if err != nil {
t.Fatal(err)
}
names, err := dir.Readdirnames(0)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(names, d.children) {
t.Errorf("%v: children, got '%v', expected '%v'", d.name, names, d.children)
}
names, err = dir.Readdirnames(1)
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(names, d.children[0:1]) {
t.Errorf("%v: children, got '%v', expected '%v'", d.name, names, d.children[0:1])
}
}
dir, err := afs.Open("/testFile")
if err != nil {
t.Fatal(err)
}
_, err = dir.Readdirnames(-1)
if err != syscall.ENOTDIR {
t.Fatal("Expected error")
}
}
func TestGlob(t *testing.T) {
for _, s := range []struct {
glob string
entries []string
}{
{filepath.FromSlash("/*"), []string{filepath.FromSlash("/sub"), filepath.FromSlash("/testDir1"), filepath.FromSlash("/testFile")}},
{filepath.FromSlash("*"), []string{filepath.FromSlash("sub"), filepath.FromSlash("testDir1"), filepath.FromSlash("testFile")}},
{filepath.FromSlash("sub/*"), []string{filepath.FromSlash("sub/testDir2")}},
{filepath.FromSlash("sub/testDir2/*"), []string{filepath.FromSlash("sub/testDir2/testFile")}},
{filepath.FromSlash("testDir1/*"), []string{filepath.FromSlash("testDir1/testFile")}},
} {
entries, err := afero.Glob(afs.Fs, s.glob)
if err != nil {
t.Error(err)
}
if reflect.DeepEqual(entries, s.entries) {
t.Logf("glob: %s: glob ok", s.glob)
} else {
t.Errorf("glob: %s: got %#v, expected %#v", s.glob, entries, s.entries)
}
}
}
spf13-afero-18d690e/tarfs/testdata/ 0000775 0000000 0000000 00000000000 15057601571 0017041 5 ustar 00root root 0000000 0000000 spf13-afero-18d690e/tarfs/testdata/t.tar 0000664 0000000 0000000 00000074000 15057601571 0020015 0 ustar 00root root 0000000 0000000 sub/ 0000755 0001750 0001750 00000000000 13203043560 012023 5 ustar agimenez agimenez sub/testDir2/ 0000755 0001750 0001750 00000000000 13203044274 013526 5 ustar agimenez agimenez sub/testDir2/testFile 0000644 0001750 0001750 00000020000 13203043633 015216 0 ustar agimenez agimenez cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc
ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddd
dddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddddtestDir1/ 0000755 0001750 0001750 00000000000 13203044262 012731 5 ustar agimenez agimenez testDir1/testFile 0000644 0001750 0001750 00000020000 13203043607 014425 0 ustar agimenez agimenez 
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbcccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccc
cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccctestFile 0000644 0001750 0001750 00000020000 13203040337 012723 0 ustar agimenez agimenez aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa
aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb
bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb spf13-afero-18d690e/unionFile.go 0000664 0000000 0000000 00000015537 15057601571 0016403 0 ustar 00root root 0000000 0000000 package afero
import (
"io"
"os"
"path/filepath"
"syscall"
)
// UnionFile implements the afero.File interface and is returned when reading
// a directory that is present at least in the overlay, or when opening a file
// for writing.
//
// Calls to Readdir() and Readdirnames() merge the os.FileInfo entries / names
// from the base and the overlay; for files present in both layers, only the
// entry from the overlay is used.
//
// When opening files for writing (Create() / OpenFile() with the right flags),
// the operations are performed in both layers, starting with the overlay. A
// successful read in the overlay also advances the cursor position in the base
// layer by the number of bytes read.
type UnionFile struct {
Base File
Layer File
Merger DirsMerger
off int
files []os.FileInfo
}
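// The sketch below is illustrative only and not part of the afero API: it
// shows how a union-style file usually comes into existence through a
// copy-on-write filesystem rather than by filling in the struct by hand.
// The helper name and paths are hypothetical.
func exampleUnionDirectoryView() ([]os.FileInfo, error) {
	base := NewMemMapFs()
	layer := NewMemMapFs()
	_ = WriteFile(base, "/docs/base.txt", []byte("base"), 0o644)
	_ = WriteFile(layer, "/docs/layer.txt", []byte("layer"), 0o644)
	// Reading a directory that exists in both layers yields a merged view,
	// typically backed by a *UnionFile as described above.
	union := NewCopyOnWriteFs(base, layer)
	dir, err := union.Open("/docs")
	if err != nil {
		return nil, err
	}
	defer dir.Close()
	return dir.Readdir(-1)
}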
func (f *UnionFile) Close() error {
// first close base, so we have a newer timestamp in the overlay. If we'd close
// the overlay first, we'd get a cacheStale the next time we access this file
// -> cache would be useless ;-)
if f.Base != nil {
f.Base.Close()
}
if f.Layer != nil {
return f.Layer.Close()
}
return BADFD
}
func (f *UnionFile) Read(s []byte) (int, error) {
if f.Layer != nil {
n, err := f.Layer.Read(s)
if (err == nil || err == io.EOF) && f.Base != nil {
// advance the file position also in the base file, the next
// call may be a write at this position (or a seek with SEEK_CUR)
if _, seekErr := f.Base.Seek(int64(n), io.SeekCurrent); seekErr != nil {
// only overwrite err if the seek fails: we still need to
// report a possible io.EOF to the caller
err = seekErr
}
}
return n, err
}
if f.Base != nil {
return f.Base.Read(s)
}
return 0, BADFD
}
func (f *UnionFile) ReadAt(s []byte, o int64) (int, error) {
if f.Layer != nil {
n, err := f.Layer.ReadAt(s, o)
if (err == nil || err == io.EOF) && f.Base != nil {
_, err = f.Base.Seek(o+int64(n), io.SeekStart)
}
return n, err
}
if f.Base != nil {
return f.Base.ReadAt(s, o)
}
return 0, BADFD
}
func (f *UnionFile) Seek(o int64, w int) (pos int64, err error) {
if f.Layer != nil {
pos, err = f.Layer.Seek(o, w)
if (err == nil || err == io.EOF) && f.Base != nil {
_, err = f.Base.Seek(o, w)
}
return pos, err
}
if f.Base != nil {
return f.Base.Seek(o, w)
}
return 0, BADFD
}
func (f *UnionFile) Write(s []byte) (n int, err error) {
if f.Layer != nil {
n, err = f.Layer.Write(s)
if err == nil &&
f.Base != nil { // hmm, do we have fixed size files where a write may hit the EOF mark?
_, err = f.Base.Write(s)
}
return n, err
}
if f.Base != nil {
return f.Base.Write(s)
}
return 0, BADFD
}
func (f *UnionFile) WriteAt(s []byte, o int64) (n int, err error) {
if f.Layer != nil {
n, err = f.Layer.WriteAt(s, o)
if err == nil && f.Base != nil {
_, err = f.Base.WriteAt(s, o)
}
return n, err
}
if f.Base != nil {
return f.Base.WriteAt(s, o)
}
return 0, BADFD
}
func (f *UnionFile) Name() string {
if f.Layer != nil {
return f.Layer.Name()
}
return f.Base.Name()
}
// DirsMerger is how UnionFile weaves two directories together.
// It takes the FileInfo slices from the layer and the base and returns a
// single view.
type DirsMerger func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error)
var defaultUnionMergeDirsFn = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) {
files := make(map[string]os.FileInfo)
for _, fi := range lofi {
files[fi.Name()] = fi
}
for _, fi := range bofi {
if _, exists := files[fi.Name()]; !exists {
files[fi.Name()] = fi
}
}
rfi := make([]os.FileInfo, len(files))
i := 0
for _, fi := range files {
rfi[i] = fi
i++
}
return rfi, nil
}
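// A hedged sketch of a custom DirsMerger (illustrative, not part of the API):
// unlike the default above, this variant lets entries from the base shadow the
// overlay. It can be assigned to the Merger field of a UnionFile to change how
// Readdir weaves the two directories together.
var exampleBaseWinsMergeDirsFn DirsMerger = func(lofi, bofi []os.FileInfo) ([]os.FileInfo, error) {
	files := make(map[string]os.FileInfo)
	// Register base entries first so they win on name collisions.
	for _, fi := range bofi {
		files[fi.Name()] = fi
	}
	for _, fi := range lofi {
		if _, exists := files[fi.Name()]; !exists {
			files[fi.Name()] = fi
		}
	}
	merged := make([]os.FileInfo, 0, len(files))
	for _, fi := range files {
		merged = append(merged, fi)
	}
	return merged, nil
}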
// Readdir weaves the two directories together and
// returns a single view of the overlaid directories.
// When the end of the directory view is reached and c > 0, the error is io.EOF.
func (f *UnionFile) Readdir(c int) (ofi []os.FileInfo, err error) {
merge := f.Merger
if merge == nil {
merge = defaultUnionMergeDirsFn
}
if f.off == 0 {
var lfi []os.FileInfo
if f.Layer != nil {
lfi, err = f.Layer.Readdir(-1)
if err != nil {
return nil, err
}
}
var bfi []os.FileInfo
if f.Base != nil {
bfi, err = f.Base.Readdir(-1)
if err != nil {
return nil, err
}
}
merged, err := merge(lfi, bfi)
if err != nil {
return nil, err
}
f.files = append(f.files, merged...)
}
files := f.files[f.off:]
if c <= 0 {
return files, nil
}
if len(files) == 0 {
return nil, io.EOF
}
if c > len(files) {
c = len(files)
}
defer func() { f.off += c }()
return files[:c], nil
}
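// Illustrative only: reading a merged directory in fixed-size pages until
// io.EOF, following the contract documented on Readdir above. The helper name
// examplePagedReaddir is hypothetical.
func examplePagedReaddir(dir File) ([]os.FileInfo, error) {
	var all []os.FileInfo
	for {
		page, err := dir.Readdir(2) // with c > 0, io.EOF signals the end of the view
		if err == io.EOF {
			return all, nil
		}
		if err != nil {
			return nil, err
		}
		all = append(all, page...)
	}
}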
func (f *UnionFile) Readdirnames(c int) ([]string, error) {
rfi, err := f.Readdir(c)
if err != nil {
return nil, err
}
var names []string
for _, fi := range rfi {
names = append(names, fi.Name())
}
return names, nil
}
func (f *UnionFile) Stat() (os.FileInfo, error) {
if f.Layer != nil {
return f.Layer.Stat()
}
if f.Base != nil {
return f.Base.Stat()
}
return nil, BADFD
}
func (f *UnionFile) Sync() (err error) {
if f.Layer != nil {
err = f.Layer.Sync()
if err == nil && f.Base != nil {
err = f.Base.Sync()
}
return err
}
if f.Base != nil {
return f.Base.Sync()
}
return BADFD
}
func (f *UnionFile) Truncate(s int64) (err error) {
if f.Layer != nil {
err = f.Layer.Truncate(s)
if err == nil && f.Base != nil {
err = f.Base.Truncate(s)
}
return err
}
if f.Base != nil {
return f.Base.Truncate(s)
}
return BADFD
}
func (f *UnionFile) WriteString(s string) (n int, err error) {
if f.Layer != nil {
n, err = f.Layer.WriteString(s)
if err == nil && f.Base != nil {
_, err = f.Base.WriteString(s)
}
return n, err
}
if f.Base != nil {
return f.Base.WriteString(s)
}
return 0, BADFD
}
func copyFile(base Fs, layer Fs, name string, bfh File) error {
// First make sure the directory exists
exists, err := Exists(layer, filepath.Dir(name))
if err != nil {
return err
}
if !exists {
err = layer.MkdirAll(filepath.Dir(name), 0o777) // FIXME?
if err != nil {
return err
}
}
// Create the file on the overlay
lfh, err := layer.Create(name)
if err != nil {
return err
}
n, err := io.Copy(lfh, bfh)
if err != nil {
// If anything fails, clean up the file
layer.Remove(name)
lfh.Close()
return err
}
bfi, err := bfh.Stat()
if err != nil || bfi.Size() != n {
layer.Remove(name)
lfh.Close()
return syscall.EIO
}
err = lfh.Close()
if err != nil {
layer.Remove(name)
lfh.Close()
return err
}
return layer.Chtimes(name, bfi.ModTime(), bfi.ModTime())
}
func copyToLayer(base Fs, layer Fs, name string) error {
bfh, err := base.Open(name)
if err != nil {
return err
}
defer bfh.Close()
return copyFile(base, layer, name, bfh)
}
func copyFileToLayer(base Fs, layer Fs, name string, flag int, perm os.FileMode) error {
bfh, err := base.OpenFile(name, flag, perm)
if err != nil {
return err
}
defer bfh.Close()
return copyFile(base, layer, name, bfh)
}
spf13-afero-18d690e/util.go 0000664 0000000 0000000 00000016245 15057601571 0015425 0 ustar 00root root 0000000 0000000 // Copyright ©2015 Steve Francia
// Portions Copyright ©2015 The Hugo Authors
// Portions Copyright 2016-present Bjørn Erik Pedersen
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package afero
import (
"bytes"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"unicode"
"golang.org/x/text/runes"
"golang.org/x/text/transform"
"golang.org/x/text/unicode/norm"
)
// FilePathSeparator is the filepath separator defined by filepath.Separator.
const FilePathSeparator = string(filepath.Separator)
// WriteReader takes a reader and a path and writes the content to a file at that path.
func (a Afero) WriteReader(path string, r io.Reader) (err error) {
return WriteReader(a.Fs, path, r)
}
func WriteReader(fs Fs, path string, r io.Reader) (err error) {
dir, _ := filepath.Split(path)
ospath := filepath.FromSlash(dir)
if ospath != "" {
err = fs.MkdirAll(ospath, 0o777) // rwx for user, group and other
if err != nil {
if !os.IsExist(err) {
return err
}
}
}
file, err := fs.Create(path)
if err != nil {
return
}
defer file.Close()
_, err = io.Copy(file, r)
return
}
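// A minimal usage sketch (illustrative, not part of the API): stream an
// io.Reader into a file on any afero filesystem, letting WriteReader create
// the parent directories. The filesystem and path are hypothetical.
func exampleWriteReader() error {
	memFs := NewMemMapFs()
	src := strings.NewReader("hello afero")
	return WriteReader(memFs, "/tmp/example/greeting.txt", src)
}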
// SafeWriteReader is the same as WriteReader, but it returns an error if the target path already exists.
func (a Afero) SafeWriteReader(path string, r io.Reader) (err error) {
return SafeWriteReader(a.Fs, path, r)
}
func SafeWriteReader(fs Fs, path string, r io.Reader) (err error) {
dir, _ := filepath.Split(path)
ospath := filepath.FromSlash(dir)
if ospath != "" {
err = fs.MkdirAll(ospath, 0o777) // rwx for user, group and other
if err != nil {
return
}
}
exists, err := Exists(fs, path)
if err != nil {
return
}
if exists {
return fmt.Errorf("%v already exists", path)
}
file, err := fs.Create(path)
if err != nil {
return
}
defer file.Close()
_, err = io.Copy(file, r)
return
}
func (a Afero) GetTempDir(subPath string) string {
return GetTempDir(a.Fs, subPath)
}
// GetTempDir returns the default temp directory with a trailing slash.
// If subPath is not empty, it will be created recursively under the temp
// directory with permission mode 0o777 (rwx rwx rwx).
func GetTempDir(fs Fs, subPath string) string {
addSlash := func(p string) string {
if FilePathSeparator != p[len(p)-1:] {
p = p + FilePathSeparator
}
return p
}
dir := addSlash(os.TempDir())
if subPath != "" {
// preserve windows backslash :-(
if FilePathSeparator == "\\" {
subPath = strings.ReplaceAll(subPath, "\\", "____")
}
dir = dir + UnicodeSanitize(subPath)
if FilePathSeparator == "\\" {
dir = strings.ReplaceAll(dir, "____", "\\")
}
if exists, _ := Exists(fs, dir); exists {
return addSlash(dir)
}
err := fs.MkdirAll(dir, 0o777)
if err != nil {
panic(err)
}
dir = addSlash(dir)
}
return dir
}
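// Illustrative sketch: GetTempDir returns (and creates, if necessary) a
// sanitized subdirectory of the OS temp dir, always with a trailing separator.
// The subPath used here is hypothetical.
func exampleGetTempDir() string {
	memFs := NewMemMapFs()
	return GetTempDir(memFs, "afero-example") // e.g. "/tmp/afero-example/" on Linux
}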
// UnicodeSanitize rewrites a string to remove non-standard path characters.
func UnicodeSanitize(s string) string {
source := []rune(s)
target := make([]rune, 0, len(source))
for _, r := range source {
if unicode.IsLetter(r) ||
unicode.IsDigit(r) ||
unicode.IsMark(r) ||
r == '.' ||
r == '/' ||
r == '\\' ||
r == '_' ||
r == '-' ||
r == '%' ||
r == ' ' ||
r == '#' {
target = append(target, r)
}
}
return string(target)
}
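// Illustrative only: characters outside the allowed set (here ':' and ',')
// are dropped, while letters, digits, marks and the listed punctuation are kept.
func exampleUnicodeSanitize() string {
	return UnicodeSanitize("report:2024,final.txt") // -> "report2024final.txt"
}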
// NeuterAccents transforms characters with accents into their plain forms.
func NeuterAccents(s string) string {
t := transform.Chain(norm.NFD, runes.Remove(runes.In(unicode.Mn)), norm.NFC)
result, _, _ := transform.String(t, s)
return result
}
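// Illustrative only: accented characters are decomposed (NFD), the combining
// marks are removed, and the result is recomposed (NFC).
func exampleNeuterAccents() string {
	return NeuterAccents("Crème brûlée") // -> "Creme brulee"
}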
func (a Afero) FileContainsBytes(filename string, subslice []byte) (bool, error) {
return FileContainsBytes(a.Fs, filename, subslice)
}
// FileContainsBytes checks whether a file contains the specified byte slice.
func FileContainsBytes(fs Fs, filename string, subslice []byte) (bool, error) {
f, err := fs.Open(filename)
if err != nil {
return false, err
}
defer f.Close()
return readerContainsAny(f, subslice), nil
}
func (a Afero) FileContainsAnyBytes(filename string, subslices [][]byte) (bool, error) {
return FileContainsAnyBytes(a.Fs, filename, subslices)
}
// FileContainsAnyBytes checks whether a file contains any of the specified byte slices.
func FileContainsAnyBytes(fs Fs, filename string, subslices [][]byte) (bool, error) {
f, err := fs.Open(filename)
if err != nil {
return false, err
}
defer f.Close()
return readerContainsAny(f, subslices...), nil
}
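// A hedged usage sketch for the two helpers above: write a file and check it
// for several candidate byte slices. The file name and contents are hypothetical.
func exampleFileContains() (bool, error) {
	memFs := NewMemMapFs()
	if err := WriteFile(memFs, "/log.txt", []byte("level=error msg=boom"), 0o644); err != nil {
		return false, err
	}
	return FileContainsAnyBytes(memFs, "/log.txt", [][]byte{[]byte("warn"), []byte("error")})
}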
// readerContainsAny reports whether any of the subslices is within r.
func readerContainsAny(r io.Reader, subslices ...[]byte) bool {
if r == nil || len(subslices) == 0 {
return false
}
largestSlice := 0
for _, sl := range subslices {
if len(sl) > largestSlice {
largestSlice = len(sl)
}
}
if largestSlice == 0 {
return false
}
bufflen := largestSlice * 4
halflen := bufflen / 2
buff := make([]byte, bufflen)
var err error
var n, i int
for {
i++
if i == 1 {
n, err = io.ReadAtLeast(r, buff[:halflen], halflen)
} else {
if i != 2 {
// shift left to catch overlapping matches
copy(buff[:], buff[halflen:])
}
n, err = io.ReadAtLeast(r, buff[halflen:], halflen)
}
if n > 0 {
for _, sl := range subslices {
if bytes.Contains(buff, sl) {
return true
}
}
}
if err != nil {
break
}
}
return false
}
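// Illustrative only: the scan buffer is four times the largest needle; once it
// is full, the upper half is shifted down before each refill, so a match that
// straddles two reads is still found inside the overlapping window.
func exampleReaderContainsAny() bool {
	return readerContainsAny(strings.NewReader("a haystack with a needle inside"), []byte("needle"))
}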
func (a Afero) DirExists(path string) (bool, error) {
return DirExists(a.Fs, path)
}
// DirExists checks if a path exists and is a directory.
func DirExists(fs Fs, path string) (bool, error) {
fi, err := fs.Stat(path)
if err == nil && fi.IsDir() {
return true, nil
}
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
func (a Afero) IsDir(path string) (bool, error) {
return IsDir(a.Fs, path)
}
// IsDir checks if a given path is a directory.
func IsDir(fs Fs, path string) (bool, error) {
fi, err := fs.Stat(path)
if err != nil {
return false, err
}
return fi.IsDir(), nil
}
func (a Afero) IsEmpty(path string) (bool, error) {
return IsEmpty(a.Fs, path)
}
// IsEmpty checks if a given file or directory is empty.
func IsEmpty(fs Fs, path string) (bool, error) {
if b, _ := Exists(fs, path); !b {
return false, fmt.Errorf("%q path does not exist", path)
}
fi, err := fs.Stat(path)
if err != nil {
return false, err
}
if fi.IsDir() {
f, err := fs.Open(path)
if err != nil {
return false, err
}
defer f.Close()
list, err := f.Readdir(-1)
if err != nil {
return false, err
}
return len(list) == 0, nil
}
return fi.Size() == 0, nil
}
func (a Afero) Exists(path string) (bool, error) {
return Exists(a.Fs, path)
}
// Exists checks if a file or directory exists.
func Exists(fs Fs, path string) (bool, error) {
_, err := fs.Stat(path)
if err == nil {
return true, nil
}
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
func FullBaseFsPath(basePathFs *BasePathFs, relativePath string) string {
combinedPath := filepath.Join(basePathFs.path, relativePath)
if parent, ok := basePathFs.source.(*BasePathFs); ok {
return FullBaseFsPath(parent, combinedPath)
}
return combinedPath
}
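// Illustrative sketch: FullBaseFsPath resolves a name inside (possibly nested)
// BasePathFs layers back to a path on the underlying filesystem. The base
// paths used here are hypothetical; the joined result assumes slash separators.
func exampleFullBaseFsPath() string {
	memFs := NewMemMapFs()
	level1 := NewBasePathFs(memFs, "/srv")
	level2 := NewBasePathFs(level1, "app")
	return FullBaseFsPath(level2.(*BasePathFs), "config.yaml") // -> "/srv/app/config.yaml"
}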
spf13-afero-18d690e/util_test.go 0000664 0000000 0000000 00000031457 15057601571 0016466 0 ustar 00root root 0000000 0000000 // Copyright ©2015 Steve Francia
// Portions Copyright ©2015 The Hugo Authors
//
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package afero
import (
"fmt"
"os"
"path/filepath"
"strconv"
"strings"
"testing"
"time"
)
var testFS = new(MemMapFs)
func TestDirExists(t *testing.T) {
type test struct {
input string
expected bool
}
// First create a couple directories so there is something in the filesystem
// testFS := new(MemMapFs)
testFS.MkdirAll("/foo/bar", 0o777)
data := []test{
{".", true},
{"./", true},
{"..", true},
{"../", true},
{"./..", true},
{"./../", true},
{"/foo/", true},
{"/foo", true},
{"/foo/bar", true},
{"/foo/bar/", true},
{"/", true},
{"/some-really-random-directory-name", false},
{"/some/really/random/directory/name", false},
{"./some-really-random-local-directory-name", false},
{"./some/really/random/local/directory/name", false},
}
for i, d := range data {
exists, _ := DirExists(testFS, filepath.FromSlash(d.input))
if d.expected != exists {
t.Errorf("Test %d %q failed. Expected %t got %t", i, d.input, d.expected, exists)
}
}
}
func TestIsDir(t *testing.T) {
testFS = new(MemMapFs)
type test struct {
input string
expected bool
}
data := []test{
{"./", true},
{"/", true},
{"./this-directory-does-not-existi", false},
{"/this-absolute-directory/does-not-exist", false},
}
for i, d := range data {
exists, _ := IsDir(testFS, d.input)
if d.expected != exists {
t.Errorf("Test %d failed. Expected %t got %t", i, d.expected, exists)
}
}
}
func TestIsEmpty(t *testing.T) {
testFS = new(MemMapFs)
zeroSizedFile, _ := createZeroSizedFileInTempDir()
defer deleteFileInTempDir(zeroSizedFile)
nonZeroSizedFile, _ := createNonZeroSizedFileInTempDir()
defer deleteFileInTempDir(nonZeroSizedFile)
emptyDirectory, _ := createEmptyTempDir()
defer deleteTempDir(emptyDirectory)
nonEmptyZeroLengthFilesDirectory, _ := createTempDirWithZeroLengthFiles()
defer deleteTempDir(nonEmptyZeroLengthFilesDirectory)
nonEmptyNonZeroLengthFilesDirectory, _ := createTempDirWithNonZeroLengthFiles()
defer deleteTempDir(nonEmptyNonZeroLengthFilesDirectory)
nonExistentFile := os.TempDir() + "/this-file-does-not-exist.txt"
nonExistentDir := os.TempDir() + "/this/directory/does/not/exist/"
fileDoesNotExist := fmt.Errorf("%q path does not exist", nonExistentFile)
dirDoesNotExist := fmt.Errorf("%q path does not exist", nonExistentDir)
type test struct {
input string
expectedResult bool
expectedErr error
}
data := []test{
{zeroSizedFile.Name(), true, nil},
{nonZeroSizedFile.Name(), false, nil},
{emptyDirectory, true, nil},
{nonEmptyZeroLengthFilesDirectory, false, nil},
{nonEmptyNonZeroLengthFilesDirectory, false, nil},
{nonExistentFile, false, fileDoesNotExist},
{nonExistentDir, false, dirDoesNotExist},
}
for i, d := range data {
exists, err := IsEmpty(testFS, d.input)
if d.expectedResult != exists {
t.Errorf(
"Test %d %q failed exists. Expected result %t got %t",
i,
d.input,
d.expectedResult,
exists,
)
}
if d.expectedErr != nil {
if d.expectedErr.Error() != err.Error() {
t.Errorf(
"Test %d failed with err. Expected %q(%#v) got %q(%#v)",
i,
d.expectedErr,
d.expectedErr,
err,
err,
)
}
} else {
if d.expectedErr != err {
t.Errorf("Test %d failed. Expected error %q(%#v) got %q(%#v)", i, d.expectedErr, d.expectedErr, err, err)
}
}
}
}
func TestReaderContains(t *testing.T) {
for i, this := range []struct {
v1 string
v2 [][]byte
expect bool
}{
{"abc", [][]byte{[]byte("a")}, true},
{"abc", [][]byte{[]byte("b")}, true},
{"abcdefg", [][]byte{[]byte("efg")}, true},
{"abc", [][]byte{[]byte("d")}, false},
{"abc", [][]byte{[]byte("d"), []byte("e")}, false},
{"abc", [][]byte{[]byte("d"), []byte("a")}, true},
{"abc", [][]byte{[]byte("b"), []byte("e")}, true},
{"", nil, false},
{"", [][]byte{[]byte("a")}, false},
{"a", [][]byte{[]byte("")}, false},
{"", [][]byte{[]byte("")}, false},
} {
result := readerContainsAny(strings.NewReader(this.v1), this.v2...)
if result != this.expect {
t.Errorf("[%d] readerContains: got %t but expected %t", i, result, this.expect)
}
}
if readerContainsAny(nil, []byte("a")) {
t.Error("readerContains with nil reader")
}
if readerContainsAny(nil, nil) {
t.Error("readerContains with nil arguments")
}
}
func createZeroSizedFileInTempDir() (File, error) {
filePrefix := "_path_test_"
f, e := TempFile(testFS, "", filePrefix) // dir is os.TempDir()
if e != nil {
// if there was an error no file was created.
// => no requirement to delete the file
return nil, e
}
return f, nil
}
func createNonZeroSizedFileInTempDir() (File, error) {
f, err := createZeroSizedFileInTempDir()
if err != nil {
return nil, err
}
byteString := []byte("byteString")
err = WriteFile(testFS, f.Name(), byteString, 0o644)
if err != nil {
// delete the file
deleteFileInTempDir(f)
return nil, err
}
return f, nil
}
func deleteFileInTempDir(f File) {
err := testFS.Remove(f.Name())
if err != nil {
panic(err)
}
}
func createEmptyTempDir() (string, error) {
dirPrefix := "_dir_prefix_"
d, e := TempDir(testFS, "", dirPrefix) // will be in os.TempDir()
if e != nil {
// no directory to delete - it was never created
return "", e
}
return d, nil
}
func createTempDirWithZeroLengthFiles() (string, error) {
d, dirErr := createEmptyTempDir()
if dirErr != nil {
return "", dirErr
}
filePrefix := "_path_test_"
_, fileErr := TempFile(testFS, d, filePrefix) // the file is created inside d (under os.TempDir())
if fileErr != nil {
// if there was an error no file was created.
// but we need to remove the directory to clean-up
deleteTempDir(d)
return "", fileErr
}
// the dir now has one, zero length file in it
return d, nil
}
func createTempDirWithNonZeroLengthFiles() (string, error) {
d, dirErr := createEmptyTempDir()
if dirErr != nil {
return "", dirErr
}
filePrefix := "_path_test_"
f, fileErr := TempFile(testFS, d, filePrefix) // the file is created inside d (under os.TempDir())
if fileErr != nil {
// if there was an error no file was created.
// but we need to remove the directory to clean-up
deleteTempDir(d)
return "", fileErr
}
byteString := []byte("byteString")
fileErr = WriteFile(testFS, f.Name(), byteString, 0o644)
if fileErr != nil {
// delete the file
deleteFileInTempDir(f)
// also delete the directory
deleteTempDir(d)
return "", fileErr
}
// the dir now has one, non-zero length file in it
return d, nil
}
func TestExists(t *testing.T) {
zeroSizedFile, _ := createZeroSizedFileInTempDir()
defer deleteFileInTempDir(zeroSizedFile)
nonZeroSizedFile, _ := createNonZeroSizedFileInTempDir()
defer deleteFileInTempDir(nonZeroSizedFile)
emptyDirectory, _ := createEmptyTempDir()
defer deleteTempDir(emptyDirectory)
nonExistentFile := os.TempDir() + "/this-file-does-not-exist.txt"
nonExistentDir := os.TempDir() + "/this/directory/does/not/exist/"
type test struct {
input string
expectedResult bool
expectedErr error
}
data := []test{
{zeroSizedFile.Name(), true, nil},
{nonZeroSizedFile.Name(), true, nil},
{emptyDirectory, true, nil},
{nonExistentFile, false, nil},
{nonExistentDir, false, nil},
}
for i, d := range data {
exists, err := Exists(testFS, d.input)
if d.expectedResult != exists {
t.Errorf("Test %d failed. Expected result %t got %t", i, d.expectedResult, exists)
}
if d.expectedErr != err {
t.Errorf("Test %d failed. Expected %q got %q", i, d.expectedErr, err)
}
}
}
func TestSafeWriteToDisk(t *testing.T) {
emptyFile, _ := createZeroSizedFileInTempDir()
defer deleteFileInTempDir(emptyFile)
tmpDir, _ := createEmptyTempDir()
defer deleteTempDir(tmpDir)
randomString := "This is a random string!"
reader := strings.NewReader(randomString)
fileExists := fmt.Errorf("%v already exists", emptyFile.Name())
type test struct {
filename string
expectedErr error
}
now := time.Now().Unix()
nowStr := strconv.FormatInt(now, 10)
data := []test{
{emptyFile.Name(), fileExists},
{tmpDir + "/" + nowStr, nil},
}
for i, d := range data {
e := SafeWriteReader(testFS, d.filename, reader)
if d.expectedErr != nil {
if d.expectedErr.Error() != e.Error() {
t.Errorf(
"Test %d failed. Expected error %q but got %q",
i,
d.expectedErr.Error(),
e.Error(),
)
}
} else {
if d.expectedErr != e {
t.Errorf("Test %d failed. Expected %q but got %q", i, d.expectedErr, e)
}
contents, _ := ReadFile(testFS, d.filename)
if randomString != string(contents) {
t.Errorf("Test %d failed. Expected contents %q but got %q", i, randomString, string(contents))
}
}
reader.Seek(0, 0)
}
}
func TestWriteToDisk(t *testing.T) {
emptyFile, _ := createZeroSizedFileInTempDir()
defer deleteFileInTempDir(emptyFile)
tmpDir, _ := createEmptyTempDir()
defer deleteTempDir(tmpDir)
randomString := "This is a random string!"
reader := strings.NewReader(randomString)
type test struct {
filename string
expectedErr error
}
now := time.Now().Unix()
nowStr := strconv.FormatInt(now, 10)
data := []test{
{emptyFile.Name(), nil},
{tmpDir + "/" + nowStr, nil},
}
for i, d := range data {
e := WriteReader(testFS, d.filename, reader)
if d.expectedErr != e {
t.Errorf(
"Test %d failed. WriteToDisk Error Expected %q but got %q",
i,
d.expectedErr,
e,
)
}
contents, e := ReadFile(testFS, d.filename)
if e != nil {
t.Errorf("Test %d failed. Could not read file %s. Reason: %s\n", i, d.filename, e)
}
if randomString != string(contents) {
t.Errorf(
"Test %d failed. Expected contents %q but got %q",
i,
randomString,
string(contents),
)
}
reader.Seek(0, 0)
}
}
func TestGetTempDir(t *testing.T) {
dir := os.TempDir()
if FilePathSeparator != dir[len(dir)-1:] {
dir = dir + FilePathSeparator
}
testDir := "hugoTestFolder" + FilePathSeparator
tests := []struct {
input string
expected string
}{
{"", dir},
{testDir + " Foo bar ", dir + testDir + " Foo bar " + FilePathSeparator},
{
testDir + "Foo.Bar/foo_Bar-Foo",
dir + testDir + "Foo.Bar/foo_Bar-Foo" + FilePathSeparator,
},
{testDir + "fOO,bar:foo%bAR", dir + testDir + "fOObarfoo%bAR" + FilePathSeparator},
{testDir + "FOo/BaR.html", dir + testDir + "FOo/BaR.html" + FilePathSeparator},
{testDir + "трям/трям", dir + testDir + "трям/трям" + FilePathSeparator},
{testDir + "은행", dir + testDir + "은행" + FilePathSeparator},
{testDir + "Банковский кассир", dir + testDir + "Банковский кассир" + FilePathSeparator},
}
for _, test := range tests {
output := GetTempDir(new(MemMapFs), test.input)
if output != test.expected {
t.Errorf("Expected %#v, got %#v\n", test.expected, output)
}
}
}
// This function is very dangerous. Don't use it.
func deleteTempDir(d string) {
err := os.RemoveAll(d)
if err != nil {
panic(err)
}
}
func TestFullBaseFsPath(t *testing.T) {
type dirSpec struct {
Dir1, Dir2, Dir3 string
}
dirSpecs := []dirSpec{
{Dir1: "/", Dir2: "/", Dir3: "/"},
{Dir1: "/", Dir2: "/path2", Dir3: "/"},
{Dir1: "/path1/dir", Dir2: "/path2/dir/", Dir3: "/path3/dir"},
{Dir1: "C:/path1", Dir2: "path2/dir", Dir3: "/path3/dir/"},
}
for _, ds := range dirSpecs {
memFs := NewMemMapFs()
level1Fs := NewBasePathFs(memFs, ds.Dir1)
level2Fs := NewBasePathFs(level1Fs, ds.Dir2)
level3Fs := NewBasePathFs(level2Fs, ds.Dir3)
type spec struct {
BaseFs Fs
FileName string
ExpectedPath string
}
specs := []spec{
{
BaseFs: level3Fs,
FileName: "f.txt",
ExpectedPath: filepath.Join(ds.Dir1, ds.Dir2, ds.Dir3, "f.txt"),
},
{
BaseFs: level3Fs,
FileName: "",
ExpectedPath: filepath.Join(ds.Dir1, ds.Dir2, ds.Dir3, ""),
},
{
BaseFs: level2Fs,
FileName: "f.txt",
ExpectedPath: filepath.Join(ds.Dir1, ds.Dir2, "f.txt"),
},
{BaseFs: level2Fs, FileName: "", ExpectedPath: filepath.Join(ds.Dir1, ds.Dir2, "")},
{BaseFs: level1Fs, FileName: "f.txt", ExpectedPath: filepath.Join(ds.Dir1, "f.txt")},
{BaseFs: level1Fs, FileName: "", ExpectedPath: filepath.Join(ds.Dir1, "")},
}
for _, s := range specs {
if actualPath := FullBaseFsPath(s.BaseFs.(*BasePathFs), s.FileName); actualPath != s.ExpectedPath {
t.Errorf("Expected \n%s got \n%s", s.ExpectedPath, actualPath)
}
}
}
}
spf13-afero-18d690e/zipfs/ 0000775 0000000 0000000 00000000000 15057601571 0015244 5 ustar 00root root 0000000 0000000 spf13-afero-18d690e/zipfs/file.go 0000664 0000000 0000000 00000006475 15057601571 0016526 0 ustar 00root root 0000000 0000000 package zipfs
import (
"archive/zip"
"io"
"os"
"path/filepath"
"syscall"
"github.com/spf13/afero"
)
type File struct {
fs *Fs
zipfile *zip.File
reader io.ReadCloser
offset int64
isdir, closed bool
buf []byte
}
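// fillBuffer lazily decompresses the underlying zip entry into f.buf until at
// least offset bytes are available (or the entry is exhausted), so that Read,
// ReadAt and Seek can serve arbitrary positions from a forward-only reader.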
func (f *File) fillBuffer(offset int64) (err error) {
if f.reader == nil {
if f.reader, err = f.zipfile.Open(); err != nil {
return
}
}
if offset > int64(f.zipfile.UncompressedSize64) {
offset = int64(f.zipfile.UncompressedSize64)
err = io.EOF
}
if len(f.buf) >= int(offset) {
return
}
buf := make([]byte, int(offset)-len(f.buf))
if n, readErr := io.ReadFull(f.reader, buf); n > 0 {
f.buf = append(f.buf, buf[:n]...)
} else if readErr != nil {
err = readErr
}
return
}
func (f *File) Close() (err error) {
f.zipfile = nil
f.closed = true
f.buf = nil
if f.reader != nil {
err = f.reader.Close()
f.reader = nil
}
return
}
func (f *File) Read(p []byte) (n int, err error) {
if f.isdir {
return 0, syscall.EISDIR
}
if f.closed {
return 0, afero.ErrFileClosed
}
err = f.fillBuffer(f.offset + int64(len(p)))
n = copy(p, f.buf[f.offset:])
f.offset += int64(n)
return
}
func (f *File) ReadAt(p []byte, off int64) (n int, err error) {
if f.isdir {
return 0, syscall.EISDIR
}
if f.closed {
return 0, afero.ErrFileClosed
}
err = f.fillBuffer(off + int64(len(p)))
n = copy(p, f.buf[int(off):])
return
}
func (f *File) Seek(offset int64, whence int) (int64, error) {
if f.isdir {
return 0, syscall.EISDIR
}
if f.closed {
return 0, afero.ErrFileClosed
}
switch whence {
case io.SeekStart:
case io.SeekCurrent:
offset += f.offset
case io.SeekEnd:
offset += int64(f.zipfile.UncompressedSize64)
default:
return 0, syscall.EINVAL
}
if offset < 0 || offset > int64(f.zipfile.UncompressedSize64) {
return 0, afero.ErrOutOfRange
}
f.offset = offset
return offset, nil
}
func (f *File) Write(p []byte) (n int, err error) { return 0, syscall.EPERM }
func (f *File) WriteAt(p []byte, off int64) (n int, err error) { return 0, syscall.EPERM }
func (f *File) Name() string {
if f.zipfile == nil {
return string(filepath.Separator)
}
return filepath.Join(splitpath(f.zipfile.Name))
}
func (f *File) getDirEntries() (map[string]*zip.File, error) {
if !f.isdir {
return nil, syscall.ENOTDIR
}
name := f.Name()
entries, ok := f.fs.files[name]
if !ok {
return nil, &os.PathError{Op: "readdir", Path: name, Err: syscall.ENOENT}
}
return entries, nil
}
func (f *File) Readdir(count int) (fi []os.FileInfo, err error) {
zipfiles, err := f.getDirEntries()
if err != nil {
return nil, err
}
for _, zipfile := range zipfiles {
fi = append(fi, zipfile.FileInfo())
if count > 0 && len(fi) >= count {
break
}
}
return
}
func (f *File) Readdirnames(count int) (names []string, err error) {
zipfiles, err := f.getDirEntries()
if err != nil {
return nil, err
}
for filename := range zipfiles {
names = append(names, filename)
if count > 0 && len(names) >= count {
break
}
}
return
}
func (f *File) Stat() (os.FileInfo, error) {
if f.zipfile == nil {
return &pseudoRoot{}, nil
}
return f.zipfile.FileInfo(), nil
}
func (f *File) Sync() error { return nil }
func (f *File) Truncate(size int64) error { return syscall.EPERM }
func (f *File) WriteString(s string) (ret int, err error) { return 0, syscall.EPERM }
spf13-afero-18d690e/zipfs/file_test.go 0000664 0000000 0000000 00000001574 15057601571 0017560 0 ustar 00root root 0000000 0000000 package zipfs
import (
"archive/zip"
"io"
"testing"
)
func TestFileRead(t *testing.T) {
zrc, err := zip.OpenReader("testdata/small.zip")
if err != nil {
t.Fatal(err)
}
zfs := New(&zrc.Reader)
f, err := zfs.Open("smallFile")
if err != nil {
t.Fatal(err)
}
info, err := f.Stat()
if err != nil {
t.Fatal(err)
}
chunkSize := info.Size() * 2 // read with extra large buffer
buf := make([]byte, chunkSize)
n, err := f.Read(buf)
if err != io.EOF {
t.Fatal("Failed to read file to completion:", err)
}
if n != int(info.Size()) {
t.Errorf("Expected read length to be %d, found: %d", info.Size(), n)
}
// read a second time to check f.offset and f.buf are correct
buf = make([]byte, chunkSize)
n, err = f.Read(buf)
if err != io.EOF {
t.Fatal("Failed to read a fully read file:", err)
}
if n != 0 {
t.Errorf("Expected read length to be 0, found: %d", n)
}
}
spf13-afero-18d690e/zipfs/fs.go 0000664 0000000 0000000 00000006025 15057601571 0016206 0 ustar 00root root 0000000 0000000 package zipfs
import (
"archive/zip"
"os"
"path/filepath"
"syscall"
"time"
"github.com/spf13/afero"
)
type Fs struct {
r *zip.Reader
files map[string]map[string]*zip.File
}
func splitpath(name string) (dir, file string) {
name = filepath.ToSlash(name)
if len(name) == 0 || name[0] != '/' {
name = "/" + name
}
name = filepath.Clean(name)
dir, file = filepath.Split(name)
dir = filepath.Clean(dir)
return
}
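// Illustrative behaviour of splitpath on a slash-separated OS (values are
// examples; filepath.Clean normalizes separators per platform):
//
//	splitpath("a/b/c.txt") -> dir "/a/b", file "c.txt"
//	splitpath("/")         -> dir "/",    file ""
//	splitpath("")          -> dir "/",    file ""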
func New(r *zip.Reader) afero.Fs {
fs := &Fs{r: r, files: make(map[string]map[string]*zip.File)}
for _, file := range r.File {
d, f := splitpath(file.Name)
if _, ok := fs.files[d]; !ok {
fs.files[d] = make(map[string]*zip.File)
}
if _, ok := fs.files[d][f]; !ok {
fs.files[d][f] = file
}
if file.FileInfo().IsDir() {
dirname := filepath.Join(d, f)
if _, ok := fs.files[dirname]; !ok {
fs.files[dirname] = make(map[string]*zip.File)
}
}
}
return fs
}
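// A hedged usage sketch (not part of the package API): wrap a zip archive in a
// read-only afero filesystem and read a single entry. The archive path and the
// entry name are hypothetical.
func exampleNew() ([]byte, error) {
	zrc, err := zip.OpenReader("archive.zip")
	if err != nil {
		return nil, err
	}
	defer zrc.Close()
	zfs := New(&zrc.Reader)
	return afero.ReadFile(zfs, "docs/readme.txt")
}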
func (fs *Fs) Create(name string) (afero.File, error) { return nil, syscall.EPERM }
func (fs *Fs) Mkdir(name string, perm os.FileMode) error { return syscall.EPERM }
func (fs *Fs) MkdirAll(path string, perm os.FileMode) error { return syscall.EPERM }
func (fs *Fs) Open(name string) (afero.File, error) {
d, f := splitpath(name)
if f == "" {
return &File{fs: fs, isdir: true}, nil
}
if _, ok := fs.files[d]; !ok {
return nil, &os.PathError{Op: "stat", Path: name, Err: syscall.ENOENT}
}
file, ok := fs.files[d][f]
if !ok {
return nil, &os.PathError{Op: "stat", Path: name, Err: syscall.ENOENT}
}
return &File{fs: fs, zipfile: file, isdir: file.FileInfo().IsDir()}, nil
}
func (fs *Fs) OpenFile(name string, flag int, perm os.FileMode) (afero.File, error) {
if flag != os.O_RDONLY {
return nil, syscall.EPERM
}
return fs.Open(name)
}
func (fs *Fs) Remove(name string) error { return syscall.EPERM }
func (fs *Fs) RemoveAll(path string) error { return syscall.EPERM }
func (fs *Fs) Rename(oldname, newname string) error { return syscall.EPERM }
type pseudoRoot struct{}
func (p *pseudoRoot) Name() string { return string(filepath.Separator) }
func (p *pseudoRoot) Size() int64 { return 0 }
func (p *pseudoRoot) Mode() os.FileMode { return os.ModeDir | os.ModePerm }
func (p *pseudoRoot) ModTime() time.Time { return time.Now() }
func (p *pseudoRoot) IsDir() bool { return true }
func (p *pseudoRoot) Sys() interface{} { return nil }
func (fs *Fs) Stat(name string) (os.FileInfo, error) {
d, f := splitpath(name)
if f == "" {
return &pseudoRoot{}, nil
}
if _, ok := fs.files[d]; !ok {
return nil, &os.PathError{Op: "stat", Path: name, Err: syscall.ENOENT}
}
file, ok := fs.files[d][f]
if !ok {
return nil, &os.PathError{Op: "stat", Path: name, Err: syscall.ENOENT}
}
return file.FileInfo(), nil
}
func (fs *Fs) Name() string { return "zipfs" }
func (fs *Fs) Chmod(name string, mode os.FileMode) error { return syscall.EPERM }
func (fs *Fs) Chown(name string, uid, gid int) error { return syscall.EPERM }
func (fs *Fs) Chtimes(name string, atime time.Time, mtime time.Time) error { return syscall.EPERM }
spf13-afero-18d690e/zipfs/testdata/ 0000775 0000000 0000000 00000000000 15057601571 0017055 5 ustar 00root root 0000000 0000000 spf13-afero-18d690e/zipfs/testdata/small.zip 0000664 0000000 0000000 00000000225 15057601571 0020710 0 ustar 00root root 0000000 0000000 PK smallFileHW(/I PKR PK R smallFilePK 7 H spf13-afero-18d690e/zipfs/testdata/t.zip 0000664 0000000 0000000 00000001767 15057601571 0020057 0 ustar 00root root 0000000 0000000 PK
woK sub/UT pGZtGZux PK
0xoK
sub/testDir2/UT HZHZux PK woK^b sub/testDir2/testFileUT GZHZux C?BM w
PK
+xoK testDir1/UT HZHZux PK woK testDir1/testFileUT GZGZux C?BM w
PK soKLP testFileUT @Z@Zux D?BM w
PK
woK A sub/UT pGZux PK
0xoK
A> sub/testDir2/UT HZux PK woK^b sub/testDir2/testFileUT GZux PK
+xoK A testDir1/UT HZux PK woK 2 testDir1/testFileUT GZux PK soKLP testFileUT @Zux PK spf13-afero-18d690e/zipfs/zipfs_test.go 0000664 0000000 0000000 00000005135 15057601571 0017771 0 ustar 00root root 0000000 0000000 package zipfs
import (
"archive/zip"
"path/filepath"
"reflect"
"testing"
"github.com/spf13/afero"
)
func TestZipFS(t *testing.T) {
zrc, err := zip.OpenReader("testdata/t.zip")
if err != nil {
t.Fatal(err)
}
zfs := New(&zrc.Reader)
a := &afero.Afero{Fs: zfs}
buf, err := a.ReadFile("testFile")
if err != nil {
t.Error(err)
}
if len(buf) != 8192 {
t.Errorf("short read: %d != 8192", len(buf))
}
buf = make([]byte, 8)
f, err := a.Open("testFile")
if err != nil {
t.Error(err)
}
if n, err := f.ReadAt(buf, 4092); err != nil {
t.Error(err)
} else if n != 8 {
t.Errorf("expected to read 8 bytes, got %d", n)
} else if string(buf) != "aaaabbbb" {
t.Errorf("expected to get , got <%s>", string(buf))
}
d, err := a.Open("/")
if d == nil {
t.Error(`Open("/") returns nil`)
}
if err != nil {
t.Errorf(`Open("/"): err = %v`, err)
}
if s, _ := d.Stat(); !s.IsDir() {
t.Error(`expected root ("/") to be a directory`)
}
if n := d.Name(); n != string(filepath.Separator) {
t.Errorf("Wrong Name() of root directory: Expected: '%c', got '%s'", filepath.Separator, n)
}
buf = make([]byte, 8192)
if n, err := f.Read(buf); err != nil {
t.Error(err)
} else if n != 8192 {
t.Errorf("expected to read 8192 bytes, got %d", n)
} else if buf[4095] != 'a' || buf[4096] != 'b' {
t.Error("got wrong contents")
}
for _, s := range []struct {
path string
dir bool
}{
{"/", true},
{"testDir1", true},
{"testDir1/testFile", false},
{"testFile", false},
{"sub", true},
{"sub/testDir2", true},
{"sub/testDir2/testFile", false},
} {
if dir, _ := a.IsDir(s.path); dir == s.dir {
t.Logf("%s: directory check ok", s.path)
} else {
t.Errorf("%s: directory check NOT ok: %t, expected %t", s.path, dir, s.dir)
}
}
for _, s := range []struct {
glob string
entries []string
}{
{filepath.FromSlash("/*"), []string{filepath.FromSlash("/sub"), filepath.FromSlash("/testDir1"), filepath.FromSlash("/testFile")}},
{filepath.FromSlash("*"), []string{filepath.FromSlash("sub"), filepath.FromSlash("testDir1"), filepath.FromSlash("testFile")}},
{filepath.FromSlash("sub/*"), []string{filepath.FromSlash("sub/testDir2")}},
{filepath.FromSlash("sub/testDir2/*"), []string{filepath.FromSlash("sub/testDir2/testFile")}},
{filepath.FromSlash("testDir1/*"), []string{filepath.FromSlash("testDir1/testFile")}},
} {
entries, err := afero.Glob(zfs, s.glob)
if err != nil {
t.Error(err)
}
if reflect.DeepEqual(entries, s.entries) {
t.Logf("glob: %s: glob ok", s.glob)
} else {
t.Errorf("glob: %s: got %#v, expected %#v", s.glob, entries, s.entries)
}
}
}