Mirror of https://github.com/caddyserver/caddy.git
Synced 2025-06-23 15:31:40 -04:00
Compare commits
No commits in common. "master" and "v2.10.0-beta.4" have entirely different histories.
master...v2.10.0-beta.4
.github/SECURITY.md (vendored, 4 changes)
@@ -48,9 +48,9 @@ We consider publicly-registered domain names to be public information. This nece
It will speed things up if you suggest a working patch, such as a code diff, and explain why and how it works. Reports that are not actionable, do not contain enough information, are too pushy/demanding, or are not able to convince us that it is a viable and practical attack on the web server itself may be deferred to a later time or possibly ignored, depending on available resources. Priority will be given to credible, responsible reports that are constructive, specific, and actionable. (We get a lot of invalid reports.) Thank you for understanding.
When you are ready, please submit a [new private vulnerability report](https://github.com/caddyserver/caddy/security/advisories/new).
When you are ready, please email Matt Holt (the author) directly: matt at dyanim dot com.
Please don't encrypt the message. It only makes the process more complicated.
Please don't encrypt the email body. It only makes the process more complicated.
Please also understand that due to our nature as an open source project, we do not have a budget to award security bounties. We can only thank you.
.github/dependabot.yml (vendored, 5 changes)

@@ -5,8 +5,3 @@ updates:
directory: "/"
schedule:
interval: "monthly"
- package-ecosystem: gomod
directory: /
schedule:
interval: weekly
.github/workflows/ci.yml (vendored, 46 changes)

@@ -16,9 +16,6 @@ env:
# https://github.com/actions/setup-go/issues/491
GOTOOLCHAIN: local
permissions:
contents: read
jobs:
test:
strategy:

@@ -58,21 +55,13 @@ jobs:
SUCCESS: 'True'
runs-on: ${{ matrix.OS_LABEL }}
permissions:
contents: read
pull-requests: read
actions: write # to allow uploading artifacts and cache
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@002fdce3c6a235733a90a27c80493a3241e56863 # v2.12.1
with:
egress-policy: audit
steps:
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
uses: actions/checkout@v4
- name: Install Go
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.GO_SEMVER }}
check-latest: true

@@ -119,7 +108,7 @@ jobs:
./caddy stop
- name: Publish Build Artifact
uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
uses: actions/upload-artifact@v4
with:
name: caddy_${{ runner.os }}_go${{ matrix.go }}_${{ steps.vars.outputs.short_sha }}
path: ${{ matrix.CADDY_BIN_PATH }}

@@ -153,21 +142,12 @@ jobs:
s390x-test:
name: test (s390x on IBM Z)
permissions:
contents: read
pull-requests: read
runs-on: ubuntu-latest
if: github.event.pull_request.head.repo.full_name == 'caddyserver/caddy' && github.actor != 'dependabot[bot]'
continue-on-error: true # August 2020: s390x VM is down due to weather and power issues
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@002fdce3c6a235733a90a27c80493a3241e56863 # v2.12.1
with:
egress-policy: audit
allowed-endpoints: ci-s390x.caddyserver.com:22
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
uses: actions/checkout@v4
- name: Run Tests
run: |
set +e

@@ -214,25 +194,17 @@ jobs:
goreleaser-check:
runs-on: ubuntu-latest
permissions:
contents: read
pull-requests: read
if: github.event.pull_request.head.repo.full_name == 'caddyserver/caddy' && github.actor != 'dependabot[bot]'
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@002fdce3c6a235733a90a27c80493a3241e56863 # v2.12.1
with:
egress-policy: audit
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
uses: actions/checkout@v4
- uses: goreleaser/goreleaser-action@9c156ee8a17a598857849441385a2041ef570552 # v6.3.0
- uses: goreleaser/goreleaser-action@v6
with:
version: latest
args: check
- name: Install Go
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
uses: actions/setup-go@v5
with:
go-version: "~1.24"
check-latest: true

@@ -240,7 +212,7 @@ jobs:
run: |
go install github.com/caddyserver/xcaddy/cmd/xcaddy@latest
xcaddy version
- uses: goreleaser/goreleaser-action@9c156ee8a17a598857849441385a2041ef570552 # v6.3.0
- uses: goreleaser/goreleaser-action@v6
with:
version: latest
args: build --single-target --snapshot
.github/workflows/cross-build.yml (vendored, 15 changes)

@@ -14,9 +14,6 @@ env:
# https://github.com/actions/setup-go/issues/491
GOTOOLCHAIN: local
permissions:
contents: read
jobs:
build:
strategy:

@@ -43,21 +40,13 @@ jobs:
GO_SEMVER: '~1.24.1'
runs-on: ubuntu-latest
permissions:
contents: read
pull-requests: read
continue-on-error: true
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@002fdce3c6a235733a90a27c80493a3241e56863 # v2.12.1
with:
egress-policy: audit
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
uses: actions/checkout@v4
- name: Install Go
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.GO_SEMVER }}
check-latest: true
.github/workflows/lint.yml (vendored, 42 changes)

@@ -44,19 +44,14 @@ jobs:
runs-on: ${{ matrix.OS_LABEL }}
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@002fdce3c6a235733a90a27c80493a3241e56863 # v2.12.1
with:
egress-policy: audit
- uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
- uses: actions/checkout@v4
- uses: actions/setup-go@v5
with:
go-version: '~1.24'
check-latest: true
- name: golangci-lint
uses: golangci/golangci-lint-action@4afd733a84b1f43292c63897423277bb7f4313a9 # v8.0.0
uses: golangci/golangci-lint-action@v6
with:
version: latest

@@ -67,39 +62,10 @@ jobs:
# only-new-issues: true
govulncheck:
permissions:
contents: read
pull-requests: read
runs-on: ubuntu-latest
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@002fdce3c6a235733a90a27c80493a3241e56863 # v2.12.1
with:
egress-policy: audit
- name: govulncheck
uses: golang/govulncheck-action@b625fbe08f3bccbe446d94fbf87fcc875a4f50ee # v1.0.4
uses: golang/govulncheck-action@v1
with:
go-version-input: '~1.24.1'
check-latest: true
dependency-review:
runs-on: ubuntu-latest
permissions:
contents: read
pull-requests: write
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@002fdce3c6a235733a90a27c80493a3241e56863 # v2.12.1
with:
egress-policy: audit
- name: 'Checkout Repository'
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
- name: 'Dependency Review'
uses: actions/dependency-review-action@da24556b548a50705dd671f47852072ea4c105d9 # v4.7.1
with:
comment-summary-in-pr: on-failure
# https://github.com/actions/dependency-review-action/issues/430#issuecomment-1468975566
base-ref: ${{ github.event.pull_request.base.sha || 'master' }}
head-ref: ${{ github.event.pull_request.head.sha || github.ref }}
.github/workflows/release.yml (vendored, 20 changes)

@@ -9,9 +9,6 @@ env:
# https://github.com/actions/setup-go/issues/491
GOTOOLCHAIN: local
permissions:
contents: read
jobs:
release:
name: Release

@@ -38,24 +35,19 @@ jobs:
contents: write
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@002fdce3c6a235733a90a27c80493a3241e56863 # v2.12.1
with:
egress-policy: audit
- name: Checkout code
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Install Go
uses: actions/setup-go@d35c59abb061a4a6fb18e82ac0862c26744d6ab5 # v5.5.0
uses: actions/setup-go@v5
with:
go-version: ${{ matrix.GO_SEMVER }}
check-latest: true
# Force fetch upstream tags -- because 65 minutes
# tl;dr: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 runs this line:
# tl;dr: actions/checkout@v4 runs this line:
# git -c protocol.version=2 fetch --no-tags --prune --progress --no-recurse-submodules --depth=1 origin +ebc278ec98bb24f2852b61fde2a9bf2e3d83818b:refs/tags/
# which makes its own local lightweight tag, losing all the annotations in the process. Our earlier script ran:
# git fetch --prune --unshallow

@@ -109,11 +101,11 @@ jobs:
git verify-tag "${{ steps.vars.outputs.version_tag }}" || exit 1
- name: Install Cosign
uses: sigstore/cosign-installer@e9a05e6d32d7ed22b5656cd874ef31af58d05bfa # main
uses: sigstore/cosign-installer@main
- name: Cosign version
run: cosign version
- name: Install Syft
uses: anchore/sbom-action/download-syft@9246b90769f852b3a8921f330c59e0b3f439d6e9 # main
uses: anchore/sbom-action/download-syft@main
- name: Syft version
run: syft version
- name: Install xcaddy

@@ -122,7 +114,7 @@ jobs:
xcaddy version
# GoReleaser will take care of publishing those artifacts into the release
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@9c156ee8a17a598857849441385a2041ef570552 # v6.3.0
uses: goreleaser/goreleaser-action@v6
with:
version: latest
args: release --clean --timeout 60m
.github/workflows/release_published.yml (vendored, 17 changes)

@@ -5,9 +5,6 @@ on:
release:
types: [published]
permissions:
contents: read
jobs:
release:
name: Release Published

@@ -16,20 +13,12 @@ jobs:
os:
- ubuntu-latest
runs-on: ${{ matrix.os }}
permissions:
contents: read
pull-requests: read
actions: write
steps:
# See https://github.com/peter-evans/repository-dispatch
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@002fdce3c6a235733a90a27c80493a3241e56863 # v2.12.1
with:
egress-policy: audit
- name: Trigger event on caddyserver/dist
uses: peter-evans/repository-dispatch@ff45666b9427631e3450c54a1bcbee4d9ff4d7c0 # v3.0.0
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.REPO_DISPATCH_TOKEN }}
repository: caddyserver/dist

@@ -37,7 +26,7 @@ jobs:
client-payload: '{"tag": "${{ github.event.release.tag_name }}"}'
- name: Trigger event on caddyserver/caddy-docker
uses: peter-evans/repository-dispatch@ff45666b9427631e3450c54a1bcbee4d9ff4d7c0 # v3.0.0
uses: peter-evans/repository-dispatch@v3
with:
token: ${{ secrets.REPO_DISPATCH_TOKEN }}
repository: caddyserver/caddy-docker
.github/workflows/scorecard.yml (vendored, 86 changes)

@@ -1,86 +0,0 @@
# This workflow uses actions that are not certified by GitHub. They are provided
# by a third-party and are governed by separate terms of service, privacy
# policy, and support documentation.
name: OpenSSF Scorecard supply-chain security
on:
# For Branch-Protection check. Only the default branch is supported. See
# https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection
branch_protection_rule:
# To guarantee Maintained check is occasionally updated. See
# https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained
schedule:
- cron: '20 2 * * 5'
push:
branches: [ "master", "2.*" ]
pull_request:
branches: [ "master", "2.*" ]
# Declare default permissions as read only.
permissions: read-all
jobs:
analysis:
name: Scorecard analysis
runs-on: ubuntu-latest
# `publish_results: true` only works when run from the default branch. conditional can be removed if disabled.
if: github.event.repository.default_branch == github.ref_name || github.event_name == 'pull_request'
permissions:
# Needed to upload the results to code-scanning dashboard.
security-events: write
# Needed to publish results and get a badge (see publish_results below).
id-token: write
# Uncomment the permissions below if installing in a private repository.
# contents: read
# actions: read
steps:
- name: Harden the runner (Audit all outbound calls)
uses: step-security/harden-runner@002fdce3c6a235733a90a27c80493a3241e56863 # v2.12.1
with:
egress-policy: audit
- name: "Checkout code"
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
with:
persist-credentials: false
- name: "Run analysis"
uses: ossf/scorecard-action@f49aabe0b5af0936a0987cfb85d86b75731b0186 # v2.4.1
with:
results_file: results.sarif
results_format: sarif
# (Optional) "write" PAT token. Uncomment the `repo_token` line below if:
# - you want to enable the Branch-Protection check on a *public* repository, or
# - you are installing Scorecard on a *private* repository
# To create the PAT, follow the steps in https://github.com/ossf/scorecard-action?tab=readme-ov-file#authentication-with-fine-grained-pat-optional.
# repo_token: ${{ secrets.SCORECARD_TOKEN }}
# Public repositories:
# - Publish results to OpenSSF REST API for easy access by consumers
# - Allows the repository to include the Scorecard badge.
# - See https://github.com/ossf/scorecard-action#publishing-results.
# For private repositories:
# - `publish_results` will always be set to `false`, regardless
# of the value entered here.
publish_results: true
# (Optional) Uncomment file_mode if you have a .gitattributes with files marked export-ignore
# file_mode: git
# Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
# format to the repository Actions tab.
- name: "Upload artifact"
uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4.6.1
with:
name: SARIF file
path: results.sarif
retention-days: 5
# Upload the results to GitHub's code scanning dashboard (optional).
# Commenting out will disable upload of results to your repo's Code Scanning dashboard
- name: "Upload to code-scanning"
uses: github/codeql-action/upload-sarif@ce28f5bb42b7a9f2c824e633a3f6ee835bab6858 # v3.29.0
with:
sarif_file: results.sarif
.golangci.yml (238 changes)
@ -1,15 +1,27 @@
|
||||
version: "2"
|
||||
run:
|
||||
issues-exit-code: 1
|
||||
tests: false
|
||||
output:
|
||||
formats:
|
||||
text:
|
||||
path: stdout
|
||||
print-linter-name: true
|
||||
print-issued-lines: true
|
||||
linters-settings:
|
||||
errcheck:
|
||||
exclude-functions:
|
||||
- fmt.*
|
||||
- (go.uber.org/zap/zapcore.ObjectEncoder).AddObject
|
||||
- (go.uber.org/zap/zapcore.ObjectEncoder).AddArray
|
||||
gci:
|
||||
sections:
|
||||
- standard # Standard section: captures all standard packages.
|
||||
- default # Default section: contains all imports that could not be matched to another section type.
|
||||
- prefix(github.com/caddyserver/caddy/v2/cmd) # ensure that this is always at the top and always has a line break.
|
||||
- prefix(github.com/caddyserver/caddy) # Custom section: groups all imports with the specified Prefix.
|
||||
# Skip generated files.
|
||||
# Default: true
|
||||
skip-generated: true
|
||||
# Enable custom order of sections.
|
||||
# If `true`, make the section order the same as the order of `sections`.
|
||||
# Default: false
|
||||
custom-order: true
|
||||
exhaustive:
|
||||
ignore-enum-types: reflect.Kind|svc.Cmd
|
||||
|
||||
linters:
|
||||
default: none
|
||||
disable-all: true
|
||||
enable:
|
||||
- asasalint
|
||||
- asciicheck
|
||||
@ -23,96 +35,148 @@ linters:
|
||||
- errcheck
|
||||
- errname
|
||||
- exhaustive
|
||||
- gci
|
||||
- gofmt
|
||||
- goimports
|
||||
- gofumpt
|
||||
- gosec
|
||||
- gosimple
|
||||
- govet
|
||||
- importas
|
||||
- ineffassign
|
||||
- importas
|
||||
- misspell
|
||||
- prealloc
|
||||
- promlinter
|
||||
- sloglint
|
||||
- sqlclosecheck
|
||||
- staticcheck
|
||||
- tenv
|
||||
- testableexamples
|
||||
- testifylint
|
||||
- tparallel
|
||||
- typecheck
|
||||
- unconvert
|
||||
- unused
|
||||
- wastedassign
|
||||
- whitespace
|
||||
- zerologlint
|
||||
settings:
|
||||
staticcheck:
|
||||
checks: ["all", "-ST1000", "-ST1003", "-ST1016", "-ST1020", "-ST1021", "-ST1022", "-QF1006", "-QF1008"] # default, and exclude 1 more undesired check
|
||||
errcheck:
|
||||
exclude-functions:
|
||||
- fmt.*
|
||||
- (go.uber.org/zap/zapcore.ObjectEncoder).AddObject
|
||||
- (go.uber.org/zap/zapcore.ObjectEncoder).AddArray
|
||||
exhaustive:
|
||||
ignore-enum-types: reflect.Kind|svc.Cmd
|
||||
exclusions:
|
||||
generated: lax
|
||||
presets:
|
||||
- comments
|
||||
- common-false-positives
|
||||
- legacy
|
||||
- std-error-handling
|
||||
rules:
|
||||
- linters:
|
||||
- gosec
|
||||
text: G115 # TODO: Either we should fix the issues or nuke the linter if it's bad
|
||||
- linters:
|
||||
- gosec
|
||||
text: G107 # we aren't calling unknown URL
|
||||
- linters:
|
||||
- gosec
|
||||
text: G203 # as a web server that's expected to handle any template, this is totally in the hands of the user.
|
||||
- linters:
|
||||
- gosec
|
||||
text: G204 # we're shelling out to known commands, not relying on user-defined input.
|
||||
- linters:
|
||||
- gosec
|
||||
# the choice of weakrand is deliberate, hence the named import "weakrand"
|
||||
path: modules/caddyhttp/reverseproxy/selectionpolicies.go
|
||||
text: G404
|
||||
- linters:
|
||||
- gosec
|
||||
path: modules/caddyhttp/reverseproxy/streaming.go
|
||||
text: G404
|
||||
- linters:
|
||||
- dupl
|
||||
path: modules/logging/filters.go
|
||||
- linters:
|
||||
- dupl
|
||||
path: modules/caddyhttp/matchers.go
|
||||
- linters:
|
||||
- dupl
|
||||
path: modules/caddyhttp/vars.go
|
||||
- linters:
|
||||
- errcheck
|
||||
path: _test\.go
|
||||
paths:
|
||||
- third_party$
|
||||
- builtin$
|
||||
- examples$
|
||||
formatters:
|
||||
enable:
|
||||
- gci
|
||||
- gofmt
|
||||
- gofumpt
|
||||
- goimports
|
||||
settings:
|
||||
gci:
|
||||
sections:
|
||||
- standard # Standard section: captures all standard packages.
|
||||
- default # Default section: contains all imports that could not be matched to another section type.
|
||||
- prefix(github.com/caddyserver/caddy/v2/cmd) # ensure that this is always at the top and always has a line break.
|
||||
- prefix(github.com/caddyserver/caddy) # Custom section: groups all imports with the specified Prefix.
|
||||
custom-order: true
|
||||
exclusions:
|
||||
generated: lax
|
||||
paths:
|
||||
- third_party$
|
||||
- builtin$
|
||||
- examples$
|
||||
# these are implicitly disabled:
|
||||
# - containedctx
|
||||
# - contextcheck
|
||||
# - cyclop
|
||||
# - depguard
|
||||
# - errchkjson
|
||||
# - errorlint
|
||||
# - exhaustruct
|
||||
# - execinquery
|
||||
# - exhaustruct
|
||||
# - forbidigo
|
||||
# - forcetypeassert
|
||||
# - funlen
|
||||
# - ginkgolinter
|
||||
# - gocheckcompilerdirectives
|
||||
# - gochecknoglobals
|
||||
# - gochecknoinits
|
||||
# - gochecksumtype
|
||||
# - gocognit
|
||||
# - goconst
|
||||
# - gocritic
|
||||
# - gocyclo
|
||||
# - godot
|
||||
# - godox
|
||||
# - goerr113
|
||||
# - goheader
|
||||
# - gomnd
|
||||
# - gomoddirectives
|
||||
# - gomodguard
|
||||
# - goprintffuncname
|
||||
# - gosmopolitan
|
||||
# - grouper
|
||||
# - inamedparam
|
||||
# - interfacebloat
|
||||
# - ireturn
|
||||
# - lll
|
||||
# - loggercheck
|
||||
# - maintidx
|
||||
# - makezero
|
||||
# - mirror
|
||||
# - musttag
|
||||
# - nakedret
|
||||
# - nestif
|
||||
# - nilerr
|
||||
# - nilnil
|
||||
# - nlreturn
|
||||
# - noctx
|
||||
# - nolintlint
|
||||
# - nonamedreturns
|
||||
# - nosprintfhostport
|
||||
# - paralleltest
|
||||
# - perfsprint
|
||||
# - predeclared
|
||||
# - protogetter
|
||||
# - reassign
|
||||
# - revive
|
||||
# - rowserrcheck
|
||||
# - stylecheck
|
||||
# - tagalign
|
||||
# - tagliatelle
|
||||
# - testpackage
|
||||
# - thelper
|
||||
# - unparam
|
||||
# - usestdlibvars
|
||||
# - varnamelen
|
||||
# - wrapcheck
|
||||
# - wsl
|
||||
|
||||
run:
|
||||
# default concurrency is a available CPU number.
|
||||
# concurrency: 4 # explicitly omit this value to fully utilize available resources.
|
||||
timeout: 5m
|
||||
issues-exit-code: 1
|
||||
tests: false
|
||||
|
||||
# output configuration options
|
||||
output:
|
||||
formats:
|
||||
- format: 'colored-line-number'
|
||||
print-issued-lines: true
|
||||
print-linter-name: true
|
||||
|
||||
issues:
|
||||
exclude-rules:
|
||||
- text: 'G115' # TODO: Either we should fix the issues or nuke the linter if it's bad
|
||||
linters:
|
||||
- gosec
|
||||
# we aren't calling unknown URL
|
||||
- text: 'G107' # G107: Url provided to HTTP request as taint input
|
||||
linters:
|
||||
- gosec
|
||||
# as a web server that's expected to handle any template, this is totally in the hands of the user.
|
||||
- text: 'G203' # G203: Use of unescaped data in HTML templates
|
||||
linters:
|
||||
- gosec
|
||||
# we're shelling out to known commands, not relying on user-defined input.
|
||||
- text: 'G204' # G204: Audit use of command execution
|
||||
linters:
|
||||
- gosec
|
||||
# the choice of weakrand is deliberate, hence the named import "weakrand"
|
||||
- path: modules/caddyhttp/reverseproxy/selectionpolicies.go
|
||||
text: 'G404' # G404: Insecure random number source (rand)
|
||||
linters:
|
||||
- gosec
|
||||
- path: modules/caddyhttp/reverseproxy/streaming.go
|
||||
text: 'G404' # G404: Insecure random number source (rand)
|
||||
linters:
|
||||
- gosec
|
||||
- path: modules/logging/filters.go
|
||||
linters:
|
||||
- dupl
|
||||
- path: modules/caddyhttp/matchers.go
|
||||
linters:
|
||||
- dupl
|
||||
- path: modules/caddyhttp/vars.go
|
||||
linters:
|
||||
- dupl
|
||||
- path: _test\.go
|
||||
linters:
|
||||
- errcheck
|
||||
|
@@ -1,20 +0,0 @@
repos:
- repo: https://github.com/gitleaks/gitleaks
rev: v8.16.3
hooks:
- id: gitleaks
- repo: https://github.com/golangci/golangci-lint
rev: v1.52.2
hooks:
- id: golangci-lint-config-verify
- id: golangci-lint
- id: golangci-lint-fmt
- repo: https://github.com/jumanjihouse/pre-commit-hooks
rev: 3.0.0
hooks:
- id: shellcheck
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.4.0
hooks:
- id: end-of-file-fixer
- id: trailing-whitespace
@@ -14,7 +14,6 @@
<p align="center">Caddy is an extensible server platform that uses TLS by default.</p>
<p align="center">
<a href="https://github.com/caddyserver/caddy/actions/workflows/ci.yml"><img src="https://github.com/caddyserver/caddy/actions/workflows/ci.yml/badge.svg"></a>
<a href="https://www.bestpractices.dev/projects/7141"><img src="https://www.bestpractices.dev/projects/7141/badge"></a>
<a href="https://pkg.go.dev/github.com/caddyserver/caddy/v2"><img src="https://img.shields.io/badge/godoc-reference-%23007d9c.svg"></a>
<br>
<a href="https://x.com/caddyserver" title="@caddyserver on Twitter"><img src="https://img.shields.io/twitter/follow/caddyserver" alt="@caddyserver on Twitter"></a>
admin.go (93 changes)

@@ -221,8 +221,7 @@ func (admin *AdminConfig) newAdminHandler(addr NetworkAddress, remote bool, _ Co
if remote {
muxWrap.remoteControl = admin.Remote
} else {
// see comment in allowedOrigins() as to why we disable the host check for unix/fd networks
muxWrap.enforceHost = !addr.isWildcardInterface() && !addr.IsUnixNetwork() && !addr.IsFdNetwork()
muxWrap.enforceHost = !addr.isWildcardInterface()
muxWrap.allowedOrigins = admin.allowedOrigins(addr)
muxWrap.enforceOrigin = admin.EnforceOrigin
}

@@ -311,43 +310,47 @@ func (admin AdminConfig) allowedOrigins(addr NetworkAddress) []*url.URL {
for _, o := range admin.Origins {
uniqueOrigins[o] = struct{}{}
}
// RFC 2616, Section 14.26:
// "A client MUST include a Host header field in all HTTP/1.1 request
// messages. If the requested URI does not include an Internet host
// name for the service being requested, then the Host header field MUST
// be given with an empty value."
//
// UPDATE July 2023: Go broke this by patching a minor security bug in 1.20.6.
// Understandable, but frustrating. See:
// https://github.com/golang/go/issues/60374
// See also the discussion here:
// https://github.com/golang/go/issues/61431
//
// We can no longer conform to RFC 2616 Section 14.26 from either Go or curl
// in purity. (Curl allowed no host between 7.40 and 7.50, but now requires a
// bogus host; see https://superuser.com/a/925610.) If we disable Host/Origin
// security checks, the infosec community assures me that it is secure to do
// so, because:
//
// 1) Browsers do not allow access to unix sockets
// 2) DNS is irrelevant to unix sockets
//
// If either of those two statements ever fail to hold true, it is not the
// fault of Caddy.
//
// Thus, we do not fill out allowed origins and do not enforce Host
// requirements for unix sockets. Enforcing it leads to confusion and
// frustration, when UDS have their own permissions from the OS.
// Enforcing host requirements here is effectively security theater,
// and a false sense of security.
//
// See also the discussion in #6832.
if admin.Origins == nil && !addr.IsUnixNetwork() && !addr.IsFdNetwork() {
if admin.Origins == nil {
if addr.isLoopback() {
uniqueOrigins[net.JoinHostPort("localhost", addr.port())] = struct{}{}
uniqueOrigins[net.JoinHostPort("::1", addr.port())] = struct{}{}
uniqueOrigins[net.JoinHostPort("127.0.0.1", addr.port())] = struct{}{}
} else {
if addr.IsUnixNetwork() || addr.IsFdNetwork() {
// RFC 2616, Section 14.26:
// "A client MUST include a Host header field in all HTTP/1.1 request
// messages. If the requested URI does not include an Internet host
// name for the service being requested, then the Host header field MUST
// be given with an empty value."
//
// UPDATE July 2023: Go broke this by patching a minor security bug in 1.20.6.
// Understandable, but frustrating. See:
// https://github.com/golang/go/issues/60374
// See also the discussion here:
// https://github.com/golang/go/issues/61431
//
// We can no longer conform to RFC 2616 Section 14.26 from either Go or curl
// in purity. (Curl allowed no host between 7.40 and 7.50, but now requires a
// bogus host; see https://superuser.com/a/925610.) If we disable Host/Origin
// security checks, the infosec community assures me that it is secure to do
// so, because:
// 1) Browsers do not allow access to unix sockets
// 2) DNS is irrelevant to unix sockets
//
// I am not quite ready to trust either of those external factors, so instead
// of disabling Host/Origin checks, we now allow specific Host values when
// accessing the admin endpoint over unix sockets. I definitely don't trust
// DNS (e.g. I don't trust 'localhost' to always resolve to the local host),
// and IP shouldn't even be used, but if it is for some reason, I think we can
// at least be reasonably assured that 127.0.0.1 and ::1 route to the local
// machine, meaning that a hypothetical browser origin would have to be on the
// local machine as well.
uniqueOrigins[""] = struct{}{}
uniqueOrigins["127.0.0.1"] = struct{}{}
uniqueOrigins["::1"] = struct{}{}
} else {
uniqueOrigins[net.JoinHostPort("localhost", addr.port())] = struct{}{}
uniqueOrigins[net.JoinHostPort("::1", addr.port())] = struct{}{}
uniqueOrigins[net.JoinHostPort("127.0.0.1", addr.port())] = struct{}{}
}
}
if !addr.IsUnixNetwork() && !addr.IsFdNetwork() {
uniqueOrigins[addr.JoinHostPort(0)] = struct{}{}
}
}
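Aside: the practical difference between the two sides of this hunk is which Host values the local admin endpoint accepts when no origins are configured. The following is a minimal standalone sketch of that decision (illustrative names only, not Caddy's NetworkAddress API; the tcp case assumes a loopback listener):

// Sketch: allowed admin Host values under the two approaches shown above.
package main

import (
	"fmt"
	"net"
)

// allowedHosts is a hypothetical helper; "network" and "port" stand in for
// Caddy's NetworkAddress, and udsHostCheck selects between the two behaviors.
func allowedHosts(network, port string, udsHostCheck bool) []string {
	if network == "unix" || network == "fd" {
		if !udsHostCheck {
			return nil // Host/Origin enforcement skipped entirely
		}
		// empty Host per RFC 2616 Section 14.26, plus the loopback IPs
		return []string{"", "127.0.0.1", "::1"}
	}
	// loopback TCP listener: localhost and loopback IPs with the listener's port
	return []string{
		net.JoinHostPort("localhost", port),
		net.JoinHostPort("::1", port),
		net.JoinHostPort("127.0.0.1", port),
	}
}

func main() {
	fmt.Println(allowedHosts("tcp", "2019", false)) // [localhost:2019 [::1]:2019 127.0.0.1:2019]
	fmt.Println(allowedHosts("unix", "", false))    // [] (checks skipped)
	fmt.Println(allowedHosts("unix", "", true))     // [ 127.0.0.1 ::1]
}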
@@ -424,13 +427,6 @@ func replaceLocalAdminServer(cfg *Config, ctx Context) error {
handler := cfg.Admin.newAdminHandler(addr, false, ctx)
// run the provisioners for loaded modules to make sure local
// state is properly re-initialized in the new admin server
err = cfg.Admin.provisionAdminRouters(ctx)
if err != nil {
return err
}
ln, err := addr.Listen(context.TODO(), 0, net.ListenConfig{})
if err != nil {
return err

@@ -552,13 +548,6 @@ func replaceRemoteAdminServer(ctx Context, cfg *Config) error {
// because we are using TLS authentication instead
handler := cfg.Admin.newAdminHandler(addr, true, ctx)
// run the provisioners for loaded modules to make sure local
// state is properly re-initialized in the new admin server
err = cfg.Admin.provisionAdminRouters(ctx)
if err != nil {
return err
}
// create client certificate pool for TLS mutual auth, and extract public keys
// so that we can enforce access controls at the application layer
clientCertPool := x509.NewCertPool()
@@ -19,7 +19,6 @@ import (
"crypto/x509"
"encoding/json"
"fmt"
"maps"
"net/http"
"net/http/httptest"
"reflect"

@@ -336,7 +335,9 @@ func TestAdminHandlerBuiltinRouteErrors(t *testing.T) {
func testGetMetricValue(labels map[string]string) float64 {
promLabels := prometheus.Labels{}
maps.Copy(promLabels, labels)
for k, v := range labels {
promLabels[k] = v
}
metric, err := adminMetrics.requestErrors.GetMetricWith(promLabels)
if err != nil {

@@ -376,7 +377,9 @@ func (m *mockModule) CaddyModule() ModuleInfo {
func TestNewAdminHandlerRouterRegistration(t *testing.T) {
originalModules := make(map[string]ModuleInfo)
maps.Copy(originalModules, modules)
for k, v := range modules {
originalModules[k] = v
}
defer func() {
modules = originalModules
}()

@@ -476,7 +479,9 @@ func TestAdminRouterProvisioning(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
originalModules := make(map[string]ModuleInfo)
maps.Copy(originalModules, modules)
for k, v := range modules {
originalModules[k] = v
}
defer func() {
modules = originalModules
}()

@@ -526,7 +531,6 @@ func TestAdminRouterProvisioning(t *testing.T) {
}
func TestAllowedOriginsUnixSocket(t *testing.T) {
// see comment in allowedOrigins() as to why we do not fill out allowed origins for UDS
tests := []struct {
name string
addr NetworkAddress

@@ -539,8 +543,12 @@ func TestAllowedOriginsUnixSocket(t *testing.T) {
Network: "unix",
Host: "/tmp/caddy.sock",
},
origins: nil, // default origins
expectOrigins: []string{},
origins: nil, // default origins
expectOrigins: []string{
"", // empty host as per RFC 2616
"127.0.0.1",
"::1",
},
},
{
name: "unix socket with custom origins",

@@ -570,7 +578,7 @@ func TestAllowedOriginsUnixSocket(t *testing.T) {
},
}
for i, test := range tests {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
admin := AdminConfig{
Origins: test.origins,

@@ -584,7 +592,7 @@ func TestAllowedOriginsUnixSocket(t *testing.T) {
}
if len(gotOrigins) != len(test.expectOrigins) {
t.Errorf("%d: Expected %d origins but got %d", i, len(test.expectOrigins), len(gotOrigins))
t.Errorf("Expected %d origins but got %d", len(test.expectOrigins), len(gotOrigins))
return
}

@@ -599,7 +607,7 @@ func TestAllowedOriginsUnixSocket(t *testing.T) {
}
if !reflect.DeepEqual(expectMap, gotMap) {
t.Errorf("%d: Origins mismatch.\nExpected: %v\nGot: %v", i, test.expectOrigins, gotOrigins)
t.Errorf("Origins mismatch.\nExpected: %v\nGot: %v", test.expectOrigins, gotOrigins)
}
})
}

@@ -769,7 +777,9 @@ func (m *mockIssuerModule) CaddyModule() ModuleInfo {
func TestManageIdentity(t *testing.T) {
originalModules := make(map[string]ModuleInfo)
maps.Copy(originalModules, modules)
for k, v := range modules {
originalModules[k] = v
}
defer func() {
modules = originalModules
}()
caddy.go (144 changes)

@@ -81,14 +81,13 @@ type Config struct {
// associated value.
AppsRaw ModuleMap `json:"apps,omitempty" caddy:"namespace="`
apps map[string]App
storage certmagic.Storage
eventEmitter eventEmitter
apps map[string]App
storage certmagic.Storage
cancelFunc context.CancelFunc
// fileSystems is a dict of fileSystems that will later be loaded from and added to.
fileSystems FileSystems
// filesystems is a dict of filesystems that will later be loaded from and added to.
filesystems FileSystems
}
// App is a thing that Caddy runs.

@@ -408,23 +407,11 @@ func run(newCfg *Config, start bool) (Context, error) {
return ctx, nil
}
defer func() {
// if newCfg fails to start completely, clean up the already provisioned modules
// partially copied from provisionContext
if err != nil {
globalMetrics.configSuccess.Set(0)
ctx.cfg.cancelFunc()
if currentCtx.cfg != nil {
certmagic.Default.Storage = currentCtx.cfg.storage
}
}
}()
// Provision any admin routers which may need to access
// some of the other apps at runtime
err = ctx.cfg.Admin.provisionAdminRouters(ctx)
if err != nil {
globalMetrics.configSuccess.Set(0)
return ctx, err
}

@@ -450,18 +437,14 @@ func run(newCfg *Config, start bool) (Context, error) {
return nil
}()
if err != nil {
globalMetrics.configSuccess.Set(0)
return ctx, err
}
globalMetrics.configSuccess.Set(1)
globalMetrics.configSuccessTime.SetToCurrentTime()
// TODO: This event is experimental and subject to change.
ctx.emitEvent("started", nil)
// now that the user's config is running, finish setting up anything else,
// such as remote admin endpoint, config loader, etc.
err = finishSettingUp(ctx, ctx.cfg)
return ctx, err
return ctx, finishSettingUp(ctx, ctx.cfg)
}
// provisionContext creates a new context from the given configuration and provisions

@@ -517,8 +500,16 @@ func provisionContext(newCfg *Config, replaceAdminServer bool) (Context, error)
return ctx, err
}
// start the admin endpoint (and stop any prior one)
if replaceAdminServer {
err = replaceLocalAdminServer(newCfg, ctx)
if err != nil {
return ctx, fmt.Errorf("starting caddy administration endpoint: %v", err)
}
}
// create the new filesystem map
newCfg.fileSystems = &filesystems.FileSystemMap{}
newCfg.filesystems = &filesystems.FilesystemMap{}
// prepare the new config for use
newCfg.apps = make(map[string]App)

@@ -548,14 +539,6 @@ func provisionContext(newCfg *Config, replaceAdminServer bool) (Context, error)
return ctx, err
}
// start the admin endpoint (and stop any prior one)
if replaceAdminServer {
err = replaceLocalAdminServer(newCfg, ctx)
if err != nil {
return ctx, fmt.Errorf("starting caddy administration endpoint: %v", err)
}
}
// Load and Provision each app and their submodules
err = func() error {
for appName := range newCfg.AppsRaw {

@@ -713,9 +696,6 @@ func unsyncedStop(ctx Context) {
return
}
// TODO: This event is experimental and subject to change.
ctx.emitEvent("stopping", nil)
// stop each app
for name, a := range ctx.cfg.apps {
err := a.Stop()
@@ -1058,98 +1038,6 @@ func Version() (simple, full string) {
return
}
// Event represents something that has happened or is happening.
// An Event value is not synchronized, so it should be copied if
// being used in goroutines.
//
// EXPERIMENTAL: Events are subject to change.
type Event struct {
// If non-nil, the event has been aborted, meaning
// propagation has stopped to other handlers and
// the code should stop what it was doing. Emitters
// may choose to use this as a signal to adjust their
// code path appropriately.
Aborted error
// The data associated with the event. Usually the
// original emitter will be the only one to set or
// change these values, but the field is exported
// so handlers can have full access if needed.
// However, this map is not synchronized, so
// handlers must not use this map directly in new
// goroutines; instead, copy the map to use it in a
// goroutine. Data may be nil.
Data map[string]any
id uuid.UUID
ts time.Time
name string
origin Module
}
// NewEvent creates a new event, but does not emit the event. To emit an
// event, call Emit() on the current instance of the caddyevents app instead.
//
// EXPERIMENTAL: Subject to change.
func NewEvent(ctx Context, name string, data map[string]any) (Event, error) {
id, err := uuid.NewRandom()
if err != nil {
return Event{}, fmt.Errorf("generating new event ID: %v", err)
}
name = strings.ToLower(name)
return Event{
Data: data,
id: id,
ts: time.Now(),
name: name,
origin: ctx.Module(),
}, nil
}
func (e Event) ID() uuid.UUID { return e.id }
func (e Event) Timestamp() time.Time { return e.ts }
func (e Event) Name() string { return e.name }
func (e Event) Origin() Module { return e.origin } // Returns the module that originated the event. May be nil, usually if caddy core emits the event.
// CloudEvent exports event e as a structure that, when
// serialized as JSON, is compatible with the
// CloudEvents spec.
func (e Event) CloudEvent() CloudEvent {
dataJSON, _ := json.Marshal(e.Data)
var source string
if e.Origin() == nil {
source = "caddy"
} else {
source = string(e.Origin().CaddyModule().ID)
}
return CloudEvent{
ID: e.id.String(),
Source: source,
SpecVersion: "1.0",
Type: e.name,
Time: e.ts,
DataContentType: "application/json",
Data: dataJSON,
}
}
// CloudEvent is a JSON-serializable structure that
// is compatible with the CloudEvents specification.
// See https://cloudevents.io.
// EXPERIMENTAL: Subject to change.
type CloudEvent struct {
ID string `json:"id"`
Source string `json:"source"`
SpecVersion string `json:"specversion"`
Type string `json:"type"`
Time time.Time `json:"time"`
DataContentType string `json:"datacontenttype,omitempty"`
Data json.RawMessage `json:"data,omitempty"`
}
// ErrEventAborted cancels an event.
var ErrEventAborted = errors.New("event aborted")
// ActiveContext returns the currently-active context.
// This function is experimental and might be changed
// or removed in the future.
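Aside: the Event/CloudEvent API in this hunk serializes to the CloudEvents 1.0 wire format. A minimal standalone sketch of that output, using a local copy of the struct tags shown above rather than an import of the caddy package (the UUID and payload are made up for illustration):

package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// cloudEvent mirrors the field tags of the CloudEvent struct shown above.
type cloudEvent struct {
	ID              string          `json:"id"`
	Source          string          `json:"source"`
	SpecVersion     string          `json:"specversion"`
	Type            string          `json:"type"`
	Time            time.Time       `json:"time"`
	DataContentType string          `json:"datacontenttype,omitempty"`
	Data            json.RawMessage `json:"data,omitempty"`
}

func main() {
	ce := cloudEvent{
		ID:              "00000000-0000-0000-0000-000000000000", // illustrative UUID
		Source:          "caddy", // falls back to "caddy" when the origin module is nil
		SpecVersion:     "1.0",
		Type:            "started",
		Time:            time.Now(),
		DataContentType: "application/json",
		Data:            json.RawMessage(`{}`),
	}
	out, _ := json.MarshalIndent(ce, "", "  ")
	fmt.Println(string(out))
}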
@@ -15,7 +15,6 @@
package caddy
import (
"context"
"testing"
"time"
)

@@ -73,21 +72,3 @@ func TestParseDuration(t *testing.T) {
}
}
}
func TestEvent_CloudEvent_NilOrigin(t *testing.T) {
ctx, _ := NewContext(Context{Context: context.Background()}) // module will be nil by default
event, err := NewEvent(ctx, "started", nil)
if err != nil {
t.Fatalf("NewEvent() error = %v", err)
}
// This should not panic
ce := event.CloudEvent()
if ce.Source != "caddy" {
t.Errorf("Expected CloudEvent Source to be 'caddy', got '%s'", ce.Source)
}
if ce.Type != "started" {
t.Errorf("Expected CloudEvent Type to be 'started', got '%s'", ce.Type)
}
}
@@ -68,7 +68,7 @@ func (a Adapter) Adapt(body []byte, options map[string]any) ([]byte, []caddyconf
// TODO: also perform this check on imported files
func FormattingDifference(filename string, body []byte) (caddyconfig.Warning, bool) {
// replace windows-style newlines to normalize comparison
normalizedBody := bytes.ReplaceAll(body, []byte("\r\n"), []byte("\n"))
normalizedBody := bytes.Replace(body, []byte("\r\n"), []byte("\n"), -1)
formatted := Format(normalizedBody)
if bytes.Equal(formatted, normalizedBody) {

@@ -94,7 +94,7 @@ func Format(input []byte) []byte {
}
// detect whether we have the start of a heredoc
if !quoted && (heredoc == heredocClosed && !heredocEscaped) &&
if !quoted && !(heredoc != heredocClosed || heredocEscaped) &&
space && last == '<' && ch == '<' {
write(ch)
heredoc = heredocOpening

@@ -137,7 +137,7 @@ func (l *lexer) next() (bool, error) {
}
// detect whether we have the start of a heredoc
if (!quoted && !btQuoted) && (!inHeredoc && !heredocEscaped) &&
if !(quoted || btQuoted) && !(inHeredoc || heredocEscaped) &&
len(val) > 1 && string(val[:2]) == "<<" {
// a space means it's just a regular token and not a heredoc
if ch == ' ' {
@@ -15,7 +15,6 @@
package httpcaddyfile
import (
"encoding/json"
"fmt"
"html"
"net/http"

@@ -844,18 +843,13 @@ func parseHandleErrors(h Helper) ([]ConfigValue, error) {
return nil, h.Errf("segment was not parsed as a subroute")
}
// wrap the subroutes
wrappingRoute := caddyhttp.Route{
HandlersRaw: []json.RawMessage{caddyconfig.JSONModuleObject(subroute, "handler", "subroute", nil)},
}
subroute = &caddyhttp.Subroute{
Routes: []caddyhttp.Route{wrappingRoute},
}
if expression != "" {
statusMatcher := caddy.ModuleMap{
"expression": h.JSON(caddyhttp.MatchExpression{Expr: expression}),
}
subroute.Routes[0].MatcherSetsRaw = []caddy.ModuleMap{statusMatcher}
for i := range subroute.Routes {
subroute.Routes[i].MatcherSetsRaw = []caddy.ModuleMap{statusMatcher}
}
}
return []ConfigValue{
{

@@ -1166,11 +1160,6 @@ func parseLogSkip(h Helper) (caddyhttp.MiddlewareHandler, error) {
if h.NextArg() {
return nil, h.ArgErr()
}
if h.NextBlock(0) {
return nil, h.Err("log_skip directive does not accept blocks")
}
return caddyhttp.VarsMiddleware{"log_skip": true}, nil
}
@@ -16,7 +16,6 @@ package httpcaddyfile
import (
"encoding/json"
"maps"
"net"
"slices"
"sort"

@@ -174,12 +173,10 @@ func RegisterDirectiveOrder(dir string, position Positional, standardDir string)
if d != standardDir {
continue
}
switch position {
case Before:
if position == Before {
newOrder = append(newOrder[:i], append([]string{dir}, newOrder[i:]...)...)
case After:
} else if position == After {
newOrder = append(newOrder[:i+1], append([]string{dir}, newOrder[i+1:]...)...)
case First, Last:
}
break
}
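Aside: both sides of the RegisterDirectiveOrder hunk splice the new directive into the ordering slice with the nested-append idiom. A small standalone sketch of just that idiom (hypothetical names, not Caddy's actual directive list):

package main

import "fmt"

// insertAt splices dir in front of index i: the inner append builds the
// new tail, and the outer append attaches it after the first i elements.
func insertAt(order []string, i int, dir string) []string {
	return append(order[:i], append([]string{dir}, order[i:]...)...)
}

func main() {
	order := []string{"redir", "rewrite", "respond"}
	fmt.Println(insertAt(order, 1, "my_directive")) // [redir my_directive rewrite respond]
}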
@@ -368,7 +365,9 @@ func parseSegmentAsConfig(h Helper) ([]ConfigValue, error) {
// copy existing matcher definitions so we can augment
// new ones that are defined only in this scope
matcherDefs := make(map[string]caddy.ModuleMap, len(h.matcherDefs))
maps.Copy(matcherDefs, h.matcherDefs)
for key, val := range h.matcherDefs {
matcherDefs[key] = val
}
// find and extract any embedded matcher definitions in this scope
for i := 0; i < len(segments); i++ {

@@ -484,29 +483,12 @@ func sortRoutes(routes []ConfigValue) {
// we can only confidently compare path lengths if both
// directives have a single path to match (issue #5037)
if iPathLen > 0 && jPathLen > 0 {
// trim the trailing wildcard if there is one
iPathTrimmed := strings.TrimSuffix(iPM[0], "*")
jPathTrimmed := strings.TrimSuffix(jPM[0], "*")
// if both paths are the same except for a trailing wildcard,
// sort by the shorter path first (which is more specific)
if iPathTrimmed == jPathTrimmed {
if strings.TrimSuffix(iPM[0], "*") == strings.TrimSuffix(jPM[0], "*") {
return iPathLen < jPathLen
}
// we use the trimmed length to compare the paths
// https://github.com/caddyserver/caddy/issues/7012#issuecomment-2870142195
// credit to https://github.com/Hellio404
// for sorts with many items, mixing matchers w/ and w/o wildcards will confuse the sort and result in incorrect orders
iPathLen = len(iPathTrimmed)
jPathLen = len(jPathTrimmed)
// if both paths have the same length, sort lexically
// https://github.com/caddyserver/caddy/pull/7015#issuecomment-2871993588
if iPathLen == jPathLen {
return iPathTrimmed < jPathTrimmed
}
// sort most-specific (longest) path first
return iPathLen > jPathLen
}
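Aside: the comments in the sortRoutes hunk describe the ordering rule in words; the sketch below restates it as a standalone comparison on plain strings (illustrative only, not the actual sortRoutes code): longest trimmed path first, lexical order on ties, and an exact path ahead of its own trailing-wildcard form.

package main

import (
	"fmt"
	"strings"
)

// sortsBefore reports whether path matcher i should come before j.
func sortsBefore(i, j string) bool {
	it, jt := strings.TrimSuffix(i, "*"), strings.TrimSuffix(j, "*")
	if it == jt {
		return len(i) < len(j) // "/foo" sorts before "/foo*"
	}
	if len(it) == len(jt) {
		return it < jt // tie on trimmed length: lexical order
	}
	return len(it) > len(jt) // most-specific (longest) trimmed path first
}

func main() {
	fmt.Println(sortsBefore("/api/v1*", "/api/*")) // true
	fmt.Println(sortsBefore("/foo", "/foo*"))      // true
}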
@@ -633,6 +633,12 @@ func (st *ServerType) serversFromPairings(
srv.AutoHTTPS = new(caddyhttp.AutoHTTPSConfig)
}
srv.AutoHTTPS.IgnoreLoadedCerts = true
case "prefer_wildcard":
if srv.AutoHTTPS == nil {
srv.AutoHTTPS = new(caddyhttp.AutoHTTPSConfig)
}
srv.AutoHTTPS.PreferWildcard = true
}
}

@@ -700,6 +706,16 @@ func (st *ServerType) serversFromPairings(
return specificity(iLongestHost) > specificity(jLongestHost)
})
// collect all hosts that have a wildcard in them
wildcardHosts := []string{}
for _, sblock := range p.serverBlocks {
for _, addr := range sblock.parsedKeys {
if strings.HasPrefix(addr.Host, "*.") {
wildcardHosts = append(wildcardHosts, addr.Host[2:])
}
}
}
var hasCatchAllTLSConnPolicy, addressQualifiesForTLS bool
autoHTTPSWillAddConnPolicy := srv.AutoHTTPS == nil || !srv.AutoHTTPS.Disabled

@@ -785,13 +801,7 @@ func (st *ServerType) serversFromPairings(
cp.FallbackSNI = fallbackSNI
}
// only append this policy if it actually changes something,
// or if the configuration explicitly automates certs for
// these names (this is necessary to hoist a connection policy
// above one that may manually load a wildcard cert that would
// otherwise clobber the automated one; the code that appends
// policies that manually load certs comes later, so they're
// lower in the list)
// only append this policy if it actually changes something
if !cp.SettingsEmpty() || mapContains(forceAutomatedNames, hosts) {
srv.TLSConnPolicies = append(srv.TLSConnPolicies, cp)
hasCatchAllTLSConnPolicy = len(hosts) == 0

@@ -831,6 +841,18 @@ func (st *ServerType) serversFromPairings(
addressQualifiesForTLS = true
}
// If prefer wildcard is enabled, then we add hosts that are
// already covered by the wildcard to the skip list
if addressQualifiesForTLS && srv.AutoHTTPS != nil && srv.AutoHTTPS.PreferWildcard {
baseDomain := addr.Host
if idx := strings.Index(baseDomain, "."); idx != -1 {
baseDomain = baseDomain[idx+1:]
}
if !strings.HasPrefix(addr.Host, "*.") && slices.Contains(wildcardHosts, baseDomain) {
srv.AutoHTTPS.SkipCerts = append(srv.AutoHTTPS.SkipCerts, addr.Host)
}
}
// predict whether auto-HTTPS will add the conn policy for us; if so, we
// may not need to add one for this server
autoHTTPSWillAddConnPolicy = autoHTTPSWillAddConnPolicy &&

@@ -1061,40 +1083,11 @@ func consolidateConnPolicies(cps caddytls.ConnectionPolicies) (caddytls.Connecti
// if they're exactly equal in every way, just keep one of them
if reflect.DeepEqual(cps[i], cps[j]) {
cps = slices.Delete(cps, j, j+1)
cps = append(cps[:j], cps[j+1:]...)
i--
break
}
// as a special case, if there are adjacent TLS conn policies that are identical except
// by their matchers, and the matchers are specifically just ServerName ("sni") matchers
// (by far the most common), we can combine them into a single policy
if i == j-1 && len(cps[i].MatchersRaw) == 1 && len(cps[j].MatchersRaw) == 1 {
if iSNIMatcherJSON, ok := cps[i].MatchersRaw["sni"]; ok {
if jSNIMatcherJSON, ok := cps[j].MatchersRaw["sni"]; ok {
// position of policies and the matcher criteria check out; if settings are
// the same, then we can combine the policies; we have to unmarshal and
// remarshal the matchers though
if cps[i].SettingsEqual(*cps[j]) {
var iSNIMatcher caddytls.MatchServerName
if err := json.Unmarshal(iSNIMatcherJSON, &iSNIMatcher); err == nil {
var jSNIMatcher caddytls.MatchServerName
if err := json.Unmarshal(jSNIMatcherJSON, &jSNIMatcher); err == nil {
iSNIMatcher = append(iSNIMatcher, jSNIMatcher...)
cps[i].MatchersRaw["sni"], err = json.Marshal(iSNIMatcher)
if err != nil {
return nil, fmt.Errorf("recombining SNI matchers: %v", err)
}
cps = slices.Delete(cps, j, j+1)
i--
break
}
}
}
}
}
}
// if they have the same matcher, try to reconcile each field: either they must
// be identical, or we have to be able to combine them safely
if reflect.DeepEqual(cps[i].MatchersRaw, cps[j].MatchersRaw) {

@@ -1196,13 +1189,12 @@ func consolidateConnPolicies(cps caddytls.ConnectionPolicies) (caddytls.Connecti
}
}
cps = slices.Delete(cps, j, j+1)
cps = append(cps[:j], cps[j+1:]...)
i--
break
}
}
}
return cps, nil
}
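Aside: the consolidateConnPolicies hunk merges adjacent connection policies whose only difference is their "sni" matcher. Reduced to plain string slices (not caddytls types), the merge amounts to a union of the server-name lists; a minimal sketch:

package main

import "fmt"

// mergeSNI is a hypothetical stand-in: two policies matching a and b
// collapse into one policy matching their concatenation.
func mergeSNI(a, b []string) []string {
	return append(append([]string{}, a...), b...)
}

func main() {
	fmt.Println(mergeSNI([]string{"a.example.com"}, []string{"b.example.com"}))
	// [a.example.com b.example.com] -- one policy instead of two
}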
@@ -92,9 +92,11 @@ func (st ServerType) buildTLSApp(
tlsApp.Automation.Policies = append(tlsApp.Automation.Policies, catchAllAP)
}
var wildcardHosts []string // collect all hosts that have a wildcard in them, and aren't HTTP
forcedAutomatedNames := make(map[string]struct{}) // explicitly configured to be automated, even if covered by a wildcard
// collect all hosts that have a wildcard in them, and arent HTTP
wildcardHosts := []string{}
// hosts that have been explicitly marked to be automated,
// even if covered by another wildcard
forcedAutomatedNames := make(map[string]struct{})
for _, p := range pairings {
var addresses []string
for _, addressWithProtocols := range p.addressesWithProtocols {

@@ -151,7 +153,7 @@ func (st ServerType) buildTLSApp(
ap.OnDemand = true
}
// collect hosts that are forced to have certs automated for their specific name
// collect hosts that are forced to be automated
if _, ok := sblock.pile["tls.force_automate"]; ok {
for _, host := range sblockHosts {
forcedAutomatedNames[host] = struct{}{}

@@ -373,9 +375,7 @@ func (st ServerType) buildTLSApp(
return nil, warnings, err
}
for _, cfg := range ech.Configs {
if cfg.PublicName != "" {
ap.SubjectsRaw = append(ap.SubjectsRaw, cfg.PublicName)
}
ap.SubjectsRaw = append(ap.SubjectsRaw, cfg.PublicName)
}
if tlsApp.Automation == nil {
tlsApp.Automation = new(caddytls.AutomationConfig)
@@ -281,7 +281,7 @@ func validateTestPrerequisites(tc *Tester) error {
tc.t.Cleanup(func() {
os.Remove(f.Name())
})
if _, err := fmt.Fprintf(f, initConfig, tc.config.AdminPort); err != nil {
if _, err := f.WriteString(fmt.Sprintf(initConfig, tc.config.AdminPort)); err != nil {
return err
}

@@ -12,14 +12,13 @@ import (
"strings"
"testing"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddytest"
"github.com/mholt/acmez/v3"
"github.com/mholt/acmez/v3/acme"
smallstepacme "github.com/smallstep/certificates/acme"
"go.uber.org/zap"
"go.uber.org/zap/exp/zapslog"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddytest"
)
const acmeChallengePort = 9081

@@ -9,12 +9,11 @@ import (
"strings"
"testing"
"github.com/caddyserver/caddy/v2/caddytest"
"github.com/mholt/acmez/v3"
"github.com/mholt/acmez/v3/acme"
"go.uber.org/zap"
"go.uber.org/zap/exp/zapslog"
"github.com/caddyserver/caddy/v2/caddytest"
)
func TestACMEServerDirectory(t *testing.T) {
@ -1,72 +0,0 @@
|
||||
{
|
||||
pki {
|
||||
ca custom-ca {
|
||||
name "Custom CA"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
acme.example.com {
|
||||
acme_server {
|
||||
ca custom-ca
|
||||
allow {
|
||||
domains host-1.internal.example.com host-2.internal.example.com
|
||||
}
|
||||
}
|
||||
}
|
||||
----------
|
||||
{
|
||||
"apps": {
|
||||
"http": {
|
||||
"servers": {
|
||||
"srv0": {
|
||||
"listen": [
|
||||
":443"
|
||||
],
|
||||
"routes": [
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"acme.example.com"
|
||||
]
|
||||
}
|
||||
],
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"ca": "custom-ca",
|
||||
"handler": "acme_server",
|
||||
"policy": {
|
||||
"allow": {
|
||||
"domains": [
|
||||
"host-1.internal.example.com",
|
||||
"host-2.internal.example.com"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"pki": {
|
||||
"certificate_authorities": {
|
||||
"custom-ca": {
|
||||
"name": "Custom CA"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -1,80 +0,0 @@
|
||||
{
|
||||
pki {
|
||||
ca custom-ca {
|
||||
name "Custom CA"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
acme.example.com {
|
||||
acme_server {
|
||||
ca custom-ca
|
||||
allow {
|
||||
domains host-1.internal.example.com host-2.internal.example.com
|
||||
}
|
||||
deny {
|
||||
domains dc.internal.example.com
|
||||
}
|
||||
}
|
||||
}
|
||||
----------
|
||||
{
|
||||
"apps": {
|
||||
"http": {
|
||||
"servers": {
|
||||
"srv0": {
|
||||
"listen": [
|
||||
":443"
|
||||
],
|
||||
"routes": [
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"acme.example.com"
|
||||
]
|
||||
}
|
||||
],
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"ca": "custom-ca",
|
||||
"handler": "acme_server",
|
||||
"policy": {
|
||||
"allow": {
|
||||
"domains": [
|
||||
"host-1.internal.example.com",
|
||||
"host-2.internal.example.com"
|
||||
]
|
||||
},
|
||||
"deny": {
|
||||
"domains": [
|
||||
"dc.internal.example.com"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"pki": {
|
||||
"certificate_authorities": {
|
||||
"custom-ca": {
|
||||
"name": "Custom CA"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -1,71 +0,0 @@
|
||||
{
|
||||
pki {
|
||||
ca custom-ca {
|
||||
name "Custom CA"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
acme.example.com {
|
||||
acme_server {
|
||||
ca custom-ca
|
||||
deny {
|
||||
domains dc.internal.example.com
|
||||
}
|
||||
}
|
||||
}
|
||||
----------
|
||||
{
|
||||
"apps": {
|
||||
"http": {
|
||||
"servers": {
|
||||
"srv0": {
|
||||
"listen": [
|
||||
":443"
|
||||
],
|
||||
"routes": [
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"acme.example.com"
|
||||
]
|
||||
}
|
||||
],
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"ca": "custom-ca",
|
||||
"handler": "acme_server",
|
||||
"policy": {
|
||||
"deny": {
|
||||
"domains": [
|
||||
"dc.internal.example.com"
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
},
|
||||
"pki": {
|
||||
"certificate_authorities": {
|
||||
"custom-ca": {
|
||||
"name": "Custom CA"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -0,0 +1,109 @@
|
||||
{
|
||||
auto_https prefer_wildcard
|
||||
}
|
||||
|
||||
*.example.com {
|
||||
tls {
|
||||
dns mock
|
||||
}
|
||||
respond "fallback"
|
||||
}
|
||||
|
||||
foo.example.com {
|
||||
respond "foo"
|
||||
}
|
||||
----------
|
||||
{
|
||||
"apps": {
|
||||
"http": {
|
||||
"servers": {
|
||||
"srv0": {
|
||||
"listen": [
|
||||
":443"
|
||||
],
|
||||
"routes": [
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"foo.example.com"
|
||||
]
|
||||
}
|
||||
],
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "foo",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
},
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"*.example.com"
|
||||
]
|
||||
}
|
||||
],
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "fallback",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
}
|
||||
],
|
||||
"automatic_https": {
|
||||
"skip_certificates": [
|
||||
"foo.example.com"
|
||||
],
|
||||
"prefer_wildcard": true
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"tls": {
|
||||
"automation": {
|
||||
"policies": [
|
||||
{
|
||||
"subjects": [
|
||||
"*.example.com"
|
||||
],
|
||||
"issuers": [
|
||||
{
|
||||
"challenges": {
|
||||
"dns": {
|
||||
"provider": {
|
||||
"name": "mock"
|
||||
}
|
||||
}
|
||||
},
|
||||
"module": "acme"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -0,0 +1,268 @@
|
||||
{
|
||||
auto_https prefer_wildcard
|
||||
}
|
||||
|
||||
# Covers two domains
|
||||
*.one.example.com {
|
||||
tls {
|
||||
dns mock
|
||||
}
|
||||
respond "one fallback"
|
||||
}
|
||||
|
||||
# Is covered, should not get its own AP
|
||||
foo.one.example.com {
|
||||
respond "foo one"
|
||||
}
|
||||
|
||||
# This one has its own tls config so it doesn't get covered (escape hatch)
|
||||
bar.one.example.com {
|
||||
respond "bar one"
|
||||
tls bar@bar.com
|
||||
}
|
||||
|
||||
# Covers nothing but AP gets consolidated with the first
|
||||
*.two.example.com {
|
||||
tls {
|
||||
dns mock
|
||||
}
|
||||
respond "two fallback"
|
||||
}
|
||||
|
||||
# Is HTTP so it should not cover
|
||||
http://*.three.example.com {
|
||||
respond "three fallback"
|
||||
}
|
||||
|
||||
# Has no wildcard coverage so it gets an AP
|
||||
foo.three.example.com {
|
||||
respond "foo three"
|
||||
}
|
||||
----------
|
||||
{
|
||||
"apps": {
|
||||
"http": {
|
||||
"servers": {
|
||||
"srv0": {
|
||||
"listen": [
|
||||
":443"
|
||||
],
|
||||
"routes": [
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"foo.three.example.com"
|
||||
]
|
||||
}
|
||||
],
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "foo three",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
},
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"foo.one.example.com"
|
||||
]
|
||||
}
|
||||
],
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "foo one",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
},
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"bar.one.example.com"
|
||||
]
|
||||
}
|
||||
],
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "bar one",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
},
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"*.one.example.com"
|
||||
]
|
||||
}
|
||||
],
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "one fallback",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
},
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"*.two.example.com"
|
||||
]
|
||||
}
|
||||
],
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "two fallback",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
}
|
||||
],
|
||||
"automatic_https": {
|
||||
"skip_certificates": [
|
||||
"foo.one.example.com",
|
||||
"bar.one.example.com"
|
||||
],
|
||||
"prefer_wildcard": true
|
||||
}
|
||||
},
|
||||
"srv1": {
|
||||
"listen": [
|
||||
":80"
|
||||
],
|
||||
"routes": [
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"*.three.example.com"
|
||||
]
|
||||
}
|
||||
],
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "three fallback",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
}
|
||||
],
|
||||
"automatic_https": {
|
||||
"prefer_wildcard": true
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"tls": {
|
||||
"automation": {
|
||||
"policies": [
|
||||
{
|
||||
"subjects": [
|
||||
"foo.three.example.com"
|
||||
]
|
||||
},
|
||||
{
|
||||
"subjects": [
|
||||
"bar.one.example.com"
|
||||
],
|
||||
"issuers": [
|
||||
{
|
||||
"email": "bar@bar.com",
|
||||
"module": "acme"
|
||||
},
|
||||
{
|
||||
"ca": "https://acme.zerossl.com/v2/DV90",
|
||||
"email": "bar@bar.com",
|
||||
"module": "acme"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"subjects": [
|
||||
"*.one.example.com",
|
||||
"*.two.example.com"
|
||||
],
|
||||
"issuers": [
|
||||
{
|
||||
"challenges": {
|
||||
"dns": {
|
||||
"provider": {
|
||||
"name": "mock"
|
||||
}
|
||||
}
|
||||
},
|
||||
"module": "acme"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -106,29 +106,20 @@ example.com {
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"group": "group0",
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"group": "group0",
|
||||
"handle": [
|
||||
{
|
||||
"handler": "rewrite",
|
||||
"uri": "/{http.error.status_code}.html"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"handler": "file_server",
|
||||
"hide": [
|
||||
"./Caddyfile"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
"handler": "rewrite",
|
||||
"uri": "/{http.error.status_code}.html"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"handler": "file_server",
|
||||
"hide": [
|
||||
"./Caddyfile"
|
||||
]
|
||||
}
|
||||
]
|
||||
|
@ -165,17 +165,8 @@ bar.localhost {
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "404 or 410 error",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
"body": "404 or 410 error",
|
||||
"handler": "static_response"
|
||||
}
|
||||
],
|
||||
"match": [
|
||||
@ -187,17 +178,8 @@ bar.localhost {
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "Error In range [500 .. 599]",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
"body": "Error In range [500 .. 599]",
|
||||
"handler": "static_response"
|
||||
}
|
||||
],
|
||||
"match": [
|
||||
@ -226,17 +208,8 @@ bar.localhost {
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "404 or 410 error from second site",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
"body": "404 or 410 error from second site",
|
||||
"handler": "static_response"
|
||||
}
|
||||
],
|
||||
"match": [
|
||||
@ -248,17 +221,8 @@ bar.localhost {
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "Error In range [500 .. 599] from second site",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
"body": "Error In range [500 .. 599] from second site",
|
||||
"handler": "static_response"
|
||||
}
|
||||
],
|
||||
"match": [
|
||||
|
@ -96,17 +96,8 @@ localhost:3010 {
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "Error in the [400 .. 499] range",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
"body": "Error in the [400 .. 499] range",
|
||||
"handler": "static_response"
|
||||
}
|
||||
],
|
||||
"match": [
|
||||
|
@ -116,17 +116,8 @@ localhost:2099 {
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "Error in the [400 .. 499] range",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
"body": "Error in the [400 .. 499] range",
|
||||
"handler": "static_response"
|
||||
}
|
||||
],
|
||||
"match": [
|
||||
@ -138,17 +129,8 @@ localhost:2099 {
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "Error code is equal to 500 or in the [300..399] range",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
"body": "Error code is equal to 500 or in the [300..399] range",
|
||||
"handler": "static_response"
|
||||
}
|
||||
],
|
||||
"match": [
|
||||
|
@ -96,17 +96,8 @@ localhost:3010 {
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "404 or 410 error",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
"body": "404 or 410 error",
|
||||
"handler": "static_response"
|
||||
}
|
||||
],
|
||||
"match": [
|
||||
|
@ -116,17 +116,8 @@ localhost:2099 {
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "Error in the [400 .. 499] range",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
"body": "Error in the [400 .. 499] range",
|
||||
"handler": "static_response"
|
||||
}
|
||||
],
|
||||
"match": [
|
||||
@ -138,17 +129,8 @@ localhost:2099 {
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "Fallback route: code outside the [400..499] range",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
"body": "Fallback route: code outside the [400..499] range",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
@ -1,260 +0,0 @@
|
||||
{
|
||||
http_port 2099
|
||||
}
|
||||
localhost:2099 {
|
||||
root * /var/www/
|
||||
file_server
|
||||
|
||||
handle_errors 404 {
|
||||
handle /en/* {
|
||||
respond "not found" 404
|
||||
}
|
||||
handle /es/* {
|
||||
respond "no encontrado"
|
||||
}
|
||||
handle {
|
||||
respond "default not found"
|
||||
}
|
||||
}
|
||||
handle_errors {
|
||||
handle /en/* {
|
||||
respond "English error"
|
||||
}
|
||||
handle /es/* {
|
||||
respond "Spanish error"
|
||||
}
|
||||
handle {
|
||||
respond "Default error"
|
||||
}
|
||||
}
|
||||
}
|
||||
----------
|
||||
{
|
||||
"apps": {
|
||||
"http": {
|
||||
"http_port": 2099,
|
||||
"servers": {
|
||||
"srv0": {
|
||||
"listen": [
|
||||
":2099"
|
||||
],
|
||||
"routes": [
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"localhost"
|
||||
]
|
||||
}
|
||||
],
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"handler": "vars",
|
||||
"root": "/var/www/"
|
||||
},
|
||||
{
|
||||
"handler": "file_server",
|
||||
"hide": [
|
||||
"./Caddyfile"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
}
|
||||
],
|
||||
"errors": {
|
||||
"routes": [
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"localhost"
|
||||
]
|
||||
}
|
||||
],
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"group": "group3",
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "not found",
|
||||
"handler": "static_response",
|
||||
"status_code": 404
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"match": [
|
||||
{
|
||||
"path": [
|
||||
"/en/*"
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"group": "group3",
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "no encontrado",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"match": [
|
||||
{
|
||||
"path": [
|
||||
"/es/*"
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"group": "group3",
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "default not found",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"match": [
|
||||
{
|
||||
"expression": "{http.error.status_code} in [404]"
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"group": "group8",
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "English error",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"match": [
|
||||
{
|
||||
"path": [
|
||||
"/en/*"
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"group": "group8",
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "Spanish error",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"match": [
|
||||
{
|
||||
"path": [
|
||||
"/es/*"
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"group": "group8",
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "Default error",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -131,7 +131,13 @@ shadowed.example.com {
|
||||
{
|
||||
"match": {
|
||||
"sni": [
|
||||
"automated1.example.com",
|
||||
"automated1.example.com"
|
||||
]
|
||||
}
|
||||
},
|
||||
{
|
||||
"match": {
|
||||
"sni": [
|
||||
"automated2.example.com"
|
||||
]
|
||||
}
|
||||
|
@ -1,87 +0,0 @@
|
||||
localhost
|
||||
|
||||
respond "hello from localhost"
|
||||
tls {
|
||||
client_auth {
|
||||
mode request
|
||||
trust_pool inline {
|
||||
trust_der MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ==
|
||||
}
|
||||
verifier leaf {
|
||||
file ../caddy.ca.cer
|
||||
}
|
||||
}
|
||||
}
|
||||
----------
|
||||
{
|
||||
"apps": {
|
||||
"http": {
|
||||
"servers": {
|
||||
"srv0": {
|
||||
"listen": [
|
||||
":443"
|
||||
],
|
||||
"routes": [
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"localhost"
|
||||
]
|
||||
}
|
||||
],
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "hello from localhost",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
}
|
||||
],
|
||||
"tls_connection_policies": [
|
||||
{
|
||||
"match": {
|
||||
"sni": [
|
||||
"localhost"
|
||||
]
|
||||
},
|
||||
"client_authentication": {
|
||||
"ca": {
|
||||
"provider": "inline",
|
||||
"trusted_ca_certs": [
|
||||
"MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ=="
|
||||
]
|
||||
},
|
||||
"verifiers": [
|
||||
{
|
||||
"leaf_certs_loaders": [
|
||||
{
|
||||
"files": [
|
||||
"../caddy.ca.cer"
|
||||
],
|
||||
"loader": "file"
|
||||
}
|
||||
],
|
||||
"verifier": "leaf"
|
||||
}
|
||||
],
|
||||
"mode": "request"
|
||||
}
|
||||
},
|
||||
{}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -1,85 +0,0 @@
|
||||
localhost
|
||||
|
||||
respond "hello from localhost"
|
||||
tls {
|
||||
client_auth {
|
||||
mode request
|
||||
trust_pool inline {
|
||||
trust_der MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ==
|
||||
}
|
||||
verifier leaf file ../caddy.ca.cer
|
||||
}
|
||||
}
|
||||
----------
|
||||
{
|
||||
"apps": {
|
||||
"http": {
|
||||
"servers": {
|
||||
"srv0": {
|
||||
"listen": [
|
||||
":443"
|
||||
],
|
||||
"routes": [
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"localhost"
|
||||
]
|
||||
}
|
||||
],
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "hello from localhost",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
}
|
||||
],
|
||||
"tls_connection_policies": [
|
||||
{
|
||||
"match": {
|
||||
"sni": [
|
||||
"localhost"
|
||||
]
|
||||
},
|
||||
"client_authentication": {
|
||||
"ca": {
|
||||
"provider": "inline",
|
||||
"trusted_ca_certs": [
|
||||
"MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ=="
|
||||
]
|
||||
},
|
||||
"verifiers": [
|
||||
{
|
||||
"leaf_certs_loaders": [
|
||||
{
|
||||
"files": [
|
||||
"../caddy.ca.cer"
|
||||
],
|
||||
"loader": "file"
|
||||
}
|
||||
],
|
||||
"verifier": "leaf"
|
||||
}
|
||||
],
|
||||
"mode": "request"
|
||||
}
|
||||
},
|
||||
{}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -1,94 +0,0 @@
|
||||
localhost
|
||||
|
||||
respond "hello from localhost"
|
||||
tls {
|
||||
client_auth {
|
||||
mode request
|
||||
trust_pool inline {
|
||||
trust_der MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ==
|
||||
}
|
||||
verifier leaf {
|
||||
file ../caddy.ca.cer
|
||||
file ../caddy.ca.cer
|
||||
}
|
||||
}
|
||||
}
|
||||
----------
|
||||
{
|
||||
"apps": {
|
||||
"http": {
|
||||
"servers": {
|
||||
"srv0": {
|
||||
"listen": [
|
||||
":443"
|
||||
],
|
||||
"routes": [
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"localhost"
|
||||
]
|
||||
}
|
||||
],
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "hello from localhost",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
}
|
||||
],
|
||||
"tls_connection_policies": [
|
||||
{
|
||||
"match": {
|
||||
"sni": [
|
||||
"localhost"
|
||||
]
|
||||
},
|
||||
"client_authentication": {
|
||||
"ca": {
|
||||
"provider": "inline",
|
||||
"trusted_ca_certs": [
|
||||
"MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ=="
|
||||
]
|
||||
},
|
||||
"verifiers": [
|
||||
{
|
||||
"leaf_certs_loaders": [
|
||||
{
|
||||
"files": [
|
||||
"../caddy.ca.cer"
|
||||
],
|
||||
"loader": "file"
|
||||
},
|
||||
{
|
||||
"files": [
|
||||
"../caddy.ca.cer"
|
||||
],
|
||||
"loader": "file"
|
||||
}
|
||||
],
|
||||
"verifier": "leaf"
|
||||
}
|
||||
],
|
||||
"mode": "request"
|
||||
}
|
||||
},
|
||||
{}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -1,87 +0,0 @@
|
||||
localhost
|
||||
|
||||
respond "hello from localhost"
|
||||
tls {
|
||||
client_auth {
|
||||
mode request
|
||||
trust_pool inline {
|
||||
trust_der MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ==
|
||||
}
|
||||
verifier leaf {
|
||||
folder ../
|
||||
}
|
||||
}
|
||||
}
|
||||
----------
|
||||
{
|
||||
"apps": {
|
||||
"http": {
|
||||
"servers": {
|
||||
"srv0": {
|
||||
"listen": [
|
||||
":443"
|
||||
],
|
||||
"routes": [
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"localhost"
|
||||
]
|
||||
}
|
||||
],
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "hello from localhost",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
}
|
||||
],
|
||||
"tls_connection_policies": [
|
||||
{
|
||||
"match": {
|
||||
"sni": [
|
||||
"localhost"
|
||||
]
|
||||
},
|
||||
"client_authentication": {
|
||||
"ca": {
|
||||
"provider": "inline",
|
||||
"trusted_ca_certs": [
|
||||
"MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ=="
|
||||
]
|
||||
},
|
||||
"verifiers": [
|
||||
{
|
||||
"leaf_certs_loaders": [
|
||||
{
|
||||
"folders": [
|
||||
"../"
|
||||
],
|
||||
"loader": "folder"
|
||||
}
|
||||
],
|
||||
"verifier": "leaf"
|
||||
}
|
||||
],
|
||||
"mode": "request"
|
||||
}
|
||||
},
|
||||
{}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -1,85 +0,0 @@
|
||||
localhost
|
||||
|
||||
respond "hello from localhost"
|
||||
tls {
|
||||
client_auth {
|
||||
mode request
|
||||
trust_pool inline {
|
||||
trust_der MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ==
|
||||
}
|
||||
verifier leaf folder ../
|
||||
}
|
||||
}
|
||||
----------
|
||||
{
|
||||
"apps": {
|
||||
"http": {
|
||||
"servers": {
|
||||
"srv0": {
|
||||
"listen": [
|
||||
":443"
|
||||
],
|
||||
"routes": [
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"localhost"
|
||||
]
|
||||
}
|
||||
],
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "hello from localhost",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
}
|
||||
],
|
||||
"tls_connection_policies": [
|
||||
{
|
||||
"match": {
|
||||
"sni": [
|
||||
"localhost"
|
||||
]
|
||||
},
|
||||
"client_authentication": {
|
||||
"ca": {
|
||||
"provider": "inline",
|
||||
"trusted_ca_certs": [
|
||||
"MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ=="
|
||||
]
|
||||
},
|
||||
"verifiers": [
|
||||
{
|
||||
"leaf_certs_loaders": [
|
||||
{
|
||||
"folders": [
|
||||
"../"
|
||||
],
|
||||
"loader": "folder"
|
||||
}
|
||||
],
|
||||
"verifier": "leaf"
|
||||
}
|
||||
],
|
||||
"mode": "request"
|
||||
}
|
||||
},
|
||||
{}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -1,94 +0,0 @@
|
||||
localhost
|
||||
|
||||
respond "hello from localhost"
|
||||
tls {
|
||||
client_auth {
|
||||
mode request
|
||||
trust_pool inline {
|
||||
trust_der MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ==
|
||||
}
|
||||
verifier leaf {
|
||||
folder ../
|
||||
folder ../
|
||||
}
|
||||
}
|
||||
}
|
||||
----------
|
||||
{
|
||||
"apps": {
|
||||
"http": {
|
||||
"servers": {
|
||||
"srv0": {
|
||||
"listen": [
|
||||
":443"
|
||||
],
|
||||
"routes": [
|
||||
{
|
||||
"match": [
|
||||
{
|
||||
"host": [
|
||||
"localhost"
|
||||
]
|
||||
}
|
||||
],
|
||||
"handle": [
|
||||
{
|
||||
"handler": "subroute",
|
||||
"routes": [
|
||||
{
|
||||
"handle": [
|
||||
{
|
||||
"body": "hello from localhost",
|
||||
"handler": "static_response"
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"terminal": true
|
||||
}
|
||||
],
|
||||
"tls_connection_policies": [
|
||||
{
|
||||
"match": {
|
||||
"sni": [
|
||||
"localhost"
|
||||
]
|
||||
},
|
||||
"client_authentication": {
|
||||
"ca": {
|
||||
"provider": "inline",
|
||||
"trusted_ca_certs": [
|
||||
"MIIDSzCCAjOgAwIBAgIUfIRObjWNUA4jxQ/0x8BOCvE2Vw4wDQYJKoZIhvcNAQELBQAwFjEUMBIGA1UEAwwLRWFzeS1SU0EgQ0EwHhcNMTkwODI4MTYyNTU5WhcNMjkwODI1MTYyNTU5WjAWMRQwEgYDVQQDDAtFYXN5LVJTQSBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAK5m5elxhQfMp/3aVJ4JnpN9PUSz6LlP6LePAPFU7gqohVVFVtDkChJAG3FNkNQNlieVTja/bgH9IcC6oKbROwdY1h0MvNV8AHHigvl03WuJD8g2ReVFXXwsnrPmKXCFzQyMI6TYk3m2gYrXsZOU1GLnfMRC3KAMRgE2F45twOs9hqG169YJ6mM2eQjzjCHWI6S2/iUYvYxRkCOlYUbLsMD/AhgAf1plzg6LPqNxtdlwxZnA0ytgkmhK67HtzJu0+ovUCsMv0RwcMhsEo9T8nyFAGt9XLZ63X5WpBCTUApaAUhnG0XnerjmUWb6eUWw4zev54sEfY5F3x002iQaW6cECAwEAAaOBkDCBjTAdBgNVHQ4EFgQU4CBUbZsS2GaNIkGRz/cBsD5ivjswUQYDVR0jBEowSIAU4CBUbZsS2GaNIkGRz/cBsD5ivjuhGqQYMBYxFDASBgNVBAMMC0Vhc3ktUlNBIENBghR8hE5uNY1QDiPFD/THwE4K8TZXDjAMBgNVHRMEBTADAQH/MAsGA1UdDwQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAQEAKB3V4HIzoiO/Ch6WMj9bLJ2FGbpkMrcb/Eq01hT5zcfKD66lVS1MlK+cRL446Z2b2KDP1oFyVs+qmrmtdwrWgD+nfe2sBmmIHo9m9KygMkEOfG3MghGTEcS+0cTKEcoHYWYyOqQh6jnedXY8Cdm4GM1hAc9MiL3/sqV8YCVSLNnkoNysmr06/rZ0MCUZPGUtRmfd0heWhrfzAKw2HLgX+RAmpOE2MZqWcjvqKGyaRiaZks4nJkP6521aC2Lgp0HhCz1j8/uQ5ldoDszCnu/iro0NAsNtudTMD+YoLQxLqdleIh6CW+illc2VdXwj7mn6J04yns9jfE2jRjW/yTLFuQ=="
|
||||
]
|
||||
},
|
||||
"verifiers": [
|
||||
{
|
||||
"leaf_certs_loaders": [
|
||||
{
|
||||
"folders": [
|
||||
"../"
|
||||
],
|
||||
"loader": "folder"
|
||||
},
|
||||
{
|
||||
"folders": [
|
||||
"../"
|
||||
],
|
||||
"loader": "folder"
|
||||
}
|
||||
],
|
||||
"verifier": "leaf"
|
||||
}
|
||||
],
|
||||
"mode": "request"
|
||||
}
|
||||
},
|
||||
{}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
@ -10,6 +10,7 @@ import (
"testing"

"github.com/caddyserver/caddy/v2/caddytest"

_ "github.com/caddyserver/caddy/v2/internal/testmocks"
)

@ -615,6 +615,7 @@ func TestReplaceWithReplacementPlaceholder(t *testing.T) {
respond "{query}"`, "caddyfile")

tester.AssertGetResponse("http://localhost:9080/endpoint?placeholder=baz&foo=bar", 200, "foo=baz&placeholder=baz")

}

func TestReplaceWithKeyPlaceholder(t *testing.T) {
|
||||
@ -782,46 +783,6 @@ func TestHandleErrorRangeAndCodes(t *testing.T) {
tester.AssertGetResponse("http://localhost:9080/private", 410, "Error in the [400 .. 499] range")
}

func TestHandleErrorSubHandlers(t *testing.T) {
tester := caddytest.NewTester(t)
tester.InitServer(`{
admin localhost:2999
http_port 9080
}
localhost:9080 {
root * /srv
file_server
error /*/internalerr* "Internal Server Error" 500

handle_errors 404 {
handle /en/* {
respond "not found" 404
}
handle /es/* {
respond "no encontrado" 404
}
handle {
respond "default not found"
}
}
handle_errors {
handle {
respond "Default error"
}
handle /en/* {
respond "English error"
}
}
}
`, "caddyfile")
// act and assert
tester.AssertGetResponse("http://localhost:9080/en/notfound", 404, "not found")
tester.AssertGetResponse("http://localhost:9080/es/notfound", 404, "no encontrado")
tester.AssertGetResponse("http://localhost:9080/notfound", 404, "default not found")
tester.AssertGetResponse("http://localhost:9080/es/internalerr", 500, "Default error")
tester.AssertGetResponse("http://localhost:9080/en/internalerr", 500, "English error")
}

func TestInvalidSiteAddressesAsDirectives(t *testing.T) {
type testCase struct {
config, expectedError string
||||
|
@ -3,11 +3,10 @@ package integration
import (
"context"

"github.com/caddyserver/certmagic"
"github.com/libdns/libdns"

"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
"github.com/caddyserver/certmagic"
"github.com/libdns/libdns"
)

func init() {
@ -56,9 +55,7 @@ func (MockDNSProvider) SetRecords(ctx context.Context, zone string, recs []libdn
}

// Interface guard
var (
_ caddyfile.Unmarshaler = (*MockDNSProvider)(nil)
_ certmagic.DNSProvider = (*MockDNSProvider)(nil)
_ caddy.Provisioner = (*MockDNSProvider)(nil)
_ caddy.Module = (*MockDNSProvider)(nil)
)
var _ caddyfile.Unmarshaler = (*MockDNSProvider)(nil)
var _ certmagic.DNSProvider = (*MockDNSProvider)(nil)
var _ caddy.Provisioner = (*MockDNSProvider)(nil)
var _ caddy.Module = (*MockDNSProvider)(nil)
|
||||
|
@ -13,10 +13,9 @@ import (
"testing"
"time"

"github.com/caddyserver/caddy/v2/caddytest"
"golang.org/x/net/http2"
"golang.org/x/net/http2/h2c"

"github.com/caddyserver/caddy/v2/caddytest"
)

// (see https://github.com/caddyserver/caddy/issues/3556 for use case)
|
||||
|
@ -24,7 +24,6 @@ import (
"io"
"io/fs"
"log"
"maps"
"net"
"net/http"
"os"
@ -704,7 +703,9 @@ func AdminAPIRequest(adminAddr, method, uri string, headers http.Header, body io
if body != nil {
req.Header.Set("Content-Type", "application/json")
}
maps.Copy(req.Header, headers)
for k, v := range headers {
req.Header[k] = v
}

// make an HTTP client that dials our network type, since admin
// endpoints aren't always TCP, which is what the default transport
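In the hunk above, `maps.Copy` replaces the hand-written header-copy loop; both overwrite existing keys and insert missing ones. A small illustration of the equivalence (the admin URL and header values here are only examples):

```go
package main

import (
	"fmt"
	"maps"
	"net/http"
)

func main() {
	req, err := http.NewRequest(http.MethodGet, "http://localhost:2019/config/", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")

	extra := http.Header{"X-Request-Id": {"abc123"}}

	// maps.Copy does exactly what the old loop did: for each key in extra,
	// req.Header[k] = v, overwriting any existing value for that key.
	maps.Copy(req.Header, extra)

	fmt.Println(req.Header)
}
```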
|
||||
|
@ -20,7 +20,6 @@ import (
"os"
"regexp"
"strings"
"sync"

"github.com/spf13/cobra"
"github.com/spf13/cobra/doc"
@ -81,16 +80,10 @@ type CommandFunc func(Flags) (int, error)
// Commands returns a list of commands initialised by
// RegisterCommand
func Commands() map[string]Command {
commandsMu.RLock()
defer commandsMu.RUnlock()

return commands
}

var (
commandsMu sync.RWMutex
commands = make(map[string]Command)
)
var commands = make(map[string]Command)

func init() {
RegisterCommand(Command{
|
||||
@ -448,7 +441,7 @@ EXPERIMENTAL: May be changed or removed.
|
||||
})
|
||||
|
||||
defaultFactory.Use(func(rootCmd *cobra.Command) {
|
||||
manpageCommand := Command{
|
||||
rootCmd.AddCommand(caddyCmdToCobra(Command{
|
||||
Name: "manpage",
|
||||
Usage: "--directory <path>",
|
||||
Short: "Generates the manual pages for Caddy commands",
|
||||
@ -478,12 +471,11 @@ argument of --directory. If the directory does not exist, it will be created.
|
||||
return caddy.ExitCodeSuccess, nil
|
||||
})
|
||||
},
|
||||
}
|
||||
}))
|
||||
|
||||
// source: https://github.com/spf13/cobra/blob/main/shell_completions.md
|
||||
completionCommand := Command{
|
||||
Name: "completion",
|
||||
Usage: "[bash|zsh|fish|powershell]",
|
||||
rootCmd.AddCommand(&cobra.Command{
|
||||
Use: "completion [bash|zsh|fish|powershell]",
|
||||
Short: "Generate completion script",
|
||||
Long: fmt.Sprintf(`To load completions:
|
||||
|
||||
@ -524,37 +516,24 @@ argument of --directory. If the directory does not exist, it will be created.
|
||||
PS> %[1]s completion powershell > %[1]s.ps1
|
||||
# and source this file from your PowerShell profile.
|
||||
`, rootCmd.Root().Name()),
|
||||
CobraFunc: func(cmd *cobra.Command) {
|
||||
cmd.DisableFlagsInUseLine = true
|
||||
cmd.ValidArgs = []string{"bash", "zsh", "fish", "powershell"}
|
||||
cmd.Args = cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs)
|
||||
cmd.RunE = func(cmd *cobra.Command, args []string) error {
|
||||
switch args[0] {
|
||||
case "bash":
|
||||
return cmd.Root().GenBashCompletion(os.Stdout)
|
||||
case "zsh":
|
||||
return cmd.Root().GenZshCompletion(os.Stdout)
|
||||
case "fish":
|
||||
return cmd.Root().GenFishCompletion(os.Stdout, true)
|
||||
case "powershell":
|
||||
return cmd.Root().GenPowerShellCompletionWithDesc(os.Stdout)
|
||||
default:
|
||||
return fmt.Errorf("unrecognized shell: %s", args[0])
|
||||
}
|
||||
DisableFlagsInUseLine: true,
|
||||
ValidArgs: []string{"bash", "zsh", "fish", "powershell"},
|
||||
Args: cobra.MatchAll(cobra.ExactArgs(1), cobra.OnlyValidArgs),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
switch args[0] {
|
||||
case "bash":
|
||||
return cmd.Root().GenBashCompletion(os.Stdout)
|
||||
case "zsh":
|
||||
return cmd.Root().GenZshCompletion(os.Stdout)
|
||||
case "fish":
|
||||
return cmd.Root().GenFishCompletion(os.Stdout, true)
|
||||
case "powershell":
|
||||
return cmd.Root().GenPowerShellCompletionWithDesc(os.Stdout)
|
||||
default:
|
||||
return fmt.Errorf("unrecognized shell: %s", args[0])
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
rootCmd.AddCommand(caddyCmdToCobra(manpageCommand))
|
||||
rootCmd.AddCommand(caddyCmdToCobra(completionCommand))
|
||||
|
||||
// add manpage and completion commands to the map of
|
||||
// available commands, because they're not registered
|
||||
// through RegisterCommand.
|
||||
commandsMu.Lock()
|
||||
commands[manpageCommand.Name] = manpageCommand
|
||||
commands[completionCommand.Name] = completionCommand
|
||||
commandsMu.Unlock()
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
@ -573,9 +552,6 @@ argument of --directory. If the directory does not exist, it will be created.
//
// This function should be used in init().
func RegisterCommand(cmd Command) {
commandsMu.Lock()
defer commandsMu.Unlock()

if cmd.Name == "" {
panic("command name is required")
}
@ -594,7 +570,6 @@ func RegisterCommand(cmd Command) {
defaultFactory.Use(func(rootCmd *cobra.Command) {
rootCmd.AddCommand(caddyCmdToCobra(cmd))
})
commands[cmd.Name] = cmd
}

var commandNameRegex = regexp.MustCompile(`^[a-z0-9]$|^([a-z0-9]+-?[a-z0-9]*)+[a-z0-9]$`)
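The hunks above guard the package-level `commands` map with a `sync.RWMutex`: `RegisterCommand` takes the write lock and `Commands()` the read lock, so init-time registration from other packages cannot race with readers. Below is a compact sketch of the same pattern; unlike the real `Commands()`, this version returns a defensive copy of the map, which is my own addition for the sketch.

```go
package main

import (
	"fmt"
	"sync"
)

type command struct{ Name, Short string }

var (
	commandsMu sync.RWMutex
	commands   = make(map[string]command)
)

// register mirrors the guarded write RegisterCommand now performs.
func register(cmd command) {
	commandsMu.Lock()
	defer commandsMu.Unlock()
	if cmd.Name == "" {
		panic("command name is required")
	}
	commands[cmd.Name] = cmd
}

// list mirrors Commands(), but hands back a snapshot copy under the read lock.
func list() map[string]command {
	commandsMu.RLock()
	defer commandsMu.RUnlock()
	out := make(map[string]command, len(commands))
	for k, v := range commands {
		out[k] = v
	}
	return out
}

func main() {
	register(command{Name: "manpage", Short: "Generates the manual pages"})
	register(command{Name: "completion", Short: "Generate completion script"})
	fmt.Println(len(list()), "commands registered")
}
```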
|
||||
|
@ -1,39 +0,0 @@
package caddycmd

import (
"maps"
"reflect"
"slices"
"testing"
)

func TestCommandsAreAvailable(t *testing.T) {
// trigger init, and build the default factory, so that
// all commands from this package are available
cmd := defaultFactory.Build()
if cmd == nil {
t.Fatal("default factory failed to build")
}

// check that the default factory has 17 commands; it doesn't
// include the commands registered through calls to init in
// other packages
cmds := Commands()
if len(cmds) != 17 {
t.Errorf("expected 17 commands, got %d", len(cmds))
}

commandNames := slices.Collect(maps.Keys(cmds))
slices.Sort(commandNames)

expectedCommandNames := []string{
"adapt", "add-package", "build-info", "completion",
"environ", "fmt", "list-modules", "manpage",
"reload", "remove-package", "run", "start",
"stop", "storage", "upgrade", "validate", "version",
}

if !reflect.DeepEqual(expectedCommandNames, commandNames) {
t.Errorf("expected %v, got %v", expectedCommandNames, commandNames)
}
}
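The removed test builds a sorted list of command names with the Go 1.23 iterator helpers. The same idiom in isolation, with a throwaway map standing in for the command registry:

```go
package main

import (
	"fmt"
	"maps"
	"slices"
)

func main() {
	cmds := map[string]string{"run": "...", "adapt": "...", "fmt": "..."}

	// maps.Keys yields an iterator over the keys; slices.Collect drains it
	// into a slice, and slices.Sort gives a deterministic order for asserts.
	names := slices.Collect(maps.Keys(cmds))
	slices.Sort(names)

	fmt.Println(names) // [adapt fmt run]
}
```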
|
@ -418,7 +418,7 @@ func parseEnvFile(envInput io.Reader) (map[string]string, error) {
// quoted value: support newlines
if strings.HasPrefix(val, `"`) || strings.HasPrefix(val, "'") {
quote := string(val[0])
for !strings.HasSuffix(line, quote) || strings.HasSuffix(line, `\`+quote) {
for !(strings.HasSuffix(line, quote) && !strings.HasSuffix(line, `\`+quote)) {
val = strings.ReplaceAll(val, `\`+quote, quote)
if !scanner.Scan() {
break
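The parseEnvFile change rewrites the loop condition for unterminated quoted values; by De Morgan's law the two forms are equivalent (keep reading while the line does not end in the closing quote, or ends in an escaped one). A quick standalone check of that predicate, with made-up input lines:

```go
package main

import (
	"fmt"
	"strings"
)

// unterminated reports whether the quoted value continues on the next line.
// !HasSuffix(line, q) || HasSuffix(line, `\`+q) is the same predicate as
// !(HasSuffix(line, q) && !HasSuffix(line, `\`+q)).
func unterminated(line, quote string) bool {
	return !strings.HasSuffix(line, quote) || strings.HasSuffix(line, `\`+quote)
}

func main() {
	for _, line := range []string{
		`FOO="multi line`,      // no closing quote yet  -> keep reading
		`ends with escaped \"`, // escaped closing quote -> keep reading
		`done"`,                // real closing quote    -> stop
	} {
		fmt.Printf("%-25q keep reading: %v\n", line, unterminated(line, `"`))
	}
}
```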
|
||||
|
@ -235,6 +235,7 @@ func Test_isCaddyfile(t *testing.T) {
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
|
||||
name: "json is not caddyfile but not error",
|
||||
args: args{
|
||||
configFile: "./Caddyfile.json",
|
||||
@ -244,6 +245,7 @@ func Test_isCaddyfile(t *testing.T) {
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
|
||||
name: "prefix of Caddyfile and ./ with any extension is Caddyfile",
|
||||
args: args{
|
||||
configFile: "./Caddyfile.prd",
|
||||
@ -253,6 +255,7 @@ func Test_isCaddyfile(t *testing.T) {
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
|
||||
name: "prefix of Caddyfile without ./ with any extension is Caddyfile",
|
||||
args: args{
|
||||
configFile: "Caddyfile.prd",
|
||||
|
@ -84,7 +84,7 @@ func cmdAddPackage(fl Flags) (int, error) {
return caddy.ExitCodeFailedStartup, fmt.Errorf("invalid module name: %v", err)
}
// only allow a version to be specified if it's different from the existing version
if _, ok := pluginPkgs[module]; ok && (version == "" || pluginPkgs[module].Version == version) {
if _, ok := pluginPkgs[module]; ok && !(version != "" && pluginPkgs[module].Version != version) {
return caddy.ExitCodeFailedStartup, fmt.Errorf("package is already added")
}
pluginPkgs[module] = pluginPackage{Version: version, Path: module}
|
||||
|
36 context.go
@ -91,14 +91,14 @@ func (ctx *Context) OnCancel(f func()) {
ctx.cleanupFuncs = append(ctx.cleanupFuncs, f)
}

// FileSystems returns a ref to the FilesystemMap.
// Filesystems returns a ref to the FilesystemMap.
// EXPERIMENTAL: This API is subject to change.
func (ctx *Context) FileSystems() FileSystems {
func (ctx *Context) Filesystems() FileSystems {
// if no config is loaded, we use a default filesystemmap, which includes the osfs
if ctx.cfg == nil {
return &filesystems.FileSystemMap{}
return &filesystems.FilesystemMap{}
}
return ctx.cfg.fileSystems
return ctx.cfg.filesystems
}

// Returns the active metrics registry for the context
@ -277,14 +277,6 @@ func (ctx Context) LoadModule(structPointer any, fieldName string) (any, error)
return result, nil
}

// emitEvent is a small convenience method so the caddy core can emit events, if the event app is configured.
func (ctx Context) emitEvent(name string, data map[string]any) Event {
if ctx.cfg == nil || ctx.cfg.eventEmitter == nil {
return Event{}
}
return ctx.cfg.eventEmitter.Emit(ctx, name, data)
}

// loadModulesFromSomeMap loads modules from val, which must be a type of map[string]any.
// Depending on inlineModuleKey, it will be interpreted as either a ModuleMap (key is the module
// name) or as a regular map (key is not the module name, and module name is defined inline).
@ -437,14 +429,6 @@ func (ctx Context) LoadModuleByID(id string, rawMsg json.RawMessage) (any, error

ctx.moduleInstances[id] = append(ctx.moduleInstances[id], val)

// if the loaded module happens to be an app that can emit events, store it so the
// core can have access to emit events without an import cycle
if ee, ok := val.(eventEmitter); ok {
if _, ok := ee.(App); ok {
ctx.cfg.eventEmitter = ee
}
}

return val, nil
}

@ -577,11 +561,11 @@ func (ctx Context) Slogger() *slog.Logger {
if err != nil {
panic("config missing, unable to create dev logger: " + err.Error())
}
return slog.New(zapslog.NewHandler(l.Core()))
return slog.New(zapslog.NewHandler(l.Core(), nil))
}
mod := ctx.Module()
if mod == nil {
return slog.New(zapslog.NewHandler(Log().Core()))
return slog.New(zapslog.NewHandler(Log().Core(), nil))
}
return slog.New(zapslog.NewHandler(ctx.cfg.Logging.Logger(mod).Core(),
zapslog.WithName(string(mod.CaddyModule().ID)),
@ -616,11 +600,3 @@ func (ctx *Context) WithValue(key, value any) Context {
exitFuncs: ctx.exitFuncs,
}
}

// eventEmitter is a small interface that inverts dependencies for
// the caddyevents package, so the core can emit events without an
// import cycle (i.e. the caddy package doesn't have to import
// the caddyevents package, which imports the caddy package).
type eventEmitter interface {
Emit(ctx Context, eventName string, data map[string]any) Event
}
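The context.go hunks above (present on the master side) add a small `eventEmitter` interface so the core can emit events without importing the events app, which would create an import cycle. Below is a self-contained sketch of that dependency inversion; the types here are stand-ins for illustration, not the real caddy.Event or app plumbing.

```go
package main

import "fmt"

// Event is a stand-in for the core's event type in this sketch.
type Event struct{ Name string }

// eventEmitter mirrors the small interface the core declares so it can emit
// events without importing the events app (which itself imports the core).
type eventEmitter interface {
	Emit(name string, data map[string]any) Event
}

// core holds whichever loaded app happens to satisfy eventEmitter.
type core struct{ emitter eventEmitter }

func (c *core) emitEvent(name string, data map[string]any) Event {
	if c.emitter == nil {
		return Event{} // events app not configured; emitting is a no-op
	}
	return c.emitter.Emit(name, data)
}

// eventsApp stands in for the separate events package; it only needs to
// implement the interface, and the core never names its concrete type.
type eventsApp struct{}

func (eventsApp) Emit(name string, data map[string]any) Event {
	fmt.Println("emitting", name, data)
	return Event{Name: name}
}

func main() {
	c := &core{}
	c.emitEvent("started", nil) // safe no-op before the app is loaded

	// when the module loads, the core stores it behind the interface
	c.emitter = eventsApp{}
	c.emitEvent("cert_obtained", map[string]any{"identifier": "example.com"})
}
```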
|
||||
|
12 go.mod
@ -8,18 +8,18 @@ require (
github.com/Masterminds/sprig/v3 v3.3.0
github.com/alecthomas/chroma/v2 v2.15.0
github.com/aryann/difflib v0.0.0-20210328193216-ff5ff6dc229b
github.com/caddyserver/certmagic v0.23.0
github.com/caddyserver/certmagic v0.22.2
github.com/caddyserver/zerossl v0.1.3
github.com/cloudflare/circl v1.6.1
github.com/cloudflare/circl v1.6.0
github.com/dustin/go-humanize v1.0.1
github.com/go-chi/chi/v5 v5.2.1
github.com/google/cel-go v0.24.1
github.com/google/uuid v1.6.0
github.com/klauspost/compress v1.18.0
github.com/klauspost/cpuid/v2 v2.2.10
github.com/mholt/acmez/v3 v3.1.2
github.com/mholt/acmez/v3 v3.1.1
github.com/prometheus/client_golang v1.19.1
github.com/quic-go/quic-go v0.51.0
github.com/quic-go/quic-go v0.50.1
github.com/smallstep/certificates v0.26.1
github.com/smallstep/nosql v0.6.1
github.com/smallstep/truststore v0.13.0
@ -39,7 +39,7 @@ require (
go.uber.org/zap/exp v0.3.0
golang.org/x/crypto v0.36.0
golang.org/x/crypto/x509roots/fallback v0.0.0-20250305170421-49bf5b80c810
golang.org/x/net v0.38.0
golang.org/x/net v0.37.0
golang.org/x/sync v0.12.0
golang.org/x/term v0.30.0
golang.org/x/time v0.11.0
@ -116,7 +116,7 @@ require (
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jackc/pgtype v1.14.0 // indirect
github.com/jackc/pgx/v4 v4.18.3 // indirect
github.com/libdns/libdns v1.0.0-beta.1
github.com/libdns/libdns v0.2.3
github.com/manifoldco/promptui v0.9.0 // indirect
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
|
24 go.sum
@ -93,8 +93,8 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
|
||||
github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
|
||||
github.com/caddyserver/certmagic v0.23.0 h1:CfpZ/50jMfG4+1J/u2LV6piJq4HOfO6ppOnOf7DkFEU=
|
||||
github.com/caddyserver/certmagic v0.23.0/go.mod h1:9mEZIWqqWoI+Gf+4Trh04MOVPD0tGSxtqsxg87hAIH4=
|
||||
github.com/caddyserver/certmagic v0.22.2 h1:qzZURXlrxwR5m25/jpvVeEyJHeJJMvAwe5zlMufOTQk=
|
||||
github.com/caddyserver/certmagic v0.22.2/go.mod h1:hbqE7BnkjhX5IJiFslPmrSeobSeZvI6ux8tyxhsd6qs=
|
||||
github.com/caddyserver/zerossl v0.1.3 h1:onS+pxp3M8HnHpN5MMbOMyNjmTheJyWRaZYwn+YTAyA=
|
||||
github.com/caddyserver/zerossl v0.1.3/go.mod h1:CxA0acn7oEGO6//4rtrRjYgEoa4MFw/XofZnrYwGqG4=
|
||||
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
|
||||
@ -113,8 +113,8 @@ github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMn
|
||||
github.com/chzyer/test v1.0.0 h1:p3BQDXSxOhOG0P9z6/hGnII4LGiEPOYBhs8asl/fC04=
|
||||
github.com/chzyer/test v1.0.0/go.mod h1:2JlltgoNkt4TW/z9V/IzDdFaMTM2JPIi26O1pF38GC8=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0=
|
||||
github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
|
||||
github.com/cloudflare/circl v1.6.0 h1:cr5JKic4HI+LkINy2lg3W2jF8sHCVTBncJr5gIIq7qk=
|
||||
github.com/cloudflare/circl v1.6.0/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
|
||||
github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
|
||||
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
|
||||
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
|
||||
@ -327,8 +327,8 @@ github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
|
||||
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/libdns/libdns v1.0.0-beta.1 h1:KIf4wLfsrEpXpZ3vmc/poM8zCATXT2klbdPe6hyOBjQ=
|
||||
github.com/libdns/libdns v1.0.0-beta.1/go.mod h1:4Bj9+5CQiNMVGf87wjX4CY3HQJypUHRuLvlsfsZqLWQ=
|
||||
github.com/libdns/libdns v0.2.3 h1:ba30K4ObwMGB/QTmqUxf3H4/GmUrCAIkMWejeGl12v8=
|
||||
github.com/libdns/libdns v0.2.3/go.mod h1:4Bj9+5CQiNMVGf87wjX4CY3HQJypUHRuLvlsfsZqLWQ=
|
||||
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
|
||||
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
@ -347,8 +347,8 @@ github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI=
|
||||
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
|
||||
github.com/mholt/acmez/v3 v3.1.2 h1:auob8J/0FhmdClQicvJvuDavgd5ezwLBfKuYmynhYzc=
|
||||
github.com/mholt/acmez/v3 v3.1.2/go.mod h1:L1wOU06KKvq7tswuMDwKdcHeKpFFgkppZy/y0DFxagQ=
|
||||
github.com/mholt/acmez/v3 v3.1.1 h1:Jh+9uKHkPxUJdxM16q5mOr+G2V0aqkuFtNA28ihCxhQ=
|
||||
github.com/mholt/acmez/v3 v3.1.1/go.mod h1:L1wOU06KKvq7tswuMDwKdcHeKpFFgkppZy/y0DFxagQ=
|
||||
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
|
||||
github.com/miekg/dns v1.1.63 h1:8M5aAw6OMZfFXTT7K5V0Eu5YiiL8l7nUAkyN6C9YwaY=
|
||||
github.com/miekg/dns v1.1.63/go.mod h1:6NGHfjhpmr5lt3XPLuyfDJi5AXbNIPM9PY6H6sF1Nfs=
|
||||
@ -397,8 +397,8 @@ github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k
|
||||
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
|
||||
github.com/quic-go/qpack v0.5.1 h1:giqksBPnT/HDtZ6VhtFKgoLOWmlyo9Ei6u9PqzIMbhI=
|
||||
github.com/quic-go/qpack v0.5.1/go.mod h1:+PC4XFrEskIVkcLzpEkbLqq1uCoxPhQuvK5rH1ZgaEg=
|
||||
github.com/quic-go/quic-go v0.51.0 h1:K8exxe9zXxeRKxaXxi/GpUqYiTrtdiWP8bo1KFya6Wc=
|
||||
github.com/quic-go/quic-go v0.51.0/go.mod h1:MFlGGpcpJqRAfmYi6NC2cptDPSxRWTOGNuP4wqrWmzQ=
|
||||
github.com/quic-go/quic-go v0.50.1 h1:unsgjFIUqW8a2oopkY7YNONpV1gYND6Nt9hnt1PN94Q=
|
||||
github.com/quic-go/quic-go v0.50.1/go.mod h1:Vim6OmUvlYdwBhXP9ZVrtGmCMWa3wEqhq3NgYrI8b4E=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
|
||||
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
|
||||
@ -633,8 +633,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
|
||||
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
|
||||
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
|
||||
golang.org/x/net v0.37.0 h1:1zLorHbz+LYj7MQlSf1+2tPIIgibq2eL5xkrGk6f+2c=
|
||||
golang.org/x/net v0.37.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
|
@ -7,10 +7,10 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
DefaultFileSystemKey = "default"
|
||||
DefaultFilesystemKey = "default"
|
||||
)
|
||||
|
||||
var DefaultFileSystem = &wrapperFs{key: DefaultFileSystemKey, FS: OsFS{}}
|
||||
var DefaultFilesystem = &wrapperFs{key: DefaultFilesystemKey, FS: OsFS{}}
|
||||
|
||||
// wrapperFs exists so can easily add to wrapperFs down the line
|
||||
type wrapperFs struct {
|
||||
@ -18,24 +18,24 @@ type wrapperFs struct {
|
||||
fs.FS
|
||||
}
|
||||
|
||||
// FileSystemMap stores a map of filesystems
|
||||
// FilesystemMap stores a map of filesystems
|
||||
// the empty key will be overwritten to be the default key
|
||||
// it includes a default filesystem, based off the os fs
|
||||
type FileSystemMap struct {
|
||||
type FilesystemMap struct {
|
||||
m sync.Map
|
||||
}
|
||||
|
||||
// note that the first invocation of key cannot be called in a racy context.
|
||||
func (f *FileSystemMap) key(k string) string {
|
||||
func (f *FilesystemMap) key(k string) string {
|
||||
if k == "" {
|
||||
k = DefaultFileSystemKey
|
||||
k = DefaultFilesystemKey
|
||||
}
|
||||
return k
|
||||
}
|
||||
|
||||
// Register will add the filesystem with key to later be retrieved
|
||||
// A call with a nil fs will call unregister, ensuring that a call to Default() will never be nil
|
||||
func (f *FileSystemMap) Register(k string, v fs.FS) {
|
||||
func (f *FilesystemMap) Register(k string, v fs.FS) {
|
||||
k = f.key(k)
|
||||
if v == nil {
|
||||
f.Unregister(k)
|
||||
@ -47,23 +47,23 @@ func (f *FileSystemMap) Register(k string, v fs.FS) {
|
||||
// Unregister will remove the filesystem with key from the filesystem map
|
||||
// if the key is the default key, it will set the default to the osFS instead of deleting it
|
||||
// modules should call this on cleanup to be safe
|
||||
func (f *FileSystemMap) Unregister(k string) {
|
||||
func (f *FilesystemMap) Unregister(k string) {
|
||||
k = f.key(k)
|
||||
if k == DefaultFileSystemKey {
|
||||
f.m.Store(k, DefaultFileSystem)
|
||||
if k == DefaultFilesystemKey {
|
||||
f.m.Store(k, DefaultFilesystem)
|
||||
} else {
|
||||
f.m.Delete(k)
|
||||
}
|
||||
}
|
||||
|
||||
// Get will get a filesystem with a given key
|
||||
func (f *FileSystemMap) Get(k string) (v fs.FS, ok bool) {
|
||||
func (f *FilesystemMap) Get(k string) (v fs.FS, ok bool) {
|
||||
k = f.key(k)
|
||||
c, ok := f.m.Load(strings.TrimSpace(k))
|
||||
if !ok {
|
||||
if k == DefaultFileSystemKey {
|
||||
f.m.Store(k, DefaultFileSystem)
|
||||
return DefaultFileSystem, true
|
||||
if k == DefaultFilesystemKey {
|
||||
f.m.Store(k, DefaultFilesystem)
|
||||
return DefaultFilesystem, true
|
||||
}
|
||||
return nil, ok
|
||||
}
|
||||
@ -71,7 +71,7 @@ func (f *FileSystemMap) Get(k string) (v fs.FS, ok bool) {
|
||||
}
|
||||
|
||||
// Default will get the default filesystem in the filesystem map
|
||||
func (f *FileSystemMap) Default() fs.FS {
|
||||
val, _ := f.Get(DefaultFileSystemKey)
|
||||
func (f *FilesystemMap) Default() fs.FS {
|
||||
val, _ := f.Get(DefaultFilesystemKey)
|
||||
return val
|
||||
}
|
||||
|
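The FileSystemMap above (FilesystemMap on the beta side) is a sync.Map-backed registry with an OS-backed default entry. A rough usage sketch follows; the import path is assumed to be the module-internal github.com/caddyserver/caddy/v2/internal/filesystems package, so code like this only compiles inside the caddy module itself.

package example

import (
    "io/fs"
    "os"

    // assumed internal import path for the package shown in this compare
    "github.com/caddyserver/caddy/v2/internal/filesystems"
)

func example() (fs.FS, bool) {
    var m filesystems.FileSystemMap

    // Register a named filesystem; an empty key is treated as the default key.
    m.Register("assets", os.DirFS("/srv/assets"))

    // Unregistering the default key swaps the OS filesystem back in
    // instead of deleting it, so Default() never returns nil.
    m.Unregister("")
    _ = m.Default()

    // Get falls back to the OS-backed default when the default key
    // has not been stored yet.
    return m.Get("assets")
}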
@ -1,22 +0,0 @@
package internal

import "fmt"

// MaxSizeSubjectsListForLog returns the keys in the map as a slice of maximum length
// maxToDisplay. It is useful for logging domains being managed, for example, since a
// map is typically needed for quick lookup, but a slice is needed for logging, and this
// can be quite a doozy since there may be a huge amount (hundreds of thousands).
func MaxSizeSubjectsListForLog(subjects map[string]struct{}, maxToDisplay int) []string {
numberOfNamesToDisplay := min(len(subjects), maxToDisplay)
domainsToDisplay := make([]string, 0, numberOfNamesToDisplay)
for domain := range subjects {
domainsToDisplay = append(domainsToDisplay, domain)
if len(domainsToDisplay) >= numberOfNamesToDisplay {
break
}
}
if len(subjects) > maxToDisplay {
domainsToDisplay = append(domainsToDisplay, fmt.Sprintf("(and %d more...)", len(subjects)-maxToDisplay))
}
return domainsToDisplay
}
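MaxSizeSubjectsListForLog, present only on the master side, keeps log lines bounded when a very large set of names is managed. A hedged sketch of how it is typically called with zap; because the package is internal, this is only callable from within the caddy module itself.

package main

import (
    "go.uber.org/zap"

    "github.com/caddyserver/caddy/v2/internal" // internal: importable only within the caddy module
)

func main() {
    logger := zap.NewExample()
    domains := map[string]struct{}{
        "a.example.com": {}, "b.example.com": {}, "c.example.com": {},
    }
    // Log at most two names, plus an "(and N more...)" marker for the rest.
    logger.Info("enabling automatic TLS certificate management",
        zap.Strings("domains", internal.MaxSizeSubjectsListForLog(domains, 2)))
}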
@ -210,7 +210,7 @@ func (na NetworkAddress) IsUnixNetwork() bool {
return IsUnixNetwork(na.Network)
}

// IsFdNetwork returns true if na.Network is
// IsUnixNetwork returns true if na.Network is
// fd or fdgram.
func (na NetworkAddress) IsFdNetwork() bool {
return IsFdNetwork(na.Network)
@ -641,7 +641,7 @@ func RegisterNetwork(network string, getListener ListenerFunc) {
if network == "tcp" || network == "tcp4" || network == "tcp6" ||
network == "udp" || network == "udp4" || network == "udp6" ||
network == "unix" || network == "unixpacket" || network == "unixgram" ||
strings.HasPrefix(network, "ip:") || strings.HasPrefix(network, "ip4:") || strings.HasPrefix(network, "ip6:") ||
strings.HasPrefix("ip:", network) || strings.HasPrefix("ip4:", network) || strings.HasPrefix("ip6:", network) ||
network == "fd" || network == "fdgram" {
panic("network type " + network + " is reserved")
}
@ -30,7 +30,7 @@ func TestSplitNetworkAddress(t *testing.T) {
|
||||
expectErr bool
|
||||
}{
|
||||
{
|
||||
input: "",
|
||||
input: "",
|
||||
expectHost: "",
|
||||
},
|
||||
{
|
||||
@ -41,7 +41,7 @@ func TestSplitNetworkAddress(t *testing.T) {
|
||||
input: ":", // empty host & empty port
|
||||
},
|
||||
{
|
||||
input: "::",
|
||||
input: "::",
|
||||
expectHost: "::",
|
||||
},
|
||||
{
|
||||
@ -184,8 +184,9 @@ func TestParseNetworkAddress(t *testing.T) {
|
||||
expectErr bool
|
||||
}{
|
||||
{
|
||||
input: "",
|
||||
expectAddr: NetworkAddress{},
|
||||
input: "",
|
||||
expectAddr: NetworkAddress{
|
||||
},
|
||||
},
|
||||
{
|
||||
input: ":",
|
||||
@ -310,8 +311,9 @@ func TestParseNetworkAddressWithDefaults(t *testing.T) {
|
||||
expectErr bool
|
||||
}{
|
||||
{
|
||||
input: "",
|
||||
expectAddr: NetworkAddress{},
|
||||
input: "",
|
||||
expectAddr: NetworkAddress{
|
||||
},
|
||||
},
|
||||
{
|
||||
input: ":",
|
||||
|
11 logging.go
@ -20,7 +20,6 @@ import (
|
||||
"io"
|
||||
"log"
|
||||
"os"
|
||||
"slices"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
@ -162,9 +161,7 @@ func (logging *Logging) setupNewDefault(ctx Context) error {
|
||||
if err != nil {
|
||||
return fmt.Errorf("setting up default log: %v", err)
|
||||
}
|
||||
|
||||
filteringCore := &filteringCore{newDefault.CustomLog.core, newDefault.CustomLog}
|
||||
newDefault.logger = zap.New(filteringCore, options...)
|
||||
newDefault.logger = zap.New(newDefault.CustomLog.core, options...)
|
||||
|
||||
// redirect the default caddy logs
|
||||
defaultLoggerMu.Lock()
|
||||
@ -493,8 +490,10 @@ func (cl *CustomLog) provision(ctx Context, logging *Logging) error {
|
||||
if len(cl.Include) > 0 && len(cl.Exclude) > 0 {
|
||||
// prevent intersections
|
||||
for _, allow := range cl.Include {
|
||||
if slices.Contains(cl.Exclude, allow) {
|
||||
return fmt.Errorf("include and exclude must not intersect, but found %s in both lists", allow)
|
||||
for _, deny := range cl.Exclude {
|
||||
if allow == deny {
|
||||
return fmt.Errorf("include and exclude must not intersect, but found %s in both lists", allow)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
106 logging_test.go
@ -1,106 +0,0 @@
|
||||
// Copyright 2015 Matthew Holt and The Caddy Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package caddy
|
||||
|
||||
import "testing"
|
||||
|
||||
func TestCustomLog_loggerAllowed(t *testing.T) {
|
||||
type fields struct {
|
||||
BaseLog BaseLog
|
||||
Include []string
|
||||
Exclude []string
|
||||
}
|
||||
type args struct {
|
||||
name string
|
||||
isModule bool
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
args args
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "include",
|
||||
fields: fields{
|
||||
Include: []string{"foo"},
|
||||
},
|
||||
args: args{
|
||||
name: "foo",
|
||||
isModule: true,
|
||||
},
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "exclude",
|
||||
fields: fields{
|
||||
Exclude: []string{"foo"},
|
||||
},
|
||||
args: args{
|
||||
name: "foo",
|
||||
isModule: true,
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "include and exclude",
|
||||
fields: fields{
|
||||
Include: []string{"foo"},
|
||||
Exclude: []string{"foo"},
|
||||
},
|
||||
args: args{
|
||||
name: "foo",
|
||||
isModule: true,
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "include and exclude (longer namespace)",
|
||||
fields: fields{
|
||||
Include: []string{"foo.bar"},
|
||||
Exclude: []string{"foo"},
|
||||
},
|
||||
args: args{
|
||||
name: "foo.bar",
|
||||
isModule: true,
|
||||
},
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "excluded module is not printed",
|
||||
fields: fields{
|
||||
Include: []string{"admin.api.load"},
|
||||
Exclude: []string{"admin.api"},
|
||||
},
|
||||
args: args{
|
||||
name: "admin.api",
|
||||
isModule: false,
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
cl := &CustomLog{
|
||||
BaseLog: tt.fields.BaseLog,
|
||||
Include: tt.fields.Include,
|
||||
Exclude: tt.fields.Exclude,
|
||||
}
|
||||
if got := cl.loggerAllowed(tt.args.name, tt.args.isModule); got != tt.want {
|
||||
t.Errorf("CustomLog.loggerAllowed() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
@ -20,7 +20,9 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/caddyserver/caddy/v2"
|
||||
@ -204,26 +206,27 @@ func (app *App) On(eventName string, handler Handler) error {
|
||||
//
|
||||
// Note that the data map is not copied, for efficiency. After Emit() is called, the
|
||||
// data passed in should not be changed in other goroutines.
|
||||
func (app *App) Emit(ctx caddy.Context, eventName string, data map[string]any) caddy.Event {
|
||||
func (app *App) Emit(ctx caddy.Context, eventName string, data map[string]any) Event {
|
||||
logger := app.logger.With(zap.String("name", eventName))
|
||||
|
||||
e, err := caddy.NewEvent(ctx, eventName, data)
|
||||
id, err := uuid.NewRandom()
|
||||
if err != nil {
|
||||
logger.Error("failed to create event", zap.Error(err))
|
||||
logger.Error("failed generating new event ID", zap.Error(err))
|
||||
}
|
||||
|
||||
var originModule caddy.ModuleInfo
|
||||
var originModuleID caddy.ModuleID
|
||||
var originModuleName string
|
||||
if origin := e.Origin(); origin != nil {
|
||||
originModule = origin.CaddyModule()
|
||||
originModuleID = originModule.ID
|
||||
originModuleName = originModule.String()
|
||||
eventName = strings.ToLower(eventName)
|
||||
|
||||
e := Event{
|
||||
Data: data,
|
||||
id: id,
|
||||
ts: time.Now(),
|
||||
name: eventName,
|
||||
origin: ctx.Module(),
|
||||
}
|
||||
|
||||
logger = logger.With(
|
||||
zap.String("id", e.ID().String()),
|
||||
zap.String("origin", originModuleName))
|
||||
zap.String("id", e.id.String()),
|
||||
zap.String("origin", e.origin.CaddyModule().String()))
|
||||
|
||||
// add event info to replacer, make sure it's in the context
|
||||
repl, ok := ctx.Context.Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
|
||||
@ -236,15 +239,15 @@ func (app *App) Emit(ctx caddy.Context, eventName string, data map[string]any) c
|
||||
case "event":
|
||||
return e, true
|
||||
case "event.id":
|
||||
return e.ID(), true
|
||||
return e.id, true
|
||||
case "event.name":
|
||||
return e.Name(), true
|
||||
return e.name, true
|
||||
case "event.time":
|
||||
return e.Timestamp(), true
|
||||
return e.ts, true
|
||||
case "event.time_unix":
|
||||
return e.Timestamp().UnixMilli(), true
|
||||
return e.ts.UnixMilli(), true
|
||||
case "event.module":
|
||||
return originModuleID, true
|
||||
return e.origin.CaddyModule().ID, true
|
||||
case "event.data":
|
||||
return e.Data, true
|
||||
}
|
||||
@ -266,7 +269,7 @@ func (app *App) Emit(ctx caddy.Context, eventName string, data map[string]any) c
|
||||
// invoke handlers bound to the event by name and also all events; this for loop
|
||||
// iterates twice at most: once for the event name, once for "" (all events)
|
||||
for {
|
||||
moduleID := originModuleID
|
||||
moduleID := e.origin.CaddyModule().ID
|
||||
|
||||
// implement propagation up the module tree (i.e. start with "a.b.c" then "a.b" then "a" then "")
|
||||
for {
|
||||
@ -289,7 +292,7 @@ func (app *App) Emit(ctx caddy.Context, eventName string, data map[string]any) c
|
||||
zap.Any("handler", handler))
|
||||
|
||||
if err := handler.Handle(ctx, e); err != nil {
|
||||
aborted := errors.Is(err, caddy.ErrEventAborted)
|
||||
aborted := errors.Is(err, ErrAborted)
|
||||
|
||||
logger.Error("handler error",
|
||||
zap.Error(err),
|
||||
@ -323,9 +326,76 @@ func (app *App) Emit(ctx caddy.Context, eventName string, data map[string]any) c
|
||||
return e
|
||||
}
|
||||
|
||||
// Event represents something that has happened or is happening.
|
||||
// An Event value is not synchronized, so it should be copied if
|
||||
// being used in goroutines.
|
||||
//
|
||||
// EXPERIMENTAL: As with the rest of this package, events are
|
||||
// subject to change.
|
||||
type Event struct {
|
||||
// If non-nil, the event has been aborted, meaning
|
||||
// propagation has stopped to other handlers and
|
||||
// the code should stop what it was doing. Emitters
|
||||
// may choose to use this as a signal to adjust their
|
||||
// code path appropriately.
|
||||
Aborted error
|
||||
|
||||
// The data associated with the event. Usually the
|
||||
// original emitter will be the only one to set or
|
||||
// change these values, but the field is exported
|
||||
// so handlers can have full access if needed.
|
||||
// However, this map is not synchronized, so
|
||||
// handlers must not use this map directly in new
|
||||
// goroutines; instead, copy the map to use it in a
|
||||
// goroutine.
|
||||
Data map[string]any
|
||||
|
||||
id uuid.UUID
|
||||
ts time.Time
|
||||
name string
|
||||
origin caddy.Module
|
||||
}
|
||||
|
||||
func (e Event) ID() uuid.UUID { return e.id }
|
||||
func (e Event) Timestamp() time.Time { return e.ts }
|
||||
func (e Event) Name() string { return e.name }
|
||||
func (e Event) Origin() caddy.Module { return e.origin }
|
||||
|
||||
// CloudEvent exports event e as a structure that, when
|
||||
// serialized as JSON, is compatible with the
|
||||
// CloudEvents spec.
|
||||
func (e Event) CloudEvent() CloudEvent {
|
||||
dataJSON, _ := json.Marshal(e.Data)
|
||||
return CloudEvent{
|
||||
ID: e.id.String(),
|
||||
Source: e.origin.CaddyModule().String(),
|
||||
SpecVersion: "1.0",
|
||||
Type: e.name,
|
||||
Time: e.ts,
|
||||
DataContentType: "application/json",
|
||||
Data: dataJSON,
|
||||
}
|
||||
}
|
||||
|
||||
// CloudEvent is a JSON-serializable structure that
|
||||
// is compatible with the CloudEvents specification.
|
||||
// See https://cloudevents.io.
|
||||
type CloudEvent struct {
|
||||
ID string `json:"id"`
|
||||
Source string `json:"source"`
|
||||
SpecVersion string `json:"specversion"`
|
||||
Type string `json:"type"`
|
||||
Time time.Time `json:"time"`
|
||||
DataContentType string `json:"datacontenttype,omitempty"`
|
||||
Data json.RawMessage `json:"data,omitempty"`
|
||||
}
|
||||
|
||||
// ErrAborted cancels an event.
|
||||
var ErrAborted = errors.New("event aborted")
|
||||
|
||||
// Handler is a type that can handle events.
|
||||
type Handler interface {
|
||||
Handle(context.Context, caddy.Event) error
|
||||
Handle(context.Context, Event) error
|
||||
}
|
||||
|
||||
// Interface guards
|
||||
|
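On the master side the Event type and the abort sentinel live in the caddy package (caddy.Event, caddy.ErrEventAborted), while the beta keeps them in caddyevents; either way, consumers subscribe through App.On. A hedged sketch of a handler against the master-side signatures, assuming the events app is addressed as "events" in ctx.App and using an illustrative event name:

package examplehandler

import (
    "context"
    "fmt"

    "github.com/caddyserver/caddy/v2"
    "github.com/caddyserver/caddy/v2/modules/caddyevents"
)

// printHandler is an illustrative handler; on master Handle receives a
// caddy.Event, on the beta a caddyevents.Event.
type printHandler struct{}

func (printHandler) Handle(_ context.Context, e caddy.Event) error {
    fmt.Println("event fired:", e.Name(), e.Data)
    return nil
}

// subscribe would typically run inside a module's Provision step.
func subscribe(ctx caddy.Context) error {
    appIface, err := ctx.App("events")
    if err != nil {
        return err
    }
    // "cert_obtained" is used here purely as an example event name.
    return appIface.(*caddyevents.App).On("cert_obtained", printHandler{})
}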
@ -69,11 +69,11 @@ func (xs *Filesystems) Provision(ctx caddy.Context) error {
|
||||
}
|
||||
// register that module
|
||||
ctx.Logger().Debug("registering fs", zap.String("fs", f.Key))
|
||||
ctx.FileSystems().Register(f.Key, f.fileSystem)
|
||||
ctx.Filesystems().Register(f.Key, f.fileSystem)
|
||||
// remember to unregister the module when we are done
|
||||
xs.defers = append(xs.defers, func() {
|
||||
ctx.Logger().Debug("unregistering fs", zap.String("fs", f.Key))
|
||||
ctx.FileSystems().Unregister(f.Key)
|
||||
ctx.Filesystems().Unregister(f.Key)
|
||||
})
|
||||
}
|
||||
return nil
|
||||
|
@ -73,7 +73,7 @@ func init() {
|
||||
// `{http.request.local.host}` | The host (IP) part of the local address the connection arrived on
|
||||
// `{http.request.local.port}` | The port part of the local address the connection arrived on
|
||||
// `{http.request.local}` | The local address the connection arrived on
|
||||
// `{http.request.remote.host}` | The host (IP) part of the remote client's address, if available (not known with HTTP/3 early data)
|
||||
// `{http.request.remote.host}` | The host (IP) part of the remote client's address
|
||||
// `{http.request.remote.port}` | The port part of the remote client's address
|
||||
// `{http.request.remote}` | The address of the remote client
|
||||
// `{http.request.scheme}` | The request scheme, typically `http` or `https`
|
||||
@ -152,7 +152,7 @@ type App struct {
|
||||
tlsApp *caddytls.TLS
|
||||
|
||||
// used temporarily between phases 1 and 2 of auto HTTPS
|
||||
allCertDomains map[string]struct{}
|
||||
allCertDomains []string
|
||||
}
|
||||
|
||||
// CaddyModule returns the Caddy module information.
|
||||
|
@ -25,7 +25,6 @@ import (
|
||||
"go.uber.org/zap"
|
||||
|
||||
"github.com/caddyserver/caddy/v2"
|
||||
"github.com/caddyserver/caddy/v2/internal"
|
||||
"github.com/caddyserver/caddy/v2/modules/caddytls"
|
||||
)
|
||||
|
||||
@ -66,6 +65,12 @@ type AutoHTTPSConfig struct {
// enabled. To force automated certificate management
// regardless of loaded certificates, set this to true.
IgnoreLoadedCerts bool `json:"ignore_loaded_certificates,omitempty"`

// If true, automatic HTTPS will prefer wildcard names
// and ignore non-wildcard names if both are available.
// This allows for writing a config with top-level host
// matchers without having those names produce certificates.
PreferWildcard bool `json:"prefer_wildcard,omitempty"`
}

// automaticHTTPSPhase1 provisions all route matchers, determines
@ -158,8 +163,33 @@ func (app *App) automaticHTTPSPhase1(ctx caddy.Context, repl *caddy.Replacer) er
}
}

// trim the list of domains covered by wildcards, if configured
if srv.AutoHTTPS.PreferWildcard {
wildcards := make(map[string]struct{})
for d := range serverDomainSet {
if strings.HasPrefix(d, "*.") {
wildcards[d[2:]] = struct{}{}
}
}
for d := range serverDomainSet {
if strings.HasPrefix(d, "*.") {
continue
}
base := d
if idx := strings.Index(d, "."); idx != -1 {
base = d[idx+1:]
}
if _, ok := wildcards[base]; ok {
delete(serverDomainSet, d)
}
}
}
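The prefer_wildcard block above drops a name only when a wildcard for its immediate parent domain exists in the same set. A standalone sketch of that rule, for illustration only:

package main

import (
    "fmt"
    "strings"
)

// trimCoveredByWildcard removes non-wildcard names whose parent domain
// has a matching "*." entry in the same set, mirroring the logic above.
func trimCoveredByWildcard(domains map[string]struct{}) {
    wildcards := make(map[string]struct{})
    for d := range domains {
        if strings.HasPrefix(d, "*.") {
            wildcards[d[2:]] = struct{}{}
        }
    }
    for d := range domains {
        if strings.HasPrefix(d, "*.") {
            continue
        }
        base := d
        if idx := strings.Index(d, "."); idx != -1 {
            base = d[idx+1:]
        }
        if _, ok := wildcards[base]; ok {
            delete(domains, d) // deleting during range is safe in Go
        }
    }
}

func main() {
    set := map[string]struct{}{"*.example.com": {}, "api.example.com": {}, "example.com": {}}
    trimCoveredByWildcard(set)
    fmt.Println(len(set)) // 2: "*.example.com" and "example.com" remain
}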
|
||||
// build the list of domains that could be used with ECH (if enabled)
|
||||
// so the TLS app can know to publish ECH configs for them
|
||||
// so the TLS app can know to publish ECH configs for them; we do this
|
||||
// after trimming domains covered by wildcards because, presumably,
|
||||
// if the user wants to use wildcard certs, they also want to use the
|
||||
// wildcard for ECH, rather than individual subdomains
|
||||
echDomains := make([]string, 0, len(serverDomainSet))
|
||||
for d := range serverDomainSet {
|
||||
echDomains = append(echDomains, d)
|
||||
@ -265,10 +295,19 @@ func (app *App) automaticHTTPSPhase1(ctx caddy.Context, repl *caddy.Replacer) er
|
||||
}
|
||||
}
|
||||
|
||||
// we now have a list of all the unique names for which we need certs
|
||||
// we now have a list of all the unique names for which we need certs;
|
||||
// turn the set into a slice so that phase 2 can use it
|
||||
app.allCertDomains = make([]string, 0, len(uniqueDomainsForCerts))
|
||||
var internal, tailscale []string
|
||||
uniqueDomainsLoop:
|
||||
for d := range uniqueDomainsForCerts {
|
||||
if !isTailscaleDomain(d) {
|
||||
// whether or not there is already an automation policy for this
|
||||
// name, we should add it to the list to manage a cert for it,
|
||||
// unless it's a Tailscale domain, because we don't manage those
|
||||
app.allCertDomains = append(app.allCertDomains, d)
|
||||
}
|
||||
|
||||
// some names we've found might already have automation policies
|
||||
// explicitly specified for them; we should exclude those from
|
||||
// our hidden/implicit policy, since applying a name to more than
|
||||
@ -307,7 +346,6 @@ uniqueDomainsLoop:
|
||||
}
|
||||
if isTailscaleDomain(d) {
|
||||
tailscale = append(tailscale, d)
|
||||
delete(uniqueDomainsForCerts, d) // not managed by us; handled separately
|
||||
} else if shouldUseInternal(d) {
|
||||
internal = append(internal, d)
|
||||
}
|
||||
@ -343,7 +381,7 @@ uniqueDomainsLoop:
|
||||
// match on known domain names, unless it's our special case of a
|
||||
// catch-all which is an empty string (common among catch-all sites
|
||||
// that enable on-demand TLS for yet-unknown domain names)
|
||||
if len(domains) != 1 || domains[0] != "" {
|
||||
if !(len(domains) == 1 && domains[0] == "") {
|
||||
matcherSet = append(matcherSet, MatchHost(domains))
|
||||
}
|
||||
|
||||
@ -437,9 +475,6 @@ redirServersLoop:
|
||||
}
|
||||
}
|
||||
|
||||
// persist the domains/IPs we're managing certs for through provisioning/startup
|
||||
app.allCertDomains = uniqueDomainsForCerts
|
||||
|
||||
logger.Debug("adjusted config",
|
||||
zap.Reflect("tls", app.tlsApp),
|
||||
zap.Reflect("http", app))
|
||||
@ -742,7 +777,7 @@ func (app *App) automaticHTTPSPhase2() error {
|
||||
return nil
|
||||
}
|
||||
app.logger.Info("enabling automatic TLS certificate management",
|
||||
zap.Strings("domains", internal.MaxSizeSubjectsListForLog(app.allCertDomains, 1000)),
|
||||
zap.Strings("domains", app.allCertDomains),
|
||||
)
|
||||
err := app.tlsApp.Manage(app.allCertDomains)
|
||||
if err != nil {
|
||||
@ -236,7 +236,10 @@ func (c *Cache) makeRoom() {
// the cache is on a long tail, we can save a lot of CPU
// time by doing a whole bunch of deletions now and then
// we won't have to do them again for a while
numToDelete := max(len(c.cache)/10, 1)
numToDelete := len(c.cache) / 10
if numToDelete < 1 {
numToDelete = 1
}
for deleted := 0; deleted <= numToDelete; deleted++ {
// Go maps are "nondeterministic" not actually random,
// so although we could just chop off the "front" of the
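The makeRoom change above replaces a manual lower-bound check with the max builtin available since Go 1.21 (the same release that provides the min used elsewhere in this compare). A trivial before/after illustration:

package main

import "fmt"

func main() {
    cacheLen := 3
    // old style: clamp by hand
    numToDelete := cacheLen / 10
    if numToDelete < 1 {
        numToDelete = 1
    }
    // new style: the max builtin (Go 1.21+)
    numToDelete = max(cacheLen/10, 1)
    fmt.Println(numToDelete) // 1
}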
@ -37,10 +37,6 @@ func init() {
|
||||
// `{http.auth.user.*}` placeholders may be set for any authentication
|
||||
// modules that provide user metadata.
|
||||
//
|
||||
// In case of an error, the placeholder `{http.auth.<provider>.error}`
|
||||
// will be set to the error message returned by the authentication
|
||||
// provider.
|
||||
//
|
||||
// Its API is still experimental and may be subject to change.
|
||||
type Authentication struct {
|
||||
// A set of authentication providers. If none are specified,
|
||||
@ -75,7 +71,6 @@ func (a *Authentication) Provision(ctx caddy.Context) error {
|
||||
}
|
||||
|
||||
func (a Authentication) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
|
||||
repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
|
||||
var user User
|
||||
var authed bool
|
||||
var err error
|
||||
@ -85,9 +80,6 @@ func (a Authentication) ServeHTTP(w http.ResponseWriter, r *http.Request, next c
|
||||
if c := a.logger.Check(zapcore.ErrorLevel, "auth provider returned error"); c != nil {
|
||||
c.Write(zap.String("provider", provName), zap.Error(err))
|
||||
}
|
||||
// Set the error from the authentication provider in a placeholder,
|
||||
// so it can be used in the handle_errors directive.
|
||||
repl.Set("http.auth."+provName+".error", err.Error())
|
||||
continue
|
||||
}
|
||||
if authed {
|
||||
@ -98,6 +90,7 @@ func (a Authentication) ServeHTTP(w http.ResponseWriter, r *http.Request, next c
|
||||
return caddyhttp.Error(http.StatusUnauthorized, fmt.Errorf("not authenticated"))
|
||||
}
|
||||
|
||||
repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
|
||||
repl.Set("http.auth.user.id", user.ID)
|
||||
for k, v := range user.Metadata {
|
||||
repl.Set("http.auth.user."+k, v)
|
||||
|
@ -26,7 +26,7 @@
|
||||
<path d="M9 7l4 0"/>
|
||||
<path d="M9 11l4 0"/>
|
||||
</svg>
|
||||
{{- else if .HasExt ".jpg" ".jpeg" ".png" ".gif" ".webp" ".tiff" ".bmp" ".heif" ".heic" ".svg" ".avif"}}
|
||||
{{- else if .HasExt ".jpg" ".jpeg" ".png" ".gif" ".webp" ".tiff" ".bmp" ".heif" ".heic" ".svg"}}
|
||||
{{- if eq .Tpl.Layout "grid"}}
|
||||
<img loading="lazy" src="{{.Name | pathEscape}}">
|
||||
{{- else}}
|
||||
@ -802,7 +802,7 @@ footer {
|
||||
<b>{{.NumFiles}}</b> file{{if ne 1 .NumFiles}}s{{end}}
|
||||
</span>
|
||||
<span class="meta-item">
|
||||
<b>{{.HumanTotalFileSize}}</b> total
|
||||
<b>{{.HumanTotalFileSize}}</b> total
|
||||
</span>
|
||||
{{- if ne 0 .Limit}}
|
||||
<span class="meta-item">
|
||||
@ -828,96 +828,6 @@ footer {
|
||||
</svg>
|
||||
Grid
|
||||
</a>
|
||||
{{- if and (eq .Layout "grid") (eq .Sort "name") (ne .Order "asc")}}
|
||||
<a href="?sort=name&order=asc{{if ne 0 .Limit}}&limit={{.Limit}}{{end}}{{if ne 0 .Offset}}&offset={{.Offset}}{{end}}&layout=grid">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-layout-grid" width="16" height="16" viewBox="0 0 24 24" stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round">
|
||||
<text x="2" y="10" font-size="9" fill="currentColor">Z</text>
|
||||
<text x="2" y="20" font-size="9" fill="currentColor">A</text>
|
||||
<path d="M13 4v12"></path>
|
||||
<path d="M12 16l1 2l1 -2"></path>
|
||||
</svg>
|
||||
</a>
|
||||
{{- else if and (eq .Layout "grid") (eq .Sort "name") (ne .Order "desc")}}
|
||||
<a href="?sort=name&order=desc{{if ne 0 .Limit}}&limit={{.Limit}}{{end}}{{if ne 0 .Offset}}&offset={{.Offset}}{{end}}&layout=grid">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-layout-grid" width="16" height="16" viewBox="0 0 24 24" stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round">
|
||||
<text x="2" y="10" font-size="9" fill="currentColor">A</text>
|
||||
<text x="2" y="20" font-size="9" fill="currentColor">Z</text>
|
||||
<path d="M13 4v12"></path>
|
||||
<path d="M12 16l1 2l1 -2"></path>
|
||||
</svg>
|
||||
</a>
|
||||
{{- else if and (eq .Layout "grid")}}
|
||||
<a href="?sort=name&order=asc{{if ne 0 .Limit}}&limit={{.Limit}}{{end}}{{if ne 0 .Offset}}&offset={{.Offset}}{{end}}&layout=grid">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-layout-grid" width="16" height="16" viewBox="0 0 24 24" stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round">
|
||||
<text x="2" y="20" font-size="9" fill="currentColor">A</text>
|
||||
<text x="2" y="10" font-size="9" fill="currentColor">Z</text>
|
||||
<path d="M13 4v12"></path>
|
||||
<path d="M12 16l1 2l1 -2"></path>
|
||||
</svg>
|
||||
</a>
|
||||
{{- end}}
|
||||
{{- if and (eq .Layout "grid") (eq .Sort "size") (ne .Order "asc")}}
|
||||
<a href="?sort=size&order=asc{{if ne 0 .Limit}}&limit={{.Limit}}{{end}}{{if ne 0 .Offset}}&offset={{.Offset}}{{end}}&layout=grid">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-layout-grid" width="16" height="16" viewBox="0 0 24 24" stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round">
|
||||
<rect x="2" y="4" width="4" height="3" rx="0.4" ry="0.4"></rect>
|
||||
<rect x="2" y="10" width="8" height="3" rx="0.4" ry="0.4"></rect>
|
||||
<rect x="2" y="16" width="12" height="3" rx="0.4" ry="0.4"></rect>
|
||||
<path d="M18 4v12"></path>
|
||||
<path d="M17 16l1 2l1 -2"></path>
|
||||
</svg>
|
||||
</a>
|
||||
{{- else if and (eq .Layout "grid") (eq .Sort "size") (ne .Order "desc")}}
|
||||
<a href="?sort=size&order=desc{{if ne 0 .Limit}}&limit={{.Limit}}{{end}}{{if ne 0 .Offset}}&offset={{.Offset}}{{end}}&layout=grid">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-layout-grid" width="16" height="16" viewBox="0 0 24 24" stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round">
|
||||
<rect x="2" y="4" width="12" height="3" rx="0.4" ry="0.4"></rect>
|
||||
<rect x="2" y="10" width="8" height="3" rx="0.4" ry="0.4"></rect>
|
||||
<rect x="2" y="16" width="4" height="3" rx="0.4" ry="0.4"></rect>
|
||||
<path d="M18 4v12"></path>
|
||||
<path d="M17 16l1 2l1 -2"></path>
|
||||
</svg>
|
||||
</a>
|
||||
{{- else if and (eq .Layout "grid")}}
|
||||
<a href="?sort=size&order=asc{{if ne 0 .Limit}}&limit={{.Limit}}{{end}}{{if ne 0 .Offset}}&offset={{.Offset}}{{end}}&layout=grid">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-layout-grid" width="16" height="16" viewBox="0 0 24 24" stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round">
|
||||
<rect x="2" y="4" width="4" height="3" rx="0.4" ry="0.4"></rect>
|
||||
<rect x="2" y="10" width="8" height="3" rx="0.4" ry="0.4"></rect>
|
||||
<rect x="2" y="16" width="12" height="3" rx="0.4" ry="0.4"></rect>
|
||||
<path d="M18 4v12"></path>
|
||||
<path d="M17 16l1 2l1 -2"></path>
|
||||
</svg>
|
||||
</a>
|
||||
{{- end}}
|
||||
{{- if and (eq .Layout "grid") (eq .Sort "time") (ne .Order "asc")}}
|
||||
<a href="?sort=time&order=asc{{if ne 0 .Limit}}&limit={{.Limit}}{{end}}{{if ne 0 .Offset}}&offset={{.Offset}}{{end}}&layout=grid">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-layout-grid" width="16" height="16" viewBox="0 0 24 24" stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round">
|
||||
<circle cx="9" cy="11" r="8"></circle>
|
||||
<line x1="9" y1="12" x2="9" y2="7" stroke-linecap="round"></line>
|
||||
<line x1="9" y1="12" x2="12" y2="12" stroke-linecap="round"></line>
|
||||
<path d="M20 4v12"></path>
|
||||
<path d="M19 16l1 2l1 -2"></path>
|
||||
</svg>
|
||||
</a>
|
||||
{{- else if and (eq .Layout "grid") (eq .Sort "time") (ne .Order "desc")}}
|
||||
<a href="?sort=time&order=desc{{if ne 0 .Limit}}&limit={{.Limit}}{{end}}{{if ne 0 .Offset}}&offset={{.Offset}}{{end}}&layout=grid">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-layout-grid" width="16" height="16" viewBox="0 0 24 24" stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round">
|
||||
<circle cx="9" cy="11" r="8"></circle>
|
||||
<line x1="9" y1="12" x2="9" y2="7" stroke-linecap="round"></line>
|
||||
<line x1="9" y1="12" x2="12" y2="12" stroke-linecap="round"></line>
|
||||
<path d="M20 4v12"></path>
|
||||
<path d="M19 5l1 -2l1 2"></path>
|
||||
</svg>
|
||||
</a>
|
||||
{{- else if and (eq .Layout "grid")}}
|
||||
<a href="?sort=time&order=asc{{if ne 0 .Limit}}&limit={{.Limit}}{{end}}{{if ne 0 .Offset}}&offset={{.Offset}}{{end}}&layout=grid">
|
||||
<svg xmlns="http://www.w3.org/2000/svg" class="icon icon-tabler icon-tabler-layout-grid" width="16" height="16" viewBox="0 0 24 24" stroke-width="2" stroke="currentColor" fill="none" stroke-linecap="round" stroke-linejoin="round">
|
||||
<circle cx="9" cy="11" r="8"></circle>
|
||||
<line x1="9" y1="12" x2="9" y2="7" stroke-linecap="round"></line>
|
||||
<line x1="9" y1="12" x2="12" y2="12" stroke-linecap="round"></line>
|
||||
<path d="M20 4v12"></path>
|
||||
<path d="M19 16l1 2l1 -2"></path>
|
||||
</svg>
|
||||
</a>
|
||||
{{- end}}
|
||||
</div>
|
||||
<div class='listing{{if eq .Layout "grid"}} grid{{end}}'>
|
||||
{{- if eq .Layout "grid"}}
|
||||
@ -958,7 +868,7 @@ footer {
|
||||
</svg>
|
||||
</a>
|
||||
{{- end}}
|
||||
|
||||
|
||||
{{- if and (eq .Sort "name") (ne .Order "desc")}}
|
||||
<a href="?sort=name&order=desc{{if ne 0 .Limit}}&limit={{.Limit}}{{end}}{{if ne 0 .Offset}}&offset={{.Offset}}{{end}}">
|
||||
Name
|
||||
|
@ -252,7 +252,7 @@ func celFileMatcherMacroExpander() parser.MacroExpander {
|
||||
}
|
||||
|
||||
for _, arg := range args {
|
||||
if !isCELStringLiteral(arg) && !isCELCaddyPlaceholderCall(arg) {
|
||||
if !(isCELStringLiteral(arg) || isCELCaddyPlaceholderCall(arg)) {
|
||||
return nil, &common.Error{
|
||||
Location: eh.OffsetLocation(arg.ID()),
|
||||
Message: "matcher only supports repeated string literal arguments",
|
||||
@ -274,7 +274,7 @@ func celFileMatcherMacroExpander() parser.MacroExpander {
|
||||
func (m *MatchFile) Provision(ctx caddy.Context) error {
|
||||
m.logger = ctx.Logger()
|
||||
|
||||
m.fsmap = ctx.FileSystems()
|
||||
m.fsmap = ctx.Filesystems()
|
||||
|
||||
if m.Root == "" {
|
||||
m.Root = "{http.vars.root}"
|
||||
@ -616,16 +616,15 @@ func isCELTryFilesLiteral(e ast.Expr) bool {
|
||||
return false
|
||||
}
|
||||
mapKeyStr := mapKey.AsLiteral().ConvertToType(types.StringType).Value()
|
||||
switch mapKeyStr {
|
||||
case "try_files", "split_path":
|
||||
if mapKeyStr == "try_files" || mapKeyStr == "split_path" {
|
||||
if !isCELStringListLiteral(mapVal) {
|
||||
return false
|
||||
}
|
||||
case "try_policy", "root":
|
||||
} else if mapKeyStr == "try_policy" || mapKeyStr == "root" {
|
||||
if !(isCELStringExpr(mapVal)) {
|
||||
return false
|
||||
}
|
||||
default:
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
@ -117,7 +117,7 @@ func TestFileMatcher(t *testing.T) {
|
||||
},
|
||||
} {
|
||||
m := &MatchFile{
|
||||
fsmap: &filesystems.FileSystemMap{},
|
||||
fsmap: &filesystems.FilesystemMap{},
|
||||
Root: "./testdata",
|
||||
TryFiles: []string{"{http.request.uri.path}", "{http.request.uri.path}/"},
|
||||
}
|
||||
@ -229,7 +229,7 @@ func TestPHPFileMatcher(t *testing.T) {
|
||||
},
|
||||
} {
|
||||
m := &MatchFile{
|
||||
fsmap: &filesystems.FileSystemMap{},
|
||||
fsmap: &filesystems.FilesystemMap{},
|
||||
Root: "./testdata",
|
||||
TryFiles: []string{"{http.request.uri.path}", "{http.request.uri.path}/index.php"},
|
||||
SplitPath: []string{".php"},
|
||||
@ -273,7 +273,7 @@ func TestPHPFileMatcher(t *testing.T) {
|
||||
func TestFirstSplit(t *testing.T) {
|
||||
m := MatchFile{
|
||||
SplitPath: []string{".php"},
|
||||
fsmap: &filesystems.FileSystemMap{},
|
||||
fsmap: &filesystems.FilesystemMap{},
|
||||
}
|
||||
actual, remainder := m.firstSplit("index.PHP/somewhere")
|
||||
expected := "index.PHP"
|
||||
|
@ -186,7 +186,7 @@ func (FileServer) CaddyModule() caddy.ModuleInfo {
|
||||
func (fsrv *FileServer) Provision(ctx caddy.Context) error {
|
||||
fsrv.logger = ctx.Logger()
|
||||
|
||||
fsrv.fsmap = ctx.FileSystems()
|
||||
fsrv.fsmap = ctx.Filesystems()
|
||||
|
||||
if fsrv.FileSystem == "" {
|
||||
fsrv.FileSystem = "{http.vars.fs}"
|
||||
@ -300,10 +300,8 @@ func (fsrv *FileServer) ServeHTTP(w http.ResponseWriter, r *http.Request, next c
|
||||
info, err := fs.Stat(fileSystem, filename)
|
||||
if err != nil {
|
||||
err = fsrv.mapDirOpenError(fileSystem, err, filename)
|
||||
if errors.Is(err, fs.ErrNotExist) {
|
||||
if errors.Is(err, fs.ErrNotExist) || errors.Is(err, fs.ErrInvalid) {
|
||||
return fsrv.notFound(w, r, next)
|
||||
} else if errors.Is(err, fs.ErrInvalid) {
|
||||
return caddyhttp.Error(http.StatusBadRequest, err)
|
||||
} else if errors.Is(err, fs.ErrPermission) {
|
||||
return caddyhttp.Error(http.StatusForbidden, err)
|
||||
}
|
||||
@ -613,11 +611,6 @@ func (fsrv *FileServer) mapDirOpenError(fileSystem fs.FS, originalErr error, nam
|
||||
return originalErr
|
||||
}
|
||||
|
||||
var pathErr *fs.PathError
|
||||
if errors.As(originalErr, &pathErr) {
|
||||
return fs.ErrInvalid
|
||||
}
|
||||
|
||||
parts := strings.Split(name, separator)
|
||||
for i := range parts {
|
||||
if parts[i] == "" {
|
||||
|
@ -118,11 +118,6 @@ func (irh interceptedResponseHandler) WriteHeader(statusCode int) {
|
||||
irh.ResponseRecorder.WriteHeader(statusCode)
|
||||
}
|
||||
|
||||
// EXPERIMENTAL: Subject to change or removal.
|
||||
func (irh interceptedResponseHandler) Unwrap() http.ResponseWriter {
|
||||
return irh.ResponseRecorder
|
||||
}
|
||||
|
||||
// EXPERIMENTAL: Subject to change or removal.
|
||||
func (ir Intercept) ServeHTTP(w http.ResponseWriter, r *http.Request, next caddyhttp.Handler) error {
|
||||
buf := bufPool.Get().(*bytes.Buffer)
|
||||
|
@ -552,6 +552,7 @@ func (MatchPath) matchPatternWithEscapeSequence(escapedPath, matchPath string) b
|
||||
if iPattern >= len(matchPath) || iPath >= len(escapedPath) {
|
||||
break
|
||||
}
|
||||
|
||||
// get the next character from the request path
|
||||
|
||||
pathCh := string(escapedPath[iPath])
|
||||
|
@ -9,9 +9,8 @@ import (
|
||||
"sync"
|
||||
"testing"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus/testutil"
|
||||
|
||||
"github.com/caddyserver/caddy/v2"
|
||||
"github.com/prometheus/client_golang/prometheus/testutil"
|
||||
)
|
||||
|
||||
func TestServerNameFromContext(t *testing.T) {
|
||||
|
@ -363,13 +363,13 @@ func addHTTPVarsToReplacer(repl *caddy.Replacer, req *http.Request, w http.Respo
|
||||
}
|
||||
}
|
||||
|
||||
switch key {
|
||||
case "http.shutting_down":
|
||||
switch {
|
||||
case key == "http.shutting_down":
|
||||
server := req.Context().Value(ServerCtxKey).(*Server)
|
||||
server.shutdownAtMu.RLock()
|
||||
defer server.shutdownAtMu.RUnlock()
|
||||
return !server.shutdownAt.IsZero(), true
|
||||
case "http.time_until_shutdown":
|
||||
case key == "http.time_until_shutdown":
|
||||
server := req.Context().Value(ServerCtxKey).(*Server)
|
||||
server.shutdownAtMu.RLock()
|
||||
defer server.shutdownAtMu.RUnlock()
|
||||
|
@ -665,10 +665,9 @@ func (h *Handler) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
|
||||
if d.NextArg() {
|
||||
return d.ArgErr()
|
||||
}
|
||||
switch subdir {
|
||||
case "request_buffers":
|
||||
if subdir == "request_buffers" {
|
||||
h.RequestBuffers = size
|
||||
case "response_buffers":
|
||||
} else if subdir == "response_buffers" {
|
||||
h.ResponseBuffers = size
|
||||
}
|
||||
|
||||
|
@ -122,10 +122,9 @@ func cmdReverseProxy(fs caddycmd.Flags) (int, error) {
|
||||
}
|
||||
}
|
||||
if fromAddr.Port == "" {
|
||||
switch fromAddr.Scheme {
|
||||
case "http":
|
||||
if fromAddr.Scheme == "http" {
|
||||
fromAddr.Port = httpPort
|
||||
case "https":
|
||||
} else if fromAddr.Scheme == "https" {
|
||||
fromAddr.Port = httpsPort
|
||||
}
|
||||
}
|
||||
|
@ -17,7 +17,6 @@ package fastcgi
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
@ -315,7 +314,7 @@ func parsePHPFastCGI(h httpcaddyfile.Helper) ([]httpcaddyfile.ConfigValue, error
|
||||
|
||||
// if the index is turned off, we skip the redirect and try_files
|
||||
if indexFile != "off" {
|
||||
var dirRedir bool
|
||||
dirRedir := false
|
||||
dirIndex := "{http.request.uri.path}/" + indexFile
|
||||
tryPolicy := "first_exist_fallback"
|
||||
|
||||
@ -329,7 +328,13 @@ func parsePHPFastCGI(h httpcaddyfile.Helper) ([]httpcaddyfile.ConfigValue, error
|
||||
tryPolicy = ""
|
||||
}
|
||||
|
||||
dirRedir = slices.Contains(tryFiles, dirIndex)
|
||||
for _, tf := range tryFiles {
|
||||
if tf == dirIndex {
|
||||
dirRedir = true
|
||||
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if dirRedir {
|
||||
|
@ -309,9 +309,7 @@ func (h *Handler) doActiveHealthCheckForAllHosts() {
|
||||
}
|
||||
}()
|
||||
|
||||
repl := caddy.NewReplacer()
|
||||
|
||||
networkAddr, err := repl.ReplaceOrErr(upstream.Dial, true, true)
|
||||
networkAddr, err := caddy.NewReplacer().ReplaceOrErr(upstream.Dial, true, true)
|
||||
if err != nil {
|
||||
if c := h.HealthChecks.Active.logger.Check(zapcore.ErrorLevel, "invalid use of placeholders in dial address for active health checks"); c != nil {
|
||||
c.Write(
|
||||
@ -346,24 +344,14 @@ func (h *Handler) doActiveHealthCheckForAllHosts() {
|
||||
return
|
||||
}
|
||||
hostAddr := addr.JoinHostPort(0)
|
||||
dialAddr := hostAddr
|
||||
if addr.IsUnixNetwork() || addr.IsFdNetwork() {
|
||||
// this will be used as the Host portion of a http.Request URL, and
|
||||
// paths to socket files would produce an error when creating URL,
|
||||
// so use a fake Host value instead; unix sockets are usually local
|
||||
hostAddr = "localhost"
|
||||
}
|
||||
|
||||
// Fill in the dial info for the upstream
|
||||
// If the upstream is set, use that instead
|
||||
dialInfoUpstream := upstream
|
||||
if h.HealthChecks.Active.Upstream != "" {
|
||||
dialInfoUpstream = &Upstream{
|
||||
Dial: h.HealthChecks.Active.Upstream,
|
||||
}
|
||||
}
|
||||
dialInfo, _ := dialInfoUpstream.fillDialInfo(repl)
|
||||
|
||||
err = h.doActiveHealthCheck(dialInfo, hostAddr, networkAddr, upstream)
|
||||
err = h.doActiveHealthCheck(DialInfo{Network: addr.Network, Address: dialAddr}, hostAddr, networkAddr, upstream)
|
||||
if err != nil {
|
||||
if c := h.HealthChecks.Active.logger.Check(zapcore.ErrorLevel, "active health check failed"); c != nil {
|
||||
c.Write(
|
||||
@ -484,7 +472,7 @@ func (h *Handler) doActiveHealthCheck(dialInfo DialInfo, hostAddr string, networ
|
||||
|
||||
markHealthy := func() {
|
||||
// increment passes and then check if it has reached the threshold to be healthy
|
||||
err := upstream.countHealthPass(1)
|
||||
err := upstream.Host.countHealthPass(1)
|
||||
if err != nil {
|
||||
if c := h.HealthChecks.Active.logger.Check(zapcore.ErrorLevel, "could not count active health pass"); c != nil {
|
||||
c.Write(
|
||||
|
@ -17,6 +17,7 @@ package reverseproxy
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/netip"
|
||||
"strconv"
|
||||
"sync/atomic"
|
||||
@ -99,7 +100,8 @@ func (u *Upstream) Full() bool {
|
||||
|
||||
// fillDialInfo returns a filled DialInfo for upstream u, using the request
|
||||
// context. Note that the returned value is not a pointer.
|
||||
func (u *Upstream) fillDialInfo(repl *caddy.Replacer) (DialInfo, error) {
|
||||
func (u *Upstream) fillDialInfo(r *http.Request) (DialInfo, error) {
|
||||
repl := r.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
|
||||
var addr caddy.NetworkAddress
|
||||
|
||||
// use provided dial address
|
||||
|
@ -353,7 +353,7 @@ func (h *HTTPTransport) NewTransport(caddyCtx caddy.Context) (*http.Transport, e
|
||||
h.NetworkProxyRaw = caddyconfig.JSONModuleObject(u, "from", "url", nil)
|
||||
}
|
||||
if len(h.NetworkProxyRaw) != 0 {
|
||||
proxyMod, err := caddyCtx.LoadModule(h, "NetworkProxyRaw")
|
||||
proxyMod, err := caddyCtx.LoadModule(h, "ForwardProxyRaw")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to load network_proxy module: %v", err)
|
||||
}
|
||||
@ -382,36 +382,6 @@ func (h *HTTPTransport) NewTransport(caddyCtx caddy.Context) (*http.Transport, e
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("making TLS client config: %v", err)
|
||||
}
|
||||
|
||||
// servername has a placeholder, so we need to replace it
|
||||
if strings.Contains(h.TLS.ServerName, "{") {
|
||||
rt.DialTLSContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
|
||||
// reuses the dialer from above to establish a plaintext connection
|
||||
conn, err := dialContext(ctx, network, addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// but add our own handshake logic
|
||||
repl := ctx.Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
|
||||
tlsConfig := rt.TLSClientConfig.Clone()
|
||||
tlsConfig.ServerName = repl.ReplaceAll(tlsConfig.ServerName, "")
|
||||
tlsConn := tls.Client(conn, tlsConfig)
|
||||
|
||||
// complete the handshake before returning the connection
|
||||
if rt.TLSHandshakeTimeout != 0 {
|
||||
var cancel context.CancelFunc
|
||||
ctx, cancel = context.WithTimeout(ctx, rt.TLSHandshakeTimeout)
|
||||
defer cancel()
|
||||
}
|
||||
err = tlsConn.HandshakeContext(ctx)
|
||||
if err != nil {
|
||||
_ = tlsConn.Close()
|
||||
return nil, err
|
||||
}
|
||||
return tlsConn, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if h.KeepAlive != nil {
|
||||
@ -483,9 +453,45 @@ func (h *HTTPTransport) NewTransport(caddyCtx caddy.Context) (*http.Transport, e
|
||||
return rt, nil
|
||||
}
|
||||
|
||||
// replaceTLSServername checks TLS servername to see if it needs replacing
|
||||
// if it does need replacing, it creates a new cloned HTTPTransport object to avoid any races
|
||||
// and does the replacing of the TLS servername on that and returns the new object
|
||||
// if no replacement is necessary it returns the original
|
||||
func (h *HTTPTransport) replaceTLSServername(repl *caddy.Replacer) *HTTPTransport {
|
||||
// check whether we have TLS and need to replace the servername in the TLSClientConfig
|
||||
if h.TLSEnabled() && strings.Contains(h.TLS.ServerName, "{") {
|
||||
// make a new h, "copy" the parts we don't need to touch, add a new *tls.Config and replace servername
|
||||
newtransport := &HTTPTransport{
|
||||
Resolver: h.Resolver,
|
||||
TLS: h.TLS,
|
||||
KeepAlive: h.KeepAlive,
|
||||
Compression: h.Compression,
|
||||
MaxConnsPerHost: h.MaxConnsPerHost,
|
||||
DialTimeout: h.DialTimeout,
|
||||
FallbackDelay: h.FallbackDelay,
|
||||
ResponseHeaderTimeout: h.ResponseHeaderTimeout,
|
||||
ExpectContinueTimeout: h.ExpectContinueTimeout,
|
||||
MaxResponseHeaderSize: h.MaxResponseHeaderSize,
|
||||
WriteBufferSize: h.WriteBufferSize,
|
||||
ReadBufferSize: h.ReadBufferSize,
|
||||
Versions: h.Versions,
|
||||
Transport: h.Transport.Clone(),
|
||||
h2cTransport: h.h2cTransport,
|
||||
}
|
||||
newtransport.Transport.TLSClientConfig.ServerName = repl.ReplaceAll(newtransport.Transport.TLSClientConfig.ServerName, "")
|
||||
return newtransport
|
||||
}
|
||||
|
||||
return h
|
||||
}
|
||||
|
||||
// RoundTrip implements http.RoundTripper.
|
||||
func (h *HTTPTransport) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
h.SetScheme(req)
|
||||
// Try to replace TLS servername if needed
|
||||
repl := req.Context().Value(caddy.ReplacerCtxKey).(*caddy.Replacer)
|
||||
transport := h.replaceTLSServername(repl)
|
||||
|
||||
transport.SetScheme(req)
|
||||
|
||||
// use HTTP/3 if enabled (TODO: This is EXPERIMENTAL)
|
||||
if h.h3Transport != nil {
|
||||
@ -501,7 +507,7 @@ func (h *HTTPTransport) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
return h.h2cTransport.RoundTrip(req)
|
||||
}
|
||||
|
||||
return h.Transport.RoundTrip(req)
|
||||
return transport.Transport.RoundTrip(req)
|
||||
}
|
||||
|
||||
// SetScheme ensures that the outbound request req
|
||||
@ -528,7 +534,13 @@ func (h *HTTPTransport) shouldUseTLS(req *http.Request) bool {
|
||||
}
|
||||
|
||||
port := req.URL.Port()
|
||||
return !slices.Contains(h.TLS.ExceptPorts, port)
|
||||
for i := range h.TLS.ExceptPorts {
|
||||
if h.TLS.ExceptPorts[i] == port {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
// TLSEnabled returns true if TLS is enabled.
|
||||
@ -640,7 +652,7 @@ func (t *TLSConfig) MakeTLSClientConfig(ctx caddy.Context) (*tls.Config, error)
return nil, fmt.Errorf("getting tls app: %v", err)
}
tlsApp := tlsAppIface.(*caddytls.TLS)
err = tlsApp.Manage(map[string]struct{}{t.ClientCertificateAutomate: {}})
err = tlsApp.Manage([]string{t.ClientCertificateAutomate})
if err != nil {
return nil, fmt.Errorf("managing client certificate: %v", err)
}
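On the master side, (*caddytls.TLS).Manage takes a set (map[string]struct{}) instead of a slice, matching the allCertDomains change earlier in this compare. A small hedged helper for call sites migrating from the slice form:

package main

import "fmt"

// toSet converts a list of domain names to the set form that the
// newer Manage signature expects; duplicates collapse for free.
func toSet(domains []string) map[string]struct{} {
    set := make(map[string]struct{}, len(domains))
    for _, d := range domains {
        set[d] = struct{}{}
    }
    return set
}

func main() {
    fmt.Println(len(toSet([]string{"example.com", "example.com", "www.example.com"}))) // 2
}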
@ -532,7 +532,7 @@ func (h *Handler) proxyLoopIteration(r *http.Request, origReq *http.Request, w h
|
||||
// the dial address may vary per-request if placeholders are
|
||||
// used, so perform those replacements here; the resulting
|
||||
// DialInfo struct should have valid network address syntax
|
||||
dialInfo, err := upstream.fillDialInfo(repl)
|
||||
dialInfo, err := upstream.fillDialInfo(r)
|
||||
if err != nil {
|
||||
return true, fmt.Errorf("making dial info: %v", err)
|
||||
}
|
||||
@ -1150,7 +1150,7 @@ func (lb LoadBalancing) tryAgain(ctx caddy.Context, start time.Time, retries int
|
||||
// we have to assume the upstream received the request, and
|
||||
// retries need to be carefully decided, because some requests
|
||||
// are not idempotent
|
||||
if !isDialError && (!isHandlerError || !errors.Is(herr, errNoUpstream)) {
|
||||
if !isDialError && !(isHandlerError && errors.Is(herr, errNoUpstream)) {
|
||||
if lb.RetryMatch == nil && req.Method != "GET" {
|
||||
// by default, don't retry requests if they aren't GET
|
||||
return false
|
||||
|
@ -219,7 +219,10 @@ func (r RandomChoiceSelection) Validate() error {
|
||||
|
||||
// Select returns an available host, if any.
|
||||
func (r RandomChoiceSelection) Select(pool UpstreamPool, _ *http.Request, _ http.ResponseWriter) *Upstream {
|
||||
k := min(r.Choose, len(pool))
|
||||
k := r.Choose
|
||||
if k > len(pool) {
|
||||
k = len(pool)
|
||||
}
|
||||
choices := make([]*Upstream, k)
|
||||
for i, upstream := range pool {
|
||||
if !upstream.Available() {
|
||||
@ -805,7 +808,7 @@ func leastRequests(upstreams []*Upstream) *Upstream {
|
||||
return nil
|
||||
}
|
||||
var best []*Upstream
|
||||
bestReqs := -1
|
||||
var bestReqs int = -1
|
||||
for _, upstream := range upstreams {
|
||||
if upstream == nil {
|
||||
continue
|
||||
|
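The RandomChoiceSelection hunk above clamps the number of choices with the min builtin before sampling. A rough, hypothetical sketch of the underlying power-of-k-choices idea, using plain ints as stand-ins for upstreams and their request counts:

package main

import (
	"fmt"
	"math/rand"
)

// selectRandomChoice samples up to k distinct candidates from loads
// (request counts per upstream) and returns the index of the least
// loaded sample, or -1 if there are no candidates.
func selectRandomChoice(loads []int, k int) int {
	k = min(k, len(loads)) // never ask for more choices than hosts exist
	if k == 0 {
		return -1
	}
	best := -1
	for _, idx := range rand.Perm(len(loads))[:k] {
		if best == -1 || loads[idx] < loads[best] {
			best = idx
		}
	}
	return best
}

func main() {
	loads := []int{12, 3, 7, 9}
	// index of the lighter of two random picks
	fmt.Println(selectRandomChoice(loads, 2))
}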
@ -52,4 +52,5 @@ func TestResolveIpVersion(t *testing.T) {
t.Errorf("resolveIpVersion(): Expected %s got %s", test.expectedIpVersion, ipVersion)
}
}

}

@ -377,7 +377,11 @@ func buildQueryString(qs string, repl *caddy.Replacer) string {
// performed in normalized/unescaped space.
func trimPathPrefix(escapedPath, prefix string) string {
var iPath, iPrefix int
for iPath < len(escapedPath) && iPrefix < len(prefix) {
for {
if iPath >= len(escapedPath) || iPrefix >= len(prefix) {
break
}

prefixCh := prefix[iPrefix]
ch := string(escapedPath[iPath])

@ -171,7 +171,6 @@ func BenchmarkServer_LogRequest_WithTrace(b *testing.B) {
s.logRequest(accLog, req, wrec, &duration, repl, bodyReader, false)
}
}

func TestServer_TrustedRealClientIP_NoTrustedHeaders(t *testing.T) {
req := httptest.NewRequest("GET", "/", nil)
req.RemoteAddr = "192.0.2.1:12345"

@ -22,7 +22,6 @@ import (
"net/http"
"net/textproto"
"os"
"slices"
"strconv"
"strings"
"text/template"

@ -324,7 +323,13 @@ func cmdRespond(fl caddycmd.Flags) (int, error) {

// figure out if status code was explicitly specified; this lets
// us set a non-zero value as the default but is a little hacky
statusCodeFlagSpecified := slices.Contains(os.Args, "--status")
var statusCodeFlagSpecified bool
for _, fl := range os.Args {
if fl == "--status" {
statusCodeFlagSpecified = true
break
}
}

// try to determine what kind of parameter the unnamed argument is
if arg != "" {

@ -91,17 +91,19 @@ func parseACMEServer(h httpcaddyfile.Helper) ([]httpcaddyfile.ConfigValue, error
acmeServer.Policy.AllowWildcardNames = true
case "allow":
r := &RuleSet{}
for nesting := h.Nesting(); h.NextBlock(nesting); {
if h.CountRemainingArgs() == 0 {
return nil, h.ArgErr() // TODO:
}
switch h.Val() {
case "domains":
r.Domains = append(r.Domains, h.RemainingArgs()...)
case "ip_ranges":
r.IPRanges = append(r.IPRanges, h.RemainingArgs()...)
default:
return nil, h.Errf("unrecognized 'allow' subdirective: %s", h.Val())
for h.Next() {
for h.NextBlock(h.Nesting() - 1) {
if h.CountRemainingArgs() == 0 {
return nil, h.ArgErr() // TODO:
}
switch h.Val() {
case "domains":
r.Domains = append(r.Domains, h.RemainingArgs()...)
case "ip_ranges":
r.IPRanges = append(r.IPRanges, h.RemainingArgs()...)
default:
return nil, h.Errf("unrecognized 'allow' subdirective: %s", h.Val())
}
}
}
if acmeServer.Policy == nil {

@ -110,17 +112,19 @@ func parseACMEServer(h httpcaddyfile.Helper) ([]httpcaddyfile.ConfigValue, error
acmeServer.Policy.Allow = r
case "deny":
r := &RuleSet{}
for nesting := h.Nesting(); h.NextBlock(nesting); {
if h.CountRemainingArgs() == 0 {
return nil, h.ArgErr() // TODO:
}
switch h.Val() {
case "domains":
r.Domains = append(r.Domains, h.RemainingArgs()...)
case "ip_ranges":
r.IPRanges = append(r.IPRanges, h.RemainingArgs()...)
default:
return nil, h.Errf("unrecognized 'deny' subdirective: %s", h.Val())
for h.Next() {
for h.NextBlock(h.Nesting() - 1) {
if h.CountRemainingArgs() == 0 {
return nil, h.ArgErr() // TODO:
}
switch h.Val() {
case "domains":
r.Domains = append(r.Domains, h.RemainingArgs()...)
case "ip_ranges":
r.IPRanges = append(r.IPRanges, h.RemainingArgs()...)
default:
return nil, h.Errf("unrecognized 'deny' subdirective: %s", h.Val())
}
}
}
if acmeServer.Policy == nil {
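Both sides of the allow/deny hunks walk a nested Caddyfile block; one uses a single NextBlock loop keyed on the current nesting level, the other wraps it in an outer Next loop. A small, hypothetical sketch of the single-loop pattern against the caddyfile test dispenser (the ruleSet struct and the input are illustrative only, not the module's actual wiring):

package main

import (
	"fmt"

	"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)

type ruleSet struct {
	Domains  []string
	IPRanges []string
}

// parseAllow consumes the body of an "allow { ... }" block.
func parseAllow(d *caddyfile.Dispenser) (*ruleSet, error) {
	r := new(ruleSet)
	for nesting := d.Nesting(); d.NextBlock(nesting); {
		switch d.Val() {
		case "domains":
			r.Domains = append(r.Domains, d.RemainingArgs()...)
		case "ip_ranges":
			r.IPRanges = append(r.IPRanges, d.RemainingArgs()...)
		default:
			return nil, d.Errf("unrecognized 'allow' subdirective: %s", d.Val())
		}
	}
	return r, nil
}

func main() {
	d := caddyfile.NewTestDispenser(`allow {
		domains example.com *.example.net
		ip_ranges 192.0.2.0/24
	}`)
	d.Next() // advance onto the "allow" token
	r, err := parseAllow(d)
	fmt.Println(r, err)
}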
@ -220,7 +220,7 @@ func (iss *ACMEIssuer) makeIssuerTemplate(ctx caddy.Context) (certmagic.ACMEIssu
}

if len(iss.NetworkProxyRaw) != 0 {
proxyMod, err := ctx.LoadModule(iss, "NetworkProxyRaw")
proxyMod, err := ctx.LoadModule(iss, "ForwardProxyRaw")
if err != nil {
return template, fmt.Errorf("failed to load network_proxy module: %v", err)
}

@ -388,8 +388,10 @@ func (ap *AutomationPolicy) onlyInternalIssuer() bool {
// isWildcardOrDefault determines if the subjects include any wildcard domains,
// or is the "default" policy (i.e. no subjects) which is unbounded.
func (ap *AutomationPolicy) isWildcardOrDefault() bool {
isWildcardOrDefault := len(ap.subjects) == 0

isWildcardOrDefault := false
if len(ap.subjects) == 0 {
isWildcardOrDefault = true
}
for _, sub := range ap.subjects {
if strings.HasPrefix(sub, "*") {
isWildcardOrDefault = true

@ -5,7 +5,6 @@ import (
"crypto/tls"
"fmt"
"io"
"net"
"net/http"
"net/url"
"strings"

@ -144,10 +143,6 @@ func (hcg HTTPCertGetter) GetCertificate(ctx context.Context, hello *tls.ClientH
qs.Set("server_name", hello.ServerName)
qs.Set("signature_schemes", strings.Join(sigs, ","))
qs.Set("cipher_suites", strings.Join(suites, ","))
localIP, _, err := net.SplitHostPort(hello.Conn.LocalAddr().String())
if err == nil && localIP != "" {
qs.Set("local_ip", localIP)
}
parsed.RawQuery = qs.Encode()

req, err := http.NewRequestWithContext(hcg.ctx, http.MethodGet, parsed.String(), nil)

@ -87,7 +87,13 @@ nextChoice:
}

if len(p.AnyTag) > 0 {
found := slices.ContainsFunc(p.AnyTag, cert.HasTag)
var found bool
for _, tag := range p.AnyTag {
if cert.HasTag(tag) {
found = true
break
}
}
if !found {
continue
}

@ -24,8 +24,6 @@ import (
"fmt"
"io"
"os"
"reflect"
"slices"
"strings"

"github.com/mholt/acmez/v3"

@ -370,7 +368,13 @@ func (p *ConnectionPolicy) buildStandardTLSConfig(ctx caddy.Context) error {
}

// ensure ALPN includes the ACME TLS-ALPN protocol
alpnFound := slices.Contains(p.ALPN, acmez.ACMETLS1Protocol)
var alpnFound bool
for _, a := range p.ALPN {
if a == acmez.ACMETLS1Protocol {
alpnFound = true
break
}
}
if !alpnFound && (cfg.NextProtos == nil || len(cfg.NextProtos) > 0) {
cfg.NextProtos = append(cfg.NextProtos, acmez.ACMETLS1Protocol)
}

@ -457,14 +461,6 @@ func (p ConnectionPolicy) SettingsEmpty() bool {
p.InsecureSecretsLog == ""
}

// SettingsEmpty returns true if p's settings (fields
// except the matchers) are the same as q.
func (p ConnectionPolicy) SettingsEqual(q ConnectionPolicy) bool {
p.MatchersRaw = nil
q.MatchersRaw = nil
return reflect.DeepEqual(p, q)
}

// UnmarshalCaddyfile sets up the ConnectionPolicy from Caddyfile tokens. Syntax:
//
// connection_policy {
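SettingsEqual above compares two policies while ignoring their matchers, which is safe because the receivers are value copies. A minimal sketch of that compare-all-but-some-fields trick on an invented struct:

package main

import (
	"fmt"
	"reflect"
)

type policy struct {
	MatchersRaw []string // ignored when comparing settings
	ALPN        []string
	CipherSuite string
}

// settingsEqual reports whether p and q agree on everything except
// MatchersRaw. p and q are value copies, so zeroing the field here
// does not affect the callers' structs.
func settingsEqual(p, q policy) bool {
	p.MatchersRaw = nil
	q.MatchersRaw = nil
	return reflect.DeepEqual(p, q)
}

func main() {
	a := policy{MatchersRaw: []string{"sni a"}, ALPN: []string{"h2"}}
	b := policy{MatchersRaw: []string{"sni b"}, ALPN: []string{"h2"}}
	fmt.Println(settingsEqual(a, b)) // true: only the ignored matchers differ
}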
@ -989,48 +985,6 @@ func (l *LeafCertClientAuth) Provision(ctx caddy.Context) error {
return nil
}

// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (l *LeafCertClientAuth) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
d.NextArg()

// accommodate the use of one-liners
if d.CountRemainingArgs() > 1 {
d.NextArg()
modName := d.Val()
mod, err := caddyfile.UnmarshalModule(d, "tls.leaf_cert_loader."+modName)
if err != nil {
return d.WrapErr(err)
}
vMod, ok := mod.(LeafCertificateLoader)
if !ok {
return fmt.Errorf("leaf module '%s' is not a leaf certificate loader", vMod)
}
l.LeafCertificateLoadersRaw = append(
l.LeafCertificateLoadersRaw,
caddyconfig.JSONModuleObject(vMod, "loader", modName, nil),
)
return nil
}

// accommodate the use of nested blocks
for nesting := d.Nesting(); d.NextBlock(nesting); {
modName := d.Val()
mod, err := caddyfile.UnmarshalModule(d, "tls.leaf_cert_loader."+modName)
if err != nil {
return d.WrapErr(err)
}
vMod, ok := mod.(LeafCertificateLoader)
if !ok {
return fmt.Errorf("leaf module '%s' is not a leaf certificate loader", vMod)
}
l.LeafCertificateLoadersRaw = append(
l.LeafCertificateLoadersRaw,
caddyconfig.JSONModuleObject(vMod, "loader", modName, nil),
)
}
return nil
}

func (l LeafCertClientAuth) VerifyClientCertificate(rawCerts [][]byte, _ [][]*x509.Certificate) error {
if len(rawCerts) == 0 {
return fmt.Errorf("no client certificate provided")

@ -1041,8 +995,10 @@ func (l LeafCertClientAuth) VerifyClientCertificate(rawCerts [][]byte, _ [][]*x5
return fmt.Errorf("can't parse the given certificate: %s", err.Error())
}

if slices.ContainsFunc(l.trustedLeafCerts, remoteLeafCert.Equal) {
return nil
for _, trustedLeafCert := range l.trustedLeafCerts {
if remoteLeafCert.Equal(trustedLeafCert) {
return nil
}
}

return fmt.Errorf("client leaf certificate failed validation")
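VerifyClientCertificate above pins the presented leaf against a list of trusted certificates. A self-contained sketch of the same check; the throwaway self-signed certificate exists only to make the example runnable:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"crypto/x509/pkix"
	"fmt"
	"math/big"
	"slices"
	"time"
)

// verifyLeaf accepts the handshake's raw leaf certificate only if it
// equals one of the pinned, trusted leaf certificates.
func verifyLeaf(rawCert []byte, trusted []*x509.Certificate) error {
	leaf, err := x509.ParseCertificate(rawCert)
	if err != nil {
		return fmt.Errorf("can't parse the given certificate: %v", err)
	}
	if slices.ContainsFunc(trusted, leaf.Equal) {
		return nil
	}
	return fmt.Errorf("client leaf certificate failed validation")
}

func main() {
	// self-signed throwaway certificate, standing in for a real client cert
	key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject:      pkix.Name{CommonName: "client"},
		NotBefore:    time.Now(),
		NotAfter:     time.Now().Add(time.Hour),
	}
	der, _ := x509.CreateCertificate(rand.Reader, tmpl, tmpl, &key.PublicKey, key)
	cert, _ := x509.ParseCertificate(der)

	fmt.Println(verifyLeaf(der, []*x509.Certificate{cert})) // <nil>: pinned
	fmt.Println(verifyLeaf(der, nil))                       // error: not pinned
}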
@ -1092,7 +1048,6 @@ var secretsLogPool = caddy.NewUsagePool()
var (
_ caddyfile.Unmarshaler = (*ClientAuthentication)(nil)
_ caddyfile.Unmarshaler = (*ConnectionPolicy)(nil)
_ caddyfile.Unmarshaler = (*LeafCertClientAuth)(nil)
)

// ParseCaddyfileNestedMatcherSet parses the Caddyfile tokens for a nested

@ -138,6 +138,7 @@ func (ech *ECH) Provision(ctx caddy.Context) ([]string, error) {
// all existing configs are now loaded; see if we need to make any new ones
// based on the input configuration, and also mark the most recent one(s) as
// current/active, so they can be used for ECH retries

for _, cfg := range ech.Configs {
publicName := strings.ToLower(strings.TrimSpace(cfg.PublicName))

@ -278,7 +279,7 @@ func (t *TLS) publishECHConfigs() error {
// if all the (inner) domains have had this ECH config list published
// by this publisher, then try the next publication config
if len(serverNamesSet) == 0 {
logger.Debug("ECH config list already published by publisher for associated domains (or no domains to publish for)",
logger.Debug("ECH config list already published by publisher for associated domains",
zap.Uint8s("config_ids", configIDs),
zap.String("publisher", publisherKey))
continue

@ -299,7 +300,7 @@ func (t *TLS) publishECHConfigs() error {
err := publisher.PublishECHConfigList(t.ctx, dnsNamesToPublish, echCfgListBin)
if err == nil {
t.logger.Info("published ECH configuration list",
zap.Strings("domains", dnsNamesToPublish),
zap.Strings("domains", publication.Domains),
zap.Uint8s("config_ids", configIDs),
zap.Error(err))
// update publication history, so that we don't unnecessarily republish every time

@ -389,33 +390,27 @@ func loadECHConfig(ctx caddy.Context, configID string) (echConfig, error) {
return echConfig{}, nil
}
metaBytes, err := storage.Load(ctx, metaKey)
if errors.Is(err, fs.ErrNotExist) {
logger.Warn("ECH config metadata file missing; will recreate at next publication",
zap.String("config_id", configID),
zap.Error(err))
} else if err != nil {
if err != nil {
delErr := storage.Delete(ctx, cfgIDKey)
if delErr != nil {
return echConfig{}, fmt.Errorf("error loading ECH config metadata (%v) and cleaning up parent storage key %s: %v", err, cfgIDKey, delErr)
return echConfig{}, fmt.Errorf("error loading ECH metadata (%v) and cleaning up parent storage key %s: %v", err, cfgIDKey, delErr)
}
logger.Warn("could not load ECH config metadata; deleted its folder",
logger.Warn("could not load ECH metadata; deleted its config folder",
zap.String("config_id", configID),
zap.Error(err))
return echConfig{}, nil
}
var meta echConfigMeta
if len(metaBytes) > 0 {
if err := json.Unmarshal(metaBytes, &meta); err != nil {
// even though it's just metadata, reset the whole config since we can't reliably maintain it
delErr := storage.Delete(ctx, cfgIDKey)
if delErr != nil {
return echConfig{}, fmt.Errorf("error decoding ECH metadata (%v) and cleaning up parent storage key %s: %v", err, cfgIDKey, delErr)
}
logger.Warn("could not JSON-decode ECH metadata; deleted its config folder",
zap.String("config_id", configID),
zap.Error(err))
return echConfig{}, nil
if err := json.Unmarshal(metaBytes, &meta); err != nil {
// even though it's just metadata, reset the whole config since we can't reliably maintain it
delErr := storage.Delete(ctx, cfgIDKey)
if delErr != nil {
return echConfig{}, fmt.Errorf("error decoding ECH metadata (%v) and cleaning up parent storage key %s: %v", err, cfgIDKey, delErr)
}
logger.Warn("could not JSON-decode ECH metadata; deleted its config folder",
zap.String("config_id", configID),
zap.Error(err))
return echConfig{}, nil
}

cfg.privKeyBin = privKeyBytes

@ -646,6 +641,10 @@ nextName:
}

relName := libdns.RelativeName(domain+".", zone)
// TODO: libdns.RelativeName should probably return "@" instead of "". (The latest commits of libdns do this, so remove this logic once upgraded.)
if relName == "" {
relName = "@"
}

// get existing records for this domain; we need to make sure another
// record exists for it so we don't accidentally trample a wildcard; we

@ -658,25 +657,22 @@ nextName:
zap.Error(err))
continue
}
var httpsRec libdns.ServiceBinding
var httpsRec libdns.Record
var nameHasExistingRecord bool
for _, rec := range recs {
rr := rec.RR()
if rr.Name == relName {
if rec.Name == relName {
// CNAME records are exclusive of all other records, so we cannot publish an HTTPS
// record for a domain that is CNAME'd. See #6922.
if rr.Type == "CNAME" {
if rec.Type == "CNAME" {
dnsPub.logger.Warn("domain has CNAME record, so unable to publish ECH data to HTTPS record",
zap.String("domain", domain),
zap.String("cname_value", rr.Data))
zap.String("cname_value", rec.Value))
continue nextName
}
nameHasExistingRecord = true
if svcb, ok := rec.(libdns.ServiceBinding); ok && svcb.Scheme == "https" {
if svcb.Target == "" || svcb.Target == "." {
httpsRec = svcb
break
}
if rec.Type == "HTTPS" && (rec.Target == "" || rec.Target == ".") {
httpsRec = rec
break
}
}
}

@ -692,24 +688,31 @@ nextName:
zap.String("zone", zone))
continue
}
params := httpsRec.Params
if params == nil {
params = make(libdns.SvcParams)
params := make(svcParams)
if httpsRec.Value != "" {
params, err = parseSvcParams(httpsRec.Value)
if err != nil {
dnsPub.logger.Error("unable to parse existing DNS record to publish ECH data to HTTPS DNS record",
zap.String("domain", domain),
zap.String("https_rec_value", httpsRec.Value),
zap.Error(err))
continue
}
}

// overwrite only the "ech" SvcParamKey
// overwrite only the ech SvcParamKey
params["ech"] = []string{base64.StdEncoding.EncodeToString(configListBin)}

// publish record
_, err = dnsPub.provider.SetRecords(ctx, zone, []libdns.Record{
libdns.ServiceBinding{
{
// HTTPS and SVCB RRs: RFC 9460 (https://www.rfc-editor.org/rfc/rfc9460)
Scheme: "https",
Type: "HTTPS",
Name: relName,
TTL: 5 * time.Minute, // TODO: low hard-coded value only temporary; change to a higher value once more field-tested and key rotation is implemented
Priority: 2, // allows a manual override with priority 1
Priority: 2, // allows a manual override with priority 1
Target: ".",
Params: params,
Value: params.String(),
TTL: 1 * time.Minute, // TODO: for testing only
},
})
if err != nil {
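The publication hunk above writes the ECH config list into the ech SvcParam of an HTTPS record. A small, hypothetical sketch of building such a record with the libdns types that appear in the hunk (field names taken from the code above; the config list bytes are dummy data):

package main

import (
	"encoding/base64"
	"fmt"
	"time"

	"github.com/libdns/libdns"
)

func main() {
	// stand-in for a marshaled ECHConfigList
	echConfigList := []byte{0x01, 0x02, 0x03}

	// HTTPS/SVCB record carrying only the "ech" SvcParam (RFC 9460);
	// priority 2 leaves room for a manual override at priority 1.
	rec := libdns.ServiceBinding{
		Scheme:   "https",
		Name:     "@", // apex, relative to the zone
		TTL:      5 * time.Minute,
		Priority: 2,
		Target:   ".",
		Params: libdns.SvcParams{
			"ech": {base64.StdEncoding.EncodeToString(echConfigList)},
		},
	}

	rr := rec.RR() // presentation form, roughly what a provider would publish
	fmt.Println(rr.Type, rr.Name, rr.Data)
}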
@ -948,6 +951,172 @@ func newECHConfigID(ctx caddy.Context) (uint8, error) {
return 0, fmt.Errorf("depleted attempts to find an available config_id")
}

// svcParams represents SvcParamKey and SvcParamValue pairs as
// described in https://www.rfc-editor.org/rfc/rfc9460 (section 2.1).
type svcParams map[string][]string

// parseSvcParams parses service parameters into a structured type
// for safer manipulation.
func parseSvcParams(input string) (svcParams, error) {
if len(input) > 4096 {
return nil, fmt.Errorf("input too long: %d", len(input))
}

params := make(svcParams)
input = strings.TrimSpace(input) + " "

for cursor := 0; cursor < len(input); cursor++ {
var key, rawVal string

keyValPair:
for i := cursor; i < len(input); i++ {
switch input[i] {
case '=':
key = strings.ToLower(strings.TrimSpace(input[cursor:i]))
i++
cursor = i

var quoted bool
if input[cursor] == '"' {
quoted = true
i++
cursor = i
}

var escaped bool

for j := cursor; j < len(input); j++ {
switch input[j] {
case '"':
if !quoted {
return nil, fmt.Errorf("illegal DQUOTE at position %d", j)
}
if !escaped {
// end of quoted value
rawVal = input[cursor:j]
j++
cursor = j
break keyValPair
}
case '\\':
escaped = true
case ' ', '\t', '\n', '\r':
if !quoted {
// end of unquoted value
rawVal = input[cursor:j]
cursor = j
break keyValPair
}
default:
escaped = false
}
}

case ' ', '\t', '\n', '\r':
// key with no value (flag)
key = input[cursor:i]
params[key] = []string{}
cursor = i
break keyValPair
}
}

if rawVal == "" {
continue
}

var sb strings.Builder

var escape int // start of escape sequence (after \, so 0 is never a valid start)
for i := 0; i < len(rawVal); i++ {
ch := rawVal[i]
if escape > 0 {
// validate escape sequence
// (RFC 9460 Appendix A)
// escaped: "\" ( non-digit / dec-octet )
// non-digit: "%x21-2F / %x3A-7E"
// dec-octet: "0-255 as a 3-digit decimal number"
if ch >= '0' && ch <= '9' {
// advance to end of decimal octet, which must be 3 digits
i += 2
if i > len(rawVal) {
return nil, fmt.Errorf("value ends with incomplete escape sequence: %s", rawVal[escape:])
}
decOctet, err := strconv.Atoi(rawVal[escape : i+1])
if err != nil {
return nil, err
}
if decOctet < 0 || decOctet > 255 {
return nil, fmt.Errorf("invalid decimal octet in escape sequence: %s (%d)", rawVal[escape:i], decOctet)
}
sb.WriteRune(rune(decOctet))
escape = 0
continue
} else if (ch < 0x21 || ch > 0x2F) && (ch < 0x3A && ch > 0x7E) {
return nil, fmt.Errorf("illegal escape sequence %s", rawVal[escape:i])
}
}
switch ch {
case ';', '(', ')':
// RFC 9460 Appendix A:
// > contiguous = 1*( non-special / escaped )
// > non-special is VCHAR minus DQUOTE, ";", "(", ")", and "\".
return nil, fmt.Errorf("illegal character in value %q at position %d: %s", rawVal, i, string(ch))
case '\\':
escape = i + 1
default:
sb.WriteByte(ch)
escape = 0
}
}

params[key] = strings.Split(sb.String(), ",")
}

return params, nil
}

// String serializes svcParams into zone presentation format.
func (params svcParams) String() string {
var sb strings.Builder
for key, vals := range params {
if sb.Len() > 0 {
sb.WriteRune(' ')
}
sb.WriteString(key)
var hasVal, needsQuotes bool
for _, val := range vals {
if len(val) > 0 {
hasVal = true
}
if strings.ContainsAny(val, `" `) {
needsQuotes = true
}
if hasVal && needsQuotes {
break
}
}
if hasVal {
sb.WriteRune('=')
}
if needsQuotes {
sb.WriteRune('"')
}
for i, val := range vals {
if i > 0 {
sb.WriteRune(',')
}
val = strings.ReplaceAll(val, `"`, `\"`)
val = strings.ReplaceAll(val, `,`, `\,`)
sb.WriteString(val)
}
if needsQuotes {
sb.WriteRune('"')
}
}
return sb.String()
}

// ECHPublisher is an interface for publishing ECHConfigList values
// so that they can be used by clients.
type ECHPublisher interface {
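A short usage sketch of the helpers above, assuming it sits in package caddytls next to parseSvcParams: parse an existing HTTPS record value, overwrite only the ech parameter, and serialize back. The setECHParam name is invented for illustration:

package caddytls

import (
	"encoding/base64"
	"fmt"
)

// setECHParam shows the round trip used during publication: parse an
// existing record's SvcParams, overwrite only the "ech" key, and
// serialize back to zone presentation format.
func setECHParam(recordValue string, echConfigList []byte) (string, error) {
	params, err := parseSvcParams(recordValue)
	if err != nil {
		return "", fmt.Errorf("parsing existing record value: %v", err)
	}
	params["ech"] = []string{base64.StdEncoding.EncodeToString(echConfigList)}
	return params.String(), nil
}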
129
modules/caddytls/ech_test.go
Normal file
129
modules/caddytls/ech_test.go
Normal file
@ -0,0 +1,129 @@
package caddytls

import (
"reflect"
"testing"
)

func TestParseSvcParams(t *testing.T) {
for i, test := range []struct {
input string
expect svcParams
shouldErr bool
}{
{
input: `alpn="h2,h3" no-default-alpn ipv6hint=2001:db8::1 port=443`,
expect: svcParams{
"alpn": {"h2", "h3"},
"no-default-alpn": {},
"ipv6hint": {"2001:db8::1"},
"port": {"443"},
},
},
{
input: `key=value quoted="some string" flag`,
expect: svcParams{
"key": {"value"},
"quoted": {"some string"},
"flag": {},
},
},
{
input: `key="nested \"quoted\" value,foobar"`,
expect: svcParams{
"key": {`nested "quoted" value`, "foobar"},
},
},
{
input: `alpn=h3,h2 tls-supported-groups=29,23 no-default-alpn ech="foobar"`,
expect: svcParams{
"alpn": {"h3", "h2"},
"tls-supported-groups": {"29", "23"},
"no-default-alpn": {},
"ech": {"foobar"},
},
},
{
input: `escape=\097`,
expect: svcParams{
"escape": {"a"},
},
},
{
input: `escapes=\097\098c`,
expect: svcParams{
"escapes": {"abc"},
},
},
} {
actual, err := parseSvcParams(test.input)
if err != nil && !test.shouldErr {
t.Errorf("Test %d: Expected no error, but got: %v (input=%q)", i, err, test.input)
continue
} else if err == nil && test.shouldErr {
t.Errorf("Test %d: Expected an error, but got no error (input=%q)", i, test.input)
continue
}
if !reflect.DeepEqual(test.expect, actual) {
t.Errorf("Test %d: Expected %v, got %v (input=%q)", i, test.expect, actual, test.input)
continue
}
}
}

func TestSvcParamsString(t *testing.T) {
// this test relies on the parser also working
// because we can't just compare string outputs
// since map iteration is unordered
for i, test := range []svcParams{
{
"alpn": {"h2", "h3"},
"no-default-alpn": {},
"ipv6hint": {"2001:db8::1"},
"port": {"443"},
},
{
"key": {"value"},
"quoted": {"some string"},
"flag": {},
},
{
"key": {`nested "quoted" value`, "foobar"},
},
{
"alpn": {"h3", "h2"},
"tls-supported-groups": {"29", "23"},
"no-default-alpn": {},
"ech": {"foobar"},
},
} {
combined := test.String()
parsed, err := parseSvcParams(combined)
if err != nil {
t.Errorf("Test %d: Expected no error, but got: %v (input=%q)", i, err, test)
continue
}
if len(parsed) != len(test) {
t.Errorf("Test %d: Expected %d keys, but got %d", i, len(test), len(parsed))
continue
}
for key, expectedVals := range test {
if expected, actual := len(expectedVals), len(parsed[key]); expected != actual {
t.Errorf("Test %d: Expected key %s to have %d values, but had %d", i, key, expected, actual)
continue
}
for j, expected := range expectedVals {
if actual := parsed[key][j]; actual != expected {
t.Errorf("Test %d key %q value %d: Expected '%s' but got '%s'", i, key, j, expected, actual)
continue
}
}
}
if !reflect.DeepEqual(parsed, test) {
t.Errorf("Test %d: Expected %#v, got %#v", i, test, combined)
continue
}
}
}
@ -21,7 +21,6 @@ import (
"os"

"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)

func init() {

@ -33,14 +32,6 @@ type LeafFileLoader struct {
Files []string `json:"files,omitempty"`
}

// CaddyModule returns the Caddy module information.
func (LeafFileLoader) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "tls.leaf_cert_loader.file",
New: func() caddy.Module { return new(LeafFileLoader) },
}
}

// Provision implements caddy.Provisioner.
func (fl *LeafFileLoader) Provision(ctx caddy.Context) error {
repl, ok := ctx.Value(caddy.ReplacerCtxKey).(*caddy.Replacer)

@ -53,11 +44,12 @@ func (fl *LeafFileLoader) Provision(ctx caddy.Context) error {
return nil
}

// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (fl *LeafFileLoader) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
d.NextArg()
fl.Files = append(fl.Files, d.RemainingArgs()...)
return nil
// CaddyModule returns the Caddy module information.
func (LeafFileLoader) CaddyModule() caddy.ModuleInfo {
return caddy.ModuleInfo{
ID: "tls.leaf_cert_loader.file",
New: func() caddy.Module { return new(LeafFileLoader) },
}
}

// LoadLeafCertificates returns the certificates to be loaded by fl.

@ -104,5 +96,4 @@ func convertPEMFilesToDERBytes(filename string) ([]byte, error) {
var (
_ LeafCertificateLoader = (*LeafFileLoader)(nil)
_ caddy.Provisioner = (*LeafFileLoader)(nil)
_ caddyfile.Unmarshaler = (*LeafFileLoader)(nil)
)

@ -22,7 +22,6 @@ import (
"strings"

"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)

func init() {

@ -56,13 +55,6 @@ func (fl *LeafFolderLoader) Provision(ctx caddy.Context) error {
return nil
}

// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (fl *LeafFolderLoader) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
d.NextArg()
fl.Folders = append(fl.Folders, d.RemainingArgs()...)
return nil
}

// LoadLeafCertificates loads all the leaf certificates in the directories
// listed in fl from all files ending with .pem.
func (fl LeafFolderLoader) LoadLeafCertificates() ([]*x509.Certificate, error) {

@ -102,5 +94,4 @@ func (fl LeafFolderLoader) LoadLeafCertificates() ([]*x509.Certificate, error) {
var (
_ LeafCertificateLoader = (*LeafFolderLoader)(nil)
_ caddy.Provisioner = (*LeafFolderLoader)(nil)
_ caddyfile.Unmarshaler = (*LeafFolderLoader)(nil)
)

@ -19,7 +19,6 @@ import (
"fmt"

"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig/caddyfile"
)

func init() {

@ -53,13 +52,6 @@ func (LeafPEMLoader) CaddyModule() caddy.ModuleInfo {
}
}

// UnmarshalCaddyfile implements caddyfile.Unmarshaler.
func (fl *LeafPEMLoader) UnmarshalCaddyfile(d *caddyfile.Dispenser) error {
d.NextArg()
fl.Certificates = append(fl.Certificates, d.RemainingArgs()...)
return nil
}

// LoadLeafCertificates returns the certificates contained in pl.
func (pl LeafPEMLoader) LoadLeafCertificates() ([]*x509.Certificate, error) {
certs := make([]*x509.Certificate, 0, len(pl.Certificates))
Some files were not shown because too many files have changed in this diff.