Mirror of https://github.com/kyverno/kyverno.git (synced 2025-04-08 18:15:48 +00:00)

commit 7a9b75adcb (parent b2ad71cc5e)

    update vendor

711 changed files with 74987 additions and 66747 deletions
vendor/github.com/cenkalti/backoff/README.md (generated, vendored, 5 changes)

@@ -9,7 +9,10 @@ The retries exponentially increase and stop increasing when a certain threshold
 
 ## Usage
 
-See https://godoc.org/github.com/cenkalti/backoff#pkg-examples
+Import path is `github.com/cenkalti/backoff/v3`. Please note the version part at the end.
+
+godoc.org does not support modules yet,
+so you can use https://godoc.org/gopkg.in/cenkalti/backoff.v3 to view the documentation.
 
 ## Contributing
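For readers skimming the vendor bump, a minimal sketch of what this README describes — the v3 module path plus the package's Retry helper (assuming only the backoff v3 API that appears in the vendored files below):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/cenkalti/backoff/v3"
)

func main() {
	attempts := 0
	operation := func() error {
		attempts++
		if attempts < 3 {
			return errors.New("transient failure") // retried with growing delays
		}
		return nil // a nil return stops the retry loop
	}

	// Retry runs operation until it succeeds or the policy gives up
	// (the exponential policy stops once MaxElapsedTime is exceeded).
	if err := backoff.Retry(operation, backoff.NewExponentialBackOff()); err != nil {
		fmt.Println("giving up:", err)
		return
	}
	fmt.Println("succeeded after", attempts, "attempts")
}
```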
vendor/github.com/cenkalti/backoff/context.go (generated, vendored, 6 changes)

@@ -7,7 +7,7 @@ import (
 // BackOffContext is a backoff policy that stops retrying after the context
 // is canceled.
-type BackOffContext interface {
+type BackOffContext interface { // nolint: golint
 	BackOff
 	Context() context.Context
 }
@@ -20,7 +20,7 @@ type backOffContext struct {
 // WithContext returns a BackOffContext with context ctx
 //
 // ctx must not be nil
-func WithContext(b BackOff, ctx context.Context) BackOffContext {
+func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint
 	if ctx == nil {
 		panic("nil context")
 	}
@@ -56,7 +56,7 @@ func (b *backOffContext) NextBackOff() time.Duration {
 	default:
 	}
 	next := b.BackOff.NextBackOff()
-	if deadline, ok := b.ctx.Deadline(); ok && deadline.Sub(time.Now()) < next {
+	if deadline, ok := b.ctx.Deadline(); ok && deadline.Sub(time.Now()) < next { // nolint: gosimple
 		return Stop
 	}
 	return next
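The deadline check in the last hunk is what lets a context cut a retry loop short: NextBackOff returns Stop as soon as the time left before the deadline is smaller than the next interval. A hedged sketch of that behavior, using only the WithContext and Retry APIs visible in this vendored code:

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/cenkalti/backoff/v3"
)

func main() {
	// A deadline shorter than the growing intervals means NextBackOff
	// soon returns backoff.Stop, so Retry gives up early.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	b := backoff.WithContext(backoff.NewExponentialBackOff(), ctx)
	err := backoff.Retry(func() error {
		return errors.New("still failing")
	}, b)
	fmt.Println(err) // non-nil: the context deadline ended the retries
}
```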
vendor/github.com/cenkalti/backoff/example_test.go (generated, vendored, 5 changes)

@@ -20,7 +20,7 @@ func ExampleRetry() {
 	// Operation is successful.
 }
 
-func ExampleRetryContext() {
+func ExampleRetryContext() { // nolint: govet
 	// A context
 	ctx := context.Background()
 
@@ -52,7 +52,7 @@ func ExampleTicker() {
 
 	// Ticks will continue to arrive when the previous operation is still running,
 	// so operations that take a while to fail could run in quick succession.
-	for _ = range ticker.C {
+	for range ticker.C {
 		if err = operation(); err != nil {
 			log.Println(err, "will retry...")
 			continue
@@ -68,5 +68,4 @@ func ExampleTicker() {
 	}
 
 	// Operation is successful.
-	return
 }
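Pieced together from the hunks above, the full Ticker pattern looks roughly like this — a sketch under the assumption that the loop exits via ticker.Stop() on success, as the truncated example suggests:

```go
package main

import (
	"errors"
	"log"

	"github.com/cenkalti/backoff/v3"
)

func main() {
	attempts := 0
	operation := func() error { // hypothetical flaky operation
		attempts++
		if attempts < 3 {
			return errors.New("transient failure")
		}
		return nil
	}

	ticker := backoff.NewTicker(backoff.NewExponentialBackOff())

	var err error
	// Ticks keep arriving even while the previous attempt is still running,
	// so slow failures can run in quick succession (per the comment above).
	for range ticker.C {
		if err = operation(); err != nil {
			log.Println(err, "will retry...")
			continue
		}
		ticker.Stop()
		break
	}
	if err != nil {
		log.Fatalln("operation failed:", err) // channel closed before a success
	}
	log.Println("operation successful")
}
```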
vendor/github.com/cenkalti/backoff/exponential.go (generated, vendored, 3 changes)

@@ -103,13 +103,14 @@ func (t systemClock) Now() time.Time {
 var SystemClock = systemClock{}
 
 // Reset the interval back to the initial retry interval and restarts the timer.
+// Reset must be called before using b.
 func (b *ExponentialBackOff) Reset() {
 	b.currentInterval = b.InitialInterval
 	b.startTime = b.Clock.Now()
 }
 
 // NextBackOff calculates the next backoff interval using the formula:
-// Randomized interval = RetryInterval +/- (RandomizationFactor * RetryInterval)
+// Randomized interval = RetryInterval * (1 ± RandomizationFactor)
 func (b *ExponentialBackOff) NextBackOff() time.Duration {
 	// Make sure we have not gone over the maximum elapsed time.
 	if b.MaxElapsedTime != 0 && b.GetElapsedTime() > b.MaxElapsedTime {
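To make the corrected comment concrete, a sketch using the package defaults as I recall them (assumed: InitialInterval 500ms, RandomizationFactor 0.5, Multiplier 1.5 — not stated in this hunk):

```go
package main

import (
	"fmt"

	"github.com/cenkalti/backoff/v3"
)

func main() {
	b := backoff.NewExponentialBackOff()
	b.Reset() // per the updated doc comment: Reset must be called before use

	// First call: the current interval is 500ms, so the randomized value is
	// drawn uniformly from 500ms * (1 ± 0.5) = [250ms, 750ms]; the current
	// interval then grows by the 1.5 multiplier for the next call.
	fmt.Println(b.NextBackOff()) // somewhere in [250ms, 750ms]
	fmt.Println(b.NextBackOff()) // somewhere in [375ms, 1125ms]
}
```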
vendor/github.com/cenkalti/backoff/retry.go (generated, vendored, 4 changes)

@@ -74,6 +74,10 @@ func (e *PermanentError) Error() string {
 	return e.Err.Error()
 }
 
+func (e *PermanentError) Unwrap() error {
+	return e.Err
+}
+
 // Permanent wraps the given err in a *PermanentError.
 func Permanent(err error) *PermanentError {
 	return &PermanentError{
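The new Unwrap method is what lets Go 1.13 error inspection see through the wrapper. A small sketch (ErrNotFound is a hypothetical sentinel, not part of the library):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/cenkalti/backoff/v3"
)

var ErrNotFound = errors.New("not found") // hypothetical sentinel error

func main() {
	// Permanent marks an error as non-retryable; with Unwrap defined,
	// errors.Is can now match the sentinel through *PermanentError.
	perm := backoff.Permanent(ErrNotFound)
	fmt.Println(errors.Is(perm, ErrNotFound)) // true
}
```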
vendor/github.com/cenkalti/backoff/ticker_test.go (generated, vendored, 4 changes)

@@ -35,7 +35,7 @@ func TestTicker(t *testing.T) {
 	}
 
 	var err error
-	for _ = range ticker.C {
+	for range ticker.C {
 		if err = f(); err != nil {
 			t.Log(err)
 			continue
@@ -77,7 +77,7 @@ func TestTickerContext(t *testing.T) {
 	ticker := NewTicker(b)
 
 	var err error
-	for _ = range ticker.C {
+	for range ticker.C {
 		if err = f(); err != nil {
 			t.Log(err)
 			continue
vendor/github.com/cenkalti/backoff/tries_test.go (generated, vendored, 1 change)

@@ -40,7 +40,6 @@ func TestMaxTriesHappy(t *testing.T) {
 		if d == Stop {
 			t.Error("returned Stop after reset")
 		}
-
 }
 
 func TestMaxTriesZero(t *testing.T) {
vendor/github.com/minio/minio/.travis.yml (generated, vendored, 2 changes)

@@ -46,7 +46,7 @@ matrix:
       go: 1.13.x
       script:
         - go build --ldflags="$(go run buildscripts/gen-ldflags.go)" -o %GOPATH%\bin\minio.exe
-        - go test -v --timeout 20m ./...
+        - for d in $(go list ./... | grep -v browser); do go test -v --timeout 20m "$d"; done
 
 before_script:
   # Add an IPv6 config - see the corresponding Travis issue
vendor/github.com/minio/minio/CREDITS (generated, vendored, 669 changes)

@@ -691,616 +691,59 @@ For the lib/nodejs/lib/thrift/json_parse.js:
[Condensed: this hunk swaps license boilerplate for the Microsoft Azure
dependencies. The removed 616 lines are duplicated full Apache License,
Version 2.0 texts ("Copyright 2016 Microsoft Corporation" /
"Copyright 2015 Microsoft Corporation") attached to the
github.com/Azure/azure-sdk-for-go, github.com/Azure/azure-pipeline-go,
github.com/Azure/azure-storage-blob-go, and github.com/Azure/go-autorest
entries; the 59 replacement lines carry the far shorter MIT License
("Copyright (c) Microsoft Corporation. All rights reserved.") for the Azure
entries. The full license texts are omitted here.]

@@ -10910,6 +10353,34 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.
 
 
 ================================================================
 
+github.com/klauspost/readahead
+https://github.com/klauspost/readahead
+----------------------------------------------------------------
+The MIT License (MIT)
+
+Copyright (c) 2015 Klaus Post
+
+[full MIT License text omitted]
+
+================================================================
+
 github.com/klauspost/reedsolomon
vendor/github.com/minio/minio/Makefile (generated, vendored, 6 changes)

@@ -65,11 +65,9 @@ test: verifiers build
 	@GO111MODULE=on CGO_ENABLED=0 go test -tags kqueue ./... 1>/dev/null
 
 # Verify minio binary
-# TODO: enable races as well
-# @GO111MODULE=on CGO_ENABLED=1 go build -race -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio 1>/dev/null
 verify:
 	@echo "Verifying build"
 	@GO111MODULE=on CGO_ENABLED=1 go build -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio 1>/dev/null
+	@echo "Verifying build with race"
+	@GO111MODULE=on CGO_ENABLED=1 go build -race -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio 1>/dev/null
 	@(env bash $(PWD)/buildscripts/verify-build.sh)
 
 coverage: build
vendor/github.com/minio/minio/README.md (generated, vendored, 6 changes)

@@ -3,9 +3,7 @@
 
 [](https://min.io)
 
-MinIO is an object storage server released under Apache License v2.0. It is compatible with Amazon S3 cloud storage service. It is best suited for storing unstructured data such as photos, videos, log files, backups and container / VM images. Size of an object can range from a few KBs to a maximum of 5TB.
-
-MinIO server is light enough to be bundled with the application stack, similar to NodeJS, Redis and MySQL.
+MinIO is High Performance Object Storage released under Apache License v2.0. It is API compatible with Amazon S3 cloud storage service. Using MinIO build high performance infrastructure for machine learning, analytics and application data workloads.
 
 ## Docker Container
 ### Stable
@@ -85,7 +83,7 @@ service minio start
 ```
 
 ## Install from Source
-Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.12](https://golang.org/dl/#stable)
+Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.13](https://golang.org/dl/#stable)
 
 ```sh
 GO111MODULE=on go get github.com/minio/minio
vendor/github.com/minio/minio/browser/app/js/browser/Login.js (generated, vendored, 34 changes)

@@ -16,16 +16,13 @@
 import React from "react"
 import { connect } from "react-redux"
-import classNames from "classnames"
 import logo from "../../img/logo.svg"
 import Alert from "../alert/Alert"
 import * as actionsAlert from "../alert/actions"
 import InputGroup from "./InputGroup"
 import web from "../web"
 import { Redirect, Link } from "react-router-dom"
-import qs from "query-string"
-import storage from "local-storage-fallback"
-import history from "../history"
+import OpenIDLoginButton from './OpenIDLoginButton'
 
 export class Login extends React.Component {
   constructor(props) {
@@ -33,7 +30,8 @@ export class Login extends React.Component {
     this.state = {
       accessKey: "",
       secretKey: "",
-      discoveryDoc: {}
+      discoveryDoc: {},
+      clientId: ""
     }
   }
 
@@ -88,8 +86,9 @@ export class Login extends React.Component {
   }
 
   componentDidMount() {
-    web.GetDiscoveryDoc().then(({ DiscoveryDoc }) => {
+    web.GetDiscoveryDoc().then(({ DiscoveryDoc, clientId }) => {
       this.setState({
+        clientId,
         discoveryDoc: DiscoveryDoc
       })
     })
@@ -107,6 +106,8 @@ export class Login extends React.Component {
     let alertBox = <Alert {...alert} onDismiss={clearAlert} />
     // Make sure you don't show a fading out alert box on the initial web-page load.
     if (!alert.message) alertBox = ""
 
+    const showOpenID = Boolean(this.state.discoveryDoc && this.state.discoveryDoc.authorization_endpoint)
     return (
       <div className="login">
         {alertBox}
@@ -139,13 +140,24 @@ export class Login extends React.Component {
             <i className="fas fa-sign-in-alt" />
           </button>
         </form>
-        {this.state.discoveryDoc &&
-          this.state.discoveryDoc.authorization_endpoint && (
+        {showOpenID && (
           <div className="openid-login">
             <div className="or">or</div>
-            <a href={"/login/openid"} className="btn openid-btn">
-              Log in with OpenID
-            </a>
+            {
+              this.state.clientId ? (
+                <OpenIDLoginButton
+                  className="btn openid-btn"
+                  clientId={this.state.clientId}
+                  authorizationEndpoint={this.state.discoveryDoc.authorization_endpoint}
+                >
+                  Log in with OpenID
+                </OpenIDLoginButton>
+              ) : (
+                <Link to={"/login/openid"} className="btn openid-btn">
+                  Log in with OpenID
+                </Link>
+              )
+            }
           </div>
         )}
       </div>
vendor/github.com/minio/minio/browser/app/js/browser/OpenIDLogin.js (generated, vendored, 22 changes)

@@ -26,6 +26,7 @@ import qs from "query-string"
 import { getRandomString } from "../utils"
 import storage from "local-storage-fallback"
 import jwtDecode from "jwt-decode"
+import { buildOpenIDAuthURL, OPEN_ID_NONCE_KEY } from './utils'
 
 export class OpenIDLogin extends React.Component {
   constructor(props) {
@@ -58,20 +59,17 @@ export class OpenIDLogin extends React.Component {
 
     if (this.state.discoveryDoc && this.state.discoveryDoc.authorization_endpoint) {
       const redirectURI = window.location.href.split("#")[0]
-      var params = new URLSearchParams()
-      params.set("response_type", "id_token")
-      params.set("scope", "openid")
-      params.set("client_id", this.state.clientID)
-      params.set("redirect_uri", redirectURI)
-
       // Store nonce in localstorage to check again after the redirect
       const nonce = getRandomString(16)
-      params.set("nonce", nonce)
-      storage.setItem("openIDKey", nonce)
+      storage.setItem(OPEN_ID_NONCE_KEY, nonce)
 
-      const authURL = `${
-        this.state.discoveryDoc.authorization_endpoint
-      }?${params.toString()}`
+      const authURL = buildOpenIDAuthURL(
+        this.state.discoveryDoc.authorization_endpoint,
+        redirectURI,
+        this.state.clientID,
+        nonce
+      )
       window.location = authURL
     }
   }
@@ -99,13 +97,13 @@ export class OpenIDLogin extends React.Component {
     if (values.id_token) {
       // Check nonce on the token to prevent replay attacks
      const tokenJSON = jwtDecode(values.id_token)
-      if (storage.getItem("openIDKey") !== tokenJSON.nonce) {
+      if (storage.getItem(OPEN_ID_NONCE_KEY) !== tokenJSON.nonce) {
        this.props.showAlert("danger", "Invalid auth token")
        return
      }
 
      web.LoginSTS({ token: values.id_token }).then(() => {
-        storage.removeItem("openIDKey")
+        storage.removeItem(OPEN_ID_NONCE_KEY)
        this.forceUpdate()
        return
      })
vendor/github.com/minio/minio/browser/app/js/browser/OpenIDLoginButton.js (generated, vendored, new file, 57 lines)

@@ -0,0 +1,57 @@
+/*
+ * MinIO Cloud Storage (C) 2019 MinIO, Inc.
+ * [standard Apache License, Version 2.0 file header omitted]
+ */
+
+import React from "react"
+import { getRandomString } from "../utils"
+import storage from "local-storage-fallback"
+import { buildOpenIDAuthURL, OPEN_ID_NONCE_KEY } from './utils'
+
+export class OpenIDLoginButton extends React.Component {
+  constructor(props) {
+    super(props)
+    this.handleClick = this.handleClick.bind(this)
+  }
+
+  handleClick(event) {
+    event.stopPropagation()
+    const { authorizationEndpoint, clientId } = this.props
+
+    let redirectURI = window.location.href.split("#")[0]
+    if (redirectURI.endsWith('/')) {
+      redirectURI += 'openid'
+    } else {
+      redirectURI += '/openid'
+    }
+
+    // Store nonce in localstorage to check again after the redirect
+    const nonce = getRandomString(16)
+    storage.setItem(OPEN_ID_NONCE_KEY, nonce)
+
+    const authURL = buildOpenIDAuthURL(authorizationEndpoint, redirectURI, clientId, nonce)
+    window.location = authURL
+  }
+
+  render() {
+    const { children, className } = this.props
+    return (
+      <div onClick={this.handleClick} className={className}>
+        {children}
+      </div>
+    )
+  }
+}
+
+export default OpenIDLoginButton
vendor/github.com/minio/minio/browser/app/js/browser/utils.js (generated, vendored, new file, 29 lines)

@@ -0,0 +1,29 @@
+/*
+ * MinIO Cloud Storage (C) 2019 MinIO, Inc.
+ * [standard Apache License, Version 2.0 file header omitted]
+ */
+
+export const OPEN_ID_NONCE_KEY = 'openIDKey'
+
+export const buildOpenIDAuthURL = (authorizationEndpoint, redirectURI, clientID, nonce) => {
+  const params = new URLSearchParams()
+  params.set("response_type", "id_token")
+  params.set("scope", "openid")
+  params.set("client_id", clientID)
+  params.set("redirect_uri", redirectURI)
+  params.set("nonce", nonce)
+
+  return `${authorizationEndpoint}?${params.toString()}`
+}
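For comparison, the same OpenID Connect implicit-flow URL could be assembled server-side. A sketch in Go whose parameter set simply mirrors the new utils.js above — nothing here is part of the vendored code, and the endpoint/client values are hypothetical:

```go
package main

import (
	"fmt"
	"net/url"
)

// buildOpenIDAuthURL mirrors the JS helper: an implicit-flow request asking
// for an id_token, with a nonce to guard against replayed tokens.
// (url.Values encodes keys in sorted order, unlike the JS URLSearchParams;
// the authorization server does not care about parameter order.)
func buildOpenIDAuthURL(authorizationEndpoint, redirectURI, clientID, nonce string) string {
	params := url.Values{}
	params.Set("response_type", "id_token")
	params.Set("scope", "openid")
	params.Set("client_id", clientID)
	params.Set("redirect_uri", redirectURI)
	params.Set("nonce", nonce)
	return authorizationEndpoint + "?" + params.Encode()
}

func main() {
	fmt.Println(buildOpenIDAuthURL(
		"https://accounts.example.com/authorize", // hypothetical endpoint
		"https://minio.example.com/openid",       // hypothetical redirect URI
		"minio-browser", "random-nonce-123",
	))
}
```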
7
vendor/github.com/minio/minio/browser/app/less/inc/login.less
generated
vendored
7
vendor/github.com/minio/minio/browser/app/less/inc/login.less
generated
vendored
|
@ -101,14 +101,15 @@

.openid-btn {
  display: inline-block;
  color: @link-color;
  margin-top: 30px;
  border-width: 1px;
  border-style: solid;
  opacity: 0.6;
  font-size: 14px;
  &:hover {
    color: @link-color;
    opacity: 1;
    cursor: pointer;
  }
}

@ -118,7 +119,7 @@
  align-items: center;
  color: grey;
}

.or:after,
.or:before {
  content: "";

@ -136,4 +137,4 @@ input:-webkit-autofill {
  -webkit-box-shadow: 0 0 0 50px #002a37 inset !important;
  -webkit-text-fill-color: @white !important;
  caret-color: white;
}
}
86
vendor/github.com/minio/minio/browser/ui-assets.go
generated
vendored
File diff suppressed because one or more lines are too long
4
vendor/github.com/minio/minio/buildscripts/gateway-tests.sh
generated
vendored
@ -24,7 +24,7 @@ function start_minio_server()
	MINIO_ACCESS_KEY=minio MINIO_SECRET_KEY=minio123 \
	    minio --quiet --json server /data --address 127.0.0.1:24242 > server.log 2>&1 &
	server_pid=$!
	sleep 3
	sleep 10

	echo "$server_pid"
}

@ -35,7 +35,7 @@ function start_minio_gateway_s3()
	minio --quiet --json gateway s3 http://127.0.0.1:24242 \
	    --address 127.0.0.1:24240 > gateway.log 2>&1 &
	gw_pid=$!
	sleep 3
	sleep 10

	echo "$gw_pid"
}
110
vendor/github.com/minio/minio/buildscripts/verify-build.sh
generated
vendored
@ -69,63 +69,35 @@ function start_minio_erasure_sets()
	echo "$minio_pid"
}

function start_minio_dist_erasure_sets_ipv6()
function start_minio_zone_erasure_sets()
{
	declare -a minio_pids
	export MINIO_ACCESS_KEY=$ACCESS_KEY
	export MINIO_SECRET_KEY=$SECRET_KEY
	"${MINIO[@]}" server --address="[::1]:9000" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9000.log" 2>&1 &
	minio_pids[0]=$!
	"${MINIO[@]}" server --address="[::1]:9001" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9001.log" 2>&1 &
	minio_pids[1]=$!
	"${MINIO[@]}" server --address="[::1]:9002" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9002.log" 2>&1 &
	minio_pids[2]=$!
	"${MINIO[@]}" server --address="[::1]:9003" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9003.log" 2>&1 &
	minio_pids[3]=$!
	"${MINIO[@]}" server --address="[::1]:9004" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9004.log" 2>&1 &
	minio_pids[4]=$!
	"${MINIO[@]}" server --address="[::1]:9005" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9005.log" 2>&1 &
	minio_pids[5]=$!
	"${MINIO[@]}" server --address="[::1]:9006" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9006.log" 2>&1 &
	minio_pids[6]=$!
	"${MINIO[@]}" server --address="[::1]:9007" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9007.log" 2>&1 &
	minio_pids[7]=$!
	"${MINIO[@]}" server --address="[::1]:9008" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9008.log" 2>&1 &
	minio_pids[8]=$!
	"${MINIO[@]}" server --address="[::1]:9009" "http://[::1]:9000${WORK_DIR}/dist-disk-sets1" "http://[::1]:9001${WORK_DIR}/dist-disk-sets2" "http://[::1]:9002${WORK_DIR}/dist-disk-sets3" "http://[::1]:9003${WORK_DIR}/dist-disk-sets4" "http://[::1]:9004${WORK_DIR}/dist-disk-sets5" "http://[::1]:9005${WORK_DIR}/dist-disk-sets6" "http://[::1]:9006${WORK_DIR}/dist-disk-sets7" "http://[::1]:9007${WORK_DIR}/dist-disk-sets8" "http://[::1]:9008${WORK_DIR}/dist-disk-sets9" "http://[::1]:9009${WORK_DIR}/dist-disk-sets10" "http://[::1]:9000${WORK_DIR}/dist-disk-sets11" "http://[::1]:9001${WORK_DIR}/dist-disk-sets12" "http://[::1]:9002${WORK_DIR}/dist-disk-sets13" "http://[::1]:9003${WORK_DIR}/dist-disk-sets14" "http://[::1]:9004${WORK_DIR}/dist-disk-sets15" "http://[::1]:9005${WORK_DIR}/dist-disk-sets16" "http://[::1]:9006${WORK_DIR}/dist-disk-sets17" "http://[::1]:9007${WORK_DIR}/dist-disk-sets18" "http://[::1]:9008${WORK_DIR}/dist-disk-sets19" "http://[::1]:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-v6-9009.log" 2>&1 &
	minio_pids[9]=$!

	sleep 35
	"${MINIO[@]}" server --address=:9000 "http://127.0.0.1:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://127.0.0.1:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9000.log" 2>&1 &
	minio_pids[0]=$!

	"${MINIO[@]}" server --address=:9001 "http://127.0.0.1:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://127.0.0.1:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9001.log" 2>&1 &
	minio_pids[1]=$!

	sleep 40
	echo "${minio_pids[@]}"
}

function start_minio_dist_erasure_sets()
function start_minio_zone_erasure_sets_ipv6()
{
	declare -a minio_pids
	export MINIO_ACCESS_KEY=$ACCESS_KEY
	export MINIO_SECRET_KEY=$SECRET_KEY
	"${MINIO[@]}" server --address=:9000 "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets4" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets5" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets6" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets7" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets8" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets9" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets10" "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets11" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets12" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets13" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets14" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets15" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets16" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets17" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets18" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets19" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-9000.log" 2>&1 &
	minio_pids[0]=$!
	"${MINIO[@]}" server --address=:9001 "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets4" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets5" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets6" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets7" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets8" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets9" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets10" "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets11" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets12" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets13" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets14" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets15" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets16" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets17" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets18" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets19" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-9001.log" 2>&1 &
	minio_pids[1]=$!
	"${MINIO[@]}" server --address=:9002 "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets4" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets5" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets6" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets7" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets8" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets9" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets10" "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets11" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets12" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets13" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets14" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets15" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets16" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets17" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets18" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets19" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-9002.log" 2>&1 &
	minio_pids[2]=$!
	"${MINIO[@]}" server --address=:9003 "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets4" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets5" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets6" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets7" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets8" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets9" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets10" "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets11" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets12" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets13" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets14" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets15" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets16" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets17" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets18" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets19" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-9003.log" 2>&1 &
	minio_pids[3]=$!
	"${MINIO[@]}" server --address=:9004 "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets4" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets5" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets6" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets7" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets8" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets9" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets10" "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets11" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets12" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets13" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets14" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets15" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets16" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets17" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets18" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets19" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-9004.log" 2>&1 &
	minio_pids[4]=$!
	"${MINIO[@]}" server --address=:9005 "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets4" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets5" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets6" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets7" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets8" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets9" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets10" "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets11" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets12" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets13" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets14" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets15" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets16" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets17" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets18" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets19" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-9005.log" 2>&1 &
	minio_pids[5]=$!
	"${MINIO[@]}" server --address=:9006 "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets4" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets5" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets6" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets7" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets8" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets9" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets10" "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets11" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets12" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets13" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets14" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets15" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets16" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets17" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets18" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets19" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-9006.log" 2>&1 &
	minio_pids[6]=$!
	"${MINIO[@]}" server --address=:9007 "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets4" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets5" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets6" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets7" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets8" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets9" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets10" "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets11" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets12" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets13" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets14" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets15" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets16" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets17" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets18" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets19" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-9007.log" 2>&1 &
	minio_pids[7]=$!
	"${MINIO[@]}" server --address=:9008 "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets4" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets5" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets6" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets7" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets8" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets9" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets10" "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets11" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets12" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets13" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets14" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets15" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets16" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets17" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets18" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets19" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-9008.log" 2>&1 &
	minio_pids[8]=$!
	"${MINIO[@]}" server --address=:9009 "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets4" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets5" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets6" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets7" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets8" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets9" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets10" "http://127.0.0.1:9000${WORK_DIR}/dist-disk-sets11" "http://127.0.0.1:9001${WORK_DIR}/dist-disk-sets12" "http://127.0.0.1:9002${WORK_DIR}/dist-disk-sets13" "http://127.0.0.1:9003${WORK_DIR}/dist-disk-sets14" "http://127.0.0.1:9004${WORK_DIR}/dist-disk-sets15" "http://127.0.0.1:9005${WORK_DIR}/dist-disk-sets16" "http://127.0.0.1:9006${WORK_DIR}/dist-disk-sets17" "http://127.0.0.1:9007${WORK_DIR}/dist-disk-sets18" "http://127.0.0.1:9008${WORK_DIR}/dist-disk-sets19" "http://127.0.0.1:9009${WORK_DIR}/dist-disk-sets20" >"$WORK_DIR/dist-minio-9009.log" 2>&1 &
	minio_pids[9]=$!

	sleep 30
	"${MINIO[@]}" server --address="[::1]:9000" "http://[::1]:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://[::1]:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9000.log" 2>&1 &
	minio_pids[0]=$!

	"${MINIO[@]}" server --address="[::1]:9001" "http://[::1]:9000${WORK_DIR}/zone-disk-sets{1...4}" "http://[::1]:9001${WORK_DIR}/zone-disk-sets{5...8}" >"$WORK_DIR/zone-minio-9001.log" 2>&1 &
	minio_pids[1]=$!

	sleep 40
	echo "${minio_pids[@]}"
}

@ -143,7 +115,7 @@ function start_minio_dist_erasure()
	"${MINIO[@]}" server --address=:9003 "http://127.0.0.1:9000${WORK_DIR}/dist-disk1" "http://127.0.0.1:9001${WORK_DIR}/dist-disk2" "http://127.0.0.1:9002${WORK_DIR}/dist-disk3" "http://127.0.0.1:9003${WORK_DIR}/dist-disk4" >"$WORK_DIR/dist-minio-9003.log" 2>&1 &
	minio_pids[3]=$!

	sleep 30
	sleep 40
	echo "${minio_pids[@]}"
}

@ -210,9 +182,9 @@ function run_test_dist_erasure_sets_ipv6()
	return "$rv"
}

function run_test_dist_erasure_sets()
function run_test_zone_erasure_sets()
{
	minio_pids=( $(start_minio_dist_erasure_sets) )
	minio_pids=( $(start_minio_zone_erasure_sets) )

	(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
	rv=$?

@ -223,14 +195,42 @@ function run_test_dist_erasure_sets()
	sleep 3

	if [ "$rv" -ne 0 ]; then
		for i in $(seq 0 9); do
		for i in $(seq 0 1); do
			echo "server$i log:"
			cat "$WORK_DIR/dist-minio-900$i.log"
			cat "$WORK_DIR/zone-minio-900$i.log"
		done
	fi

	for i in $(seq 0 9); do
		rm -f "$WORK_DIR/dist-minio-900$i.log"
	for i in $(seq 0 1); do
		rm -f "$WORK_DIR/zone-minio-900$i.log"
	done

	return "$rv"
}

function run_test_zone_erasure_sets_ipv6()
{
	minio_pids=( $(start_minio_zone_erasure_sets_ipv6) )

	export SERVER_ENDPOINT="[::1]:9000"

	(cd "$WORK_DIR" && "$FUNCTIONAL_TESTS")
	rv=$?

	for pid in "${minio_pids[@]}"; do
		kill "$pid"
	done
	sleep 3

	if [ "$rv" -ne 0 ]; then
		for i in $(seq 0 1); do
			echo "server$i log:"
			cat "$WORK_DIR/zone-minio-ipv6-900$i.log"
		done
	fi

	for i in $(seq 0 1); do
		rm -f "$WORK_DIR/zone-minio-ipv6-900$i.log"
	done

	return "$rv"
@ -351,15 +351,15 @@ function main()
		exit 1
	fi

	echo "Testing in Distributed Erasure setup as sets"
	if ! run_test_dist_erasure_sets; then
	echo "Testing in Distributed Erasure expanded setup"
	if ! run_test_zone_erasure_sets; then
		echo "FAILED"
		purge "$WORK_DIR"
		exit 1
	fi

	echo "Testing in Distributed Erasure setup as sets with ipv6"
	if ! run_test_dist_erasure_sets_ipv6; then
	echo "Testing in Distributed Erasure expanded setup with ipv6"
	if ! run_test_zone_erasure_sets_ipv6; then
		echo "FAILED"
		purge "$WORK_DIR"
		exit 1
171
vendor/github.com/minio/minio/cmd/admin-handlers-config-kv.go
generated
vendored
@ -17,17 +17,25 @@
package cmd

import (
	"bufio"
	"bytes"
	"context"
	"encoding/json"
	"io"
	"net/http"
	"strconv"
	"strings"

	"github.com/gorilla/mux"
	"github.com/minio/minio/cmd/config"
	"github.com/minio/minio/cmd/config/cache"
	"github.com/minio/minio/cmd/config/etcd"
	xldap "github.com/minio/minio/cmd/config/identity/ldap"
	"github.com/minio/minio/cmd/config/identity/openid"
	"github.com/minio/minio/cmd/config/policy/opa"
	"github.com/minio/minio/cmd/config/storageclass"
	"github.com/minio/minio/cmd/crypto"
	"github.com/minio/minio/cmd/logger"
	iampolicy "github.com/minio/minio/pkg/iam/policy"
	"github.com/minio/minio/pkg/madmin"
)

@ -40,7 +48,7 @@ func validateAdminReqConfigKV(ctx context.Context, w http.ResponseWriter, r *htt
	}

	// Validate request signature.
	adminAPIErr := checkAdminRequestAuthType(ctx, r, "")
	_, adminAPIErr := checkAdminRequestAuthType(ctx, r, iampolicy.ConfigUpdateAdminAction, "")
	if adminAPIErr != ErrNone {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
		return nil
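The pattern in the hunk above recurs throughout this commit: every admin handler now passes an explicit iampolicy.AdminAction into request validation instead of performing a bare signature check. A small, self-contained sketch of that action-gated pattern, with simplified stand-in types rather than MinIO's actual definitions:

package main

import (
	"fmt"
	"net/http"
)

// AdminAction names a privileged operation, mirroring iampolicy.AdminAction.
type AdminAction string

const (
	ConfigUpdateAdminAction   AdminAction = "admin:ConfigUpdate"
	ListServerInfoAdminAction AdminAction = "admin:ServerInfo"
)

// allowed stands in for the IAM policy evaluation the real code performs.
func allowed(user string, action AdminAction) bool {
	// Hypothetical rule: only "admin" may update the config.
	return user == "admin" || action != ConfigUpdateAdminAction
}

// requireAction wraps a handler so the request is rejected unless the
// authenticated caller may perform the given action.
func requireAction(action AdminAction, next http.HandlerFunc) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		user := r.Header.Get("X-User") // stand-in for signature-derived credentials
		if !allowed(user, action) {
			http.Error(w, "access denied", http.StatusForbidden)
			return
		}
		next(w, r)
	}
}

func main() {
	http.HandleFunc("/set-config", requireAction(ConfigUpdateAdminAction,
		func(w http.ResponseWriter, r *http.Request) { fmt.Fprintln(w, "config updated") }))
	http.ListenAndServe(":8080", nil)
}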
@ -77,30 +85,19 @@ func (a adminAPIHandlers) DelConfigKVHandler(w http.ResponseWriter, r *http.Requ
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
		return
	}

	cfg, err := readServerConfig(ctx, objectAPI)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	oldCfg := cfg.Clone()
	scanner := bufio.NewScanner(bytes.NewReader(kvBytes))
	for scanner.Scan() {
		// Skip any empty lines
		if scanner.Text() == "" {
			continue
		}
		if err = cfg.DelKVS(scanner.Text()); err != nil {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}
	}
	if err = scanner.Err(); err != nil {
	if err = cfg.DelFrom(bytes.NewReader(kvBytes)); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	if err = saveServerConfig(ctx, objectAPI, cfg, oldCfg); err != nil {
	if err = saveServerConfig(ctx, objectAPI, cfg); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}
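The change just above swaps a hand-rolled bufio.Scanner loop for a single reader-based call (cfg.DelFrom here, and cfg.ReadFrom in the handlers below). A sketch of what such a reader-based helper might look like, assuming a config keyed by sub-system name; this illustrates the pattern only, not MinIO's actual implementation:

package main

import (
	"bufio"
	"fmt"
	"io"
	"strings"
)

// Config is a hypothetical stand-in for the server config type.
type Config map[string]string

// DelFrom deletes one key per non-empty line read from r, replacing the
// scanner loop that each handler previously duplicated.
func (c Config) DelFrom(r io.Reader) error {
	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		line := strings.TrimSpace(scanner.Text())
		if line == "" {
			continue // skip any empty lines, as the old handler code did
		}
		if _, ok := c[line]; !ok {
			return fmt.Errorf("unknown config key: %s", line)
		}
		delete(c, line)
	}
	return scanner.Err()
}

func main() {
	cfg := Config{"region": "us-east-1", "cache": "on"}
	if err := cfg.DelFrom(strings.NewReader("cache\n")); err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println(cfg) // map[region:us-east-1]
}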
@ -134,33 +131,14 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
		return
	}

	cfg, err := readServerConfig(ctx, objectAPI)
	if err != nil {
		// Config not found for some reason, allow things to continue
		// by initializing a new fresh config in safe mode.
		if err == errConfigNotFound && globalSafeMode {
			cfg = newServerConfig()
			err = nil
		} else {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	defaultKVS := configDefaultKVS()
	oldCfg := cfg.Clone()
	scanner := bufio.NewScanner(bytes.NewReader(kvBytes))
	for scanner.Scan() {
		// Skip any empty lines
		if scanner.Text() == "" {
			continue
		}
		if err = cfg.SetKVS(scanner.Text(), defaultKVS); err != nil {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}
	}
	if err = scanner.Err(); err != nil {
	if _, err = cfg.ReadFrom(bytes.NewReader(kvBytes)); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

@ -171,7 +149,7 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ
	}

	// Update the actual server config on disk.
	if err = saveServerConfig(ctx, objectAPI, cfg, oldCfg); err != nil {
	if err = saveServerConfig(ctx, objectAPI, cfg); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

@ -186,6 +164,8 @@ func (a adminAPIHandlers) SetConfigKVHandler(w http.ResponseWriter, r *http.Requ
	if globalConfigEncrypted {
		saveConfig(context.Background(), objectAPI, backendEncryptedFile, backendEncryptedMigrationComplete)
	}

	writeSuccessResponseHeadersOnly(w)
}

// GetConfigKVHandler - GET /minio/admin/v2/get-config-kv?key={key}

@ -209,23 +189,10 @@ func (a adminAPIHandlers) GetConfigKVHandler(w http.ResponseWriter, r *http.Requ

	vars := mux.Vars(r)
	var buf = &bytes.Buffer{}
	key := vars["key"]
	if key != "" {
		kvs, err := cfg.GetKVS(key)
		if err != nil {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}
		for k, kv := range kvs {
			buf.WriteString(k)
			buf.WriteString(config.KvSpaceSeparator)
			buf.WriteString(kv.String())
			if len(kvs) > 1 {
				buf.WriteString(config.KvNewline)
			}
		}
	} else {
		buf.WriteString(cfg.String())
	cw := config.NewConfigWriteTo(cfg, vars["key"])
	if _, err := cw.WriteTo(buf); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	password := globalActiveCred.SecretKey

@ -296,31 +263,11 @@ func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r

	cfg, err := readServerConfig(ctx, objectAPI)
	if err != nil {
		// Config not found for some reason, allow things to continue
		// by initializing a new fresh config in safe mode.
		if err == errConfigNotFound && globalSafeMode {
			cfg = newServerConfig()
			err = nil
		} else {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	defaultKVS := configDefaultKVS()
	oldCfg := cfg.Clone()
	scanner := bufio.NewScanner(bytes.NewReader(kvBytes))
	for scanner.Scan() {
		// Skip any empty lines
		if scanner.Text() == "" {
			continue
		}
		if err = cfg.SetKVS(scanner.Text(), defaultKVS); err != nil {
			writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
			return
		}
	}
	if err = scanner.Err(); err != nil {
	if _, err = cfg.ReadFrom(bytes.NewReader(kvBytes)); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

@ -330,7 +277,7 @@ func (a adminAPIHandlers) RestoreConfigHistoryKVHandler(w http.ResponseWriter, r
		return
	}

	if err = saveServerConfig(ctx, objectAPI, cfg, oldCfg); err != nil {
	if err = saveServerConfig(ctx, objectAPI, cfg); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

@ -424,17 +371,16 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques
	}

	password := globalActiveCred.SecretKey
	configBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
	kvBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
	if err != nil {
		logger.LogIf(ctx, err, logger.Application)
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), r.URL)
		return
	}

	var cfg config.Config
	if err = json.Unmarshal(configBytes, &cfg); err != nil {
		logger.LogIf(ctx, err)
		writeCustomErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminConfigBadJSON), err.Error(), r.URL)
	cfg := newServerConfig()
	if _, err = cfg.ReadFrom(bytes.NewReader(kvBytes)); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

@ -443,7 +389,14 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques
		return
	}

	if err = saveServerConfig(ctx, objectAPI, cfg, nil); err != nil {
	// Update the actual server config on disk.
	if err = saveServerConfig(ctx, objectAPI, cfg); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	// Write to the config input KV to history.
	if err = saveServerConfigHistory(ctx, objectAPI, kvBytes); err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

@ -453,7 +406,6 @@ func (a adminAPIHandlers) SetConfigHandler(w http.ResponseWriter, r *http.Reques
		saveConfig(context.Background(), objectAPI, backendEncryptedFile, backendEncryptedMigrationComplete)
	}

	// Reply to the client before restarting minio server.
	writeSuccessResponseHeadersOnly(w)
}

@ -467,20 +419,51 @@ func (a adminAPIHandlers) GetConfigHandler(w http.ResponseWriter, r *http.Reques
		return
	}

	config, err := readServerConfig(ctx, objectAPI)
	cfg, err := readServerConfig(ctx, objectAPI)
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	}

	configData, err := json.MarshalIndent(config, "", "\t")
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
	var s strings.Builder
	hkvs := config.HelpSubSysMap[""]
	for _, hkv := range hkvs {
		v := cfg[hkv.Key]
		for target, kv := range v {
			off := kv.Get(config.Enable) == config.EnableOff
			switch hkv.Key {
			case config.EtcdSubSys:
				off = !etcd.Enabled(kv)
			case config.CacheSubSys:
				off = !cache.Enabled(kv)
			case config.StorageClassSubSys:
				off = !storageclass.Enabled(kv)
			case config.KmsVaultSubSys:
				off = !crypto.Enabled(kv)
			case config.PolicyOPASubSys:
				off = !opa.Enabled(kv)
			case config.IdentityOpenIDSubSys:
				off = !openid.Enabled(kv)
			case config.IdentityLDAPSubSys:
				off = !xldap.Enabled(kv)
			}
			if off {
				s.WriteString(config.KvComment)
				s.WriteString(config.KvSpaceSeparator)
			}
			s.WriteString(hkv.Key)
			if target != config.Default {
				s.WriteString(config.SubSystemSeparator)
				s.WriteString(target)
			}
			s.WriteString(config.KvSpaceSeparator)
			s.WriteString(kv.String())
			s.WriteString(config.KvNewline)
		}
	}

	password := globalActiveCred.SecretKey
	econfigData, err := madmin.EncryptData(password, configData)
	econfigData, err := madmin.EncryptData(password, []byte(s.String()))
	if err != nil {
		writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
		return
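Both config handlers above rely on the same symmetric envelope: the client encrypts the payload with the admin secret key via madmin.EncryptData, and the server decrypts it with madmin.DecryptData. A minimal round-trip sketch using the two calls exactly as the handlers do (the credential value is a placeholder for globalActiveCred.SecretKey):

package main

import (
	"bytes"
	"fmt"

	"github.com/minio/minio/pkg/madmin"
)

func main() {
	password := "minio123" // placeholder for the admin secret key
	plain := []byte("region name=us-east-1\n")

	// Client side: encrypt the config payload before sending it.
	encrypted, err := madmin.EncryptData(password, plain)
	if err != nil {
		panic(err)
	}

	// Server side: decrypt the request body, as the handlers above do.
	decrypted, err := madmin.DecryptData(password, bytes.NewReader(encrypted))
	if err != nil {
		panic(err)
	}
	fmt.Println(bytes.Equal(decrypted, plain)) // true
}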
44
vendor/github.com/minio/minio/cmd/admin-handlers-users.go
generated
vendored
@ -25,33 +25,37 @@ import (

	"github.com/gorilla/mux"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/auth"
	iampolicy "github.com/minio/minio/pkg/iam/policy"
	"github.com/minio/minio/pkg/madmin"
)

func validateAdminUsersReq(ctx context.Context, w http.ResponseWriter, r *http.Request) ObjectLayer {
func validateAdminUsersReq(ctx context.Context, w http.ResponseWriter, r *http.Request, action iampolicy.AdminAction) (ObjectLayer, auth.Credentials) {
	var cred auth.Credentials
	var adminAPIErr APIErrorCode

	// Get current object layer instance.
	objectAPI := newObjectLayerWithoutSafeModeFn()
	if objectAPI == nil || globalNotificationSys == nil || globalIAMSys == nil {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return nil
		return nil, cred
	}

	// Validate request signature.
	adminAPIErr := checkAdminRequestAuthType(ctx, r, "")
	cred, adminAPIErr = checkAdminRequestAuthType(ctx, r, action, "")
	if adminAPIErr != ErrNone {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
		return nil
		return nil, cred
	}

	return objectAPI
	return objectAPI, cred
}

// RemoveUser - DELETE /minio/admin/v2/remove-user?accessKey=<access_key>
func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "RemoveUser")

	objectAPI := validateAdminUsersReq(ctx, w, r)
	objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.DeleteUserAdminAction)
	if objectAPI == nil {
		return
	}

@ -83,7 +87,7 @@ func (a adminAPIHandlers) RemoveUser(w http.ResponseWriter, r *http.Request) {
func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "ListUsers")

	objectAPI := validateAdminUsersReq(ctx, w, r)
	objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListUsersAdminAction)
	if objectAPI == nil {
		return
	}

@ -114,7 +118,7 @@ func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {
func (a adminAPIHandlers) GetUserInfo(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "GetUserInfo")

	objectAPI := validateAdminUsersReq(ctx, w, r)
	objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetUserAdminAction)
	if objectAPI == nil {
		return
	}

@ -141,7 +145,7 @@ func (a adminAPIHandlers) GetUserInfo(w http.ResponseWriter, r *http.Request) {
func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "UpdateGroupMembers")

	objectAPI := validateAdminUsersReq(ctx, w, r)
	objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.AddUserToGroupAdminAction)
	if objectAPI == nil {
		return
	}

@ -184,7 +188,7 @@ func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) GetGroup(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "GetGroup")

	objectAPI := validateAdminUsersReq(ctx, w, r)
	objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetGroupAdminAction)
	if objectAPI == nil {
		return
	}

@ -211,7 +215,7 @@ func (a adminAPIHandlers) GetGroup(w http.ResponseWriter, r *http.Request) {
func (a adminAPIHandlers) ListGroups(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "ListGroups")

	objectAPI := validateAdminUsersReq(ctx, w, r)
	objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListGroupsAdminAction)
	if objectAPI == nil {
		return
	}

@ -235,7 +239,7 @@ func (a adminAPIHandlers) ListGroups(w http.ResponseWriter, r *http.Request) {
func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SetGroupStatus")

	objectAPI := validateAdminUsersReq(ctx, w, r)
	objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.EnableGroupAdminAction)
	if objectAPI == nil {
		return
	}

@ -270,7 +274,7 @@ func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request)
func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SetUserStatus")

	objectAPI := validateAdminUsersReq(ctx, w, r)
	objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.EnableUserAdminAction)
	if objectAPI == nil {
		return
	}

@ -309,7 +313,7 @@ func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request)
func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "AddUser")

	objectAPI := validateAdminUsersReq(ctx, w, r)
	objectAPI, cred := validateAdminUsersReq(ctx, w, r, iampolicy.CreateUserAdminAction)
	if objectAPI == nil {
		return
	}

@ -335,7 +339,7 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
		return
	}

	password := globalActiveCred.SecretKey
	password := cred.SecretKey
	configBytes, err := madmin.DecryptData(password, io.LimitReader(r.Body, r.ContentLength))
	if err != nil {
		logger.LogIf(ctx, err)

@ -368,7 +372,7 @@ func (a adminAPIHandlers) AddUser(w http.ResponseWriter, r *http.Request) {
func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "InfoCannedPolicy")

	objectAPI := validateAdminUsersReq(ctx, w, r)
	objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.GetPolicyAdminAction)
	if objectAPI == nil {
		return
	}

@ -387,7 +391,7 @@ func (a adminAPIHandlers) InfoCannedPolicy(w http.ResponseWriter, r *http.Reques
func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "ListCannedPolicies")

	objectAPI := validateAdminUsersReq(ctx, w, r)
	objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.ListUserPoliciesAdminAction)
	if objectAPI == nil {
		return
	}

@ -410,7 +414,7 @@ func (a adminAPIHandlers) ListCannedPolicies(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "RemoveCannedPolicy")

	objectAPI := validateAdminUsersReq(ctx, w, r)
	objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.DeletePolicyAdminAction)
	if objectAPI == nil {
		return
	}

@ -442,7 +446,7 @@ func (a adminAPIHandlers) RemoveCannedPolicy(w http.ResponseWriter, r *http.Requ
func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "AddCannedPolicy")

	objectAPI := validateAdminUsersReq(ctx, w, r)
	objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.CreatePolicyAdminAction)
	if objectAPI == nil {
		return
	}

@ -498,7 +502,7 @@ func (a adminAPIHandlers) AddCannedPolicy(w http.ResponseWriter, r *http.Request
func (a adminAPIHandlers) SetPolicyForUserOrGroup(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "SetPolicyForUserOrGroup")

	objectAPI := validateAdminUsersReq(ctx, w, r)
	objectAPI, _ := validateAdminUsersReq(ctx, w, r, iampolicy.AttachPolicyAdminAction)
	if objectAPI == nil {
		return
	}
71
vendor/github.com/minio/minio/cmd/admin-handlers.go
generated
vendored
@ -39,8 +39,10 @@ import (
	"github.com/minio/minio/cmd/crypto"
	xhttp "github.com/minio/minio/cmd/http"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/auth"
	"github.com/minio/minio/pkg/cpu"
	"github.com/minio/minio/pkg/handlers"
	iampolicy "github.com/minio/minio/pkg/iam/policy"
	"github.com/minio/minio/pkg/madmin"
	"github.com/minio/minio/pkg/mem"
	xnet "github.com/minio/minio/pkg/net"

@ -98,7 +100,7 @@ func updateServer(updateURL, sha256Hex string, latestReleaseTime time.Time) (us
func (a adminAPIHandlers) ServerUpdateHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "ServerUpdate")

	objectAPI := validateAdminReq(ctx, w, r)
	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ServerUpdateAdminAction)
	if objectAPI == nil {
		return
	}

@ -180,7 +182,7 @@ func (a adminAPIHandlers) ServiceActionHandler(w http.ResponseWriter, r *http.Re
	vars := mux.Vars(r)
	action := vars["action"]

	objectAPI := validateAdminReq(ctx, w, r)
	objectAPI, _ := validateAdminReq(ctx, w, r, "")
	if objectAPI == nil {
		return
	}

@ -265,7 +267,7 @@ type ServerInfo struct {
// Get server information
func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "ServerInfo")
	objectAPI := validateAdminReq(ctx, w, r)
	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ListServerInfoAdminAction)
	if objectAPI == nil {
		return
	}

@ -306,7 +308,7 @@ func (a adminAPIHandlers) ServerInfoHandler(w http.ResponseWriter, r *http.Reque
// Get server information
func (a adminAPIHandlers) StorageInfoHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "StorageInfo")
	objectAPI := validateAdminReq(ctx, w, r)
	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ListServerInfoAdminAction)
	if objectAPI == nil {
		return
	}

@ -360,7 +362,7 @@ type ServerNetReadPerfInfo struct {
func (a adminAPIHandlers) PerfInfoHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "PerfInfo")

	objectAPI := validateAdminReq(ctx, w, r)
	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ListServerInfoAdminAction)
	if objectAPI == nil {
		return
	}

@ -476,7 +478,13 @@ func (a adminAPIHandlers) PerfInfoHandler(w http.ResponseWriter, r *http.Request
}

func newLockEntry(l lockRequesterInfo, resource, server string) *madmin.LockEntry {
	entry := &madmin.LockEntry{Timestamp: l.Timestamp, Resource: resource, ServerList: []string{server}, Owner: l.Node, Source: l.Source, ID: l.UID}
	entry := &madmin.LockEntry{
		Timestamp:  l.Timestamp,
		Resource:   resource,
		ServerList: []string{server},
		Source:     l.Source,
		ID:         l.UID,
	}
	if l.Writer {
		entry.Type = "Write"
	} else {

@ -491,12 +499,14 @@ func topLockEntries(peerLocks []*PeerLocks) madmin.LockEntries {
		if peerLock == nil {
			continue
		}
		for k, v := range peerLock.Locks {
			for _, lockReqInfo := range v {
				if val, ok := entryMap[lockReqInfo.UID]; ok {
					val.ServerList = append(val.ServerList, peerLock.Addr)
				} else {
					entryMap[lockReqInfo.UID] = newLockEntry(lockReqInfo, k, peerLock.Addr)
		for _, locks := range peerLock.Locks {
			for k, v := range locks {
				for _, lockReqInfo := range v {
					if val, ok := entryMap[lockReqInfo.UID]; ok {
						val.ServerList = append(val.ServerList, peerLock.Addr)
					} else {
						entryMap[lockReqInfo.UID] = newLockEntry(lockReqInfo, k, peerLock.Addr)
					}
				}
			}
		}
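With server zones, each peer now reports a slice of lock maps (one per zone) instead of a single map, hence the extra loop level in the hunk above. A small, self-contained sketch of that aggregation, using pared-down stand-ins for lockRequesterInfo and madmin.LockEntry:

package main

import "fmt"

// lockInfo is a simplified stand-in for lockRequesterInfo.
type lockInfo struct{ UID string }

// peerLocks mirrors PeerLocks: one lock map per zone on each server.
type peerLocks struct {
	Addr  string
	Locks []map[string][]lockInfo
}

// topLockServers merges duplicate lock UIDs across servers, collecting the
// list of servers that hold each lock, as topLockEntries does.
func topLockServers(peers []*peerLocks) map[string][]string {
	servers := map[string][]string{}
	for _, p := range peers {
		if p == nil {
			continue
		}
		for _, zone := range p.Locks { // extra level: one map per zone
			for _, infos := range zone {
				for _, li := range infos {
					servers[li.UID] = append(servers[li.UID], p.Addr)
				}
			}
		}
	}
	return servers
}

func main() {
	peers := []*peerLocks{
		{Addr: "node1", Locks: []map[string][]lockInfo{{"bucket/obj": {{UID: "1"}}}}},
		{Addr: "node2", Locks: []map[string][]lockInfo{{"bucket/obj": {{UID: "1"}}}}},
	}
	fmt.Println(topLockServers(peers)) // map[1:[node1 node2]]
}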
@ -523,7 +533,7 @@ type PeerLocks struct {
|
|||
func (a adminAPIHandlers) TopLocksHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "TopLocks")
|
||||
|
||||
objectAPI := validateAdminReq(ctx, w, r)
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ListServerInfoAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
@ -531,10 +541,13 @@ func (a adminAPIHandlers) TopLocksHandler(w http.ResponseWriter, r *http.Request
|
|||
peerLocks := globalNotificationSys.GetLocks(ctx)
|
||||
// Once we have received all the locks currently used from peers
|
||||
// add the local peer locks list as well.
|
||||
localLocks := globalLockServer.ll.DupLockMap()
|
||||
var getRespLocks GetLocksResp
|
||||
for _, llocker := range globalLockServers {
|
||||
getRespLocks = append(getRespLocks, llocker.DupLockMap())
|
||||
}
|
||||
peerLocks = append(peerLocks, &PeerLocks{
|
||||
Addr: getHostName(r),
|
||||
Locks: localLocks,
|
||||
Locks: getRespLocks,
|
||||
})
|
||||
|
||||
topLocks := topLockEntries(peerLocks)
|
||||
|
@ -565,7 +578,7 @@ type StartProfilingResult struct {
|
|||
func (a adminAPIHandlers) StartProfilingHandler(w http.ResponseWriter, r *http.Request) {
|
||||
ctx := newContext(r, w, "StartProfiling")
|
||||
|
||||
objectAPI := validateAdminReq(ctx, w, r)
|
||||
objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ListServerInfoAdminAction)
|
||||
if objectAPI == nil {
|
||||
return
|
||||
}
|
||||
|
@ -647,7 +660,7 @@ func (f dummyFileInfo) Sys() interface{} { return f.sys }
|
|||
func (a adminAPIHandlers) DownloadProfilingHandler(w http.ResponseWriter, r *http.Request) {
|
||||
	ctx := newContext(r, w, "DownloadProfiling")

	objectAPI := validateAdminReq(ctx, w, r)
	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ListServerInfoAdminAction)
	if objectAPI == nil {
		return
	}

@ -739,7 +752,7 @@ func extractHealInitParams(vars map[string]string, qParms url.Values, r io.Reade
func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "Heal")

	objectAPI := validateAdminReq(ctx, w, r)
	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.HealAdminAction)
	if objectAPI == nil {
		return
	}

@ -883,7 +896,7 @@ func (a adminAPIHandlers) HealHandler(w http.ResponseWriter, r *http.Request) {
func (a adminAPIHandlers) BackgroundHealStatusHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "HealBackgroundStatus")

	objectAPI := validateAdminReq(ctx, w, r)
	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.HealAdminAction)
	if objectAPI == nil {
		return
	}

@ -923,22 +936,24 @@ func (a adminAPIHandlers) BackgroundHealStatusHandler(w http.ResponseWriter, r *
	w.(http.Flusher).Flush()
}

func validateAdminReq(ctx context.Context, w http.ResponseWriter, r *http.Request) ObjectLayer {
func validateAdminReq(ctx context.Context, w http.ResponseWriter, r *http.Request, action iampolicy.AdminAction) (ObjectLayer, auth.Credentials) {
	var cred auth.Credentials
	var adminAPIErr APIErrorCode
	// Get current object layer instance.
	objectAPI := newObjectLayerWithoutSafeModeFn()
	if objectAPI == nil || globalNotificationSys == nil {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL)
		return nil
		return nil, cred
	}

	// Validate request signature.
	adminAPIErr := checkAdminRequestAuthType(ctx, r, "")
	cred, adminAPIErr = checkAdminRequestAuthType(ctx, r, action, "")
	if adminAPIErr != ErrNone {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
		return nil
		return nil, cred
	}

	return objectAPI
	return objectAPI, cred
}

// AdminError - is a generic error for all admin APIs.

@ -1029,7 +1044,7 @@ func (a adminAPIHandlers) TraceHandler(w http.ResponseWriter, r *http.Request) {
	trcErr := r.URL.Query().Get("err") == "true"

	// Validate request signature.
	adminAPIErr := checkAdminRequestAuthType(ctx, r, "")
	_, adminAPIErr := checkAdminRequestAuthType(ctx, r, iampolicy.ListServerInfoAdminAction, "")
	if adminAPIErr != ErrNone {
		writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(adminAPIErr), r.URL)
		return

@ -1083,7 +1098,7 @@ func (a adminAPIHandlers) TraceHandler(w http.ResponseWriter, r *http.Request) {
func (a adminAPIHandlers) ConsoleLogHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "ConsoleLog")

	objectAPI := validateAdminReq(ctx, w, r)
	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ListServerInfoAdminAction)
	if objectAPI == nil {
		return
	}

@ -1154,7 +1169,7 @@ func (a adminAPIHandlers) ConsoleLogHandler(w http.ResponseWriter, r *http.Reque
func (a adminAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "KMSKeyStatusHandler")

	objectAPI := validateAdminReq(ctx, w, r)
	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ListServerInfoAdminAction)
	if objectAPI == nil {
		return
	}

@ -1239,7 +1254,7 @@ func (a adminAPIHandlers) KMSKeyStatusHandler(w http.ResponseWriter, r *http.Req
func (a adminAPIHandlers) ServerHardwareInfoHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "HardwareInfo")

	objectAPI := validateAdminReq(ctx, w, r)
	objectAPI, _ := validateAdminReq(ctx, w, r, iampolicy.ListServerInfoAdminAction)
	if objectAPI == nil {
		return
	}
51
vendor/github.com/minio/minio/cmd/admin-handlers_test.go
generated
vendored
@ -27,7 +27,6 @@ import (
	"net/url"
	"sync"
	"testing"
	"time"

	"github.com/gorilla/mux"
	"github.com/minio/minio/pkg/auth"

@ -62,16 +61,12 @@ func prepareAdminXLTestBed() (*adminXLTestBed, error) {
	// Initialize boot time
	globalBootTime = UTCNow()

	globalEndpoints = mustGetNewEndpointList(xlDirs...)
	globalEndpoints = mustGetZoneEndpoints(xlDirs...)

	// Set globalIsXL to indicate that the setup uses an erasure
	// code backend.
	globalIsXL = true

	// initialize NSLock.
	isDistXL := false
	initNSLock(isDistXL)

	// Init global heal state
	if globalIsXL {
		globalAllHealState = initHealState()

@ -118,8 +113,8 @@ func initTestXLObjLayer() (ObjectLayer, []string, error) {
	if err != nil {
		return nil, nil, err
	}
	endpoints := mustGetNewEndpointList(xlDirs...)
	format, err := waitForFormatXL(true, endpoints, 1, 16)
	endpoints := mustGetNewEndpoints(xlDirs...)
	format, err := waitForFormatXL(true, endpoints, 1, 16, "")
	if err != nil {
		removeRoots(xlDirs)
		return nil, nil, err

@ -345,46 +340,6 @@ func TestToAdminAPIErrCode(t *testing.T) {
	}
}

func TestTopLockEntries(t *testing.T) {
	t1 := UTCNow()
	t2 := UTCNow().Add(10 * time.Second)
	peerLocks := []*PeerLocks{
		{
			Addr: "1",
			Locks: map[string][]lockRequesterInfo{
				"1": {
					{false, "node2", "ep2", "2", t2, t2, ""},
					{true, "node1", "ep1", "1", t1, t1, ""},
				},
				"2": {
					{false, "node2", "ep2", "2", t2, t2, ""},
					{true, "node1", "ep1", "1", t1, t1, ""},
				},
			},
		},
		{
			Addr: "2",
			Locks: map[string][]lockRequesterInfo{
				"1": {
					{false, "node2", "ep2", "2", t2, t2, ""},
					{true, "node1", "ep1", "1", t1, t1, ""},
				},
				"2": {
					{false, "node2", "ep2", "2", t2, t2, ""},
					{true, "node1", "ep1", "1", t1, t1, ""},
				},
			},
		},
	}
	les := topLockEntries(peerLocks)
	if len(les) != 2 {
		t.Fatalf("Did not get 2 results")
	}
	if les[0].Timestamp.After(les[1].Timestamp) {
		t.Fatalf("Got wrong sorted value")
	}
}

func TestExtractHealInitParams(t *testing.T) {
	mkParams := func(clientToken string, forceStart, forceStop bool) url.Values {
		v := url.Values{}
157
vendor/github.com/minio/minio/cmd/admin-server-info.go
generated
vendored
@ -30,27 +30,28 @@ import (
	cpuhw "github.com/shirou/gopsutil/cpu"
)

// getLocalMemUsage - returns ServerMemUsageInfo for only the
// local endpoints from given list of endpoints
func getLocalMemUsage(endpoints EndpointList, r *http.Request) ServerMemUsageInfo {
// getLocalMemUsage - returns ServerMemUsageInfo for all zones, endpoints.
func getLocalMemUsage(endpointZones EndpointZones, r *http.Request) ServerMemUsageInfo {
	var memUsages []mem.Usage
	var historicUsages []mem.Usage
	seenHosts := set.NewStringSet()
	for _, endpoint := range endpoints {
		if seenHosts.Contains(endpoint.Host) {
			continue
		}
		seenHosts.Add(endpoint.Host)
	for _, ep := range endpointZones {
		for _, endpoint := range ep.Endpoints {
			if seenHosts.Contains(endpoint.Host) {
				continue
			}
			seenHosts.Add(endpoint.Host)

		// Only proceed for local endpoints
		if endpoint.IsLocal {
			memUsages = append(memUsages, mem.GetUsage())
			historicUsages = append(historicUsages, mem.GetHistoricUsage())
			// Only proceed for local endpoints
			if endpoint.IsLocal {
				memUsages = append(memUsages, mem.GetUsage())
				historicUsages = append(historicUsages, mem.GetHistoricUsage())
			}
		}
	}
	addr := r.Host
	if globalIsDistXL {
		addr = GetLocalPeer(endpoints)
		addr = GetLocalPeer(endpointZones)
	}
	return ServerMemUsageInfo{
		Addr: addr,

@ -59,27 +60,28 @@ func getLocalMemUsage(endpoints EndpointList, r *http.Request) ServerMemUsageInf
	}
}

// getLocalCPULoad - returns ServerCPULoadInfo for only the
// local endpoints from given list of endpoints
func getLocalCPULoad(endpoints EndpointList, r *http.Request) ServerCPULoadInfo {
// getLocalCPULoad - returns ServerCPULoadInfo for all zones, endpoints.
func getLocalCPULoad(endpointZones EndpointZones, r *http.Request) ServerCPULoadInfo {
	var cpuLoads []cpu.Load
	var historicLoads []cpu.Load
	seenHosts := set.NewStringSet()
	for _, endpoint := range endpoints {
		if seenHosts.Contains(endpoint.Host) {
			continue
		}
		seenHosts.Add(endpoint.Host)
	for _, ep := range endpointZones {
		for _, endpoint := range ep.Endpoints {
			if seenHosts.Contains(endpoint.Host) {
				continue
			}
			seenHosts.Add(endpoint.Host)

		// Only proceed for local endpoints
		if endpoint.IsLocal {
			cpuLoads = append(cpuLoads, cpu.GetLoad())
			historicLoads = append(historicLoads, cpu.GetHistoricLoad())
			// Only proceed for local endpoints
			if endpoint.IsLocal {
				cpuLoads = append(cpuLoads, cpu.GetLoad())
				historicLoads = append(historicLoads, cpu.GetHistoricLoad())
			}
		}
	}
	addr := r.Host
	if globalIsDistXL {
		addr = GetLocalPeer(endpoints)
		addr = GetLocalPeer(endpointZones)
	}
	return ServerCPULoadInfo{
		Addr: addr,

@ -88,26 +90,27 @@ func getLocalCPULoad(endpoints EndpointList, r *http.Request) ServerCPULoadInfo
	}
}

// getLocalDrivesPerf - returns ServerDrivesPerfInfo for only the
// local endpoints from given list of endpoints
func getLocalDrivesPerf(endpoints EndpointList, size int64, r *http.Request) madmin.ServerDrivesPerfInfo {
// getLocalDrivesPerf - returns ServerDrivesPerfInfo for all zones, endpoints.
func getLocalDrivesPerf(endpointZones EndpointZones, size int64, r *http.Request) madmin.ServerDrivesPerfInfo {
	var dps []disk.Performance
	for _, endpoint := range endpoints {
		// Only proceed for local endpoints
		if endpoint.IsLocal {
			if _, err := os.Stat(endpoint.Path); err != nil {
				// Since this drive is not available, add relevant details and proceed
				dps = append(dps, disk.Performance{Path: endpoint.Path, Error: err.Error()})
				continue
	for _, ep := range endpointZones {
		for _, endpoint := range ep.Endpoints {
			// Only proceed for local endpoints
			if endpoint.IsLocal {
				if _, err := os.Stat(endpoint.Path); err != nil {
					// Since this drive is not available, add relevant details and proceed
					dps = append(dps, disk.Performance{Path: endpoint.Path, Error: err.Error()})
					continue
				}
				dp := disk.GetPerformance(pathJoin(endpoint.Path, minioMetaTmpBucket, mustGetUUID()), size)
				dp.Path = endpoint.Path
				dps = append(dps, dp)
			}
		}
			dp := disk.GetPerformance(pathJoin(endpoint.Path, minioMetaTmpBucket, mustGetUUID()), size)
			dp.Path = endpoint.Path
			dps = append(dps, dp)
		}
	}
	addr := r.Host
	if globalIsDistXL {
		addr = GetLocalPeer(endpoints)
		addr = GetLocalPeer(endpointZones)
	}
	return madmin.ServerDrivesPerfInfo{
		Addr: addr,

@ -116,31 +119,32 @@ func getLocalDrivesPerf(endpoints EndpointList, size int64, r *http.Request) mad
	}
}

// getLocalCPUInfo - returns ServerCPUHardwareInfo only for the
// local endpoints from given list of endpoints
func getLocalCPUInfo(endpoints EndpointList, r *http.Request) madmin.ServerCPUHardwareInfo {
// getLocalCPUInfo - returns ServerCPUHardwareInfo for all zones, endpoints.
func getLocalCPUInfo(endpointZones EndpointZones, r *http.Request) madmin.ServerCPUHardwareInfo {
	var cpuHardwares []cpuhw.InfoStat
	seenHosts := set.NewStringSet()
	for _, endpoint := range endpoints {
		if seenHosts.Contains(endpoint.Host) {
			continue
		}
		// Add to the list of visited hosts
		seenHosts.Add(endpoint.Host)
		// Only proceed for local endpoints
		if endpoint.IsLocal {
			cpuHardware, err := cpuhw.Info()
			if err != nil {
				return madmin.ServerCPUHardwareInfo{
					Error: err.Error(),
				}
	for _, ep := range endpointZones {
		for _, endpoint := range ep.Endpoints {
			if seenHosts.Contains(endpoint.Host) {
				continue
			}
			// Add to the list of visited hosts
			seenHosts.Add(endpoint.Host)
			// Only proceed for local endpoints
			if endpoint.IsLocal {
				cpuHardware, err := cpuhw.Info()
				if err != nil {
					return madmin.ServerCPUHardwareInfo{
						Error: err.Error(),
					}
				}
				cpuHardwares = append(cpuHardwares, cpuHardware...)
			}
		}
			cpuHardwares = append(cpuHardwares, cpuHardware...)
		}
	}
	addr := r.Host
	if globalIsDistXL {
		addr = GetLocalPeer(endpoints)
		addr = GetLocalPeer(endpointZones)
	}

	return madmin.ServerCPUHardwareInfo{

@ -149,31 +153,32 @@ func getLocalCPUInfo(endpoints EndpointList, r *http.Request) madmin.ServerCPUHa
	}
}

// getLocalNetworkInfo - returns ServerNetworkHardwareInfo only for the
// local endpoints from given list of endpoints
func getLocalNetworkInfo(endpoints EndpointList, r *http.Request) madmin.ServerNetworkHardwareInfo {
// getLocalNetworkInfo - returns ServerNetworkHardwareInfo for all zones, endpoints.
func getLocalNetworkInfo(endpointZones EndpointZones, r *http.Request) madmin.ServerNetworkHardwareInfo {
	var networkHardwares []net.Interface
	seenHosts := set.NewStringSet()
	for _, endpoint := range endpoints {
		if seenHosts.Contains(endpoint.Host) {
			continue
		}
		// Add to the list of visited hosts
		seenHosts.Add(endpoint.Host)
		// Only proceed for local endpoints
		if endpoint.IsLocal {
			networkHardware, err := net.Interfaces()
			if err != nil {
				return madmin.ServerNetworkHardwareInfo{
					Error: err.Error(),
				}
	for _, ep := range endpointZones {
		for _, endpoint := range ep.Endpoints {
			if seenHosts.Contains(endpoint.Host) {
				continue
			}
			// Add to the list of visited hosts
			seenHosts.Add(endpoint.Host)
			// Only proceed for local endpoints
			if endpoint.IsLocal {
				networkHardware, err := net.Interfaces()
				if err != nil {
					return madmin.ServerNetworkHardwareInfo{
						Error: err.Error(),
					}
				}
				networkHardwares = append(networkHardwares, networkHardware...)
			}
		}
			networkHardwares = append(networkHardwares, networkHardware...)
		}
	}
	addr := r.Host
	if globalIsDistXL {
		addr = GetLocalPeer(endpoints)
		addr = GetLocalPeer(endpointZones)
	}

	return madmin.ServerNetworkHardwareInfo{
2
vendor/github.com/minio/minio/cmd/api-datatypes.go
generated
vendored
@ -26,7 +26,7 @@ type ObjectIdentifier struct {
}

// createBucketConfiguration container for bucket configuration request from client.
// Used for parsing the location from the request body for MakeBucketbucket.
// Used for parsing the location from the request body for Makebucket.
type createBucketLocationConfiguration struct {
	XMLName  xml.Name `xml:"CreateBucketConfiguration" json:"-"`
	Location string   `xml:"LocationConstraint"`
57
vendor/github.com/minio/minio/cmd/api-errors.go
generated
vendored
@ -23,15 +23,15 @@ import (
	"net/http"
	"strings"

	"github.com/Azure/azure-sdk-for-go/storage"
	"github.com/Azure/azure-storage-blob-go/azblob"
	"github.com/aliyun/aliyun-oss-go-sdk/oss"
	"google.golang.org/api/googleapi"

	minio "github.com/minio/minio-go/v6"
	"github.com/minio/minio/cmd/config/etcd/dns"
	"github.com/minio/minio/cmd/crypto"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/auth"
	"github.com/minio/minio/pkg/dns"
	"github.com/minio/minio/pkg/event"
	"github.com/minio/minio/pkg/hash"
)

@ -141,6 +141,12 @@ const (
	ErrInvalidPrefixMarker
	ErrBadRequest
	ErrKeyTooLongError
	ErrInvalidBucketObjectLockConfiguration
	ErrObjectLocked
	ErrInvalidRetentionDate
	ErrPastObjectLockRetainDate
	ErrUnknownWORMModeDirective
	ErrObjectLockInvalidHeaders
	// Add new error codes here.

	// SSE-S3 related API errors

@ -720,7 +726,36 @@ var errorCodes = errorCodeMap{
		Description:    "Duration provided in the request is invalid.",
		HTTPStatusCode: http.StatusBadRequest,
	},

	ErrInvalidBucketObjectLockConfiguration: {
		Code:           "InvalidRequest",
		Description:    "Bucket is missing ObjectLockConfiguration",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrObjectLocked: {
		Code:           "InvalidRequest",
		Description:    "Object is WORM protected and cannot be overwritten",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrInvalidRetentionDate: {
		Code:           "InvalidRequest",
		Description:    "Date must be provided in ISO 8601 format",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrPastObjectLockRetainDate: {
		Code:           "InvalidRequest",
		Description:    "the retain until date must be in the future",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrUnknownWORMModeDirective: {
		Code:           "InvalidRequest",
		Description:    "unknown wormMode directive",
		HTTPStatusCode: http.StatusBadRequest,
	},
	ErrObjectLockInvalidHeaders: {
		Code:           "InvalidRequest",
		Description:    "x-amz-object-lock-retain-until-date and x-amz-object-lock-mode must both be supplied",
		HTTPStatusCode: http.StatusBadRequest,
	},
	/// Bucket notification related errors.
	ErrEventNotification: {
		Code: "InvalidArgument",

@ -1569,6 +1604,14 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
		apiErr = ErrOperationTimedOut
	case errDiskNotFound:
		apiErr = ErrSlowDown
	case errInvalidRetentionDate:
		apiErr = ErrInvalidRetentionDate
	case errPastObjectLockRetainDate:
		apiErr = ErrPastObjectLockRetainDate
	case errUnknownWORMModeDirective:
		apiErr = ErrUnknownWORMModeDirective
	case errObjectLockInvalidHeaders:
		apiErr = ErrObjectLockInvalidHeaders
	}

	// Compression errors

@ -1753,11 +1796,11 @@ func toAPIError(ctx context.Context, err error) APIError {
			apiErr.Code = e.Errors[0].Reason

		}
	case storage.AzureStorageServiceError:
	case azblob.StorageError:
		apiErr = APIError{
			Code:           e.Code,
			Description:    e.Message,
			HTTPStatusCode: e.StatusCode,
			Code:           string(e.ServiceCode()),
			Description:    e.Error(),
			HTTPStatusCode: e.Response().StatusCode,
		}
	case oss.ServiceError:
		apiErr = APIError{
47
vendor/github.com/minio/minio/cmd/api-response.go
generated
vendored
@ -239,6 +239,37 @@ type ObjectVersion struct {
	IsLatest bool
}

// StringMap is a map[string]string.
type StringMap map[string]string

// MarshalXML - StringMap marshals into XML.
func (s StringMap) MarshalXML(e *xml.Encoder, start xml.StartElement) error {

	tokens := []xml.Token{start}

	for key, value := range s {
		t := xml.StartElement{}
		t.Name = xml.Name{
			Space: "",
			Local: key,
		}
		tokens = append(tokens, t, xml.CharData(value), xml.EndElement{Name: t.Name})
	}

	tokens = append(tokens, xml.EndElement{
		Name: start.Name,
	})

	for _, t := range tokens {
		if err := e.EncodeToken(t); err != nil {
			return err
		}
	}

	// flush to ensure tokens are written
	return e.Flush()
}

// Object container for object metadata
type Object struct {
	Key string

@ -251,6 +282,9 @@ type Object struct {

	// The class of storage used to store the object.
	StorageClass string

	// UserMetadata user-defined metadata
	UserMetadata StringMap `xml:"UserMetadata,omitempty"`
}

// CopyObjectResponse container returns ETag and LastModified of the successfully copied object

@ -466,7 +500,7 @@ func generateListObjectsV1Response(bucket, prefix, marker, delimiter, encodingTy
}

// generates a ListObjectsV2 response for the said bucket with other enumerated options.
func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string) ListObjectsV2Response {
func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter, delimiter, encodingType string, fetchOwner, isTruncated bool, maxKeys int, objects []ObjectInfo, prefixes []string, metadata bool) ListObjectsV2Response {
	var contents []Object
	var commonPrefixes []CommonPrefix
	var owner = Owner{}

@ -489,6 +523,17 @@ func generateListObjectsV2Response(bucket, prefix, token, nextToken, startAfter,
		content.Size = object.Size
		content.StorageClass = object.StorageClass
		content.Owner = owner
		if metadata {
			content.UserMetadata = make(StringMap)
			for k, v := range CleanMinioInternalMetadataKeys(object.UserDefined) {
				if hasPrefix(k, ReservedMetadataPrefix) {
					// Do not need to send any internal metadata
					// values to client.
					continue
				}
				content.UserMetadata[k] = v
			}
		}
		contents = append(contents, content)
	}
	data.Name = bucket
42
vendor/github.com/minio/minio/cmd/api-router.go
generated
vendored
@ -23,6 +23,12 @@ import (
	xhttp "github.com/minio/minio/cmd/http"
)

func newHTTPServerFn() *xhttp.Server {
	globalObjLayerMutex.Lock()
	defer globalObjLayerMutex.Unlock()
	return globalHTTPServer
}

func newObjectLayerWithoutSafeModeFn() ObjectLayer {
	globalObjLayerMutex.Lock()
	defer globalObjLayerMutex.Unlock()

@ -103,30 +109,30 @@ func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool)
	bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(collectAPIStats("getobjecttagging", httpTraceHdrs(api.GetObjectTaggingHandler))).Queries("tagging", "")
	// SelectObjectContent
	bucket.Methods(http.MethodPost).Path("/{object:.+}").HandlerFunc(collectAPIStats("selectobjectcontent", httpTraceHdrs(api.SelectObjectContentHandler))).Queries("select", "").Queries("select-type", "2")
	// GetObjectRetention
	bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(collectAPIStats("getobjectretention", httpTraceHdrs(api.GetObjectRetentionHandler))).Queries("retention", "")
	// GetObject
	bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(collectAPIStats("getobject", httpTraceHdrs(api.GetObjectHandler)))
	// CopyObject
	bucket.Methods(http.MethodPut).Path("/{object:.+}").HeadersRegexp(xhttp.AmzCopySource, ".*?(\\/|%2F).*?").HandlerFunc(collectAPIStats("copyobject", httpTraceAll(api.CopyObjectHandler)))
	// PutObjectRetention
	bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(collectAPIStats("putobjectretention", httpTraceHdrs(api.PutObjectRetentionHandler))).Queries("retention", "")
	// PutObject
	bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(collectAPIStats("putobject", httpTraceHdrs(api.PutObjectHandler)))
	// DeleteObject
	bucket.Methods(http.MethodDelete).Path("/{object:.+}").HandlerFunc(collectAPIStats("deleteobject", httpTraceAll(api.DeleteObjectHandler)))
	// PutObjectLegalHold
	bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.PutObjectLegalHoldHandler)).Queries("legal-hold", "").Queries("versionId", "")
	bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(collectAPIStats("putobjectlegalhold", httpTraceHdrs(api.PutObjectLegalHoldHandler))).Queries("legal-hold", "")
	// GetObjectLegalHold
	bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.GetObjectLegalHoldHandler)).Queries("legal-hold", "").Queries("versionId", "")
	// PutObjectRetention
	bucket.Methods(http.MethodPut).Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.PutObjectRetentionHandler)).Queries("retention", "").Queries("versionId", "")
	// GetObjectRetention
	bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(httpTraceHdrs(api.GetObjectRetentionHandler)).Queries("retention", "").Queries("versionId", "")
	bucket.Methods(http.MethodGet).Path("/{object:.+}").HandlerFunc(collectAPIStats("getobjectlegalhold", httpTraceHdrs(api.GetObjectLegalHoldHandler))).Queries("legal-hold", "")

	/// Bucket operations
	// GetBucketLocation
	bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketlocation", httpTraceAll(api.GetBucketLocationHandler))).Queries("location", "")
	// GetBucketPolicy
	bucket.Methods("GET").HandlerFunc(collectAPIStats("getbucketpolicy", httpTraceAll(api.GetBucketPolicyHandler))).Queries("policy", "")
	bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketpolicy", httpTraceAll(api.GetBucketPolicyHandler))).Queries("policy", "")
	// GetBucketLifecycle
	bucket.Methods("GET").HandlerFunc(collectAPIStats("getbucketlifecycle", httpTraceAll(api.GetBucketLifecycleHandler))).Queries("lifecycle", "")
	bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketlifecycle", httpTraceAll(api.GetBucketLifecycleHandler))).Queries("lifecycle", "")

	// Dummy Bucket Calls
	// GetBucketACL -- this is a dummy call.

@ -153,30 +159,32 @@ func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool)
	bucket.Methods(http.MethodDelete).HandlerFunc(collectAPIStats("deletebuckettagging", httpTraceAll(api.DeleteBucketTaggingHandler))).Queries("tagging", "")

	// GetBucketObjectLockConfig
	bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketObjectLockConfigHandler)).Queries("object-lock", "")
	bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketobjectlockconfiguration", httpTraceAll(api.GetBucketObjectLockConfigHandler))).Queries("object-lock", "")
	// GetBucketVersioning
	bucket.Methods(http.MethodGet).HandlerFunc(httpTraceAll(api.GetBucketVersioningHandler)).Queries("versioning", "")
	bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketversioning", httpTraceAll(api.GetBucketVersioningHandler))).Queries("versioning", "")
	// GetBucketNotification
	bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("getbucketnotification", httpTraceAll(api.GetBucketNotificationHandler))).Queries("notification", "")
	// ListenBucketNotification
	bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listenbucketnotification", httpTraceAll(api.ListenBucketNotificationHandler))).Queries("events", "{events:.*}")
	// ListMultipartUploads
	bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listmultipartuploads", httpTraceAll(api.ListMultipartUploadsHandler))).Queries("uploads", "")
	// ListObjectsV2M
	bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listobjectsv2M", httpTraceAll(api.ListObjectsV2MHandler))).Queries("list-type", "2", "metadata", "true")
	// ListObjectsV2
	bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listobjectsv2", httpTraceAll(api.ListObjectsV2Handler))).Queries("list-type", "2")
	// ListBucketVersions
	bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listbucketversions", httpTraceAll(api.ListBucketObjectVersionsHandler))).Queries("versions", "")
	// ListObjectsV1 (Legacy)
	bucket.Methods("GET").HandlerFunc(collectAPIStats("listobjectsv1", httpTraceAll(api.ListObjectsV1Handler)))
	bucket.Methods(http.MethodGet).HandlerFunc(collectAPIStats("listobjectsv1", httpTraceAll(api.ListObjectsV1Handler)))
	// PutBucketLifecycle
	bucket.Methods("PUT").HandlerFunc(collectAPIStats("putbucketlifecycle", httpTraceAll(api.PutBucketLifecycleHandler))).Queries("lifecycle", "")
	bucket.Methods(http.MethodPut).HandlerFunc(collectAPIStats("putbucketlifecycle", httpTraceAll(api.PutBucketLifecycleHandler))).Queries("lifecycle", "")
	// PutBucketPolicy
	bucket.Methods("PUT").HandlerFunc(collectAPIStats("putbucketpolicy", httpTraceAll(api.PutBucketPolicyHandler))).Queries("policy", "")
	bucket.Methods(http.MethodPut).HandlerFunc(collectAPIStats("putbucketpolicy", httpTraceAll(api.PutBucketPolicyHandler))).Queries("policy", "")

	// PutBucketObjectLockConfig
	bucket.Methods(http.MethodPut).HandlerFunc(httpTraceAll(api.PutBucketObjectLockConfigHandler)).Queries("object-lock", "")
	bucket.Methods(http.MethodPut).HandlerFunc(collectAPIStats("putbucketobjectlockconfig", httpTraceAll(api.PutBucketObjectLockConfigHandler))).Queries("object-lock", "")
	// PutBucketVersioning
	bucket.Methods(http.MethodPut).HandlerFunc(httpTraceAll(api.PutBucketVersioningHandler)).Queries("versioning", "")
	bucket.Methods(http.MethodPut).HandlerFunc(collectAPIStats("putbucketversioning", httpTraceAll(api.PutBucketVersioningHandler))).Queries("versioning", "")
	// PutBucketNotification
	bucket.Methods(http.MethodPut).HandlerFunc(collectAPIStats("putbucketnotification", httpTraceAll(api.PutBucketNotificationHandler))).Queries("notification", "")
	// PutBucket

@ -188,9 +196,9 @@ func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool)
	// DeleteMultipleObjects
	bucket.Methods(http.MethodPost).HandlerFunc(collectAPIStats("deletemultipleobjects", httpTraceAll(api.DeleteMultipleObjectsHandler))).Queries("delete", "")
	// DeleteBucketPolicy
	bucket.Methods("DELETE").HandlerFunc(collectAPIStats("deletebucketpolicy", httpTraceAll(api.DeleteBucketPolicyHandler))).Queries("policy", "")
	bucket.Methods(http.MethodDelete).HandlerFunc(collectAPIStats("deletebucketpolicy", httpTraceAll(api.DeleteBucketPolicyHandler))).Queries("policy", "")
	// DeleteBucketLifecycle
	bucket.Methods("DELETE").HandlerFunc(collectAPIStats("deletebucketlifecycle", httpTraceAll(api.DeleteBucketLifecycleHandler))).Queries("lifecycle", "")
	bucket.Methods(http.MethodDelete).HandlerFunc(collectAPIStats("deletebucketlifecycle", httpTraceAll(api.DeleteBucketLifecycleHandler))).Queries("lifecycle", "")
	// DeleteBucket
	bucket.Methods(http.MethodDelete).HandlerFunc(collectAPIStats("deletebucket", httpTraceAll(api.DeleteBucketHandler)))
}
41
vendor/github.com/minio/minio/cmd/auth-handler.go
generated
vendored
@ -120,20 +120,16 @@ func getRequestAuthType(r *http.Request) authType {

// checkAdminRequestAuthType checks whether the request is a valid signature V2 or V4 request.
// It does not accept presigned or JWT or anonymous requests.
func checkAdminRequestAuthType(ctx context.Context, r *http.Request, region string) APIErrorCode {
func checkAdminRequestAuthType(ctx context.Context, r *http.Request, action iampolicy.AdminAction, region string) (auth.Credentials, APIErrorCode) {
	var cred auth.Credentials
	var owner bool
	s3Err := ErrAccessDenied
	if _, ok := r.Header[xhttp.AmzContentSha256]; ok &&
		getRequestAuthType(r) == authTypeSigned && !skipContentSha256Cksum(r) {
		// We only support admin credentials to access admin APIs.

		var owner bool
		_, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
		cred, owner, s3Err = getReqAccessKeyV4(r, region, serviceS3)
		if s3Err != ErrNone {
			return s3Err
		}

		if !owner {
			return ErrAccessDenied
			return cred, s3Err
		}

		// we only support V4 (no presign) with auth body

@ -144,7 +140,25 @@ func checkAdminRequestAuthType(ctx context.Context, r *http.Request, region stri
		ctx := logger.SetReqInfo(ctx, reqInfo)
		logger.LogIf(ctx, errors.New(getAPIError(s3Err).Description), logger.Application)
	}
	return s3Err

	var claims map[string]interface{}
	claims, s3Err = checkClaimsFromToken(r, cred)
	if s3Err != ErrNone {
		return cred, s3Err
	}

	if globalIAMSys.IsAllowed(iampolicy.Args{
		AccountName:     cred.AccessKey,
		Action:          iampolicy.Action(action),
		ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
		IsOwner:         owner,
		Claims:          claims,
	}) {
		// Request is allowed return the appropriate access key.
		return cred, ErrNone
	}

	return cred, ErrAccessDenied
}

// Fetch the security token set by the client.

@ -343,7 +357,6 @@ func checkRequestAuthTypeToAccessKey(ctx context.Context, r *http.Request, actio
		}
		return accessKey, owner, ErrAccessDenied
	}

	if globalIAMSys.IsAllowed(iampolicy.Args{
		AccountName: cred.AccessKey,
		Action:      iampolicy.Action(action),

@ -473,10 +486,10 @@ func (a authHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	writeErrorResponse(context.Background(), w, errorCodes.ToAPIErr(ErrSignatureVersionNotSupported), r.URL, guessIsBrowserReq(r))
}

// isPutAllowed - check if PUT operation is allowed on the resource, this
// isPutActionAllowed - check if PUT operation is allowed on the resource, this
// call verifies bucket policies and IAM policies, supports multi user
// checks etc.
func isPutAllowed(atype authType, bucketName, objectName string, r *http.Request) (s3Err APIErrorCode) {
func isPutActionAllowed(atype authType, bucketName, objectName string, r *http.Request, action iampolicy.Action) (s3Err APIErrorCode) {
	var cred auth.Credentials
	var owner bool
	switch atype {

@ -513,7 +526,7 @@ func isPutAllowed(atype authType, bucketName, objectName string, r *http.Request

	if globalIAMSys.IsAllowed(iampolicy.Args{
		AccountName:     cred.AccessKey,
		Action:          policy.PutObjectAction,
		Action:          action,
		BucketName:      bucketName,
		ConditionValues: getConditionValues(r, "", cred.AccessKey, claims),
		ObjectName:      objectName,
3
vendor/github.com/minio/minio/cmd/auth-handler_test.go
generated
vendored
@ -28,6 +28,7 @@ import (
	"time"

	"github.com/minio/minio/pkg/auth"
	iampolicy "github.com/minio/minio/pkg/iam/policy"
)

// Test get request auth type.

@ -419,7 +420,7 @@ func TestCheckAdminRequestAuthType(t *testing.T) {
	}
	ctx := context.Background()
	for i, testCase := range testCases {
		if s3Error := checkAdminRequestAuthType(ctx, testCase.Request, globalServerRegion); s3Error != testCase.ErrCode {
		if _, s3Error := checkAdminRequestAuthType(ctx, testCase.Request, iampolicy.AllAdminActions, globalServerRegion); s3Error != testCase.ErrCode {
			t.Errorf("Test %d: Unexpected s3error returned wanted %d, got %d", i, testCase.ErrCode, s3Error)
		}
	}
4
vendor/github.com/minio/minio/cmd/background-heal-ops.go
generated
vendored
@ -61,11 +61,11 @@ func (h *healRoutine) run() {
			if !ok {
				break
			}
			if globalHTTPServer != nil {
			if httpServer := newHTTPServerFn(); httpServer != nil {
				// Wait at max 10 minutes for an inprogress request before proceeding to heal
				waitCount := 600
				// Any requests in progress, delay the heal.
				for (globalHTTPServer.GetRequestCount() >= int32(globalXLSetCount*globalXLSetDriveCount)) &&
				for (httpServer.GetRequestCount() >= int32(globalEndpoints.Nodes())) &&
					waitCount > 0 {
					waitCount--
					time.Sleep(1 * time.Second)
76
vendor/github.com/minio/minio/cmd/background-newdisks-heal-ops.go
generated
vendored
@ -44,7 +44,7 @@ func monitorLocalDisksAndHeal() {
		break
	}

	sets, ok := objAPI.(*xlSets)
	z, ok := objAPI.(*xlZones)
	if !ok {
		return
	}

@ -66,21 +66,24 @@ func monitorLocalDisksAndHeal() {
	for {
		time.Sleep(defaultMonitorNewDiskInterval)

		localDisksToHeal := []Endpoint{}
		for _, endpoint := range globalEndpoints {
			if !endpoint.IsLocal {
		localDisksInZoneHeal := make([]Endpoints, len(z.zones))
		for i, ep := range globalEndpoints {
			localDisksToHeal := Endpoints{}
			for _, endpoint := range ep.Endpoints {
				if !endpoint.IsLocal {
					continue
				}
				// Try to connect to the current endpoint
				// and reformat if the current disk is not formatted
				_, _, err := connectEndpoint(endpoint)
				if err == errUnformattedDisk {
					localDisksToHeal = append(localDisksToHeal, endpoint)
				}
			}
			if len(localDisksToHeal) == 0 {
				continue
			}
			// Try to connect to the current endpoint
			// and reformat if the current disk is not formatted
			_, _, err := connectEndpoint(endpoint)
			if err == errUnformattedDisk {
				localDisksToHeal = append(localDisksToHeal, endpoint)
			}
		}

		if len(localDisksToHeal) == 0 {
			continue
			localDisksInZoneHeal[i] = localDisksToHeal
		}

		// Reformat disks

@ -88,31 +91,36 @@ func monitorLocalDisksAndHeal() {
		// Ensure that reformatting disks is finished
		bgSeq.sourceCh <- nopHeal

		var erasureSetInZoneToHeal = make([][]int, len(localDisksInZoneHeal))
		// Compute the list of erasure set to heal
		var erasureSetToHeal []int
		for _, endpoint := range localDisksToHeal {
			// Load the new format of this passed endpoint
			_, format, err := connectEndpoint(endpoint)
			if err != nil {
				logger.LogIf(ctx, err)
				continue
			}
			// Calculate the set index where the current endpoint belongs
			setIndex, _, err := findDiskIndex(sets.format, format)
			if err != nil {
				logger.LogIf(ctx, err)
				continue
			}
		for i, localDisksToHeal := range localDisksInZoneHeal {
			var erasureSetToHeal []int
			for _, endpoint := range localDisksToHeal {
				// Load the new format of this passed endpoint
				_, format, err := connectEndpoint(endpoint)
				if err != nil {
					logger.LogIf(ctx, err)
					continue
				}
				// Calculate the set index where the current endpoint belongs
				setIndex, _, err := findDiskIndex(z.zones[i].format, format)
				if err != nil {
					logger.LogIf(ctx, err)
					continue
				}

				erasureSetToHeal = append(erasureSetToHeal, setIndex)
			erasureSetToHeal = append(erasureSetToHeal, setIndex)
			}
			erasureSetInZoneToHeal[i] = erasureSetToHeal
		}

		// Heal all erasure sets that need
		for _, setIndex := range erasureSetToHeal {
			xlObj := sets.sets[setIndex]
			err := healErasureSet(ctx, setIndex, xlObj)
			if err != nil {
				logger.LogIf(ctx, err)
		for i, erasureSetToHeal := range erasureSetInZoneToHeal {
			for _, setIndex := range erasureSetToHeal {
				err := healErasureSet(ctx, setIndex, z.zones[i].sets[setIndex])
				if err != nil {
					logger.LogIf(ctx, err)
				}
			}
		}
	}
257
vendor/github.com/minio/minio/cmd/bootstrap-peer-server.go
generated
vendored
Normal file
@ -0,0 +1,257 @@
/*
 * MinIO Cloud Storage, (C) 2019 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (
	"context"
	"crypto/tls"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"net/url"
	"runtime"
	"sync/atomic"
	"time"

	"github.com/gorilla/mux"
	"github.com/minio/minio-go/pkg/set"
	xhttp "github.com/minio/minio/cmd/http"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/cmd/rest"
)

const (
	bootstrapRESTVersion       = "v1"
	bootstrapRESTVersionPrefix = SlashSeparator + bootstrapRESTVersion
	bootstrapRESTPrefix        = minioReservedBucketPath + "/bootstrap"
	bootstrapRESTPath          = bootstrapRESTPrefix + bootstrapRESTVersionPrefix
)

const (
	bootstrapRESTMethodVerify = "/verify"
)

// To abstract a node over network.
type bootstrapRESTServer struct{}

// ServerSystemConfig - captures information about server configuration.
type ServerSystemConfig struct {
	MinioPlatform  string
	MinioRuntime   string
	MinioEndpoints EndpointZones
}

// Diff - returns error on first difference found in two configs.
func (s1 ServerSystemConfig) Diff(s2 ServerSystemConfig) error {
	if s1.MinioPlatform != s2.MinioPlatform {
		return fmt.Errorf("Expected platform '%s', found to be running '%s'",
			s1.MinioPlatform, s2.MinioPlatform)
	}
	if s1.MinioEndpoints.Nodes() != s2.MinioEndpoints.Nodes() {
		return fmt.Errorf("Expected number of endpoints %d, seen %d", s1.MinioEndpoints.Nodes(),
			s2.MinioEndpoints.Nodes())
	}

	for i, ep := range s1.MinioEndpoints {
		if ep.SetCount != s2.MinioEndpoints[i].SetCount {
			return fmt.Errorf("Expected set count %d, seen %d", ep.SetCount,
				s2.MinioEndpoints[i].SetCount)
		}
		if ep.DrivesPerSet != s2.MinioEndpoints[i].DrivesPerSet {
			return fmt.Errorf("Expected drives per set %d, seen %d", ep.DrivesPerSet,
				s2.MinioEndpoints[i].DrivesPerSet)
		}
		for j, endpoint := range ep.Endpoints {
			if endpoint.String() != s2.MinioEndpoints[i].Endpoints[j].String() {
				return fmt.Errorf("Expected endpoint %s, seen %s", endpoint,
					s2.MinioEndpoints[i].Endpoints[j])
			}
		}

	}
	return nil
}

func getServerSystemCfg() ServerSystemConfig {
	return ServerSystemConfig{
		MinioPlatform:  fmt.Sprintf("OS: %s | Arch: %s", runtime.GOOS, runtime.GOARCH),
		MinioEndpoints: globalEndpoints,
	}
}

func (b *bootstrapRESTServer) VerifyHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "VerifyHandler")
	cfg := getServerSystemCfg()
	logger.LogIf(ctx, json.NewEncoder(w).Encode(&cfg))
	w.(http.Flusher).Flush()
}

// registerBootstrapRESTHandlers - register bootstrap rest router.
func registerBootstrapRESTHandlers(router *mux.Router) {
	server := &bootstrapRESTServer{}
	subrouter := router.PathPrefix(bootstrapRESTPrefix).Subrouter()

	subrouter.Methods(http.MethodPost).Path(bootstrapRESTVersionPrefix + bootstrapRESTMethodVerify).HandlerFunc(
		httpTraceHdrs(server.VerifyHandler))
}

// client to talk to bootstrap Nodes.
type bootstrapRESTClient struct {
	endpoint   Endpoint
	restClient *rest.Client
	connected  int32
}

// Reconnect to a bootstrap rest server.
func (client *bootstrapRESTClient) reConnect() {
	atomic.StoreInt32(&client.connected, 1)
}

// Wrapper to restClient.Call to handle network errors, in case of network error the connection is marked disconnected
// permanently. The only way to restore the connection is at the xl-sets layer by xlsets.monitorAndConnectEndpoints()
// after verifying format.json
func (client *bootstrapRESTClient) call(method string, values url.Values, body io.Reader, length int64) (respBody io.ReadCloser, err error) {
	return client.callWithContext(context.Background(), method, values, body, length)
}

// Wrapper to restClient.Call to handle network errors, in case of network error the connection is marked disconnected
// permanently. The only way to restore the connection is at the xl-sets layer by xlsets.monitorAndConnectEndpoints()
// after verifying format.json
func (client *bootstrapRESTClient) callWithContext(ctx context.Context, method string, values url.Values, body io.Reader, length int64) (respBody io.ReadCloser, err error) {
	if !client.IsOnline() {
		client.reConnect()
	}

	if values == nil {
		values = make(url.Values)
	}

	respBody, err = client.restClient.CallWithContext(ctx, method, values, body, length)
	if err == nil {
		return respBody, nil
	}

	if isNetworkError(err) {
		atomic.StoreInt32(&client.connected, 0)
	}

	return nil, err
}

// Stringer provides a canonicalized representation of node.
func (client *bootstrapRESTClient) String() string {
	return client.endpoint.String()
}

// IsOnline - returns whether RPC client failed to connect or not.
func (client *bootstrapRESTClient) IsOnline() bool {
	return atomic.LoadInt32(&client.connected) == 1
}

// Close - marks the client as closed.
func (client *bootstrapRESTClient) Close() error {
	atomic.StoreInt32(&client.connected, 0)
	client.restClient.Close()
	return nil
}

// Verify - fetches system server config.
func (client *bootstrapRESTClient) Verify(srcCfg ServerSystemConfig) (err error) {
	if newObjectLayerFn() != nil {
		return nil
	}
	respBody, err := client.call(bootstrapRESTMethodVerify, nil, nil, -1)
	if err != nil {
		return
	}
	defer xhttp.DrainBody(respBody)
	recvCfg := ServerSystemConfig{}
	if err = json.NewDecoder(respBody).Decode(&recvCfg); err != nil {
		return err
	}
	return srcCfg.Diff(recvCfg)
}

func verifyServerSystemConfig(endpointZones EndpointZones) error {
	srcCfg := getServerSystemCfg()
	clnts := newBootstrapRESTClients(endpointZones)
	var onlineServers int
	for onlineServers < len(clnts)/2 {
		for _, clnt := range clnts {
			if err := clnt.Verify(srcCfg); err != nil {
				if isNetworkError(err) {
					continue
				}
				return fmt.Errorf("%s has incorrect configuration: %w", clnt.String(), err)
			}
			onlineServers++
		}
		// Sleep for a while - so that we don't go into
		// 100% CPU when half the endpoints are offline.
		time.Sleep(500 * time.Millisecond)
	}
	return nil
}

func newBootstrapRESTClients(endpointZones EndpointZones) []*bootstrapRESTClient {
	seenHosts := set.NewStringSet()
	var clnts []*bootstrapRESTClient
	for _, ep := range endpointZones {
		for _, endpoint := range ep.Endpoints {
			if seenHosts.Contains(endpoint.Host) {
				continue
			}
			seenHosts.Add(endpoint.Host)

			// Only proceed for remote endpoints.
			if !endpoint.IsLocal {
				clnt, err := newBootstrapRESTClient(endpoint)
				if err != nil {
					continue
				}
				clnts = append(clnts, clnt)
			}
		}
	}
	return clnts
}

// Returns a new bootstrap client.
func newBootstrapRESTClient(endpoint Endpoint) (*bootstrapRESTClient, error) {
	serverURL := &url.URL{
		Scheme: endpoint.Scheme,
		Host:   endpoint.Host,
		Path:   bootstrapRESTPath,
	}

	var tlsConfig *tls.Config
	if globalIsSSL {
		tlsConfig = &tls.Config{
			ServerName: endpoint.Hostname(),
			RootCAs:    globalRootCAs,
		}
	}

	trFn := newCustomHTTPTransport(tlsConfig, rest.DefaultRESTTimeout, rest.DefaultRESTTimeout)
	restClient, err := rest.NewClient(serverURL, trFn, newAuthToken)
	if err != nil {
		return nil, err
	}

	return &bootstrapRESTClient{endpoint: endpoint, restClient: restClient, connected: 1}, nil
}
90
vendor/github.com/minio/minio/cmd/bucket-handlers-listobjects.go
generated
vendored
@ -126,6 +126,90 @@ func (api objectAPIHandlers) ListBucketObjectVersionsHandler(w http.ResponseWrit
	writeSuccessResponseXML(w, encodeResponse(response))
}

// ListObjectsV2MHandler - GET Bucket (List Objects) Version 2 with metadata.
// --------------------------
// This implementation of the GET operation returns some or all (up to 10000)
// of the objects in a bucket. You can use the request parameters as selection
// criteria to return a subset of the objects in a bucket.
//
// NOTE: It is recommended that this API be used for application development.
// MinIO continues to support ListObjectsV1 and V2 for supporting legacy tools.
func (api objectAPIHandlers) ListObjectsV2MHandler(w http.ResponseWriter, r *http.Request) {
	ctx := newContext(r, w, "ListObjectsV2M")

	defer logger.AuditLog(w, r, "ListObjectsV2M", mustGetClaimsFromToken(r))

	vars := mux.Vars(r)
	bucket := vars["bucket"]

	objectAPI := api.ObjectAPI()
	if objectAPI == nil {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
		return
	}

	if s3Error := checkRequestAuthType(ctx, r, policy.ListBucketAction, bucket, ""); s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

	urlValues := r.URL.Query()

	// Extract all the listObjectsV2 query params to their native values.
	prefix, token, startAfter, delimiter, fetchOwner, maxKeys, encodingType, errCode := getListObjectsV2Args(urlValues)
	if errCode != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(errCode), r.URL, guessIsBrowserReq(r))
		return
	}

	// Validate the query params before beginning to serve the request.
	// fetch-owner is not validated since it is a boolean
	if s3Error := validateListObjectsArgs(token, delimiter, encodingType, maxKeys); s3Error != ErrNone {
		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
		return
	}

	listObjectsV2 := objectAPI.ListObjectsV2

	// Initiate a list objects operation based on the input params.
	// On success would return back ListObjectsInfo object to be
	// marshaled into S3 compatible XML header.
	listObjectsV2Info, err := listObjectsV2(ctx, bucket, prefix, token, delimiter, maxKeys, fetchOwner, startAfter)
	if err != nil {
		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
		return
	}

	for i := range listObjectsV2Info.Objects {
		var actualSize int64
		if listObjectsV2Info.Objects[i].IsCompressed() {
			// Read the decompressed size from the meta.json.
			actualSize = listObjectsV2Info.Objects[i].GetActualSize()
			if actualSize < 0 {
				writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInvalidDecompressedSize), r.URL, guessIsBrowserReq(r))
				return
			}
			// Set the info.Size to the actualSize.
			listObjectsV2Info.Objects[i].Size = actualSize
		} else if crypto.IsEncrypted(listObjectsV2Info.Objects[i].UserDefined) {
			listObjectsV2Info.Objects[i].ETag = getDecryptedETag(r.Header, listObjectsV2Info.Objects[i], false)
			listObjectsV2Info.Objects[i].Size, err = listObjectsV2Info.Objects[i].DecryptedSize()
			if err != nil {
				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
				return
			}
		}
	}

	response := generateListObjectsV2Response(bucket, prefix, token,
		listObjectsV2Info.NextContinuationToken, startAfter,
		delimiter, encodingType, fetchOwner, listObjectsV2Info.IsTruncated,
		maxKeys, listObjectsV2Info.Objects, listObjectsV2Info.Prefixes, true)

	// Write success response.
	writeSuccessResponseXML(w, encodeResponse(response))
}

// ListObjectsV2Handler - GET Bucket (List Objects) Version 2.
// --------------------------
// This implementation of the GET operation returns some or all (up to 10000)

@ -201,8 +285,10 @@ func (api objectAPIHandlers) ListObjectsV2Handler(w http.ResponseWriter, r *http
		}
	}

	response := generateListObjectsV2Response(bucket, prefix, token, listObjectsV2Info.NextContinuationToken, startAfter,
		delimiter, encodingType, fetchOwner, listObjectsV2Info.IsTruncated, maxKeys, listObjectsV2Info.Objects, listObjectsV2Info.Prefixes)
	response := generateListObjectsV2Response(bucket, prefix, token,
		listObjectsV2Info.NextContinuationToken, startAfter,
		delimiter, encodingType, fetchOwner, listObjectsV2Info.IsTruncated,
		maxKeys, listObjectsV2Info.Objects, listObjectsV2Info.Prefixes, false)

	// Write success response.
	writeSuccessResponseXML(w, encodeResponse(response))
130
vendor/github.com/minio/minio/cmd/bucket-handlers.go
generated
vendored
@ -32,10 +32,10 @@ import (
|
|||
"github.com/gorilla/mux"
|
||||
|
||||
"github.com/minio/minio-go/v6/pkg/set"
|
||||
"github.com/minio/minio/cmd/config/etcd/dns"
|
||||
"github.com/minio/minio/cmd/crypto"
|
||||
xhttp "github.com/minio/minio/cmd/http"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/dns"
|
||||
"github.com/minio/minio/pkg/event"
|
||||
"github.com/minio/minio/pkg/handlers"
|
||||
"github.com/minio/minio/pkg/hash"
|
||||
|
@ -45,10 +45,10 @@ import (
|
|||
)
|
||||
|
||||
const (
|
||||
getBucketVersioningResponse = `<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>`
|
||||
objectLockConfig = "object-lock.xml"
|
||||
objectLockEnabledConfigFile = "object-lock-enabled.json"
|
||||
objectLockEnabledConfig = `{"x-amz-bucket-object-lock-enabled":true}`
|
||||
getBucketVersioningResponse = `<VersioningConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/"/>`
|
||||
objectLockConfig = "object-lock.xml"
|
||||
bucketObjectLockEnabledConfigFile = "object-lock-enabled.json"
|
||||
bucketObjectLockEnabledConfig = `{"x-amz-bucket-object-lock-enabled":true}`
|
||||
)
|
||||
|
||||
// Check if there are buckets on server without corresponding entry in etcd backend and
|
||||
|
@ -117,7 +117,7 @@ func initFederatorBackend(buckets []BucketInfo, objLayer ObjectLayer) {
|
|||
|
||||
// We go to here, so we know the bucket no longer exists, but is registered in DNS to this server
|
||||
if err := globalDNSConfig.DeleteRecord(dnsBuckets[index]); err != nil {
|
||||
return fmt.Errorf("Failed to remove DNS entry for %s due to %v", dnsBuckets[index].Key, err)
|
||||
return fmt.Errorf("Failed to remove DNS entry for %s due to %w", dnsBuckets[index].Key, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
|
@@ -370,25 +370,17 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
 		return
 	}
 
-	// Deny if WORM is enabled
-	if _, isWORMBucket := isWORMEnabled(bucket); isWORMBucket {
-		// Not required to check whether given objects exist or not, because
-		// DeleteMultipleObject is always successful irrespective of object existence.
-		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL, guessIsBrowserReq(r))
-		return
-	}
-
 	deleteObjectsFn := objectAPI.DeleteObjects
 	if api.CacheAPI() != nil {
 		deleteObjectsFn = api.CacheAPI().DeleteObjects
 	}
 
-	type delObj struct {
-		origIndex int
-		name      string
+	var objectsToDelete = map[string]int{}
+	getObjectInfoFn := objectAPI.GetObjectInfo
+	if api.CacheAPI() != nil {
+		getObjectInfoFn = api.CacheAPI().GetObjectInfo
 	}
 
-	var objectsToDelete []delObj
 	var dErrs = make([]APIErrorCode, len(deleteObjects.Objects))
 
 	for index, object := range deleteObjects.Objects {
@@ -399,26 +391,37 @@ func (api objectAPIHandlers) DeleteMultipleObjectsHandler(w http.ResponseWriter,
 			}
 			continue
 		}
 
-		objectsToDelete = append(objectsToDelete, delObj{index, object.ObjectName})
+		govBypassPerms := checkRequestAuthType(ctx, r, policy.BypassGovernanceRetentionAction, bucket, object.ObjectName)
+		if _, err := enforceRetentionBypassForDelete(ctx, r, bucket, object.ObjectName, getObjectInfoFn, govBypassPerms); err != ErrNone {
+			dErrs[index] = err
+			continue
+		}
+		// Avoid duplicate objects, we use map to filter them out.
+		if _, ok := objectsToDelete[object.ObjectName]; !ok {
+			objectsToDelete[object.ObjectName] = index
+		}
 	}
 
-	toNames := func(input []delObj) (output []string) {
+	toNames := func(input map[string]int) (output []string) {
 		output = make([]string, len(input))
-		for i := range input {
-			output[i] = input[i].name
+		idx := 0
+		for name := range input {
+			output[idx] = name
+			idx++
 		}
 		return
 	}
 
-	errs, err := deleteObjectsFn(ctx, bucket, toNames(objectsToDelete))
+	deleteList := toNames(objectsToDelete)
+	errs, err := deleteObjectsFn(ctx, bucket, deleteList)
 	if err != nil {
 		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 		return
 	}
 
-	for i, obj := range objectsToDelete {
-		dErrs[obj.origIndex] = toAPIErrorCode(ctx, errs[i])
+	for i, objName := range deleteList {
+		dIdx := objectsToDelete[objName]
+		dErrs[dIdx] = toAPIErrorCode(ctx, errs[i])
 	}
 
 	// Collect deleted objects and errors if any.
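Note: the rewritten handler deduplicates the delete list with a map[string]int keyed by object name, recording the first request index so per-object errors can be routed back to the caller's original positions (Go map iteration order is unspecified, which is why the index must be carried alongside the name). A minimal standalone sketch of the same pattern, with hypothetical sample names:

package main

import "fmt"

func main() {
	requested := []string{"a.txt", "b.txt", "a.txt"}

	// Deduplicate while remembering the original request index,
	// mirroring the map[string]int approach in the new handler.
	objectsToDelete := map[string]int{}
	for index, name := range requested {
		if _, ok := objectsToDelete[name]; !ok {
			objectsToDelete[name] = index
		}
	}

	deleteList := make([]string, 0, len(objectsToDelete))
	for name := range objectsToDelete {
		deleteList = append(deleteList, name)
	}
	fmt.Println(deleteList) // [a.txt b.txt] in unspecified order
}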
@@ -519,8 +522,8 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
 		}
 
 		if objectLockEnabled {
-			configFile := path.Join(bucketConfigPrefix, bucket, objectLockEnabledConfigFile)
-			if err = saveConfig(ctx, objectAPI, configFile, []byte(objectLockEnabledConfig)); err != nil {
+			configFile := path.Join(bucketConfigPrefix, bucket, bucketObjectLockEnabledConfigFile)
+			if err = saveConfig(ctx, objectAPI, configFile, []byte(bucketObjectLockEnabledConfig)); err != nil {
 				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 				return
 			}
@@ -555,11 +558,13 @@ func (api objectAPIHandlers) PutBucketHandler(w http.ResponseWriter, r *http.Req
 	}
 
 	if objectLockEnabled {
-		configFile := path.Join(bucketConfigPrefix, bucket, objectLockEnabledConfigFile)
-		if err = saveConfig(ctx, objectAPI, configFile, []byte(objectLockEnabledConfig)); err != nil {
+		configFile := path.Join(bucketConfigPrefix, bucket, bucketObjectLockEnabledConfigFile)
+		if err = saveConfig(ctx, objectAPI, configFile, []byte(bucketObjectLockEnabledConfig)); err != nil {
 			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
 			return
 		}
+		globalBucketObjectLockConfig.Set(bucket, Retention{})
+		globalNotificationSys.PutBucketObjectLockConfig(ctx, bucket, Retention{})
 	}
 
 	// Make sure to add Location information here only for bucket
@@ -892,12 +897,7 @@ func (api objectAPIHandlers) DeleteBucketHandler(w http.ResponseWriter, r *http.
 		}
 	}
 
-	globalBucketRetentionConfig.Delete(bucket)
-	globalNotificationSys.RemoveNotification(bucket)
-	globalPolicySys.Remove(bucket)
 	globalNotificationSys.DeleteBucket(ctx, bucket)
-	globalLifecycleSys.Remove(bucket)
-	globalNotificationSys.RemoveBucketLifecycle(ctx, bucket)
 
 	// Write success response.
 	writeSuccessNoContent(w)
@@ -981,14 +981,18 @@ func (api objectAPIHandlers) PutBucketObjectLockConfigHandler(w http.ResponseWri
 		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMethodNotAllowed), r.URL, guessIsBrowserReq(r))
 		return
 	}
 
-	config, err := parseObjectLockConfig(r.Body)
-	if err != nil {
-		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrMalformedXML), r.URL, guessIsBrowserReq(r))
+	if s3Error := checkRequestAuthType(ctx, r, policy.PutBucketObjectLockConfigurationAction, bucket, ""); s3Error != ErrNone {
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
 		return
 	}
 
-	configFile := path.Join(bucketConfigPrefix, bucket, objectLockEnabledConfigFile)
+	config, err := parseObjectLockConfig(r.Body)
+	if err != nil {
+		apiErr := errorCodes.ToAPIErr(ErrMalformedXML)
+		apiErr.Description = err.Error()
+		writeErrorResponse(ctx, w, apiErr, r.URL, guessIsBrowserReq(r))
+		return
+	}
+	configFile := path.Join(bucketConfigPrefix, bucket, bucketObjectLockEnabledConfigFile)
 	configData, err := readConfig(ctx, objectAPI, configFile)
 	if err != nil {
 		aerr := toAPIError(ctx, err)
@@ -999,29 +1003,27 @@ func (api objectAPIHandlers) PutBucketObjectLockConfigHandler(w http.ResponseWri
 		return
 	}
 
-	if string(configData) != objectLockEnabledConfig {
+	if string(configData) != bucketObjectLockEnabledConfig {
 		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInternalError), r.URL, guessIsBrowserReq(r))
 		return
 	}
 
-	data, err := xml.Marshal(config)
-	if err != nil {
-		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
-		return
-	}
-	configFile = path.Join(bucketConfigPrefix, bucket, objectLockConfig)
-	if err = saveConfig(ctx, objectAPI, configFile, data); err != nil {
-		writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
-		return
-	}
 	if config.Rule != nil {
+		data, err := xml.Marshal(config)
+		if err != nil {
+			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
+			return
+		}
+
+		configFile := path.Join(bucketConfigPrefix, bucket, objectLockConfig)
+		if err = saveConfig(ctx, objectAPI, configFile, data); err != nil {
+			writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
+			return
+		}
+
 		retention := config.ToRetention()
-		globalBucketRetentionConfig.Set(bucket, retention)
+		globalBucketObjectLockConfig.Set(bucket, retention)
 		globalNotificationSys.PutBucketObjectLockConfig(ctx, bucket, retention)
 	} else {
-		globalBucketRetentionConfig.Delete(bucket)
+		globalBucketObjectLockConfig.Remove(bucket)
 		globalNotificationSys.RemoveBucketObjectLockConfig(ctx, bucket)
 	}
 
 	// Write success response.
@@ -1046,19 +1048,25 @@ func (api objectAPIHandlers) GetBucketObjectLockConfigHandler(w http.ResponseWri
 		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrServerNotInitialized), r.URL, guessIsBrowserReq(r))
 		return
 	}
 
-	configFile := path.Join(bucketConfigPrefix, bucket, objectLockEnabledConfigFile)
+	// check if user has permissions to perform this operation
+	if s3Error := checkRequestAuthType(ctx, r, policy.GetBucketObjectLockConfigurationAction, bucket, ""); s3Error != ErrNone {
+		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
+		return
+	}
+	configFile := path.Join(bucketConfigPrefix, bucket, bucketObjectLockEnabledConfigFile)
 	configData, err := readConfig(ctx, objectAPI, configFile)
 	if err != nil {
-		aerr := toAPIError(ctx, err)
+		var aerr APIError
+		if err == errConfigNotFound {
+			aerr = errorCodes.ToAPIErr(ErrMethodNotAllowed)
+		} else {
+			aerr = toAPIError(ctx, err)
+		}
 		writeErrorResponse(ctx, w, aerr, r.URL, guessIsBrowserReq(r))
 		return
 	}
 
-	if string(configData) != objectLockEnabledConfig {
+	if string(configData) != bucketObjectLockEnabledConfig {
 		writeErrorResponse(ctx, w, errorCodes.ToAPIErr(ErrInternalError), r.URL, guessIsBrowserReq(r))
 		return
 	}
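Note: the recurring change in this file (and throughout this vendor update) is switching fmt.Errorf verbs from %v to %w. With %w (Go 1.13+) the wrapped error stays inspectable through errors.Is and errors.As instead of being flattened into a string. A minimal sketch, not taken from the diff, with an illustrative sentinel error:

package main

import (
	"errors"
	"fmt"
)

var errConfigNotFound = errors.New("config not found")

func load() error {
	// %w keeps errConfigNotFound in the error chain; %v would not.
	return fmt.Errorf("Unable to load config file. %w", errConfigNotFound)
}

func main() {
	err := load()
	// errors.Is can see through the wrapper and match the sentinel.
	fmt.Println(errors.Is(err, errConfigNotFound)) // true
}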
6
vendor/github.com/minio/minio/cmd/bucket-handlers_test.go
generated
vendored
@@ -331,7 +331,7 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s
 			shouldPass: false,
 		},
 		// Test case -3.
-		// Setting invalid delimiter, expecting the HTTP response status to be http.StatusNotImplemented.
+		// Delimiter unsupported, but response is empty.
 		{
 			bucket: bucketName,
 			prefix: "",
@@ -341,8 +341,8 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s
 			maxUploads: "0",
 			accessKey:  credentials.AccessKey,
 			secretKey:  credentials.SecretKey,
-			expectedRespStatus: http.StatusNotImplemented,
-			shouldPass:         false,
+			expectedRespStatus: http.StatusOK,
+			shouldPass:         true,
 		},
 		// Test case - 4.
 		// Setting Invalid prefix and marker combination.
10
vendor/github.com/minio/minio/cmd/common-main.go
generated
vendored
@@ -158,7 +158,7 @@ func handleCommonCmdArgs(ctx *cli.Context) {
 
 func handleCommonEnvVars() {
 	var err error
-	globalBrowserEnabled, err = config.ParseBool(env.Get(config.EnvBrowser, config.StateOn))
+	globalBrowserEnabled, err = config.ParseBool(env.Get(config.EnvBrowser, config.EnableOn))
 	if err != nil {
 		logger.Fatal(config.ErrInvalidBrowserValue(err), "Invalid MINIO_BROWSER value in environment variable")
 	}
@@ -202,12 +202,10 @@ func handleCommonEnvVars() {
 	// In place update is true by default if the MINIO_UPDATE is not set
 	// or is not set to 'off', if MINIO_UPDATE is set to 'off' then
 	// in-place update is off.
-	globalInplaceUpdateDisabled = strings.EqualFold(env.Get(config.EnvUpdate, config.StateOn), config.StateOff)
+	globalInplaceUpdateDisabled = strings.EqualFold(env.Get(config.EnvUpdate, config.EnableOn), config.EnableOff)
 
-	accessKey := env.Get(config.EnvAccessKey, "")
-	secretKey := env.Get(config.EnvSecretKey, "")
-	if accessKey != "" && secretKey != "" {
-		cred, err := auth.CreateCredentials(accessKey, secretKey)
+	if env.IsSet(config.EnvAccessKey) || env.IsSet(config.EnvSecretKey) {
+		cred, err := auth.CreateCredentials(env.Get(config.EnvAccessKey, ""), env.Get(config.EnvSecretKey, ""))
 		if err != nil {
 			logger.Fatal(config.ErrInvalidCredentials(err),
 				"Unable to validate credentials inherited from the shell environment")
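Note: replacing the empty-string check with env.IsSet(...) || env.IsSet(...) changes the failure mode. Exporting only one of the two variables now reaches auth.CreateCredentials, which rejects the half-configured pair at startup instead of silently falling back to the stored config. A rough standard-library equivalent (os.LookupEnv standing in for MinIO's env.IsSet helper; the variable names match MinIO's documented ones):

package main

import (
	"fmt"
	"os"
)

func main() {
	// OR means setting only one of the two variables still enters the
	// branch, so a half-configured credential pair fails loudly.
	_, hasAccess := os.LookupEnv("MINIO_ACCESS_KEY")
	_, hasSecret := os.LookupEnv("MINIO_SECRET_KEY")
	if hasAccess || hasSecret {
		if !hasAccess || !hasSecret {
			fmt.Println("fatal: both MINIO_ACCESS_KEY and MINIO_SECRET_KEY must be set")
			os.Exit(1)
		}
		fmt.Println("credentials inherited from the shell environment")
	}
}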
355
vendor/github.com/minio/minio/cmd/config-current.go
generated
vendored
@@ -18,6 +18,7 @@ package cmd
 
 import (
 	"context"
+	"fmt"
 	"strings"
 	"sync"
 
@@ -25,6 +26,8 @@ import (
 	"github.com/minio/minio/cmd/config/cache"
 	"github.com/minio/minio/cmd/config/compress"
-	"github.com/minio/minio/cmd/config/etcd"
+	xetcd "github.com/minio/minio/cmd/config/etcd"
+	"github.com/minio/minio/cmd/config/etcd/dns"
 	xldap "github.com/minio/minio/cmd/config/identity/ldap"
 	"github.com/minio/minio/cmd/config/identity/openid"
 	"github.com/minio/minio/cmd/config/notify"
@@ -34,10 +37,164 @@ import (
 	xhttp "github.com/minio/minio/cmd/http"
 	"github.com/minio/minio/cmd/logger"
 	"github.com/minio/minio/cmd/logger/target/http"
-	"github.com/minio/minio/pkg/dns"
 	"github.com/minio/minio/pkg/env"
 )
 
+func initHelp() {
+	var kvs = map[string]config.KVS{
+		config.EtcdSubSys:           etcd.DefaultKVS,
+		config.CacheSubSys:          cache.DefaultKVS,
+		config.CompressionSubSys:    compress.DefaultKVS,
+		config.IdentityLDAPSubSys:   xldap.DefaultKVS,
+		config.IdentityOpenIDSubSys: openid.DefaultKVS,
+		config.PolicyOPASubSys:      opa.DefaultKVS,
+		config.RegionSubSys:         config.DefaultRegionKVS,
+		config.CredentialsSubSys:    config.DefaultCredentialKVS,
+		config.KmsVaultSubSys:       crypto.DefaultKVS,
+		config.LoggerWebhookSubSys:  logger.DefaultKVS,
+		config.AuditWebhookSubSys:   logger.DefaultAuditKVS,
+	}
+	for k, v := range notify.DefaultNotificationKVS {
+		kvs[k] = v
+	}
+	if globalIsXL {
+		kvs[config.StorageClassSubSys] = storageclass.DefaultKVS
+	}
+	config.RegisterDefaultKVS(kvs)
+
+	// Captures help for each sub-system
+	var helpSubSys = config.HelpKVS{
+		config.HelpKV{
+			Key:         config.RegionSubSys,
+			Description: "label the location of the server",
+		},
+		config.HelpKV{
+			Key:         config.CacheSubSys,
+			Description: "add caching storage tier",
+		},
+		config.HelpKV{
+			Key:         config.CompressionSubSys,
+			Description: "enable server side compression of objects",
+		},
+		config.HelpKV{
+			Key:         config.EtcdSubSys,
+			Description: "federate multiple clusters for IAM and Bucket DNS",
+		},
+		config.HelpKV{
+			Key:         config.IdentityOpenIDSubSys,
+			Description: "enable OpenID SSO support",
+		},
+		config.HelpKV{
+			Key:         config.IdentityLDAPSubSys,
+			Description: "enable LDAP SSO support",
+		},
+		config.HelpKV{
+			Key:         config.PolicyOPASubSys,
+			Description: "enable external OPA for policy enforcement",
+		},
+		config.HelpKV{
+			Key:         config.KmsVaultSubSys,
+			Description: "enable external HashiCorp Vault for KMS",
+		},
+		config.HelpKV{
+			Key:             config.LoggerWebhookSubSys,
+			Description:     "send server logs to webhook endpoints",
+			MultipleTargets: true,
+		},
+		config.HelpKV{
+			Key:             config.AuditWebhookSubSys,
+			Description:     "send audit logs to webhook endpoints",
+			MultipleTargets: true,
+		},
+		config.HelpKV{
+			Key:             config.NotifyWebhookSubSys,
+			Description:     "publish bucket notifications to webhook endpoints",
+			MultipleTargets: true,
+		},
+		config.HelpKV{
+			Key:             config.NotifyAMQPSubSys,
+			Description:     "publish bucket notifications to AMQP endpoints",
+			MultipleTargets: true,
+		},
+		config.HelpKV{
+			Key:             config.NotifyKafkaSubSys,
+			Description:     "publish bucket notifications to Kafka endpoints",
+			MultipleTargets: true,
+		},
+		config.HelpKV{
+			Key:             config.NotifyMQTTSubSys,
+			Description:     "publish bucket notifications to MQTT endpoints",
+			MultipleTargets: true,
+		},
+		config.HelpKV{
+			Key:             config.NotifyNATSSubSys,
+			Description:     "publish bucket notifications to NATS endpoints",
+			MultipleTargets: true,
+		},
+		config.HelpKV{
+			Key:             config.NotifyNSQSubSys,
+			Description:     "publish bucket notifications to NSQ endpoints",
+			MultipleTargets: true,
+		},
+		config.HelpKV{
+			Key:             config.NotifyMySQLSubSys,
+			Description:     "publish bucket notifications to MySQL endpoints",
+			MultipleTargets: true,
+		},
+		config.HelpKV{
+			Key:             config.NotifyPostgresSubSys,
+			Description:     "publish bucket notifications to Postgres endpoints",
+			MultipleTargets: true,
+		},
+		config.HelpKV{
+			Key:             config.NotifyRedisSubSys,
+			Description:     "publish bucket notifications to Redis endpoints",
+			MultipleTargets: true,
+		},
+		config.HelpKV{
+			Key:             config.NotifyESSubSys,
+			Description:     "publish bucket notifications to Elasticsearch endpoints",
+			MultipleTargets: true,
+		},
+	}
+
+	if globalIsXL {
+		helpSubSys = append(helpSubSys, config.HelpKV{})
+		copy(helpSubSys[2:], helpSubSys[1:])
+		helpSubSys[1] = config.HelpKV{
+			Key:         config.StorageClassSubSys,
+			Description: "define object level redundancy",
+		}
+	}
+
+	var helpMap = map[string]config.HelpKVS{
+		"":                          helpSubSys, // Help for all sub-systems.
+		config.RegionSubSys:         config.RegionHelp,
+		config.StorageClassSubSys:   storageclass.Help,
+		config.EtcdSubSys:           etcd.Help,
+		config.CacheSubSys:          cache.Help,
+		config.CompressionSubSys:    compress.Help,
+		config.IdentityOpenIDSubSys: openid.Help,
+		config.IdentityLDAPSubSys:   xldap.Help,
+		config.PolicyOPASubSys:      opa.Help,
+		config.KmsVaultSubSys:       crypto.Help,
+		config.LoggerWebhookSubSys:  logger.Help,
+		config.AuditWebhookSubSys:   logger.HelpAudit,
+		config.NotifyAMQPSubSys:     notify.HelpAMQP,
+		config.NotifyKafkaSubSys:    notify.HelpKafka,
+		config.NotifyMQTTSubSys:     notify.HelpMQTT,
+		config.NotifyNATSSubSys:     notify.HelpNATS,
+		config.NotifyNSQSubSys:      notify.HelpNSQ,
+		config.NotifyMySQLSubSys:    notify.HelpMySQL,
+		config.NotifyPostgresSubSys: notify.HelpPostgres,
+		config.NotifyRedisSubSys:    notify.HelpRedis,
+		config.NotifyWebhookSubSys:  notify.HelpWebhook,
+		config.NotifyESSubSys:       notify.HelpES,
+	}
+
+	config.RegisterHelpSubSys(helpMap)
+}
+
 var (
 	// globalServerConfig server config.
 	globalServerConfig config.Config
@@ -59,10 +216,6 @@ func validateConfig(s config.Config) error {
 		return err
 	}
 
-	if _, err := config.LookupWorm(s[config.WormSubSys][config.Default]); err != nil {
-		return err
-	}
-
 	if globalIsXL {
 		if _, err := storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default],
 			globalXLSetDriveCount); err != nil {
@@ -104,6 +257,9 @@ func validateConfig(s config.Config) error {
 		if _, err = crypto.NewKMS(kmsCfg); err != nil {
 			return err
 		}
+
+		// Disable merging env values for the rest.
+		env.SetEnvOff()
 	}
 }
 
@@ -134,73 +290,73 @@ func lookupConfigs(s config.Config) (err error) {
 		// Env doesn't seem to be set, we fallback to lookup creds from the config.
 		globalActiveCred, err = config.LookupCreds(s[config.CredentialsSubSys][config.Default])
 		if err != nil {
-			return config.Errorf("Invalid credentials configuration: %s", err)
+			return fmt.Errorf("Invalid credentials configuration: %w", err)
 		}
 	}
 
-	etcdCfg, err := etcd.LookupConfig(s[config.EtcdSubSys][config.Default], globalRootCAs)
+	etcdCfg, err := xetcd.LookupConfig(s[config.EtcdSubSys][config.Default], globalRootCAs)
 	if err != nil {
-		return config.Errorf("Unable to initialize etcd config: %s", err)
+		return fmt.Errorf("Unable to initialize etcd config: %w", err)
 	}
 
-	globalEtcdClient, err = etcd.New(etcdCfg)
+	globalEtcdClient, err = xetcd.New(etcdCfg)
 	if err != nil {
-		return config.Errorf("Unable to initialize etcd config: %s", err)
+		return fmt.Errorf("Unable to initialize etcd config: %w", err)
 	}
 
 	if len(globalDomainNames) != 0 && !globalDomainIPs.IsEmpty() && globalEtcdClient != nil {
-		globalDNSConfig, err = dns.NewCoreDNS(globalEtcdClient,
+		globalDNSConfig, err = dns.NewCoreDNS(etcdCfg.Config,
 			dns.DomainNames(globalDomainNames),
 			dns.DomainIPs(globalDomainIPs),
 			dns.DomainPort(globalMinioPort),
 			dns.CoreDNSPath(etcdCfg.CoreDNSPath),
 		)
 		if err != nil {
-			return config.Errorf("Unable to initialize DNS config for %s: %s", globalDomainNames, err)
+			return config.Errorf(config.SafeModeKind,
+				"Unable to initialize DNS config for %s: %s", globalDomainNames, err)
 		}
 	}
 
 	globalServerRegion, err = config.LookupRegion(s[config.RegionSubSys][config.Default])
 	if err != nil {
-		return config.Errorf("Invalid region configuration: %s", err)
+		return fmt.Errorf("Invalid region configuration: %w", err)
 	}
 
-	globalWORMEnabled, err = config.LookupWorm(s[config.WormSubSys][config.Default])
+	globalWORMEnabled, err = config.LookupWorm()
 	if err != nil {
-		return config.Errorf("Invalid worm configuration: %s", err)
-
+		return fmt.Errorf("Invalid worm configuration: %w", err)
 	}
 
 	if globalIsXL {
 		globalStorageClass, err = storageclass.LookupConfig(s[config.StorageClassSubSys][config.Default],
 			globalXLSetDriveCount)
 		if err != nil {
-			return config.Errorf("Unable to initialize storage class config: %s", err)
+			return fmt.Errorf("Unable to initialize storage class config: %w", err)
 		}
 	}
 
 	globalCacheConfig, err = cache.LookupConfig(s[config.CacheSubSys][config.Default])
 	if err != nil {
-		return config.Errorf("Unable to setup cache: %s", err)
+		return fmt.Errorf("Unable to setup cache: %w", err)
 	}
 
 	if globalCacheConfig.Enabled {
 		if cacheEncKey := env.Get(cache.EnvCacheEncryptionMasterKey, ""); cacheEncKey != "" {
 			globalCacheKMS, err = crypto.ParseMasterKey(cacheEncKey)
 			if err != nil {
-				return config.Errorf("Unable to setup encryption cache: %s", err)
+				return fmt.Errorf("Unable to setup encryption cache: %w", err)
 			}
 		}
 	}
 
 	kmsCfg, err := crypto.LookupConfig(s[config.KmsVaultSubSys][config.Default])
 	if err != nil {
-		return config.Errorf("Unable to setup KMS config: %s", err)
+		return fmt.Errorf("Unable to setup KMS config: %w", err)
 	}
 
 	GlobalKMS, err = crypto.NewKMS(kmsCfg)
 	if err != nil {
-		return config.Errorf("Unable to setup KMS with current KMS config: %s", err)
+		return fmt.Errorf("Unable to setup KMS with current KMS config: %w", err)
 	}
 
 	// Enable auto-encryption if enabled
@@ -208,19 +364,19 @@ func lookupConfigs(s config.Config) (err error) {
 
 	globalCompressConfig, err = compress.LookupConfig(s[config.CompressionSubSys][config.Default])
 	if err != nil {
-		return config.Errorf("Unable to setup Compression: %s", err)
+		return fmt.Errorf("Unable to setup Compression: %w", err)
 	}
 
 	globalOpenIDConfig, err = openid.LookupConfig(s[config.IdentityOpenIDSubSys][config.Default],
 		NewCustomHTTPTransport(), xhttp.DrainBody)
 	if err != nil {
-		return config.Errorf("Unable to initialize OpenID: %s", err)
+		return fmt.Errorf("Unable to initialize OpenID: %w", err)
 	}
 
 	opaCfg, err := opa.LookupConfig(s[config.PolicyOPASubSys][config.Default],
 		NewCustomHTTPTransport(), xhttp.DrainBody)
 	if err != nil {
-		return config.Errorf("Unable to initialize OPA: %s", err)
+		return fmt.Errorf("Unable to initialize OPA: %w", err)
 	}
 
 	globalOpenIDValidators = getOpenIDValidators(globalOpenIDConfig)
@@ -229,7 +385,7 @@ func lookupConfigs(s config.Config) (err error) {
 	globalLDAPConfig, err = xldap.Lookup(s[config.IdentityLDAPSubSys][config.Default],
 		globalRootCAs)
 	if err != nil {
-		return config.Errorf("Unable to parse LDAP configuration: %s", err)
+		return fmt.Errorf("Unable to parse LDAP configuration: %w", err)
 	}
 
 	// Load logger targets based on user's configuration
@@ -237,7 +393,7 @@ func lookupConfigs(s config.Config) (err error) {
 
 	loggerCfg, err := logger.LookupConfig(s)
 	if err != nil {
-		return config.Errorf("Unable to initialize logger: %s", err)
+		return fmt.Errorf("Unable to initialize logger: %w", err)
 	}
 
 	for _, l := range loggerCfg.HTTP {
@@ -260,115 +416,77 @@ func lookupConfigs(s config.Config) (err error) {
 	return nil
 }
 
-var helpMap = map[string]config.HelpKV{
-	config.RegionSubSys:          config.RegionHelp,
-	config.WormSubSys:            config.WormHelp,
-	config.EtcdSubSys:            etcd.Help,
-	config.CacheSubSys:           cache.Help,
-	config.CompressionSubSys:     compress.Help,
-	config.StorageClassSubSys:    storageclass.Help,
-	config.IdentityOpenIDSubSys:  openid.Help,
-	config.IdentityLDAPSubSys:    xldap.Help,
-	config.PolicyOPASubSys:       opa.Help,
-	config.KmsVaultSubSys:        crypto.Help,
-	config.LoggerHTTPSubSys:      logger.Help,
-	config.LoggerHTTPAuditSubSys: logger.HelpAudit,
-	config.NotifyAMQPSubSys:      notify.HelpAMQP,
-	config.NotifyKafkaSubSys:     notify.HelpKafka,
-	config.NotifyMQTTSubSys:      notify.HelpMQTT,
-	config.NotifyNATSSubSys:      notify.HelpNATS,
-	config.NotifyNSQSubSys:       notify.HelpNSQ,
-	config.NotifyMySQLSubSys:     notify.HelpMySQL,
-	config.NotifyPostgresSubSys:  notify.HelpPostgres,
-	config.NotifyRedisSubSys:     notify.HelpRedis,
-	config.NotifyWebhookSubSys:   notify.HelpWebhook,
-	config.NotifyESSubSys:        notify.HelpES,
+// Help - return sub-system level help
+type Help struct {
+	SubSys          string         `json:"subSys"`
+	Description     string         `json:"description"`
+	MultipleTargets bool           `json:"multipleTargets"`
+	KeysHelp        config.HelpKVS `json:"keysHelp"`
 }
 
 // GetHelp - returns help for sub-sys, a key for a sub-system or all the help.
-func GetHelp(subSys, key string, envOnly bool) (config.HelpKV, error) {
+func GetHelp(subSys, key string, envOnly bool) (Help, error) {
 	if len(subSys) == 0 {
-		return nil, config.Error("no help available for empty sub-system inputs")
+		return Help{KeysHelp: config.HelpSubSysMap[subSys]}, nil
 	}
 	subSystemValue := strings.SplitN(subSys, config.SubSystemSeparator, 2)
 	if len(subSystemValue) == 0 {
-		return nil, config.Errorf("invalid number of arguments %s", subSys)
+		return Help{}, config.Errorf(
+			config.SafeModeKind,
+			"invalid number of arguments %s", subSys)
 	}
 
-	if !config.SubSystems.Contains(subSystemValue[0]) {
-		return nil, config.Errorf("unknown sub-system %s", subSys)
+	subSys = subSystemValue[0]
+
+	subSysHelp, ok := config.HelpSubSysMap[""].Lookup(subSys)
+	if !ok {
+		return Help{}, config.Errorf(
+			config.SafeModeKind,
+			"unknown sub-system %s", subSys)
 	}
 
-	help := helpMap[subSystemValue[0]]
+	h, ok := config.HelpSubSysMap[subSys]
+	if !ok {
+		return Help{}, config.Errorf(
+			config.SafeModeKind,
+			"unknown sub-system %s", subSys)
+	}
 	if key != "" {
-		value, ok := help[key]
+		value, ok := h.Lookup(key)
 		if !ok {
-			return nil, config.Errorf("unknown key %s for sub-system %s", key, subSys)
-		}
-		help = config.HelpKV{
-			key: value,
+			return Help{}, config.Errorf(
+				config.SafeModeKind,
+				"unknown key %s for sub-system %s", key, subSys)
 		}
+		h = config.HelpKVS{value}
 	}
 
-	envHelp := config.HelpKV{}
+	envHelp := config.HelpKVS{}
 	if envOnly {
-		for k, v := range help {
+		for _, hkv := range h {
 			envK := config.EnvPrefix + strings.Join([]string{
-				strings.ToTitle(subSys), strings.ToTitle(k),
+				strings.ToTitle(subSys), strings.ToTitle(hkv.Key),
 			}, config.EnvWordDelimiter)
-			envHelp[envK] = v
+			envHelp = append(envHelp, config.HelpKV{
+				Key:         envK,
+				Description: hkv.Description,
+				Optional:    hkv.Optional,
+				Type:        hkv.Type,
+			})
 		}
-		help = envHelp
+		h = envHelp
 	}
 
-	return help, nil
-}
-
-func configDefaultKVS() map[string]config.KVS {
-	m := make(map[string]config.KVS)
-	for k, tgt := range newServerConfig() {
-		m[k] = tgt[config.Default]
-	}
-	return m
+	return Help{
+		SubSys:          subSys,
+		Description:     subSysHelp.Description,
+		MultipleTargets: subSysHelp.MultipleTargets,
+		KeysHelp:        h,
+	}, nil
 }
 
 func newServerConfig() config.Config {
-	srvCfg := config.New()
-	for k := range srvCfg {
-		// Initialize with default KVS
-		switch k {
-		case config.EtcdSubSys:
-			srvCfg[k][config.Default] = etcd.DefaultKVS
-		case config.CacheSubSys:
-			srvCfg[k][config.Default] = cache.DefaultKVS
-		case config.CompressionSubSys:
-			srvCfg[k][config.Default] = compress.DefaultKVS
-		case config.StorageClassSubSys:
-			srvCfg[k][config.Default] = storageclass.DefaultKVS
-		case config.IdentityLDAPSubSys:
-			srvCfg[k][config.Default] = xldap.DefaultKVS
-		case config.IdentityOpenIDSubSys:
-			srvCfg[k][config.Default] = openid.DefaultKVS
-		case config.PolicyOPASubSys:
-			srvCfg[k][config.Default] = opa.DefaultKVS
-		case config.WormSubSys:
-			srvCfg[k][config.Default] = config.DefaultWormKVS
-		case config.RegionSubSys:
-			srvCfg[k][config.Default] = config.DefaultRegionKVS
-		case config.CredentialsSubSys:
-			srvCfg[k][config.Default] = config.DefaultCredentialKVS
-		case config.KmsVaultSubSys:
-			srvCfg[k][config.Default] = crypto.DefaultKVS
-		case config.LoggerHTTPSubSys:
-			srvCfg[k][config.Default] = logger.DefaultKVS
-		case config.LoggerHTTPAuditSubSys:
-			srvCfg[k][config.Default] = logger.DefaultAuditKVS
-		}
-	}
-	for k, v := range notify.DefaultNotificationKVS {
-		srvCfg[k][config.Default] = v
-	}
-	return srvCfg
+	return config.New()
 }
 
 // newSrvConfig - initialize a new server config, saves env parameters if
@@ -388,24 +506,11 @@ func newSrvConfig(objAPI ObjectLayer) error {
 	globalServerConfigMu.Unlock()
 
 	// Save config into file.
-	return saveServerConfig(context.Background(), objAPI, globalServerConfig, nil)
+	return saveServerConfig(context.Background(), objAPI, globalServerConfig)
 }
 
 func getValidConfig(objAPI ObjectLayer) (config.Config, error) {
-	srvCfg, err := readServerConfig(context.Background(), objAPI)
-	if err != nil {
-		return nil, err
-	}
-	defaultKVS := configDefaultKVS()
-	for _, k := range config.SubSystems.ToSlice() {
-		_, ok := srvCfg[k][config.Default]
-		if !ok {
-			// Populate default configs for any new
-			// sub-systems added automatically.
-			srvCfg[k][config.Default] = defaultKVS[k]
-		}
-	}
-	return srvCfg, nil
+	return readServerConfig(context.Background(), objAPI)
}
 
 // loadConfig - loads a new config from disk, overrides params
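Note: the globalIsXL branch in the new initHelp uses the append/copy idiom to insert the storage-class entry at position 1 of helpSubSys while preserving the order of everything after it. A self-contained sketch of that slice-insertion idiom with placeholder strings (Go's copy handles overlapping slices safely, like memmove):

package main

import "fmt"

func main() {
	help := []string{"region", "cache", "compression"}

	// Insert "storage_class" at index 1: grow by one, shift the tail
	// right with copy, then overwrite the freed slot.
	help = append(help, "")
	copy(help[2:], help[1:])
	help[1] = "storage_class"

	fmt.Println(help) // [region storage_class cache compression]
}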
2
vendor/github.com/minio/minio/cmd/config-current_test.go
generated
vendored
@@ -49,7 +49,7 @@ func TestServerConfig(t *testing.T) {
 		t.Errorf("Expecting region `us-west-1` found %s", globalServerRegion)
 	}
 
-	if err := saveServerConfig(context.Background(), objLayer, globalServerConfig, nil); err != nil {
+	if err := saveServerConfig(context.Background(), objLayer, globalServerConfig); err != nil {
 		t.Fatalf("Unable to save updated config file %s", err)
 	}
 
14
vendor/github.com/minio/minio/cmd/config-encrypted.go
generated
vendored
@@ -44,20 +44,6 @@ func handleEncryptedConfigBackend(objAPI ObjectLayer, server bool) error {
 	var encrypted bool
 	var err error
 
-	// Construct path to config/transaction.lock for locking
-	transactionConfigPrefix := minioConfigPrefix + "/transaction.lock"
-
-	// Make sure to hold lock for entire migration to avoid
-	// such that only one server should migrate the entire config
-	// at a given time, this big transaction lock ensures this
-	// appropriately. This is also true for rotation of encrypted
-	// content.
-	objLock := globalNSMutex.NewNSLock(context.Background(), minioMetaBucket, transactionConfigPrefix)
-	if err := objLock.GetLock(globalOperationTimeout); err != nil {
-		return err
-	}
-	defer objLock.Unlock()
-
 	// Migrating Config backend needs a retry mechanism for
 	// the following reasons:
 	//  - Read quorum is lost just after the initialization
149
vendor/github.com/minio/minio/cmd/config-migrate.go
generated
vendored
@@ -246,7 +246,7 @@ func purgeV1() error {
 	if os.IsNotExist(err) || os.IsPermission(err) {
 		return nil
 	} else if err != nil {
-		return fmt.Errorf("Unable to load config version ‘1’. %v", err)
+		return fmt.Errorf("Unable to load config version ‘1’. %w", err)
 	}
 	if cv1.Version != "1" {
 		return fmt.Errorf("unrecognized config version ‘%s’", cv1.Version)
@@ -267,7 +267,7 @@ func migrateV2ToV3() error {
 	if os.IsNotExist(err) || os.IsPermission(err) {
 		return nil
 	} else if err != nil {
-		return fmt.Errorf("Unable to load config version ‘2’. %v", err)
+		return fmt.Errorf("Unable to load config version ‘2’. %w", err)
 	}
 	if cv2.Version != "2" {
 		return nil
@@ -275,7 +275,7 @@ func migrateV2ToV3() error {
 
 	cred, err := auth.CreateCredentials(cv2.Credentials.AccessKey, cv2.Credentials.SecretKey)
 	if err != nil {
-		return fmt.Errorf("Invalid credential in V2 configuration file. %v", err)
+		return fmt.Errorf("Invalid credential in V2 configuration file. %w", err)
 	}
 
 	srvConfig := &configV3{}
@@ -308,7 +308,7 @@ func migrateV2ToV3() error {
 	srvConfig.Logger.Syslog = slogger
 
 	if err = Save(configFile, srvConfig); err != nil {
-		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %v", cv2.Version, srvConfig.Version, err)
+		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %w", cv2.Version, srvConfig.Version, err)
 	}
 
 	logger.Info(configMigrateMSGTemplate, configFile, cv2.Version, srvConfig.Version)
@@ -326,7 +326,7 @@ func migrateV3ToV4() error {
 	if os.IsNotExist(err) || os.IsPermission(err) {
 		return nil
 	} else if err != nil {
-		return fmt.Errorf("Unable to load config version ‘3’. %v", err)
+		return fmt.Errorf("Unable to load config version ‘3’. %w", err)
 	}
 	if cv3.Version != "3" {
 		return nil
@@ -346,7 +346,7 @@ func migrateV3ToV4() error {
 	srvConfig.Logger.Syslog = cv3.Logger.Syslog
 
 	if err = Save(configFile, srvConfig); err != nil {
-		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %v", cv3.Version, srvConfig.Version, err)
+		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %w", cv3.Version, srvConfig.Version, err)
 	}
 
 	logger.Info(configMigrateMSGTemplate, configFile, cv3.Version, srvConfig.Version)
@@ -364,7 +364,7 @@ func migrateV4ToV5() error {
 	if os.IsNotExist(err) || os.IsPermission(err) {
 		return nil
 	} else if err != nil {
-		return fmt.Errorf("Unable to load config version ‘4’. %v", err)
+		return fmt.Errorf("Unable to load config version ‘4’. %w", err)
 	}
 	if cv4.Version != "4" {
 		return nil
@@ -387,7 +387,7 @@ func migrateV4ToV5() error {
 	srvConfig.Logger.Redis.Enable = false
 
 	if err = Save(configFile, srvConfig); err != nil {
-		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %v", cv4.Version, srvConfig.Version, err)
+		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %w", cv4.Version, srvConfig.Version, err)
 	}
 
 	logger.Info(configMigrateMSGTemplate, configFile, cv4.Version, srvConfig.Version)
@@ -405,7 +405,7 @@ func migrateV5ToV6() error {
 	if os.IsNotExist(err) || os.IsPermission(err) {
 		return nil
 	} else if err != nil {
-		return fmt.Errorf("Unable to load config version ‘5’. %v", err)
+		return fmt.Errorf("Unable to load config version ‘5’. %w", err)
 	}
 	if cv5.Version != "5" {
 		return nil
@@ -476,7 +476,7 @@ func migrateV5ToV6() error {
 	}
 
 	if err = Save(configFile, srvConfig); err != nil {
-		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %v", cv5.Version, srvConfig.Version, err)
+		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %w", cv5.Version, srvConfig.Version, err)
 	}
 
 	logger.Info(configMigrateMSGTemplate, configFile, cv5.Version, srvConfig.Version)
@@ -494,7 +494,7 @@ func migrateV6ToV7() error {
 	if os.IsNotExist(err) || os.IsPermission(err) {
 		return nil
 	} else if err != nil {
-		return fmt.Errorf("Unable to load config version ‘6’. %v", err)
+		return fmt.Errorf("Unable to load config version ‘6’. %w", err)
 	}
 	if cv6.Version != "6" {
 		return nil
@@ -532,7 +532,7 @@ func migrateV6ToV7() error {
 	}
 
 	if err = Save(configFile, srvConfig); err != nil {
-		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %v", cv6.Version, srvConfig.Version, err)
+		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %w", cv6.Version, srvConfig.Version, err)
 	}
 
 	logger.Info(configMigrateMSGTemplate, configFile, cv6.Version, srvConfig.Version)
@@ -550,7 +550,7 @@ func migrateV7ToV8() error {
 	if os.IsNotExist(err) || os.IsPermission(err) {
 		return nil
 	} else if err != nil {
-		return fmt.Errorf("Unable to load config version ‘7’. %v", err)
+		return fmt.Errorf("Unable to load config version ‘7’. %w", err)
 	}
 	if cv7.Version != "7" {
 		return nil
@@ -595,7 +595,7 @@ func migrateV7ToV8() error {
 	}
 
 	if err = Save(configFile, srvConfig); err != nil {
-		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %v", cv7.Version, srvConfig.Version, err)
+		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %w", cv7.Version, srvConfig.Version, err)
 	}
 
 	logger.Info(configMigrateMSGTemplate, configFile, cv7.Version, srvConfig.Version)
@@ -612,7 +612,7 @@ func migrateV8ToV9() error {
 	if os.IsNotExist(err) || os.IsPermission(err) {
 		return nil
 	} else if err != nil {
-		return fmt.Errorf("Unable to load config version ‘8’. %v", err)
+		return fmt.Errorf("Unable to load config version ‘8’. %w", err)
 	}
 	if cv8.Version != "8" {
 		return nil
@@ -665,7 +665,7 @@ func migrateV8ToV9() error {
 	}
 
 	if err = Save(configFile, srvConfig); err != nil {
-		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %v", cv8.Version, srvConfig.Version, err)
+		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %w", cv8.Version, srvConfig.Version, err)
 	}
 
 	logger.Info(configMigrateMSGTemplate, configFile, cv8.Version, srvConfig.Version)
@@ -682,7 +682,7 @@ func migrateV9ToV10() error {
 	if os.IsNotExist(err) || os.IsPermission(err) {
 		return nil
 	} else if err != nil {
-		return fmt.Errorf("Unable to load config version ‘9’. %v", err)
+		return fmt.Errorf("Unable to load config version ‘9’. %w", err)
 	}
 	if cv9.Version != "9" {
 		return nil
@@ -733,7 +733,7 @@ func migrateV9ToV10() error {
 	}
 
 	if err = Save(configFile, srvConfig); err != nil {
-		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %v", cv9.Version, srvConfig.Version, err)
+		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %w", cv9.Version, srvConfig.Version, err)
 	}
 
 	logger.Info(configMigrateMSGTemplate, configFile, cv9.Version, srvConfig.Version)
@@ -750,7 +750,7 @@ func migrateV10ToV11() error {
 	if os.IsNotExist(err) || os.IsPermission(err) {
 		return nil
 	} else if err != nil {
-		return fmt.Errorf("Unable to load config version ‘10’. %v", err)
+		return fmt.Errorf("Unable to load config version ‘10’. %w", err)
 	}
 	if cv10.Version != "10" {
 		return nil
@@ -804,7 +804,7 @@ func migrateV10ToV11() error {
 	srvConfig.Notify.Kafka["1"] = target.KafkaArgs{}
 
 	if err = Save(configFile, srvConfig); err != nil {
-		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %v", cv10.Version, srvConfig.Version, err)
+		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %w", cv10.Version, srvConfig.Version, err)
 	}
 
 	logger.Info(configMigrateMSGTemplate, configFile, cv10.Version, srvConfig.Version)
@@ -821,7 +821,7 @@ func migrateV11ToV12() error {
 	if os.IsNotExist(err) || os.IsPermission(err) {
 		return nil
 	} else if err != nil {
-		return fmt.Errorf("Unable to load config version ‘11’. %v", err)
+		return fmt.Errorf("Unable to load config version ‘11’. %w", err)
 	}
 	if cv11.Version != "11" {
 		return nil
@@ -902,7 +902,7 @@ func migrateV11ToV12() error {
 	}
 
 	if err = Save(configFile, srvConfig); err != nil {
-		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %v", cv11.Version, srvConfig.Version, err)
+		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %w", cv11.Version, srvConfig.Version, err)
 	}
 
 	logger.Info(configMigrateMSGTemplate, configFile, cv11.Version, srvConfig.Version)
@@ -918,7 +918,7 @@ func migrateV12ToV13() error {
 	if os.IsNotExist(err) || os.IsPermission(err) {
 		return nil
 	} else if err != nil {
-		return fmt.Errorf("Unable to load config version ‘12’. %v", err)
+		return fmt.Errorf("Unable to load config version ‘12’. %w", err)
 	}
 	if cv12.Version != "12" {
 		return nil
@@ -982,7 +982,7 @@ func migrateV12ToV13() error {
 	srvConfig.Notify.Webhook["1"] = target.WebhookArgs{}
 
 	if err = Save(configFile, srvConfig); err != nil {
-		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %v", cv12.Version, srvConfig.Version, err)
+		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %w", cv12.Version, srvConfig.Version, err)
 	}
 
 	logger.Info(configMigrateMSGTemplate, configFile, cv12.Version, srvConfig.Version)
@@ -998,7 +998,7 @@ func migrateV13ToV14() error {
 	if os.IsNotExist(err) || os.IsPermission(err) {
 		return nil
 	} else if err != nil {
-		return fmt.Errorf("Unable to load config version ‘13’. %v", err)
+		return fmt.Errorf("Unable to load config version ‘13’. %w", err)
 	}
 	if cv13.Version != "13" {
 		return nil
@@ -1067,7 +1067,7 @@ func migrateV13ToV14() error {
 	srvConfig.Browser = true
 
 	if err = Save(configFile, srvConfig); err != nil {
-		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %v", cv13.Version, srvConfig.Version, err)
+		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %w", cv13.Version, srvConfig.Version, err)
 	}
 
 	logger.Info(configMigrateMSGTemplate, configFile, cv13.Version, srvConfig.Version)
@@ -1083,7 +1083,7 @@ func migrateV14ToV15() error {
 	if os.IsNotExist(err) || os.IsPermission(err) {
 		return nil
 	} else if err != nil {
-		return fmt.Errorf("Unable to load config version ‘14’. %v", err)
+		return fmt.Errorf("Unable to load config version ‘14’. %w", err)
 	}
 	if cv14.Version != "14" {
 		return nil
@@ -1156,7 +1156,7 @@ func migrateV14ToV15() error {
 	srvConfig.Browser = cv14.Browser
 
 	if err = Save(configFile, srvConfig); err != nil {
-		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %v", cv14.Version, srvConfig.Version, err)
+		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %w", cv14.Version, srvConfig.Version, err)
 	}
 
 	logger.Info(configMigrateMSGTemplate, configFile, cv14.Version, srvConfig.Version)
@@ -1173,7 +1173,7 @@ func migrateV15ToV16() error {
 	if os.IsNotExist(err) || os.IsPermission(err) {
 		return nil
 	} else if err != nil {
-		return fmt.Errorf("Unable to load config version ‘15’. %v", err)
+		return fmt.Errorf("Unable to load config version ‘15’. %w", err)
 	}
 	if cv15.Version != "15" {
 		return nil
@@ -1246,7 +1246,7 @@ func migrateV15ToV16() error {
 	srvConfig.Browser = cv15.Browser
 
 	if err = Save(configFile, srvConfig); err != nil {
-		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %v", cv15.Version, srvConfig.Version, err)
+		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %w", cv15.Version, srvConfig.Version, err)
 	}
 
 	logger.Info(configMigrateMSGTemplate, configFile, cv15.Version, srvConfig.Version)
@@ -1263,7 +1263,7 @@ func migrateV16ToV17() error {
 	if os.IsNotExist(err) || os.IsPermission(err) {
 		return nil
 	} else if err != nil {
-		return fmt.Errorf("Unable to load config version ‘16’. %v", err)
+		return fmt.Errorf("Unable to load config version ‘16’. %w", err)
 	}
 	if cv16.Version != "16" {
 		return nil
@@ -1367,7 +1367,7 @@ func migrateV16ToV17() error {
 	srvConfig.Browser = cv16.Browser
 
 	if err = Save(configFile, srvConfig); err != nil {
-		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %v", cv16.Version, srvConfig.Version, err)
+		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %w", cv16.Version, srvConfig.Version, err)
 	}
 
 	logger.Info(configMigrateMSGTemplate, configFile, cv16.Version, srvConfig.Version)
@@ -1384,7 +1384,7 @@ func migrateV17ToV18() error {
 	if os.IsNotExist(err) || os.IsPermission(err) {
 		return nil
 	} else if err != nil {
-		return fmt.Errorf("Unable to load config version ‘17’. %v", err)
+		return fmt.Errorf("Unable to load config version ‘17’. %w", err)
 	}
 	if cv17.Version != "17" {
 		return nil
@@ -1471,7 +1471,7 @@ func migrateV17ToV18() error {
 	srvConfig.Browser = cv17.Browser
 
 	if err = Save(configFile, srvConfig); err != nil {
-		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %v", cv17.Version, srvConfig.Version, err)
+		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %w", cv17.Version, srvConfig.Version, err)
 	}
 
 	logger.Info(configMigrateMSGTemplate, configFile, cv17.Version, srvConfig.Version)
@@ -1486,7 +1486,7 @@ func migrateV18ToV19() error {
 	if os.IsNotExist(err) || os.IsPermission(err) {
 		return nil
 	} else if err != nil {
-		return fmt.Errorf("Unable to load config version ‘18’. %v", err)
+		return fmt.Errorf("Unable to load config version ‘18’. %w", err)
 	}
 	if cv18.Version != "18" {
 		return nil
@@ -1577,7 +1577,7 @@ func migrateV18ToV19() error {
 	srvConfig.Browser = cv18.Browser
 
 	if err = Save(configFile, srvConfig); err != nil {
-		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %v", cv18.Version, srvConfig.Version, err)
+		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %w", cv18.Version, srvConfig.Version, err)
	}
 
 	logger.Info(configMigrateMSGTemplate, configFile, cv18.Version, srvConfig.Version)
@@ -1592,7 +1592,7 @@ func migrateV19ToV20() error {
 	if os.IsNotExist(err) || os.IsPermission(err) {
 		return nil
 	} else if err != nil {
-		return fmt.Errorf("Unable to load config version ‘18’. %v", err)
+		return fmt.Errorf("Unable to load config version ‘18’. %w", err)
 	}
 	if cv19.Version != "19" {
 		return nil
@@ -1682,7 +1682,7 @@ func migrateV19ToV20() error {
 	srvConfig.Browser = cv19.Browser
 
 	if err = Save(configFile, srvConfig); err != nil {
-		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %v", cv19.Version, srvConfig.Version, err)
+		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %w", cv19.Version, srvConfig.Version, err)
 	}
 
 	logger.Info(configMigrateMSGTemplate, configFile, cv19.Version, srvConfig.Version)
@@ -1697,7 +1697,7 @@ func migrateV20ToV21() error {
 	if os.IsNotExist(err) || os.IsPermission(err) {
 		return nil
 	} else if err != nil {
-		return fmt.Errorf("Unable to load config version ‘20’. %v", err)
+		return fmt.Errorf("Unable to load config version ‘20’. %w", err)
 	}
 	if cv20.Version != "20" {
 		return nil
@@ -1786,7 +1786,7 @@ func migrateV20ToV21() error {
 	srvConfig.Domain = cv20.Domain
 
 	if err = Save(configFile, srvConfig); err != nil {
-		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %v", cv20.Version, srvConfig.Version, err)
+		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %w", cv20.Version, srvConfig.Version, err)
 	}
 
 	logger.Info(configMigrateMSGTemplate, configFile, cv20.Version, srvConfig.Version)
@@ -1801,7 +1801,7 @@ func migrateV21ToV22() error {
 	if os.IsNotExist(err) || os.IsPermission(err) {
 		return nil
 	} else if err != nil {
-		return fmt.Errorf("Unable to load config version ‘21’. %v", err)
+		return fmt.Errorf("Unable to load config version ‘21’. %w", err)
 	}
 	if cv21.Version != "21" {
 		return nil
@@ -1890,7 +1890,7 @@ func migrateV21ToV22() error {
 	srvConfig.Domain = cv21.Domain
 
 	if err = Save(configFile, srvConfig); err != nil {
-		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %v", cv21.Version, srvConfig.Version, err)
+		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %w", cv21.Version, srvConfig.Version, err)
 	}
 
 	logger.Info(configMigrateMSGTemplate, configFile, cv21.Version, srvConfig.Version)
@@ -1905,7 +1905,7 @@ func migrateV22ToV23() error {
 	if os.IsNotExist(err) || os.IsPermission(err) {
 		return nil
 	} else if err != nil {
-		return fmt.Errorf("Unable to load config version ‘22’. %v", err)
+		return fmt.Errorf("Unable to load config version ‘22’. %w", err)
 	}
 	if cv22.Version != "22" {
 		return nil
@@ -2003,7 +2003,7 @@ func migrateV22ToV23() error {
 	srvConfig.Cache.Expiry = 90
 
 	if err = Save(configFile, srvConfig); err != nil {
-		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %v", cv22.Version, srvConfig.Version, err)
+		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %w", cv22.Version, srvConfig.Version, err)
 	}
 
 	logger.Info(configMigrateMSGTemplate, configFile, cv22.Version, srvConfig.Version)
@@ -2018,7 +2018,7 @@ func migrateV23ToV24() error {
 	if os.IsNotExist(err) || os.IsPermission(err) {
 		return nil
 	} else if err != nil {
-		return fmt.Errorf("Unable to load config version ‘23’. %v", err)
+		return fmt.Errorf("Unable to load config version ‘23’. %w", err)
 	}
 	if cv23.Version != "23" {
 		return nil
@@ -2116,7 +2116,7 @@ func migrateV23ToV24() error {
 	srvConfig.Cache.Expiry = cv23.Cache.Expiry
 
 	if err = quick.SaveConfig(srvConfig, configFile, globalEtcdClient); err != nil {
-		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %v", cv23.Version, srvConfig.Version, err)
+		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %w", cv23.Version, srvConfig.Version, err)
 	}
 
 	logger.Info(configMigrateMSGTemplate, configFile, cv23.Version, srvConfig.Version)
@@ -2131,7 +2131,7 @@ func migrateV24ToV25() error {
 	if os.IsNotExist(err) || os.IsPermission(err) {
 		return nil
 	} else if err != nil {
-		return fmt.Errorf("Unable to load config version ‘24’. %v", err)
+		return fmt.Errorf("Unable to load config version ‘24’. %w", err)
 	}
 	if cv24.Version != "24" {
 		return nil
@@ -2234,7 +2234,7 @@ func migrateV24ToV25() error {
 	srvConfig.Cache.Expiry = cv24.Cache.Expiry
 
 	if err = quick.SaveConfig(srvConfig, configFile, globalEtcdClient); err != nil {
-		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %v", cv24.Version, srvConfig.Version, err)
+		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %w", cv24.Version, srvConfig.Version, err)
 	}
 
 	logger.Info(configMigrateMSGTemplate, configFile, cv24.Version, srvConfig.Version)
@@ -2249,7 +2249,7 @@ func migrateV25ToV26() error {
 	if os.IsNotExist(err) || os.IsPermission(err) {
 		return nil
 	} else if err != nil {
-		return fmt.Errorf("Unable to load config version ‘25’. %v", err)
+		return fmt.Errorf("Unable to load config version ‘25’. %w", err)
 	}
 	if cv25.Version != "25" {
 		return nil
@@ -2353,7 +2353,7 @@ func migrateV25ToV26() error {
 	srvConfig.Cache.MaxUse = 80
 
 	if err = quick.SaveConfig(srvConfig, configFile, globalEtcdClient); err != nil {
-		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %v", cv25.Version, srvConfig.Version, err)
+		return fmt.Errorf("Failed to migrate config from ‘%s’ to ‘%s’. %w", cv25.Version, srvConfig.Version, err)
 	}
 
 	logger.Info(configMigrateMSGTemplate, configFile, cv25.Version, srvConfig.Version)
@@ -2371,7 +2371,7 @@ func migrateV26ToV27() error {
 	if os.IsNotExist(err) || os.IsPermission(err) {
 		return nil
 	} else if err != nil {
-		return fmt.Errorf("Unable to load config file. %v", err)
+		return fmt.Errorf("Unable to load config file. %w", err)
 	}
 
 	if srvConfig.Version != "26" {
@@ -2386,7 +2386,7 @@ func migrateV26ToV27() error {
 	srvConfig.Logger.HTTP["1"] = logger.HTTP{}
 
 	if err = quick.SaveConfig(srvConfig, configFile, globalEtcdClient); err != nil {
-		return fmt.Errorf("Failed to migrate config from ‘26’ to ‘27’. %v", err)
+		return fmt.Errorf("Failed to migrate config from ‘26’ to ‘27’. %w", err)
 	}
 
 	logger.Info(configMigrateMSGTemplate, configFile, "26", "27")
@@ -2404,7 +2404,7 @@ func migrateV27ToV28() error {
 	if os.IsNotExist(err) || os.IsPermission(err) {
 		return nil
 	} else if err != nil {
-		return fmt.Errorf("Unable to load config file. %v", err)
+		return fmt.Errorf("Unable to load config file. %w", err)
 	}
 
 	if srvConfig.Version != "27" {
@@ -2414,7 +2414,7 @@ func migrateV27ToV28() error {
 	srvConfig.Version = "28"
 	srvConfig.KMS = crypto.KMSConfig{}
 	if err = quick.SaveConfig(srvConfig, configFile, globalEtcdClient); err != nil {
-		return fmt.Errorf("Failed to migrate config from ‘27’ to ‘28’. %v", err)
+		return fmt.Errorf("Failed to migrate config from ‘27’ to ‘28’. %w", err)
 	}
 
 	logger.Info(configMigrateMSGTemplate, configFile, "27", "28")
@@ -2466,7 +2466,7 @@ func migrateConfigToMinioSys(objAPI ObjectLayer) (err error) {
 		// Initialize the server config, if no config exists.
 		return newSrvConfig(objAPI)
 	}
-	return saveServerConfig(context.Background(), objAPI, config, nil)
+	return saveServerConfig(context.Background(), objAPI, config)
 }
 
 // Migrates '.minio.sys/config.json' to v33.
@@ -2534,7 +2534,7 @@ func migrateV27ToV28MinioSys(objAPI ObjectLayer) error {
 	if err == errConfigNotFound {
 		return nil
 	} else if err != nil {
-		return fmt.Errorf("Unable to load config file. %v", err)
+		return fmt.Errorf("Unable to load config file. %w", err)
 	}
 	if !ok {
 		return nil
@@ -2548,8 +2548,8 @@ func migrateV27ToV28MinioSys(objAPI ObjectLayer) error {
 	cfg.Version = "28"
 	cfg.KMS = crypto.KMSConfig{}
 
-	if err = saveServerConfig(context.Background(), objAPI, cfg, nil); err != nil {
-		return fmt.Errorf("Failed to migrate config from ‘27’ to ‘28’. %v", err)
+	if err = saveServerConfig(context.Background(), objAPI, cfg); err != nil {
+		return fmt.Errorf("Failed to migrate config from ‘27’ to ‘28’. %w", err)
 	}
 
 	logger.Info(configMigrateMSGTemplate, configFile, "27", "28")
@@ -2563,7 +2563,7 @@ func migrateV28ToV29MinioSys(objAPI ObjectLayer) error {
 	if err == errConfigNotFound {
 		return nil
 	} else if err != nil {
-		return fmt.Errorf("Unable to load config file. %v", err)
+		return fmt.Errorf("Unable to load config file. %w", err)
 	}
 	if !ok {
 		return nil
@@ -2575,8 +2575,8 @@ func migrateV28ToV29MinioSys(objAPI ObjectLayer) error {
 	}
 
 	cfg.Version = "29"
-	if err = saveServerConfig(context.Background(), objAPI, cfg, nil); err != nil {
-		return fmt.Errorf("Failed to migrate config from ‘28’ to ‘29’. %v", err)
+	if err = saveServerConfig(context.Background(), objAPI, cfg); err != nil {
+		return fmt.Errorf("Failed to migrate config from ‘28’ to ‘29’. %w", err)
 	}
 
 	logger.Info(configMigrateMSGTemplate, configFile, "28", "29")
@@ -2590,7 +2590,7 @@ func migrateV29ToV30MinioSys(objAPI ObjectLayer) error {
 	if err == errConfigNotFound {
 		return nil
 	} else if err != nil {
-		return fmt.Errorf("Unable to load config file. %v", err)
+		return fmt.Errorf("Unable to load config file. %w", err)
 	}
 	if !ok {
 		return nil
@@ -2607,8 +2607,8 @@ func migrateV29ToV30MinioSys(objAPI ObjectLayer) error {
 	cfg.Compression.Extensions = strings.Split(compress.DefaultExtensions, config.ValueSeparator)
 	cfg.Compression.MimeTypes = strings.Split(compress.DefaultMimeTypes, config.ValueSeparator)
 
-	if err = saveServerConfig(context.Background(), objAPI, cfg, nil); err != nil {
-		return fmt.Errorf("Failed to migrate config from ‘29’ to ‘30’. %v", err)
+	if err = saveServerConfig(context.Background(), objAPI, cfg); err != nil {
+		return fmt.Errorf("Failed to migrate config from ‘29’ to ‘30’. %w", err)
 	}
 
 	logger.Info(configMigrateMSGTemplate, configFile, "29", "30")
@@ -2622,7 +2622,7 @@ func migrateV30ToV31MinioSys(objAPI ObjectLayer) error {
 	if err == errConfigNotFound {
 		return nil
 	} else if err != nil {
-		return fmt.Errorf("Unable to load config file. %v", err)
+		return fmt.Errorf("Unable to load config file. %w", err)
 	}
 	if !ok {
 		return nil
@@ -2642,8 +2642,8 @@ func migrateV30ToV31MinioSys(objAPI ObjectLayer) error {
 		AuthToken: "",
 	}
 
-	if err = saveServerConfig(context.Background(), objAPI, cfg, nil); err != nil {
-		return fmt.Errorf("Failed to migrate config from ‘30’ to ‘31’. %v", err)
+	if err = saveServerConfig(context.Background(), objAPI, cfg); err != nil {
+		return fmt.Errorf("Failed to migrate config from ‘30’ to ‘31’. %w", err)
 	}
 
 	logger.Info(configMigrateMSGTemplate, configFile, "30", "31")
@@ -2657,7 +2657,7 @@ func migrateV31ToV32MinioSys(objAPI ObjectLayer) error {
 	if err == errConfigNotFound {
 		return nil
 	} else if err != nil {
-		return fmt.Errorf("Unable to load config file. %v", err)
+		return fmt.Errorf("Unable to load config file. %w", err)
 	}
 	if !ok {
 		return nil
@@ -2672,8 +2672,8 @@ func migrateV31ToV32MinioSys(objAPI ObjectLayer) error {
 	cfg.Notify.NSQ = make(map[string]target.NSQArgs)
 	cfg.Notify.NSQ["1"] = target.NSQArgs{}
 
-	if err = saveServerConfig(context.Background(), objAPI, cfg, nil); err != nil {
-		return fmt.Errorf("Failed to migrate config from ‘31’ to ‘32’. %v", err)
+	if err = saveServerConfig(context.Background(), objAPI, cfg); err != nil {
+		return fmt.Errorf("Failed to migrate config from ‘31’ to ‘32’. %w", err)
	}
 
 	logger.Info(configMigrateMSGTemplate, configFile, "31", "32")
@@ -2687,7 +2687,7 @@ func migrateV32ToV33MinioSys(objAPI ObjectLayer) error {
 	if err == errConfigNotFound {
 		return nil
 	} else if err != nil {
-		return fmt.Errorf("Unable to load config file. %v", err)
+		return fmt.Errorf("Unable to load config file. %w", err)
 	}
 	if !ok {
 		return nil
@@ -2700,8 +2700,8 @@ func migrateV32ToV33MinioSys(objAPI ObjectLayer) error {
 
 	cfg.Version = "33"
 
-	if err = saveServerConfig(context.Background(), objAPI, cfg, nil); err != nil {
-		return fmt.Errorf("Failed to migrate config from 32 to 33 . %v", err)
+	if err = saveServerConfig(context.Background(), objAPI, cfg); err != nil {
+		return fmt.Errorf("Failed to migrate config from '32' to '33' . %w", err)
}
|
||||
|
||||
logger.Info(configMigrateMSGTemplate, configFile, "32", "33")
|
||||
|
@ -2729,7 +2729,6 @@ func migrateMinioSysConfigToKV(objAPI ObjectLayer) error {
|
|||
|
||||
config.SetCredentials(newCfg, cfg.Credential)
|
||||
config.SetRegion(newCfg, cfg.Region)
|
||||
config.SetWorm(newCfg, bool(cfg.Worm))
|
||||
|
||||
storageclass.SetStorageClass(newCfg, cfg.StorageClass)
|
||||
|
||||
|
@ -2778,7 +2777,7 @@ func migrateMinioSysConfigToKV(objAPI ObjectLayer) error {
|
|||
notify.SetNotifyWebhook(newCfg, k, args)
|
||||
}
|
||||
|
||||
if err = saveServerConfig(context.Background(), objAPI, newCfg, cfg); err != nil {
|
||||
if err = saveServerConfig(context.Background(), objAPI, newCfg); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
|
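Note: every migration step above switches its fmt.Errorf verb from %v to %w. The practical difference is that %w records the wrapped error in the error chain, so callers can still test for sentinel errors after a migration fails. A minimal sketch of the distinction; the errConfigNotFound sentinel here merely stands in for the one used in the surrounding code:

package main

import (
	"errors"
	"fmt"
)

// Hypothetical sentinel, standing in for errConfigNotFound above.
var errConfigNotFound = errors.New("config file not found")

func main() {
	opaque := fmt.Errorf("Unable to load config file. %v", errConfigNotFound)
	wrapped := fmt.Errorf("Unable to load config file. %w", errConfigNotFound)

	// %v flattens the cause into text; errors.Is cannot see through it.
	fmt.Println(errors.Is(opaque, errConfigNotFound)) // false

	// %w keeps the cause in the chain for errors.Is / errors.As.
	fmt.Println(errors.Is(wrapped, errConfigNotFound)) // true
}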
4 vendor/github.com/minio/minio/cmd/config-migrate_test.go generated vendored

@@ -220,12 +220,12 @@ func TestServerConfigMigrateV2toV33(t *testing.T) {
 	}
 
 	// Check if accessKey and secretKey are not altered during migration
-	caccessKey := globalServerConfig[config.CredentialsSubSys][config.Default][config.AccessKey]
+	caccessKey := globalServerConfig[config.CredentialsSubSys][config.Default].Get(config.AccessKey)
 	if caccessKey != accessKey {
 		t.Fatalf("Access key lost during migration, expected: %v, found:%v", accessKey, caccessKey)
 	}
 
-	csecretKey := globalServerConfig[config.CredentialsSubSys][config.Default][config.SecretKey]
+	csecretKey := globalServerConfig[config.CredentialsSubSys][config.Default].Get(config.SecretKey)
 	if csecretKey != secretKey {
 		t.Fatalf("Secret key lost during migration, expected: %v, found: %v", secretKey, csecretKey)
 	}
58 vendor/github.com/minio/minio/cmd/config.go generated vendored

@@ -129,49 +129,12 @@ func saveServerConfigHistory(ctx context.Context, objAPI ObjectLayer, kv []byte)
 	return saveConfig(ctx, objAPI, historyFile, kv)
 }
 
-func saveServerConfig(ctx context.Context, objAPI ObjectLayer, config interface{}, oldConfig interface{}) error {
+func saveServerConfig(ctx context.Context, objAPI ObjectLayer, config interface{}) error {
 	data, err := json.Marshal(config)
 	if err != nil {
 		return err
 	}
 
-	configFile := path.Join(minioConfigPrefix, minioConfigFile)
-	// Create a backup of the current config
-	backupConfigFile := path.Join(minioConfigPrefix, minioConfigBackupFile)
-
-	var oldData []byte
-	var freshConfig bool
-	if oldConfig == nil {
-		oldData, err = readConfig(ctx, objAPI, configFile)
-		if err != nil && err != errConfigNotFound {
-			return err
-		}
-		if err == errConfigNotFound {
-			// Current config not found, so nothing to backup.
-			freshConfig = true
-		}
-		// Do not need to decrypt oldData since we are going to
-		// save it anyway if freshConfig is false.
-	} else {
-		oldData, err = json.Marshal(oldConfig)
-		if err != nil {
-			return err
-		}
-		if globalConfigEncrypted {
-			oldData, err = madmin.EncryptData(globalActiveCred.String(), oldData)
-			if err != nil {
-				return err
-			}
-		}
-	}
-
-	// No need to take backups for fresh setups.
-	if !freshConfig {
-		if err = saveConfig(ctx, objAPI, backupConfigFile, oldData); err != nil {
-			return err
-		}
-	}
-
 	if globalConfigEncrypted {
 		data, err = madmin.EncryptData(globalActiveCred.String(), data)
 		if err != nil {
@@ -179,6 +142,7 @@ func saveServerConfig(ctx context.Context, objAPI ObjectLayer, config interface{
 		}
 	}
 
+	configFile := path.Join(minioConfigPrefix, minioConfigFile)
 	// Save the new config in the std config path
 	return saveConfig(ctx, objAPI, configFile, data)
 }
@@ -187,6 +151,11 @@ func readServerConfig(ctx context.Context, objAPI ObjectLayer) (config.Config, e
 	configFile := path.Join(minioConfigPrefix, minioConfigFile)
 	configData, err := readConfig(ctx, objAPI, configFile)
 	if err != nil {
+		// Config not found for some reason, allow things to continue
+		// by initializing a new fresh config in safe mode.
+		if err == errConfigNotFound && globalSafeMode {
+			return newServerConfig(), nil
+		}
 		return nil, err
 	}
 
@@ -287,19 +256,6 @@ func initConfig(objAPI ObjectLayer) error {
 		}
 	}
 
-	// Construct path to config/transaction.lock for locking
-	transactionConfigPrefix := minioConfigPrefix + "/transaction.lock"
-
-	// Hold lock only by one server and let that server alone migrate
-	// all the config as necessary, this is to ensure that
-	// redundant locks are not held for each migration - this allows
-	// for a more predictable behavior while debugging.
-	objLock := globalNSMutex.NewNSLock(context.Background(), minioMetaBucket, transactionConfigPrefix)
-	if err := objLock.GetLock(globalOperationTimeout); err != nil {
-		return err
-	}
-	defer objLock.Unlock()
-
 	// Migrates ${HOME}/.minio/config.json or config.json.deprecated
 	// to '<export_path>/.minio.sys/config/config.json'
 	// ignore if the file doesn't exist.
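Note: the readServerConfig hunk adds a safe-mode fallback: when no config object exists and the server is in safe mode, a fresh in-memory config is returned instead of an error. A reduced, self-contained sketch of that control flow; all names except the sentinel pattern are illustrative stand-ins:

package main

import (
	"errors"
	"fmt"
)

// Stands in for the errConfigNotFound sentinel used above.
var errConfigNotFound = errors.New("config not found")

// loadConfig mimics readServerConfig's fallback: an absent config plus
// safe mode yields a fresh default instead of an error.
func loadConfig(read func() (string, error), safeMode bool) (string, error) {
	cfg, err := read()
	if err != nil {
		if errors.Is(err, errConfigNotFound) && safeMode {
			return "{}", nil // fresh empty config, like newServerConfig()
		}
		return "", err
	}
	return cfg, nil
}

func main() {
	missing := func() (string, error) { return "", errConfigNotFound }
	cfg, err := loadConfig(missing, true)
	fmt.Println(cfg, err) // {} <nil>
}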
42 vendor/github.com/minio/minio/cmd/config/cache/config.go generated vendored

@@ -61,22 +61,26 @@ func (cfg *Config) UnmarshalJSON(data []byte) (err error) {
 		return errors.New("config quota value should not be null or negative")
 	}
 
 	if _, err = parseCacheDrives(_cfg.Drives); err != nil {
 		return err
 	}
 	if _, err = parseCacheExcludes(_cfg.Exclude); err != nil {
 		return err
 	}
 	return nil
 }
 
 // Parses given cacheDrivesEnv and returns a list of cache drives.
-func parseCacheDrives(drives []string) ([]string, error) {
+func parseCacheDrives(drives string) ([]string, error) {
+	var drivesSlice []string
 	if len(drives) == 0 {
-		return drives, nil
+		return drivesSlice, nil
 	}
 
+	drivesSlice = strings.Split(drives, cacheDelimiterLegacy)
+	if len(drivesSlice) == 1 && drivesSlice[0] == drives {
+		drivesSlice = strings.Split(drives, cacheDelimiter)
+	}
+
 	var endpoints []string
-	for _, d := range drives {
+	for _, d := range drivesSlice {
 		if len(d) == 0 {
 			return nil, config.ErrInvalidCacheDrivesValue(nil).Msg("cache dir cannot be an empty path")
 		}
 		if ellipses.HasEllipses(d) {
 			s, err := parseCacheDrivePaths(d)
 			if err != nil {
@@ -89,9 +93,6 @@ func parseCacheDrives(drives string) ([]string, error) {
 	}
 
 	for _, d := range endpoints {
-		if len(d) == 0 {
-			return nil, config.ErrInvalidCacheDrivesValue(nil).Msg("cache dir cannot be an empty path")
-		}
 		if !filepath.IsAbs(d) {
 			return nil, config.ErrInvalidCacheDrivesValue(nil).Msg("cache dir should be absolute path: %s", d)
 		}
@@ -114,8 +115,18 @@ func parseCacheDrivePaths(arg string) (ep []string, err error) {
 }
 
 // Parses given cacheExcludesEnv and returns a list of cache exclude patterns.
-func parseCacheExcludes(excludes []string) ([]string, error) {
-	for _, e := range excludes {
+func parseCacheExcludes(excludes string) ([]string, error) {
+	var excludesSlice []string
+	if len(excludes) == 0 {
+		return excludesSlice, nil
+	}
+
+	excludesSlice = strings.Split(excludes, cacheDelimiterLegacy)
+	if len(excludesSlice) == 1 && excludesSlice[0] == excludes {
+		excludesSlice = strings.Split(excludes, cacheDelimiter)
+	}
+
+	for _, e := range excludesSlice {
 		if len(e) == 0 {
 			return nil, config.ErrInvalidCacheExcludesValue(nil).Msg("cache exclude path (%s) cannot be empty", e)
 		}
@@ -123,5 +134,6 @@ func parseCacheExcludes(excludes []string) ([]string, error) {
 			return nil, config.ErrInvalidCacheExcludesValue(nil).Msg("cache exclude pattern (%s) cannot start with / as prefix", e)
 		}
 	}
-	return excludes, nil
+
+	return excludesSlice, nil
 }
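Note: parseCacheDrives and parseCacheExcludes now take the raw string and try the legacy ";" delimiter first, falling back to the new "," delimiter only when no ";" is present. A standalone sketch of that fallback split, with the two delimiter constants copied from the diff and the helper name invented for illustration:

package main

import (
	"fmt"
	"strings"
)

const (
	cacheDelimiterLegacy = ";"
	cacheDelimiter       = ","
)

// splitCacheList mirrors the fallback used above: if splitting on the
// legacy ";" produces no split (a single element equal to the input),
// retry with the current "," delimiter.
func splitCacheList(v string) []string {
	if len(v) == 0 {
		return nil
	}
	parts := strings.Split(v, cacheDelimiterLegacy)
	if len(parts) == 1 && parts[0] == v {
		parts = strings.Split(v, cacheDelimiter)
	}
	return parts
}

func main() {
	fmt.Println(splitCacheList("/drive1;/drive2")) // [/drive1 /drive2]
	fmt.Println(splitCacheList("/drive1,/drive2")) // [/drive1 /drive2]
}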
11 vendor/github.com/minio/minio/cmd/config/cache/config_test.go generated vendored

@@ -19,7 +19,6 @@ package cache
 import (
 	"reflect"
 	"runtime"
-	"strings"
 	"testing"
 )
 
@@ -61,6 +60,11 @@ func TestParseCacheDrives(t *testing.T) {
 		expectedPatterns []string
 		success          bool
 	}{"/home/drive1;/home/drive2;/home/drive3", []string{"/home/drive1", "/home/drive2", "/home/drive3"}, true})
+	testCases = append(testCases, struct {
+		driveStr         string
+		expectedPatterns []string
+		success          bool
+	}{"/home/drive1,/home/drive2,/home/drive3", []string{"/home/drive1", "/home/drive2", "/home/drive3"}, true})
 	testCases = append(testCases, struct {
 		driveStr         string
 		expectedPatterns []string
@@ -73,7 +77,7 @@ func TestParseCacheDrives(t *testing.T) {
 	}{"/home/drive{1..3}", []string{}, false})
 	}
 	for i, testCase := range testCases {
-		drives, err := parseCacheDrives(strings.Split(testCase.driveStr, cacheDelimiter))
+		drives, err := parseCacheDrives(testCase.driveStr)
 		if err != nil && testCase.success {
 			t.Errorf("Test %d: Expected success but failed instead %s", i+1, err)
 		}
@@ -102,11 +106,12 @@ func TestParseCacheExclude(t *testing.T) {
 
 		// valid input
 		{"bucket1/*;*.png;images/trip/barcelona/*", []string{"bucket1/*", "*.png", "images/trip/barcelona/*"}, true},
+		{"bucket1/*,*.png,images/trip/barcelona/*", []string{"bucket1/*", "*.png", "images/trip/barcelona/*"}, true},
 		{"bucket1", []string{"bucket1"}, true},
 	}
 
 	for i, testCase := range testCases {
-		excludes, err := parseCacheExcludes(strings.Split(testCase.excludeStr, cacheDelimiter))
+		excludes, err := parseCacheExcludes(testCase.excludeStr)
 		if err != nil && testCase.success {
 			t.Errorf("Test %d: Expected success but failed instead %s", i+1, err)
 		}
37 vendor/github.com/minio/minio/cmd/config/cache/help.go generated vendored

@@ -20,12 +20,35 @@ import "github.com/minio/minio/cmd/config"
 
 // Help template for caching feature.
 var (
-	Help = config.HelpKV{
-		Drives:         `List of mounted drives or directories delimited by ";"`,
-		Exclude:        `List of wildcard based cache exclusion patterns delimited by ";"`,
-		Expiry:         `Cache expiry duration in days. eg: "90"`,
-		Quota:          `Maximum permitted usage of the cache in percentage (0-100)`,
-		config.State:   "Indicates if caching is enabled or not",
-		config.Comment: "A comment to describe the caching setting",
+	Help = config.HelpKVS{
+		config.HelpKV{
+			Key:         Drives,
+			Description: `comma separated mountpoints e.g. "/optane1,/optane2"`,
+			Type:        "csv",
+		},
+		config.HelpKV{
+			Key:         Expiry,
+			Description: `cache expiry duration in days e.g. "90"`,
+			Optional:    true,
+			Type:        "number",
+		},
+		config.HelpKV{
+			Key:         Quota,
+			Description: `limit cache drive usage in percentage e.g. "90"`,
+			Optional:    true,
+			Type:        "number",
+		},
+		config.HelpKV{
+			Key:         Exclude,
+			Description: `comma separated wildcard exclusion patterns e.g. "bucket/*.tmp,*.exe"`,
+			Optional:    true,
+			Type:        "csv",
+		},
+		config.HelpKV{
+			Key:         config.Comment,
+			Description: config.DefaultComment,
+			Optional:    true,
+			Type:        "sentence",
+		},
 	}
 )
38 vendor/github.com/minio/minio/cmd/config/cache/legacy.go generated vendored

@@ -23,18 +23,32 @@ import (
 	"github.com/minio/minio/cmd/config"
 )
 
+const (
+	cacheDelimiterLegacy = ";"
+)
+
 // SetCacheConfig - One time migration code needed, for migrating from older config to new for Cache.
 func SetCacheConfig(s config.Config, cfg Config) {
-	s[config.CacheSubSys][config.Default] = DefaultKVS
-	s[config.CacheSubSys][config.Default][Drives] = strings.Join(cfg.Drives, cacheDelimiter)
-	s[config.CacheSubSys][config.Default][Exclude] = strings.Join(cfg.Exclude, cacheDelimiter)
-	s[config.CacheSubSys][config.Default][Expiry] = fmt.Sprintf("%d", cfg.Expiry)
-	s[config.CacheSubSys][config.Default][Quota] = fmt.Sprintf("%d", cfg.MaxUse)
-	s[config.CacheSubSys][config.Default][config.State] = func() string {
-		if len(cfg.Drives) > 0 {
-			return config.StateOn
-		}
-		return config.StateOff
-	}()
-	s[config.CacheSubSys][config.Default][config.Comment] = "Settings for Cache, after migrating config"
+	if len(cfg.Drives) == 0 {
+		// Do not save cache if no settings available.
+		return
+	}
+	s[config.CacheSubSys][config.Default] = config.KVS{
+		config.KV{
+			Key:   Drives,
+			Value: strings.Join(cfg.Drives, cacheDelimiter),
+		},
+		config.KV{
+			Key:   Exclude,
+			Value: strings.Join(cfg.Exclude, cacheDelimiter),
+		},
+		config.KV{
+			Key:   Expiry,
+			Value: fmt.Sprintf("%d", cfg.Expiry),
+		},
+		config.KV{
+			Key:   Quota,
+			Value: fmt.Sprintf("%d", cfg.MaxUse),
+		},
+	}
 }
48 vendor/github.com/minio/minio/cmd/config/cache/lookup.go generated vendored

@@ -19,7 +19,6 @@ package cache
 import (
 	"errors"
 	"strconv"
-	"strings"
 
 	"github.com/minio/minio/cmd/config"
 	"github.com/minio/minio/pkg/env"
@@ -33,7 +32,6 @@ const (
 	MaxUse = "maxuse"
 	Quota  = "quota"
 
-	EnvCacheState   = "MINIO_CACHE_STATE"
 	EnvCacheDrives  = "MINIO_CACHE_DRIVES"
 	EnvCacheExclude = "MINIO_CACHE_EXCLUDE"
 	EnvCacheExpiry  = "MINIO_CACHE_EXPIRY"
@@ -48,19 +46,35 @@ const (
 // DefaultKVS - default KV settings for caching.
 var (
 	DefaultKVS = config.KVS{
-		config.State:   config.StateOff,
-		config.Comment: "This is a default cache configuration, only applicable in gateway setups",
-		Drives:         "",
-		Exclude:        "",
-		Expiry:         DefaultExpiry,
-		Quota:          DefaultQuota,
+		config.KV{
+			Key:   Drives,
+			Value: "",
+		},
+		config.KV{
+			Key:   Exclude,
+			Value: "",
+		},
+		config.KV{
+			Key:   Expiry,
+			Value: DefaultExpiry,
+		},
+		config.KV{
+			Key:   Quota,
+			Value: DefaultQuota,
+		},
 	}
 )
 
 const (
-	cacheDelimiter = ";"
+	cacheDelimiter = ","
 )
 
+// Enabled returns if cache is enabled.
+func Enabled(kvs config.KVS) bool {
+	drives := kvs.Get(Drives)
+	return drives != ""
+}
+
 // LookupConfig - extracts cache configuration provided by environment
 // variables and merge them with provided CacheConfiguration.
 func LookupConfig(kvs config.KVS) (Config, error) {
@@ -70,30 +84,20 @@ func LookupConfig(kvs config.KVS) (Config, error) {
 		return cfg, err
 	}
 
-	// Check if cache is explicitly disabled
-	stateBool, err := config.ParseBool(env.Get(EnvCacheState, kvs.Get(config.State)))
-	if err != nil {
-		return cfg, err
-	}
-
 	drives := env.Get(EnvCacheDrives, kvs.Get(Drives))
-	if stateBool {
-		if len(drives) == 0 {
-			return cfg, config.Error("'drives' key cannot be empty if you wish to enable caching")
-		}
-	}
 	if len(drives) == 0 {
 		return cfg, nil
 	}
 
-	cfg.Drives, err = parseCacheDrives(strings.Split(drives, cacheDelimiter))
+	var err error
+	cfg.Drives, err = parseCacheDrives(drives)
 	if err != nil {
 		return cfg, err
 	}
 
 	cfg.Enabled = true
 	if excludes := env.Get(EnvCacheExclude, kvs.Get(Exclude)); excludes != "" {
-		cfg.Exclude, err = parseCacheExcludes(strings.Split(excludes, cacheDelimiter))
+		cfg.Exclude, err = parseCacheExcludes(excludes)
 		if err != nil {
 			return cfg, err
 		}
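Note: throughout LookupConfig the precedence is environment variable first, stored KV second, and the feature stays disabled when the resulting drives value is empty. A toy version of that lookup chain; getEnv is a simplified stand-in for minio's pkg/env helper, and the values are illustrative:

package main

import (
	"fmt"
	"os"
)

// getEnv is a simplified stand-in for env.Get: a non-empty environment
// variable wins, otherwise the value from the stored config is used.
func getEnv(key, storedValue string) string {
	if v, ok := os.LookupEnv(key); ok && v != "" {
		return v
	}
	return storedValue
}

func main() {
	stored := "/data/cache1" // value persisted in the cache sub-system KVS
	os.Setenv("MINIO_CACHE_DRIVES", "/mnt/fast1,/mnt/fast2")

	drives := getEnv("MINIO_CACHE_DRIVES", stored)
	if drives == "" {
		fmt.Println("cache disabled: no drives configured")
		return
	}
	fmt.Println("cache drives:", drives) // env value wins over stored value
}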
24 vendor/github.com/minio/minio/cmd/config/compress/compress.go generated vendored

@@ -36,7 +36,7 @@ const (
 	Extensions = "extensions"
 	MimeTypes  = "mime_types"
 
-	EnvCompressState      = "MINIO_COMPRESS_STATE"
+	EnvCompressState      = "MINIO_COMPRESS_ENABLE"
 	EnvCompressExtensions = "MINIO_COMPRESS_EXTENSIONS"
 	EnvCompressMimeTypes  = "MINIO_COMPRESS_MIME_TYPES"
 
@@ -48,10 +48,18 @@ const (
 // DefaultKVS - default KV config for compression settings
 var (
 	DefaultKVS = config.KVS{
-		config.State:   config.StateOff,
-		config.Comment: "This is a default compression configuration",
-		Extensions:     DefaultExtensions,
-		MimeTypes:      DefaultMimeTypes,
+		config.KV{
+			Key:   config.Enable,
+			Value: config.EnableOff,
+		},
+		config.KV{
+			Key:   Extensions,
+			Value: DefaultExtensions,
+		},
+		config.KV{
+			Key:   MimeTypes,
+			Value: DefaultMimeTypes,
+		},
 	}
 )
 
@@ -79,10 +87,14 @@ func LookupConfig(kvs config.KVS) (Config, error) {
 
 	compress := env.Get(EnvCompress, "")
 	if compress == "" {
-		compress = env.Get(EnvCompressState, kvs.Get(config.State))
+		compress = env.Get(EnvCompressState, kvs.Get(config.Enable))
 	}
 	cfg.Enabled, err = config.ParseBool(compress)
 	if err != nil {
+		// Parsing failures happen due to empty KVS, ignore it.
+		if kvs.Empty() {
+			return cfg, nil
+		}
 		return cfg, err
 	}
 	if !cfg.Enabled {
24 vendor/github.com/minio/minio/cmd/config/compress/help.go generated vendored

@@ -20,10 +20,24 @@ import "github.com/minio/minio/cmd/config"
 
 // Help template for compress feature.
 var (
-	Help = config.HelpKV{
-		Extensions:     `Comma separated file extensions to compress. eg: ".txt,.log,.csv"`,
-		MimeTypes:      `Comma separate wildcard mime-types to compress. eg: "text/*,application/json,application/xml"`,
-		config.State:   "Indicates if compression is enabled or not",
-		config.Comment: "A comment to describe the compression setting",
+	Help = config.HelpKVS{
+		config.HelpKV{
+			Key:         Extensions,
+			Description: `comma separated file extensions e.g. ".txt,.log,.csv"`,
+			Optional:    true,
+			Type:        "csv",
+		},
+		config.HelpKV{
+			Key:         MimeTypes,
+			Description: `comma separated wildcard mime-types e.g. "text/*,application/json,application/xml"`,
+			Optional:    true,
+			Type:        "csv",
+		},
+		config.HelpKV{
+			Key:         config.Comment,
+			Description: config.DefaultComment,
+			Optional:    true,
+			Type:        "sentence",
+		},
 	}
 )
25 vendor/github.com/minio/minio/cmd/config/compress/legacy.go generated vendored

@@ -30,15 +30,22 @@ const (
 
 // SetCompressionConfig - One time migration code needed, for migrating from older config to new for Compression.
 func SetCompressionConfig(s config.Config, cfg Config) {
+	if !cfg.Enabled {
+		// No need to save disabled settings in new config.
+		return
+	}
 	s[config.CompressionSubSys][config.Default] = config.KVS{
-		config.State: func() string {
-			if cfg.Enabled {
-				return config.StateOn
-			}
-			return config.StateOff
-		}(),
-		config.Comment: "Settings for Compression, after migrating config",
-		Extensions:     strings.Join(cfg.Extensions, config.ValueSeparator),
-		MimeTypes:      strings.Join(cfg.MimeTypes, config.ValueSeparator),
+		config.KV{
+			Key:   config.Enable,
+			Value: config.EnableOn,
+		},
+		config.KV{
+			Key:   Extensions,
+			Value: strings.Join(cfg.Extensions, config.ValueSeparator),
+		},
+		config.KV{
+			Key:   MimeTypes,
+			Value: strings.Join(cfg.MimeTypes, config.ValueSeparator),
+		},
 	}
 }
466 vendor/github.com/minio/minio/cmd/config/config.go generated vendored

@@ -18,7 +18,10 @@
 package config
 
 import (
+	"bufio"
 	"fmt"
+	"io"
+	"regexp"
 	"strings"
 
 	"github.com/minio/minio-go/pkg/set"
@@ -28,27 +31,39 @@ import (
 )
 
 // Error config error type
-type Error string
+type Error struct {
+	Kind ErrorKind
+	Err  string
+}
+
+// ErrorKind config error kind
+type ErrorKind int8
+
+// Various error kinds.
+const (
+	ContinueKind ErrorKind = iota + 1
+	SafeModeKind
+)
 
 // Errorf - formats according to a format specifier and returns
 // the string as a value that satisfies error of type config.Error
-func Errorf(format string, a ...interface{}) error {
-	return Error(fmt.Sprintf(format, a...))
+func Errorf(errKind ErrorKind, format string, a ...interface{}) error {
+	return Error{Kind: errKind, Err: fmt.Sprintf(format, a...)}
 }
 
 func (e Error) Error() string {
-	return string(e)
+	return e.Err
 }
 
 // Default keys
 const (
 	Default = madmin.Default
-	State   = "state"
-	Comment = "comment"
+	Enable  = madmin.EnableKey
+	Comment = madmin.CommentKey
 
-	// State values
-	StateOn  = "on"
-	StateOff = "off"
+	// Enable values
+	EnableOn  = madmin.EnableOn
+	EnableOff = madmin.EnableOff
 
 	RegionName = "name"
 	AccessKey  = "access_key"
@@ -57,19 +72,18 @@ const (
 
 // Top level config constants.
 const (
-	CredentialsSubSys     = "credentials"
-	PolicyOPASubSys       = "policy_opa"
-	IdentityOpenIDSubSys  = "identity_openid"
-	IdentityLDAPSubSys    = "identity_ldap"
-	WormSubSys            = "worm"
-	CacheSubSys           = "cache"
-	RegionSubSys          = "region"
-	EtcdSubSys            = "etcd"
-	StorageClassSubSys    = "storageclass"
-	CompressionSubSys     = "compression"
-	KmsVaultSubSys        = "kms_vault"
-	LoggerHTTPSubSys      = "logger_http"
-	LoggerHTTPAuditSubSys = "logger_http_audit"
+	CredentialsSubSys    = "credentials"
+	PolicyOPASubSys      = "policy_opa"
+	IdentityOpenIDSubSys = "identity_openid"
+	IdentityLDAPSubSys   = "identity_ldap"
+	CacheSubSys          = "cache"
+	RegionSubSys         = "region"
+	EtcdSubSys           = "etcd"
+	StorageClassSubSys   = "storage_class"
+	CompressionSubSys    = "compression"
+	KmsVaultSubSys       = "kms_vault"
+	LoggerWebhookSubSys  = "logger_webhook"
+	AuditWebhookSubSys   = "audit_webhook"
 
 	// Add new constants here if you add new fields to config.
 )
@@ -93,15 +107,14 @@ const (
 // SubSystems - all supported sub-systems
 var SubSystems = set.CreateStringSet([]string{
 	CredentialsSubSys,
-	WormSubSys,
 	RegionSubSys,
 	EtcdSubSys,
 	CacheSubSys,
 	StorageClassSubSys,
 	CompressionSubSys,
 	KmsVaultSubSys,
-	LoggerHTTPSubSys,
-	LoggerHTTPAuditSubSys,
+	LoggerWebhookSubSys,
+	AuditWebhookSubSys,
 	PolicyOPASubSys,
 	IdentityLDAPSubSys,
 	IdentityOpenIDSubSys,
@@ -120,7 +133,6 @@ var SubSystems = set.CreateStringSet([]string{
 // SubSystemsSingleTargets - subsystems which only support single target.
 var SubSystemsSingleTargets = set.CreateStringSet([]string{
 	CredentialsSubSys,
-	WormSubSys,
 	RegionSubSys,
 	EtcdSubSys,
 	CacheSubSys,
@@ -147,66 +159,202 @@ const (
 	EnvWordDelimiter = `_`
 )
 
+// DefaultKVS - default kvs for all sub-systems
+var DefaultKVS map[string]KVS
+
+// RegisterDefaultKVS - this function saves input kvsMap
+// globally, this should be called only once preferably
+// during `init()`.
+func RegisterDefaultKVS(kvsMap map[string]KVS) {
+	DefaultKVS = map[string]KVS{}
+	for subSys, kvs := range kvsMap {
+		DefaultKVS[subSys] = kvs
+	}
+}
+
+// HelpSubSysMap - help for all individual KVS for each sub-systems
+// also carries a special empty sub-system which dumps
+// help for each sub-system key.
+var HelpSubSysMap map[string]HelpKVS
+
+// RegisterHelpSubSys - this function saves
+// input help KVS for each sub-system globally,
+// this function should be called only once
+// preferably in during `init()`.
+func RegisterHelpSubSys(helpKVSMap map[string]HelpKVS) {
+	HelpSubSysMap = map[string]HelpKVS{}
+	for subSys, hkvs := range helpKVSMap {
+		HelpSubSysMap[subSys] = hkvs
+	}
+}
+
+// KV - is a shorthand of each key value.
+type KV struct {
+	Key   string `json:"key"`
+	Value string `json:"value"`
+}
+
 // KVS - is a shorthand for some wrapper functions
 // to operate on list of key values.
-type KVS map[string]string
+type KVS []KV
+
+// Empty - return if kv is empty
+func (kvs KVS) Empty() bool {
+	return len(kvs) == 0
+}
 
 func (kvs KVS) String() string {
 	var s strings.Builder
-	for k, v := range kvs {
-		s.WriteString(k)
+	for _, kv := range kvs {
+		// Do not need to print if state is on
+		if kv.Key == Enable && kv.Value == EnableOn {
+			continue
+		}
+		s.WriteString(kv.Key)
 		s.WriteString(KvSeparator)
-		s.WriteString(KvDoubleQuote)
-		s.WriteString(v)
-		s.WriteString(KvDoubleQuote)
+		spc := madmin.HasSpace(kv.Value)
+		if spc {
+			s.WriteString(KvDoubleQuote)
+		}
+		s.WriteString(kv.Value)
+		if spc {
+			s.WriteString(KvDoubleQuote)
+		}
 		s.WriteString(KvSpaceSeparator)
 	}
 	return s.String()
 }
 
+// Set sets a value, if not sets a default value.
+func (kvs *KVS) Set(key, value string) {
+	for i, kv := range *kvs {
+		if kv.Key == key {
+			(*kvs)[i] = KV{
+				Key:   key,
+				Value: value,
+			}
+			return
+		}
+	}
+	*kvs = append(*kvs, KV{
+		Key:   key,
+		Value: value,
+	})
+}
+
 // Get - returns the value of a key, if not found returns empty.
 func (kvs KVS) Get(key string) string {
-	return kvs[key]
+	v, ok := kvs.Lookup(key)
+	if ok {
+		return v
+	}
+	return ""
+}
+
+// Lookup - lookup a key in a list of KVS
+func (kvs KVS) Lookup(key string) (string, bool) {
+	for _, kv := range kvs {
+		if kv.Key == key {
+			return kv.Value, true
+		}
+	}
+	return "", false
 }
 
 // Config - MinIO server config structure.
 type Config map[string]map[string]KVS
 
-func (c Config) String() string {
-	var s strings.Builder
-	for k, v := range c {
-		for target, kv := range v {
-			s.WriteString(k)
-			if target != Default {
-				s.WriteString(SubSystemSeparator)
-				s.WriteString(target)
-			}
-			s.WriteString(KvSpaceSeparator)
-			s.WriteString(kv.String())
-			s.WriteString(KvNewline)
-		}
-	}
-	return s.String()
+// DelFrom - deletes all keys in the input reader.
+func (c Config) DelFrom(r io.Reader) error {
+	scanner := bufio.NewScanner(r)
+	for scanner.Scan() {
+		// Skip any empty lines, or comment like characters
+		text := scanner.Text()
+		if text == "" || strings.HasPrefix(text, KvComment) {
+			continue
+		}
+		if err := c.DelKVS(text); err != nil {
+			return err
+		}
+	}
+	if err := scanner.Err(); err != nil {
+		return err
+	}
+	return nil
+}
+
+// ReadFrom - implements io.ReaderFrom interface
+func (c Config) ReadFrom(r io.Reader) (int64, error) {
+	var n int
+	scanner := bufio.NewScanner(r)
+	for scanner.Scan() {
+		// Skip any empty lines, or comment like characters
+		text := scanner.Text()
+		if text == "" || strings.HasPrefix(text, KvComment) {
+			continue
+		}
+		if err := c.SetKVS(text, DefaultKVS); err != nil {
+			return 0, err
+		}
+		n += len(text)
+	}
+	if err := scanner.Err(); err != nil {
+		return 0, err
+	}
+	return int64(n), nil
+}
+
+type configWriteTo struct {
+	Config
+	filterByKey string
+}
+
+// NewConfigWriteTo - returns a struct which
+// allows for serializing the config/kv struct
+// to a io.WriterTo
+func NewConfigWriteTo(cfg Config, key string) io.WriterTo {
+	return &configWriteTo{Config: cfg, filterByKey: key}
+}
+
+// WriteTo - implements io.WriterTo interface implementation for config.
+func (c *configWriteTo) WriteTo(w io.Writer) (int64, error) {
+	kvs, err := c.GetKVS(c.filterByKey, DefaultKVS)
+	if err != nil {
+		return 0, err
+	}
+	var n int
+	for k, kv := range kvs {
+		m1, _ := w.Write([]byte(k))
+		m2, _ := w.Write([]byte(KvSpaceSeparator))
+		m3, _ := w.Write([]byte(kv.String()))
+		if len(kvs) > 1 {
+			m4, _ := w.Write([]byte(KvNewline))
+			n += m1 + m2 + m3 + m4
+		} else {
+			n += m1 + m2 + m3
+		}
+	}
+	return int64(n), nil
 }
 
 // Default KV configs for worm and region
 var (
 	DefaultCredentialKVS = KVS{
-		State:     StateOff,
-		Comment:   "This is a default credential configuration",
-		AccessKey: auth.DefaultAccessKey,
-		SecretKey: auth.DefaultSecretKey,
-	}
-
-	DefaultWormKVS = KVS{
-		State:   StateOff,
-		Comment: "This is a default WORM configuration",
+		KV{
+			Key:   AccessKey,
+			Value: auth.DefaultAccessKey,
+		},
+		KV{
+			Key:   SecretKey,
+			Value: auth.DefaultSecretKey,
+		},
 	}
 
 	DefaultRegionKVS = KVS{
-		State:      StateOff,
-		Comment:    "This is a default Region configuration",
-		RegionName: "",
+		KV{
+			Key:   RegionName,
+			Value: "",
+		},
 	}
 )
@@ -215,10 +363,17 @@ func LookupCreds(kv KVS) (auth.Credentials, error) {
 	if err := CheckValidKeys(CredentialsSubSys, kv, DefaultCredentialKVS); err != nil {
 		return auth.Credentials{}, err
 	}
-	return auth.CreateCredentials(env.Get(EnvAccessKey, kv.Get(AccessKey)),
-		env.Get(EnvSecretKey, kv.Get(SecretKey)))
+	accessKey := env.Get(EnvAccessKey, kv.Get(AccessKey))
+	secretKey := env.Get(EnvSecretKey, kv.Get(SecretKey))
+	if accessKey == "" && secretKey == "" {
+		accessKey = auth.DefaultAccessKey
+		secretKey = auth.DefaultSecretKey
+	}
+	return auth.CreateCredentials(accessKey, secretKey)
 }
 
+var validRegionRegex = regexp.MustCompile("^[a-zA-Z][a-zA-Z0-9-_-]+$")
+
 // LookupRegion - get current region.
 func LookupRegion(kv KVS) (string, error) {
 	if err := CheckValidKeys(RegionSubSys, kv, DefaultRegionKVS); err != nil {
@@ -228,37 +383,43 @@ func LookupRegion(kv KVS) (string, error) {
 	if region == "" {
 		region = env.Get(EnvRegionName, kv.Get(RegionName))
 	}
-	return region, nil
+	if region != "" {
+		if validRegionRegex.MatchString(region) {
+			return region, nil
+		}
+		return "", Errorf(SafeModeKind,
+			"region '%s' is invalid, expected simple characters such as [us-east-1, myregion...]",
+			region)
+	}
+	return "", nil
 }
 
 // CheckValidKeys - checks if inputs KVS has the necessary keys,
 // returns error if it find extra or superflous keys.
 func CheckValidKeys(subSys string, kv KVS, validKVS KVS) error {
 	nkv := KVS{}
-	for k, v := range kv {
-		if _, ok := validKVS[k]; !ok {
-			nkv[k] = v
+	for _, kv := range kv {
+		// Comment is a valid key, its also fully optional
+		// ignore it since it is a valid key for all
+		// sub-systems.
+		if kv.Key == Comment {
+			continue
+		}
+		if _, ok := validKVS.Lookup(kv.Key); !ok {
+			nkv = append(nkv, kv)
 		}
 	}
 	if len(nkv) > 0 {
-		return Error(fmt.Sprintf("found invalid keys (%s) for '%s' sub-system", nkv.String(), subSys))
+		return Errorf(
+			ContinueKind,
+			"found invalid keys (%s) for '%s' sub-system, use 'mc admin config reset myminio %s' to fix invalid keys", nkv.String(), subSys, subSys)
 	}
 	return nil
 }
 
 // LookupWorm - check if worm is enabled
-func LookupWorm(kv KVS) (bool, error) {
-	if err := CheckValidKeys(WormSubSys, kv, DefaultWormKVS); err != nil {
-		return false, err
-	}
-	worm := env.Get(EnvWorm, "")
-	if worm == "" {
-		worm = env.Get(EnvWormState, kv.Get(State))
-		if worm == "" {
-			return false, nil
-		}
-	}
-	return ParseBool(worm)
+func LookupWorm() (bool, error) {
+	return ParseBool(env.Get(EnvWorm, EnableOff))
 }
 
 // New - initialize a new server config.
@@ -266,95 +427,98 @@ func New() Config {
 	srvCfg := make(Config)
 	for _, k := range SubSystems.ToSlice() {
 		srvCfg[k] = map[string]KVS{}
 		srvCfg[k][Default] = DefaultKVS[k]
 	}
 	return srvCfg
 }
 
 // GetKVS - get kvs from specific subsystem.
-func (c Config) GetKVS(s string) (map[string]KVS, error) {
+func (c Config) GetKVS(s string, defaultKVS map[string]KVS) (map[string]KVS, error) {
 	if len(s) == 0 {
-		return nil, Error("input cannot be empty")
+		return nil, Errorf(SafeModeKind, "input cannot be empty")
 	}
 	inputs := strings.Fields(s)
 	if len(inputs) > 1 {
-		return nil, Error(fmt.Sprintf("invalid number of arguments %s", s))
+		return nil, Errorf(SafeModeKind, "invalid number of arguments %s", s)
 	}
 	subSystemValue := strings.SplitN(inputs[0], SubSystemSeparator, 2)
 	if len(subSystemValue) == 0 {
-		return nil, Error(fmt.Sprintf("invalid number of arguments %s", s))
+		return nil, Errorf(SafeModeKind, "invalid number of arguments %s", s)
 	}
 	found := SubSystems.Contains(subSystemValue[0])
 	if !found {
-		// Check for sub-prefix only if the input value
-		// is only a single value, this rejects invalid
-		// inputs if any.
+		// Check for sub-prefix only if the input value is only a
+		// single value, this rejects invalid inputs if any.
 		found = !SubSystems.FuncMatch(strings.HasPrefix, subSystemValue[0]).IsEmpty() && len(subSystemValue) == 1
 	}
 	if !found {
-		return nil, Error(fmt.Sprintf("unknown sub-system %s", s))
+		return nil, Errorf(SafeModeKind, "unknown sub-system %s", s)
 	}
 
 	kvs := make(map[string]KVS)
 	var ok bool
+	subSysPrefix := subSystemValue[0]
 	if len(subSystemValue) == 2 {
 		if len(subSystemValue[1]) == 0 {
-			err := fmt.Sprintf("sub-system target '%s' cannot be empty", s)
-			return nil, Error(err)
+			return nil, Errorf(SafeModeKind, "sub-system target '%s' cannot be empty", s)
 		}
-		kvs[inputs[0]], ok = c[subSystemValue[0]][subSystemValue[1]]
+		kvs[inputs[0]], ok = c[subSysPrefix][subSystemValue[1]]
 		if !ok {
-			err := fmt.Sprintf("sub-system target '%s' doesn't exist", s)
-			return nil, Error(err)
+			return nil, Errorf(SafeModeKind, "sub-system target '%s' doesn't exist", s)
 		}
-		return kvs, nil
-	}
-
-	for subSys, subSysTgts := range c {
-		if !strings.HasPrefix(subSys, subSystemValue[0]) {
-			continue
-		}
-		for k, kv := range subSysTgts {
-			if k != Default {
-				kvs[subSys+SubSystemSeparator+k] = kv
-			} else {
-				kvs[subSys] = kv
+	} else {
+		for subSys, subSysTgts := range c {
+			if !strings.HasPrefix(subSys, subSysPrefix) {
+				continue
+			}
+			for k, kv := range subSysTgts {
+				if k != Default {
+					kvs[subSys+SubSystemSeparator+k] = kv
+				} else {
+					kvs[subSys] = kv
+				}
 			}
 		}
 	}
+	if len(kvs) == 0 {
+		kvs[subSysPrefix] = defaultKVS[subSysPrefix]
+		return kvs, nil
+	}
 	return kvs, nil
 }
 
 // DelKVS - delete a specific key.
 func (c Config) DelKVS(s string) error {
 	if len(s) == 0 {
-		return Error("input arguments cannot be empty")
+		return Errorf(SafeModeKind, "input arguments cannot be empty")
 	}
 	inputs := strings.Fields(s)
 	if len(inputs) > 1 {
-		return Error(fmt.Sprintf("invalid number of arguments %s", s))
+		return Errorf(SafeModeKind, "invalid number of arguments %s", s)
 	}
 	subSystemValue := strings.SplitN(inputs[0], SubSystemSeparator, 2)
 	if len(subSystemValue) == 0 {
-		return Error(fmt.Sprintf("invalid number of arguments %s", s))
+		return Errorf(SafeModeKind, "invalid number of arguments %s", s)
 	}
 	if !SubSystems.Contains(subSystemValue[0]) {
-		return Error(fmt.Sprintf("unknown sub-system %s", s))
-	}
-	if len(subSystemValue) == 2 {
-		if len(subSystemValue[1]) == 0 {
-			err := fmt.Sprintf("sub-system target '%s' cannot be empty", s)
-			return Error(err)
-		}
-		delete(c[subSystemValue[0]], subSystemValue[1])
+		// Unknown sub-system found try to remove it anyways.
+		delete(c, subSystemValue[0])
 		return nil
 	}
-	return Error(fmt.Sprintf("default config for '%s' sub-system cannot be removed", s))
-}
-
-// This function is needed, to trim off single or double quotes, creeping into the values.
-func sanitizeValue(v string) string {
-	v = strings.TrimSuffix(strings.TrimPrefix(strings.TrimSpace(v), KvDoubleQuote), KvDoubleQuote)
-	return strings.TrimSuffix(strings.TrimPrefix(v, KvSingleQuote), KvSingleQuote)
+	tgt := Default
+	subSys := subSystemValue[0]
+	if len(subSystemValue) == 2 {
+		if len(subSystemValue[1]) == 0 {
+			return Errorf(SafeModeKind, "sub-system target '%s' cannot be empty", s)
+		}
+		tgt = subSystemValue[1]
+	}
+	_, ok := c[subSys][tgt]
+	if !ok {
+		return Errorf(SafeModeKind, "sub-system %s already deleted", s)
+	}
+	delete(c[subSys], tgt)
+	return nil
 }
 
 // Clone - clones a config map entirely.
@@ -363,10 +527,7 @@ func (c Config) Clone() Config {
 	for subSys, tgtKV := range c {
 		cp[subSys] = make(map[string]KVS)
 		for tgt, kv := range tgtKV {
-			cp[subSys][tgt] = KVS{}
-			for k, v := range kv {
-				cp[subSys][tgt][k] = v
-			}
+			cp[subSys][tgt] = append(cp[subSys][tgt], kv...)
 		}
 	}
 	return cp
@@ -375,23 +536,23 @@ func (c Config) Clone() Config {
 // SetKVS - set specific key values per sub-system.
 func (c Config) SetKVS(s string, defaultKVS map[string]KVS) error {
 	if len(s) == 0 {
-		return Error("input arguments cannot be empty")
+		return Errorf(SafeModeKind, "input arguments cannot be empty")
 	}
 	inputs := strings.SplitN(s, KvSpaceSeparator, 2)
 	if len(inputs) <= 1 {
-		return Error(fmt.Sprintf("invalid number of arguments '%s'", s))
+		return Errorf(SafeModeKind, "invalid number of arguments '%s'", s)
 	}
 	subSystemValue := strings.SplitN(inputs[0], SubSystemSeparator, 2)
 	if len(subSystemValue) == 0 {
-		return Error(fmt.Sprintf("invalid number of arguments %s", s))
+		return Errorf(SafeModeKind, "invalid number of arguments %s", s)
 	}
 
 	if !SubSystems.Contains(subSystemValue[0]) {
-		return Error(fmt.Sprintf("unknown sub-system %s", s))
+		return Errorf(SafeModeKind, "unknown sub-system %s", s)
 	}
 
 	if SubSystemsSingleTargets.Contains(subSystemValue[0]) && len(subSystemValue) == 2 {
-		return Error(fmt.Sprintf("sub-system '%s' only supports single target", subSystemValue[0]))
+		return Errorf(SafeModeKind, "sub-system '%s' only supports single target", subSystemValue[0])
 	}
 
 	var kvs = KVS{}
@@ -402,34 +563,53 @@ func (c Config) SetKVS(s string, defaultKVS map[string]KVS) error {
 			continue
 		}
 		if len(kv) == 1 && prevK != "" {
-			kvs[prevK] = strings.Join([]string{kvs[prevK], sanitizeValue(kv[0])}, KvSpaceSeparator)
+			value := strings.Join([]string{
+				kvs.Get(prevK),
+				madmin.SanitizeValue(kv[0]),
+			}, KvSpaceSeparator)
+			kvs.Set(prevK, value)
 			continue
 		}
-		if len(kv) == 1 {
-			return Error(fmt.Sprintf("key '%s', cannot have empty value", kv[0]))
+		if len(kv) == 2 {
+			prevK = kv[0]
+			kvs.Set(prevK, madmin.SanitizeValue(kv[1]))
+			continue
 		}
-		prevK = kv[0]
-		kvs[kv[0]] = sanitizeValue(kv[1])
+		return Errorf(SafeModeKind, "key '%s', cannot have empty value", kv[0])
 	}
 
 	tgt := Default
 	subSys := subSystemValue[0]
 	if len(subSystemValue) == 2 {
 		tgt = subSystemValue[1]
 	}
-	_, ok := c[subSystemValue[0]][tgt]
+
+	_, ok := kvs.Lookup(Enable)
+	// Check if state is required
+	_, defaultOk := defaultKVS[subSys].Lookup(Enable)
+	if !ok && defaultOk {
+		// implicit state "on" if not specified.
+		kvs.Set(Enable, EnableOn)
+	}
+
+	currKVS, ok := c[subSys][tgt]
 	if !ok {
-		c[subSystemValue[0]][tgt] = defaultKVS[subSystemValue[0]]
-		comment := fmt.Sprintf("Settings for sub-system target %s:%s", subSystemValue[0], tgt)
-		c[subSystemValue[0]][tgt][Comment] = comment
+		currKVS = defaultKVS[subSys]
 	}
 
-	for k, v := range kvs {
-		if len(subSystemValue) == 2 {
-			c[subSystemValue[0]][subSystemValue[1]][k] = v
-		} else {
-			c[subSystemValue[0]][Default][k] = v
+	for _, kv := range kvs {
+		if kv.Key == Comment {
+			// Skip comment and add it later.
+			continue
 		}
+		currKVS.Set(kv.Key, kv.Value)
 	}
+
+	v, ok := kvs.Lookup(Comment)
+	if ok {
+		currKVS.Set(Comment, v)
+	}
+
+	c[subSys][tgt] = currKVS
 	return nil
 }
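Note: the central change in this file is KVS moving from map[string]string to an ordered []KV slice, with Set/Get/Lookup providing map-like access while preserving insertion order for serialization. A minimal re-implementation of that trio, to show why lookups become linear scans and why order survives round-trips; the keys used are just examples:

package main

import "fmt"

// KV / KVS mirror the shapes introduced above.
type KV struct {
	Key, Value string
}

type KVS []KV

// Set updates an existing key in place or appends, keeping order stable.
func (kvs *KVS) Set(key, value string) {
	for i, kv := range *kvs {
		if kv.Key == key {
			(*kvs)[i].Value = value
			return
		}
	}
	*kvs = append(*kvs, KV{key, value})
}

// Lookup scans linearly; the slice order is the serialization order.
func (kvs KVS) Lookup(key string) (string, bool) {
	for _, kv := range kvs {
		if kv.Key == key {
			return kv.Value, true
		}
	}
	return "", false
}

func main() {
	var kvs KVS
	kvs.Set("endpoints", "http://localhost:2379")
	kvs.Set("coredns_path", "/skydns")
	kvs.Set("endpoints", "https://etcd:2379") // in-place update, order kept

	v, ok := kvs.Lookup("endpoints")
	fmt.Println(v, ok) // https://etcd:2379 true
	fmt.Println(kvs)   // [{endpoints https://etcd:2379} {coredns_path /skydns}]
}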
44 vendor/github.com/minio/minio/cmd/config/config_test.go generated vendored Normal file

@@ -0,0 +1,44 @@
+/*
+ * MinIO Cloud Storage, (C) 2019 MinIO, Inc.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package config
+
+import "testing"
+
+func TestValidRegion(t *testing.T) {
+	tests := []struct {
+		name    string
+		success bool
+	}{
+		{name: "us-east-1", success: true},
+		{name: "us_east", success: true},
+		{name: "helloWorld", success: true},
+		{name: "-fdslka", success: false},
+		{name: "^00[", success: false},
+		{name: "my region", success: false},
+		{name: "%%$#!", success: false},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			ok := validRegionRegex.MatchString(test.name)
+			if test.success != ok {
+				t.Errorf("Expected %t, got %t", test.success, ok)
+			}
+		})
+	}
+}
3 vendor/github.com/minio/minio/cmd/config/constants.go generated vendored

@@ -33,8 +33,7 @@ const (
 	EnvPublicIPs = "MINIO_PUBLIC_IPS"
 	EnvEndpoints = "MINIO_ENDPOINTS"
 
-	EnvUpdate    = "MINIO_UPDATE"
-	EnvWormState = "MINIO_WORM_STATE"
+	EnvUpdate = "MINIO_UPDATE"
 
 	EnvWorm   = "MINIO_WORM"   // legacy
 	EnvRegion = "MINIO_REGION" // legacy
7 vendor/github.com/minio/minio/cmd/config/errors.go generated vendored

@@ -45,13 +45,13 @@ var (
 	ErrInvalidCacheDrivesValue = newErrFn(
 		"Invalid cache drive value",
 		"Please check the value in this ENV variable",
-		"MINIO_CACHE_DRIVES: Mounted drives or directories are delimited by `;`",
+		"MINIO_CACHE_DRIVES: Mounted drives or directories are delimited by `,`",
 	)
 
 	ErrInvalidCacheExcludesValue = newErrFn(
 		"Invalid cache excludes value",
 		"Please check the passed value",
-		"MINIO_CACHE_EXCLUDE: Cache exclusion patterns are delimited by `;`",
+		"MINIO_CACHE_EXCLUDE: Cache exclusion patterns are delimited by `,`",
 	)
 
 	ErrInvalidCacheExpiryValue = newErrFn(
@@ -93,8 +93,7 @@ var (
 	ErrInvalidCredentials = newErrFn(
 		"Invalid credentials",
 		"Please provide correct credentials",
-		`Access key length should be between minimum 3 characters in length.
-Secret key should be in between 8 and 40 characters`,
+		`Access key length should be at least 3, and secret key length at least 8 characters`,
 	)
 
 	ErrEnvCredentialsMissingGateway = newErrFn(
@@ -29,7 +29,7 @@ import (
 	"github.com/minio/minio-go/v6/pkg/set"
 
 	"github.com/coredns/coredns/plugin/etcd/msg"
-	etcd "github.com/coreos/etcd/clientv3"
+	"github.com/coreos/etcd/clientv3"
 )
 
 // ErrNoEntriesFound - Indicates no entries were found for the given key (directory)
@@ -106,7 +106,7 @@ func msgUnPath(s string) string {
 // Note that this method fetches entries upto only two levels deep.
 func (c *CoreDNS) list(key string) ([]SrvRecord, error) {
 	ctx, cancel := context.WithTimeout(context.Background(), defaultContextTimeout)
-	r, err := c.etcdClient.Get(ctx, key, etcd.WithPrefix())
+	r, err := c.etcdClient.Get(ctx, key, clientv3.WithPrefix())
 	defer cancel()
 	if err != nil {
 		return nil, err
@@ -217,7 +217,7 @@ type CoreDNS struct {
 	domainIPs  set.StringSet
 	domainPort string
 	prefixPath string
-	etcdClient *etcd.Client
+	etcdClient *clientv3.Client
 }
 
 // Option - functional options pattern style
@@ -257,14 +257,14 @@ func CoreDNSPath(prefix string) Option {
 }
 
 // NewCoreDNS - initialize a new coreDNS set/unset values.
-func NewCoreDNS(etcdClient *etcd.Client, setters ...Option) (Config, error) {
-	if etcdClient == nil {
-		return nil, errors.New("invalid argument")
+func NewCoreDNS(cfg clientv3.Config, setters ...Option) (*CoreDNS, error) {
+	etcdClient, err := clientv3.New(cfg)
+	if err != nil {
+		return nil, err
 	}
 
 	args := &CoreDNS{
 		etcdClient: etcdClient,
 		prefixPath: defaultPrefixPath,
 	}
 
 	for _, setter := range setters {
@@ -55,13 +55,3 @@ type SrvRecord struct {
 	// Key carries the original key used during Put().
 	Key string `json:"-"`
 }
-
-// Config - represents dns put, get interface. This interface can be
-// used to implement various backends as needed.
-type Config interface {
-	Put(key string) error
-	List() ([]SrvRecord, error)
-	Get(key string) ([]SrvRecord, error)
-	Delete(key string) error
-	DeleteRecord(record SrvRecord) error
-}
121
vendor/github.com/minio/minio/cmd/config/etcd/etcd.go
generated
vendored
121
vendor/github.com/minio/minio/cmd/config/etcd/etcd.go
generated
vendored
|
@ -19,11 +19,11 @@ package etcd
|
|||
import (
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/etcd/clientv3"
|
||||
"github.com/coreos/etcd/clientv3/namespace"
|
||||
"github.com/minio/minio/cmd/config"
|
||||
"github.com/minio/minio/pkg/env"
|
||||
xnet "github.com/minio/minio/pkg/net"
|
||||
|
@ -38,12 +38,13 @@ const (
|
|||
// etcd environment values
|
||||
const (
|
Endpoints = "endpoints"
PathPrefix = "path_prefix"
CoreDNSPath = "coredns_path"
ClientCert = "client_cert"
ClientCertKey = "client_cert_key"

EnvEtcdState = "MINIO_ETCD_STATE"
EnvEtcdEndpoints = "MINIO_ETCD_ENDPOINTS"
EnvEtcdPathPrefix = "MINIO_ETCD_PATH_PREFIX"
EnvEtcdCoreDNSPath = "MINIO_ETCD_COREDNS_PATH"
EnvEtcdClientCert = "MINIO_ETCD_CLIENT_CERT"
EnvEtcdClientCertKey = "MINIO_ETCD_CLIENT_CERT_KEY"

@@ -52,18 +53,33 @@ const (
// DefaultKVS - default KV settings for etcd.
var (
DefaultKVS = config.KVS{
config.State: config.StateOff,
config.Comment: "This is a default etcd configuration",
Endpoints: "",
CoreDNSPath: "/skydns",
ClientCert: "",
ClientCertKey: "",
config.KV{
Key: Endpoints,
Value: "",
},
config.KV{
Key: PathPrefix,
Value: "",
},
config.KV{
Key: CoreDNSPath,
Value: "/skydns",
},
config.KV{
Key: ClientCert,
Value: "",
},
config.KV{
Key: ClientCertKey,
Value: "",
},
}
)

// Config - server etcd config.
type Config struct {
Enabled bool `json:"enabled"`
PathPrefix string `json:"pathPrefix"`
CoreDNSPath string `json:"coreDNSPath"`
clientv3.Config
}

@@ -73,7 +89,14 @@ func New(cfg Config) (*clientv3.Client, error) {
if !cfg.Enabled {
return nil, nil
}
return clientv3.New(cfg.Config)
cli, err := clientv3.New(cfg.Config)
if err != nil {
return nil, err
}
cli.KV = namespace.NewKV(cli.KV, cfg.PathPrefix)
cli.Watcher = namespace.NewWatcher(cli.Watcher, cfg.PathPrefix)
cli.Lease = namespace.NewLease(cli.Lease, cfg.PathPrefix)
return cli, nil
}

func parseEndpoints(endpoints string) ([]string, bool, error) {

@@ -86,7 +109,8 @@ func parseEndpoints(endpoints string) ([]string, bool, error) {
return nil, false, err
}
if etcdSecure && u.Scheme == "http" {
return nil, false, fmt.Errorf("all endpoints should be https or http: %s", endpoint)
return nil, false, config.Errorf(config.SafeModeKind,
"all endpoints should be https or http: %s", endpoint)
}
// If one of the endpoint is https, we will use https directly.
etcdSecure = etcdSecure || u.Scheme == "https"

@@ -95,75 +119,22 @@ func parseEndpoints(endpoints string) ([]string, bool, error) {
return etcdEndpoints, etcdSecure, nil
}

func lookupLegacyConfig(rootCAs *x509.CertPool) (Config, error) {
cfg := Config{}
endpoints := env.Get(EnvEtcdEndpoints, "")
if endpoints == "" {
return cfg, nil
}
etcdEndpoints, etcdSecure, err := parseEndpoints(endpoints)
if err != nil {
return cfg, err
}
cfg.Enabled = true
cfg.DialTimeout = defaultDialTimeout
cfg.DialKeepAliveTime = defaultDialKeepAlive
cfg.Endpoints = etcdEndpoints
cfg.CoreDNSPath = "/skydns"
if etcdSecure {
cfg.TLS = &tls.Config{
RootCAs: rootCAs,
}
// This is only to support client side certificate authentication
// https://coreos.com/etcd/docs/latest/op-guide/security.html
etcdClientCertFile := env.Get(EnvEtcdClientCert, "")
etcdClientCertKey := env.Get(EnvEtcdClientCertKey, "")
if etcdClientCertFile != "" && etcdClientCertKey != "" {
cfg.TLS.GetClientCertificate = func(unused *tls.CertificateRequestInfo) (*tls.Certificate, error) {
cert, err := tls.LoadX509KeyPair(etcdClientCertFile, etcdClientCertKey)
return &cert, err
}
}
}
return cfg, nil
// Enabled returns if etcd is enabled.
func Enabled(kvs config.KVS) bool {
endpoints := kvs.Get(Endpoints)
return endpoints != ""
}

// LookupConfig - Initialize new etcd config.
func LookupConfig(kv config.KVS, rootCAs *x509.CertPool) (Config, error) {
func LookupConfig(kvs config.KVS, rootCAs *x509.CertPool) (Config, error) {
cfg := Config{}
if err := config.CheckValidKeys(config.EtcdSubSys, kv, DefaultKVS); err != nil {
if err := config.CheckValidKeys(config.EtcdSubSys, kvs, DefaultKVS); err != nil {
return cfg, err
}

stateBool, err := config.ParseBool(env.Get(EnvEtcdState, config.StateOn))
if err != nil {
return cfg, err
}

if stateBool {
// By default state is 'on' to honor legacy config.
cfg, err = lookupLegacyConfig(rootCAs)
if err != nil {
return cfg, err
}
// If old legacy config is enabled honor it.
if cfg.Enabled {
return cfg, nil
}
}

stateBool, err = config.ParseBool(env.Get(EnvEtcdState, kv.Get(config.State)))
if err != nil {
return cfg, err
}

if !stateBool {
return cfg, nil
}

endpoints := env.Get(EnvEtcdEndpoints, kv.Get(Endpoints))
endpoints := env.Get(EnvEtcdEndpoints, kvs.Get(Endpoints))
if endpoints == "" {
return cfg, config.Error("'endpoints' key cannot be empty to enable etcd")
return cfg, nil
}

etcdEndpoints, etcdSecure, err := parseEndpoints(endpoints)

@@ -175,15 +146,17 @@ func LookupConfig(kv config.KVS, rootCAs *x509.CertPool) (Config, error) {
cfg.DialTimeout = defaultDialTimeout
cfg.DialKeepAliveTime = defaultDialKeepAlive
cfg.Endpoints = etcdEndpoints
cfg.CoreDNSPath = env.Get(EnvEtcdCoreDNSPath, kv.Get(CoreDNSPath))
cfg.CoreDNSPath = env.Get(EnvEtcdCoreDNSPath, kvs.Get(CoreDNSPath))
// Default path prefix for all keys on etcd, other than CoreDNSPath.
cfg.PathPrefix = env.Get(EnvEtcdPathPrefix, kvs.Get(PathPrefix))
if etcdSecure {
cfg.TLS = &tls.Config{
RootCAs: rootCAs,
}
// This is only to support client side certificate authentication
// https://coreos.com/etcd/docs/latest/op-guide/security.html
etcdClientCertFile := env.Get(EnvEtcdClientCert, kv.Get(ClientCert))
etcdClientCertKey := env.Get(EnvEtcdClientCertKey, kv.Get(ClientCertKey))
etcdClientCertFile := env.Get(EnvEtcdClientCert, kvs.Get(ClientCert))
etcdClientCertKey := env.Get(EnvEtcdClientCertKey, kvs.Get(ClientCertKey))
if etcdClientCertFile != "" && etcdClientCertKey != "" {
cfg.TLS.GetClientCertificate = func(unused *tls.CertificateRequestInfo) (*tls.Certificate, error) {
cert, err := tls.LoadX509KeyPair(etcdClientCertFile, etcdClientCertKey)
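Note: the rewritten New above wraps the returned client so that every key, watch, and lease is transparently scoped under cfg.PathPrefix. A minimal, self-contained sketch of exercising such a namespaced client follows — the endpoint, prefix, and key are illustrative values, not taken from the diff, and the import paths assume the pre-module etcd v3 client:

package main

import (
	"context"
	"fmt"
	"time"

	"go.etcd.io/etcd/clientv3"
	"go.etcd.io/etcd/clientv3/namespace"
)

func main() {
	cli, err := clientv3.New(clientv3.Config{
		Endpoints:   []string{"http://localhost:2379"}, // illustrative endpoint
		DialTimeout: 5 * time.Second,
	})
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// Mirror the wrapping done in New(): after this, a Put of
	// "config/iam" is stored on the server as "customer/config/iam".
	cli.KV = namespace.NewKV(cli.KV, "customer/")
	cli.Watcher = namespace.NewWatcher(cli.Watcher, "customer/")
	cli.Lease = namespace.NewLease(cli.Lease, "customer/")

	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()
	if _, err := cli.Put(ctx, "config/iam", "{}"); err != nil {
		panic(err)
	}
	resp, err := cli.Get(ctx, "config/iam") // reads back through the same prefix
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.Count) // 1
}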
43
vendor/github.com/minio/minio/cmd/config/etcd/help.go
generated
vendored
@@ -20,12 +20,41 @@ import "github.com/minio/minio/cmd/config"

// etcd config documented in default config
var (
Help = config.HelpKV{
Endpoints: `(required) Comma separated list of etcd endpoints eg: "http://localhost:2379"`,
CoreDNSPath: `(optional) CoreDNS etcd path location to populate DNS srv records eg: "/skydns"`,
ClientCert: `(optional) Etcd client cert for mTLS authentication`,
ClientCertKey: `(optional) Etcd client cert key for mTLS authentication`,
config.State: "Indicates if etcd config is on or off",
config.Comment: "A comment to describe the etcd settings",
Help = config.HelpKVS{
config.HelpKV{
Key: Endpoints,
Description: `comma separated list of etcd endpoints e.g. "http://localhost:2379"`,
Type: "csv",
},
config.HelpKV{
Key: PathPrefix,
Description: `default etcd path prefix to populate all IAM assets eg: "customer/"`,
Optional: true,
Type: "path",
},
config.HelpKV{
Key: CoreDNSPath,
Description: `default etcd path location to populate bucket DNS srv records eg: "/skydns"`,
Optional: true,
Type: "path",
},
config.HelpKV{
Key: ClientCert,
Description: `client cert for mTLS authentication`,
Optional: true,
Type: "path",
},
config.HelpKV{
Key: ClientCertKey,
Description: `client cert key for mTLS authentication`,
Optional: true,
Type: "path",
},
config.HelpKV{
Key: config.Comment,
Description: config.DefaultComment,
Optional: true,
Type: "sentence",
},
}
)
48
vendor/github.com/minio/minio/cmd/config/help.go
generated
vendored
@@ -18,18 +18,46 @@ package config

// HelpKV - implements help messages for keys
// with value as description of the keys.
type HelpKV map[string]string
type HelpKV struct {
Key string `json:"key"`
Type string `json:"type"`
Description string `json:"description"`
Optional bool `json:"optional"`

// Indicates if sub-sys supports multiple targets.
MultipleTargets bool `json:"multipleTargets"`
}

// HelpKVS - implement order of keys help messages.
type HelpKVS []HelpKV

// Lookup - lookup a key from help kvs.
func (hkvs HelpKVS) Lookup(key string) (HelpKV, bool) {
for _, hkv := range hkvs {
if hkv.Key == key {
return hkv, true
}
}
return HelpKV{}, false
}

// DefaultComment used across all sub-systems.
const DefaultComment = "optionally add a comment to this setting"

// Region and Worm help is documented in default config
var (
RegionHelp = HelpKV{
RegionName: `Region name of this deployment, eg: "us-west-2"`,
State: "Indicates if config region is honored or ignored",
Comment: "A comment to describe the region setting",
}

WormHelp = HelpKV{
State: `Indicates if worm is "on" or "off"`,
Comment: "A comment to describe the worm state",
RegionHelp = HelpKVS{
HelpKV{
Key: RegionName,
Type: "string",
Description: `name of the location of the server e.g. "us-west-rack2"`,
Optional: true,
},
HelpKV{
Key: Comment,
Type: "sentence",
Description: DefaultComment,
Optional: true,
},
}
)
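Note: the change above turns HelpKV from a map into a struct and introduces HelpKVS, an ordered slice with a linear Lookup, so help entries keep a deterministic order. A self-contained sketch of the new shape (types copied from the hunk above; the sample entries are invented):

package main

import "fmt"

// Local mirrors of the types introduced in the diff above.
type HelpKV struct {
	Key             string
	Type            string
	Description     string
	Optional        bool
	MultipleTargets bool
}

type HelpKVS []HelpKV

// Lookup scans the slice in order and returns the first matching entry.
func (hkvs HelpKVS) Lookup(key string) (HelpKV, bool) {
	for _, hkv := range hkvs {
		if hkv.Key == key {
			return hkv, true
		}
	}
	return HelpKV{}, false
}

func main() {
	help := HelpKVS{
		{Key: "name", Type: "string", Description: "name of the location of the server", Optional: true},
		{Key: "comment", Type: "sentence", Description: "optionally add a comment to this setting", Optional: true},
	}
	if hkv, ok := help.Lookup("name"); ok {
		fmt.Printf("%s (%s): %s\n", hkv.Key, hkv.Type, hkv.Description)
	}
}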
52
vendor/github.com/minio/minio/cmd/config/identity/ldap/config.go
generated
vendored
@@ -65,7 +65,6 @@ const (
GroupSearchBaseDN = "group_search_base_dn"
TLSSkipVerify = "tls_skip_verify"

EnvLDAPState = "MINIO_IDENTITY_LDAP_STATE"
EnvServerAddr = "MINIO_IDENTITY_LDAP_SERVER_ADDR"
EnvSTSExpiry = "MINIO_IDENTITY_LDAP_STS_EXPIRY"
EnvTLSSkipVerify = "MINIO_IDENTITY_LDAP_TLS_SKIP_VERIFY"

@@ -78,15 +77,34 @@ const (
// DefaultKVS - default config for LDAP config
var (
DefaultKVS = config.KVS{
config.State: config.StateOff,
config.Comment: "This is a default LDAP configuration",
ServerAddr: "",
STSExpiry: "1h",
UsernameFormat: "",
GroupSearchFilter: "",
GroupNameAttribute: "",
GroupSearchBaseDN: "",
TLSSkipVerify: config.StateOff,
config.KV{
Key: ServerAddr,
Value: "",
},
config.KV{
Key: STSExpiry,
Value: "1h",
},
config.KV{
Key: UsernameFormat,
Value: "",
},
config.KV{
Key: GroupSearchFilter,
Value: "",
},
config.KV{
Key: GroupNameAttribute,
Value: "",
},
config.KV{
Key: GroupSearchBaseDN,
Value: "",
},
config.KV{
Key: TLSSkipVerify,
Value: config.EnableOff,
},
}
)

@@ -107,22 +125,18 @@ func (l Config) GetExpiryDuration() time.Duration {
return l.stsExpiryDuration
}

// Enabled returns if jwks is enabled.
func Enabled(kvs config.KVS) bool {
return kvs.Get(ServerAddr) != ""
}

// Lookup - initializes LDAP config, overrides config, if any ENV values are set.
func Lookup(kvs config.KVS, rootCAs *x509.CertPool) (l Config, err error) {
l = Config{}
if err = config.CheckValidKeys(config.IdentityLDAPSubSys, kvs, DefaultKVS); err != nil {
return l, err
}
stateBool, err := config.ParseBool(env.Get(EnvLDAPState, kvs.Get(config.State)))
if err != nil {
return l, err
}
ldapServer := env.Get(EnvServerAddr, kvs.Get(ServerAddr))
if stateBool {
if ldapServer == "" {
return l, config.Error("'serveraddr' cannot be empty if you wish to enable AD/LDAP support")
}
}
if ldapServer == "" {
return l, nil
}
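Note: every lookup in these files resolves values as env.Get(ENV_NAME, kvs.Get(Key)) — the exported environment variable wins, and the stored config value is only the fallback. A tiny sketch of that precedence rule; getEnv is an invented stand-in for the vendored env.Get helper, whose real implementation may differ:

package main

import (
	"fmt"
	"os"
)

// getEnv mirrors the env.Get(key, fallback) pattern used above:
// a non-empty environment variable overrides the stored config value.
func getEnv(key, fallback string) string {
	if v, ok := os.LookupEnv(key); ok && v != "" {
		return v
	}
	return fallback
}

func main() {
	stored := "ldap.example.com:636" // value read from the config KVS
	os.Setenv("MINIO_IDENTITY_LDAP_SERVER_ADDR", "override.example.com:636")
	fmt.Println(getEnv("MINIO_IDENTITY_LDAP_SERVER_ADDR", stored))
	// override.example.com:636 — the environment takes precedence
}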
6
vendor/github.com/minio/minio/cmd/config/identity/ldap/config_test.go
generated
vendored
@@ -66,6 +66,12 @@ func TestSubstituter(t *testing.T) {
SubstitutableStr: "uid=${usernamedn},cn=users,dc=example,dc=com",
ErrExpected: true,
},
{
KV: []string{"username", "john"},
SubstitutableStr: "(&(objectclass=user)(sAMAccountName={username})(memberOf=CN=myorg,OU=Rialto,OU=Application Managed,OU=Groups,DC=amr,DC=corp,DC=myorg,DC=com))",
SubstitutedStr: "(&(objectclass=user)(sAMAccountName=john)(memberOf=CN=myorg,OU=Rialto,OU=Application Managed,OU=Groups,DC=amr,DC=corp,DC=myorg,DC=com))",
ErrExpected: false,
},
}

for _, test := range tests {
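Note: the new test case above exercises {username} placeholder substitution inside an LDAP search filter. A minimal sketch of just the substitution step — the real substituter also validates which placeholders are allowed, which is omitted here:

package main

import (
	"fmt"
	"strings"
)

func main() {
	filter := "(&(objectclass=user)(sAMAccountName={username}))"
	// Replace every occurrence of the {username} placeholder.
	r := strings.NewReplacer("{username}", "john")
	fmt.Println(r.Replace(filter))
	// (&(objectclass=user)(sAMAccountName=john))
}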
59
vendor/github.com/minio/minio/cmd/config/identity/ldap/help.go
generated
vendored
@@ -18,17 +18,54 @@ package ldap

import "github.com/minio/minio/cmd/config"

// Help template for Ldap identity feature.
// Help template for LDAP identity feature.
var (
Help = config.HelpKV{
ServerAddr: `(Required) AD/LDAP server address eg: "myldapserver.com:636"`,
UsernameFormat: `(Required) AD/LDAP format of full username DN eg: "uid={username},cn=accounts,dc=myldapserver,dc=com"`,
GroupSearchFilter: `Search filter to find groups of a user (optional) eg: "(&(objectclass=groupOfNames)(member={usernamedn}))"`,
GroupNameAttribute: `Attribute of search results to use as group name (optional) eg: "cn"`,
GroupSearchBaseDN: `Base DN in AD/LDAP hierarchy to use in search requests (optional) eg: "dc=myldapserver,dc=com"`,
STSExpiry: `AD/LDAP STS credentials validity duration (optional) eg: "1h"`,
TLSSkipVerify: "Set this to 'on', to disable client verification of server certificates",
config.State: "(Required) Enable or disable LDAP/AD identity",
config.Comment: "A comment to describe the LDAP/AD identity setting",
Help = config.HelpKVS{
config.HelpKV{
Key: ServerAddr,
Description: `AD/LDAP server address e.g. "myldapserver.com:636"`,
Type: "address",
},
config.HelpKV{
Key: UsernameFormat,
Description: `AD/LDAP format of full username DN e.g. "uid={username},cn=accounts,dc=myldapserver,dc=com"`,
Type: "string",
},
config.HelpKV{
Key: GroupSearchFilter,
Description: `search filter to find groups of a user (optional) e.g. "(&(objectclass=groupOfNames)(member={usernamedn}))"`,
Optional: true,
Type: "string",
},
config.HelpKV{
Key: GroupNameAttribute,
Description: `attribute of search results to use as group name (optional) e.g. "cn"`,
Optional: true,
Type: "string",
},
config.HelpKV{
Key: GroupSearchBaseDN,
Description: `base DN in AD/LDAP hierarchy to use in search requests (optional) e.g. "dc=myldapserver,dc=com"`,
Optional: true,
Type: "string",
},
config.HelpKV{
Key: STSExpiry,
Description: `AD/LDAP STS credentials validity duration e.g. "1h"`,
Optional: true,
Type: "duration",
},
config.HelpKV{
Key: TLSSkipVerify,
Description: "enable this to disable client verification of server certificates",
Optional: true,
Type: "on|off",
},
config.HelpKV{
Key: config.Comment,
Description: config.DefaultComment,
Optional: true,
Type: "sentence",
},
}
)
41
vendor/github.com/minio/minio/cmd/config/identity/ldap/legacy.go
generated
vendored
@@ -20,19 +20,34 @@ import "github.com/minio/minio/cmd/config"

// SetIdentityLDAP - One time migration code needed, for migrating from older config to new for LDAPConfig.
func SetIdentityLDAP(s config.Config, ldapArgs Config) {
if !ldapArgs.Enabled {
// ldap not enabled no need to preserve it in new settings.
return
}
s[config.IdentityLDAPSubSys][config.Default] = config.KVS{
config.State: func() string {
if !ldapArgs.Enabled {
return config.StateOff
}
return config.StateOn
}(),
config.Comment: "Settings for LDAP, after migrating config",
ServerAddr: ldapArgs.ServerAddr,
STSExpiry: ldapArgs.STSExpiryDuration,
UsernameFormat: ldapArgs.UsernameFormat,
GroupSearchFilter: ldapArgs.GroupSearchFilter,
GroupNameAttribute: ldapArgs.GroupNameAttribute,
GroupSearchBaseDN: ldapArgs.GroupSearchBaseDN,
config.KV{
Key: ServerAddr,
Value: ldapArgs.ServerAddr,
},
config.KV{
Key: STSExpiry,
Value: ldapArgs.STSExpiryDuration,
},
config.KV{
Key: UsernameFormat,
Value: ldapArgs.UsernameFormat,
},
config.KV{
Key: GroupSearchFilter,
Value: ldapArgs.GroupSearchFilter,
},
config.KV{
Key: GroupNameAttribute,
Value: ldapArgs.GroupNameAttribute,
},
config.KV{
Key: GroupSearchBaseDN,
Value: ldapArgs.GroupSearchBaseDN,
},
}
}
28
vendor/github.com/minio/minio/cmd/config/identity/openid/help.go
generated
vendored
@@ -20,9 +20,29 @@ import "github.com/minio/minio/cmd/config"

// Help template for OpenID identity feature.
var (
Help = config.HelpKV{
ConfigURL: `OpenID discovery documented endpoint. eg: "https://accounts.google.com/.well-known/openid-configuration"`,
config.State: "Indicates if OpenID identity is enabled or not",
config.Comment: "A comment to describe the OpenID identity setting",
Help = config.HelpKVS{
config.HelpKV{
Key: ConfigURL,
Description: `openid discovery document e.g. "https://accounts.google.com/.well-known/openid-configuration"`,
Type: "url",
},
config.HelpKV{
Key: ClientID,
Description: `client identifier of the authenticating party at the identity provider`,
Type: "string",
Optional: true,
},
config.HelpKV{
Key: ClaimPrefix,
Description: `openid JWT claim namespace prefix e.g. "customer"`,
Optional: true,
Type: "string",
},
config.HelpKV{
Key: config.Comment,
Description: config.DefaultComment,
Optional: true,
Type: "sentence",
},
}
)
vendor/github.com/minio/minio/cmd/config/identity/openid/jwt.go
generated
vendored
125
vendor/github.com/minio/minio/cmd/config/identity/openid/jwt.go
generated
vendored
|
@ -28,6 +28,7 @@ import (
|
|||
|
||||
jwtgo "github.com/dgrijalva/jwt-go"
|
||||
"github.com/minio/minio/cmd/config"
|
||||
"github.com/minio/minio/pkg/auth"
|
||||
"github.com/minio/minio/pkg/env"
|
||||
xnet "github.com/minio/minio/pkg/net"
|
||||
)
|
||||
|
@ -41,6 +42,7 @@ type Config struct {
|
|||
URL *xnet.URL `json:"url,omitempty"`
|
||||
ClaimPrefix string `json:"claimPrefix,omitempty"`
|
||||
DiscoveryDoc DiscoveryDoc
|
||||
ClientID string
|
||||
publicKeys map[string]crypto.PublicKey
|
||||
transport *http.Transport
|
||||
closeRespFn func(io.ReadCloser)
|
||||
|
@ -107,36 +109,20 @@ type JWT struct {
|
|||
Config
|
||||
}
|
||||
|
||||
func expToInt64(expI interface{}) (expAt int64, err error) {
|
||||
switch exp := expI.(type) {
|
||||
case float64:
|
||||
expAt = int64(exp)
|
||||
case int64:
|
||||
expAt = exp
|
||||
case json.Number:
|
||||
expAt, err = exp.Int64()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
default:
|
||||
return 0, ErrInvalidDuration
|
||||
}
|
||||
return expAt, nil
|
||||
}
|
||||
|
||||
// GetDefaultExpiration - returns the expiration seconds expected.
|
||||
func GetDefaultExpiration(dsecs string) (time.Duration, error) {
|
||||
defaultExpiryDuration := time.Duration(60) * time.Minute // Defaults to 1hr.
|
||||
if dsecs != "" {
|
||||
expirySecs, err := strconv.ParseInt(dsecs, 10, 64)
|
||||
if err != nil {
|
||||
return 0, ErrInvalidDuration
|
||||
return 0, auth.ErrInvalidDuration
|
||||
}
|
||||
|
||||
// The duration, in seconds, of the role session.
|
||||
// The value can range from 900 seconds (15 minutes)
|
||||
// to 12 hours.
|
||||
if expirySecs < 900 || expirySecs > 43200 {
|
||||
return 0, ErrInvalidDuration
|
||||
return 0, auth.ErrInvalidDuration
|
||||
}
|
||||
|
||||
defaultExpiryDuration = time.Duration(expirySecs) * time.Second
|
||||
|
@ -144,6 +130,39 @@ func GetDefaultExpiration(dsecs string) (time.Duration, error) {
|
|||
return defaultExpiryDuration, nil
|
||||
}
|
||||
|
||||
func updateClaimsExpiry(dsecs string, claims map[string]interface{}) error {
|
||||
expStr := claims["exp"]
|
||||
if expStr == "" {
|
||||
return ErrTokenExpired
|
||||
}
|
||||
|
||||
// No custom duration requested, the claims can be used as is.
|
||||
if dsecs == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
expAt, err := auth.ExpToInt64(expStr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
defaultExpiryDuration, err := GetDefaultExpiration(dsecs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Verify if JWT expiry is lesser than default expiry duration,
|
||||
// if that is the case then set the default expiration to be
|
||||
// from the JWT expiry claim.
|
||||
if time.Unix(expAt, 0).UTC().Sub(time.Now().UTC()) < defaultExpiryDuration {
|
||||
defaultExpiryDuration = time.Unix(expAt, 0).UTC().Sub(time.Now().UTC())
|
||||
} // else honor the specified expiry duration.
|
||||
|
||||
expiry := time.Now().UTC().Add(defaultExpiryDuration).Unix()
|
||||
claims["exp"] = strconv.FormatInt(expiry, 10) // update with new expiry.
|
||||
return nil
|
||||
}
|
||||
|
||||
// Validate - validates the access token.
|
||||
func (p *JWT) Validate(token, dsecs string) (map[string]interface{}, error) {
|
||||
jp := new(jwtgo.Parser)
|
||||
|
@ -173,25 +192,10 @@ func (p *JWT) Validate(token, dsecs string) (map[string]interface{}, error) {
|
|||
return nil, ErrTokenExpired
|
||||
}
|
||||
|
||||
expAt, err := expToInt64(claims["exp"])
|
||||
if err != nil {
|
||||
if err = updateClaimsExpiry(dsecs, claims); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
defaultExpiryDuration, err := GetDefaultExpiration(dsecs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if time.Unix(expAt, 0).UTC().Sub(time.Now().UTC()) < defaultExpiryDuration {
|
||||
defaultExpiryDuration = time.Unix(expAt, 0).UTC().Sub(time.Now().UTC())
|
||||
}
|
||||
|
||||
expiry := time.Now().UTC().Add(defaultExpiryDuration).Unix()
|
||||
if expAt < expiry {
|
||||
claims["exp"] = strconv.FormatInt(expAt, 64)
|
||||
}
|
||||
|
||||
return claims, nil
|
||||
|
||||
}
|
||||
|
@ -206,8 +210,9 @@ const (
|
|||
JwksURL = "jwks_url"
|
||||
ConfigURL = "config_url"
|
||||
ClaimPrefix = "claim_prefix"
|
||||
ClientID = "client_id"
|
||||
|
||||
EnvIdentityOpenIDState = "MINIO_IDENTITY_OPENID_STATE"
|
||||
EnvIdentityOpenIDClientID = "MINIO_IDENTITY_OPENID_CLIENT_ID"
|
||||
EnvIdentityOpenIDJWKSURL = "MINIO_IDENTITY_OPENID_JWKS_URL"
|
||||
EnvIdentityOpenIDURL = "MINIO_IDENTITY_OPENID_CONFIG_URL"
|
||||
EnvIdentityOpenIDClaimPrefix = "MINIO_IDENTITY_OPENID_CLAIM_PREFIX"
|
||||
|
@ -258,38 +263,50 @@ func parseDiscoveryDoc(u *xnet.URL, transport *http.Transport, closeRespFn func(
|
|||
// DefaultKVS - default config for OpenID config
|
||||
var (
|
||||
DefaultKVS = config.KVS{
|
||||
config.State: config.StateOff,
|
||||
config.Comment: "This is a default OpenID configuration",
|
||||
JwksURL: "",
|
||||
ConfigURL: "",
|
||||
ClaimPrefix: "",
|
||||
config.KV{
|
||||
Key: ConfigURL,
|
||||
Value: "",
|
||||
},
|
||||
config.KV{
|
||||
Key: ClientID,
|
||||
Value: "",
|
||||
},
|
||||
config.KV{
|
||||
Key: ClaimPrefix,
|
||||
Value: "",
|
||||
},
|
||||
config.KV{
|
||||
Key: JwksURL,
|
||||
Value: "",
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
// LookupConfig lookup jwks from config, override with any ENVs.
|
||||
func LookupConfig(kv config.KVS, transport *http.Transport, closeRespFn func(io.ReadCloser)) (c Config, err error) {
|
||||
if err = config.CheckValidKeys(config.IdentityOpenIDSubSys, kv, DefaultKVS); err != nil {
|
||||
return c, err
|
||||
}
|
||||
// Enabled returns if jwks is enabled.
|
||||
func Enabled(kvs config.KVS) bool {
|
||||
return kvs.Get(JwksURL) != ""
|
||||
}
|
||||
|
||||
stateBool, err := config.ParseBool(env.Get(EnvIdentityOpenIDState, kv.Get(config.State)))
|
||||
if err != nil {
|
||||
// LookupConfig lookup jwks from config, override with any ENVs.
|
||||
func LookupConfig(kvs config.KVS, transport *http.Transport, closeRespFn func(io.ReadCloser)) (c Config, err error) {
|
||||
if err = config.CheckValidKeys(config.IdentityOpenIDSubSys, kvs, DefaultKVS); err != nil {
|
||||
return c, err
|
||||
}
|
||||
|
||||
jwksURL := env.Get(EnvIamJwksURL, "") // Legacy
|
||||
if jwksURL == "" {
|
||||
jwksURL = env.Get(EnvIdentityOpenIDJWKSURL, kv.Get(JwksURL))
|
||||
jwksURL = env.Get(EnvIdentityOpenIDJWKSURL, kvs.Get(JwksURL))
|
||||
}
|
||||
|
||||
c = Config{
|
||||
ClaimPrefix: env.Get(EnvIdentityOpenIDClaimPrefix, kv.Get(ClaimPrefix)),
|
||||
ClaimPrefix: env.Get(EnvIdentityOpenIDClaimPrefix, kvs.Get(ClaimPrefix)),
|
||||
publicKeys: make(map[string]crypto.PublicKey),
|
||||
ClientID: env.Get(EnvIdentityOpenIDClientID, kvs.Get(ClientID)),
|
||||
transport: transport,
|
||||
closeRespFn: closeRespFn,
|
||||
}
|
||||
|
||||
configURL := env.Get(EnvIdentityOpenIDURL, kv.Get(ConfigURL))
|
||||
configURL := env.Get(EnvIdentityOpenIDURL, kvs.Get(ConfigURL))
|
||||
if configURL != "" {
|
||||
c.URL, err = xnet.ParseHTTPURL(configURL)
|
||||
if err != nil {
|
||||
|
@ -305,12 +322,6 @@ func LookupConfig(kv config.KVS, transport *http.Transport, closeRespFn func(io.
|
|||
jwksURL = c.DiscoveryDoc.JwksURI
|
||||
}
|
||||
|
||||
if stateBool {
|
||||
// This check is needed to ensure that empty Jwks urls are not allowed.
|
||||
if jwksURL == "" {
|
||||
return c, config.Error("'config_url' must be set to a proper OpenID discovery document URL")
|
||||
}
|
||||
}
|
||||
if jwksURL == "" {
|
||||
return c, nil
|
||||
}
|
||||
|
|
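Note: the new updateClaimsExpiry clamps the caller-requested STS duration to the JWT's own exp claim, so the derived credential never outlives the token, while GetDefaultExpiration bounds the request to 900–43200 seconds. A self-contained sketch of those two rules — the helper names are illustrative, not the vendored API:

package main

import (
	"errors"
	"fmt"
	"time"
)

// requestedExpiry mirrors GetDefaultExpiration: the requested duration
// must fall within 900s (15 minutes) and 43200s (12 hours); zero means
// the one-hour default.
func requestedExpiry(dsecs int64) (time.Duration, error) {
	if dsecs == 0 {
		return time.Hour, nil
	}
	if dsecs < 900 || dsecs > 43200 {
		return 0, errors.New("invalid duration")
	}
	return time.Duration(dsecs) * time.Second, nil
}

// clampToTokenExpiry mirrors updateClaimsExpiry: if the JWT expires
// sooner than the requested duration, the JWT's remaining lifetime wins.
func clampToTokenExpiry(tokenExpAt time.Time, want time.Duration) time.Duration {
	if remaining := time.Until(tokenExpAt); remaining < want {
		return remaining
	}
	return want
}

func main() {
	want, _ := requestedExpiry(3600)             // caller asks for 1h
	tokenExp := time.Now().Add(10 * time.Minute) // JWT expires in 10m
	fmt.Println(clampToTokenExpiry(tokenExp, want).Round(time.Minute)) // 10m0s
}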
38
vendor/github.com/minio/minio/cmd/config/identity/openid/jwt_test.go
generated
vendored
@@ -1,5 +1,5 @@
/*
* MinIO Cloud Storage, (C) 2018 MinIO, Inc.
* MinIO Cloud Storage, (C) 2018-2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

@@ -26,6 +26,42 @@ import (
xnet "github.com/minio/minio/pkg/net"
)

func TestUpdateClaimsExpiry(t *testing.T) {
testCases := []struct {
exp interface{}
dsecs string
expectedFailure bool
}{
{"", "", true},
{"-1", "0", true},
{"-1", "900", true},
{"1574812326", "900", false},
{1574812326, "900", false},
{int64(1574812326), "900", false},
{int(1574812326), "900", false},
{uint(1574812326), "900", false},
{uint64(1574812326), "900", false},
{json.Number("1574812326"), "900", false},
{1574812326.000, "900", false},
{time.Duration(3) * time.Minute, "900", false},
}

for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
claims := map[string]interface{}{}
claims["exp"] = testCase.exp
err := updateClaimsExpiry(testCase.dsecs, claims)
if err != nil && !testCase.expectedFailure {
t.Errorf("Expected success, got failure %s", err)
}
if err == nil && testCase.expectedFailure {
t.Error("Expected failure, got success")
}
})
}
}

func TestJWT(t *testing.T) {
const jsonkey = `{"keys":
[
34
vendor/github.com/minio/minio/cmd/config/identity/openid/legacy.go
generated
vendored
@@ -25,24 +25,22 @@ const (

// SetIdentityOpenID - One time migration code needed, for migrating from older config to new for OpenIDConfig.
func SetIdentityOpenID(s config.Config, cfg Config) {
if cfg.JWKS.URL == nil || cfg.JWKS.URL.String() == "" {
// No need to save not-enabled settings in new config.
return
}
s[config.IdentityOpenIDSubSys][config.Default] = config.KVS{
config.State: func() string {
if cfg.JWKS.URL == nil {
return config.StateOff
}
if cfg.JWKS.URL.String() == "" {
return config.StateOff
}
return config.StateOn
}(),
config.Comment: "Settings for OpenID, after migrating config",
JwksURL: func() string {
if cfg.JWKS.URL != nil {
return cfg.JWKS.URL.String()
}
return ""
}(),
ConfigURL: "",
ClaimPrefix: "",
config.KV{
Key: JwksURL,
Value: cfg.JWKS.URL.String(),
},
config.KV{
Key: ConfigURL,
Value: "",
},
config.KV{
Key: ClaimPrefix,
Value: "",
},
}
}
3
vendor/github.com/minio/minio/cmd/config/identity/openid/validators.go
generated
vendored
@@ -38,8 +38,7 @@ type Validator interface {

// ErrTokenExpired - error token expired
var (
ErrTokenExpired = errors.New("token expired")
ErrInvalidDuration = errors.New("duration higher than token expiry")
ErrTokenExpired = errors.New("token expired")
)

// Validators - holds list of providers indexed by provider id.
43
vendor/github.com/minio/minio/cmd/config/legacy.go
generated
vendored
@@ -23,33 +23,34 @@ import "github.com/minio/minio/pkg/auth"

// SetCredentials - One time migration code needed, for migrating from older config to new for server credentials.
func SetCredentials(c Config, cred auth.Credentials) {
creds, err := auth.CreateCredentials(cred.AccessKey, cred.SecretKey)
if err != nil {
return
}
if !creds.IsValid() {
return
}
c[CredentialsSubSys][Default] = KVS{
State: StateOn,
Comment: "Settings for credentials, after migrating config",
AccessKey: cred.AccessKey,
SecretKey: cred.SecretKey,
KV{
Key: AccessKey,
Value: cred.AccessKey,
},
KV{
Key: SecretKey,
Value: cred.SecretKey,
},
}
}

// SetRegion - One time migration code needed, for migrating from older config to new for server Region.
func SetRegion(c Config, name string) {
if name == "" {
return
}
c[RegionSubSys][Default] = KVS{
RegionName: name,
State: StateOn,
Comment: "Settings for Region, after migrating config",
}
}

// SetWorm - One time migration code needed, for migrating from older config to new for Worm mode.
func SetWorm(c Config, b bool) {
// Set the new value.
c[WormSubSys][Default] = KVS{
State: func() string {
if b {
return StateOn
}
return StateOff
}(),
Comment: "Settings for WORM, after migrating config",
KV{
Key: RegionName,
Value: name,
},
}
}
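Note: all of these one-time migrations now emit config.KVS as an ordered list of config.KV pairs rather than a map, which makes the key order written during migration deterministic. A self-contained sketch of that shape and of reading a value back — the types are mirrored locally, Get reflects the observable first-match behavior rather than the vendored implementation, and the sample keys are invented:

package main

import "fmt"

// Local mirrors of the config.KV / config.KVS shape used in the diff.
type KV struct {
	Key   string
	Value string
}

type KVS []KV

// Get scans in order and returns the first matching value.
func (kvs KVS) Get(key string) string {
	for _, kv := range kvs {
		if kv.Key == key {
			return kv.Value
		}
	}
	return ""
}

func main() {
	kvs := KVS{
		{Key: "name", Value: "us-west-rack2"},
		{Key: "comment", Value: "settings migrated from legacy config"},
	}
	fmt.Println(kvs.Get("name")) // us-west-rack2
}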
704
vendor/github.com/minio/minio/cmd/config/notify/help.go
generated
vendored
@ -23,138 +23,612 @@ import (
|
|||
|
||||
// Help template inputs for all notification targets
|
||||
var (
|
||||
HelpAMQP = config.HelpKV{
|
||||
config.State: "(Required) Is this server endpoint configuration active/enabled",
|
||||
config.Comment: "A comment to describe the AMQP target setting",
|
||||
target.AmqpURL: "(Required) AMQP server endpoint, e.g. `amqp://myuser:mypassword@localhost:5672`",
|
||||
target.AmqpExchange: "Name of the AMQP exchange",
|
||||
target.AmqpExchangeType: "Kind of AMQP exchange type",
|
||||
target.AmqpRoutingKey: "Routing key for publishing",
|
||||
target.AmqpMandatory: "Set this to 'on' for server to return an unroutable message with a Return method. If this flag is 'off', the server silently drops the message",
|
||||
target.AmqpDurable: "Set this to 'on' for queue to surive broker restarts",
|
||||
target.AmqpNoWait: "When no_wait is 'on', declare without waiting for a confirmation from the server",
|
||||
target.AmqpInternal: "Set this to 'on' for exchange to be not used directly by publishers, but only when bound to other exchanges",
|
||||
target.AmqpAutoDeleted: "Set this to 'on' for queue that has had at least one consumer is deleted when last consumer unsubscribes",
|
||||
target.AmqpDeliveryMode: "Delivery queue implementation use non-persistent (1) or persistent (2)",
|
||||
target.AmqpQueueLimit: "Enable persistent event store queue limit, defaults to '10000'",
|
||||
target.AmqpQueueDir: "Local directory where events are stored eg: '/home/events'",
|
||||
HelpAMQP = config.HelpKVS{
|
||||
config.HelpKV{
|
||||
Key: target.AmqpURL,
|
||||
Description: "AMQP server endpoint e.g. `amqp://myuser:mypassword@localhost:5672`",
|
||||
Type: "url",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.AmqpExchange,
|
||||
Description: "name of the AMQP exchange",
|
||||
Optional: true,
|
||||
Type: "string",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.AmqpExchangeType,
|
||||
Description: "kind of AMQP exchange type",
|
||||
Optional: true,
|
||||
Type: "string",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.AmqpRoutingKey,
|
||||
Description: "routing key for publishing",
|
||||
Optional: true,
|
||||
Type: "string",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.AmqpMandatory,
|
||||
Description: "set this to 'on' for server to return an unroutable message with a Return method. If this flag is 'off', the server silently drops the message",
|
||||
Optional: true,
|
||||
Type: "on|off",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.AmqpDurable,
|
||||
Description: "set this to 'on' for queue to survive broker restarts",
|
||||
Optional: true,
|
||||
Type: "on|off",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.AmqpNoWait,
|
||||
Description: "when no_wait is 'on', declare without waiting for a confirmation from the server",
|
||||
Optional: true,
|
||||
Type: "on|off",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.AmqpInternal,
|
||||
Description: "set this to 'on' for exchange to be not used directly by publishers, but only when bound to other exchanges",
|
||||
Optional: true,
|
||||
Type: "on|off",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.AmqpAutoDeleted,
|
||||
Description: "set this to 'on' for queue that has had at least one consumer is deleted when last consumer unsubscribes",
|
||||
Optional: true,
|
||||
Type: "on|off",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.AmqpDeliveryMode,
|
||||
Description: "delivery queue implementation use non-persistent (1) or persistent (2)",
|
||||
Optional: true,
|
||||
Type: "number",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.AmqpQueueDir,
|
||||
Description: "local directory where events are stored e.g. '/home/events'",
|
||||
Optional: true,
|
||||
Type: "path",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.AmqpQueueLimit,
|
||||
Description: "enable persistent event store queue limit, defaults to '10000'",
|
||||
Optional: true,
|
||||
Type: "number",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: config.Comment,
|
||||
Description: config.DefaultComment,
|
||||
Optional: true,
|
||||
Type: "sentence",
|
||||
},
|
||||
}
|
||||
|
||||
HelpKafka = config.HelpKV{
|
||||
config.State: "(Required) Is this server endpoint configuration active/enabled",
|
||||
config.Comment: "A comment to describe the Kafka target setting",
|
||||
target.KafkaTopic: "The Kafka topic for a given message",
|
||||
target.KafkaBrokers: "Command separated list of Kafka broker addresses",
|
||||
target.KafkaSASLUsername: "Username for SASL/PLAIN or SASL/SCRAM authentication",
|
||||
target.KafkaSASLPassword: "Password for SASL/PLAIN or SASL/SCRAM authentication",
|
||||
target.KafkaTLSClientAuth: "ClientAuth determines the Kafka server's policy for TLS client auth",
|
||||
target.KafkaSASLEnable: "Set this to 'on' to enable SASL authentication",
|
||||
target.KafkaTLSEnable: "Set this to 'on' to enable TLS",
|
||||
target.KafkaTLSSkipVerify: "Set this to 'on' to disable client verification of server certificate chain",
|
||||
target.KafkaQueueLimit: "Enable persistent event store queue limit, defaults to '10000'",
|
||||
target.KafkaQueueDir: "Local directory where events are stored eg: '/home/events'",
|
||||
HelpKafka = config.HelpKVS{
|
||||
config.HelpKV{
|
||||
Key: target.KafkaBrokers,
|
||||
Description: "comma separated list of Kafka broker addresses",
|
||||
Type: "csv",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.KafkaTopic,
|
||||
Description: "Kafka topic used for bucket notifications",
|
||||
Optional: true,
|
||||
Type: "string",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.KafkaSASLUsername,
|
||||
Description: "username for SASL/PLAIN or SASL/SCRAM authentication",
|
||||
Optional: true,
|
||||
Type: "string",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.KafkaSASLPassword,
|
||||
Description: "password for SASL/PLAIN or SASL/SCRAM authentication",
|
||||
Optional: true,
|
||||
Type: "string",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.KafkaTLSClientAuth,
|
||||
Description: "clientAuth determines the Kafka server's policy for TLS client auth",
|
||||
Optional: true,
|
||||
Type: "string",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.KafkaSASL,
|
||||
Description: "set this to 'on' to enable SASL authentication",
|
||||
Optional: true,
|
||||
Type: "on|off",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.KafkaTLS,
|
||||
Description: "set this to 'on' to enable TLS",
|
||||
Optional: true,
|
||||
Type: "on|off",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.KafkaTLSSkipVerify,
|
||||
Description: "set this to 'on' to disable client verification of server certificate chain",
|
||||
Optional: true,
|
||||
Type: "on|off",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.KafkaQueueDir,
|
||||
Description: "local directory where events are stored e.g. '/home/events'",
|
||||
Optional: true,
|
||||
Type: "path",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.KafkaQueueLimit,
|
||||
Description: "enable persistent event store queue limit, defaults to '10000'",
|
||||
Optional: true,
|
||||
Type: "number",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.KafkaClientTLSCert,
|
||||
Description: "Set path to client certificate",
|
||||
Optional: true,
|
||||
Type: "path",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.KafkaClientTLSKey,
|
||||
Description: "Set path to client key",
|
||||
Optional: true,
|
||||
Type: "path",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: config.Comment,
|
||||
Description: config.DefaultComment,
|
||||
Optional: true,
|
||||
Type: "sentence",
|
||||
},
|
||||
}
|
||||
|
||||
HelpMQTT = config.HelpKV{
|
||||
config.State: "(Required) Is this server endpoint configuration active/enabled",
|
||||
config.Comment: "A comment to describe the MQTT target setting",
|
||||
target.MqttBroker: "(Required) MQTT server endpoint, e.g. `tcp://localhost:1883`",
|
||||
target.MqttTopic: "(Required) Name of the MQTT topic to publish on, e.g. `minio`",
|
||||
target.MqttUsername: "Username to connect to the MQTT server (if required)",
|
||||
target.MqttPassword: "Password to connect to the MQTT server (if required)",
|
||||
target.MqttQoS: "Set the Quality of Service Level for MQTT endpoint",
|
||||
target.MqttKeepAliveInterval: "Optional keep alive interval for MQTT connections",
|
||||
target.MqttReconnectInterval: "Optional reconnect interval for MQTT connections",
|
||||
target.MqttQueueDir: "Local directory where events are stored eg: '/home/events'",
|
||||
target.MqttQueueLimit: "Enable persistent event store queue limit, defaults to '10000'",
|
||||
HelpMQTT = config.HelpKVS{
|
||||
config.HelpKV{
|
||||
Key: target.MqttBroker,
|
||||
Description: "MQTT server endpoint e.g. `tcp://localhost:1883`",
|
||||
Type: "uri",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.MqttTopic,
|
||||
Description: "name of the MQTT topic to publish on, e.g. `minio`",
|
||||
Type: "string",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.MqttUsername,
|
||||
Description: "username to connect to the MQTT server",
|
||||
Optional: true,
|
||||
Type: "string",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.MqttPassword,
|
||||
Description: "password to connect to the MQTT server",
|
||||
Optional: true,
|
||||
Type: "string",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.MqttQoS,
|
||||
Description: "set the Quality of Service Level for MQTT endpoint",
|
||||
Optional: true,
|
||||
Type: "number",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.MqttKeepAliveInterval,
|
||||
Description: "keep alive interval for MQTT connections",
|
||||
Optional: true,
|
||||
Type: "duration",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.MqttReconnectInterval,
|
||||
Description: "reconnect interval for MQTT connections",
|
||||
Optional: true,
|
||||
Type: "duration",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.MqttQueueDir,
|
||||
Description: "local directory where events are stored e.g. '/home/events'",
|
||||
Optional: true,
|
||||
Type: "path",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.MqttQueueLimit,
|
||||
Description: "enable persistent event store queue limit, defaults to '10000'",
|
||||
Optional: true,
|
||||
Type: "number",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: config.Comment,
|
||||
Description: config.DefaultComment,
|
||||
Optional: true,
|
||||
Type: "sentence",
|
||||
},
|
||||
}
|
||||
|
||||
HelpES = config.HelpKV{
|
||||
config.State: "(Required) Is this server endpoint configuration active/enabled",
|
||||
config.Comment: "A comment to describe the Elasticsearch target setting",
|
||||
target.ElasticURL: "(Required) The Elasticsearch server's address, with optional authentication info",
|
||||
target.ElasticFormat: "(Required) Either `namespace` or `access`, defaults to 'namespace'",
|
||||
target.ElasticIndex: "(Required) The name of an Elasticsearch index in which MinIO will store document",
|
||||
target.ElasticQueueDir: "Local directory where events are stored eg: '/home/events'",
|
||||
target.ElasticQueueLimit: "Enable persistent event store queue limit, defaults to '10000'",
|
||||
HelpES = config.HelpKVS{
|
||||
config.HelpKV{
|
||||
Key: target.ElasticURL,
|
||||
Description: "Elasticsearch server's address, with optional authentication info",
|
||||
Type: "url",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.ElasticFormat,
|
||||
Description: "set this to `namespace` or `access`, defaults to 'namespace'",
|
||||
Type: "namespace*|access",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.ElasticIndex,
|
||||
Description: "the name of an Elasticsearch index in which MinIO will store document",
|
||||
Type: "string",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.ElasticQueueDir,
|
||||
Description: "local directory where events are stored e.g. '/home/events'",
|
||||
Optional: true,
|
||||
Type: "path",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.ElasticQueueLimit,
|
||||
Description: "enable persistent event store queue limit, defaults to '10000'",
|
||||
Optional: true,
|
||||
Type: "number",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: config.Comment,
|
||||
Description: config.DefaultComment,
|
||||
Optional: true,
|
||||
Type: "sentence",
|
||||
},
|
||||
}
|
||||
|
||||
HelpWebhook = config.HelpKV{
|
||||
config.State: "(Required) Is this server endpoint configuration active/enabled",
|
||||
config.Comment: "A comment to describe the Webhook target setting",
|
||||
target.WebhookEndpoint: "Webhook server endpoint eg: http://localhost:8080/minio/events",
|
||||
target.WebhookAuthToken: "Authorization token used for webhook server endpoint (optional)",
|
||||
target.WebhookQueueLimit: "Enable persistent event store queue limit, defaults to '10000'",
|
||||
target.WebhookQueueDir: "Local directory where events are stored eg: '/home/events'",
|
||||
HelpWebhook = config.HelpKVS{
|
||||
config.HelpKV{
|
||||
Key: target.WebhookEndpoint,
|
||||
Description: "webhook server endpoint e.g. http://localhost:8080/minio/events",
|
||||
Type: "url",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.WebhookAuthToken,
|
||||
Description: "authorization token used for webhook server endpoint",
|
||||
Optional: true,
|
||||
Type: "string",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.WebhookQueueDir,
|
||||
Description: "local directory where events are stored e.g. '/home/events'",
|
||||
Optional: true,
|
||||
Type: "path",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.WebhookQueueLimit,
|
||||
Description: "enable persistent event store queue limit, defaults to '10000'",
|
||||
Optional: true,
|
||||
Type: "number",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: config.Comment,
|
||||
Description: config.DefaultComment,
|
||||
Optional: true,
|
||||
Type: "sentence",
|
||||
},
|
||||
}
|
||||
|
||||
HelpRedis = config.HelpKV{
|
||||
config.State: "(Required) Is this server endpoint configuration active/enabled",
|
||||
config.Comment: "A comment to describe the Redis target setting",
|
||||
target.RedisFormat: "Specify how data is populated, a hash is used in case of `namespace` format and a list in case of `access` format, defaults to 'namespace'",
|
||||
target.RedisAddress: "(Required) The Redis server's address. For example: `localhost:6379`",
|
||||
target.RedisKey: "The name of the redis key under which events are stored",
|
||||
target.RedisPassword: "(Optional) The Redis server's password",
|
||||
target.RedisQueueDir: "Local directory where events are stored eg: '/home/events'",
|
||||
target.RedisQueueLimit: "Enable persistent event store queue limit, defaults to '10000'",
|
||||
HelpRedis = config.HelpKVS{
|
||||
config.HelpKV{
|
||||
Key: target.RedisAddress,
|
||||
Description: "Redis server's address. For example: `localhost:6379`",
|
||||
Type: "address",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.RedisFormat,
|
||||
Description: "specifies how data is populated, a hash is used in case of `namespace` format and a list in case of `access` format, defaults to 'namespace'",
|
||||
Type: "namespace*|access",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.RedisKey,
|
||||
Description: "name of the Redis key under which events are stored",
|
||||
Type: "string",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.RedisPassword,
|
||||
Description: "Redis server's password",
|
||||
Optional: true,
|
||||
Type: "string",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.RedisQueueDir,
|
||||
Description: "local directory where events are stored e.g. '/home/events'",
|
||||
Optional: true,
|
||||
Type: "path",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.RedisQueueLimit,
|
||||
Description: "enable persistent event store queue limit, defaults to '10000'",
|
||||
Optional: true,
|
||||
Type: "number",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: config.Comment,
|
||||
Description: config.DefaultComment,
|
||||
Optional: true,
|
||||
Type: "sentence",
|
||||
},
|
||||
}
|
||||
|
||||
HelpPostgres = config.HelpKV{
|
||||
config.State: "(Required) Is this server endpoint configuration active/enabled",
|
||||
config.Comment: "A comment to describe the Postgres target setting",
|
||||
target.PostgresFormat: "Specify how data is populated, `namespace` format and `access` format, defaults to 'namespace'",
|
||||
target.PostgresConnectionString: "Connection string parameters for the PostgreSQL server",
|
||||
target.PostgresTable: "(Required) Table name in which events will be stored/updated. If the table does not exist, the MinIO server creates it at start-up",
|
||||
target.PostgresHost: "(Optional) Host name of the PostgreSQL server. Defaults to `localhost`. IPv6 host should be enclosed with `[` and `]`",
|
||||
target.PostgresPort: "(Optional) Port on which to connect to PostgreSQL server, defaults to `5432`",
|
||||
target.PostgresUsername: "Database username, defaults to user running the MinIO process if not specified",
|
||||
target.PostgresPassword: "Database password",
|
||||
target.PostgresDatabase: "Database name",
|
||||
target.PostgresQueueDir: "Local directory where events are stored eg: '/home/events'",
|
||||
target.PostgresQueueLimit: "Enable persistent event store queue limit, defaults to '10000'",
|
||||
HelpPostgres = config.HelpKVS{
|
||||
config.HelpKV{
|
||||
Key: target.PostgresConnectionString,
|
||||
Description: "connection string parameters for the PostgreSQL server",
|
||||
Type: "string",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.PostgresFormat,
|
||||
Description: "specifies how data is populated, `namespace` format and `access` format, defaults to 'namespace'",
|
||||
Type: "namespace*|access",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.PostgresTable,
|
||||
Description: "table name in which events will be stored/updated. If the table does not exist, the MinIO server creates it at start-up",
|
||||
Type: "string",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.PostgresHost,
|
||||
Description: "host name of the PostgreSQL server. Defaults to `localhost`. IPv6 host should be enclosed with `[` and `]`",
|
||||
Optional: true,
|
||||
Type: "hostname",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.PostgresPort,
|
||||
Description: "port on which to connect to PostgreSQL server, defaults to `5432`",
|
||||
Optional: true,
|
||||
Type: "port",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.PostgresUsername,
|
||||
Description: "database username, defaults to user running the MinIO process if not specified",
|
||||
Optional: true,
|
||||
Type: "string",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.PostgresPassword,
|
||||
Description: "database password",
|
||||
Optional: true,
|
||||
Type: "string",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.PostgresDatabase,
|
||||
Description: "postgres Database name",
|
||||
Optional: true,
|
||||
Type: "string",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.PostgresQueueDir,
|
||||
Description: "local directory where events are stored e.g. '/home/events'",
|
||||
Optional: true,
|
||||
Type: "path",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.PostgresQueueLimit,
|
||||
Description: "enable persistent event store queue limit, defaults to '10000'",
|
||||
Optional: true,
|
||||
Type: "number",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: config.Comment,
|
||||
Description: config.DefaultComment,
|
||||
Optional: true,
|
||||
Type: "sentence",
|
||||
},
|
||||
}
|
||||
|
||||
HelpMySQL = config.HelpKV{
|
||||
config.State: "(Required) Is this server endpoint configuration active/enabled",
|
||||
config.Comment: "A comment to describe the MySQL target setting",
|
||||
target.MySQLFormat: "Specify how data is populated, `namespace` format and `access` format, defaults to 'namespace'",
|
||||
target.MySQLHost: "Host name of the MySQL server (used only if `dsnString` is empty)",
|
||||
target.MySQLPort: "Port on which to connect to the MySQL server (used only if `dsn_string` is empty)",
|
||||
target.MySQLUsername: "Database user-name (used only if `dsnString` is empty)",
|
||||
target.MySQLPassword: "Database password (used only if `dsnString` is empty)",
|
||||
target.MySQLDatabase: "Database name (used only if `dsnString` is empty)",
|
||||
target.MySQLDSNString: "Data-Source-Name connection string for the MySQL server",
|
||||
target.MySQLTable: "(Required) Table name in which events will be stored/updated. If the table does not exist, the MinIO server creates it at start-up",
|
||||
target.MySQLQueueLimit: "Enable persistent event store queue limit, defaults to '10000'",
|
||||
target.MySQLQueueDir: "Local directory where events are stored eg: '/home/events'",
|
||||
HelpMySQL = config.HelpKVS{
|
||||
config.HelpKV{
|
||||
Key: target.MySQLDSNString,
|
||||
Description: "data source name connection string for the MySQL server",
|
||||
Type: "string",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.MySQLTable,
|
||||
Description: "table name in which events will be stored/updated. If the table does not exist, the MinIO server creates it at start-up",
|
||||
Type: "string",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.MySQLFormat,
|
||||
Description: "specifies how data is populated, `namespace` format and `access` format, defaults to 'namespace'",
|
||||
Type: "namespace*|access",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.MySQLHost,
|
||||
Description: "host name of the MySQL server (used only if `dsnString` is empty)",
|
||||
Optional: true,
|
||||
Type: "hostname",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.MySQLPort,
|
||||
Description: "port on which to connect to the MySQL server (used only if `dsn_string` is empty)",
|
||||
Optional: true,
|
||||
Type: "port",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.MySQLUsername,
|
||||
Description: "database user-name (used only if `dsnString` is empty)",
|
||||
Optional: true,
|
||||
Type: "string",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.MySQLPassword,
|
||||
Description: "database password (used only if `dsnString` is empty)",
|
||||
Optional: true,
|
||||
Type: "string",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.MySQLDatabase,
|
||||
Description: "database name (used only if `dsnString` is empty)",
|
||||
Optional: true,
|
||||
Type: "string",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.MySQLQueueDir,
|
||||
Description: "local directory where events are stored e.g. '/home/events'",
|
||||
Optional: true,
|
||||
Type: "path",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: target.MySQLQueueLimit,
|
||||
Description: "enable persistent event store queue limit, defaults to '10000'",
|
||||
Optional: true,
|
||||
Type: "number",
|
||||
},
|
||||
config.HelpKV{
|
||||
Key: config.Comment,
|
||||
Description: config.DefaultComment,
|
||||
Optional: true,
|
||||
Type: "sentence",
|
||||
},
|
||||
}
|
||||
|
||||
HelpNATS = config.HelpKV{
|
||||
config.State: "(Required) Is this server endpoint configuration active/enabled",
|
||||
config.Comment: "A comment to describe the NATS target setting",
|
||||
target.NATSAddress: "NATS server address eg: '0.0.0.0:4222'",
|
||||
target.NATSSubject: "NATS subject that represents this subscription",
|
||||
target.NATSUsername: "Username to be used when connecting to the server",
|
||||
target.NATSPassword: "Password to be used when connecting to a server",
|
||||
-		target.NATSToken:                       "Token to be used when connecting to a server",
-		target.NATSSecure:                      "Set this to 'on', enables TLS secure connections that skip server verification (not recommended)",
-		target.NATSPingInterval:                "Client ping commands interval to the server, disabled by default",
-		target.NATSStreamingEnable:             "Set this to 'on', to use streaming NATS server",
-		target.NATSStreamingAsync:              "Set this to 'on', to enable asynchronous publish, process the ACK or error state",
-		target.NATSStreamingMaxPubAcksInFlight: "Specifies how many messages can be published without getting ACKs back from NATS streaming server",
-		target.NATSStreamingClusterID:          "Unique ID for the NATS streaming cluster",
-		target.NATSQueueLimit:                  "Enable persistent event store queue limit, defaults to '10000'",
-		target.NATSQueueDir:                    "Local directory where events are stored eg: '/home/events'",
+	HelpNATS = config.HelpKVS{
+		config.HelpKV{Key: target.NATSAddress, Description: "NATS server address e.g. '0.0.0.0:4222'", Type: "address"},
+		config.HelpKV{Key: target.NATSSubject, Description: "NATS subject that represents this subscription", Type: "string"},
+		config.HelpKV{Key: target.NATSUsername, Description: "username to be used when connecting to the server", Optional: true, Type: "string"},
+		config.HelpKV{Key: target.NATSPassword, Description: "password to be used when connecting to a server", Optional: true, Type: "string"},
+		config.HelpKV{Key: target.NATSToken, Description: "token to be used when connecting to a server", Optional: true, Type: "string"},
+		config.HelpKV{Key: target.NATSSecure, Description: "set this to 'on', enables TLS secure connections that skip server verification (not recommended)", Optional: true, Type: "on|off"},
+		config.HelpKV{Key: target.NATSPingInterval, Description: "client ping commands interval to the server, disabled by default", Optional: true, Type: "duration"},
+		config.HelpKV{Key: target.NATSStreaming, Description: "set this to 'on', to use streaming NATS server", Optional: true, Type: "on|off"},
+		config.HelpKV{Key: target.NATSStreamingAsync, Description: "set this to 'on', to enable asynchronous publish, process the ACK or error state", Optional: true, Type: "on|off"},
+		config.HelpKV{Key: target.NATSStreamingMaxPubAcksInFlight, Description: "specifies how many messages can be published without getting ACKs back from NATS streaming server", Optional: true, Type: "number"},
+		config.HelpKV{Key: target.NATSStreamingClusterID, Description: "unique ID for the NATS streaming cluster", Optional: true, Type: "string"},
+		config.HelpKV{Key: target.NATSQueueLimit, Description: "enable persistent event store queue limit, defaults to '10000'", Optional: true, Type: "number"},
+		config.HelpKV{Key: target.NATSQueueDir, Description: "local directory where events are stored e.g. '/home/events'", Optional: true, Type: "path"},
+		config.HelpKV{Key: target.NATSCertAuthority, Description: "certificate chain of the target NATS server if self signed certs were used", Optional: true, Type: "string"},
+		config.HelpKV{Key: target.NATSClientCert, Description: "TLS Cert used for NATS configured to require client certificates", Optional: true, Type: "string"},
+		config.HelpKV{Key: target.NATSClientKey, Description: "TLS Key used for NATS configured to require client certificates", Optional: true, Type: "string"},
+		config.HelpKV{Key: config.Comment, Description: config.DefaultComment, Optional: true, Type: "sentence"},
+	}
 
-	HelpNSQ = config.HelpKV{
-		config.State:            "(Required) Is this server endpoint configuration active/enabled",
-		config.Comment:          "A comment to describe the NSQ target setting",
-		target.NSQAddress:       "NSQ server address eg: '127.0.0.1:4150'",
-		target.NSQTopic:         "NSQ topic unique per target",
-		target.NSQTLSEnable:     "Set this to 'on', to enable TLS negotiation",
-		target.NSQTLSSkipVerify: "Set this to 'on', to disable client verification of server certificates",
-		target.NSQQueueLimit:    "Enable persistent event store queue limit, defaults to '10000'",
-		target.NSQQueueDir:      "Local directory where events are stored eg: '/home/events'",
+	HelpNSQ = config.HelpKVS{
+		config.HelpKV{Key: target.NSQAddress, Description: "NSQ server address e.g. '127.0.0.1:4150'", Type: "address"},
+		config.HelpKV{Key: target.NSQTopic, Description: "NSQ topic unique per target", Type: "string"},
+		config.HelpKV{Key: target.NSQTLS, Description: "set this to 'on', to enable TLS negotiation", Optional: true, Type: "on|off"},
+		config.HelpKV{Key: target.NSQTLSSkipVerify, Description: "set this to 'on', to disable client verification of server certificates", Optional: true, Type: "on|off"},
+		config.HelpKV{Key: target.NSQQueueDir, Description: "local directory where events are stored e.g. '/home/events'", Optional: true, Type: "path"},
+		config.HelpKV{Key: target.NSQQueueLimit, Description: "enable persistent event store queue limit, defaults to '10000'", Optional: true, Type: "number"},
+		config.HelpKV{Key: config.Comment, Description: config.DefaultComment, Optional: true, Type: "sentence"},
+	}
 )
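The help tables above move from the map-backed config.HelpKV to the ordered config.HelpKVS slice. A minimal, self-contained sketch of why the ordered slice matters for help output (the type and field names mirror the shapes visible in this diff but are local assumptions, not MinIO's API):

package main

import "fmt"

// HelpKV mirrors the struct shape seen in the diff: an entry in an ordered help table.
type HelpKV struct {
	Key         string
	Description string
	Optional    bool
	Type        string
}

// HelpKVS is a slice, so iteration order is exactly declaration order,
// unlike a map whose range order is randomized in Go.
type HelpKVS []HelpKV

func main() {
	help := HelpKVS{
		{Key: "address", Description: "NATS server address e.g. '0.0.0.0:4222'", Type: "address"},
		{Key: "subject", Description: "NATS subject that represents this subscription", Type: "string"},
		{Key: "username", Description: "username to be used when connecting to the server", Optional: true, Type: "string"},
	}
	for _, kv := range help { // always prints in declaration order
		opt := ""
		if kv.Optional {
			opt = " (optional)"
		}
		fmt.Printf("%-10s (%s)%s: %s\n", kv.Key, kv.Type, opt, kv.Description)
	}
}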
|
617
vendor/github.com/minio/minio/cmd/config/notify/legacy.go
generated
vendored
617
vendor/github.com/minio/minio/cmd/config/notify/legacy.go
generated
vendored
|
@@ -11,64 +11,140 @@ import (
 // SetNotifyKafka - helper for config migration from older config.
 func SetNotifyKafka(s config.Config, kName string, cfg target.KafkaArgs) error {
 	if !cfg.Enable {
 		return nil
 	}
 
 	if err := cfg.Validate(); err != nil {
 		return err
 	}
 
 	s[config.NotifyKafkaSubSys][kName] = config.KVS{
-		config.State: func() string {
-			if cfg.Enable {
-				return config.StateOn
-			}
-			return config.StateOff
-		}(),
-		target.KafkaBrokers: func() string {
-			var brokers []string
-			for _, broker := range cfg.Brokers {
-				brokers = append(brokers, broker.String())
-			}
-			return strings.Join(brokers, config.ValueSeparator)
-		}(),
-		config.Comment:            "Settings for Kafka notification, after migrating config",
-		target.KafkaTopic:         cfg.Topic,
-		target.KafkaQueueDir:      cfg.QueueDir,
-		target.KafkaQueueLimit:    strconv.Itoa(int(cfg.QueueLimit)),
-		target.KafkaTLSEnable:     config.FormatBool(cfg.TLS.Enable),
-		target.KafkaTLSSkipVerify: config.FormatBool(cfg.TLS.SkipVerify),
-		target.KafkaTLSClientAuth: strconv.Itoa(int(cfg.TLS.ClientAuth)),
-		target.KafkaSASLEnable:    config.FormatBool(cfg.SASL.Enable),
-		target.KafkaSASLUsername:  cfg.SASL.User,
-		target.KafkaSASLPassword:  cfg.SASL.Password,
+		config.KV{Key: config.Enable, Value: config.EnableOn},
+		config.KV{Key: target.KafkaBrokers, Value: func() string {
+			var brokers []string
+			for _, broker := range cfg.Brokers {
+				brokers = append(brokers, broker.String())
+			}
+			return strings.Join(brokers, config.ValueSeparator)
+		}()},
+		config.KV{Key: target.KafkaTopic, Value: cfg.Topic},
+		config.KV{Key: target.KafkaQueueDir, Value: cfg.QueueDir},
+		config.KV{Key: target.KafkaClientTLSCert, Value: cfg.TLS.ClientTLSCert},
+		config.KV{Key: target.KafkaClientTLSKey, Value: cfg.TLS.ClientTLSKey},
+		config.KV{Key: target.KafkaQueueLimit, Value: strconv.Itoa(int(cfg.QueueLimit))},
+		config.KV{Key: target.KafkaTLS, Value: config.FormatBool(cfg.TLS.Enable)},
+		config.KV{Key: target.KafkaTLSSkipVerify, Value: config.FormatBool(cfg.TLS.SkipVerify)},
+		config.KV{Key: target.KafkaTLSClientAuth, Value: strconv.Itoa(int(cfg.TLS.ClientAuth))},
+		config.KV{Key: target.KafkaSASL, Value: config.FormatBool(cfg.SASL.Enable)},
+		config.KV{Key: target.KafkaSASLUsername, Value: cfg.SASL.User},
+		config.KV{Key: target.KafkaSASLPassword, Value: cfg.SASL.Password},
 	}
 	return nil
 }
 
 // SetNotifyAMQP - helper for config migration from older config.
 func SetNotifyAMQP(s config.Config, amqpName string, cfg target.AMQPArgs) error {
 	if !cfg.Enable {
 		return nil
 	}
 
 	if err := cfg.Validate(); err != nil {
 		return err
 	}
 
 	s[config.NotifyAMQPSubSys][amqpName] = config.KVS{
-		config.State: func() string {
-			if cfg.Enable {
-				return config.StateOn
-			}
-			return config.StateOff
-		}(),
-		config.Comment:          "Settings for AMQP notification, after migrating config",
-		target.AmqpURL:          cfg.URL.String(),
-		target.AmqpExchange:     cfg.Exchange,
-		target.AmqpRoutingKey:   cfg.RoutingKey,
-		target.AmqpExchangeType: cfg.ExchangeType,
-		target.AmqpDeliveryMode: strconv.Itoa(int(cfg.DeliveryMode)),
-		target.AmqpMandatory:    config.FormatBool(cfg.Mandatory),
-		target.AmqpInternal:     config.FormatBool(cfg.Immediate),
-		target.AmqpDurable:      config.FormatBool(cfg.Durable),
-		target.AmqpNoWait:       config.FormatBool(cfg.NoWait),
-		target.AmqpAutoDeleted:  config.FormatBool(cfg.AutoDeleted),
-		target.AmqpQueueDir:     cfg.QueueDir,
-		target.AmqpQueueLimit:   strconv.Itoa(int(cfg.QueueLimit)),
+		config.KV{Key: config.Enable, Value: config.EnableOn},
+		config.KV{Key: target.AmqpURL, Value: cfg.URL.String()},
+		config.KV{Key: target.AmqpExchange, Value: cfg.Exchange},
+		config.KV{Key: target.AmqpRoutingKey, Value: cfg.RoutingKey},
+		config.KV{Key: target.AmqpExchangeType, Value: cfg.ExchangeType},
+		config.KV{Key: target.AmqpDeliveryMode, Value: strconv.Itoa(int(cfg.DeliveryMode))},
+		config.KV{Key: target.AmqpMandatory, Value: config.FormatBool(cfg.Mandatory)},
+		config.KV{Key: target.AmqpInternal, Value: config.FormatBool(cfg.Immediate)},
+		config.KV{Key: target.AmqpDurable, Value: config.FormatBool(cfg.Durable)},
+		config.KV{Key: target.AmqpNoWait, Value: config.FormatBool(cfg.NoWait)},
+		config.KV{Key: target.AmqpAutoDeleted, Value: config.FormatBool(cfg.AutoDeleted)},
+		config.KV{Key: target.AmqpQueueDir, Value: cfg.QueueDir},
+		config.KV{Key: target.AmqpQueueLimit, Value: strconv.Itoa(int(cfg.QueueLimit))},
 	}
 
 	return nil
@@ -76,23 +152,39 @@ func SetNotifyAMQP(s config.Config, amqpName string, cfg target.AMQPArgs) error
 // SetNotifyES - helper for config migration from older config.
 func SetNotifyES(s config.Config, esName string, cfg target.ElasticsearchArgs) error {
 	if !cfg.Enable {
 		return nil
 	}
 
 	if err := cfg.Validate(); err != nil {
 		return err
 	}
 
 	s[config.NotifyESSubSys][esName] = config.KVS{
-		config.State: func() string {
-			if cfg.Enable {
-				return config.StateOn
-			}
-			return config.StateOff
-		}(),
-		config.Comment:           "Settings for Elasticsearch notification, after migrating config",
-		target.ElasticFormat:     cfg.Format,
-		target.ElasticURL:        cfg.URL.String(),
-		target.ElasticIndex:      cfg.Index,
-		target.ElasticQueueDir:   cfg.QueueDir,
-		target.ElasticQueueLimit: strconv.Itoa(int(cfg.QueueLimit)),
+		config.KV{Key: config.Enable, Value: config.EnableOn},
+		config.KV{Key: target.ElasticFormat, Value: cfg.Format},
+		config.KV{Key: target.ElasticURL, Value: cfg.URL.String()},
+		config.KV{Key: target.ElasticIndex, Value: cfg.Index},
+		config.KV{Key: target.ElasticQueueDir, Value: cfg.QueueDir},
+		config.KV{Key: target.ElasticQueueLimit, Value: strconv.Itoa(int(cfg.QueueLimit))},
 	}
 
 	return nil
@@ -100,24 +192,43 @@ func SetNotifyES(s config.Config, esName string, cfg target.ElasticsearchArgs) e
 // SetNotifyRedis - helper for config migration from older config.
 func SetNotifyRedis(s config.Config, redisName string, cfg target.RedisArgs) error {
 	if !cfg.Enable {
 		return nil
 	}
 
 	if err := cfg.Validate(); err != nil {
 		return err
 	}
 
 	s[config.NotifyRedisSubSys][redisName] = config.KVS{
-		config.State: func() string {
-			if cfg.Enable {
-				return config.StateOn
-			}
-			return config.StateOff
-		}(),
-		config.Comment:         "Settings for Redis notification, after migrating config",
-		target.RedisFormat:     cfg.Format,
-		target.RedisAddress:    cfg.Addr.String(),
-		target.RedisPassword:   cfg.Password,
-		target.RedisKey:        cfg.Key,
-		target.RedisQueueDir:   cfg.QueueDir,
-		target.RedisQueueLimit: strconv.Itoa(int(cfg.QueueLimit)),
+		config.KV{Key: config.Enable, Value: config.EnableOn},
+		config.KV{Key: target.RedisFormat, Value: cfg.Format},
+		config.KV{Key: target.RedisAddress, Value: cfg.Addr.String()},
+		config.KV{Key: target.RedisPassword, Value: cfg.Password},
+		config.KV{Key: target.RedisKey, Value: cfg.Key},
+		config.KV{Key: target.RedisQueueDir, Value: cfg.QueueDir},
+		config.KV{Key: target.RedisQueueLimit, Value: strconv.Itoa(int(cfg.QueueLimit))},
 	}
 
 	return nil
@@ -125,22 +236,35 @@ func SetNotifyRedis(s config.Config, redisName string, cfg target.RedisArgs) err
 // SetNotifyWebhook - helper for config migration from older config.
 func SetNotifyWebhook(s config.Config, whName string, cfg target.WebhookArgs) error {
 	if !cfg.Enable {
 		return nil
 	}
 
 	if err := cfg.Validate(); err != nil {
 		return err
 	}
 
 	s[config.NotifyWebhookSubSys][whName] = config.KVS{
-		config.State: func() string {
-			if cfg.Enable {
-				return config.StateOn
-			}
-			return config.StateOff
-		}(),
-		config.Comment:           "Settings for Webhook notification, after migrating config",
-		target.WebhookEndpoint:   cfg.Endpoint.String(),
-		target.WebhookAuthToken:  cfg.AuthToken,
-		target.WebhookQueueDir:   cfg.QueueDir,
-		target.WebhookQueueLimit: strconv.Itoa(int(cfg.QueueLimit)),
+		config.KV{Key: config.Enable, Value: config.EnableOn},
+		config.KV{Key: target.WebhookEndpoint, Value: cfg.Endpoint.String()},
+		config.KV{Key: target.WebhookAuthToken, Value: cfg.AuthToken},
+		config.KV{Key: target.WebhookQueueDir, Value: cfg.QueueDir},
+		config.KV{Key: target.WebhookQueueLimit, Value: strconv.Itoa(int(cfg.QueueLimit))},
 	}
 
 	return nil
@@ -148,28 +272,59 @@ func SetNotifyWebhook(s config.Config, whName string, cfg target.WebhookArgs) er
 // SetNotifyPostgres - helper for config migration from older config.
 func SetNotifyPostgres(s config.Config, psqName string, cfg target.PostgreSQLArgs) error {
 	if !cfg.Enable {
 		return nil
 	}
 
 	if err := cfg.Validate(); err != nil {
 		return err
 	}
 
 	s[config.NotifyPostgresSubSys][psqName] = config.KVS{
-		config.State: func() string {
-			if cfg.Enable {
-				return config.StateOn
-			}
-			return config.StateOff
-		}(),
-		config.Comment:                  "Settings for Postgres notification, after migrating config",
-		target.PostgresFormat:           cfg.Format,
-		target.PostgresConnectionString: cfg.ConnectionString,
-		target.PostgresTable:            cfg.Table,
-		target.PostgresHost:             cfg.Host.String(),
-		target.PostgresPort:             cfg.Port,
-		target.PostgresUsername:         cfg.User,
-		target.PostgresPassword:         cfg.Password,
-		target.PostgresDatabase:         cfg.Database,
-		target.PostgresQueueDir:         cfg.QueueDir,
-		target.PostgresQueueLimit:       strconv.Itoa(int(cfg.QueueLimit)),
+		config.KV{Key: config.Enable, Value: config.EnableOn},
+		config.KV{Key: target.PostgresFormat, Value: cfg.Format},
+		config.KV{Key: target.PostgresConnectionString, Value: cfg.ConnectionString},
+		config.KV{Key: target.PostgresTable, Value: cfg.Table},
+		config.KV{Key: target.PostgresHost, Value: cfg.Host.String()},
+		config.KV{Key: target.PostgresPort, Value: cfg.Port},
+		config.KV{Key: target.PostgresUsername, Value: cfg.User},
+		config.KV{Key: target.PostgresPassword, Value: cfg.Password},
+		config.KV{Key: target.PostgresDatabase, Value: cfg.Database},
+		config.KV{Key: target.PostgresQueueDir, Value: cfg.QueueDir},
+		config.KV{Key: target.PostgresQueueLimit, Value: strconv.Itoa(int(cfg.QueueLimit))},
 	}
 
 	return nil
@@ -177,24 +332,43 @@ func SetNotifyPostgres(s config.Config, psqName string, cfg target.PostgreSQLArg
 // SetNotifyNSQ - helper for config migration from older config.
 func SetNotifyNSQ(s config.Config, nsqName string, cfg target.NSQArgs) error {
 	if !cfg.Enable {
 		return nil
 	}
 
 	if err := cfg.Validate(); err != nil {
 		return err
 	}
 
 	s[config.NotifyNSQSubSys][nsqName] = config.KVS{
-		config.State: func() string {
-			if cfg.Enable {
-				return config.StateOn
-			}
-			return config.StateOff
-		}(),
-		config.Comment:          "Settings for NSQ notification, after migrating config",
-		target.NSQAddress:       cfg.NSQDAddress.String(),
-		target.NSQTopic:         cfg.Topic,
-		target.NSQTLSEnable:     config.FormatBool(cfg.TLS.Enable),
-		target.NSQTLSSkipVerify: config.FormatBool(cfg.TLS.SkipVerify),
-		target.NSQQueueDir:      cfg.QueueDir,
-		target.NSQQueueLimit:    strconv.Itoa(int(cfg.QueueLimit)),
+		config.KV{Key: config.Enable, Value: config.EnableOn},
+		config.KV{Key: target.NSQAddress, Value: cfg.NSQDAddress.String()},
+		config.KV{Key: target.NSQTopic, Value: cfg.Topic},
+		config.KV{Key: target.NSQTLS, Value: config.FormatBool(cfg.TLS.Enable)},
+		config.KV{Key: target.NSQTLSSkipVerify, Value: config.FormatBool(cfg.TLS.SkipVerify)},
+		config.KV{Key: target.NSQQueueDir, Value: cfg.QueueDir},
+		config.KV{Key: target.NSQQueueLimit, Value: strconv.Itoa(int(cfg.QueueLimit))},
 	}
 
 	return nil
@@ -202,36 +376,88 @@ func SetNotifyNSQ(s config.Config, nsqName string, cfg target.NSQArgs) error {
 // SetNotifyNATS - helper for config migration from older config.
 func SetNotifyNATS(s config.Config, natsName string, cfg target.NATSArgs) error {
 	if !cfg.Enable {
 		return nil
 	}
 
 	if err := cfg.Validate(); err != nil {
 		return err
 	}
 
 	s[config.NotifyNATSSubSys][natsName] = config.KVS{
-		config.State: func() string {
-			if cfg.Enable {
-				return config.StateOn
-			}
-			return config.StateOff
-		}(),
-		config.Comment:          "Settings for NATS notification, after migrating config",
-		target.NATSAddress:      cfg.Address.String(),
-		target.NATSSubject:      cfg.Subject,
-		target.NATSUsername:     cfg.Username,
-		target.NATSPassword:     cfg.Password,
-		target.NATSToken:        cfg.Token,
-		target.NATSSecure:       config.FormatBool(cfg.Secure),
-		target.NATSPingInterval: strconv.FormatInt(cfg.PingInterval, 10),
-		target.NATSQueueDir:     cfg.QueueDir,
-		target.NATSQueueLimit:   strconv.Itoa(int(cfg.QueueLimit)),
-		target.NATSStreamingEnable: func() string {
-			if cfg.Streaming.Enable {
-				return config.StateOn
-			}
-			return config.StateOff
-		}(),
-		target.NATSStreamingClusterID:          cfg.Streaming.ClusterID,
-		target.NATSStreamingAsync:              config.FormatBool(cfg.Streaming.Async),
-		target.NATSStreamingMaxPubAcksInFlight: strconv.Itoa(cfg.Streaming.MaxPubAcksInflight),
+		config.KV{Key: config.Enable, Value: config.EnableOn},
+		config.KV{Key: target.NATSAddress, Value: cfg.Address.String()},
+		config.KV{Key: target.NATSSubject, Value: cfg.Subject},
+		config.KV{Key: target.NATSUsername, Value: cfg.Username},
+		config.KV{Key: target.NATSPassword, Value: cfg.Password},
+		config.KV{Key: target.NATSToken, Value: cfg.Token},
+		config.KV{Key: target.NATSCertAuthority, Value: cfg.CertAuthority},
+		config.KV{Key: target.NATSClientCert, Value: cfg.ClientCert},
+		config.KV{Key: target.NATSClientKey, Value: cfg.ClientKey},
+		config.KV{Key: target.NATSSecure, Value: config.FormatBool(cfg.Secure)},
+		config.KV{Key: target.NATSPingInterval, Value: strconv.FormatInt(cfg.PingInterval, 10)},
+		config.KV{Key: target.NATSQueueDir, Value: cfg.QueueDir},
+		config.KV{Key: target.NATSQueueLimit, Value: strconv.Itoa(int(cfg.QueueLimit))},
+		config.KV{Key: target.NATSStreaming, Value: func() string {
+			if cfg.Streaming.Enable {
+				return config.EnableOn
+			}
+			return config.EnableOff
+		}()},
+		config.KV{Key: target.NATSStreamingClusterID, Value: cfg.Streaming.ClusterID},
+		config.KV{Key: target.NATSStreamingAsync, Value: config.FormatBool(cfg.Streaming.Async)},
+		config.KV{Key: target.NATSStreamingMaxPubAcksInFlight, Value: strconv.Itoa(cfg.Streaming.MaxPubAcksInflight)},
 	}
 
 	return nil
@@ -239,28 +465,59 @@ func SetNotifyNATS(s config.Config, natsName string, cfg target.NATSArgs) error
 // SetNotifyMySQL - helper for config migration from older config.
 func SetNotifyMySQL(s config.Config, sqlName string, cfg target.MySQLArgs) error {
 	if !cfg.Enable {
 		return nil
 	}
 
 	if err := cfg.Validate(); err != nil {
 		return err
 	}
 
 	s[config.NotifyMySQLSubSys][sqlName] = config.KVS{
-		config.State: func() string {
-			if cfg.Enable {
-				return config.StateOn
-			}
-			return config.StateOff
-		}(),
-		config.Comment:         "Settings for MySQL notification, after migrating config",
-		target.MySQLFormat:     cfg.Format,
-		target.MySQLDSNString:  cfg.DSN,
-		target.MySQLTable:      cfg.Table,
-		target.MySQLHost:       cfg.Host.String(),
-		target.MySQLPort:       cfg.Port,
-		target.MySQLUsername:   cfg.User,
-		target.MySQLPassword:   cfg.Password,
-		target.MySQLDatabase:   cfg.Database,
-		target.MySQLQueueDir:   cfg.QueueDir,
-		target.MySQLQueueLimit: strconv.Itoa(int(cfg.QueueLimit)),
+		config.KV{Key: config.Enable, Value: config.EnableOn},
+		config.KV{Key: target.MySQLFormat, Value: cfg.Format},
+		config.KV{Key: target.MySQLDSNString, Value: cfg.DSN},
+		config.KV{Key: target.MySQLTable, Value: cfg.Table},
+		config.KV{Key: target.MySQLHost, Value: cfg.Host.String()},
+		config.KV{Key: target.MySQLPort, Value: cfg.Port},
+		config.KV{Key: target.MySQLUsername, Value: cfg.User},
+		config.KV{Key: target.MySQLPassword, Value: cfg.Password},
+		config.KV{Key: target.MySQLDatabase, Value: cfg.Database},
+		config.KV{Key: target.MySQLQueueDir, Value: cfg.QueueDir},
+		config.KV{Key: target.MySQLQueueLimit, Value: strconv.Itoa(int(cfg.QueueLimit))},
 	}
 
 	return nil
@@ -268,27 +525,55 @@ func SetNotifyMySQL(s config.Config, sqlName string, cfg target.MySQLArgs) error
 // SetNotifyMQTT - helper for config migration from older config.
 func SetNotifyMQTT(s config.Config, mqttName string, cfg target.MQTTArgs) error {
 	if !cfg.Enable {
 		return nil
 	}
 
 	if err := cfg.Validate(); err != nil {
 		return err
 	}
 
 	s[config.NotifyMQTTSubSys][mqttName] = config.KVS{
-		config.State: func() string {
-			if cfg.Enable {
-				return config.StateOn
-			}
-			return config.StateOff
-		}(),
-		config.Comment:               "Settings for MQTT notification, after migrating config",
-		target.MqttBroker:            cfg.Broker.String(),
-		target.MqttTopic:             cfg.Topic,
-		target.MqttQoS:               fmt.Sprintf("%d", cfg.QoS),
-		target.MqttUsername:          cfg.User,
-		target.MqttPassword:          cfg.Password,
-		target.MqttReconnectInterval: cfg.MaxReconnectInterval.String(),
-		target.MqttKeepAliveInterval: cfg.KeepAlive.String(),
-		target.MqttQueueDir:          cfg.QueueDir,
-		target.MqttQueueLimit:        strconv.Itoa(int(cfg.QueueLimit)),
+		config.KV{Key: config.Enable, Value: config.EnableOn},
+		config.KV{Key: target.MqttBroker, Value: cfg.Broker.String()},
+		config.KV{Key: target.MqttTopic, Value: cfg.Topic},
+		config.KV{Key: target.MqttQoS, Value: fmt.Sprintf("%d", cfg.QoS)},
+		config.KV{Key: target.MqttUsername, Value: cfg.User},
+		config.KV{Key: target.MqttPassword, Value: cfg.Password},
+		config.KV{Key: target.MqttReconnectInterval, Value: cfg.MaxReconnectInterval.String()},
+		config.KV{Key: target.MqttKeepAliveInterval, Value: cfg.KeepAlive.String()},
+		config.KV{Key: target.MqttQueueDir, Value: cfg.QueueDir},
+		config.KV{Key: target.MqttQueueLimit, Value: strconv.Itoa(int(cfg.QueueLimit))},
 	}
 
 	return nil
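Every SetNotify* helper in the legacy.go diff above follows the same skeleton: skip disabled legacy targets, validate, then emit an ordered KV list. A minimal sketch of that pattern, with local stand-in types rather than MinIO's (the key names here are illustrative assumptions):

package main

import (
	"errors"
	"fmt"
)

// KV and KVS are local stand-ins for the ordered config entries in the diff.
type KV struct{ Key, Value string }
type KVS []KV

type legacyWebhookArgs struct {
	Enable    bool
	Endpoint  string
	AuthToken string
}

func (a legacyWebhookArgs) Validate() error {
	if a.Endpoint == "" {
		return errors.New("webhook endpoint cannot be empty")
	}
	return nil
}

// migrateWebhook mirrors the shared skeleton: disabled targets are not
// migrated, invalid ones fail, valid ones become an ordered KV list.
func migrateWebhook(dst map[string]KVS, name string, cfg legacyWebhookArgs) error {
	if !cfg.Enable {
		return nil
	}
	if err := cfg.Validate(); err != nil {
		return err
	}
	dst[name] = KVS{
		{Key: "enable", Value: "on"},
		{Key: "endpoint", Value: cfg.Endpoint},
		{Key: "auth_token", Value: cfg.AuthToken},
	}
	return nil
}

func main() {
	dst := map[string]KVS{}
	err := migrateWebhook(dst, "1", legacyWebhookArgs{Enable: true, Endpoint: "https://example.com/hook"})
	fmt.Println(dst, err)
}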
674
vendor/github.com/minio/minio/cmd/config/notify/parse.go
generated
vendored
@@ -340,30 +340,70 @@ func mergeTargets(cfgTargets map[string]config.KVS, envname string, defaultKVS c
 // DefaultKakfaKVS - default KV for kafka target
 var (
 	DefaultKafkaKVS = config.KVS{
-		config.State:              config.StateOff,
-		config.Comment:            "Default settings for Kafka notification",
-		target.KafkaTopic:         "",
-		target.KafkaBrokers:       "",
-		target.KafkaSASLUsername:  "",
-		target.KafkaSASLPassword:  "",
-		target.KafkaTLSClientAuth: "0",
-		target.KafkaSASLEnable:    config.StateOff,
-		target.KafkaTLSEnable:     config.StateOff,
-		target.KafkaTLSSkipVerify: config.StateOff,
-		target.KafkaQueueLimit:    "0",
-		target.KafkaQueueDir:      "",
+		config.KV{Key: config.Enable, Value: config.EnableOff},
+		config.KV{Key: target.KafkaTopic, Value: ""},
+		config.KV{Key: target.KafkaBrokers, Value: ""},
+		config.KV{Key: target.KafkaSASLUsername, Value: ""},
+		config.KV{Key: target.KafkaSASLPassword, Value: ""},
+		config.KV{Key: target.KafkaClientTLSCert, Value: ""},
+		config.KV{Key: target.KafkaClientTLSKey, Value: ""},
+		config.KV{Key: target.KafkaTLSClientAuth, Value: "0"},
+		config.KV{Key: target.KafkaSASL, Value: config.EnableOff},
+		config.KV{Key: target.KafkaTLS, Value: config.EnableOff},
+		config.KV{Key: target.KafkaTLSSkipVerify, Value: config.EnableOff},
+		config.KV{Key: target.KafkaQueueLimit, Value: "0"},
+		config.KV{Key: target.KafkaQueueDir, Value: ""},
 	}
 )
 
 // GetNotifyKafka - returns a map of registered notification 'kafka' targets
 func GetNotifyKafka(kafkaKVS map[string]config.KVS) (map[string]target.KafkaArgs, error) {
 	kafkaTargets := make(map[string]target.KafkaArgs)
-	for k, kv := range mergeTargets(kafkaKVS, target.EnvKafkaState, DefaultKafkaKVS) {
-		stateEnv := target.EnvKafkaState
+	for k, kv := range mergeTargets(kafkaKVS, target.EnvKafkaEnable, DefaultKafkaKVS) {
+		enableEnv := target.EnvKafkaEnable
 		if k != config.Default {
-			stateEnv = stateEnv + config.Default + k
+			enableEnv = enableEnv + config.Default + k
 		}
-		enabled, err := config.ParseBool(env.Get(stateEnv, kv.Get(config.State)))
+		enabled, err := config.ParseBool(env.Get(enableEnv, kv.Get(config.Enable)))
 		if err != nil {
 			return nil, err
 		}
@@ -377,7 +417,7 @@ func GetNotifyKafka(kafkaKVS map[string]config.KVS) (map[string]target.KafkaArgs
 		}
 		kafkaBrokers := env.Get(brokersEnv, kv.Get(target.KafkaBrokers))
 		if len(kafkaBrokers) == 0 {
-			return nil, config.Error("kafka 'brokers' cannot be empty")
+			return nil, config.Errorf(config.SafeModeKind, "kafka 'brokers' cannot be empty")
 		}
 		for _, s := range strings.Split(kafkaBrokers, config.ValueSeparator) {
 			var host *xnet.Host
@@ -427,7 +467,7 @@ func GetNotifyKafka(kafkaKVS map[string]config.KVS) (map[string]target.KafkaArgs
 			QueueLimit: queueLimit,
 		}
 
-		tlsEnableEnv := target.EnvKafkaTLSEnable
+		tlsEnableEnv := target.EnvKafkaTLS
 		if k != config.Default {
 			tlsEnableEnv = tlsEnableEnv + config.Default + k
 		}
@@ -435,10 +475,24 @@ func GetNotifyKafka(kafkaKVS map[string]config.KVS) (map[string]target.KafkaArgs
 		if k != config.Default {
 			tlsSkipVerifyEnv = tlsSkipVerifyEnv + config.Default + k
 		}
-		kafkaArgs.TLS.Enable = env.Get(tlsEnableEnv, kv.Get(target.KafkaTLSEnable)) == config.StateOn
-		kafkaArgs.TLS.SkipVerify = env.Get(tlsSkipVerifyEnv, kv.Get(target.KafkaTLSSkipVerify)) == config.StateOn
+
+		tlsClientTLSCertEnv := target.EnvKafkaClientTLSCert
+		if k != config.Default {
+			tlsClientTLSCertEnv = tlsClientTLSCertEnv + config.Default + k
+		}
+
+		tlsClientTLSKeyEnv := target.EnvKafkaClientTLSKey
+		if k != config.Default {
+			tlsClientTLSKeyEnv = tlsClientTLSKeyEnv + config.Default + k
+		}
+
+		kafkaArgs.TLS.Enable = env.Get(tlsEnableEnv, kv.Get(target.KafkaTLS)) == config.EnableOn
+		kafkaArgs.TLS.SkipVerify = env.Get(tlsSkipVerifyEnv, kv.Get(target.KafkaTLSSkipVerify)) == config.EnableOn
 		kafkaArgs.TLS.ClientAuth = tls.ClientAuthType(clientAuth)
+
+		kafkaArgs.TLS.ClientTLSCert = env.Get(tlsClientTLSCertEnv, kv.Get(target.KafkaClientTLSCert))
+		kafkaArgs.TLS.ClientTLSKey = env.Get(tlsClientTLSKeyEnv, kv.Get(target.KafkaClientTLSKey))
 
 		saslEnableEnv := target.EnvKafkaSASLEnable
 		if k != config.Default {
 			saslEnableEnv = saslEnableEnv + config.Default + k
@@ -451,7 +505,7 @@ func GetNotifyKafka(kafkaKVS map[string]config.KVS) (map[string]target.KafkaArgs
 		if k != config.Default {
 			saslPasswordEnv = saslPasswordEnv + config.Default + k
 		}
-		kafkaArgs.SASL.Enable = env.Get(saslEnableEnv, kv.Get(target.KafkaSASLEnable)) == config.StateOn
+		kafkaArgs.SASL.Enable = env.Get(saslEnableEnv, kv.Get(target.KafkaSASL)) == config.EnableOn
 		kafkaArgs.SASL.User = env.Get(saslUsernameEnv, kv.Get(target.KafkaSASLUsername))
 		kafkaArgs.SASL.Password = env.Get(saslPasswordEnv, kv.Get(target.KafkaSASLPassword))
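The lookup pattern repeated throughout these GetNotify* hunks is: a per-target environment variable (the base name, plus the default separator and target key for non-default targets) overrides the stored KV value. A simplified, self-contained sketch of that precedence; the env names, the "_" separator, and the helper here are stand-ins for the behavior this diff relies on, not MinIO's exact API:

package main

import (
	"fmt"
	"os"
)

const defaultTarget = "_" // stand-in for config.Default

// envGet returns the environment value when set, otherwise the fallback,
// mirroring env.Get(envName, kv.Get(key)) in the diff.
func envGet(name, fallback string) string {
	if v, ok := os.LookupEnv(name); ok {
		return v
	}
	return fallback
}

// resolve derives the per-target env name and applies env-over-config precedence.
func resolve(base, targetKey, stored string) string {
	envName := base
	if targetKey != defaultTarget {
		envName = base + defaultTarget + targetKey
	}
	return envGet(envName, stored)
}

func main() {
	os.Setenv("MINIO_NOTIFY_KAFKA_ENABLE_primary", "on")
	fmt.Println(resolve("MINIO_NOTIFY_KAFKA_ENABLE", "primary", "off"))       // "on" from env
	fmt.Println(resolve("MINIO_NOTIFY_KAFKA_ENABLE", defaultTarget, "off")) // "off" from config
}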
@@ -468,30 +522,59 @@ func GetNotifyKafka(kafkaKVS map[string]config.KVS) (map[string]target.KafkaArgs
 // DefaultMQTTKVS - default MQTT config
 var (
 	DefaultMQTTKVS = config.KVS{
-		config.State:                 config.StateOff,
-		config.Comment:               "Default settings for MQTT notification",
-		target.MqttBroker:            "",
-		target.MqttTopic:             "",
-		target.MqttPassword:          "",
-		target.MqttUsername:          "",
-		target.MqttQoS:               "0",
-		target.MqttKeepAliveInterval: "0s",
-		target.MqttReconnectInterval: "0s",
-		target.MqttQueueDir:          "",
-		target.MqttQueueLimit:        "0",
+		config.KV{Key: config.Enable, Value: config.EnableOff},
+		config.KV{Key: target.MqttBroker, Value: ""},
+		config.KV{Key: target.MqttTopic, Value: ""},
+		config.KV{Key: target.MqttPassword, Value: ""},
+		config.KV{Key: target.MqttUsername, Value: ""},
+		config.KV{Key: target.MqttQoS, Value: "0"},
+		config.KV{Key: target.MqttKeepAliveInterval, Value: "0s"},
+		config.KV{Key: target.MqttReconnectInterval, Value: "0s"},
+		config.KV{Key: target.MqttQueueDir, Value: ""},
+		config.KV{Key: target.MqttQueueLimit, Value: "0"},
 	}
 )
 
 // GetNotifyMQTT - returns a map of registered notification 'mqtt' targets
 func GetNotifyMQTT(mqttKVS map[string]config.KVS, rootCAs *x509.CertPool) (map[string]target.MQTTArgs, error) {
 	mqttTargets := make(map[string]target.MQTTArgs)
-	for k, kv := range mergeTargets(mqttKVS, target.EnvMQTTState, DefaultMQTTKVS) {
-		stateEnv := target.EnvMQTTState
+	for k, kv := range mergeTargets(mqttKVS, target.EnvMQTTEnable, DefaultMQTTKVS) {
+		enableEnv := target.EnvMQTTEnable
 		if k != config.Default {
-			stateEnv = stateEnv + config.Default + k
+			enableEnv = enableEnv + config.Default + k
 		}
 
-		enabled, err := config.ParseBool(env.Get(stateEnv, kv.Get(config.State)))
+		enabled, err := config.ParseBool(env.Get(enableEnv, kv.Get(config.Enable)))
 		if err != nil {
 			return nil, err
 		}
@@ -503,6 +586,7 @@ func GetNotifyMQTT(mqttKVS map[string]config.KVS, rootCAs *x509.CertPool) (map[s
 		if k != config.Default {
 			brokerEnv = brokerEnv + config.Default + k
 		}
+
 		brokerURL, err := xnet.ParseURL(env.Get(brokerEnv, kv.Get(target.MqttBroker)))
 		if err != nil {
 			return nil, err
@@ -593,31 +677,63 @@ func GetNotifyMQTT(mqttKVS map[string]config.KVS, rootCAs *x509.CertPool) (map[s
 // DefaultMySQLKVS - default KV for MySQL
 var (
 	DefaultMySQLKVS = config.KVS{
-		config.State:           config.StateOff,
-		config.Comment:         "Default settings for MySQL notification",
-		target.MySQLFormat:     formatNamespace,
-		target.MySQLHost:       "",
-		target.MySQLPort:       "",
-		target.MySQLUsername:   "",
-		target.MySQLPassword:   "",
-		target.MySQLDatabase:   "",
-		target.MySQLDSNString:  "",
-		target.MySQLTable:      "",
-		target.MySQLQueueLimit: "0",
-		target.MySQLQueueDir:   "",
+		config.KV{Key: config.Enable, Value: config.EnableOff},
+		config.KV{Key: target.MySQLFormat, Value: formatNamespace},
+		config.KV{Key: target.MySQLHost, Value: ""},
+		config.KV{Key: target.MySQLPort, Value: ""},
+		config.KV{Key: target.MySQLUsername, Value: ""},
+		config.KV{Key: target.MySQLPassword, Value: ""},
+		config.KV{Key: target.MySQLDatabase, Value: ""},
+		config.KV{Key: target.MySQLDSNString, Value: ""},
+		config.KV{Key: target.MySQLTable, Value: ""},
+		config.KV{Key: target.MySQLQueueDir, Value: ""},
+		config.KV{Key: target.MySQLQueueLimit, Value: "0"},
 	}
 )
 
 // GetNotifyMySQL - returns a map of registered notification 'mysql' targets
 func GetNotifyMySQL(mysqlKVS map[string]config.KVS) (map[string]target.MySQLArgs, error) {
 	mysqlTargets := make(map[string]target.MySQLArgs)
-	for k, kv := range mergeTargets(mysqlKVS, target.EnvMySQLState, DefaultMySQLKVS) {
-		stateEnv := target.EnvMySQLState
+	for k, kv := range mergeTargets(mysqlKVS, target.EnvMySQLEnable, DefaultMySQLKVS) {
+		enableEnv := target.EnvMySQLEnable
 		if k != config.Default {
-			stateEnv = stateEnv + config.Default + k
+			enableEnv = enableEnv + config.Default + k
 		}
 
-		enabled, err := config.ParseBool(env.Get(stateEnv, kv.Get(config.State)))
+		enabled, err := config.ParseBool(env.Get(enableEnv, kv.Get(config.Enable)))
 		if err != nil {
 			return nil, err
 		}
@@ -700,34 +816,87 @@ func GetNotifyMySQL(mysqlKVS map[string]config.KVS) (map[string]target.MySQLArgs
 // DefaultNATSKVS - NATS KV for nats config.
 var (
 	DefaultNATSKVS = config.KVS{
-		config.State:                           config.StateOff,
-		config.Comment:                         "Default settings for NATS notification",
-		target.NATSAddress:                     "",
-		target.NATSSubject:                     "",
-		target.NATSUsername:                    "",
-		target.NATSPassword:                    "",
-		target.NATSToken:                       "",
-		target.NATSSecure:                      config.StateOff,
-		target.NATSPingInterval:                "0",
-		target.NATSQueueLimit:                  "0",
-		target.NATSQueueDir:                    "",
-		target.NATSStreamingEnable:             config.StateOff,
-		target.NATSStreamingAsync:              config.StateOff,
-		target.NATSStreamingMaxPubAcksInFlight: "0",
-		target.NATSStreamingClusterID:          "",
+		config.KV{Key: config.Enable, Value: config.EnableOff},
+		config.KV{Key: target.NATSAddress, Value: ""},
+		config.KV{Key: target.NATSSubject, Value: ""},
+		config.KV{Key: target.NATSUsername, Value: ""},
+		config.KV{Key: target.NATSPassword, Value: ""},
+		config.KV{Key: target.NATSToken, Value: ""},
+		config.KV{Key: target.NATSCertAuthority, Value: ""},
+		config.KV{Key: target.NATSClientCert, Value: ""},
+		config.KV{Key: target.NATSClientKey, Value: ""},
+		config.KV{Key: target.NATSSecure, Value: config.EnableOff},
+		config.KV{Key: target.NATSPingInterval, Value: "0"},
+		config.KV{Key: target.NATSStreaming, Value: config.EnableOff},
+		config.KV{Key: target.NATSStreamingAsync, Value: config.EnableOff},
+		config.KV{Key: target.NATSStreamingMaxPubAcksInFlight, Value: "0"},
+		config.KV{Key: target.NATSStreamingClusterID, Value: ""},
+		config.KV{Key: target.NATSQueueDir, Value: ""},
+		config.KV{Key: target.NATSQueueLimit, Value: "0"},
 	}
 )
 
 // GetNotifyNATS - returns a map of registered notification 'nats' targets
 func GetNotifyNATS(natsKVS map[string]config.KVS) (map[string]target.NATSArgs, error) {
 	natsTargets := make(map[string]target.NATSArgs)
-	for k, kv := range mergeTargets(natsKVS, target.EnvNATSState, DefaultNATSKVS) {
-		stateEnv := target.EnvNATSState
+	for k, kv := range mergeTargets(natsKVS, target.EnvNATSEnable, DefaultNATSKVS) {
+		enableEnv := target.EnvNATSEnable
 		if k != config.Default {
-			stateEnv = stateEnv + config.Default + k
+			enableEnv = enableEnv + config.Default + k
 		}
 
-		enabled, err := config.ParseBool(env.Get(stateEnv, kv.Get(config.State)))
+		enabled, err := config.ParseBool(env.Get(enableEnv, kv.Get(config.Enable)))
 		if err != nil {
 			return nil, err
 		}
@@ -795,25 +964,43 @@ func GetNotifyNATS(natsKVS map[string]config.KVS) (map[string]target.NATSArgs, e
 			queueDirEnv = queueDirEnv + config.Default + k
 		}
 
-		natsArgs := target.NATSArgs{
-			Enable:       true,
-			Address:      *address,
-			Subject:      env.Get(subjectEnv, kv.Get(target.NATSSubject)),
-			Username:     env.Get(usernameEnv, kv.Get(target.NATSUsername)),
-			Password:     env.Get(passwordEnv, kv.Get(target.NATSPassword)),
-			Token:        env.Get(tokenEnv, kv.Get(target.NATSToken)),
-			Secure:       env.Get(secureEnv, kv.Get(target.NATSSecure)) == config.StateOn,
-			PingInterval: pingInterval,
-			QueueDir:     env.Get(queueDirEnv, kv.Get(target.NATSQueueDir)),
-			QueueLimit:   queueLimit,
+		certAuthorityEnv := target.EnvNATSCertAuthority
+		if k != config.Default {
+			certAuthorityEnv = certAuthorityEnv + config.Default + k
 		}
 
-		streamingEnableEnv := target.EnvNATSStreamingEnable
+		clientCertEnv := target.EnvNATSClientCert
+		if k != config.Default {
+			clientCertEnv = clientCertEnv + config.Default + k
+		}
+
+		clientKeyEnv := target.EnvNATSClientKey
+		if k != config.Default {
+			clientKeyEnv = clientKeyEnv + config.Default + k
+		}
+
+		natsArgs := target.NATSArgs{
+			Enable:        true,
+			Address:       *address,
+			Subject:       env.Get(subjectEnv, kv.Get(target.NATSSubject)),
+			Username:      env.Get(usernameEnv, kv.Get(target.NATSUsername)),
+			Password:      env.Get(passwordEnv, kv.Get(target.NATSPassword)),
+			CertAuthority: env.Get(certAuthorityEnv, kv.Get(target.NATSCertAuthority)),
+			ClientCert:    env.Get(clientCertEnv, kv.Get(target.NATSClientCert)),
+			ClientKey:     env.Get(clientKeyEnv, kv.Get(target.NATSClientKey)),
+			Token:         env.Get(tokenEnv, kv.Get(target.NATSToken)),
+			Secure:        env.Get(secureEnv, kv.Get(target.NATSSecure)) == config.EnableOn,
+			PingInterval:  pingInterval,
+			QueueDir:      env.Get(queueDirEnv, kv.Get(target.NATSQueueDir)),
+			QueueLimit:    queueLimit,
+		}
+
+		streamingEnableEnv := target.EnvNATSStreaming
 		if k != config.Default {
 			streamingEnableEnv = streamingEnableEnv + config.Default + k
 		}
 
-		streamingEnabled := env.Get(streamingEnableEnv, kv.Get(target.NATSStreamingEnable)) == config.StateOn
+		streamingEnabled := env.Get(streamingEnableEnv, kv.Get(target.NATSStreaming)) == config.EnableOn
 		if streamingEnabled {
 			asyncEnv := target.EnvNATSStreamingAsync
 			if k != config.Default {
@@ -834,7 +1021,7 @@ func GetNotifyNATS(natsKVS map[string]config.KVS) (map[string]target.NATSArgs, e
 		}
 		natsArgs.Streaming.Enable = streamingEnabled
 		natsArgs.Streaming.ClusterID = env.Get(clusterIDEnv, kv.Get(target.NATSStreamingClusterID))
-		natsArgs.Streaming.Async = env.Get(asyncEnv, kv.Get(target.NATSStreamingAsync)) == config.StateOn
+		natsArgs.Streaming.Async = env.Get(asyncEnv, kv.Get(target.NATSStreamingAsync)) == config.EnableOn
 		natsArgs.Streaming.MaxPubAcksInflight = maxPubAcksInflight
 	}
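These NATS hunks also show the gating pattern the rename preserves: values are compared against an "on" constant, and the streaming sub-configuration is only read when the switch is on. A small illustrative sketch; the constants, key names, and parsing here are assumptions for illustration, not MinIO's code:

package main

import "fmt"

const (
	enableOn  = "on"
	enableOff = "off"
)

type streamingArgs struct {
	Enable    bool
	ClusterID string
	Async     bool
}

// parseStreaming reads the streaming sub-keys only when the "streaming"
// switch compares equal to the on constant, mirroring the hunk above.
func parseStreaming(kv map[string]string) streamingArgs {
	args := streamingArgs{Enable: kv["streaming"] == enableOn}
	if !args.Enable {
		return args // skip sub-keys entirely when streaming is off
	}
	args.ClusterID = kv["streaming_cluster_id"]
	args.Async = kv["streaming_async"] == enableOn
	return args
}

func main() {
	fmt.Printf("%+v\n", parseStreaming(map[string]string{
		"streaming": enableOn, "streaming_cluster_id": "test-cluster", "streaming_async": enableOn,
	}))
}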
@@ -850,27 +1037,47 @@ func GetNotifyNATS(natsKVS map[string]config.KVS) (map[string]target.NATSArgs, e
 // DefaultNSQKVS - NSQ KV for config
 var (
 	DefaultNSQKVS = config.KVS{
-		config.State:            config.StateOff,
-		config.Comment:          "Default settings for NSQ notification",
-		target.NSQAddress:       "",
-		target.NSQTopic:         "",
-		target.NSQTLSEnable:     config.StateOff,
-		target.NSQTLSSkipVerify: config.StateOff,
-		target.NSQQueueLimit:    "0",
-		target.NSQQueueDir:      "",
+		config.KV{Key: config.Enable, Value: config.EnableOff},
+		config.KV{Key: target.NSQAddress, Value: ""},
+		config.KV{Key: target.NSQTopic, Value: ""},
+		config.KV{Key: target.NSQTLS, Value: config.EnableOff},
+		config.KV{Key: target.NSQTLSSkipVerify, Value: config.EnableOff},
+		config.KV{Key: target.NSQQueueDir, Value: ""},
+		config.KV{Key: target.NSQQueueLimit, Value: "0"},
 	}
 )
 
 // GetNotifyNSQ - returns a map of registered notification 'nsq' targets
 func GetNotifyNSQ(nsqKVS map[string]config.KVS) (map[string]target.NSQArgs, error) {
 	nsqTargets := make(map[string]target.NSQArgs)
-	for k, kv := range mergeTargets(nsqKVS, target.EnvNSQState, DefaultNSQKVS) {
-		stateEnv := target.EnvNSQState
+	for k, kv := range mergeTargets(nsqKVS, target.EnvNSQEnable, DefaultNSQKVS) {
+		enableEnv := target.EnvNSQEnable
 		if k != config.Default {
-			stateEnv = stateEnv + config.Default + k
+			enableEnv = enableEnv + config.Default + k
 		}
 
-		enabled, err := config.ParseBool(env.Get(stateEnv, kv.Get(config.State)))
+		enabled, err := config.ParseBool(env.Get(enableEnv, kv.Get(config.Enable)))
 		if err != nil {
 			return nil, err
 		}
@@ -886,7 +1093,7 @@ func GetNotifyNSQ(nsqKVS map[string]config.KVS) (map[string]target.NSQArgs, erro
 		if err != nil {
 			return nil, err
 		}
-		tlsEnableEnv := target.EnvNSQTLSEnable
+		tlsEnableEnv := target.EnvNSQTLS
 		if k != config.Default {
 			tlsEnableEnv = tlsEnableEnv + config.Default + k
 		}
@@ -920,8 +1127,8 @@ func GetNotifyNSQ(nsqKVS map[string]config.KVS) (map[string]target.NSQArgs, erro
 			QueueDir:   env.Get(queueDirEnv, kv.Get(target.NSQQueueDir)),
 			QueueLimit: queueLimit,
 		}
-		nsqArgs.TLS.Enable = env.Get(tlsEnableEnv, kv.Get(target.NSQTLSEnable)) == config.StateOn
-		nsqArgs.TLS.SkipVerify = env.Get(tlsSkipVerifyEnv, kv.Get(target.NSQTLSSkipVerify)) == config.StateOn
+		nsqArgs.TLS.Enable = env.Get(tlsEnableEnv, kv.Get(target.NSQTLS)) == config.EnableOn
+		nsqArgs.TLS.SkipVerify = env.Get(tlsSkipVerifyEnv, kv.Get(target.NSQTLSSkipVerify)) == config.EnableOn
 
 		if err = nsqArgs.Validate(); err != nil {
 			return nil, err
@@ -935,31 +1142,63 @@ func GetNotifyNSQ(nsqKVS map[string]config.KVS) (map[string]target.NSQArgs, erro
 // DefaultPostgresKVS - default Postgres KV for server config.
 var (
 	DefaultPostgresKVS = config.KVS{
-		config.State:                    config.StateOff,
-		config.Comment:                  "Default settings for Postgres notification",
-		target.PostgresFormat:           formatNamespace,
-		target.PostgresConnectionString: "",
-		target.PostgresTable:            "",
-		target.PostgresHost:             "",
-		target.PostgresPort:             "",
-		target.PostgresUsername:         "",
-		target.PostgresPassword:         "",
-		target.PostgresDatabase:         "",
-		target.PostgresQueueDir:         "",
-		target.PostgresQueueLimit:       "0",
+		config.KV{Key: config.Enable, Value: config.EnableOff},
+		config.KV{Key: target.PostgresFormat, Value: formatNamespace},
+		config.KV{Key: target.PostgresConnectionString, Value: ""},
+		config.KV{Key: target.PostgresTable, Value: ""},
+		config.KV{Key: target.PostgresHost, Value: ""},
+		config.KV{Key: target.PostgresPort, Value: ""},
+		config.KV{Key: target.PostgresUsername, Value: ""},
+		config.KV{Key: target.PostgresPassword, Value: ""},
+		config.KV{Key: target.PostgresDatabase, Value: ""},
+		config.KV{Key: target.PostgresQueueDir, Value: ""},
+		config.KV{Key: target.PostgresQueueLimit, Value: "0"},
 	}
 )
 
 // GetNotifyPostgres - returns a map of registered notification 'postgres' targets
 func GetNotifyPostgres(postgresKVS map[string]config.KVS) (map[string]target.PostgreSQLArgs, error) {
 	psqlTargets := make(map[string]target.PostgreSQLArgs)
-	for k, kv := range mergeTargets(postgresKVS, target.EnvPostgresState, DefaultPostgresKVS) {
-		stateEnv := target.EnvPostgresState
+	for k, kv := range mergeTargets(postgresKVS, target.EnvPostgresEnable, DefaultPostgresKVS) {
+		enableEnv := target.EnvPostgresEnable
 		if k != config.Default {
-			stateEnv = stateEnv + config.Default + k
+			enableEnv = enableEnv + config.Default + k
 		}
 
-		enabled, err := config.ParseBool(env.Get(stateEnv, kv.Get(config.State)))
+		enabled, err := config.ParseBool(env.Get(enableEnv, kv.Get(config.Enable)))
 		if err != nil {
 			return nil, err
 		}
@@ -1052,27 +1291,47 @@ func GetNotifyPostgres(postgresKVS map[string]config.KVS) (map[string]target.Pos
 // DefaultRedisKVS - default KV for redis config
 var (
 	DefaultRedisKVS = config.KVS{
-		config.State:           config.StateOff,
-		config.Comment:         "Default settings for Redis notification",
-		target.RedisFormat:     formatNamespace,
-		target.RedisAddress:    "",
-		target.RedisKey:        "",
-		target.RedisPassword:   "",
-		target.RedisQueueDir:   "",
-		target.RedisQueueLimit: "0",
+		config.KV{Key: config.Enable, Value: config.EnableOff},
+		config.KV{Key: target.RedisFormat, Value: formatNamespace},
+		config.KV{Key: target.RedisAddress, Value: ""},
+		config.KV{Key: target.RedisKey, Value: ""},
+		config.KV{Key: target.RedisPassword, Value: ""},
+		config.KV{Key: target.RedisQueueDir, Value: ""},
+		config.KV{Key: target.RedisQueueLimit, Value: "0"},
 	}
 )
 
 // GetNotifyRedis - returns a map of registered notification 'redis' targets
 func GetNotifyRedis(redisKVS map[string]config.KVS) (map[string]target.RedisArgs, error) {
 	redisTargets := make(map[string]target.RedisArgs)
-	for k, kv := range mergeTargets(redisKVS, target.EnvRedisState, DefaultRedisKVS) {
-		stateEnv := target.EnvRedisState
+	for k, kv := range mergeTargets(redisKVS, target.EnvRedisEnable, DefaultRedisKVS) {
+		enableEnv := target.EnvRedisEnable
 		if k != config.Default {
-			stateEnv = stateEnv + config.Default + k
+			enableEnv = enableEnv + config.Default + k
 		}
 
-		enabled, err := config.ParseBool(env.Get(stateEnv, kv.Get(config.State)))
+		enabled, err := config.ParseBool(env.Get(enableEnv, kv.Get(config.Enable)))
 		if err != nil {
 			return nil, err
 		}
@@ -1132,24 +1391,38 @@ func GetNotifyRedis(redisKVS map[string]config.KVS) (map[string]target.RedisArgs
 // DefaultWebhookKVS - default KV for webhook config
 var (
 	DefaultWebhookKVS = config.KVS{
-		config.State:             config.StateOff,
-		config.Comment:           "Default settings for Webhook notification",
-		target.WebhookEndpoint:   "",
-		target.WebhookAuthToken:  "",
-		target.WebhookQueueLimit: "0",
-		target.WebhookQueueDir:   "",
+		config.KV{Key: config.Enable, Value: config.EnableOff},
+		config.KV{Key: target.WebhookEndpoint, Value: ""},
+		config.KV{Key: target.WebhookAuthToken, Value: ""},
+		config.KV{Key: target.WebhookQueueLimit, Value: "0"},
+		config.KV{Key: target.WebhookQueueDir, Value: ""},
 	}
 )
 
 // GetNotifyWebhook - returns a map of registered notification 'webhook' targets
 func GetNotifyWebhook(webhookKVS map[string]config.KVS, rootCAs *x509.CertPool) (map[string]target.WebhookArgs, error) {
 	webhookTargets := make(map[string]target.WebhookArgs)
-	for k, kv := range mergeTargets(webhookKVS, target.EnvWebhookState, DefaultWebhookKVS) {
-		stateEnv := target.EnvWebhookState
+	for k, kv := range mergeTargets(webhookKVS, target.EnvWebhookEnable, DefaultWebhookKVS) {
+		enableEnv := target.EnvWebhookEnable
 		if k != config.Default {
-			stateEnv = stateEnv + config.Default + k
+			enableEnv = enableEnv + config.Default + k
 		}
-		enabled, err := config.ParseBool(env.Get(stateEnv, kv.Get(config.State)))
+		enabled, err := config.ParseBool(env.Get(enableEnv, kv.Get(config.Enable)))
 		if err != nil {
 			return nil, err
 		}
@@ -1200,25 +1473,42 @@ func GetNotifyWebhook(webhookKVS map[string]config.KVS, rootCAs *x509.CertPool)
 // DefaultESKVS - default KV config for Elasticsearch target
 var (
 	DefaultESKVS = config.KVS{
-		config.State:             config.StateOff,
-		config.Comment:           "Default settings for Elasticsearch notification",
-		target.ElasticURL:        "",
-		target.ElasticFormat:     formatNamespace,
-		target.ElasticIndex:      "",
-		target.ElasticQueueDir:   "",
-		target.ElasticQueueLimit: "0",
+		config.KV{Key: config.Enable, Value: config.EnableOff},
+		config.KV{Key: target.ElasticURL, Value: ""},
+		config.KV{Key: target.ElasticFormat, Value: formatNamespace},
+		config.KV{Key: target.ElasticIndex, Value: ""},
+		config.KV{Key: target.ElasticQueueDir, Value: ""},
+		config.KV{Key: target.ElasticQueueLimit, Value: "0"},
 	}
 )
 
 // GetNotifyES - returns a map of registered notification 'elasticsearch' targets
 func GetNotifyES(esKVS map[string]config.KVS) (map[string]target.ElasticsearchArgs, error) {
 	esTargets := make(map[string]target.ElasticsearchArgs)
-	for k, kv := range mergeTargets(esKVS, target.EnvElasticState, DefaultESKVS) {
-		stateEnv := target.EnvElasticState
+	for k, kv := range mergeTargets(esKVS, target.EnvElasticEnable, DefaultESKVS) {
+		enableEnv := target.EnvElasticEnable
 		if k != config.Default {
-			stateEnv = stateEnv + config.Default + k
+			enableEnv = enableEnv + config.Default + k
 		}
-		enabled, err := config.ParseBool(env.Get(stateEnv, kv.Get(config.State)))
+		enabled, err := config.ParseBool(env.Get(enableEnv, kv.Get(config.Enable)))
 		if err != nil {
 			return nil, err
 		}
@@ -1280,32 +1570,70 @@ func GetNotifyES(esKVS map[string]config.KVS) (map[string]target.ElasticsearchAr
 // DefaultAMQPKVS - default KV for AMQP config
 var (
 	DefaultAMQPKVS = config.KVS{
-		config.State:            config.StateOff,
-		config.Comment:          "Default settings for AMQP notification",
-		target.AmqpURL:          "",
-		target.AmqpExchange:     "",
-		target.AmqpExchangeType: "",
-		target.AmqpRoutingKey:   "",
-		target.AmqpMandatory:    config.StateOff,
-		target.AmqpDurable:      config.StateOff,
-		target.AmqpNoWait:       config.StateOff,
-		target.AmqpInternal:     config.StateOff,
-		target.AmqpAutoDeleted:  config.StateOff,
-		target.AmqpDeliveryMode: "0",
-		target.AmqpQueueLimit:   "0",
-		target.AmqpQueueDir:     "",
+		config.KV{Key: config.Enable, Value: config.EnableOff},
+		config.KV{Key: target.AmqpURL, Value: ""},
+		config.KV{Key: target.AmqpExchange, Value: ""},
+		config.KV{Key: target.AmqpExchangeType, Value: ""},
+		config.KV{Key: target.AmqpRoutingKey, Value: ""},
+		config.KV{Key: target.AmqpMandatory, Value: config.EnableOff},
+		config.KV{Key: target.AmqpDurable, Value: config.EnableOff},
+		config.KV{Key: target.AmqpNoWait, Value: config.EnableOff},
+		config.KV{Key: target.AmqpInternal, Value: config.EnableOff},
+		config.KV{Key: target.AmqpAutoDeleted, Value: config.EnableOff},
+		config.KV{Key: target.AmqpDeliveryMode, Value: "0"},
+		config.KV{Key: target.AmqpQueueLimit, Value: "0"},
+		config.KV{Key: target.AmqpQueueDir, Value: ""},
 	}
 )
 
 // GetNotifyAMQP - returns a map of registered notification 'amqp' targets
 func GetNotifyAMQP(amqpKVS map[string]config.KVS) (map[string]target.AMQPArgs, error) {
 	amqpTargets := make(map[string]target.AMQPArgs)
-	for k, kv := range mergeTargets(amqpKVS, target.EnvAMQPState, DefaultAMQPKVS) {
-		stateEnv := target.EnvAMQPState
+	for k, kv := range mergeTargets(amqpKVS, target.EnvAMQPEnable, DefaultAMQPKVS) {
+		enableEnv := target.EnvAMQPEnable
 		if k != config.Default {
-			stateEnv = stateEnv + config.Default + k
+			enableEnv = enableEnv + config.Default + k
 		}
-		enabled, err := config.ParseBool(env.Get(stateEnv, kv.Get(config.State)))
+		enabled, err := config.ParseBool(env.Get(enableEnv, kv.Get(config.Enable)))
 		if err != nil {
 			return nil, err
 		}
@ -1383,12 +1711,12 @@ func GetNotifyAMQP(amqpKVS map[string]config.KVS) (map[string]target.AMQPArgs, e
|
|||
RoutingKey: env.Get(routingKeyEnv, kv.Get(target.AmqpRoutingKey)),
|
||||
ExchangeType: env.Get(exchangeTypeEnv, kv.Get(target.AmqpExchangeType)),
|
||||
DeliveryMode: uint8(deliveryMode),
|
||||
Mandatory: env.Get(mandatoryEnv, kv.Get(target.AmqpMandatory)) == config.StateOn,
|
||||
Immediate: env.Get(immediateEnv, kv.Get(target.AmqpImmediate)) == config.StateOn,
|
||||
Durable: env.Get(durableEnv, kv.Get(target.AmqpDurable)) == config.StateOn,
|
||||
Internal: env.Get(internalEnv, kv.Get(target.AmqpInternal)) == config.StateOn,
|
||||
NoWait: env.Get(noWaitEnv, kv.Get(target.AmqpNoWait)) == config.StateOn,
|
||||
AutoDeleted: env.Get(autoDeletedEnv, kv.Get(target.AmqpAutoDeleted)) == config.StateOn,
|
||||
Mandatory: env.Get(mandatoryEnv, kv.Get(target.AmqpMandatory)) == config.EnableOn,
|
||||
Immediate: env.Get(immediateEnv, kv.Get(target.AmqpImmediate)) == config.EnableOn,
|
||||
Durable: env.Get(durableEnv, kv.Get(target.AmqpDurable)) == config.EnableOn,
|
||||
Internal: env.Get(internalEnv, kv.Get(target.AmqpInternal)) == config.EnableOn,
|
||||
NoWait: env.Get(noWaitEnv, kv.Get(target.AmqpNoWait)) == config.EnableOn,
|
||||
AutoDeleted: env.Get(autoDeletedEnv, kv.Get(target.AmqpAutoDeleted)) == config.EnableOn,
|
||||
QueueDir: env.Get(queueDirEnv, kv.Get(target.AmqpQueueDir)),
|
||||
QueueLimit: queueLimit,
|
||||
}
|
||||
|
|
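The AMQP hunks above capture two changes: the defaults move from a map-style config.KVS (keyed with config.State) to an ordered list of config.KV entries, and the _STATE environment lookup becomes _ENABLE. A minimal sketch of the env-over-config fallback with a per-target suffix follows; envOrKV and the "_" separator are illustrative assumptions, not the vendored helpers.

package main

import (
	"fmt"
	"os"
)

// envOrKV prefers an environment variable and falls back to the stored
// config value, mirroring env.Get(enableEnv, kv.Get(config.Enable)) above.
func envOrKV(envKey, kvValue string) string {
	if v, ok := os.LookupEnv(envKey); ok {
		return v
	}
	return kvValue
}

func main() {
	const enableEnv = "MINIO_NOTIFY_AMQP_ENABLE" // base env var (assumed name)
	target := "primary"                          // a named AMQP target
	// Named targets append the target name: MINIO_NOTIFY_AMQP_ENABLE_primary.
	fmt.Println(envOrKV(enableEnv+"_"+target, "off"))
}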
17
vendor/github.com/minio/minio/cmd/config/policy/opa/config.go
generated
vendored
@@ -41,10 +41,14 @@ const (
// DefaultKVS - default config for OPA config
var (
DefaultKVS = config.KVS{
config.State: config.StateOff,
config.Comment: "This is a default OPA configuration",
URL: "",
AuthToken: "",
config.KV{
Key: URL,
Value: "",
},
config.KV{
Key: AuthToken,
Value: "",
},
}
)

@@ -104,6 +108,11 @@ type Opa struct {
client *http.Client
}

// Enabled returns if opa is enabled.
func Enabled(kvs config.KVS) bool {
return kvs.Get(URL) != ""
}

// LookupConfig lookup Opa from config, override with any ENVs.
func LookupConfig(kv config.KVS, transport *http.Transport, closeRespFn func(io.ReadCloser)) (Args, error) {
args := Args{}
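Across these files the old config.State flag disappears: a subsystem now advertises itself through an Enabled helper that checks whether its defining key is set. A compilable sketch of that convention, with stand-in types (KV and KVS here are simplified, not MinIO's actual definitions):

package main

import "fmt"

// KV and KVS are simplified stand-ins for MinIO's config types.
type KV struct{ Key, Value string }
type KVS []KV

// Get returns the value stored for key, or "" when absent.
func (kvs KVS) Get(key string) string {
	for _, kv := range kvs {
		if kv.Key == key {
			return kv.Value
		}
	}
	return ""
}

// Enabled mirrors the opa.Enabled idiom: a configured URL implies "on".
func Enabled(kvs KVS) bool { return kvs.Get("url") != "" }

func main() {
	fmt.Println(Enabled(KVS{{Key: "url", Value: ""}}))                 // false
	fmt.Println(Enabled(KVS{{Key: "url", Value: "http://opa:8181"}})) // true
}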
23
vendor/github.com/minio/minio/cmd/config/policy/opa/help.go
generated
vendored
@@ -20,10 +20,23 @@ import "github.com/minio/minio/cmd/config"

// Help template for OPA policy feature.
var (
Help = config.HelpKV{
URL: `Points to URL for OPA HTTP API endpoint. eg: "http://localhost:8181/v1/data/httpapi/authz/allow"`,
AuthToken: "Authorization token for the OPA HTTP API endpoint (optional)",
config.State: "Indicates if OPA policy is enabled or not",
config.Comment: "A comment to describe the OPA policy setting",
Help = config.HelpKVS{
config.HelpKV{
Key: URL,
Description: `OPA HTTP API endpoint e.g. "http://localhost:8181/v1/data/httpapi/authz/allow"`,
Type: "url",
},
config.HelpKV{
Key: AuthToken,
Description: "authorization token for OPA HTTP API endpoint",
Optional: true,
Type: "string",
},
config.HelpKV{
Key: config.Comment,
Description: config.DefaultComment,
Optional: true,
Type: "sentence",
},
}
)
29
vendor/github.com/minio/minio/cmd/config/policy/opa/legacy.go
generated
vendored
@@ -28,23 +28,18 @@ const (

// SetPolicyOPAConfig - One time migration code needed, for migrating from older config to new for PolicyOPAConfig.
func SetPolicyOPAConfig(s config.Config, opaArgs Args) {
if opaArgs.URL == nil || opaArgs.URL.String() == "" {
// Do not enable if opaArgs was empty.
return
}
s[config.PolicyOPASubSys][config.Default] = config.KVS{
config.State: func() string {
if opaArgs.URL == nil {
return config.StateOff
}
if opaArgs.URL.String() == "" {
return config.StateOff
}
return config.StateOn
}(),
config.Comment: "Settings for OPA, after migrating config",
URL: func() string {
if opaArgs.URL != nil {
return opaArgs.URL.String()
}
return ""
}(),
AuthToken: opaArgs.AuthToken,
config.KV{
Key: URL,
Value: opaArgs.URL.String(),
},
config.KV{
Key: AuthToken,
Value: opaArgs.AuthToken,
},
}
}
24
vendor/github.com/minio/minio/cmd/config/storageclass/help.go
generated
vendored
@@ -20,10 +20,24 @@ import "github.com/minio/minio/cmd/config"

// Help template for storageclass feature.
var (
Help = config.HelpKV{
ClassRRS: "Set reduced redundancy storage class parity ratio. eg: \"EC:2\"",
ClassStandard: "Set standard storage class parity ratio. eg: \"EC:4\"",
config.State: "Indicates if storageclass is enabled or not",
config.Comment: "A comment to describe the storageclass setting",
Help = config.HelpKVS{
config.HelpKV{
Key: ClassStandard,
Description: `set the parity count for default standard storage class e.g. "EC:4"`,
Optional: true,
Type: "string",
},
config.HelpKV{
Key: ClassRRS,
Description: `set the parity count for reduced redundancy storage class e.g. "EC:2"`,
Optional: true,
Type: "string",
},
config.HelpKV{
Key: config.Comment,
Description: config.DefaultComment,
Optional: true,
Type: "sentence",
},
}
)
21
vendor/github.com/minio/minio/cmd/config/storageclass/legacy.go
generated
vendored
@@ -22,15 +22,18 @@ import (

// SetStorageClass - One time migration code needed, for migrating from older config to new for StorageClass.
func SetStorageClass(s config.Config, cfg Config) {
if len(cfg.Standard.String()) == 0 && len(cfg.RRS.String()) == 0 {
// Do not enable storage-class if no settings found.
return
}
s[config.StorageClassSubSys][config.Default] = config.KVS{
ClassStandard: cfg.Standard.String(),
ClassRRS: cfg.RRS.String(),
config.State: func() string {
if len(cfg.Standard.String()) > 0 || len(cfg.RRS.String()) > 0 {
return config.StateOn
}
return config.StateOff
}(),
config.Comment: "Settings for StorageClass, after migrating config",
config.KV{
Key: ClassStandard,
Value: cfg.Standard.String(),
},
config.KV{
Key: ClassRRS,
Value: cfg.RRS.String(),
},
}
}
32
vendor/github.com/minio/minio/cmd/config/storageclass/storage-class.go
generated
vendored
@@ -39,8 +39,6 @@ const (
ClassStandard = "standard"
ClassRRS = "rrs"

// Env to on/off storage class settings.
EnvStorageClass = "MINIO_STORAGE_CLASS_STATE"
// Reduced redundancy storage class environment variable
RRSEnv = "MINIO_STORAGE_CLASS_RRS"
// Standard storage class environment variable

@@ -59,10 +57,14 @@ const (
// DefaultKVS - default storage class config
var (
DefaultKVS = config.KVS{
config.State: config.StateOff,
config.Comment: "This is a default StorageClass configuration, only applicable in erasure coded setups",
ClassStandard: "",
ClassRRS: "EC:2",
config.KV{
Key: ClassStandard,
Value: "",
},
config.KV{
Key: ClassRRS,
Value: "EC:2",
},
}
)

@@ -210,6 +212,13 @@ func (sCfg Config) GetParityForSC(sc string) (parity int) {
}
}

// Enabled returns if storageclass is enabled.
func Enabled(kvs config.KVS) bool {
ssc := kvs.Get(ClassStandard)
rrsc := kvs.Get(ClassRRS)
return ssc != "" || rrsc != ""
}

// LookupConfig - lookup storage class config and override with valid environment settings if any.
func LookupConfig(kvs config.KVS, drivesPerSet int) (cfg Config, err error) {
cfg = Config{}

@@ -220,19 +229,8 @@ func LookupConfig(kvs config.KVS, drivesPerSet int) (cfg Config, err error) {
return cfg, err
}

stateBool, err := config.ParseBool(env.Get(EnvStorageClass, kvs.Get(config.State)))
if err != nil {
return cfg, err
}
ssc := env.Get(StandardEnv, kvs.Get(ClassStandard))
rrsc := env.Get(RRSEnv, kvs.Get(ClassRRS))
if stateBool {
if ssc == "" && rrsc == "" {
return cfg, config.Error("'standard' and 'rrs' key cannot be empty for enabled storage class")
}
// if one of storage class is not empty proceed.
}

// Check for environment variables and parse into storageClass struct
if ssc != "" {
cfg.Standard, err = parseStorageClass(ssc)
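Both storage-class keys above carry "EC:n" values (ClassRRS defaults to "EC:2"), and the new Enabled helper treats any non-empty value as "on". A simplified, self-contained take on that value format; parseParity below is a stand-in for the vendored parseStorageClass, not its actual implementation.

package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseParity extracts n from an "EC:n" storage-class value.
func parseParity(v string) (int, error) {
	parts := strings.Split(v, ":")
	if len(parts) != 2 || parts[0] != "EC" {
		return 0, fmt.Errorf("unsupported storage class %q, expected \"EC:n\"", v)
	}
	return strconv.Atoi(parts[1])
}

func main() {
	parity, err := parseParity("EC:2")
	fmt.Println(parity, err) // 2 <nil>
}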
4
vendor/github.com/minio/minio/cmd/consolelogger.go
generated
vendored
@@ -44,8 +44,8 @@ type HTTPConsoleLoggerSys struct {

// NewConsoleLogger - creates new HTTPConsoleLoggerSys with all nodes subscribed to
// the console logging pub sub system
func NewConsoleLogger(ctx context.Context, endpoints EndpointList) *HTTPConsoleLoggerSys {
host, err := xnet.ParseHost(GetLocalPeer(globalEndpoints))
func NewConsoleLogger(ctx context.Context, endpointZones EndpointZones) *HTTPConsoleLoggerSys {
host, err := xnet.ParseHost(GetLocalPeer(endpointZones))
if err != nil {
logger.FatalIf(err, "Unable to start console logging subsystem")
}
61
vendor/github.com/minio/minio/cmd/crypto/config.go
generated
vendored
@@ -46,16 +46,38 @@ const (
// DefaultKVS - default KV crypto config
var (
DefaultKVS = config.KVS{
config.State: config.StateOff,
config.Comment: "This is a default Vault configuration",
KMSVaultEndpoint: "",
KMSVaultCAPath: "",
KMSVaultKeyName: "",
KMSVaultKeyVersion: "",
KMSVaultNamespace: "",
KMSVaultAuthType: "approle",
KMSVaultAppRoleID: "",
KMSVaultAppRoleSecret: "",
config.KV{
Key: KMSVaultEndpoint,
Value: "",
},
config.KV{
Key: KMSVaultKeyName,
Value: "",
},
config.KV{
Key: KMSVaultAuthType,
Value: "approle",
},
config.KV{
Key: KMSVaultAppRoleID,
Value: "",
},
config.KV{
Key: KMSVaultAppRoleSecret,
Value: "",
},
config.KV{
Key: KMSVaultCAPath,
Value: "",
},
config.KV{
Key: KMSVaultKeyVersion,
Value: "",
},
config.KV{
Key: KMSVaultNamespace,
Value: "",
},
}
)

@@ -74,9 +96,6 @@ const (
)

const (
// EnvKMSVaultState to enable on/off
EnvKMSVaultState = "MINIO_KMS_VAULT_STATE"

// EnvKMSVaultEndpoint is the environment variable used to specify
// the vault HTTPS endpoint.
EnvKMSVaultEndpoint = "MINIO_KMS_VAULT_ENDPOINT"

@@ -119,6 +138,12 @@ var defaultCfg = VaultConfig{
},
}

// Enabled returns if HashiCorp Vault is enabled.
func Enabled(kvs config.KVS) bool {
endpoint := kvs.Get(KMSVaultEndpoint)
return endpoint != ""
}

// LookupConfig extracts the KMS configuration provided by environment
// variables and merge them with the provided KMS configuration. The
// merging follows the following rules:

@@ -141,7 +166,7 @@ func LookupConfig(kvs config.KVS) (KMSConfig, error) {
return kmsCfg, err
}
if !kmsCfg.AutoEncryption {
kmsCfg.AutoEncryption, err = config.ParseBool(env.Get(EnvKMSAutoEncryption, config.StateOff))
kmsCfg.AutoEncryption, err = config.ParseBool(env.Get(EnvKMSAutoEncryption, config.EnableOff))
if err != nil {
return kmsCfg, err
}

@@ -149,13 +174,7 @@ func LookupConfig(kvs config.KVS) (KMSConfig, error) {
if kmsCfg.Vault.Enabled {
return kmsCfg, nil
}
stateBool, err := config.ParseBool(env.Get(EnvKMSVaultState, kvs.Get(config.State)))
if err != nil {
return kmsCfg, err
}
if !stateBool {
return kmsCfg, nil
}

vcfg := VaultConfig{
Auth: VaultAuth{
Type: "approle",
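The Vault hunks follow the same pattern: EnvKMSVaultState and the config.State key are dropped, and Enabled derives the on/off decision from whether KMSVaultEndpoint is set. A sketch of that rule with a stand-in function, not the vendored crypto package:

package main

import "fmt"

// vaultEnabled mirrors crypto.Enabled: a configured endpoint implies "on".
func vaultEnabled(endpoint string) bool {
	return endpoint != ""
}

func main() {
	fmt.Println(vaultEnabled(""))                      // false: Vault stays disabled
	fmt.Println(vaultEnabled("http://127.0.0.1:8200")) // true
}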
61
vendor/github.com/minio/minio/cmd/crypto/help.go
generated
vendored
@@ -20,16 +20,55 @@ import "github.com/minio/minio/cmd/config"

// Help template for KMS vault
var (
Help = config.HelpKV{
KMSVaultEndpoint: `Points to Vault API endpoint eg: "http://vault-endpoint-ip:8200"`,
KMSVaultKeyName: `Transit key name used in vault policy, must be unique name eg: "my-minio-key"`,
KMSVaultAuthType: `Authentication type to Vault API endpoint eg: "approle"`,
KMSVaultAppRoleID: `Unique role ID created for AppRole`,
KMSVaultAppRoleSecret: `Unique secret ID created for AppRole`,
KMSVaultNamespace: `Only needed if AppRole engine is scoped to Vault Namespace eg: "ns1"`,
KMSVaultKeyVersion: `Key version (optional)`,
KMSVaultCAPath: `Path to PEM-encoded CA cert files to use mTLS authentication (optional) eg: "/home/user/custom-certs"`,
config.State: "Indicates if KMS Vault is enabled or not",
config.Comment: "A comment to describe the KMS Vault setting",
Help = config.HelpKVS{
config.HelpKV{
Key: KMSVaultEndpoint,
Description: `HashiCorp Vault API endpoint e.g. "http://vault-endpoint-ip:8200"`,
Type: "url",
},
config.HelpKV{
Key: KMSVaultKeyName,
Description: `transit key name used in vault policy, must be unique name e.g. "my-minio-key"`,
Type: "string",
},
config.HelpKV{
Key: KMSVaultAuthType,
Description: `authentication type to Vault API endpoint e.g. "approle"`,
Type: "string",
},
config.HelpKV{
Key: KMSVaultAppRoleID,
Description: `unique role ID created for AppRole`,
Type: "string",
},
config.HelpKV{
Key: KMSVaultAppRoleSecret,
Description: `unique secret ID created for AppRole`,
Type: "string",
},
config.HelpKV{
Key: KMSVaultNamespace,
Description: `only needed if AppRole engine is scoped to Vault Namespace e.g. "ns1"`,
Optional: true,
Type: "string",
},
config.HelpKV{
Key: KMSVaultKeyVersion,
Description: `KMS Vault key version`,
Optional: true,
Type: "number",
},
config.HelpKV{
Key: KMSVaultCAPath,
Description: `path to PEM-encoded CA cert files to use mTLS authentication (optional) e.g. "/home/user/custom-certs"`,
Optional: true,
Type: "path",
},
config.HelpKV{
Key: config.Comment,
Description: config.DefaultComment,
Optional: true,
Type: "sentence",
},
}
)
73
vendor/github.com/minio/minio/cmd/crypto/legacy.go
generated
vendored
@@ -79,27 +79,47 @@ const (

// SetKMSConfig helper to migrate from older KMSConfig to new KV.
func SetKMSConfig(s config.Config, cfg KMSConfig) {
if cfg.Vault.Endpoint == "" {
return
}
s[config.KmsVaultSubSys][config.Default] = config.KVS{
KMSVaultEndpoint: cfg.Vault.Endpoint,
KMSVaultCAPath: cfg.Vault.CAPath,
KMSVaultAuthType: func() string {
if cfg.Vault.Auth.Type != "" {
return cfg.Vault.Auth.Type
}
return "approle"
}(),
KMSVaultAppRoleID: cfg.Vault.Auth.AppRole.ID,
KMSVaultAppRoleSecret: cfg.Vault.Auth.AppRole.Secret,
KMSVaultKeyName: cfg.Vault.Key.Name,
KMSVaultKeyVersion: strconv.Itoa(cfg.Vault.Key.Version),
KMSVaultNamespace: cfg.Vault.Namespace,
config.State: func() string {
if cfg.Vault.Endpoint != "" {
return config.StateOn
}
return config.StateOff
}(),
config.Comment: "Settings for KMS Vault, after migrating config",
config.KV{
Key: KMSVaultEndpoint,
Value: cfg.Vault.Endpoint,
},
config.KV{
Key: KMSVaultCAPath,
Value: cfg.Vault.CAPath,
},
config.KV{
Key: KMSVaultAuthType,
Value: func() string {
if cfg.Vault.Auth.Type != "" {
return cfg.Vault.Auth.Type
}
return "approle"
}(),
},
config.KV{
Key: KMSVaultAppRoleID,
Value: cfg.Vault.Auth.AppRole.ID,
},
config.KV{
Key: KMSVaultAppRoleSecret,
Value: cfg.Vault.Auth.AppRole.Secret,
},
config.KV{
Key: KMSVaultKeyName,
Value: cfg.Vault.Key.Name,
},
config.KV{
Key: KMSVaultKeyVersion,
Value: strconv.Itoa(cfg.Vault.Key.Version),
},
config.KV{
Key: KMSVaultNamespace,
Value: cfg.Vault.Namespace,
},
}
}

@@ -117,7 +137,7 @@ func SetKMSConfig(s config.Config, cfg KMSConfig) {
// It sets the global KMS configuration according to the merged configuration
// on success.
func lookupConfigLegacy(kvs config.KVS) (KMSConfig, error) {
autoBool, err := config.ParseBool(env.Get(EnvAutoEncryptionLegacy, config.StateOff))
autoBool, err := config.ParseBool(env.Get(EnvAutoEncryptionLegacy, config.EnableOff))
if err != nil {
return KMSConfig{}, err
}

@@ -131,17 +151,6 @@ func lookupConfigLegacy(kvs config.KVS) (KMSConfig, error) {
},
}

// Assume default as "on" for legacy config since we didn't have a _STATE
// flag to turn it off, but we should honor it nonetheless to turn it off
// if the vault endpoint is down and there is no way to start the server.
stateBool, err := config.ParseBool(env.Get(EnvKMSVaultState, config.StateOn))
if err != nil {
return cfg, err
}
if !stateBool {
return cfg, nil
}

endpointStr := env.Get(EnvLegacyVaultEndpoint, "")
if endpointStr != "" {
// Lookup Hashicorp-Vault configuration & overwrite config entry if ENV var is present
2
vendor/github.com/minio/minio/cmd/daily-lifecycle-ops.go
generated
vendored
@@ -108,7 +108,7 @@ var lifecycleTimeout = newDynamicTimeout(60*time.Second, time.Second)

func lifecycleRound(ctx context.Context, objAPI ObjectLayer) error {
// Lock to avoid concurrent lifecycle ops from other nodes
sweepLock := globalNSMutex.NewNSLock(ctx, "system", "daily-lifecycle-ops")
sweepLock := objAPI.NewNSLock(ctx, "system", "daily-lifecycle-ops")
if err := sweepLock.GetLock(lifecycleTimeout); err != nil {
return err
}
2
vendor/github.com/minio/minio/cmd/disk-cache-backend.go
generated
vendored
@@ -124,7 +124,7 @@ type diskCache struct {
// Inits the disk cache dir if it is not initialized already.
func newDiskCache(dir string, expiry int, quotaPct int) (*diskCache, error) {
if err := os.MkdirAll(dir, 0777); err != nil {
return nil, fmt.Errorf("Unable to initialize '%s' dir, %s", dir, err)
return nil, fmt.Errorf("Unable to initialize '%s' dir, %w", dir, err)
}
cache := diskCache{
dir: dir,
43
vendor/github.com/minio/minio/cmd/disk-cache.go
generated
vendored
@@ -1,3 +1,19 @@
/*
 * MinIO Cloud Storage, (C) 2019 MinIO, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package cmd

import (

@@ -49,15 +65,17 @@ type cacheObjects struct {
cache []*diskCache
// file path patterns to exclude from cache
exclude []string
// to manage cache namespace locks
nsMutex *nsLockMap

// if true migration is in progress from v1 to v2
migrating bool
// mutex to protect migration bool
migMutex sync.Mutex

// nsMutex namespace lock
nsMutex *nsLockMap

// Object functions pointing to the corresponding functions of backend implementation.
NewNSLockFn func(ctx context.Context, bucket, object string) RWLocker
GetObjectNInfoFn func(ctx context.Context, bucket, object string, rs *HTTPRangeSpec, h http.Header, lockType LockType, opts ObjectOptions) (gr *GetObjectReader, err error)
GetObjectInfoFn func(ctx context.Context, bucket, object string, opts ObjectOptions) (objInfo ObjectInfo, err error)
DeleteObjectFn func(ctx context.Context, bucket, object string) error

@@ -66,7 +84,7 @@ type cacheObjects struct {
}

func (c *cacheObjects) delete(ctx context.Context, dcache *diskCache, bucket, object string) (err error) {
cLock := c.nsMutex.NewNSLock(ctx, bucket, object)
cLock := c.NewNSLockFn(ctx, bucket, object)
if err := cLock.GetLock(globalObjectTimeout); err != nil {
return err
}

@@ -75,7 +93,7 @@ func (c *cacheObjects) delete(ctx context.Context, dcache *diskCache, bucket, object string) (err error) {
}

func (c *cacheObjects) put(ctx context.Context, dcache *diskCache, bucket, object string, data io.Reader, size int64, opts ObjectOptions) error {
cLock := c.nsMutex.NewNSLock(ctx, bucket, object)
cLock := c.NewNSLockFn(ctx, bucket, object)
if err := cLock.GetLock(globalObjectTimeout); err != nil {
return err
}

@@ -84,7 +102,7 @@ func (c *cacheObjects) put(ctx context.Context, dcache *diskCache, bucket, object string, data io.Reader, size int64, opts ObjectOptions) error {
}

func (c *cacheObjects) get(ctx context.Context, dcache *diskCache, bucket, object string, rs *HTTPRangeSpec, h http.Header, opts ObjectOptions) (gr *GetObjectReader, err error) {
cLock := c.nsMutex.NewNSLock(ctx, bucket, object)
cLock := c.NewNSLockFn(ctx, bucket, object)
if err := cLock.GetRLock(globalObjectTimeout); err != nil {
return nil, err
}

@@ -94,7 +112,7 @@ func (c *cacheObjects) get(ctx context.Context, dcache *diskCache, bucket, object string, rs *HTTPRangeSpec, h http.Header, opts ObjectOptions) (gr *GetObjectReader, err error) {
}

func (c *cacheObjects) stat(ctx context.Context, dcache *diskCache, bucket, object string) (oi ObjectInfo, err error) {
cLock := c.nsMutex.NewNSLock(ctx, bucket, object)
cLock := c.NewNSLockFn(ctx, bucket, object)
if err := cLock.GetRLock(globalObjectTimeout); err != nil {
return oi, err
}

@@ -509,6 +527,13 @@ func (c *cacheObjects) PutObject(ctx context.Context, bucket, object string, r *
if opts.ServerSideEncryption != nil {
return putObjectFn(ctx, bucket, object, r, opts)
}

// skip cache for objects with locks
objRetention := getObjectRetentionMeta(opts.UserDefined)
if objRetention.Mode == Governance || objRetention.Mode == Compliance {
return putObjectFn(ctx, bucket, object, r, opts)
}

// fetch from backend if cache exclude pattern or cache-control
// directive set to exclude
if c.isCacheExclude(bucket, object) {

@@ -549,9 +574,9 @@ func newServerCacheObjects(ctx context.Context, config cache.Config) (CacheObjectLayer, error) {
c := &cacheObjects{
cache: cache,
exclude: config.Exclude,
nsMutex: newNSLock(false),
migrating: migrateSw,
migMutex: sync.Mutex{},
nsMutex: newNSLock(false),
GetObjectInfoFn: func(ctx context.Context, bucket, object string, opts ObjectOptions) (ObjectInfo, error) {
return newObjectLayerFn().GetObjectInfo(ctx, bucket, object, opts)
},

@@ -572,6 +597,10 @@ func newServerCacheObjects(ctx context.Context, config cache.Config) (CacheObjectLayer, error) {
return newObjectLayerFn().PutObject(ctx, bucket, object, data, opts)
},
}
c.NewNSLockFn = func(ctx context.Context, bucket, object string) RWLocker {
return c.nsMutex.NewNSLock(ctx, nil, bucket, object)
}

if migrateSw {
go c.migrateCacheFromV1toV2(ctx)
}
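The disk-cache hunks replace direct calls on c.nsMutex with a NewNSLockFn field, so lock construction becomes a swappable dependency (for example for tests). A hedged sketch of that indirection with stand-in types, not the vendored locker interface:

package main

import (
	"context"
	"fmt"
	"sync"
)

// RWLocker is a stand-in for MinIO's namespace locker interface.
type RWLocker interface {
	GetLock() error
	Unlock()
}

type mutexLocker struct{ mu sync.Mutex }

func (l *mutexLocker) GetLock() error { l.mu.Lock(); return nil }
func (l *mutexLocker) Unlock()        { l.mu.Unlock() }

// cacheObjects holds lock construction as a field, mirroring NewNSLockFn.
type cacheObjects struct {
	NewNSLockFn func(ctx context.Context, bucket, object string) RWLocker
}

func main() {
	c := &cacheObjects{
		NewNSLockFn: func(ctx context.Context, bucket, object string) RWLocker {
			return &mutexLocker{} // tests could inject a fake locker here
		},
	}
	lk := c.NewNSLockFn(context.Background(), "bucket", "object")
	if err := lk.GetLock(); err == nil {
		defer lk.Unlock()
		fmt.Println("locked bucket/object")
	}
}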
111
vendor/github.com/minio/minio/cmd/endpoint-ellipses.go
generated
vendored
@@ -30,11 +30,6 @@ import (
// This file implements and supports ellipses pattern for
// `minio server` command line arguments.

// Maximum number of unique args supported on the command line.
const (
serverCommandLineArgsMax = 32
)

// Endpoint set represents parsed ellipses values, also provides
// methods to get the sets of endpoints.
type endpointSet struct {

@@ -63,31 +58,20 @@ func getDivisibleSize(totalSizes []uint64) (result uint64) {
return result
}

// isValidSetSize - checks whether given count is a valid set size for erasure coding.
var isValidSetSize = func(count uint64) bool {
return (count >= setSizes[0] && count <= setSizes[len(setSizes)-1] && count%2 == 0)
}

// getSetIndexes returns list of indexes which provides the set size
// on each index, this function also determines the final set size
// The final set size has the affinity towards choosing smaller
// indexes (total sets)
func getSetIndexes(args []string, totalSizes []uint64) (setIndexes [][]uint64, err error) {
func getSetIndexes(args []string, totalSizes []uint64, customSetDriveCount uint64) (setIndexes [][]uint64, err error) {
if len(totalSizes) == 0 || len(args) == 0 {
return nil, errInvalidArgument
}

// isValidSetSize - checks whether given count is a valid set size for erasure coding.
isValidSetSize := func(count uint64) bool {
return (count >= setSizes[0] && count <= setSizes[len(setSizes)-1] && count%2 == 0)
}

var customSetDriveCount uint64
if v := env.Get("MINIO_ERASURE_SET_DRIVE_COUNT", ""); v != "" {
customSetDriveCount, err = strconv.ParseUint(v, 10, 64)
if err != nil {
return nil, config.ErrInvalidErasureSetSize(err)
}
if !isValidSetSize(customSetDriveCount) {
return nil, config.ErrInvalidErasureSetSize(nil)
}
}

setIndexes = make([][]uint64, len(totalSizes))
for _, totalSize := range totalSizes {
// Check if totalSize has minimum range upto setSize

@@ -194,7 +178,7 @@ func getTotalSizes(argPatterns []ellipses.ArgPattern) []uint64 {
// Parses all arguments and returns an endpointSet which is a collection
// of endpoints following the ellipses pattern, this is what is used
// by the object layer for initializing itself.
func parseEndpointSet(args ...string) (ep endpointSet, err error) {
func parseEndpointSet(customSetDriveCount uint64, args ...string) (ep endpointSet, err error) {
var argPatterns = make([]ellipses.ArgPattern, len(args))
for i, arg := range args {
patterns, perr := ellipses.FindEllipsesPatterns(arg)

@@ -204,7 +188,7 @@ func parseEndpointSet(args ...string) (ep endpointSet, err error) {
argPatterns[i] = patterns
}

ep.setIndexes, err = getSetIndexes(args, getTotalSizes(argPatterns))
ep.setIndexes, err = getSetIndexes(args, getTotalSizes(argPatterns), customSetDriveCount)
if err != nil {
return endpointSet{}, config.ErrInvalidErasureEndpoints(nil).Msg(err.Error())
}

@@ -220,8 +204,15 @@ func parseEndpointSet(args ...string) (ep endpointSet, err error) {
// For example: {1...64} is divided into 4 sets each of size 16.
// This applies to even distributed setup syntax as well.
func GetAllSets(args ...string) ([][]string, error) {
if len(args) == 0 {
return nil, errInvalidArgument
var customSetDriveCount uint64
if v := env.Get("MINIO_ERASURE_SET_DRIVE_COUNT", ""); v != "" {
customSetDriveCount, err := strconv.ParseUint(v, 10, 64)
if err != nil {
return nil, config.ErrInvalidErasureSetSize(err)
}
if !isValidSetSize(customSetDriveCount) {
return nil, config.ErrInvalidErasureSetSize(nil)
}
}

var setArgs [][]string

@@ -230,7 +221,7 @@ func GetAllSets(args ...string) ([][]string, error) {
// Check if we have more one args.
if len(args) > 1 {
var err error
setIndexes, err = getSetIndexes(args, []uint64{uint64(len(args))})
setIndexes, err = getSetIndexes(args, []uint64{uint64(len(args))}, customSetDriveCount)
if err != nil {
return nil, err
}

@@ -244,7 +235,7 @@ func GetAllSets(args ...string) ([][]string, error) {
}
setArgs = s.Get()
} else {
s, err := parseEndpointSet(args...)
s, err := parseEndpointSet(customSetDriveCount, args...)
if err != nil {
return nil, err
}

@@ -266,18 +257,64 @@ func GetAllSets(args ...string) ([][]string, error) {

// CreateServerEndpoints - validates and creates new endpoints from input args, supports
// both ellipses and without ellipses transparently.
func createServerEndpoints(serverAddr string, args ...string) (string, EndpointList, SetupType, int, int, error) {
setArgs, err := GetAllSets(args...)
if err != nil {
return serverAddr, nil, -1, 0, 0, err
func createServerEndpoints(serverAddr string, args ...string) (EndpointZones, int, SetupType, error) {
if len(args) == 0 {
return nil, -1, -1, errInvalidArgument
}

var endpoints EndpointList
var endpointZones EndpointZones
var setupType SetupType
serverAddr, endpoints, setupType, err = CreateEndpoints(serverAddr, setArgs...)
if err != nil {
return serverAddr, nil, -1, 0, 0, err
var drivesPerSet int
if !ellipses.HasEllipses(args...) {
setArgs, err := GetAllSets(args...)
if err != nil {
return nil, -1, -1, err
}
endpointList, newSetupType, err := CreateEndpoints(serverAddr, setArgs...)
if err != nil {
return nil, -1, -1, err
}
endpointZones = append(endpointZones, ZoneEndpoints{
SetCount: len(setArgs),
DrivesPerSet: len(setArgs[0]),
Endpoints: endpointList,
})
setupType = newSetupType
return endpointZones, len(setArgs[0]), setupType, nil
}

return serverAddr, endpoints, setupType, len(setArgs), len(setArgs[0]), nil
// Verify the args setup-type appropriately.
{
setArgs, err := GetAllSets(args...)
if err != nil {
return nil, -1, -1, err
}

_, setupType, err = CreateEndpoints(serverAddr, setArgs...)
if err != nil {
return nil, -1, -1, err
}
}

for _, arg := range args {
setArgs, err := GetAllSets(arg)
if err != nil {
return nil, -1, -1, err
}
endpointList, _, err := CreateEndpoints(serverAddr, setArgs...)
if err != nil {
return nil, -1, -1, err
}
if drivesPerSet != 0 && drivesPerSet != len(setArgs[0]) {
return nil, -1, -1, fmt.Errorf("All zones should have same drive per set ratio - expected %d, got %d", drivesPerSet, len(setArgs[0]))
}
endpointZones = append(endpointZones, ZoneEndpoints{
SetCount: len(setArgs),
DrivesPerSet: len(setArgs[0]),
Endpoints: endpointList,
})
drivesPerSet = len(setArgs[0])
}

return endpointZones, drivesPerSet, setupType, nil
}
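createServerEndpoints now builds one ZoneEndpoints entry per ellipses argument and rejects zones whose drives-per-set ratio differs. A simplified stand-alone version of that invariant check (stand-in types; the vendored code also derives set counts and setup types):

package main

import "fmt"

// checkZones enforces that every zone uses the same drives-per-set ratio,
// mirroring the drivesPerSet comparison in createServerEndpoints.
func checkZones(zoneSets [][]string) error {
	drivesPerSet := 0
	for _, set := range zoneSets {
		if drivesPerSet != 0 && drivesPerSet != len(set) {
			return fmt.Errorf("all zones should have same drive per set ratio - expected %d, got %d", drivesPerSet, len(set))
		}
		drivesPerSet = len(set)
	}
	return nil
}

func main() {
	fmt.Println(checkZones([][]string{{"d1", "d2"}, {"d3", "d4"}}))       // <nil>
	fmt.Println(checkZones([][]string{{"d1", "d2"}, {"d3", "d4", "d5"}})) // error
}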
61
vendor/github.com/minio/minio/cmd/endpoint-ellipses_test.go
generated
vendored
@@ -18,7 +18,6 @@ package cmd

import (
"fmt"
"os"
"reflect"
"testing"

@@ -54,14 +53,17 @@ func TestCreateServerEndpoints(t *testing.T) {
{":9001", []string{"http://localhost:9001/export{01...64}"}, true},
}

for i, testCase := range testCases {
_, _, _, _, _, err := createServerEndpoints(testCase.serverAddr, testCase.args...)
if err != nil && testCase.success {
t.Errorf("Test %d: Expected success but failed instead %s", i+1, err)
}
if err == nil && !testCase.success {
t.Errorf("Test %d: Expected failure but passed instead", i+1)
}
for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
_, _, _, err := createServerEndpoints(testCase.serverAddr, testCase.args...)
if err != nil && testCase.success {
t.Errorf("Expected success but failed instead %s", err)
}
if err == nil && !testCase.success {
t.Errorf("Expected failure but passed instead")
}
})
}
}

@@ -74,8 +76,10 @@ func TestGetDivisibleSize(t *testing.T) {
{[]uint64{8, 8, 8}, 8},
{[]uint64{24}, 24},
}
for i, testCase := range testCases {
t.Run(fmt.Sprintf("Test%d", i+1), func(t *testing.T) {

for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
gotGCD := getDivisibleSize(testCase.totalSizes)
if testCase.result != gotGCD {
t.Errorf("Expected %v, got %v", testCase.result, gotGCD)

@@ -90,45 +94,43 @@ func TestGetSetIndexesEnvOverride(t *testing.T) {
args []string
totalSizes []uint64
indexes [][]uint64
envOverride string
envOverride uint64
success bool
}{
{
[]string{"data{1...64}"},
[]uint64{64},
[][]uint64{{8, 8, 8, 8, 8, 8, 8, 8}},
"8",
8,
true,
},
{
[]string{"data{1...60}"},
nil,
nil,
"8",
8,
false,
},
{
[]string{"data{1...64}"},
nil,
nil,
"-1",
64,
false,
},
{
[]string{"data{1...64}"},
nil,
nil,
"2",
2,
false,
},
}

for i, testCase := range testCases {
t.Run(fmt.Sprintf("Test%d", i+1), func(t *testing.T) {
if err := os.Setenv("MINIO_ERASURE_SET_DRIVE_COUNT", testCase.envOverride); err != nil {
t.Fatal(err)
}
gotIndexes, err := getSetIndexes(testCase.args, testCase.totalSizes)
for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
gotIndexes, err := getSetIndexes(testCase.args, testCase.totalSizes, testCase.envOverride)
if err != nil && testCase.success {
t.Errorf("Expected success but failed instead %s", err)
}

@@ -138,7 +140,6 @@ func TestGetSetIndexesEnvOverride(t *testing.T) {
if !reflect.DeepEqual(testCase.indexes, gotIndexes) {
t.Errorf("Expected %v, got %v", testCase.indexes, gotIndexes)
}
os.Unsetenv("MINIO_ERASURE_SET_DRIVE_COUNT")
})
}
}

@@ -209,9 +210,10 @@ func TestGetSetIndexes(t *testing.T) {
},
}

for i, testCase := range testCases {
t.Run(fmt.Sprintf("Test%d", i+1), func(t *testing.T) {
gotIndexes, err := getSetIndexes(testCase.args, testCase.totalSizes)
for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
gotIndexes, err := getSetIndexes(testCase.args, testCase.totalSizes, 0)
if err != nil && testCase.success {
t.Errorf("Expected success but failed instead %s", err)
}

@@ -530,9 +532,10 @@ func TestParseEndpointSet(t *testing.T) {
},
}

for i, testCase := range testCases {
t.Run(fmt.Sprintf("Test%d", i+1), func(t *testing.T) {
gotEs, err := parseEndpointSet(testCase.arg)
for _, testCase := range testCases {
testCase := testCase
t.Run("", func(t *testing.T) {
gotEs, err := parseEndpointSet(0, testCase.arg)
if err != nil && testCase.success {
t.Errorf("Expected success but failed instead %s", err)
}
269
vendor/github.com/minio/minio/cmd/endpoint.go
generated
vendored
@ -55,7 +55,6 @@ type Endpoint struct {
|
|||
*url.URL
|
||||
IsLocal bool
|
||||
SetIndex int
|
||||
HostName string
|
||||
}
|
||||
|
||||
func (endpoint Endpoint) String() string {
|
||||
|
@ -75,20 +74,18 @@ func (endpoint Endpoint) Type() EndpointType {
|
|||
return URLEndpointType
|
||||
}
|
||||
|
||||
// IsHTTPS - returns true if secure for URLEndpointType.
|
||||
func (endpoint Endpoint) IsHTTPS() bool {
|
||||
// HTTPS - returns true if secure for URLEndpointType.
|
||||
func (endpoint Endpoint) HTTPS() bool {
|
||||
return endpoint.Scheme == "https"
|
||||
}
|
||||
|
||||
// UpdateIsLocal - resolves the host and updates if it is local or not.
|
||||
func (endpoint *Endpoint) UpdateIsLocal() error {
|
||||
func (endpoint *Endpoint) UpdateIsLocal() (err error) {
|
||||
if !endpoint.IsLocal {
|
||||
isLocal, err := isLocalHost(endpoint.HostName)
|
||||
endpoint.IsLocal, err = isLocalHost(endpoint.Hostname(), endpoint.Port(), globalMinioPort)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
endpoint.IsLocal = isLocal
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
@ -121,7 +118,7 @@ func NewEndpoint(arg string) (ep Endpoint, e error) {
|
|||
host, port, err = net.SplitHostPort(u.Host)
|
||||
if err != nil {
|
||||
if !strings.Contains(err.Error(), "missing port in address") {
|
||||
return ep, fmt.Errorf("invalid URL endpoint format: %s", err)
|
||||
return ep, fmt.Errorf("invalid URL endpoint format: %w", err)
|
||||
}
|
||||
|
||||
host = u.Host
|
||||
|
@ -181,35 +178,51 @@ func NewEndpoint(arg string) (ep Endpoint, e error) {
|
|||
}
|
||||
|
||||
return Endpoint{
|
||||
URL: u,
|
||||
IsLocal: isLocal,
|
||||
HostName: host,
|
||||
URL: u,
|
||||
IsLocal: isLocal,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// EndpointList - list of same type of endpoint.
|
||||
type EndpointList []Endpoint
|
||||
|
||||
// Nodes - returns number of unique servers.
|
||||
func (endpoints EndpointList) Nodes() int {
|
||||
uniqueNodes := set.NewStringSet()
|
||||
for _, endpoint := range endpoints {
|
||||
if uniqueNodes.Contains(endpoint.Host) {
|
||||
continue
|
||||
}
|
||||
uniqueNodes.Add(endpoint.Host)
|
||||
}
|
||||
return len(uniqueNodes)
|
||||
// ZoneEndpoints represent endpoints in a given zone
|
||||
// along with its setCount and drivesPerSet.
|
||||
type ZoneEndpoints struct {
|
||||
SetCount int
|
||||
DrivesPerSet int
|
||||
Endpoints Endpoints
|
||||
}
|
||||
|
||||
// IsHTTPS - returns true if secure for URLEndpointType.
|
||||
func (endpoints EndpointList) IsHTTPS() bool {
|
||||
return endpoints[0].IsHTTPS()
|
||||
// EndpointZones - list of list of endpoints
|
||||
type EndpointZones []ZoneEndpoints
|
||||
|
||||
// FirstLocal returns true if the first endpoint is local.
|
||||
func (l EndpointZones) FirstLocal() bool {
|
||||
return l[0].Endpoints[0].IsLocal
|
||||
}
|
||||
|
||||
// HTTPS - returns true if secure for URLEndpointType.
|
||||
func (l EndpointZones) HTTPS() bool {
|
||||
return l[0].Endpoints.HTTPS()
|
||||
}
|
||||
|
||||
// Nodes - returns all nodes count
|
||||
func (l EndpointZones) Nodes() (count int) {
|
||||
for _, ep := range l {
|
||||
count += len(ep.Endpoints)
|
||||
}
|
||||
return count
|
||||
}
|
||||
|
||||
// Endpoints - list of same type of endpoint.
|
||||
type Endpoints []Endpoint
|
||||
|
||||
// HTTPS - returns true if secure for URLEndpointType.
|
||||
func (endpoints Endpoints) HTTPS() bool {
|
||||
return endpoints[0].HTTPS()
|
||||
}
|
||||
|
||||
// GetString - returns endpoint string of i-th endpoint (0-based),
|
||||
// and empty string for invalid indexes.
|
||||
func (endpoints EndpointList) GetString(i int) string {
|
||||
func (endpoints Endpoints) GetString(i int) string {
|
||||
if i < 0 || i >= len(endpoints) {
|
||||
return ""
|
||||
}
|
||||
|
@ -217,7 +230,7 @@ func (endpoints EndpointList) GetString(i int) string {
|
|||
}
|
||||
|
||||
// UpdateIsLocal - resolves the host and discovers the local host.
|
||||
func (endpoints EndpointList) UpdateIsLocal() error {
|
||||
func (endpoints Endpoints) UpdateIsLocal() error {
|
||||
var epsResolved int
|
||||
var foundLocal bool
|
||||
resolvedList := make([]bool, len(endpoints))
|
||||
|
@ -246,7 +259,7 @@ func (endpoints EndpointList) UpdateIsLocal() error {
|
|||
// return err if not Docker or Kubernetes
|
||||
// We use IsDocker() to check for Docker environment
|
||||
// We use IsKubernetes() to check for Kubernetes environment
|
||||
isLocal, err := isLocalHost(endpoints[i].HostName)
|
||||
isLocal, err := isLocalHost(endpoints[i].Hostname(), endpoints[i].Port(), globalMinioPort)
|
||||
if err != nil {
|
||||
if !IsDocker() && !IsKubernetes() {
|
||||
return err
|
||||
|
@ -256,8 +269,10 @@ func (endpoints EndpointList) UpdateIsLocal() error {
|
|||
// log error only if more than 1s elapsed
|
||||
if timeElapsed > time.Second {
|
||||
// Log the message to console about the host not being resolveable.
|
||||
reqInfo := (&logger.ReqInfo{}).AppendTags("host", endpoints[i].HostName)
|
||||
reqInfo.AppendTags("elapsedTime", humanize.RelTime(startTime, startTime.Add(timeElapsed), "elapsed", ""))
|
||||
reqInfo := (&logger.ReqInfo{}).AppendTags("host", endpoints[i].Hostname())
|
||||
reqInfo.AppendTags("elapsedTime",
|
||||
humanize.RelTime(startTime, startTime.Add(timeElapsed),
|
||||
"elapsed", ""))
|
||||
ctx := logger.SetReqInfo(context.Background(), reqInfo)
|
||||
logger.LogIf(ctx, err, logger.Application)
|
||||
}
|
||||
|
@ -301,8 +316,8 @@ func (endpoints EndpointList) UpdateIsLocal() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// NewEndpointList - returns new endpoint list based on input args.
|
||||
func NewEndpointList(args ...string) (endpoints EndpointList, err error) {
|
||||
// NewEndpoints - returns new endpoint list based on input args.
|
||||
func NewEndpoints(args ...string) (endpoints Endpoints, err error) {
|
||||
var endpointType EndpointType
|
||||
var scheme string
|
||||
|
||||
|
@ -335,28 +350,30 @@ func NewEndpointList(args ...string) (endpoints EndpointList, err error) {
|
|||
return endpoints, nil
|
||||
}
|
||||
|
||||
func checkEndpointsSubOptimal(ctx *cli.Context, setupType SetupType, endpoints EndpointList) (err error) {
|
||||
func checkEndpointsSubOptimal(ctx *cli.Context, setupType SetupType, endpointZones EndpointZones) (err error) {
|
||||
// Validate sub optimal ordering only for distributed setup.
|
||||
if setupType != DistXLSetupType {
|
||||
return nil
|
||||
}
|
||||
var endpointOrder int
|
||||
err = fmt.Errorf("Too many disk args are local, input is in sub-optimal order. Please review input args: %s", ctx.Args())
|
||||
for _, endpoint := range endpoints {
|
||||
if endpoint.IsLocal {
|
||||
endpointOrder++
|
||||
} else {
|
||||
endpointOrder--
|
||||
}
|
||||
if endpointOrder >= 2 {
|
||||
return err
|
||||
for _, endpoints := range endpointZones {
|
||||
for _, endpoint := range endpoints.Endpoints {
|
||||
if endpoint.IsLocal {
|
||||
endpointOrder++
|
||||
} else {
|
||||
endpointOrder--
|
||||
}
|
||||
if endpointOrder >= 2 {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Checks if there are any cross device mounts.
|
||||
func checkCrossDeviceMounts(endpoints EndpointList) (err error) {
|
||||
func checkCrossDeviceMounts(endpoints Endpoints) (err error) {
|
||||
var absPaths []string
|
||||
for _, endpoint := range endpoints {
|
||||
if endpoint.IsLocal {
|
||||
|
@ -372,14 +389,14 @@ func checkCrossDeviceMounts(endpoints EndpointList) (err error) {
|
|||
}
|
||||
|
||||
// CreateEndpoints - validates and creates new endpoints for given args.
|
||||
func CreateEndpoints(serverAddr string, args ...[]string) (string, EndpointList, SetupType, error) {
|
||||
var endpoints EndpointList
|
||||
func CreateEndpoints(serverAddr string, args ...[]string) (Endpoints, SetupType, error) {
|
||||
var endpoints Endpoints
|
||||
var setupType SetupType
|
||||
var err error
|
||||
|
||||
// Check whether serverAddr is valid for this host.
|
||||
if err = CheckLocalServerAddr(serverAddr); err != nil {
|
||||
return serverAddr, endpoints, setupType, err
|
||||
return endpoints, setupType, err
|
||||
}
|
||||
|
||||
_, serverAddrPort := mustSplitHostPort(serverAddr)
|
||||
|
@ -389,36 +406,36 @@ func CreateEndpoints(serverAddr string, args ...[]string) (string, EndpointList,
|
|||
var endpoint Endpoint
|
||||
endpoint, err = NewEndpoint(args[0][0])
|
||||
if err != nil {
|
||||
return serverAddr, endpoints, setupType, err
|
||||
return endpoints, setupType, err
|
||||
}
|
||||
if err := endpoint.UpdateIsLocal(); err != nil {
|
||||
return serverAddr, endpoints, setupType, err
|
||||
return endpoints, setupType, err
|
||||
}
|
||||
if endpoint.Type() != PathEndpointType {
|
||||
return serverAddr, endpoints, setupType, config.ErrInvalidFSEndpoint(nil).Msg("use path style endpoint for FS setup")
|
||||
return endpoints, setupType, config.ErrInvalidFSEndpoint(nil).Msg("use path style endpoint for FS setup")
|
||||
}
|
||||
endpoints = append(endpoints, endpoint)
|
||||
setupType = FSSetupType
|
||||
|
||||
// Check for cross device mounts if any.
|
||||
if err = checkCrossDeviceMounts(endpoints); err != nil {
|
||||
return serverAddr, endpoints, setupType, config.ErrInvalidFSEndpoint(nil).Msg(err.Error())
|
||||
return endpoints, setupType, config.ErrInvalidFSEndpoint(nil).Msg(err.Error())
|
||||
}
|
||||
return serverAddr, endpoints, setupType, nil
|
||||
|
||||
return endpoints, setupType, nil
|
||||
}
|
||||
|
||||
for i, iargs := range args {
|
||||
var newEndpoints EndpointList
|
||||
// Convert args to endpoints
|
||||
var eps EndpointList
|
||||
eps, err = NewEndpointList(iargs...)
|
||||
var newEndpoints Endpoints
|
||||
eps, err := NewEndpoints(iargs...)
|
||||
if err != nil {
|
||||
return serverAddr, endpoints, setupType, config.ErrInvalidErasureEndpoints(nil).Msg(err.Error())
|
||||
return endpoints, setupType, config.ErrInvalidErasureEndpoints(nil).Msg(err.Error())
|
||||
}
|
||||
|
||||
// Check for cross device mounts if any.
|
||||
if err = checkCrossDeviceMounts(eps); err != nil {
|
||||
return serverAddr, endpoints, setupType, config.ErrInvalidErasureEndpoints(nil).Msg(err.Error())
|
||||
return endpoints, setupType, config.ErrInvalidErasureEndpoints(nil).Msg(err.Error())
|
||||
}
|
||||
|
||||
for _, ep := range eps {
|
||||
|
@ -428,57 +445,51 @@ func CreateEndpoints(serverAddr string, args ...[]string) (string, EndpointList,
|
|||
endpoints = append(endpoints, newEndpoints...)
|
||||
}
|
||||
|
||||
if len(endpoints) == 0 {
|
||||
return endpoints, setupType, config.ErrInvalidErasureEndpoints(nil).Msg("invalid number of endpoints")
|
||||
}
|
||||
|
||||
// Return XL setup when all endpoints are path style.
|
||||
if endpoints[0].Type() == PathEndpointType {
|
||||
setupType = XLSetupType
|
||||
return serverAddr, endpoints, setupType, nil
|
||||
return endpoints, setupType, nil
|
||||
}
|
||||
|
||||
if err := endpoints.UpdateIsLocal(); err != nil {
|
||||
return serverAddr, endpoints, setupType, config.ErrInvalidErasureEndpoints(nil).Msg(err.Error())
|
||||
if err = endpoints.UpdateIsLocal(); err != nil {
|
||||
return endpoints, setupType, config.ErrInvalidErasureEndpoints(nil).Msg(err.Error())
|
||||
}
|
||||
|
||||
// Here all endpoints are URL style.
|
||||
endpointPathSet := set.NewStringSet()
|
||||
localEndpointCount := 0
|
||||
localServerAddrSet := set.NewStringSet()
|
||||
localServerHostSet := set.NewStringSet()
|
||||
localPortSet := set.NewStringSet()
|
||||
|
||||
for _, endpoint := range endpoints {
|
||||
endpointPathSet.Add(endpoint.Path)
|
||||
if endpoint.IsLocal {
|
||||
localServerAddrSet.Add(endpoint.Host)
|
||||
localServerHostSet.Add(endpoint.Hostname())
|
||||
|
||||
var port string
|
||||
_, port, err = net.SplitHostPort(endpoint.Host)
|
||||
if err != nil {
|
||||
port = serverAddrPort
|
||||
}
|
||||
|
||||
localPortSet.Add(port)
|
||||
|
||||
localEndpointCount++
|
||||
}
|
||||
}
|
||||
|
||||
// No local endpoint found.
|
||||
if localEndpointCount == 0 {
|
||||
return serverAddr, endpoints, setupType, config.ErrInvalidErasureEndpoints(nil).Msg("no endpoint pointing to the local machine is found")
|
||||
}
|
||||
|
||||
// Check whether same path is not used in endpoints of a host on different port.
|
||||
{
|
||||
pathIPMap := make(map[string]set.StringSet)
|
||||
for _, endpoint := range endpoints {
|
||||
var host string
|
||||
host, _, err = net.SplitHostPort(endpoint.Host)
|
||||
if err != nil {
|
||||
host = endpoint.Host
|
||||
}
|
||||
host := endpoint.Hostname()
|
||||
hostIPSet, _ := getHostIP(host)
|
||||
if IPSet, ok := pathIPMap[endpoint.Path]; ok {
|
||||
if !IPSet.Intersection(hostIPSet).IsEmpty() {
|
||||
return serverAddr, endpoints, setupType,
|
||||
return endpoints, setupType,
|
||||
config.ErrInvalidErasureEndpoints(nil).Msg(fmt.Sprintf("path '%s' can not be served by different port on same address", endpoint.Path))
|
||||
}
|
||||
pathIPMap[endpoint.Path] = IPSet.Union(hostIPSet)
|
||||
|
@ -496,69 +507,26 @@ func CreateEndpoints(serverAddr string, args ...[]string) (string, EndpointList,
|
|||
continue
|
||||
}
|
||||
if localPathSet.Contains(endpoint.Path) {
|
||||
return serverAddr, endpoints, setupType,
|
||||
return endpoints, setupType,
|
||||
config.ErrInvalidErasureEndpoints(nil).Msg(fmt.Sprintf("path '%s' cannot be served by different address on same server", endpoint.Path))
|
||||
}
|
||||
localPathSet.Add(endpoint.Path)
|
||||
}
|
||||
}
|
||||
|
||||
// Check whether serverAddrPort matches at least in one of port used in local endpoints.
|
||||
{
|
||||
if !localPortSet.Contains(serverAddrPort) {
|
||||
if len(localPortSet) > 1 {
|
||||
return serverAddr, endpoints, setupType,
|
||||
config.ErrInvalidErasureEndpoints(nil).Msg("port number in server address must match with one of the port in local endpoints")
|
||||
}
|
||||
return serverAddr, endpoints, setupType,
|
||||
config.ErrInvalidErasureEndpoints(nil).Msg("server address and local endpoint have different ports")
|
||||
}
|
||||
}
|
||||
|
||||
// All endpoints are pointing to local host
|
||||
if len(endpoints) == localEndpointCount {
|
||||
// If all endpoints have same port number, then this is XL setup using URL style endpoints.
|
||||
if len(localPortSet) == 1 {
|
||||
if len(localServerAddrSet) > 1 {
|
||||
// TODO: Even though all endpoints are local, the local host is referred by different IP/name.
|
||||
// eg '172.0.0.1', 'localhost' and 'mylocalhostname' point to same local host.
|
||||
//
|
||||
// In this case, we bind to 0.0.0.0 ie to all interfaces.
|
||||
// The actual way to do is bind to only IPs in uniqueLocalHosts.
|
||||
serverAddr = net.JoinHostPort("", serverAddrPort)
|
||||
if len(localServerHostSet) > 1 {
|
||||
return endpoints, setupType,
|
||||
config.ErrInvalidErasureEndpoints(nil).Msg("all local endpoints should not have different hostnames/ips")
|
||||
}
|
||||
|
||||
endpointPaths := endpointPathSet.ToSlice()
|
||||
endpoints, _ = NewEndpointList(endpointPaths...)
|
||||
setupType = XLSetupType
|
||||
return serverAddr, endpoints, setupType, nil
|
||||
return endpoints, XLSetupType, nil
|
||||
}
|
||||
|
||||
// Even though all endpoints are local, but those endpoints use different ports.
|
||||
// This means it is DistXL setup.
|
||||
} else {
|
||||
// This is DistXL setup.
|
||||
// Check whether local server address are not 127.x.x.x
|
||||
for _, localServerAddr := range localServerAddrSet.ToSlice() {
|
||||
host, _, err := net.SplitHostPort(localServerAddr)
|
||||
if err != nil {
|
||||
host = localServerAddr
|
||||
}
|
||||
|
||||
ipList, err := getHostIP(host)
|
||||
logger.FatalIf(err, "unexpected error when resolving host '%s'", host)
|
||||
|
||||
// Filter ipList by IPs those start with '127.' or '::1'
|
||||
loopBackIPs := ipList.FuncMatch(func(ip string, matchString string) bool {
|
||||
return strings.HasPrefix(ip, "127.") || strings.HasPrefix(ip, "::1")
|
||||
}, "")
|
||||
|
||||
// If loop back IP is found and ipList contains only loop back IPs, then error out.
if len(loopBackIPs) > 0 && len(loopBackIPs) == len(ipList) {
err = fmt.Errorf("'%s' resolves to loopback address is not allowed for distributed XL", localServerAddr)
return serverAddr, endpoints, setupType, err
}
}
}

// Add missing port in all endpoints.

@ -577,16 +545,10 @@ func CreateEndpoints(serverAddr string, args ...[]string) (string, EndpointList,
uniqueArgs.Add(endpoint.Host)
}

// Error out if we have more than serverCommandLineArgsMax unique servers.
if len(uniqueArgs.ToSlice()) > serverCommandLineArgsMax {
err := fmt.Errorf("Unsupported number of endpoints (%s), total number of servers cannot be more than %d", endpoints, serverCommandLineArgsMax)
return serverAddr, endpoints, setupType, err
}

// Error out if we have less than 2 unique servers.
if len(uniqueArgs.ToSlice()) < 2 && setupType == DistXLSetupType {
err := fmt.Errorf("Unsupported number of endpoints (%s), minimum number of servers cannot be less than 2 in distributed setup", endpoints)
return serverAddr, endpoints, setupType, err
return endpoints, setupType, err
}

publicIPs := env.Get(config.EnvPublicIPs, "")

@ -595,7 +557,7 @@ func CreateEndpoints(serverAddr string, args ...[]string) (string, EndpointList,
}

setupType = DistXLSetupType
return serverAddr, endpoints, setupType, nil
return endpoints, setupType, nil
}

// GetLocalPeer - returns local peer value, returns globalMinioAddr

@ -603,14 +565,16 @@ func CreateEndpoints(serverAddr string, args ...[]string) (string, EndpointList,
// the first element from the set of peers which indicate that
// they are local. There is always one entry that is local
// even with repeated server endpoints.
func GetLocalPeer(endpoints EndpointList) (localPeer string) {
func GetLocalPeer(endpointZones EndpointZones) (localPeer string) {
peerSet := set.NewStringSet()
for _, endpoint := range endpoints {
if endpoint.Type() != URLEndpointType {
continue
}
if endpoint.IsLocal && endpoint.Host != "" {
peerSet.Add(endpoint.Host)
for _, ep := range endpointZones {
for _, endpoint := range ep.Endpoints {
if endpoint.Type() != URLEndpointType {
continue
}
if endpoint.IsLocal && endpoint.Host != "" {
peerSet.Add(endpoint.Host)
}
}
}
if peerSet.IsEmpty() {

@ -626,23 +590,24 @@ func GetLocalPeer(endpoints EndpointList) (localPeer string) {
}

// GetRemotePeers - get hosts information other than this minio service.
func GetRemotePeers(endpoints EndpointList) []string {
func GetRemotePeers(endpointZones EndpointZones) []string {
peerSet := set.NewStringSet()
for _, endpoint := range endpoints {
if endpoint.Type() != URLEndpointType {
continue
}

peer := endpoint.Host
if endpoint.IsLocal {
if _, port := mustSplitHostPort(peer); port == globalMinioPort {
for _, ep := range endpointZones {
for _, endpoint := range ep.Endpoints {
if endpoint.Type() != URLEndpointType {
continue
}

peer := endpoint.Host
if endpoint.IsLocal {
if _, port := mustSplitHostPort(peer); port == globalMinioPort {
continue
}
}

peerSet.Add(peer)
}

peerSet.Add(peer)
}

return peerSet.ToSlice()
}

@ -670,6 +635,10 @@ func updateDomainIPs(endPoints set.StringSet) {
ipList = ipList.Union(IPsWithPort)
}
globalDomainIPs = ipList.FuncMatch(func(ip string, matchString string) bool {
return !(strings.HasPrefix(ip, "127.") || strings.HasPrefix(ip, "::1") || strings.HasPrefix(ip, "[::1]"))
host, _, err := net.SplitHostPort(ip)
if err != nil {
host = ip
}
return !net.ParseIP(host).IsLoopback()
}, "")
}
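The updateDomainIPs change above replaces string-prefix checks ("127.", "::1", "[::1]") with net.ParseIP(host).IsLoopback() after splitting off any port, which also covers the rest of 127.0.0.0/8 and spellings a prefix list misses (for example 0:0:0:0:0:0:0:1). A minimal, self-contained sketch of the new predicate (the function name and sample inputs are illustrative, not MinIO's):

```go
package main

import (
	"fmt"
	"net"
)

// isLoopback strips an optional port, then asks the net package whether
// the remaining host is a loopback address, mirroring the new filter.
func isLoopback(addr string) bool {
	host, _, err := net.SplitHostPort(addr)
	if err != nil {
		host = addr // addr had no port component
	}
	ip := net.ParseIP(host)
	return ip != nil && ip.IsLoopback()
}

func main() {
	for _, addr := range []string{"127.0.0.1:9000", "127.8.9.1:9000", "[::1]:9000", "10.0.0.2:9000"} {
		fmt.Printf("%-18s loopback=%v\n", addr, isLoopback(addr))
	}
}
```

Only the last address survives the filter; the first three are all loopback forms.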
229
vendor/github.com/minio/minio/cmd/endpoint_test.go
generated
vendored

@ -1,5 +1,5 @@
/*
* MinIO Cloud Storage, (C) 2017 MinIO, Inc.
* MinIO Cloud Storage, (C) 2017,2018,2019 MinIO, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.

@ -19,6 +19,7 @@ package cmd
import (
"flag"
"fmt"
"net"
"net/url"
"reflect"
"strings"

@ -35,19 +36,19 @@ func TestSubOptimalEndpointInput(t *testing.T) {
tests := []struct {
setupType SetupType
ctx *cli.Context
endpoints EndpointList
endpoints EndpointZones
isErr bool
}{
{
setupType: DistXLSetupType,
ctx: cli.NewContext(cli.NewApp(), flag.NewFlagSet("", flag.ContinueOnError), nil),
endpoints: mustGetNewEndpointList(args1...),
endpoints: mustGetZoneEndpoints(args1...),
isErr: false,
},
{
setupType: DistXLSetupType,
ctx: cli.NewContext(cli.NewApp(), flag.NewFlagSet("", flag.ContinueOnError), nil),
endpoints: mustGetNewEndpointList(args2...),
endpoints: mustGetZoneEndpoints(args2...),
isErr: false,
},
}
@ -66,9 +67,7 @@ func TestSubOptimalEndpointInput(t *testing.T) {
}

func TestNewEndpoint(t *testing.T) {
u1, _ := url.Parse("http://localhost/path")
u2, _ := url.Parse("https://example.org/path")
u3, _ := url.Parse("http://127.0.0.1:8080/path")
u4, _ := url.Parse("http://192.168.253.200/path")

testCases := []struct {

@ -90,11 +89,8 @@ func TestNewEndpoint(t *testing.T) {
{"http:path", Endpoint{URL: &url.URL{Path: "http:path"}, IsLocal: true}, PathEndpointType, nil},
{"http:/path", Endpoint{URL: &url.URL{Path: "http:/path"}, IsLocal: true}, PathEndpointType, nil},
{"http:///path", Endpoint{URL: &url.URL{Path: "http:/path"}, IsLocal: true}, PathEndpointType, nil},
{"http://localhost/path", Endpoint{URL: u1, IsLocal: true, HostName: "localhost"}, URLEndpointType, nil},
{"http://localhost/path//", Endpoint{URL: u1, IsLocal: true, HostName: "localhost"}, URLEndpointType, nil},
{"https://example.org/path", Endpoint{URL: u2, IsLocal: false, HostName: "example.org"}, URLEndpointType, nil},
{"http://127.0.0.1:8080/path", Endpoint{URL: u3, IsLocal: true, HostName: "127.0.0.1"}, URLEndpointType, nil},
{"http://192.168.253.200/path", Endpoint{URL: u4, IsLocal: false, HostName: "192.168.253.200"}, URLEndpointType, nil},
{"https://example.org/path", Endpoint{URL: u2, IsLocal: false}, URLEndpointType, nil},
{"http://192.168.253.200/path", Endpoint{URL: u4, IsLocal: false}, URLEndpointType, nil},
{"", Endpoint{}, -1, fmt.Errorf("empty or root endpoint is not supported")},
{SlashSeparator, Endpoint{}, -1, fmt.Errorf("empty or root endpoint is not supported")},
{`\`, Endpoint{}, -1, fmt.Errorf("empty or root endpoint is not supported")},

@ -111,32 +107,35 @@ func TestNewEndpoint(t *testing.T) {
}

for _, testCase := range testCases {
endpoint, err := NewEndpoint(testCase.arg)
if err == nil {
err = endpoint.UpdateIsLocal()
}

if testCase.expectedErr == nil {
if err != nil {
t.Fatalf("error: expected = <nil>, got = %v", err)
testCase := testCase
t.Run("", func(t *testing.T) {
endpoint, err := NewEndpoint(testCase.arg)
if err == nil {
err = endpoint.UpdateIsLocal()
}
} else if err == nil {
t.Fatalf("error: expected = %v, got = <nil>", testCase.expectedErr)
} else if testCase.expectedErr.Error() != err.Error() {
t.Fatalf("error: expected = %v, got = %v", testCase.expectedErr, err)
}

if err == nil && !reflect.DeepEqual(testCase.expectedEndpoint, endpoint) {
t.Fatalf("endpoint: expected = %+v, got = %+v", testCase.expectedEndpoint, endpoint)
}
if testCase.expectedErr == nil {
if err != nil {
t.Errorf("error: expected = <nil>, got = %v", err)
}
} else if err == nil {
t.Errorf("error: expected = %v, got = <nil>", testCase.expectedErr)
} else if testCase.expectedErr.Error() != err.Error() {
t.Errorf("error: expected = %v, got = %v", testCase.expectedErr, err)
}

if err == nil && testCase.expectedType != endpoint.Type() {
t.Fatalf("type: expected = %+v, got = %+v", testCase.expectedType, endpoint.Type())
}
if err == nil && !reflect.DeepEqual(testCase.expectedEndpoint, endpoint) {
t.Errorf("endpoint: expected = %#v, got = %#v", testCase.expectedEndpoint, endpoint)
}

if err == nil && testCase.expectedType != endpoint.Type() {
t.Errorf("type: expected = %+v, got = %+v", testCase.expectedType, endpoint.Type())
}
})
}
}

func TestNewEndpointList(t *testing.T) {
func TestNewEndpoints(t *testing.T) {
testCases := []struct {
args []string
expectedErr error

@ -159,7 +158,7 @@ func TestNewEndpointList(t *testing.T) {
}

for _, testCase := range testCases {
_, err := NewEndpointList(testCase.args...)
_, err := NewEndpoints(testCase.args...)
if testCase.expectedErr == nil {
if err != nil {
t.Fatalf("error: expected = <nil>, got = %v", err)

@ -175,7 +174,7 @@ func TestNewEndpointList(t *testing.T) {
func TestCreateEndpoints(t *testing.T) {
// Filter ipList by IPs those do not start with '127.'.
nonLoopBackIPs := localIP4.FuncMatch(func(ip string, matchString string) bool {
return !strings.HasPrefix(ip, "127.")
return !net.ParseIP(ip).IsLoopback()
}, "")
if len(nonLoopBackIPs) == 0 {
t.Fatalf("No non-loop back IP address found for this host")
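The TestNewEndpoint rewrite above is the standard table-driven subtest refactor: each case runs under t.Run, the range variable is re-declared (testCase := testCase) so the closure does not observe a later iteration's value, and t.Fatalf becomes t.Errorf so one failing case no longer aborts the rest. A minimal sketch of the same pattern:

```go
package cmd

import "testing"

func TestDouble(t *testing.T) {
	testCases := []struct {
		in, want int
	}{
		{1, 2},
		{2, 4},
		{3, 6},
	}
	for _, testCase := range testCases {
		testCase := testCase // capture the range variable for the closure (needed before Go 1.22)
		t.Run("", func(t *testing.T) {
			if got := testCase.in * 2; got != testCase.want {
				// Errorf records the failure and lets the remaining
				// cases run; Fatalf would stop this subtest immediately.
				t.Errorf("double(%d) = %d, want %d", testCase.in, got, testCase.want)
			}
		})
	}
}
```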
@ -257,120 +256,114 @@ func TestCreateEndpoints(t *testing.T) {
serverAddr string
args [][]string
expectedServerAddr string
expectedEndpoints EndpointList
expectedEndpoints Endpoints
expectedSetupType SetupType
expectedErr error
}{
{"localhost", [][]string{}, "", EndpointList{}, -1, fmt.Errorf("address localhost: missing port in address")},
{"localhost", [][]string{}, "", Endpoints{}, -1, fmt.Errorf("address localhost: missing port in address")},

// FS Setup
{"localhost:9000", [][]string{{"http://localhost/d1"}}, "", EndpointList{}, -1, fmt.Errorf("use path style endpoint for FS setup")},
{":443", [][]string{{"d1"}}, ":443", EndpointList{Endpoint{URL: &url.URL{Path: "d1"}, IsLocal: true}}, FSSetupType, nil},
{"localhost:10000", [][]string{{"/d1"}}, "localhost:10000", EndpointList{Endpoint{URL: &url.URL{Path: "/d1"}, IsLocal: true}}, FSSetupType, nil},
{"localhost:10000", [][]string{{"./d1"}}, "localhost:10000", EndpointList{Endpoint{URL: &url.URL{Path: "d1"}, IsLocal: true}}, FSSetupType, nil},
{"localhost:10000", [][]string{{`\d1`}}, "localhost:10000", EndpointList{Endpoint{URL: &url.URL{Path: `\d1`}, IsLocal: true}}, FSSetupType, nil},
{"localhost:10000", [][]string{{`.\d1`}}, "localhost:10000", EndpointList{Endpoint{URL: &url.URL{Path: `.\d1`}, IsLocal: true}}, FSSetupType, nil},
{":8080", [][]string{{"https://example.org/d1", "https://example.org/d2", "https://example.org/d3", "https://example.org/d4"}}, "", EndpointList{}, -1, fmt.Errorf("no endpoint pointing to the local machine is found")},
{":8080", [][]string{{"https://example.org/d1", "https://example.com/d2", "https://example.net:8000/d3", "https://example.edu/d1"}}, "", EndpointList{}, -1, fmt.Errorf("no endpoint pointing to the local machine is found")},
{"localhost:9000", [][]string{{"https://127.0.0.1:9000/d1", "https://localhost:9001/d1", "https://example.com/d1", "https://example.com/d2"}}, "", EndpointList{}, -1, fmt.Errorf("path '/d1' can not be served by different port on same address")},
{"localhost:9000", [][]string{{"https://127.0.0.1:8000/d1", "https://localhost:9001/d2", "https://example.com/d1", "https://example.com/d2"}}, "", EndpointList{}, -1, fmt.Errorf("port number in server address must match with one of the port in local endpoints")},
{"localhost:10000", [][]string{{"https://127.0.0.1:8000/d1", "https://localhost:8000/d2", "https://example.com/d1", "https://example.com/d2"}}, "", EndpointList{}, -1, fmt.Errorf("server address and local endpoint have different ports")},
{"localhost:9000", [][]string{{"http://localhost/d1"}}, "", Endpoints{}, -1, fmt.Errorf("use path style endpoint for FS setup")},
{":443", [][]string{{"d1"}}, ":443", Endpoints{Endpoint{URL: &url.URL{Path: "d1"}, IsLocal: true}}, FSSetupType, nil},
{"localhost:10000", [][]string{{"/d1"}}, "localhost:10000", Endpoints{Endpoint{URL: &url.URL{Path: "/d1"}, IsLocal: true}}, FSSetupType, nil},
{"localhost:10000", [][]string{{"./d1"}}, "localhost:10000", Endpoints{Endpoint{URL: &url.URL{Path: "d1"}, IsLocal: true}}, FSSetupType, nil},
{"localhost:10000", [][]string{{`\d1`}}, "localhost:10000", Endpoints{Endpoint{URL: &url.URL{Path: `\d1`}, IsLocal: true}}, FSSetupType, nil},
{"localhost:10000", [][]string{{`.\d1`}}, "localhost:10000", Endpoints{Endpoint{URL: &url.URL{Path: `.\d1`}, IsLocal: true}}, FSSetupType, nil},
{"localhost:9000", [][]string{{"https://127.0.0.1:9000/d1", "https://localhost:9001/d1", "https://example.com/d1", "https://example.com/d2"}}, "", Endpoints{}, -1, fmt.Errorf("path '/d1' can not be served by different port on same address")},

// XL Setup with PathEndpointType
{":1234", [][]string{{"/d1", "/d2", "d3", "d4"}}, ":1234",
EndpointList{
Endpoints{
Endpoint{URL: &url.URL{Path: "/d1"}, IsLocal: true},
Endpoint{URL: &url.URL{Path: "/d2"}, IsLocal: true},
Endpoint{URL: &url.URL{Path: "d3"}, IsLocal: true},
Endpoint{URL: &url.URL{Path: "d4"}, IsLocal: true},
}, XLSetupType, nil},
// XL Setup with URLEndpointType
{":9000", [][]string{{"http://localhost/d1", "http://localhost/d2", "http://localhost/d3", "http://localhost/d4"}}, ":9000", EndpointList{
Endpoint{URL: &url.URL{Path: "/d1"}, IsLocal: true},
Endpoint{URL: &url.URL{Path: "/d2"}, IsLocal: true},
Endpoint{URL: &url.URL{Path: "/d3"}, IsLocal: true},
Endpoint{URL: &url.URL{Path: "/d4"}, IsLocal: true},
// DistXL Setup with URLEndpointType
{":9000", [][]string{{"http://localhost/d1", "http://localhost/d2", "http://localhost/d3", "http://localhost/d4"}}, ":9000", Endpoints{
Endpoint{URL: &url.URL{Scheme: "http", Host: "localhost", Path: "/d1"}, IsLocal: true},
Endpoint{URL: &url.URL{Scheme: "http", Host: "localhost", Path: "/d2"}, IsLocal: true},
Endpoint{URL: &url.URL{Scheme: "http", Host: "localhost", Path: "/d3"}, IsLocal: true},
Endpoint{URL: &url.URL{Scheme: "http", Host: "localhost", Path: "/d4"}, IsLocal: true},
}, XLSetupType, nil},
// XL Setup with URLEndpointType having mixed naming to local host.
{"127.0.0.1:10000", [][]string{{"http://localhost/d1", "http://localhost/d2", "http://127.0.0.1/d3", "http://127.0.0.1/d4"}}, ":10000", EndpointList{
Endpoint{URL: &url.URL{Path: "/d1"}, IsLocal: true},
Endpoint{URL: &url.URL{Path: "/d2"}, IsLocal: true},
Endpoint{URL: &url.URL{Path: "/d3"}, IsLocal: true},
Endpoint{URL: &url.URL{Path: "/d4"}, IsLocal: true},
}, XLSetupType, nil},
{":9001", [][]string{{"http://10.0.0.1:9000/export", "http://10.0.0.2:9000/export", "http://" + nonLoopBackIP + ":9001/export", "http://10.0.0.2:9001/export"}}, "", EndpointList{}, -1, fmt.Errorf("path '/export' can not be served by different port on same address")},
// DistXL Setup with URLEndpointType having mixed naming to local host.
{"127.0.0.1:10000", [][]string{{"http://localhost/d1", "http://localhost/d2", "http://127.0.0.1/d3", "http://127.0.0.1/d4"}}, "", Endpoints{}, -1, fmt.Errorf("all local endpoints should not have different hostnames/ips")},

{":9000", [][]string{{"http://127.0.0.1:9000/export", "http://" + nonLoopBackIP + ":9000/export", "http://10.0.0.1:9000/export", "http://10.0.0.2:9000/export"}}, "", EndpointList{}, -1, fmt.Errorf("path '/export' cannot be served by different address on same server")},
{":9001", [][]string{{"http://10.0.0.1:9000/export", "http://10.0.0.2:9000/export", "http://" + nonLoopBackIP + ":9001/export", "http://10.0.0.2:9001/export"}}, "", Endpoints{}, -1, fmt.Errorf("path '/export' can not be served by different port on same address")},

{":9000", [][]string{{"http://localhost/d1", "http://localhost/d2", "http://example.org/d3", "http://example.com/d4"}}, "", EndpointList{}, -1, fmt.Errorf("'localhost' resolves to loopback address is not allowed for distributed XL")},
{":9000", [][]string{{"http://127.0.0.1:9000/export", "http://" + nonLoopBackIP + ":9000/export", "http://10.0.0.1:9000/export", "http://10.0.0.2:9000/export"}}, "", Endpoints{}, -1, fmt.Errorf("path '/export' cannot be served by different address on same server")},

// DistXL type
{"127.0.0.1:10000", [][]string{{case1Endpoint1, case1Endpoint2, "http://example.org/d3", "http://example.com/d4"}}, "127.0.0.1:10000", EndpointList{
Endpoint{URL: case1URLs[0], IsLocal: case1LocalFlags[0], HostName: nonLoopBackIP},
Endpoint{URL: case1URLs[1], IsLocal: case1LocalFlags[1], HostName: nonLoopBackIP},
Endpoint{URL: case1URLs[2], IsLocal: case1LocalFlags[2], HostName: "example.org"},
Endpoint{URL: case1URLs[3], IsLocal: case1LocalFlags[3], HostName: "example.com"},
{"127.0.0.1:10000", [][]string{{case1Endpoint1, case1Endpoint2, "http://example.org/d3", "http://example.com/d4"}}, "127.0.0.1:10000", Endpoints{
Endpoint{URL: case1URLs[0], IsLocal: case1LocalFlags[0]},
Endpoint{URL: case1URLs[1], IsLocal: case1LocalFlags[1]},
Endpoint{URL: case1URLs[2], IsLocal: case1LocalFlags[2]},
Endpoint{URL: case1URLs[3], IsLocal: case1LocalFlags[3]},
}, DistXLSetupType, nil},

{"127.0.0.1:10000", [][]string{{case2Endpoint1, case2Endpoint2, "http://example.org/d3", "http://example.com/d4"}}, "127.0.0.1:10000", EndpointList{
Endpoint{URL: case2URLs[0], IsLocal: case2LocalFlags[0], HostName: nonLoopBackIP},
Endpoint{URL: case2URLs[1], IsLocal: case2LocalFlags[1], HostName: nonLoopBackIP},
Endpoint{URL: case2URLs[2], IsLocal: case2LocalFlags[2], HostName: "example.org"},
Endpoint{URL: case2URLs[3], IsLocal: case2LocalFlags[3], HostName: "example.com"},
{"127.0.0.1:10000", [][]string{{case2Endpoint1, case2Endpoint2, "http://example.org/d3", "http://example.com/d4"}}, "127.0.0.1:10000", Endpoints{
Endpoint{URL: case2URLs[0], IsLocal: case2LocalFlags[0]},
Endpoint{URL: case2URLs[1], IsLocal: case2LocalFlags[1]},
Endpoint{URL: case2URLs[2], IsLocal: case2LocalFlags[2]},
Endpoint{URL: case2URLs[3], IsLocal: case2LocalFlags[3]},
}, DistXLSetupType, nil},

{":80", [][]string{{case3Endpoint1, "http://example.org:9000/d2", "http://example.com/d3", "http://example.net/d4"}}, ":80", EndpointList{
Endpoint{URL: case3URLs[0], IsLocal: case3LocalFlags[0], HostName: nonLoopBackIP},
Endpoint{URL: case3URLs[1], IsLocal: case3LocalFlags[1], HostName: "example.org"},
Endpoint{URL: case3URLs[2], IsLocal: case3LocalFlags[2], HostName: "example.com"},
Endpoint{URL: case3URLs[3], IsLocal: case3LocalFlags[3], HostName: "example.net"},
{":80", [][]string{{case3Endpoint1, "http://example.org:9000/d2", "http://example.com/d3", "http://example.net/d4"}}, ":80", Endpoints{
Endpoint{URL: case3URLs[0], IsLocal: case3LocalFlags[0]},
Endpoint{URL: case3URLs[1], IsLocal: case3LocalFlags[1]},
Endpoint{URL: case3URLs[2], IsLocal: case3LocalFlags[2]},
Endpoint{URL: case3URLs[3], IsLocal: case3LocalFlags[3]},
}, DistXLSetupType, nil},

{":9000", [][]string{{case4Endpoint1, "http://example.org/d2", "http://example.com/d3", "http://example.net/d4"}}, ":9000", EndpointList{
Endpoint{URL: case4URLs[0], IsLocal: case4LocalFlags[0], HostName: nonLoopBackIP},
Endpoint{URL: case4URLs[1], IsLocal: case4LocalFlags[1], HostName: "example.org"},
Endpoint{URL: case4URLs[2], IsLocal: case4LocalFlags[2], HostName: "example.com"},
Endpoint{URL: case4URLs[3], IsLocal: case4LocalFlags[3], HostName: "example.net"},
{":9000", [][]string{{case4Endpoint1, "http://example.org/d2", "http://example.com/d3", "http://example.net/d4"}}, ":9000", Endpoints{
Endpoint{URL: case4URLs[0], IsLocal: case4LocalFlags[0]},
Endpoint{URL: case4URLs[1], IsLocal: case4LocalFlags[1]},
Endpoint{URL: case4URLs[2], IsLocal: case4LocalFlags[2]},
Endpoint{URL: case4URLs[3], IsLocal: case4LocalFlags[3]},
}, DistXLSetupType, nil},

{":9000", [][]string{{case5Endpoint1, case5Endpoint2, case5Endpoint3, case5Endpoint4}}, ":9000", EndpointList{
Endpoint{URL: case5URLs[0], IsLocal: case5LocalFlags[0], HostName: nonLoopBackIP},
Endpoint{URL: case5URLs[1], IsLocal: case5LocalFlags[1], HostName: nonLoopBackIP},
Endpoint{URL: case5URLs[2], IsLocal: case5LocalFlags[2], HostName: nonLoopBackIP},
Endpoint{URL: case5URLs[3], IsLocal: case5LocalFlags[3], HostName: nonLoopBackIP},
{":9000", [][]string{{case5Endpoint1, case5Endpoint2, case5Endpoint3, case5Endpoint4}}, ":9000", Endpoints{
Endpoint{URL: case5URLs[0], IsLocal: case5LocalFlags[0]},
Endpoint{URL: case5URLs[1], IsLocal: case5LocalFlags[1]},
Endpoint{URL: case5URLs[2], IsLocal: case5LocalFlags[2]},
Endpoint{URL: case5URLs[3], IsLocal: case5LocalFlags[3]},
}, DistXLSetupType, nil},

// DistXL Setup using only local host.
{":9003", [][]string{{"http://localhost:9000/d1", "http://localhost:9001/d2", "http://127.0.0.1:9002/d3", case6Endpoint}}, ":9003", EndpointList{
Endpoint{URL: case6URLs[0], IsLocal: case6LocalFlags[0], HostName: "localhost"},
Endpoint{URL: case6URLs[1], IsLocal: case6LocalFlags[1], HostName: "localhost"},
Endpoint{URL: case6URLs[2], IsLocal: case6LocalFlags[2], HostName: "127.0.0.1"},
Endpoint{URL: case6URLs[3], IsLocal: case6LocalFlags[3], HostName: nonLoopBackIP},
{":9003", [][]string{{"http://localhost:9000/d1", "http://localhost:9001/d2", "http://127.0.0.1:9002/d3", case6Endpoint}}, ":9003", Endpoints{
Endpoint{URL: case6URLs[0], IsLocal: case6LocalFlags[0]},
Endpoint{URL: case6URLs[1], IsLocal: case6LocalFlags[1]},
Endpoint{URL: case6URLs[2], IsLocal: case6LocalFlags[2]},
Endpoint{URL: case6URLs[3], IsLocal: case6LocalFlags[3]},
}, DistXLSetupType, nil},
}

for i, testCase := range testCases {
for _, testCase := range testCases {
testCase := testCase
t.Run(fmt.Sprintf("Test%d", i+1), func(t *testing.T) {
serverAddr, endpoints, setupType, err := CreateEndpoints(testCase.serverAddr, testCase.args...)
t.Run("", func(t *testing.T) {
endpoints, setupType, err := CreateEndpoints(testCase.serverAddr, testCase.args...)
if err == nil && testCase.expectedErr != nil {
t.Errorf("error: expected = %v, got = <nil>", testCase.expectedErr)
}
if err == nil {
if testCase.expectedErr != nil {
t.Fatalf("error: expected = %v, got = <nil>", testCase.expectedErr)
if setupType != testCase.expectedSetupType {
t.Errorf("setupType: expected = %v, got = %v", testCase.expectedSetupType, setupType)
}
if len(endpoints) != len(testCase.expectedEndpoints) {
t.Errorf("endpoints: expected = %d, got = %d", len(testCase.expectedEndpoints),
len(endpoints))
} else {
if serverAddr != testCase.expectedServerAddr {
t.Fatalf("serverAddr: expected = %v, got = %v", testCase.expectedServerAddr, serverAddr)
}
if !reflect.DeepEqual(endpoints, testCase.expectedEndpoints) {
t.Fatalf("endpoints: expected = %v, got = %v", testCase.expectedEndpoints, endpoints)
}
if setupType != testCase.expectedSetupType {
t.Fatalf("setupType: expected = %v, got = %v", testCase.expectedSetupType, setupType)
for i, endpoint := range endpoints {
if testCase.expectedEndpoints[i].String() != endpoint.String() {
t.Errorf("endpoints: expected = %s, got = %s",
testCase.expectedEndpoints[i],
endpoint)
}
}
}
} else if testCase.expectedErr == nil {
t.Fatalf("error: expected = <nil>, got = %v", err)
} else if err.Error() != testCase.expectedErr.Error() {
t.Fatalf("error: expected = %v, got = %v", testCase.expectedErr, err)
}
if err != nil && testCase.expectedErr == nil {
t.Errorf("error: expected = <nil>, got = %v, testCase: %v", err, testCase)
}
})
}
@ -403,13 +396,13 @@ func TestGetLocalPeer(t *testing.T) {
}

for i, testCase := range testCases {
endpoints, _ := NewEndpointList(testCase.endpointArgs...)
if !endpoints[0].IsLocal {
if err := endpoints.UpdateIsLocal(); err != nil {
zendpoints := mustGetZoneEndpoints(testCase.endpointArgs...)
if !zendpoints[0].Endpoints[0].IsLocal {
if err := zendpoints[0].Endpoints.UpdateIsLocal(); err != nil {
t.Fatalf("error: expected = <nil>, got = %v", err)
}
}
remotePeer := GetLocalPeer(endpoints)
remotePeer := GetLocalPeer(zendpoints)
if remotePeer != testCase.expectedResult {
t.Fatalf("Test %d: expected: %v, got: %v", i+1, testCase.expectedResult, remotePeer)
}

@ -435,13 +428,13 @@ func TestGetRemotePeers(t *testing.T) {
}

for _, testCase := range testCases {
endpoints, _ := NewEndpointList(testCase.endpointArgs...)
if !endpoints[0].IsLocal {
if err := endpoints.UpdateIsLocal(); err != nil {
zendpoints := mustGetZoneEndpoints(testCase.endpointArgs...)
if !zendpoints[0].Endpoints[0].IsLocal {
if err := zendpoints[0].Endpoints.UpdateIsLocal(); err != nil {
t.Fatalf("error: expected = <nil>, got = %v", err)
}
}
remotePeers := GetRemotePeers(endpoints)
remotePeers := GetRemotePeers(zendpoints)
if !reflect.DeepEqual(remotePeers, testCase.expectedResult) {
t.Fatalf("expected: %v, got: %v", testCase.expectedResult, remotePeers)
}
4
vendor/github.com/minio/minio/cmd/etcd.go
generated
vendored

@ -32,9 +32,9 @@ func etcdErrToErr(err error, etcdEndpoints []string) error {
}
switch err {
case context.DeadlineExceeded:
return fmt.Errorf("%s %s", errEtcdUnreachable, etcdEndpoints)
return fmt.Errorf("%w %s", errEtcdUnreachable, etcdEndpoints)
default:
return fmt.Errorf("unexpected error %s from etcd, please check your endpoints %s", err, etcdEndpoints)
return fmt.Errorf("unexpected error %w from etcd, please check your endpoints %s", err, etcdEndpoints)
}
}
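The one-character change in etcd.go above is meaningful: with the %w verb (Go 1.13+) fmt.Errorf wraps errEtcdUnreachable instead of flattening it to text, so callers can still detect it with errors.Is. A small self-contained illustration:

```go
package main

import (
	"errors"
	"fmt"
)

var errEtcdUnreachable = errors.New("etcd is unreachable")

func main() {
	endpoints := []string{"http://localhost:2379"}

	wrapped := fmt.Errorf("%w %s", errEtcdUnreachable, endpoints) // keeps the error chain
	flat := fmt.Errorf("%s %s", errEtcdUnreachable, endpoints)    // text only

	fmt.Println(errors.Is(wrapped, errEtcdUnreachable)) // true
	fmt.Println(errors.Is(flat, errEtcdUnreachable))    // false
}
```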
39
vendor/github.com/minio/minio/cmd/format-fs.go
generated
vendored

@ -350,27 +350,24 @@ func formatFSFixDeploymentID(fsFormatPath string) error {
logger.Info("Another minio process(es) might be holding a lock to the file %s. Please kill that minio process(es) (elapsed %s)\n", fsFormatPath, getElapsedTime())
continue
}
if err != nil {
break
}

if err = jsonLoad(wlk, format); err != nil {
break
}

// Check if format needs to be updated
if format.ID != "" {
err = nil
break
}

format.ID = mustGetUUID()
if err = jsonSave(wlk, format); err != nil {
break
}
break
}
if wlk != nil {
wlk.Close()
if err != nil {
return err
}
return err

defer wlk.Close()

if err = jsonLoad(wlk, format); err != nil {
return err
}

// Check if format needs to be updated
if format.ID != "" {
return nil
}

// Set new UUID to the format and save it
format.ID = mustGetUUID()
return jsonSave(wlk, format)
}
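The formatFSFixDeploymentID rewrite above trades break-out-of-the-loop error bookkeeping for early returns, with one deferred Close covering every exit once the lock is held. The shape of that refactor on a generic resource (the helpers and paths here are placeholders, not MinIO's):

```go
package main

import (
	"fmt"
	"io"
	"os"
)

// fixID writes id into the file at path only if the file is still empty.
// Every failure path simply returns, and the deferred Close runs on all
// of them once the file is open.
func fixID(path, id string) error {
	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE, 0o644)
	if err != nil {
		return err
	}
	defer f.Close()

	data, err := io.ReadAll(f)
	if err != nil {
		return err
	}
	if len(data) > 0 {
		return nil // ID already present, nothing to update
	}

	_, err = f.WriteString(id)
	return err
}

func main() {
	if err := fixID("/tmp/format-id-demo", "deadbeef"); err != nil {
		fmt.Println("fixID:", err)
	}
}
```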
75
vendor/github.com/minio/minio/cmd/format-xl.go
generated
vendored

@ -124,6 +124,18 @@ type formatXLV3 struct {
} `json:"xl"`
}

func (f *formatXLV3) Clone() *formatXLV3 {
b, err := json.Marshal(f)
if err != nil {
panic(err)
}
var dst formatXLV3
if err = json.Unmarshal(b, &dst); err != nil {
panic(err)
}
return &dst
}

// Returns formatXL.XL.Version
func newFormatXLV3(numSets int, setLen int) *formatXLV3 {
format := &formatXLV3{}
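Clone exists because formatXLV3 holds nested slices (XL.Sets): copying the struct with *format duplicates only the slice headers, so every "copy" still shares one backing array, which is exactly why later hunks replace newFormat := *format with format.Clone(). A reduced illustration of the aliasing and the JSON round-trip fix:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type format struct {
	ID   string
	Sets [][]string
}

// clone deep-copies via a JSON round-trip, as formatXLV3.Clone does.
func (f *format) clone() *format {
	b, err := json.Marshal(f)
	if err != nil {
		panic(err)
	}
	var dst format
	if err := json.Unmarshal(b, &dst); err != nil {
		panic(err)
	}
	return &dst
}

func main() {
	orig := &format{ID: "a", Sets: [][]string{{"disk1", "disk2"}}}

	shallow := *orig             // copies only the slice header
	shallow.Sets[0][0] = "oops"  // ...so this mutates orig too
	fmt.Println(orig.Sets[0][0]) // "oops"

	orig.Sets[0][0] = "disk1"
	deep := orig.clone()
	deep.Sets[0][0] = "fine"     // independent backing array
	fmt.Println(orig.Sets[0][0]) // still "disk1"
}
```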
@ -471,21 +483,13 @@ func formatXLGetDeploymentID(refFormat *formatXLV3, formats []*formatXLV3) (stri
}

// formatXLFixDeploymentID - Add deployment id if it is not present.
func formatXLFixDeploymentID(ctx context.Context, endpoints EndpointList, storageDisks []StorageAPI, refFormat *formatXLV3) (err error) {
// Acquire lock on format.json
mutex := newNSLock(globalIsDistXL)
formatLock := mutex.NewNSLock(ctx, minioMetaBucket, formatConfigFile)
if err = formatLock.GetLock(globalHealingTimeout); err != nil {
return err
}
defer formatLock.Unlock()

func formatXLFixDeploymentID(endpoints Endpoints, storageDisks []StorageAPI, refFormat *formatXLV3) (err error) {
// Attempt to load all `format.json` from all disks.
var sErrs []error
formats, sErrs := loadFormatXLAll(storageDisks)
for i, sErr := range sErrs {
if _, ok := formatCriticalErrors[sErr]; ok {
return fmt.Errorf("Disk %s: %s", endpoints[i], sErr)
return fmt.Errorf("Disk %s: %w", endpoints[i], sErr)
}
}

@ -518,12 +522,12 @@ func formatXLFixDeploymentID(ctx context.Context, endpoints EndpointList, storag
}
// Deployment ID needs to be set on all the disks.
// Save `format.json` across all disks.
return saveFormatXLAll(ctx, storageDisks, formats)
return saveFormatXLAll(context.Background(), storageDisks, formats)

}

// Update only the valid local disks which have not been updated before.
func formatXLFixLocalDeploymentID(ctx context.Context, endpoints EndpointList, storageDisks []StorageAPI, refFormat *formatXLV3) error {
func formatXLFixLocalDeploymentID(endpoints Endpoints, storageDisks []StorageAPI, refFormat *formatXLV3) error {
// If this server was down when the deploymentID was updated
// then we make sure that we update the local disks with the deploymentID.
for index, storageDisk := range storageDisks {

@ -542,8 +546,8 @@ func formatXLFixLocalDeploymentID(ctx context.Context, endpoints EndpointList, s
}
format.ID = refFormat.ID
if err := saveFormatXL(storageDisk, format); err != nil {
logger.LogIf(ctx, err)
return fmt.Errorf("Unable to save format.json, %s", err)
logger.LogIf(context.Background(), err)
return fmt.Errorf("Unable to save format.json, %w", err)
}
}
}

@ -589,9 +593,9 @@ func getFormatXLInQuorum(formats []*formatXLV3) (*formatXLV3, error) {

for i, hash := range formatHashes {
if hash == maxHash {
format := *formats[i]
format := formats[i].Clone()
format.XL.This = ""
return &format, nil
return format, nil
}
}

@ -599,7 +603,7 @@ func getFormatXLInQuorum(formats []*formatXLV3) (*formatXLV3, error) {
}

func formatXLV3Check(reference *formatXLV3, format *formatXLV3) error {
tmpFormat := *format
tmpFormat := format.Clone()
this := tmpFormat.XL.This
tmpFormat.XL.This = ""
if len(reference.XL.Sets) != len(format.XL.Sets) {

@ -663,7 +667,7 @@ func closeStorageDisks(storageDisks []StorageAPI) {

// Initialize storage disks for each endpoint.
// Errors are returned for each endpoint with matching index.
func initStorageDisksWithErrors(endpoints EndpointList) ([]StorageAPI, []error) {
func initStorageDisksWithErrors(endpoints Endpoints) ([]StorageAPI, []error) {
// Bootstrap disks.
storageDisks := make([]StorageAPI, len(endpoints))
g := errgroup.WithNErrs(len(endpoints))

@ -703,7 +707,7 @@ func formatXLV3ThisEmpty(formats []*formatXLV3) bool {
}

// fixFormatXLV3 - fix format XL configuration on all disks.
func fixFormatXLV3(storageDisks []StorageAPI, endpoints EndpointList, formats []*formatXLV3) error {
func fixFormatXLV3(storageDisks []StorageAPI, endpoints Endpoints, formats []*formatXLV3) error {
for i, format := range formats {
if format == nil || !endpoints[i].IsLocal {
continue

@ -726,29 +730,32 @@ func fixFormatXLV3(storageDisks []StorageAPI, endpoints EndpointList, formats []
}

// initFormatXL - save XL format configuration on all disks.
func initFormatXL(ctx context.Context, storageDisks []StorageAPI, setCount, disksPerSet int) (format *formatXLV3, err error) {
format = newFormatXLV3(setCount, disksPerSet)
func initFormatXL(ctx context.Context, storageDisks []StorageAPI, setCount, drivesPerSet int, deploymentID string) (*formatXLV3, error) {
format := newFormatXLV3(setCount, drivesPerSet)
formats := make([]*formatXLV3, len(storageDisks))

for i := 0; i < setCount; i++ {
for j := 0; j < disksPerSet; j++ {
newFormat := *format
for j := 0; j < drivesPerSet; j++ {
newFormat := format.Clone()
newFormat.XL.This = format.XL.Sets[i][j]
formats[i*disksPerSet+j] = &newFormat
if deploymentID != "" {
newFormat.ID = deploymentID
}
formats[i*drivesPerSet+j] = newFormat
}
}

// Initialize meta volume, if volume already exists ignores it.
if err = initFormatXLMetaVolume(storageDisks, formats); err != nil {
return format, fmt.Errorf("Unable to initialize '.minio.sys' meta volume, %s", err)
if err := initFormatXLMetaVolume(storageDisks, formats); err != nil {
return format, fmt.Errorf("Unable to initialize '.minio.sys' meta volume, %w", err)
}

// Save formats `format.json` across all disks.
if err = saveFormatXLAll(ctx, storageDisks, formats); err != nil {
if err := saveFormatXLAll(ctx, storageDisks, formats); err != nil {
return nil, err
}

return format, nil
return getFormatXLInQuorum(formats)
}

// Make XL backend meta volumes.

@ -862,14 +869,14 @@ func markUUIDsOffline(refFormat *formatXLV3, formats []*formatXLV3) {
}

// Initialize a new set of set formats which will be written to all disks.
func newHealFormatSets(refFormat *formatXLV3, setCount, disksPerSet int, formats []*formatXLV3, errs []error) [][]*formatXLV3 {
func newHealFormatSets(refFormat *formatXLV3, setCount, drivesPerSet int, formats []*formatXLV3, errs []error) [][]*formatXLV3 {
newFormats := make([][]*formatXLV3, setCount)
for i := range refFormat.XL.Sets {
newFormats[i] = make([]*formatXLV3, disksPerSet)
newFormats[i] = make([]*formatXLV3, drivesPerSet)
}
for i := range refFormat.XL.Sets {
for j := range refFormat.XL.Sets[i] {
if errs[i*disksPerSet+j] == errUnformattedDisk || errs[i*disksPerSet+j] == nil {
if errs[i*drivesPerSet+j] == errUnformattedDisk || errs[i*drivesPerSet+j] == nil {
newFormats[i][j] = &formatXLV3{}
newFormats[i][j].Version = refFormat.Version
newFormats[i][j].ID = refFormat.ID

@ -877,13 +884,13 @@ func newHealFormatSets(refFormat *formatXLV3, setCount, disksPerSet int, formats
newFormats[i][j].XL.Version = refFormat.XL.Version
newFormats[i][j].XL.DistributionAlgo = refFormat.XL.DistributionAlgo
}
if errs[i*disksPerSet+j] == errUnformattedDisk {
if errs[i*drivesPerSet+j] == errUnformattedDisk {
newFormats[i][j].XL.This = ""
newFormats[i][j].XL.Sets = nil
continue
}
if errs[i*disksPerSet+j] == nil {
newFormats[i][j].XL.This = formats[i*disksPerSet+j].XL.This
if errs[i*drivesPerSet+j] == nil {
newFormats[i][j].XL.This = formats[i*drivesPerSet+j].XL.This
newFormats[i][j].XL.Sets = nil
}
}
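Across these hunks disksPerSet is renamed drivesPerSet, but the layout logic is untouched: per-drive formats live in one flat slice, and entry (i, j) of set i sits at index i*drivesPerSet+j. A tiny sketch of that row-major indexing:

```go
package main

import "fmt"

func main() {
	setCount, drivesPerSet := 2, 4
	formats := make([]string, setCount*drivesPerSet)

	for i := 0; i < setCount; i++ {
		for j := 0; j < drivesPerSet; j++ {
			// Set i occupies the contiguous block
			// [i*drivesPerSet, (i+1)*drivesPerSet).
			formats[i*drivesPerSet+j] = fmt.Sprintf("set%d/drive%d", i, j)
		}
	}

	fmt.Println(formats[1*drivesPerSet+2]) // set1/drive2
}
```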
44
vendor/github.com/minio/minio/cmd/format-xl_test.go
generated
vendored

@ -83,7 +83,7 @@ func TestFixFormatV3(t *testing.T) {
for _, xlDir := range xlDirs {
defer os.RemoveAll(xlDir)
}
endpoints := mustGetNewEndpointList(xlDirs...)
endpoints := mustGetNewEndpoints(xlDirs...)

storageDisks, errs := initStorageDisksWithErrors(endpoints)
for _, err := range errs {

@ -96,9 +96,9 @@ func TestFixFormatV3(t *testing.T) {
formats := make([]*formatXLV3, 8)

for j := 0; j < 8; j++ {
newFormat := *format
newFormat := format.Clone()
newFormat.XL.This = format.XL.Sets[0][j]
formats[j] = &newFormat
formats[j] = newFormat
}

if err = initFormatXLMetaVolume(storageDisks, formats); err != nil {

@ -130,9 +130,9 @@ func TestFormatXLEmpty(t *testing.T) {
formats := make([]*formatXLV3, 16)

for j := 0; j < 16; j++ {
newFormat := *format
newFormat := format.Clone()
newFormat.XL.This = format.XL.Sets[0][j]
formats[j] = &newFormat
formats[j] = newFormat
}

// empty format to indicate disk not found, but this

@ -411,16 +411,16 @@ func TestCheckFormatXLValue(t *testing.T) {
// Tests getFormatXLInQuorum()
func TestGetFormatXLInQuorumCheck(t *testing.T) {
setCount := 2
disksPerSet := 16
drivesPerSet := 16

format := newFormatXLV3(setCount, disksPerSet)
format := newFormatXLV3(setCount, drivesPerSet)
formats := make([]*formatXLV3, 32)

for i := 0; i < setCount; i++ {
for j := 0; j < disksPerSet; j++ {
newFormat := *format
for j := 0; j < drivesPerSet; j++ {
newFormat := format.Clone()
newFormat.XL.This = format.XL.Sets[i][j]
formats[i*disksPerSet+j] = &newFormat
formats[i*drivesPerSet+j] = newFormat
}
}

@ -477,16 +477,16 @@ func TestGetFormatXLInQuorumCheck(t *testing.T) {
// Tests formatXLGetDeploymentID()
func TestGetXLID(t *testing.T) {
setCount := 2
disksPerSet := 8
drivesPerSet := 8

format := newFormatXLV3(setCount, disksPerSet)
format := newFormatXLV3(setCount, drivesPerSet)
formats := make([]*formatXLV3, 16)

for i := 0; i < setCount; i++ {
for j := 0; j < disksPerSet; j++ {
newFormat := *format
for j := 0; j < drivesPerSet; j++ {
newFormat := format.Clone()
newFormat.XL.This = format.XL.Sets[i][j]
formats[i*disksPerSet+j] = &newFormat
formats[i*drivesPerSet+j] = newFormat
}
}

@ -532,17 +532,17 @@ func TestGetXLID(t *testing.T) {
// Initialize new format sets.
func TestNewFormatSets(t *testing.T) {
setCount := 2
disksPerSet := 16
drivesPerSet := 16

format := newFormatXLV3(setCount, disksPerSet)
format := newFormatXLV3(setCount, drivesPerSet)
formats := make([]*formatXLV3, 32)
errs := make([]error, 32)

for i := 0; i < setCount; i++ {
for j := 0; j < disksPerSet; j++ {
newFormat := *format
for j := 0; j < drivesPerSet; j++ {
newFormat := format.Clone()
newFormat.XL.This = format.XL.Sets[i][j]
formats[i*disksPerSet+j] = &newFormat
formats[i*drivesPerSet+j] = newFormat
}
}

@ -554,7 +554,7 @@ func TestNewFormatSets(t *testing.T) {
// 16th disk is unformatted.
errs[15] = errUnformattedDisk

newFormats := newHealFormatSets(quorumFormat, setCount, disksPerSet, formats, errs)
newFormats := newHealFormatSets(quorumFormat, setCount, drivesPerSet, formats, errs)
if newFormats == nil {
t.Fatal("Unexpected failure")
}

@ -593,7 +593,7 @@ func benchmarkInitStorageDisksN(b *testing.B, nDisks int) {
if err != nil {
b.Fatal(err)
}
endpoints := mustGetNewEndpointList(fsDirs...)
endpoints := mustGetNewEndpoints(fsDirs...)
b.RunParallel(func(pb *testing.PB) {
endpoints := endpoints
for pb.Next() {
6
vendor/github.com/minio/minio/cmd/fs-v1-multipart.go
generated
vendored

@ -646,7 +646,7 @@ func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
}

// Hold write lock on the object.
destLock := fs.nsMutex.NewNSLock(ctx, bucket, object)
destLock := fs.NewNSLock(ctx, bucket, object)
if err = destLock.GetLock(globalObjectTimeout); err != nil {
return oi, err
}

@ -683,8 +683,8 @@ func (fs *FSObjects) CompleteMultipartUpload(ctx context.Context, bucket string,
}

// Deny if WORM is enabled
if retention, isWORMBucket := isWORMEnabled(bucket); isWORMBucket {
if fi, err := fsStatFile(ctx, pathJoin(fs.fsPath, bucket, object)); err == nil && retention.Retain(fi.ModTime()) {
if globalWORMEnabled {
if _, err := fsStatFile(ctx, pathJoin(fs.fsPath, bucket, object)); err == nil {
return ObjectInfo{}, ObjectAlreadyExists{Bucket: bucket, Object: object}
}
}
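The WORM check above moves from a per-bucket retention lookup to one global switch: when globalWORMEnabled is set, an object that already exists on disk must not be overwritten. A reduced sketch of that guard (the flag, error value, and paths are simplified stand-ins, not MinIO's API):

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

var wormEnabled = true // stand-in for globalWORMEnabled

var errObjectAlreadyExists = errors.New("object already exists")

// putObject refuses to replace an existing file while WORM mode is on.
func putObject(path string, data []byte) error {
	if wormEnabled {
		if _, err := os.Stat(path); err == nil {
			return errObjectAlreadyExists // write once, never overwrite
		}
	}
	return os.WriteFile(path, data, 0o644)
}

func main() {
	path := "/tmp/worm-demo"
	fmt.Println(putObject(path, []byte("v1"))) // <nil> on the first write
	fmt.Println(putObject(path, []byte("v2"))) // object already exists
}
```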
38
vendor/github.com/minio/minio/cmd/fs-v1.go
generated
vendored

@ -165,6 +165,12 @@ func NewFSObjectLayer(fsPath string) (ObjectLayer, error) {
return fs, nil
}

// NewNSLock - initialize a new namespace RWLocker instance.
func (fs *FSObjects) NewNSLock(ctx context.Context, bucket string, object string) RWLocker {
// lockers are explicitly 'nil' for FS mode since there are only local lockers
return fs.nsMutex.NewNSLock(ctx, nil, bucket, object)
}

// Shutdown - should be called when process shuts down.
func (fs *FSObjects) Shutdown(ctx context.Context) error {
fs.fsFormatRlk.Close()

@ -176,12 +182,12 @@ func (fs *FSObjects) Shutdown(ctx context.Context) error {
// diskUsage returns du information for the posix path, in a continuous routine.
func (fs *FSObjects) diskUsage(doneCh chan struct{}) {
usageFn := func(ctx context.Context, entry string) error {
if globalHTTPServer != nil {
if httpServer := newHTTPServerFn(); httpServer != nil {
// Wait at max 1 minute for an inprogress request
// before proceeding to count the usage.
waitCount := 60
// Any requests in progress, delay the usage.
for globalHTTPServer.GetRequestCount() > 0 && waitCount > 0 {
for httpServer.GetRequestCount() > 0 && waitCount > 0 {
waitCount--
time.Sleep(1 * time.Second)
}
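diskUsage now reaches the server through newHTTPServerFn() rather than reading globalHTTPServer directly; an accessor lets the server be installed after this background routine has already started, without racing on the global. A hedged sketch of that pattern with invented names:

```go
package main

import (
	"fmt"
	"sync"
)

type server struct{ requests int }

func (s *server) GetRequestCount() int { return s.requests }

var (
	mu  sync.Mutex
	srv *server
)

// newServerFn returns the current server, or nil if none is installed
// yet; reads and writes share one mutex, so there is no data race.
func newServerFn() *server {
	mu.Lock()
	defer mu.Unlock()
	return srv
}

func setServer(s *server) {
	mu.Lock()
	defer mu.Unlock()
	srv = s
}

func main() {
	if s := newServerFn(); s == nil {
		fmt.Println("server not up yet; skip the in-progress-request wait")
	}
	setServer(&server{requests: 3})
	if s := newServerFn(); s != nil {
		fmt.Println("in-flight requests:", s.GetRequestCount())
	}
}
```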
@ -214,12 +220,12 @@ func (fs *FSObjects) diskUsage(doneCh chan struct{}) {
case <-time.After(globalUsageCheckInterval):
var usage uint64
usageFn = func(ctx context.Context, entry string) error {
if globalHTTPServer != nil {
if httpServer := newHTTPServerFn(); httpServer != nil {
// Wait at max 1 minute for an inprogress request
// before proceeding to count the usage.
waitCount := 60
// Any requests in progress, delay the usage.
for globalHTTPServer.GetRequestCount() > 0 && waitCount > 0 {
for httpServer.GetRequestCount() > 0 && waitCount > 0 {
waitCount--
time.Sleep(1 * time.Second)
}

@ -290,7 +296,7 @@ func (fs *FSObjects) statBucketDir(ctx context.Context, bucket string) (os.FileI
// MakeBucketWithLocation - create a new bucket, returns if it
// already exists.
func (fs *FSObjects) MakeBucketWithLocation(ctx context.Context, bucket, location string) error {
bucketLock := fs.nsMutex.NewNSLock(ctx, bucket, "")
bucketLock := fs.NewNSLock(ctx, bucket, "")
if err := bucketLock.GetLock(globalObjectTimeout); err != nil {
return err
}

@ -313,7 +319,7 @@ func (fs *FSObjects) MakeBucketWithLocation(ctx context.Context, bucket, locatio

// GetBucketInfo - fetch bucket metadata info.
func (fs *FSObjects) GetBucketInfo(ctx context.Context, bucket string) (bi BucketInfo, e error) {
bucketLock := fs.nsMutex.NewNSLock(ctx, bucket, "")
bucketLock := fs.NewNSLock(ctx, bucket, "")
if e := bucketLock.GetRLock(globalObjectTimeout); e != nil {
return bi, e
}

@ -376,7 +382,7 @@ func (fs *FSObjects) ListBuckets(ctx context.Context) ([]BucketInfo, error) {
// DeleteBucket - delete a bucket and all the metadata associated
// with the bucket including pending multipart, object metadata.
func (fs *FSObjects) DeleteBucket(ctx context.Context, bucket string) error {
bucketLock := fs.nsMutex.NewNSLock(ctx, bucket, "")
bucketLock := fs.NewNSLock(ctx, bucket, "")
if err := bucketLock.GetLock(globalObjectTimeout); err != nil {
logger.LogIf(ctx, err)
return err

@ -412,7 +418,7 @@ func (fs *FSObjects) DeleteBucket(ctx context.Context, bucket string) error {
func (fs *FSObjects) CopyObject(ctx context.Context, srcBucket, srcObject, dstBucket, dstObject string, srcInfo ObjectInfo, srcOpts, dstOpts ObjectOptions) (oi ObjectInfo, e error) {
cpSrcDstSame := isStringEqual(pathJoin(srcBucket, srcObject), pathJoin(dstBucket, dstObject))
if !cpSrcDstSame {
objectDWLock := fs.nsMutex.NewNSLock(ctx, dstBucket, dstObject)
objectDWLock := fs.NewNSLock(ctx, dstBucket, dstObject)
if err := objectDWLock.GetLock(globalObjectTimeout); err != nil {
return oi, err
}

@ -484,7 +490,7 @@ func (fs *FSObjects) GetObjectNInfo(ctx context.Context, bucket, object string,

if lockType != noLock {
// Lock the object before reading.
lock := fs.nsMutex.NewNSLock(ctx, bucket, object)
lock := fs.NewNSLock(ctx, bucket, object)
switch lockType {
case writeLock:
if err = lock.GetLock(globalObjectTimeout); err != nil {

@ -571,7 +577,7 @@ func (fs *FSObjects) GetObject(ctx context.Context, bucket, object string, offse
}

// Lock the object before reading.
objectLock := fs.nsMutex.NewNSLock(ctx, bucket, object)
objectLock := fs.NewNSLock(ctx, bucket, object)
if err := objectLock.GetRLock(globalObjectTimeout); err != nil {
logger.LogIf(ctx, err)
return err

@ -739,7 +745,7 @@ func (fs *FSObjects) getObjectInfo(ctx context.Context, bucket, object string) (
// getObjectInfoWithLock - reads object metadata and replies back ObjectInfo.
func (fs *FSObjects) getObjectInfoWithLock(ctx context.Context, bucket, object string) (oi ObjectInfo, e error) {
// Lock the object before reading.
objectLock := fs.nsMutex.NewNSLock(ctx, bucket, object)
objectLock := fs.NewNSLock(ctx, bucket, object)
if err := objectLock.GetRLock(globalObjectTimeout); err != nil {
return oi, err
}

@ -764,7 +770,7 @@ func (fs *FSObjects) getObjectInfoWithLock(ctx context.Context, bucket, object s
func (fs *FSObjects) GetObjectInfo(ctx context.Context, bucket, object string, opts ObjectOptions) (oi ObjectInfo, e error) {
oi, err := fs.getObjectInfoWithLock(ctx, bucket, object)
if err == errCorruptedFormat || err == io.EOF {
objectLock := fs.nsMutex.NewNSLock(ctx, bucket, object)
objectLock := fs.NewNSLock(ctx, bucket, object)
if err = objectLock.GetLock(globalObjectTimeout); err != nil {
return oi, toObjectErr(err, bucket, object)
}

@ -810,7 +816,7 @@ func (fs *FSObjects) PutObject(ctx context.Context, bucket string, object string
return ObjectInfo{}, err
}
// Lock the object.
objectLock := fs.nsMutex.NewNSLock(ctx, bucket, object)
objectLock := fs.NewNSLock(ctx, bucket, object)
if err := objectLock.GetLock(globalObjectTimeout); err != nil {
logger.LogIf(ctx, err)
return objInfo, err

@ -925,8 +931,8 @@ func (fs *FSObjects) putObject(ctx context.Context, bucket string, object string
// Entire object was written to the temp location, now it's safe to rename it to the actual location.
fsNSObjPath := pathJoin(fs.fsPath, bucket, object)
// Deny if WORM is enabled
if retention, isWORMBucket := isWORMEnabled(bucket); isWORMBucket {
if fi, err := fsStatFile(ctx, fsNSObjPath); err == nil && retention.Retain(fi.ModTime()) {
if globalWORMEnabled {
if _, err := fsStatFile(ctx, fsNSObjPath); err == nil {
return ObjectInfo{}, ObjectAlreadyExists{Bucket: bucket, Object: object}
}
}

@ -965,7 +971,7 @@ func (fs *FSObjects) DeleteObjects(ctx context.Context, bucket string, objects [
// and there are no rollbacks supported.
func (fs *FSObjects) DeleteObject(ctx context.Context, bucket, object string) error {
// Acquire a write lock before deleting the object.
objectLock := fs.nsMutex.NewNSLock(ctx, bucket, object)
objectLock := fs.NewNSLock(ctx, bucket, object)
if err := objectLock.GetLock(globalOperationTimeout); err != nil {
return err
}
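Every method in fs-v1.go now funnels lock creation through fs.NewNSLock instead of touching fs.nsMutex directly, giving FS mode a single place to declare that its lockers are purely local. A simplified sketch of a namespace locker with a GetLock-style timeout (the interface and names are illustrative, not MinIO's):

```go
package main

import (
	"errors"
	"fmt"
	"sync"
	"time"
)

var errLockTimeout = errors.New("lock acquire timed out")

// nsLock guards one (bucket, object) pair.
type nsLock struct{ ch chan struct{} }

// GetLock blocks until the lock is free or the timeout elapses.
func (l *nsLock) GetLock(timeout time.Duration) error {
	select {
	case l.ch <- struct{}{}:
		return nil
	case <-time.After(timeout):
		return errLockTimeout
	}
}

func (l *nsLock) Unlock() { <-l.ch }

// nsLockMap hands out one lock per namespace key, much as NewNSLock
// does per bucket/object.
type nsLockMap struct {
	mu    sync.Mutex
	locks map[string]*nsLock
}

func (m *nsLockMap) NewNSLock(bucket, object string) *nsLock {
	m.mu.Lock()
	defer m.mu.Unlock()
	key := bucket + "/" + object
	if m.locks[key] == nil {
		m.locks[key] = &nsLock{ch: make(chan struct{}, 1)}
	}
	return m.locks[key]
}

func main() {
	m := &nsLockMap{locks: map[string]*nsLock{}}
	l := m.NewNSLock("bucket", "object")
	if err := l.GetLock(time.Second); err != nil {
		fmt.Println(err)
		return
	}
	defer l.Unlock()
	fmt.Println("holding bucket/object")
}
```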
30
vendor/github.com/minio/minio/cmd/gateway-main.go
generated
vendored

@ -118,6 +118,9 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
// Handle common command args.
handleCommonCmdArgs(ctx)

// Initialize all help
initHelp()

// Get port to listen on from gateway address
globalMinioHost, globalMinioPort = mustSplitHostPort(globalCLIContext.Addr)

@ -142,11 +145,12 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
// Set system resources to maximum.
logger.LogIf(context.Background(), setMaxResources())

initNSLock(false) // Enable local namespace lock.

// Set when gateway is enabled
globalIsGateway = true

// Initialize globalConsoleSys system
globalConsoleSys = NewConsoleLogger(context.Background(), globalEndpoints)

enableConfigOps := gatewayName == "nas"

// TODO: We need to move this code with globalConfigSys.Init()

@ -173,8 +177,6 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
registerSTSRouter(router)
}

// Initialize globalConsoleSys system
globalConsoleSys = NewConsoleLogger(context.Background(), globalEndpoints)
enableIAMOps := globalEtcdClient != nil

// Enable IAM admin APIs if etcd is enabled, if not just enable basic

@ -204,12 +206,16 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
getCert = globalTLSCerts.GetCertificate
}

globalHTTPServer = xhttp.NewServer([]string{globalCLIContext.Addr},
httpServer := xhttp.NewServer([]string{globalCLIContext.Addr},
criticalErrorHandler{registerHandlers(router, globalHandlers...)}, getCert)
go func() {
globalHTTPServerErrorCh <- globalHTTPServer.Start()
globalHTTPServerErrorCh <- httpServer.Start()
}()

globalObjLayerMutex.Lock()
globalHTTPServer = httpServer
globalObjLayerMutex.Unlock()

signal.Notify(globalOSSignalCh, os.Interrupt, syscall.SIGTERM)

newObject, err := gw.NewGatewayLayer(globalActiveCred)

@ -221,6 +227,8 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
logger.FatalIf(err, "Unable to initialize gateway backend")
}

newObject = NewGatewayLayerWithLocker(newObject)

// Re-enable logging
logger.Disable = false

@ -292,6 +300,11 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
// - compression
verifyObjectLayerFeatures("gateway "+gatewayName, newObject)

// Disable safe mode operation, after all initialization is over.
globalObjLayerMutex.Lock()
globalSafeMode = false
globalObjLayerMutex.Unlock()

// Prints the formatted startup message once object layer is initialized.
if !globalCLIContext.Quiet {
mode := globalMinioModeGatewayPrefix + gatewayName

@ -307,11 +320,6 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
printGatewayStartupMessage(getAPIEndpoints(), gatewayName)
}

// Disable safe mode operation, after all initialization is over.
globalObjLayerMutex.Lock()
globalSafeMode = false
globalObjLayerMutex.Unlock()

// Set uptime time after object layer has initialized.
globalBootTime = UTCNow()
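StartGateway now constructs the HTTP server into a local variable, starts it, and only then publishes it to the global under globalObjLayerMutex; it clears globalSafeMode behind the same mutex once initialization completes. A sketch of the idiom with invented names:

```go
package main

import (
	"fmt"
	"sync"
)

var (
	mu       sync.Mutex
	srvAddr  string // shared state read by other goroutines
	safeMode = true
)

// publish makes the fully constructed value visible to readers only
// after construction is complete, never half-initialized.
func publish(addr string) {
	mu.Lock()
	srvAddr = addr
	mu.Unlock()
}

func leaveSafeMode() {
	mu.Lock()
	safeMode = false
	mu.Unlock()
}

func main() {
	publish("127.0.0.1:9000")
	leaveSafeMode()

	mu.Lock()
	fmt.Println(srvAddr, "safeMode:", safeMode)
	mu.Unlock()
}
```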
23
vendor/github.com/minio/minio/cmd/gateway-unsupported.go
generated
vendored

@ -18,6 +18,7 @@ package cmd

import (
"context"
"errors"

"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/lifecycle"

@ -25,9 +26,31 @@ import (
"github.com/minio/minio/pkg/policy"
)

// GatewayLocker implements custom NewNSLock implementation
type GatewayLocker struct {
ObjectLayer
nsMutex *nsLockMap
}

// NewNSLock - implements gateway level locker
func (l *GatewayLocker) NewNSLock(ctx context.Context, bucket string, object string) RWLocker {
return l.nsMutex.NewNSLock(ctx, nil, bucket, object)
}

// NewGatewayLayerWithLocker - initialize gateway with locker.
func NewGatewayLayerWithLocker(gwLayer ObjectLayer) ObjectLayer {
return &GatewayLocker{ObjectLayer: gwLayer, nsMutex: newNSLock(false)}
}

// GatewayUnsupported list of unsupported call stubs for gateway.
type GatewayUnsupported struct{}

// NewNSLock is a dummy stub for gateway.
func (a GatewayUnsupported) NewNSLock(ctx context.Context, bucket string, object string) RWLocker {
logger.CriticalIf(ctx, errors.New("not implemented"))
return nil
}

// ListMultipartUploads lists all multipart uploads.
func (a GatewayUnsupported) ListMultipartUploads(ctx context.Context, bucket string, prefix string, keyMarker string, uploadIDMarker string, delimiter string, maxUploads int) (lmi ListMultipartsInfo, err error) {
return lmi, NotImplemented{}
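GatewayLocker is the standard Go decorator: embed the interface, inherit every method, override only NewNSLock. Any backend wrapped by NewGatewayLayerWithLocker keeps its own behavior for everything else. A compact sketch of the technique:

```go
package main

import "fmt"

type Layer interface {
	Get(key string) string
	NewNSLock(key string) string
}

type backend struct{}

func (backend) Get(key string) string       { return "value:" + key }
func (backend) NewNSLock(key string) string { return "no locking" }

// locker embeds Layer, so it inherits Get and overrides only NewNSLock,
// just as GatewayLocker embeds ObjectLayer.
type locker struct {
	Layer
}

func (l locker) NewNSLock(key string) string { return "local lock for " + key }

func main() {
	var l Layer = locker{Layer: backend{}}
	fmt.Println(l.Get("a"))       // delegated to the embedded backend
	fmt.Println(l.NewNSLock("a")) // overridden by the wrapper
}
```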
593
vendor/github.com/minio/minio/cmd/gateway/azure/gateway-azure.go
generated
vendored
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff