Mirror of https://github.com/kyverno/kyverno.git (synced 2025-03-28 18:38:40 +00:00)
update vendor
Commit c839149fb9 (parent 462231c09d)
286 changed files with 6947 additions and 2563 deletions

Changed files shown in this view (under vendor/github.com):
  evanphx/json-patch
  hashicorp/golang-lru
  json-iterator/go: adapter.go, go.mod, go.sum, iter_skip.go, misc_tests, reflect_native.go, reflect_struct_decoder.go, skip_tests, stream_float.go
  minio/minio: .travis.yml, Dockerfile.simpleci, Makefile, README.md, buildscripts, cmd
  minio/minio/cmd: admin-handlers.go, admin-handlers_test.go, admin-heal-ops.go, admin-router.go, api-errors.go, api-resources_test.go, api-response.go, api-router.go, auth-handler_test.go, bucket-handlers.go, bucket-handlers_test.go, bucket-notification-handlers.go, certs.go, common-main.go, config-common.go, config-current.go, config-current_test.go, config-migrate.go, config-migrate_test.go, config.go, crypto, disk-cache-config.go, disk-cache-fs.go, disk-cache.go, disk-usage.go, endpoint.go, endpoint_test.go, etcd.go, format-xl.go, fs-v1-helpers.go, fs-v1-helpers_test.go, fs-v1-metadata.go, fs-v1-multipart.go, fs-v1.go, fs-v1_test.go, gateway-main.go, gateway, generic-handlers.go, generic-handlers_test.go, handler-utils.go, iam-etcd-store.go, iam-object-store.go, iam.go, lock-rest-client.go, lock-rest-server-common.go, lock-rest-server.go, notification.go, object-api-common.go, object-api-errors.go, object-api-getobject_test.go, object-api-input-checks.go, object-api-listobjects_test.go, object-api-multipart_test.go, object-api-utils.go, object-handlers.go, object-handlers_test.go, object_api_suite_test.go, os-reliable.go, peer-rest-client.go, peer-rest-common.go, peer-rest-server.go, posix-list-dir_other.go, posix-list-dir_unix.go, posix-list-dir_windows.go, posix.go, posix_test.go, prepare-storage.go, rest

vendor/github.com/evanphx/json-patch/go.mod (5 changes; generated, vendored, new file)
@@ -0,0 +1,5 @@
module github.com/evanphx/json-patch

go 1.12

require github.com/pkg/errors v0.8.1

vendor/github.com/evanphx/json-patch/go.sum (2 changes; generated, vendored, new file)
@@ -0,0 +1,2 @@
github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=

vendor/github.com/hashicorp/golang-lru/go.mod (2 changes; generated, vendored)
@@ -1 +1,3 @@
module github.com/hashicorp/golang-lru

go 1.12

vendor/github.com/hashicorp/golang-lru/lru.go (26 changes; generated, vendored)
@@ -86,17 +86,35 @@ func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) {
}

// Remove removes the provided key from the cache.
func (c *Cache) Remove(key interface{}) {
func (c *Cache) Remove(key interface{}) (present bool) {
c.lock.Lock()
c.lru.Remove(key)
present = c.lru.Remove(key)
c.lock.Unlock()
return
}

// Resize changes the cache size.
func (c *Cache) Resize(size int) (evicted int) {
c.lock.Lock()
evicted = c.lru.Resize(size)
c.lock.Unlock()
return evicted
}

// RemoveOldest removes the oldest item from the cache.
func (c *Cache) RemoveOldest() {
func (c *Cache) RemoveOldest() (key interface{}, value interface{}, ok bool) {
c.lock.Lock()
c.lru.RemoveOldest()
key, value, ok = c.lru.RemoveOldest()
c.lock.Unlock()
return
}

// GetOldest returns the oldest entry
func (c *Cache) GetOldest() (key interface{}, value interface{}, ok bool) {
c.lock.Lock()
key, value, ok = c.lru.GetOldest()
c.lock.Unlock()
return
}

// Keys returns a slice of the keys in the cache, from oldest to newest.

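The hunk above changes the public surface of the Cache wrapper: Remove now reports whether the key was present, RemoveOldest returns the evicted entry, and a new Resize method returns how many entries were evicted. The following is a minimal usage sketch against that updated API; the keys, sizes, and print statements are illustrative and not taken from this commit.

package main

import (
	"fmt"

	lru "github.com/hashicorp/golang-lru"
)

func main() {
	// NewWithEvict mirrors the constructor used by the new TestLRUResize test.
	cache, err := lru.NewWithEvict(2, func(k, v interface{}) {
		fmt.Println("evicted:", k, v)
	})
	if err != nil {
		panic(err)
	}

	cache.Add("a", 1)
	cache.Add("b", 2)

	// Remove now reports whether the key was actually present.
	present := cache.Remove("a")
	fmt.Println("removed a:", present)

	// Resize returns the number of entries evicted to fit the new size.
	evicted := cache.Resize(1)
	fmt.Println("evicted on resize:", evicted)

	// RemoveOldest now returns the evicted key/value pair.
	key, value, ok := cache.RemoveOldest()
	fmt.Println(key, value, ok)
}
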
vendor/github.com/hashicorp/golang-lru/lru_test.go (39 changes; generated, vendored)
@@ -219,3 +219,42 @@ func TestLRUPeek(t *testing.T) {
t.Errorf("should not have updated recent-ness of 1")
}
}

// test that Resize can upsize and downsize
func TestLRUResize(t *testing.T) {
onEvictCounter := 0
onEvicted := func(k interface{}, v interface{}) {
onEvictCounter++
}
l, err := NewWithEvict(2, onEvicted)
if err != nil {
t.Fatalf("err: %v", err)
}

// Downsize
l.Add(1, 1)
l.Add(2, 2)
evicted := l.Resize(1);
if evicted != 1 {
t.Errorf("1 element should have been evicted: %v", evicted)
}
if onEvictCounter != 1 {
t.Errorf("onEvicted should have been called 1 time: %v", onEvictCounter)
}

l.Add(3, 3)
if l.Contains(1) {
t.Errorf("Element 1 should have been evicted")
}

// Upsize
evicted = l.Resize(2);
if evicted != 0 {
t.Errorf("0 elements should have been evicted: %v", evicted)
}

l.Add(4, 4)
if !l.Contains(3) || !l.Contains(4) {
t.Errorf("Cache should have contained 2 elements")
}
}

vendor/github.com/hashicorp/golang-lru/simplelru/lru.go (16 changes; generated, vendored)
@@ -73,6 +73,9 @@ func (c *LRU) Add(key, value interface{}) (evicted bool) {
func (c *LRU) Get(key interface{}) (value interface{}, ok bool) {
if ent, ok := c.items[key]; ok {
c.evictList.MoveToFront(ent)
if ent.Value.(*entry) == nil {
return nil, false
}
return ent.Value.(*entry).value, true
}
return
@@ -142,6 +145,19 @@ func (c *LRU) Len() int {
return c.evictList.Len()
}

// Resize changes the cache size.
func (c *LRU) Resize(size int) (evicted int) {
diff := c.Len() - size
if diff < 0 {
diff = 0
}
for i := 0; i < diff; i++ {
c.removeOldest()
}
c.size = size
return diff
}

// removeOldest removes the oldest item from the cache.
func (c *LRU) removeOldest() {
ent := c.evictList.Back()

vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go (7 changes; generated, vendored)
@@ -10,7 +10,7 @@ type LRUCache interface {
// updates the "recently used"-ness of the key. #value, isFound
Get(key interface{}) (value interface{}, ok bool)

// Check if a key exsists in cache without updating the recent-ness.
// Checks if a key exists in cache without updating the recent-ness.
Contains(key interface{}) (ok bool)

// Returns key's value without updating the "recently used"-ness of the key.
@@ -31,6 +31,9 @@ type LRUCache interface {
// Returns the number of items in the cache.
Len() int

// Clear all cache entries
// Clears all cache entries.
Purge()

// Resizes cache, returning number evicted
Resize(int) int
}

vendor/github.com/hashicorp/golang-lru/simplelru/lru_test.go (39 changes; generated, vendored)
@@ -165,3 +165,42 @@ func TestLRU_Peek(t *testing.T) {
t.Errorf("should not have updated recent-ness of 1")
}
}

// Test that Resize can upsize and downsize
func TestLRU_Resize(t *testing.T) {
onEvictCounter := 0
onEvicted := func(k interface{}, v interface{}) {
onEvictCounter++
}
l, err := NewLRU(2, onEvicted)
if err != nil {
t.Fatalf("err: %v", err)
}

// Downsize
l.Add(1, 1)
l.Add(2, 2)
evicted := l.Resize(1);
if evicted != 1 {
t.Errorf("1 element should have been evicted: %v", evicted)
}
if onEvictCounter != 1 {
t.Errorf("onEvicted should have been called 1 time: %v", onEvictCounter)
}

l.Add(3, 3)
if l.Contains(1) {
t.Errorf("Element 1 should have been evicted")
}

// Upsize
evicted = l.Resize(2);
if evicted != 0 {
t.Errorf("0 elements should have been evicted: %v", evicted)
}

l.Add(4, 4)
if !l.Contains(3) || !l.Contains(4) {
t.Errorf("Cache should have contained 2 elements")
}
}

vendor/github.com/json-iterator/go/adapter.go (2 changes; generated, vendored)
@@ -16,7 +16,7 @@ func Unmarshal(data []byte, v interface{}) error {
return ConfigDefault.Unmarshal(data, v)
}

// UnmarshalFromString convenient method to read from string instead of []byte
// UnmarshalFromString is a convenient method to read from string instead of []byte
func UnmarshalFromString(str string, v interface{}) error {
return ConfigDefault.UnmarshalFromString(str, v)
}

vendor/github.com/json-iterator/go/go.mod (11 changes; generated, vendored, new file)
@@ -0,0 +1,11 @@
module github.com/json-iterator/go

go 1.12

require (
github.com/davecgh/go-spew v1.1.1
github.com/google/gofuzz v1.0.0
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742
github.com/stretchr/testify v1.3.0
)

vendor/github.com/json-iterator/go/go.sum (14 changes; generated, vendored, new file)
@@ -0,0 +1,14 @@
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=

vendor/github.com/json-iterator/go/iter_skip.go (25 changes; generated, vendored)
@@ -37,17 +37,24 @@ func (iter *Iterator) SkipAndReturnBytes() []byte {
return iter.stopCapture()
}

type captureBuffer struct {
startedAt int
captured []byte
// SkipAndAppendBytes skips next JSON element and appends its content to
// buffer, returning the result.
func (iter *Iterator) SkipAndAppendBytes(buf []byte) []byte {
iter.startCaptureTo(buf, iter.head)
iter.Skip()
return iter.stopCapture()
}

func (iter *Iterator) startCapture(captureStartedAt int) {
func (iter *Iterator) startCaptureTo(buf []byte, captureStartedAt int) {
if iter.captured != nil {
panic("already in capture mode")
}
iter.captureStartedAt = captureStartedAt
iter.captured = make([]byte, 0, 32)
iter.captured = buf
}

func (iter *Iterator) startCapture(captureStartedAt int) {
iter.startCaptureTo(make([]byte, 0, 32), captureStartedAt)
}

func (iter *Iterator) stopCapture() []byte {
@@ -58,13 +65,7 @@ func (iter *Iterator) stopCapture() []byte {
remaining := iter.buf[iter.captureStartedAt:iter.head]
iter.captureStartedAt = -1
iter.captured = nil
if len(captured) == 0 {
copied := make([]byte, len(remaining))
copy(copied, remaining)
return copied
}
captured = append(captured, remaining...)
return captured
return append(captured, remaining...)
}

// Skip skips a json object and positions to relatively the next json object

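The new SkipAndAppendBytes lets a caller reuse a buffer instead of allocating a fresh capture buffer for every skipped element, as SkipAndReturnBytes does via startCapture. A rough usage sketch follows, mirroring the test added in skip_tests later in this diff; the sample JSON and buffer sizes are arbitrary.

package main

import (
	"bytes"
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	input := bytes.NewBufferString(`[ {"a": 1}, "tail" ]`)
	iter := jsoniter.Parse(jsoniter.ConfigDefault, input, 4)

	iter.ReadArray() // position on the first array element

	// Reuse a caller-owned buffer; the skipped element's raw bytes are appended to it.
	buf := make([]byte, 0, 1024)
	buf = iter.SkipAndAppendBytes(buf)
	fmt.Println(string(buf)) // raw bytes of the first element, e.g. {"a": 1}
}
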
vendor/github.com/json-iterator/go/misc_tests/jsoniter_array_test.go (21 changes; generated, vendored)
@@ -158,6 +158,27 @@ func Test_encode_byte_array(t *testing.T) {
should.Equal(`"AQID"`, string(bytes))
}

func Test_encode_empty_byte_array(t *testing.T) {
should := require.New(t)
bytes, err := json.Marshal([]byte{})
should.Nil(err)
should.Equal(`""`, string(bytes))
bytes, err = jsoniter.Marshal([]byte{})
should.Nil(err)
should.Equal(`""`, string(bytes))
}

func Test_encode_nil_byte_array(t *testing.T) {
should := require.New(t)
var nilSlice []byte
bytes, err := json.Marshal(nilSlice)
should.Nil(err)
should.Equal(`null`, string(bytes))
bytes, err = jsoniter.Marshal(nilSlice)
should.Nil(err)
should.Equal(`null`, string(bytes))
}

func Test_decode_byte_array_from_base64(t *testing.T) {
should := require.New(t)
data := []byte{}

vendor/github.com/json-iterator/go/misc_tests/jsoniter_float_test.go (21 changes; generated, vendored)
@@ -2,6 +2,7 @@ package misc_tests

import (
"encoding/json"
"math"
"testing"

"github.com/json-iterator/go"
@@ -77,6 +78,26 @@ func Test_read_number(t *testing.T) {
should.Equal(`92233720368547758079223372036854775807`, string(val))
}

func Test_encode_inf(t *testing.T) {
should := require.New(t)
_, err := json.Marshal(math.Inf(1))
should.Error(err)
_, err = jsoniter.Marshal(float32(math.Inf(1)))
should.Error(err)
_, err = jsoniter.Marshal(math.Inf(-1))
should.Error(err)
}

func Test_encode_nan(t *testing.T) {
should := require.New(t)
_, err := json.Marshal(math.NaN())
should.Error(err)
_, err = jsoniter.Marshal(float32(math.NaN()))
should.Error(err)
_, err = jsoniter.Marshal(math.NaN())
should.Error(err)
}

func Benchmark_jsoniter_float(b *testing.B) {
b.ReportAllocs()
input := []byte(`1.1123,`)

vendor/github.com/json-iterator/go/reflect_native.go (14 changes; generated, vendored)
@@ -432,17 +432,19 @@ func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) {
}

func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) {
src := *((*[]byte)(ptr))
if len(src) == 0 {
if codec.sliceType.UnsafeIsNil(ptr) {
stream.WriteNil()
return
}
src := *((*[]byte)(ptr))
encoding := base64.StdEncoding
stream.writeByte('"')
size := encoding.EncodedLen(len(src))
buf := make([]byte, size)
encoding.Encode(buf, src)
stream.buf = append(stream.buf, buf...)
if len(src) != 0 {
size := encoding.EncodedLen(len(src))
buf := make([]byte, size)
encoding.Encode(buf, src)
stream.buf = append(stream.buf, buf...)
}
stream.writeByte('"')
}

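The encoder change above makes base64Codec distinguish a nil []byte from an empty one: only a nil slice is written as null, while a zero-length slice now produces an empty quoted string. A small sketch of the resulting behavior, matching the misc_tests added earlier in this diff (the literal inputs are illustrative):

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	var nilSlice []byte
	out, _ := jsoniter.Marshal(nilSlice)
	fmt.Println(string(out)) // null

	out, _ = jsoniter.Marshal([]byte{})
	fmt.Println(string(out)) // ""

	out, _ = jsoniter.Marshal([]byte{1, 2, 3})
	fmt.Println(string(out)) // "AQID"
}
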
vendor/github.com/json-iterator/go/reflect_struct_decoder.go (2 changes; generated, vendored)
@@ -530,8 +530,8 @@ func (decoder *generalStructDecoder) decodeOneField(ptr unsafe.Pointer, iter *It
}
}
if fieldDecoder == nil {
msg := "found unknown field: " + field
if decoder.disallowUnknownFields {
msg := "found unknown field: " + field
iter.ReportError("ReadObject", msg)
}
c := iter.nextToken()

vendor/github.com/json-iterator/go/skip_tests/jsoniter_skip_test.go (9 changes; generated, vendored)
@@ -105,6 +105,15 @@ func Test_skip_and_return_bytes_with_reader(t *testing.T) {
should.Equal(`{"a" : [{"stream": "c"}], "d": 102 }`, string(skipped))
}

func Test_append_skip_and_return_bytes_with_reader(t *testing.T) {
should := require.New(t)
iter := jsoniter.Parse(jsoniter.ConfigDefault, bytes.NewBufferString(`[ {"a" : [{"stream": "c"}], "d": 102 }, "stream"]`), 4)
iter.ReadArray()
buf := make([]byte, 0, 1024)
buf = iter.SkipAndAppendBytes(buf)
should.Equal(`{"a" : [{"stream": "c"}], "d": 102 }`, string(buf))
}

func Test_skip_empty(t *testing.T) {
should := require.New(t)
should.NotNil(jsoniter.Get([]byte("")).LastError())

vendor/github.com/json-iterator/go/stream_float.go (17 changes; generated, vendored)
@@ -1,6 +1,7 @@
package jsoniter

import (
"fmt"
"math"
"strconv"
)
@@ -13,6 +14,10 @@ func init() {

// WriteFloat32 write float32 to stream
func (stream *Stream) WriteFloat32(val float32) {
if math.IsInf(float64(val), 0) || math.IsNaN(float64(val)) {
stream.Error = fmt.Errorf("unsupported value: %f", val)
return
}
abs := math.Abs(float64(val))
fmt := byte('f')
// Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right.
@@ -26,6 +31,10 @@ func (stream *Stream) WriteFloat32(val float32) {

// WriteFloat32Lossy write float32 to stream with ONLY 6 digits precision although much much faster
func (stream *Stream) WriteFloat32Lossy(val float32) {
if math.IsInf(float64(val), 0) || math.IsNaN(float64(val)) {
stream.Error = fmt.Errorf("unsupported value: %f", val)
return
}
if val < 0 {
stream.writeByte('-')
val = -val
@@ -54,6 +63,10 @@ func (stream *Stream) WriteFloat32Lossy(val float32) {

// WriteFloat64 write float64 to stream
func (stream *Stream) WriteFloat64(val float64) {
if math.IsInf(val, 0) || math.IsNaN(val) {
stream.Error = fmt.Errorf("unsupported value: %f", val)
return
}
abs := math.Abs(val)
fmt := byte('f')
// Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right.
@@ -67,6 +80,10 @@ func (stream *Stream) WriteFloat64(val float64) {

// WriteFloat64Lossy write float64 to stream with ONLY 6 digits precision although much much faster
func (stream *Stream) WriteFloat64Lossy(val float64) {
if math.IsInf(val, 0) || math.IsNaN(val) {
stream.Error = fmt.Errorf("unsupported value: %f", val)
return
}
if val < 0 {
stream.writeByte('-')
val = -val

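With the guards added above, encoding Inf or NaN no longer emits invalid JSON; the stream records an error instead, so Marshal fails the same way encoding/json does. A brief sketch of the observable behavior, mirroring Test_encode_inf and Test_encode_nan from the misc_tests change (the printed messages are illustrative):

package main

import (
	"fmt"
	"math"

	jsoniter "github.com/json-iterator/go"
)

func main() {
	if _, err := jsoniter.Marshal(math.Inf(1)); err != nil {
		fmt.Println("+Inf rejected:", err)
	}
	if _, err := jsoniter.Marshal(math.NaN()); err != nil {
		fmt.Println("NaN rejected:", err)
	}
	// Ordinary floats are unaffected.
	out, _ := jsoniter.Marshal(1.25)
	fmt.Println(string(out)) // 1.25
}
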
vendor/github.com/minio/minio/.travis.yml (2 changes; generated, vendored)
@@ -26,6 +26,7 @@ matrix:
- ARCH=x86_64
- CGO_ENABLED=0
- GO111MODULE=on
- GOPROXY=https://proxy.golang.org
# Enable build cache
# https://restic.net/blog/2018-09-02/travis-build-cache
cache:
@@ -52,6 +53,7 @@ matrix:
- ARCH=x86_64
- CGO_ENABLED=0
- GO111MODULE=on
- GOPROXY=https://proxy.golang.org
go: 1.12.5
script:
- go build --ldflags="$(go run buildscripts/gen-ldflags.go)" -o %GOPATH%\bin\minio.exe

vendor/github.com/minio/minio/Dockerfile.simpleci (1 change; generated, vendored)
@@ -8,6 +8,7 @@ WORKDIR /go/src/github.com/minio/minio

RUN apt-get update && apt-get install -y jq
ENV GO111MODULE=on
ENV GOPROXY=https://proxy.golang.org

RUN git config --global http.cookiefile /gitcookie/.gitcookie

vendor/github.com/minio/minio/Makefile (30 changes; generated, vendored)
@@ -30,35 +30,35 @@ verifiers: getdeps vet fmt lint staticcheck spelling

vet:
	@echo "Running $@"
	@GO111MODULE=on go vet github.com/minio/minio/...
	@GOPROXY=https://proxy.golang.org GO111MODULE=on go vet github.com/minio/minio/...

fmt:
	@echo "Running $@"
	@GO111MODULE=on gofmt -d cmd/
	@GO111MODULE=on gofmt -d pkg/
	@GOPROXY=https://proxy.golang.org GO111MODULE=on gofmt -d cmd/
	@GOPROXY=https://proxy.golang.org GO111MODULE=on gofmt -d pkg/

lint:
	@echo "Running $@"
	@GO111MODULE=on ${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/cmd/...
	@GO111MODULE=on ${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/pkg/...
	@GOPROXY=https://proxy.golang.org GO111MODULE=on ${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/cmd/...
	@GOPROXY=https://proxy.golang.org GO111MODULE=on ${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/pkg/...

staticcheck:
	@echo "Running $@"
	@GO111MODULE=on ${GOPATH}/bin/staticcheck github.com/minio/minio/cmd/...
	@GO111MODULE=on ${GOPATH}/bin/staticcheck github.com/minio/minio/pkg/...
	@GOPROXY=https://proxy.golang.org GO111MODULE=on ${GOPATH}/bin/staticcheck github.com/minio/minio/cmd/...
	@GOPROXY=https://proxy.golang.org GO111MODULE=on ${GOPATH}/bin/staticcheck github.com/minio/minio/pkg/...

spelling:
	@GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find cmd/`
	@GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find pkg/`
	@GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find docs/`
	@GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find buildscripts/`
	@GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find dockerscripts/`
	@GOPROXY=https://proxy.golang.org GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find cmd/`
	@GOPROXY=https://proxy.golang.org GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find pkg/`
	@GOPROXY=https://proxy.golang.org GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find docs/`
	@GOPROXY=https://proxy.golang.org GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find buildscripts/`
	@GOPROXY=https://proxy.golang.org GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find dockerscripts/`

# Builds minio, runs the verifiers then runs the tests.
check: test
test: verifiers build
	@echo "Running unit tests"
	@GO111MODULE=on CGO_ENABLED=0 go test -tags kqueue ./... 1>/dev/null
	@GOPROXY=https://proxy.golang.org GO111MODULE=on CGO_ENABLED=0 go test -tags kqueue ./... 1>/dev/null

verify: build
	@echo "Verifying build"
@@ -71,8 +71,8 @@ coverage: build
# Builds minio locally.
build: checks
	@echo "Building minio binary to './minio'"
	@GO111MODULE=on GOFLAGS="" CGO_ENABLED=0 go build -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio 1>/dev/null
	@GO111MODULE=on GOFLAGS="" CGO_ENABLED=0 go build -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/dockerscripts/healthcheck $(PWD)/dockerscripts/healthcheck.go 1>/dev/null
	@GOPROXY=https://proxy.golang.org GO111MODULE=on GOFLAGS="" CGO_ENABLED=0 go build -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio 1>/dev/null
	@GOPROXY=https://proxy.golang.org GO111MODULE=on GOFLAGS="" CGO_ENABLED=0 go build -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/dockerscripts/healthcheck $(PWD)/dockerscripts/healthcheck.go 1>/dev/null

docker: build
	@docker build -t $(TAG) . -f Dockerfile.dev

vendor/github.com/minio/minio/README.md (9 changes; generated, vendored)
@@ -1,10 +1,15 @@
# MinIO Quickstart Guide
[](https://slack.min.io) [](https://goreportcard.com/report/minio/minio) [](https://hub.docker.com/r/minio/minio/)

MinIO is an object storage server released under Apache License v2.0. It is compatible with Amazon S3 cloud storage service. It is best suited for storing unstructured data such as photos, videos, log files, backups and container / VM images. Size of an object can range from a few KBs to a maximum of 5TB.
MinIO is an object storage server released under Apache License v2.0. It is compatible[1] with Amazon S3 cloud storage service. It is best suited for storing unstructured data such as photos, videos, log files, backups and container / VM images. Size of an object can range from a few KBs to a maximum of 5TB.

MinIO server is light enough to be bundled with the application stack, similar to NodeJS, Redis and MySQL.

[1]: MinIO in its default mode is faster and does not calculate MD5Sum unless passed by client. This may lead to incompatibility with few S3 clients like s3ql that heavily depend on MD5Sum. For full compatibility with Amazon S3 API, start MinIO with `--compat` option.
```sh
minio --compat server /data
```

## Docker Container
### Stable
```
@@ -86,7 +91,7 @@ service minio start
Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.12](https://golang.org/dl/#stable)

```sh
GO111MODULE=on go get github.com/minio/minio
GOPROXY=https://proxy.golang.org GO111MODULE=on go get github.com/minio/minio
```

## Allow port access for Firewalls

vendor/github.com/minio/minio/buildscripts/cross-compile.sh (1 change; generated, vendored)
@@ -23,6 +23,7 @@ function _build() {
export GOOS=$os
export GOARCH=$arch
export GO111MODULE=on
export GOPROXY=https://proxy.golang.org
go build -tags kqueue -o /dev/null
}

vendor/github.com/minio/minio/buildscripts/go-coverage.sh (2 changes; generated, vendored)
@@ -2,4 +2,4 @@

set -e

GO111MODULE=on CGO_ENABLED=0 go test -v -coverprofile=coverage.txt -covermode=atomic ./...
GOPROXY=https://proxy.golang.org GO111MODULE=on CGO_ENABLED=0 go test -v -coverprofile=coverage.txt -covermode=atomic ./...

vendor/github.com/minio/minio/buildscripts/verify-build.sh (1 change; generated, vendored)
@@ -33,6 +33,7 @@ export ACCESS_KEY="minio"
export SECRET_KEY="minio123"
export ENABLE_HTTPS=0
export GO111MODULE=on
export GOPROXY=https://proxy.golang.org

MINIO_CONFIG_DIR="$WORK_DIR/.minio"
MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" )

vendor/github.com/minio/minio/cmd/admin-handlers.go (177 changes; generated, vendored)
@@ -23,6 +23,7 @@ import (
"encoding/json"
"errors"
"io"
"io/ioutil"
"net/http"
"os"
"sort"
@@ -1019,6 +1020,130 @@ func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) {
writeSuccessResponseJSON(w, econfigData)
}

// UpdateGroupMembers - PUT /minio/admin/v1/update-group-members
func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "UpdateGroupMembers")

objectAPI := validateAdminReq(ctx, w, r)
if objectAPI == nil {
return
}

defer r.Body.Close()
data, err := ioutil.ReadAll(r.Body)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}

var updReq madmin.GroupAddRemove
err = json.Unmarshal(data, &updReq)
if err != nil {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL)
return
}

if updReq.IsRemove {
err = globalIAMSys.RemoveUsersFromGroup(updReq.Group, updReq.Members)
} else {
err = globalIAMSys.AddUsersToGroup(updReq.Group, updReq.Members)
}

if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

// Notify all other MinIO peers to load group.
for _, nerr := range globalNotificationSys.LoadGroup(updReq.Group) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
}

// GetGroup - /minio/admin/v1/group?group=mygroup1
func (a adminAPIHandlers) GetGroup(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "GetGroup")

objectAPI := validateAdminReq(ctx, w, r)
if objectAPI == nil {
return
}

vars := mux.Vars(r)
group := vars["group"]

gdesc, err := globalIAMSys.GetGroupDescription(group)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

body, err := json.Marshal(gdesc)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

writeSuccessResponseJSON(w, body)
}

// ListGroups - GET /minio/admin/v1/groups
func (a adminAPIHandlers) ListGroups(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "ListGroups")

objectAPI := validateAdminReq(ctx, w, r)
if objectAPI == nil {
return
}

groups := globalIAMSys.ListGroups()
body, err := json.Marshal(groups)
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

writeSuccessResponseJSON(w, body)
}

// SetGroupStatus - PUT /minio/admin/v1/set-group-status?group=mygroup1&status=enabled
func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetGroupStatus")

objectAPI := validateAdminReq(ctx, w, r)
if objectAPI == nil {
return
}

vars := mux.Vars(r)
group := vars["group"]
status := vars["status"]

var err error
if status == statusEnabled {
err = globalIAMSys.SetGroupStatus(group, true)
} else if status == statusDisabled {
err = globalIAMSys.SetGroupStatus(group, false)
} else {
err = errInvalidArgument
}
if err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
return
}

// Notify all other MinIO peers to reload user.
for _, nerr := range globalNotificationSys.LoadGroup(group) {
if nerr.Err != nil {
logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String())
logger.LogIf(ctx, nerr.Err)
}
}
}

// SetUserStatus - PUT /minio/admin/v1/set-user-status?accessKey=<access_key>&status=[enabled|disabled]
func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request) {
ctx := newContext(r, w, "SetUserStatus")
@@ -1253,7 +1378,7 @@ func (a adminAPIHandlers) SetUserPolicy(w http.ResponseWriter, r *http.Request)
return
}

if err := globalIAMSys.SetUserPolicy(accessKey, policyName); err != nil {
if err := globalIAMSys.PolicyDBSet(accessKey, policyName, false); err != nil {
writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL)
}
@@ -1459,6 +1584,23 @@ func (a adminAPIHandlers) SetConfigKeysHandler(w http.ResponseWriter, r *http.Re
writeSuccessResponseHeadersOnly(w)
}

// Returns true if the trace.Info should be traced,
// false if certain conditions are not met.
// - input entry is not of the type *trace.Info*
// - errOnly entries are to be traced, not status code 2xx, 3xx.
// - all entries to be traced, if not trace only S3 API requests.
func mustTrace(entry interface{}, trcAll, errOnly bool) bool {
trcInfo, ok := entry.(trace.Info)
if !ok {
return false
}
trace := trcAll || !hasPrefix(trcInfo.ReqInfo.Path, minioReservedBucketPath+SlashSeparator)
if errOnly {
return trace && trcInfo.RespInfo.StatusCode >= http.StatusBadRequest
}
return trace
}

// TraceHandler - POST /minio/admin/v1/trace
// ----------
// The handler sends http trace to the connected HTTP client.
@@ -1474,10 +1616,6 @@ func (a adminAPIHandlers) TraceHandler(w http.ResponseWriter, r *http.Request) {
return
}

// Avoid reusing tcp connection if read timeout is hit
// This is needed to make r.Context().Done() work as
// expected in case of read timeout
w.Header().Set(xhttp.Connection, "close")
w.Header().Set(xhttp.ContentType, "text/event-stream")

doneCh := make(chan struct{})
@@ -1487,28 +1625,22 @@ func (a adminAPIHandlers) TraceHandler(w http.ResponseWriter, r *http.Request) {
// Use buffered channel to take care of burst sends or slow w.Write()
traceCh := make(chan interface{}, 4000)

filter := func(entry interface{}) bool {
trcInfo := entry.(trace.Info)
if trcErr && isHTTPStatusOK(trcInfo.RespInfo.StatusCode) {
return false
}
if trcAll {
return true
}
return !strings.HasPrefix(trcInfo.ReqInfo.Path, minioReservedBucketPath)

}
remoteHosts := getRemoteHosts(globalEndpoints)
peers, err := getRestClients(remoteHosts)
peers, err := getRestClients(getRemoteHosts(globalEndpoints))
if err != nil {
return
}
globalHTTPTrace.Subscribe(traceCh, doneCh, filter)

globalHTTPTrace.Subscribe(traceCh, doneCh, func(entry interface{}) bool {
return mustTrace(entry, trcAll, trcErr)
})

for _, peer := range peers {
peer.Trace(traceCh, doneCh, trcAll, trcErr)
}

keepAliveTicker := time.NewTicker(500 * time.Millisecond)
defer keepAliveTicker.Stop()

enc := json.NewEncoder(w)
for {
select {
@@ -1517,8 +1649,11 @@ func (a adminAPIHandlers) TraceHandler(w http.ResponseWriter, r *http.Request) {
return
}
w.(http.Flusher).Flush()
case <-r.Context().Done():
return
case <-keepAliveTicker.C:
if _, err := w.Write([]byte(" ")); err != nil {
return
}
w.(http.Flusher).Flush()
case <-GlobalServiceDoneCh:
return
}

vendor/github.com/minio/minio/cmd/admin-handlers_test.go (18 changes; generated, vendored)
@@ -83,7 +83,9 @@ var (
"durable": false,
"internal": false,
"noWait": false,
"autoDeleted": false
"autoDeleted": false,
"queueDir": "",
"queueLimit": 0
}
},
"elasticsearch": {
@@ -139,7 +141,9 @@ var (
"port": "",
"user": "",
"password": "",
"database": ""
"database": "",
"queueDir": "",
"queueLimit": 0
}
},
"nats": {
@@ -152,6 +156,8 @@ var (
"token": "",
"secure": false,
"pingInterval": 0,
"queueDir": "",
"queueLimit": 0,
"streaming": {
"enable": false,
"clusterID": "",
@@ -183,7 +189,9 @@ var (
"port": "",
"user": "",
"password": "",
"database": ""
"database": "",
"queueDir": "",
"queueLimit": 0
}
},
"redis": {
@@ -192,7 +200,9 @@ var (
"format": "namespace",
"address": "",
"password": "",
"key": ""
"key": "",
"queueDir": "",
"queueLimit": 0
}
},
"webhook": {

vendor/github.com/minio/minio/cmd/admin-heal-ops.go (6 changes; generated, vendored)
@@ -587,9 +587,9 @@ func (h *healSequence) healItemsFromSourceCh() error {

var itemType madmin.HealItemType
switch {
case path == "/":
case path == SlashSeparator:
itemType = madmin.HealItemMetadata
case !strings.Contains(path, "/"):
case !strings.Contains(path, SlashSeparator):
itemType = madmin.HealItemBucket
default:
itemType = madmin.HealItemObject
@@ -693,7 +693,7 @@ func (h *healSequence) healDiskFormat() error {
return errServerNotInitialized
}

return h.queueHealTask("/", madmin.HealItemMetadata)
return h.queueHealTask(SlashSeparator, madmin.HealItemMetadata)
}

// healBuckets - check for all buckets heal or just particular bucket.

vendor/github.com/minio/minio/cmd/admin-router.go (12 changes; generated, vendored)
@@ -110,6 +110,18 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
// List users
adminV1Router.Methods(http.MethodGet).Path("/list-users").HandlerFunc(httpTraceHdrs(adminAPI.ListUsers))

// Add/Remove members from group
adminV1Router.Methods(http.MethodPut).Path("/update-group-members").HandlerFunc(httpTraceHdrs(adminAPI.UpdateGroupMembers))

// Get Group
adminV1Router.Methods(http.MethodGet).Path("/group").HandlerFunc(httpTraceHdrs(adminAPI.GetGroup)).Queries("group", "{group:.*}")

// List Groups
adminV1Router.Methods(http.MethodGet).Path("/groups").HandlerFunc(httpTraceHdrs(adminAPI.ListGroups))

// Set Group Status
adminV1Router.Methods(http.MethodPut).Path("/set-group-status").HandlerFunc(httpTraceHdrs(adminAPI.SetGroupStatus)).Queries("group", "{group:.*}").Queries("status", "{status:.*}")

// List policies
adminV1Router.Methods(http.MethodGet).Path("/list-canned-policies").HandlerFunc(httpTraceHdrs(adminAPI.ListCannedPolicies))
}

vendor/github.com/minio/minio/cmd/api-errors.go (16 changes; generated, vendored)
@@ -203,6 +203,8 @@ const (

ErrMalformedJSON
ErrAdminNoSuchUser
ErrAdminNoSuchGroup
ErrAdminGroupNotEmpty
ErrAdminNoSuchPolicy
ErrAdminInvalidArgument
ErrAdminInvalidAccessKey
@@ -923,6 +925,16 @@ var errorCodes = errorCodeMap{
Description: "The specified user does not exist.",
HTTPStatusCode: http.StatusNotFound,
},
ErrAdminNoSuchGroup: {
Code: "XMinioAdminNoSuchGroup",
Description: "The specified group does not exist.",
HTTPStatusCode: http.StatusNotFound,
},
ErrAdminGroupNotEmpty: {
Code: "XMinioAdminGroupNotEmpty",
Description: "The specified group is not empty - cannot remove it.",
HTTPStatusCode: http.StatusBadRequest,
},
ErrAdminNoSuchPolicy: {
Code: "XMinioAdminNoSuchPolicy",
Description: "The canned policy does not exist.",
@@ -1500,6 +1512,10 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrAdminInvalidArgument
case errNoSuchUser:
apiErr = ErrAdminNoSuchUser
case errNoSuchGroup:
apiErr = ErrAdminNoSuchGroup
case errGroupNotEmpty:
apiErr = ErrAdminGroupNotEmpty
case errNoSuchPolicy:
apiErr = ErrAdminNoSuchPolicy
case errSignatureMismatch:

vendor/github.com/minio/minio/cmd/api-resources_test.go (18 changes; generated, vendored)
@@ -36,7 +36,7 @@ func TestListObjectsV2Resources(t *testing.T) {
"prefix": []string{"photos/"},
"continuation-token": []string{"token"},
"start-after": []string{"start-after"},
"delimiter": []string{"/"},
"delimiter": []string{SlashSeparator},
"fetch-owner": []string{"true"},
"max-keys": []string{"100"},
"encoding-type": []string{"gzip"},
@@ -44,7 +44,7 @@ func TestListObjectsV2Resources(t *testing.T) {
prefix: "photos/",
token: "token",
startAfter: "start-after",
delimiter: "/",
delimiter: SlashSeparator,
fetchOwner: true,
maxKeys: 100,
encodingType: "gzip",
@@ -55,14 +55,14 @@ func TestListObjectsV2Resources(t *testing.T) {
"prefix": []string{"photos/"},
"continuation-token": []string{"token"},
"start-after": []string{"start-after"},
"delimiter": []string{"/"},
"delimiter": []string{SlashSeparator},
"fetch-owner": []string{"true"},
"encoding-type": []string{"gzip"},
},
prefix: "photos/",
token: "token",
startAfter: "start-after",
delimiter: "/",
delimiter: SlashSeparator,
fetchOwner: true,
maxKeys: 1000,
encodingType: "gzip",
@@ -73,7 +73,7 @@ func TestListObjectsV2Resources(t *testing.T) {
"prefix": []string{"photos/"},
"continuation-token": []string{""},
"start-after": []string{"start-after"},
"delimiter": []string{"/"},
"delimiter": []string{SlashSeparator},
"fetch-owner": []string{"true"},
"encoding-type": []string{"gzip"},
},
@@ -130,13 +130,13 @@ func TestListObjectsV1Resources(t *testing.T) {
values: url.Values{
"prefix": []string{"photos/"},
"marker": []string{"test"},
"delimiter": []string{"/"},
"delimiter": []string{SlashSeparator},
"max-keys": []string{"100"},
"encoding-type": []string{"gzip"},
},
prefix: "photos/",
marker: "test",
delimiter: "/",
delimiter: SlashSeparator,
maxKeys: 100,
encodingType: "gzip",
},
@@ -144,12 +144,12 @@ func TestListObjectsV1Resources(t *testing.T) {
values: url.Values{
"prefix": []string{"photos/"},
"marker": []string{"test"},
"delimiter": []string{"/"},
"delimiter": []string{SlashSeparator},
"encoding-type": []string{"gzip"},
},
prefix: "photos/",
marker: "test",
delimiter: "/",
delimiter: SlashSeparator,
maxKeys: 1000,
encodingType: "gzip",
},

vendor/github.com/minio/minio/cmd/api-response.go (4 changes; generated, vendored)
@@ -293,14 +293,14 @@ func getObjectLocation(r *http.Request, domains []string, bucket, object string)
}
u := &url.URL{
Host: r.Host,
Path: path.Join(slashSeparator, bucket, object),
Path: path.Join(SlashSeparator, bucket, object),
Scheme: proto,
}
// If domain is set then we need to use bucket DNS style.
for _, domain := range domains {
if strings.Contains(r.Host, domain) {
u.Host = bucket + "." + r.Host
u.Path = path.Join(slashSeparator, object)
u.Path = path.Join(SlashSeparator, object)
break
}
}

vendor/github.com/minio/minio/cmd/api-router.go (4 changes; generated, vendored)
@@ -48,7 +48,7 @@ func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool)
}

// API Router
apiRouter := router.PathPrefix("/").Subrouter()
apiRouter := router.PathPrefix(SlashSeparator).Subrouter()
var routers []*mux.Router
for _, domainName := range globalDomainNames {
routers = append(routers, apiRouter.Host("{bucket:.+}."+domainName).Subrouter())
@@ -157,7 +157,7 @@ func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool)
/// Root operation

// ListBuckets
apiRouter.Methods(http.MethodGet).Path("/").HandlerFunc(httpTraceAll(api.ListBucketsHandler))
apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(httpTraceAll(api.ListBucketsHandler))

// If none of the routes match.
apiRouter.NotFoundHandler = http.HandlerFunc(httpTraceAll(notFoundHandler))

vendor/github.com/minio/minio/cmd/auth-handler_test.go (10 changes; generated, vendored)
@@ -44,7 +44,7 @@ func TestGetRequestAuthType(t *testing.T) {
URL: &url.URL{
Host: "127.0.0.1:9000",
Scheme: httpScheme,
Path: "/",
Path: SlashSeparator,
},
Header: http.Header{
"Authorization": []string{"AWS4-HMAC-SHA256 <cred_string>"},
@@ -62,7 +62,7 @@ func TestGetRequestAuthType(t *testing.T) {
URL: &url.URL{
Host: "127.0.0.1:9000",
Scheme: httpScheme,
Path: "/",
Path: SlashSeparator,
},
Header: http.Header{
"Authorization": []string{"Bearer 12313123"},
@@ -77,7 +77,7 @@ func TestGetRequestAuthType(t *testing.T) {
URL: &url.URL{
Host: "127.0.0.1:9000",
Scheme: httpScheme,
Path: "/",
Path: SlashSeparator,
},
Header: http.Header{
"Authorization": []string{""},
@@ -92,7 +92,7 @@ func TestGetRequestAuthType(t *testing.T) {
URL: &url.URL{
Host: "127.0.0.1:9000",
Scheme: httpScheme,
Path: "/",
Path: SlashSeparator,
RawQuery: "X-Amz-Credential=EXAMPLEINVALIDEXAMPL%2Fs3%2F20160314%2Fus-east-1",
},
},
@@ -105,7 +105,7 @@ func TestGetRequestAuthType(t *testing.T) {
URL: &url.URL{
Host: "127.0.0.1:9000",
Scheme: httpScheme,
Path: "/",
Path: SlashSeparator,
},
Header: http.Header{
"Content-Type": []string{"multipart/form-data"},

vendor/github.com/minio/minio/cmd/bucket-handlers.go (2 changes; generated, vendored)
@@ -650,7 +650,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h
return
}
if objectAPI.IsEncryptionSupported() {
if hasServerSideEncryptionHeader(formValues) && !hasSuffix(object, slashSeparator) { // handle SSE-C and SSE-S3 requests
if hasServerSideEncryptionHeader(formValues) && !hasSuffix(object, SlashSeparator) { // handle SSE-C and SSE-S3 requests
var reader io.Reader
var key []byte
if crypto.SSEC.IsRequested(formValues) {

vendor/github.com/minio/minio/cmd/bucket-handlers_test.go (4 changes; generated, vendored)
@@ -69,7 +69,7 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri
expectedRespStatus: http.StatusForbidden,
locationResponse: []byte(""),
errorResponse: APIErrorResponse{
Resource: "/" + bucketName + "/",
Resource: SlashSeparator + bucketName + SlashSeparator,
Code: "InvalidAccessKeyId",
Message: "The access key ID you provided does not exist in our records.",
},
@@ -394,7 +394,7 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s
prefix: "",
keyMarker: "",
uploadIDMarker: "",
delimiter: "/",
delimiter: SlashSeparator,
maxUploads: "100",
accessKey: credentials.AccessKey,
secretKey: credentials.SecretKey,

vendor/github.com/minio/minio/cmd/bucket-notification-handlers.go (35 changes; generated, vendored)
@@ -17,10 +17,12 @@
package cmd

import (
"bytes"
"encoding/xml"
"errors"
"io"
"net/http"
"path"

"github.com/gorilla/mux"
xhttp "github.com/minio/minio/cmd/http"
@@ -49,6 +51,7 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,

vars := mux.Vars(r)
bucketName := vars["bucket"]
var config *event.Config

objAPI := api.ObjectAPI()
if objAPI == nil {
@@ -72,24 +75,31 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter,
return
}

// Attempt to successfully load notification config.
nConfig, err := readNotificationConfig(ctx, objAPI, bucketName)
// Construct path to notification.xml for the given bucket.
configFile := path.Join(bucketConfigPrefix, bucketName, bucketNotificationConfig)

configData, err := readConfig(ctx, objAPI, configFile)
if err != nil {
// Ignore errNoSuchNotifications to comply with AWS S3.
if err != errNoSuchNotifications {
if err != errConfigNotFound {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}
config = &event.Config{}
} else {
if err = xml.NewDecoder(bytes.NewReader(configData)).Decode(&config); err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
}

nConfig = &event.Config{}
}

config.SetRegion(globalServerConfig.GetRegion())

// If xml namespace is empty, set a default value before returning.
if nConfig.XMLNS == "" {
nConfig.XMLNS = "http://s3.amazonaws.com/doc/2006-03-01/"
if config.XMLNS == "" {
config.XMLNS = "http://s3.amazonaws.com/doc/2006-03-01/"
}

notificationBytes, err := xml.Marshal(nConfig)
notificationBytes, err := xml.Marshal(config)
if err != nil {
writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
return
@@ -143,9 +153,10 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter,
if event.IsEventError(err) {
apiErr = toAPIError(ctx, err)
}

writeErrorResponse(ctx, w, apiErr, r.URL, guessIsBrowserReq(r))
return
if _, ok := err.(*event.ErrARNNotFound); !ok {
writeErrorResponse(ctx, w, apiErr, r.URL, guessIsBrowserReq(r))
return
}
}

if err = saveNotificationConfig(ctx, objectAPI, bucketName, config); err != nil {

vendor/github.com/minio/minio/cmd/certs.go (2 changes; generated, vendored)
@@ -87,7 +87,7 @@ func getRootCAs(certsCAsDir string) (*x509.CertPool, error) {
// Load all custom CA files.
for _, fi := range fis {
// Skip all directories.
if hasSuffix(fi, slashSeparator) {
if hasSuffix(fi, SlashSeparator) {
continue
}
caCert, err := ioutil.ReadFile(pathJoin(certsCAsDir, fi))

vendor/github.com/minio/minio/cmd/common-main.go (19 changes; generated, vendored)
@@ -38,6 +38,25 @@ import (
xnet "github.com/minio/minio/pkg/net"
)

func verifyObjectLayerFeatures(name string, objAPI ObjectLayer) {
if (globalAutoEncryption || GlobalKMS != nil) && !objAPI.IsEncryptionSupported() {
logger.Fatal(errInvalidArgument,
"Encryption support is requested but '%s' does not support encryption", name)
}

if strings.HasPrefix(name, "gateway") {
if GlobalGatewaySSE.IsSet() && GlobalKMS == nil {
uiErr := uiErrInvalidGWSSEEnvValue(nil).Msg("MINIO_GATEWAY_SSE set but KMS is not configured")
logger.Fatal(uiErr, "Unable to start gateway with SSE")
}
}

if globalIsCompressionEnabled && !objAPI.IsCompressionSupported() {
logger.Fatal(errInvalidArgument,
"Compression support is requested but '%s' does not support compression", name)
}
}

// Check for updates and print a notification message
func checkUpdate(mode string) {
// Its OK to ignore any errors during doUpdate() here.

vendor/github.com/minio/minio/cmd/config-common.go (53 changes; generated, vendored)
@@ -20,9 +20,7 @@ import (
"bytes"
"context"
"errors"
"fmt"

etcd "github.com/coreos/etcd/clientv3"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/hash"
)
@@ -51,38 +49,10 @@ func readConfig(ctx context.Context, objAPI ObjectLayer, configFile string) ([]b
return buffer.Bytes(), nil
}

func deleteConfigEtcd(ctx context.Context, client *etcd.Client, configFile string) error {
timeoutCtx, cancel := context.WithTimeout(ctx, defaultContextTimeout)
defer cancel()

_, err := client.Delete(timeoutCtx, configFile)
if err != nil {
if err == context.DeadlineExceeded {
return fmt.Errorf("etcd setup is unreachable, please check your endpoints %s",
client.Endpoints())
}
return fmt.Errorf("unexpected error %s returned by etcd setup, please check your endpoints %s",
err, client.Endpoints())
}
return nil
}

func deleteConfig(ctx context.Context, objAPI ObjectLayer, configFile string) error {
return objAPI.DeleteObject(ctx, minioMetaBucket, configFile)
}

func saveConfigEtcd(ctx context.Context, client *etcd.Client, configFile string, data []byte) error {
timeoutCtx, cancel := context.WithTimeout(ctx, defaultContextTimeout)
defer cancel()
_, err := client.Put(timeoutCtx, configFile, string(data))
if err == context.DeadlineExceeded {
return fmt.Errorf("etcd setup is unreachable, please check your endpoints %s", client.Endpoints())
} else if err != nil {
return fmt.Errorf("unexpected error %s returned by etcd setup, please check your endpoints %s", err, client.Endpoints())
}
return nil
}

func saveConfig(ctx context.Context, objAPI ObjectLayer, configFile string, data []byte) error {
hashReader, err := hash.NewReader(bytes.NewReader(data), int64(len(data)), "", getSHA256Hash(data), int64(len(data)), globalCLIContext.StrictS3Compat)
if err != nil {
@@ -93,29 +63,6 @@ func saveConfig(ctx context.Context, objAPI ObjectLayer, configFile string, data
return err
}

func readConfigEtcd(ctx context.Context, client *etcd.Client, configFile string) ([]byte, error) {
timeoutCtx, cancel := context.WithTimeout(ctx, defaultContextTimeout)
defer cancel()
resp, err := client.Get(timeoutCtx, configFile)
if err != nil {
if err == context.DeadlineExceeded {
return nil, fmt.Errorf("etcd setup is unreachable, please check your endpoints %s",
client.Endpoints())
}
return nil, fmt.Errorf("unexpected error %s returned by etcd setup, please check your endpoints %s",
err, client.Endpoints())
}
if resp.Count == 0 {
return nil, errConfigNotFound
}
for _, ev := range resp.Kvs {
if string(ev.Key) == configFile {
return ev.Value, nil
}
}
return nil, errConfigNotFound
}

func checkConfig(ctx context.Context, objAPI ObjectLayer, configFile string) error {
if _, err := objAPI.GetObjectInfo(ctx, minioMetaBucket, configFile, ObjectOptions{}); err != nil {
// Treat object not found as config not found.

57 vendor/github.com/minio/minio/cmd/config-current.go generated vendored
|
@ -30,7 +30,7 @@ import (
|
|||
"github.com/minio/minio/pkg/auth"
|
||||
"github.com/minio/minio/pkg/event"
|
||||
"github.com/minio/minio/pkg/event/target"
|
||||
"github.com/minio/minio/pkg/iam/policy"
|
||||
iampolicy "github.com/minio/minio/pkg/iam/policy"
|
||||
"github.com/minio/minio/pkg/iam/validator"
|
||||
xnet "github.com/minio/minio/pkg/net"
|
||||
)
|
||||
|
@ -281,17 +281,27 @@ func (s *serverConfig) loadFromEnvs() {
|
|||
}
|
||||
|
||||
if jwksURL, ok := os.LookupEnv("MINIO_IAM_JWKS_URL"); ok {
|
||||
if u, err := xnet.ParseURL(jwksURL); err == nil {
|
||||
s.OpenID.JWKS.URL = u
|
||||
logger.FatalIf(s.OpenID.JWKS.PopulatePublicKey(), "Unable to populate public key from JWKS URL")
|
||||
u, err := xnet.ParseURL(jwksURL)
|
||||
if err != nil {
|
||||
logger.FatalIf(err, "Unable to parse MINIO_IAM_JWKS_URL %s", jwksURL)
|
||||
}
|
||||
s.OpenID.JWKS.URL = u
|
||||
}
|
||||
|
||||
if opaURL, ok := os.LookupEnv("MINIO_IAM_OPA_URL"); ok {
|
||||
if u, err := xnet.ParseURL(opaURL); err == nil {
|
||||
s.Policy.OPA.URL = u
|
||||
s.Policy.OPA.AuthToken = os.Getenv("MINIO_IAM_OPA_AUTHTOKEN")
|
||||
u, err := xnet.ParseURL(opaURL)
|
||||
if err != nil {
|
||||
logger.FatalIf(err, "Unable to parse MINIO_IAM_OPA_URL %s", opaURL)
|
||||
}
|
||||
opaArgs := iampolicy.OpaArgs{
|
||||
URL: u,
|
||||
AuthToken: os.Getenv("MINIO_IAM_OPA_AUTHTOKEN"),
|
||||
Transport: NewCustomHTTPTransport(),
|
||||
CloseRespFn: xhttp.DrainBody,
|
||||
}
|
||||
logger.FatalIf(opaArgs.Validate(), "Unable to reach MINIO_IAM_OPA_URL %s", opaURL)
|
||||
s.Policy.OPA.URL = opaArgs.URL
|
||||
s.Policy.OPA.AuthToken = opaArgs.AuthToken
|
||||
}
|
||||
}
|
||||
|
||||
|
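The hunk above switches MINIO_IAM_JWKS_URL/MINIO_IAM_OPA_URL handling from silently ignoring a parse failure to aborting start-up. A minimal, self-contained sketch of that fail-fast pattern, using net/url in place of MinIO's xnet helper (only the env variable name comes from the diff; the rest is illustrative):

```go
package main

import (
	"log"
	"net/url"
	"os"
)

func main() {
	// Fail fast on a malformed JWKS endpoint instead of dropping it silently.
	if jwksURL, ok := os.LookupEnv("MINIO_IAM_JWKS_URL"); ok {
		u, err := url.Parse(jwksURL)
		if err != nil {
			log.Fatalf("Unable to parse MINIO_IAM_JWKS_URL %s: %v", jwksURL, err)
		}
		log.Printf("JWKS endpoint configured: %s", u)
	}
}
```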
@ -303,7 +313,7 @@ func (s *serverConfig) TestNotificationTargets() error {
|
|||
if !v.Enable {
|
||||
continue
|
||||
}
|
||||
t, err := target.NewAMQPTarget(k, v)
|
||||
t, err := target.NewAMQPTarget(k, v, GlobalServiceDoneCh)
|
||||
if err != nil {
|
||||
return fmt.Errorf("amqp(%s): %s", k, err.Error())
|
||||
}
|
||||
|
@ -347,7 +357,7 @@ func (s *serverConfig) TestNotificationTargets() error {
|
|||
if !v.Enable {
|
||||
continue
|
||||
}
|
||||
t, err := target.NewMySQLTarget(k, v)
|
||||
t, err := target.NewMySQLTarget(k, v, GlobalServiceDoneCh)
|
||||
if err != nil {
|
||||
return fmt.Errorf("mysql(%s): %s", k, err.Error())
|
||||
}
|
||||
|
@ -358,7 +368,7 @@ func (s *serverConfig) TestNotificationTargets() error {
|
|||
if !v.Enable {
|
||||
continue
|
||||
}
|
||||
t, err := target.NewNATSTarget(k, v)
|
||||
t, err := target.NewNATSTarget(k, v, GlobalServiceDoneCh)
|
||||
if err != nil {
|
||||
return fmt.Errorf("nats(%s): %s", k, err.Error())
|
||||
}
|
||||
|
@ -380,7 +390,7 @@ func (s *serverConfig) TestNotificationTargets() error {
|
|||
if !v.Enable {
|
||||
continue
|
||||
}
|
||||
t, err := target.NewPostgreSQLTarget(k, v)
|
||||
t, err := target.NewPostgreSQLTarget(k, v, GlobalServiceDoneCh)
|
||||
if err != nil {
|
||||
return fmt.Errorf("postgreSQL(%s): %s", k, err.Error())
|
||||
}
|
||||
|
@ -391,7 +401,7 @@ func (s *serverConfig) TestNotificationTargets() error {
|
|||
if !v.Enable {
|
||||
continue
|
||||
}
|
||||
t, err := target.NewRedisTarget(k, v)
|
||||
t, err := target.NewRedisTarget(k, v, GlobalServiceDoneCh)
|
||||
if err != nil {
|
||||
return fmt.Errorf("redis(%s): %s", k, err.Error())
|
||||
}
|
||||
|
@ -536,7 +546,7 @@ func (s *serverConfig) loadToCachedConfigs() {
|
|||
globalCacheMaxUse = cacheConf.MaxUse
|
||||
}
|
||||
if err := Environment.LookupKMSConfig(s.KMS); err != nil {
|
||||
logger.FatalIf(err, "Unable to setup the KMS")
|
||||
logger.FatalIf(err, "Unable to setup the KMS %s", s.KMS.Vault.Endpoint)
|
||||
}
|
||||
|
||||
if !globalIsCompressionEnabled {
|
||||
|
@ -546,15 +556,22 @@ func (s *serverConfig) loadToCachedConfigs() {
|
|||
globalIsCompressionEnabled = compressionConf.Enabled
|
||||
}
|
||||
|
||||
if s.OpenID.JWKS.URL != nil && s.OpenID.JWKS.URL.String() != "" {
|
||||
logger.FatalIf(s.OpenID.JWKS.PopulatePublicKey(),
|
||||
"Unable to populate public key from JWKS URL %s", s.OpenID.JWKS.URL)
|
||||
}
|
||||
|
||||
globalIAMValidators = getAuthValidators(s)
|
||||
|
||||
if s.Policy.OPA.URL != nil && s.Policy.OPA.URL.String() != "" {
|
||||
globalPolicyOPA = iampolicy.NewOpa(iampolicy.OpaArgs{
|
||||
opaArgs := iampolicy.OpaArgs{
|
||||
URL: s.Policy.OPA.URL,
|
||||
AuthToken: s.Policy.OPA.AuthToken,
|
||||
Transport: NewCustomHTTPTransport(),
|
||||
CloseRespFn: xhttp.DrainBody,
|
||||
})
|
||||
}
|
||||
logger.FatalIf(opaArgs.Validate(), "Unable to reach OPA URL %s", s.Policy.OPA.URL)
|
||||
globalPolicyOPA = iampolicy.NewOpa(opaArgs)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -637,7 +654,7 @@ func getNotificationTargets(config *serverConfig) *event.TargetList {
|
|||
}
|
||||
for id, args := range config.Notify.AMQP {
|
||||
if args.Enable {
|
||||
newTarget, err := target.NewAMQPTarget(id, args)
|
||||
newTarget, err := target.NewAMQPTarget(id, args, GlobalServiceDoneCh)
|
||||
if err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
continue
|
||||
|
@ -696,7 +713,7 @@ func getNotificationTargets(config *serverConfig) *event.TargetList {
|
|||
|
||||
for id, args := range config.Notify.MySQL {
|
||||
if args.Enable {
|
||||
newTarget, err := target.NewMySQLTarget(id, args)
|
||||
newTarget, err := target.NewMySQLTarget(id, args, GlobalServiceDoneCh)
|
||||
if err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
continue
|
||||
|
@ -710,7 +727,7 @@ func getNotificationTargets(config *serverConfig) *event.TargetList {
|
|||
|
||||
for id, args := range config.Notify.NATS {
|
||||
if args.Enable {
|
||||
newTarget, err := target.NewNATSTarget(id, args)
|
||||
newTarget, err := target.NewNATSTarget(id, args, GlobalServiceDoneCh)
|
||||
if err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
continue
|
||||
|
@ -738,7 +755,7 @@ func getNotificationTargets(config *serverConfig) *event.TargetList {
|
|||
|
||||
for id, args := range config.Notify.PostgreSQL {
|
||||
if args.Enable {
|
||||
newTarget, err := target.NewPostgreSQLTarget(id, args)
|
||||
newTarget, err := target.NewPostgreSQLTarget(id, args, GlobalServiceDoneCh)
|
||||
if err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
continue
|
||||
|
@ -752,7 +769,7 @@ func getNotificationTargets(config *serverConfig) *event.TargetList {
|
|||
|
||||
for id, args := range config.Notify.Redis {
|
||||
if args.Enable {
|
||||
newTarget, err := target.NewRedisTarget(id, args)
|
||||
newTarget, err := target.NewRedisTarget(id, args, GlobalServiceDoneCh)
|
||||
if err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
continue
|
||||
|
|
10 vendor/github.com/minio/minio/cmd/config-current_test.go generated vendored
|
@ -185,10 +185,10 @@ func TestValidateConfig(t *testing.T) {
|
|||
{`{"version": "` + v + `", "browser": "on", "browser": "on", "region":"us-east-1", "credential" : {"accessKey":"minio", "secretKey":"minio123"}}`, false},
|
||||
|
||||
// Test 11 - Test AMQP
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "amqp": { "1": { "enable": true, "url": "", "exchange": "", "routingKey": "", "exchangeType": "", "mandatory": false, "immediate": false, "durable": false, "internal": false, "noWait": false, "autoDeleted": false }}}}`, false},
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "amqp": { "1": { "enable": true, "url": "", "exchange": "", "routingKey": "", "exchangeType": "", "mandatory": false, "immediate": false, "durable": false, "internal": false, "noWait": false, "autoDeleted": false, "queueDir": "", "queueLimit": 0}}}}`, false},
|
||||
|
||||
// Test 12 - Test NATS
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "nats": { "1": { "enable": true, "address": "", "subject": "", "username": "", "password": "", "token": "", "secure": false, "pingInterval": 0, "streaming": { "enable": false, "clusterID": "", "async": false, "maxPubAcksInflight": 0 } } }}}`, false},
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "nats": { "1": { "enable": true, "address": "", "subject": "", "username": "", "password": "", "token": "", "secure": false, "pingInterval": 0, "queueDir": "", "queueLimit": 0, "streaming": { "enable": false, "clusterID": "", "async": false, "maxPubAcksInflight": 0 } } }}}`, false},
|
||||
|
||||
// Test 13 - Test ElasticSearch
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "url": "", "index": "" } }}}`, false},
|
||||
|
@ -197,7 +197,7 @@ func TestValidateConfig(t *testing.T) {
|
|||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "address": "", "password": "", "key": "" } }}}`, false},
|
||||
|
||||
// Test 15 - Test PostgreSQL
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "postgresql": { "1": { "enable": true, "connectionString": "", "table": "", "host": "", "port": "", "user": "", "password": "", "database": "" }}}}`, false},
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "postgresql": { "1": { "enable": true, "connectionString": "", "table": "", "host": "", "port": "", "user": "", "password": "", "database": "", "queueDir": "", "queueLimit": 0 }}}}`, false},
|
||||
|
||||
// Test 16 - Test Kafka
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "kafka": { "1": { "enable": true, "brokers": null, "topic": "", "queueDir": "", "queueLimit": 0 } }}}`, false},
|
||||
|
@ -206,7 +206,7 @@ func TestValidateConfig(t *testing.T) {
|
|||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "webhook": { "1": { "enable": true, "endpoint": "", "queueDir": "", "queueLimit": 0} }}}`, false},
|
||||
|
||||
// Test 18 - Test MySQL
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mysql": { "1": { "enable": true, "dsnString": "", "table": "", "host": "", "port": "", "user": "", "password": "", "database": "" }}}}`, false},
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mysql": { "1": { "enable": true, "dsnString": "", "table": "", "host": "", "port": "", "user": "", "password": "", "database": "", "queueDir": "", "queueLimit": 0 }}}}`, false},
|
||||
|
||||
// Test 19 - Test Format for MySQL
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mysql": { "1": { "enable": true, "dsnString": "", "format": "invalid", "table": "xxx", "host": "10.0.0.1", "port": "3306", "user": "abc", "password": "pqr", "database": "test1" }}}}`, false},
|
||||
|
@ -227,7 +227,7 @@ func TestValidateConfig(t *testing.T) {
|
|||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "format": "namespace", "url": "example.com", "index": "myindex", "queueDir": "", "queueLimit": 0 } }}}`, true},
|
||||
|
||||
// Test 25 - Test Format for Redis
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "format": "invalid", "address": "example.com:80", "password": "xxx", "key": "key1" } }}}`, false},
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "format": "invalid", "address": "example.com:80", "password": "xxx", "key": "key1", "queueDir": "", "queueLimit": 0 } }}}`, false},
|
||||
|
||||
// Test 26 - Test valid Format for Redis
|
||||
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "format": "namespace", "address": "example.com:80", "password": "xxx", "key": "key1" } }}}`, true},
|
||||
|
|
2 vendor/github.com/minio/minio/cmd/config-migrate.go generated vendored
|
@ -2426,7 +2426,7 @@ func migrateConfigToMinioSys(objAPI ObjectLayer) (err error) {
|
|||
defer func() {
|
||||
if err == nil {
|
||||
if globalEtcdClient != nil {
|
||||
deleteConfigEtcd(context.Background(), globalEtcdClient, configFile)
|
||||
deleteKeyEtcd(context.Background(), globalEtcdClient, configFile)
|
||||
} else {
|
||||
// Rename config.json to config.json.deprecated only upon
|
||||
// success of this function.
|
||||
|
|
6 vendor/github.com/minio/minio/cmd/config-migrate_test.go generated vendored
|
@ -175,7 +175,7 @@ func TestServerConfigMigrateV2toV33(t *testing.T) {
|
|||
}
|
||||
defer os.RemoveAll(fsDir)
|
||||
|
||||
configPath := rootPath + "/" + minioConfigFile
|
||||
configPath := rootPath + SlashSeparator + minioConfigFile
|
||||
|
||||
// Create a corrupted config file
|
||||
if err := ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\","), 0644); err != nil {
|
||||
|
@ -238,7 +238,7 @@ func TestServerConfigMigrateFaultyConfig(t *testing.T) {
|
|||
defer os.RemoveAll(rootPath)
|
||||
|
||||
globalConfigDir = &ConfigDir{path: rootPath}
|
||||
configPath := rootPath + "/" + minioConfigFile
|
||||
configPath := rootPath + SlashSeparator + minioConfigFile
|
||||
|
||||
// Create a corrupted config file
|
||||
if err := ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\", \"test\":"), 0644); err != nil {
|
||||
|
@ -335,7 +335,7 @@ func TestServerConfigMigrateCorruptedConfig(t *testing.T) {
|
|||
defer os.RemoveAll(rootPath)
|
||||
|
||||
globalConfigDir = &ConfigDir{path: rootPath}
|
||||
configPath := rootPath + "/" + minioConfigFile
|
||||
configPath := rootPath + SlashSeparator + minioConfigFile
|
||||
|
||||
for i := 3; i <= 17; i++ {
|
||||
// Create a corrupted config file
|
||||
|
|
20 vendor/github.com/minio/minio/cmd/config.go generated vendored
|
@ -23,6 +23,7 @@ import (
|
|||
"path"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/quick"
|
||||
|
@ -101,6 +102,25 @@ func (sys *ConfigSys) Load(objAPI ObjectLayer) error {
|
|||
return sys.Init(objAPI)
|
||||
}
|
||||
|
||||
// WatchConfigNASDisk - watches nas disk on periodic basis.
|
||||
func (sys *ConfigSys) WatchConfigNASDisk(objAPI ObjectLayer) {
|
||||
configInterval := globalRefreshIAMInterval
|
||||
watchDisk := func() {
|
||||
ticker := time.NewTicker(configInterval)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-GlobalServiceDoneCh:
|
||||
return
|
||||
case <-ticker.C:
|
||||
loadConfig(objAPI)
|
||||
}
|
||||
}
|
||||
}
|
||||
// Refresh configSys in background for NAS gateway.
|
||||
go watchDisk()
|
||||
}
|
||||
|
||||
// Init - initializes config system from config.json.
|
||||
func (sys *ConfigSys) Init(objAPI ObjectLayer) error {
|
||||
if objAPI == nil {
|
||||
|
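WatchConfigNASDisk above is a ticker loop that reloads configuration until the service-wide done channel closes. A self-contained sketch of the same pattern; the interval and reload callback stand in for globalRefreshIAMInterval and loadConfig:

```go
package main

import (
	"fmt"
	"time"
)

// watch reloads on a ticker until done closes, mirroring the pattern
// WatchConfigNASDisk uses for the NAS gateway.
func watch(interval time.Duration, done <-chan struct{}, reload func()) {
	go func() {
		ticker := time.NewTicker(interval)
		defer ticker.Stop()
		for {
			select {
			case <-done:
				return
			case <-ticker.C:
				reload()
			}
		}
	}()
}

func main() {
	done := make(chan struct{})
	watch(200*time.Millisecond, done, func() { fmt.Println("reloading config from NAS disk") })
	time.Sleep(time.Second)
	close(done)
}
```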
|
2 vendor/github.com/minio/minio/cmd/crypto/error.go generated vendored
|
@ -62,6 +62,8 @@ var (
|
|||
|
||||
errInvalidInternalIV = Error{"The internal encryption IV is malformed"}
|
||||
errInvalidInternalSealAlgorithm = Error{"The internal seal algorithm is invalid and not supported"}
|
||||
|
||||
errMissingUpdatedKey = Error{"The key update returned no error but also no sealed key"}
|
||||
)
|
||||
|
||||
var (
|
||||
|
|
20 vendor/github.com/minio/minio/cmd/crypto/kms.go generated vendored
|
@ -86,6 +86,19 @@ type KMS interface {
|
|||
// referenced by the keyID. The provided context must
|
||||
// match the context used to generate the sealed key.
|
||||
UnsealKey(keyID string, sealedKey []byte, context Context) (key [32]byte, err error)
|
||||
|
||||
// UpdateKey re-wraps the sealedKey if the master key, referenced by
|
||||
// `keyID`, has changed in the meantime. This usually happens when the
|
||||
// KMS operator performs a key-rotation operation of the master key.
|
||||
// UpdateKey fails if the provided sealedKey cannot be decrypted using
|
||||
// the master key referenced by keyID.
|
||||
//
|
||||
// UpdateKey makes no guarantees whatsoever about whether the returned
|
||||
// rotatedKey is actually different from the sealedKey. If nothing has
|
||||
// changed at the KMS or if the KMS does not support updating generated
|
||||
// keys this method may behave like a NOP and just return the sealedKey
|
||||
// itself.
|
||||
UpdateKey(keyID string, sealedKey []byte, context Context) (rotatedKey []byte, err error)
|
||||
}
|
||||
|
||||
type masterKeyKMS struct {
|
||||
|
@ -126,6 +139,13 @@ func (kms *masterKeyKMS) UnsealKey(keyID string, sealedKey []byte, ctx Context)
|
|||
return key, nil
|
||||
}
|
||||
|
||||
func (kms *masterKeyKMS) UpdateKey(keyID string, sealedKey []byte, ctx Context) ([]byte, error) {
|
||||
if _, err := kms.UnsealKey(keyID, sealedKey, ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return sealedKey, nil // The master key cannot update data keys -> Do nothing.
|
||||
}
|
||||
|
||||
func (kms *masterKeyKMS) deriveKey(keyID string, context Context) (key [32]byte) {
|
||||
if context == nil {
|
||||
context = Context{}
|
||||
|
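The UpdateKey method added to the KMS interface above may legitimately behave as a no-op when the backend cannot re-wrap keys, which is what masterKeyKMS does. A simplified, self-contained sketch of that contract; the types below are stand-ins, not MinIO's crypto package:

```go
package main

import (
	"errors"
	"fmt"
)

// KMS is a trimmed stand-in for the interface extended in this diff.
type KMS interface {
	UnsealKey(keyID string, sealedKey []byte) ([32]byte, error)
	UpdateKey(keyID string, sealedKey []byte) ([]byte, error)
}

type masterKeyKMS struct {
	keys map[string][32]byte
}

func (m *masterKeyKMS) UnsealKey(keyID string, sealedKey []byte) (key [32]byte, err error) {
	k, ok := m.keys[keyID]
	if !ok {
		return key, errors.New("unknown key ID")
	}
	_ = sealedKey // a real implementation would decrypt sealedKey with k
	return k, nil
}

// UpdateKey verifies the sealed key still unseals and returns it unchanged:
// a static master key cannot re-wrap data keys, which the interface allows.
func (m *masterKeyKMS) UpdateKey(keyID string, sealedKey []byte) ([]byte, error) {
	if _, err := m.UnsealKey(keyID, sealedKey); err != nil {
		return nil, err
	}
	return sealedKey, nil
}

func main() {
	var kms KMS = &masterKeyKMS{keys: map[string][32]byte{"my-key": {}}}
	rotated, err := kms.UpdateKey("my-key", []byte("sealed"))
	fmt.Printf("rotated=%q err=%v\n", rotated, err)
}
```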
|
11 vendor/github.com/minio/minio/cmd/crypto/kms_test.go generated vendored
|
@ -51,11 +51,20 @@ func TestMasterKeyKMS(t *testing.T) {
|
|||
t.Errorf("Test %d: KMS failed to unseal the generated key: %v", i, err)
|
||||
}
|
||||
if err == nil && test.ShouldFail {
|
||||
t.Errorf("Test %d: KMS unsealed the generated successfully but should have failed", i)
|
||||
t.Errorf("Test %d: KMS unsealed the generated key successfully but should have failed", i)
|
||||
}
|
||||
if !test.ShouldFail && !bytes.Equal(key[:], unsealedKey[:]) {
|
||||
t.Errorf("Test %d: The generated and unsealed key differ", i)
|
||||
}
|
||||
|
||||
rotatedKey, err := kms.UpdateKey(test.UnsealKeyID, sealedKey, test.UnsealContext)
|
||||
if err == nil && test.ShouldFail {
|
||||
t.Errorf("Test %d: KMS updated the generated key successfully but should have failed", i)
|
||||
}
|
||||
if !test.ShouldFail && !bytes.Equal(rotatedKey, sealedKey[:]) {
|
||||
t.Errorf("Test %d: The updated and sealed key differ", i)
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
|
|
27 vendor/github.com/minio/minio/cmd/crypto/vault.go generated vendored
|
@ -250,3 +250,30 @@ func (v *vaultService) UnsealKey(keyID string, sealedKey []byte, ctx Context) (k
|
|||
copy(key[:], []byte(plainKey))
|
||||
return key, nil
|
||||
}
|
||||
|
||||
// UpdateKey re-wraps the sealedKey if the master key referenced by the keyID
|
||||
// has been changed by the KMS operator - i.e. the master key has been rotated.
|
||||
// If the master key hasn't changed since the sealedKey has been created / updated
|
||||
// it may return the same sealedKey as rotatedKey.
|
||||
//
|
||||
// The context must be same context as the one provided while
|
||||
// generating the plaintext key / sealedKey.
|
||||
func (v *vaultService) UpdateKey(keyID string, sealedKey []byte, ctx Context) (rotatedKey []byte, err error) {
|
||||
var contextStream bytes.Buffer
|
||||
ctx.WriteTo(&contextStream)
|
||||
|
||||
payload := map[string]interface{}{
|
||||
"ciphertext": string(sealedKey),
|
||||
"context": base64.StdEncoding.EncodeToString(contextStream.Bytes()),
|
||||
}
|
||||
s, err := v.client.Logical().Write(fmt.Sprintf("/transit/rewrap/%s", keyID), payload)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ciphertext, ok := s.Data["ciphertext"]
|
||||
if !ok {
|
||||
return nil, errMissingUpdatedKey
|
||||
}
|
||||
rotatedKey = ciphertext.([]byte)
|
||||
return rotatedKey, nil
|
||||
}
|
||||
|
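For reference, the transit rewrap request UpdateKey issues can also be made directly with the HashiCorp Vault client. A hedged sketch (key name, context and ciphertext are illustrative; note the HTTP API returns the rewrapped ciphertext as a string field):

```go
package main

import (
	"encoding/base64"
	"fmt"
	"log"

	vault "github.com/hashicorp/vault/api"
)

// rewrap asks Vault's transit backend to re-encrypt an existing ciphertext
// under the latest version of the named key, as UpdateKey does above.
func rewrap(client *vault.Client, keyName, ciphertext string, context []byte) (string, error) {
	payload := map[string]interface{}{
		"ciphertext": ciphertext,
		"context":    base64.StdEncoding.EncodeToString(context),
	}
	s, err := client.Logical().Write("transit/rewrap/"+keyName, payload)
	if err != nil {
		return "", err
	}
	if s == nil || s.Data == nil {
		return "", fmt.Errorf("transit/rewrap returned no data")
	}
	rotated, ok := s.Data["ciphertext"].(string)
	if !ok {
		return "", fmt.Errorf("transit/rewrap returned no ciphertext")
	}
	return rotated, nil
}

func main() {
	client, err := vault.NewClient(vault.DefaultConfig()) // VAULT_ADDR / VAULT_TOKEN from the environment
	if err != nil {
		log.Fatal(err)
	}
	rotated, err := rewrap(client, "my-minio-key", "vault:v1:abc...", []byte(`{"bucket":"b"}`))
	fmt.Println(rotated, err)
}
```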
|
2 vendor/github.com/minio/minio/cmd/disk-cache-config.go generated vendored
|
@ -109,7 +109,7 @@ func parseCacheExcludes(excludes []string) ([]string, error) {
|
|||
if len(e) == 0 {
|
||||
return nil, uiErrInvalidCacheExcludesValue(nil).Msg("cache exclude path (%s) cannot be empty", e)
|
||||
}
|
||||
if hasPrefix(e, slashSeparator) {
|
||||
if hasPrefix(e, SlashSeparator) {
|
||||
return nil, uiErrInvalidCacheExcludesValue(nil).Msg("cache exclude pattern (%s) cannot start with / as prefix", e)
|
||||
}
|
||||
}
|
||||
|
|
2 vendor/github.com/minio/minio/cmd/disk-cache-fs.go generated vendored
|
@ -182,7 +182,7 @@ func (cfs *cacheFSObjects) purgeTrash() {
|
|||
|
||||
// Purge cache entries that were not accessed.
|
||||
func (cfs *cacheFSObjects) purge() {
|
||||
delimiter := slashSeparator
|
||||
delimiter := SlashSeparator
|
||||
maxKeys := 1000
|
||||
ctx := context.Background()
|
||||
for {
|
||||
|
|
8 vendor/github.com/minio/minio/cmd/disk-cache.go generated vendored
|
@ -395,7 +395,7 @@ func listDirCacheFactory(isLeaf func(string, string) bool, disks []*cacheFSObjec
|
|||
|
||||
for i := range entries {
|
||||
if isLeaf(bucket, entries[i]) {
|
||||
entries[i] = strings.TrimSuffix(entries[i], slashSeparator)
|
||||
entries[i] = strings.TrimSuffix(entries[i], SlashSeparator)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -432,7 +432,7 @@ func (c cacheObjects) listCacheObjects(ctx context.Context, bucket, prefix, mark
|
|||
var nextMarker string
|
||||
|
||||
recursive := true
|
||||
if delimiter == slashSeparator {
|
||||
if delimiter == SlashSeparator {
|
||||
recursive = false
|
||||
}
|
||||
walkResultCh, endWalkCh := c.listPool.Release(listParams{bucket, recursive, marker, prefix, false})
|
||||
|
@ -460,7 +460,7 @@ func (c cacheObjects) listCacheObjects(ctx context.Context, bucket, prefix, mark
|
|||
|
||||
entry := walkResult.entry
|
||||
var objInfo ObjectInfo
|
||||
if hasSuffix(entry, slashSeparator) {
|
||||
if hasSuffix(entry, SlashSeparator) {
|
||||
// Object name needs to be full path.
|
||||
objInfo.Bucket = bucket
|
||||
objInfo.Name = entry
|
||||
|
@ -502,7 +502,7 @@ func (c cacheObjects) listCacheObjects(ctx context.Context, bucket, prefix, mark
|
|||
result = ListObjectsInfo{IsTruncated: !eof}
|
||||
for _, objInfo := range objInfos {
|
||||
result.NextMarker = objInfo.Name
|
||||
if objInfo.IsDir && delimiter == slashSeparator {
|
||||
if objInfo.IsDir && delimiter == SlashSeparator {
|
||||
result.Prefixes = append(result.Prefixes, objInfo.Name)
|
||||
continue
|
||||
}
|
||||
|
|
4 vendor/github.com/minio/minio/cmd/disk-usage.go generated vendored
|
@ -23,7 +23,7 @@ import (
|
|||
// getDiskUsage walks the file tree rooted at root, calling usageFn
|
||||
// for each file or directory in the tree, including root.
|
||||
func getDiskUsage(ctx context.Context, root string, usageFn usageFunc) error {
|
||||
return walk(ctx, root+slashSeparator, usageFn)
|
||||
return walk(ctx, root+SlashSeparator, usageFn)
|
||||
}
|
||||
|
||||
type usageFunc func(ctx context.Context, entry string) error
|
||||
|
@ -34,7 +34,7 @@ func walk(ctx context.Context, path string, usageFn usageFunc) error {
|
|||
return err
|
||||
}
|
||||
|
||||
if !hasSuffix(path, slashSeparator) {
|
||||
if !hasSuffix(path, SlashSeparator) {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
21 vendor/github.com/minio/minio/cmd/endpoint.go generated vendored
|
@ -100,7 +100,7 @@ func (endpoint *Endpoint) UpdateIsLocal() error {
|
|||
func NewEndpoint(arg string) (ep Endpoint, e error) {
|
||||
// isEmptyPath - check whether given path is not empty.
|
||||
isEmptyPath := func(path string) bool {
|
||||
return path == "" || path == "/" || path == `\`
|
||||
return path == "" || path == SlashSeparator || path == `\`
|
||||
}
|
||||
|
||||
if isEmptyPath(arg) {
|
||||
|
@ -152,7 +152,7 @@ func NewEndpoint(arg string) (ep Endpoint, e error) {
|
|||
return ep, fmt.Errorf("empty or root path is not supported in URL endpoint")
|
||||
}
|
||||
|
||||
// On windows having a preceding "/" will cause problems, if the
|
||||
// On windows having a preceding SlashSeparator will cause problems, if the
|
||||
// command line already has C:/<export-folder/ in it. Final resulting
|
||||
// path on windows might become C:/C:/ this will cause problems
|
||||
// of starting minio server properly in distributed mode on windows.
|
||||
|
@ -720,18 +720,27 @@ func GetRemotePeers(endpoints EndpointList) []string {
|
|||
func updateDomainIPs(endPoints set.StringSet) {
|
||||
ipList := set.NewStringSet()
|
||||
for e := range endPoints {
|
||||
host, _, err := net.SplitHostPort(e)
|
||||
host, port, err := net.SplitHostPort(e)
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "missing port in address") {
|
||||
host = e
|
||||
port = globalMinioPort
|
||||
} else {
|
||||
continue
|
||||
}
|
||||
}
|
||||
IPs, _ := getHostIP(host)
|
||||
ipList = ipList.Union(IPs)
|
||||
IPs, err := getHostIP(host)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
IPsWithPort := IPs.ApplyFunc(func(ip string) string {
|
||||
return net.JoinHostPort(ip, port)
|
||||
})
|
||||
|
||||
ipList = ipList.Union(IPsWithPort)
|
||||
}
|
||||
globalDomainIPs = ipList.FuncMatch(func(ip string, matchString string) bool {
|
||||
return !strings.HasPrefix(ip, "127.") || strings.HasPrefix(ip, "::1")
|
||||
return !(strings.HasPrefix(ip, "127.") || strings.HasPrefix(ip, "::1") || strings.HasPrefix(ip, "[::1]"))
|
||||
}, "")
|
||||
}
|
||||
|
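updateDomainIPs above now keeps an explicit port and falls back to globalMinioPort when one is missing, then re-joins resolved IPs with that port. A self-contained sketch of the host/port normalisation, with the default port hard-coded for illustration:

```go
package main

import (
	"fmt"
	"net"
	"strings"
)

// normalize adds the default port when an endpoint has none, then re-joins
// host and port the way the hunk above joins resolved IPs with the port.
func normalize(endpoint, defaultPort string) (string, error) {
	host, port, err := net.SplitHostPort(endpoint)
	if err != nil {
		if strings.Contains(err.Error(), "missing port in address") {
			host, port = endpoint, defaultPort
		} else {
			return "", err
		}
	}
	return net.JoinHostPort(host, port), nil
}

func main() {
	for _, e := range []string{"10.0.0.1", "10.0.0.1:9001", "localhost"} {
		fmt.Println(normalize(e, "9000"))
	}
}
```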
|
43 vendor/github.com/minio/minio/cmd/endpoint_test.go generated vendored
|
@ -25,6 +25,7 @@ import (
|
|||
"testing"
|
||||
|
||||
"github.com/minio/cli"
|
||||
"github.com/minio/minio-go/v6/pkg/set"
|
||||
)
|
||||
|
||||
func TestSubOptimalEndpointInput(t *testing.T) {
|
||||
|
@ -95,7 +96,7 @@ func TestNewEndpoint(t *testing.T) {
|
|||
{"http://127.0.0.1:8080/path", Endpoint{URL: u3, IsLocal: true, HostName: "127.0.0.1"}, URLEndpointType, nil},
|
||||
{"http://192.168.253.200/path", Endpoint{URL: u4, IsLocal: false, HostName: "192.168.253.200"}, URLEndpointType, nil},
|
||||
{"", Endpoint{}, -1, fmt.Errorf("empty or root endpoint is not supported")},
|
||||
{"/", Endpoint{}, -1, fmt.Errorf("empty or root endpoint is not supported")},
|
||||
{SlashSeparator, Endpoint{}, -1, fmt.Errorf("empty or root endpoint is not supported")},
|
||||
{`\`, Endpoint{}, -1, fmt.Errorf("empty or root endpoint is not supported")},
|
||||
{"c://foo", Endpoint{}, -1, fmt.Errorf("invalid URL endpoint format")},
|
||||
{"ftp://foo", Endpoint{}, -1, fmt.Errorf("invalid URL endpoint format")},
|
||||
|
@ -444,3 +445,43 @@ func TestGetRemotePeers(t *testing.T) {
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestUpdateDomainIPs(t *testing.T) {
|
||||
tempGlobalMinioPort := globalMinioPort
|
||||
defer func() {
|
||||
globalMinioPort = tempGlobalMinioPort
|
||||
}()
|
||||
globalMinioPort = "9000"
|
||||
|
||||
tempGlobalDomainIPs := globalDomainIPs
|
||||
defer func() {
|
||||
globalDomainIPs = tempGlobalDomainIPs
|
||||
}()
|
||||
|
||||
ipv4TestCases := []struct {
|
||||
endPoints set.StringSet
|
||||
expectedResult set.StringSet
|
||||
}{
|
||||
{set.NewStringSet(), set.NewStringSet()},
|
||||
{set.CreateStringSet("localhost"), set.NewStringSet()},
|
||||
{set.CreateStringSet("localhost", "10.0.0.1"), set.CreateStringSet("10.0.0.1:9000")},
|
||||
{set.CreateStringSet("localhost:9001", "10.0.0.1"), set.CreateStringSet("10.0.0.1:9000")},
|
||||
{set.CreateStringSet("localhost", "10.0.0.1:9001"), set.CreateStringSet("10.0.0.1:9001")},
|
||||
{set.CreateStringSet("localhost:9000", "10.0.0.1:9001"), set.CreateStringSet("10.0.0.1:9001")},
|
||||
|
||||
{set.CreateStringSet("10.0.0.1", "10.0.0.2"), set.CreateStringSet("10.0.0.1:9000", "10.0.0.2:9000")},
|
||||
{set.CreateStringSet("10.0.0.1:9001", "10.0.0.2"), set.CreateStringSet("10.0.0.1:9001", "10.0.0.2:9000")},
|
||||
{set.CreateStringSet("10.0.0.1", "10.0.0.2:9002"), set.CreateStringSet("10.0.0.1:9000", "10.0.0.2:9002")},
|
||||
{set.CreateStringSet("10.0.0.1:9001", "10.0.0.2:9002"), set.CreateStringSet("10.0.0.1:9001", "10.0.0.2:9002")},
|
||||
}
|
||||
|
||||
for _, testCase := range ipv4TestCases {
|
||||
globalDomainIPs = nil
|
||||
|
||||
updateDomainIPs(testCase.endPoints)
|
||||
|
||||
if !testCase.expectedResult.Equals(globalDomainIPs) {
|
||||
t.Fatalf("error: expected = %s, got = %s", testCase.expectedResult, globalDomainIPs)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
72 vendor/github.com/minio/minio/cmd/etcd.go generated vendored Normal file
|
@ -0,0 +1,72 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
|
||||
etcd "github.com/coreos/etcd/clientv3"
|
||||
)
|
||||
|
||||
var errEtcdUnreachable = errors.New("etcd is unreachable, please check your endpoints")
|
||||
|
||||
func etcdErrToErr(err error, etcdEndpoints []string) error {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
switch err {
|
||||
case context.DeadlineExceeded:
|
||||
return fmt.Errorf("%s %s", errEtcdUnreachable, etcdEndpoints)
|
||||
default:
|
||||
return fmt.Errorf("unexpected error %s from etcd, please check your endpoints %s", err, etcdEndpoints)
|
||||
}
|
||||
}
|
||||
|
||||
func saveKeyEtcd(ctx context.Context, client *etcd.Client, key string, data []byte) error {
|
||||
timeoutCtx, cancel := context.WithTimeout(ctx, defaultContextTimeout)
|
||||
defer cancel()
|
||||
_, err := client.Put(timeoutCtx, key, string(data))
|
||||
return etcdErrToErr(err, client.Endpoints())
|
||||
}
|
||||
|
||||
func deleteKeyEtcd(ctx context.Context, client *etcd.Client, key string) error {
|
||||
timeoutCtx, cancel := context.WithTimeout(ctx, defaultContextTimeout)
|
||||
defer cancel()
|
||||
|
||||
_, err := client.Delete(timeoutCtx, key)
|
||||
return etcdErrToErr(err, client.Endpoints())
|
||||
}
|
||||
|
||||
func readKeyEtcd(ctx context.Context, client *etcd.Client, key string) ([]byte, error) {
|
||||
timeoutCtx, cancel := context.WithTimeout(ctx, defaultContextTimeout)
|
||||
defer cancel()
|
||||
resp, err := client.Get(timeoutCtx, key)
|
||||
if err != nil {
|
||||
return nil, etcdErrToErr(err, client.Endpoints())
|
||||
}
|
||||
if resp.Count == 0 {
|
||||
return nil, errConfigNotFound
|
||||
}
|
||||
for _, ev := range resp.Kvs {
|
||||
if string(ev.Key) == key {
|
||||
return ev.Value, nil
|
||||
}
|
||||
}
|
||||
return nil, errConfigNotFound
|
||||
}
|
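A usage sketch for the helpers in this new file: the same bounded-timeout pattern with the coreos clientv3 package, treating context.DeadlineExceeded as an unreachable etcd. Endpoint, key and timeout values are illustrative:

```go
package main

import (
	"context"
	"fmt"
	"time"

	etcd "github.com/coreos/etcd/clientv3"
)

func main() {
	// Illustrative endpoint; the diff gets its client from globalEtcdClient.
	client, err := etcd.New(etcd.Config{
		Endpoints:   []string{"http://127.0.0.1:2379"},
		DialTimeout: 3 * time.Second,
	})
	if err != nil {
		fmt.Println("unable to create etcd client:", err)
		return
	}
	defer client.Close()

	// Bound the call, mirroring saveKeyEtcd's defaultContextTimeout wrapper.
	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
	defer cancel()

	if _, err := client.Put(ctx, "config/iam/format.json", `{"version":1}`); err != nil {
		if err == context.DeadlineExceeded {
			fmt.Println("etcd is unreachable, please check your endpoints", client.Endpoints())
			return
		}
		fmt.Println("unexpected etcd error:", err)
	}
}
```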
16 vendor/github.com/minio/minio/cmd/format-xl.go generated vendored
|
@ -698,6 +698,22 @@ func initStorageDisks(endpoints EndpointList) ([]StorageAPI, error) {
|
|||
return storageDisks, nil
|
||||
}
|
||||
|
||||
// Runs through the faulty disks and record their errors.
|
||||
func initDisksWithErrors(endpoints EndpointList) ([]StorageAPI, []error) {
|
||||
storageDisks := make([]StorageAPI, len(endpoints))
|
||||
var dErrs = make([]error, len(storageDisks))
|
||||
for index, endpoint := range endpoints {
|
||||
storage, err := newStorageAPI(endpoint)
|
||||
if err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
dErrs[index] = err
|
||||
continue
|
||||
}
|
||||
storageDisks[index] = storage
|
||||
}
|
||||
return storageDisks, dErrs
|
||||
}
|
||||
|
||||
// formatXLV3ThisEmpty - find out if '.This' field is empty
|
||||
// in any of the input `formats`, if yes return true.
|
||||
func formatXLV3ThisEmpty(formats []*formatXLV3) bool {
|
||||
|
|
36 vendor/github.com/minio/minio/cmd/fs-v1-helpers.go generated vendored
|
@ -228,15 +228,6 @@ func fsStatDir(ctx context.Context, statDir string) (os.FileInfo, error) {
|
|||
return fi, nil
|
||||
}
|
||||
|
||||
// Returns if the dirPath is a directory.
|
||||
func fsIsDir(ctx context.Context, dirPath string) bool {
|
||||
fi, err := fsStat(ctx, dirPath)
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return fi.IsDir()
|
||||
}
|
||||
|
||||
// Lookup if file exists, returns file attributes upon success.
|
||||
func fsStatFile(ctx context.Context, statFile string) (os.FileInfo, error) {
|
||||
fi, err := fsStat(ctx, statFile)
|
||||
|
@ -280,7 +271,7 @@ func fsOpenFile(ctx context.Context, readPath string, offset int64) (io.ReadClos
|
|||
}
|
||||
|
||||
// Stat to get the size of the file at path.
|
||||
st, err := os.Stat(readPath)
|
||||
st, err := fr.Stat()
|
||||
if err != nil {
|
||||
err = osErrToFSFileErr(err)
|
||||
if err != errFileNotFound {
|
||||
|
@ -386,6 +377,26 @@ func fsFAllocate(fd int, offset int64, len int64) (err error) {
|
|||
return nil
|
||||
}
|
||||
|
||||
// Renames source path to destination path, fails if the destination path
|
||||
// parents are not already created.
|
||||
func fsSimpleRenameFile(ctx context.Context, sourcePath, destPath string) error {
|
||||
if err := checkPathLength(sourcePath); err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
return err
|
||||
}
|
||||
if err := checkPathLength(destPath); err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
return err
|
||||
}
|
||||
|
||||
if err := os.Rename(sourcePath, destPath); err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
return osErrToFSFileErr(err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Renames source path to destination path, creates all the
|
||||
// missing parents if they don't exist.
|
||||
func fsRenameFile(ctx context.Context, sourcePath, destPath string) error {
|
||||
|
@ -398,11 +409,6 @@ func fsRenameFile(ctx context.Context, sourcePath, destPath string) error {
|
|||
return err
|
||||
}
|
||||
|
||||
// Verify if source path exists.
|
||||
if _, err := os.Stat(sourcePath); err != nil {
|
||||
return osErrToFSFileErr(err)
|
||||
}
|
||||
|
||||
if err := renameAll(sourcePath, destPath); err != nil {
|
||||
logger.LogIf(ctx, err)
|
||||
return err
|
||||
|
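The new fsSimpleRenameFile deliberately skips parent-directory creation so a rename into an aborted upload's directory fails rather than resurrecting it, while fsRenameFile keeps creating parents. A self-contained sketch of the two behaviours over os.Rename (paths are illustrative):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// simpleRename fails if the destination's parent no longer exists, which is
// what fsSimpleRenameFile relies on for aborted multipart uploads.
func simpleRename(src, dst string) error {
	return os.Rename(src, dst)
}

// renameAll creates missing parents first, like the pre-existing fsRenameFile.
func renameAll(src, dst string) error {
	if err := os.MkdirAll(filepath.Dir(dst), 0755); err != nil {
		return err
	}
	return os.Rename(src, dst)
}

func main() {
	fmt.Println(simpleRename("/tmp/part.1.tmp", "/tmp/uploads/abc/part.1"))
	fmt.Println(renameAll("/tmp/part.1.tmp", "/tmp/uploads/abc/part.1"))
}
```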
|
12 vendor/github.com/minio/minio/cmd/fs-v1-helpers_test.go generated vendored
|
@ -548,18 +548,6 @@ func TestFSRemoveMeta(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestFSIsDir(t *testing.T) {
|
||||
dirPath, err := ioutil.TempDir(globalTestTmpDir, "minio-")
|
||||
if err != nil {
|
||||
t.Fatalf("Unable to create tmp directory %s", err)
|
||||
}
|
||||
defer os.RemoveAll(dirPath)
|
||||
|
||||
if !fsIsDir(context.Background(), dirPath) {
|
||||
t.Fatalf("Expected %s to be a directory", dirPath)
|
||||
}
|
||||
}
|
||||
|
||||
func TestFSIsFile(t *testing.T) {
|
||||
dirPath, err := ioutil.TempDir(globalTestTmpDir, "minio-")
|
||||
if err != nil {
|
||||
|
|
2 vendor/github.com/minio/minio/cmd/fs-v1-metadata.go generated vendored
|
@ -141,7 +141,7 @@ func (m fsMetaV1) ToObjectInfo(bucket, object string, fi os.FileInfo) ObjectInfo
|
|||
m.Meta["content-type"] = mimedb.TypeByExtension(pathutil.Ext(object))
|
||||
}
|
||||
|
||||
if hasSuffix(object, slashSeparator) {
|
||||
if hasSuffix(object, SlashSeparator) {
|
||||
m.Meta["etag"] = emptyETag // For directories etag is d41d8cd98f00b204e9800998ecf8427e
|
||||
m.Meta["content-type"] = "application/octet-stream"
|
||||
}
|
||||
|
|
10 vendor/github.com/minio/minio/cmd/fs-v1-multipart.go generated vendored
|
@ -152,7 +152,7 @@ func (fs *FSObjects) ListMultipartUploads(ctx context.Context, bucket, object, k
|
|||
return result, toObjectErr(err)
|
||||
}
|
||||
|
||||
// S3 spec says uploaIDs should be sorted based on initiated time. ModTime of fs.json
|
||||
// S3 spec says uploadIDs should be sorted based on initiated time. ModTime of fs.json
|
||||
// is the creation time of the uploadID, hence we will use that.
|
||||
var uploads []MultipartInfo
|
||||
for _, uploadID := range uploadIDs {
|
||||
|
@ -163,7 +163,7 @@ func (fs *FSObjects) ListMultipartUploads(ctx context.Context, bucket, object, k
|
|||
}
|
||||
uploads = append(uploads, MultipartInfo{
|
||||
Object: object,
|
||||
UploadID: strings.TrimSuffix(uploadID, slashSeparator),
|
||||
UploadID: strings.TrimSuffix(uploadID, SlashSeparator),
|
||||
Initiated: fi.ModTime(),
|
||||
})
|
||||
}
|
||||
|
@ -326,7 +326,11 @@ func (fs *FSObjects) PutObjectPart(ctx context.Context, bucket, object, uploadID
|
|||
|
||||
partPath := pathJoin(uploadIDDir, fs.encodePartFile(partID, etag, data.ActualSize()))
|
||||
|
||||
if err = fsRenameFile(ctx, tmpPartPath, partPath); err != nil {
|
||||
// Make sure not to create parent directories if they don't exist - the upload might have been aborted.
|
||||
if err = fsSimpleRenameFile(ctx, tmpPartPath, partPath); err != nil {
|
||||
if err == errFileNotFound || err == errFileAccessDenied {
|
||||
return pi, InvalidUploadID{UploadID: uploadID}
|
||||
}
|
||||
return pi, toObjectErr(err, minioMetaMultipartBucket, partPath)
|
||||
}
|
||||
|
||||
|
|
14 vendor/github.com/minio/minio/cmd/fs-v1.go generated vendored
|
@ -505,7 +505,7 @@ func (fs *FSObjects) GetObjectNInfo(ctx context.Context, bucket, object string,
|
|||
return nil, toObjectErr(err, bucket, object)
|
||||
}
|
||||
// For a directory, we need to send an reader that returns no bytes.
|
||||
if hasSuffix(object, slashSeparator) {
|
||||
if hasSuffix(object, SlashSeparator) {
|
||||
// The lock taken above is released when
|
||||
// objReader.Close() is called by the caller.
|
||||
return NewGetObjectReaderFromReader(bytes.NewBuffer(nil), objInfo, opts.CheckCopyPrecondFn, nsUnlocker)
|
||||
|
@ -596,7 +596,7 @@ func (fs *FSObjects) getObject(ctx context.Context, bucket, object string, offse
|
|||
}
|
||||
|
||||
// If its a directory request, we return an empty body.
|
||||
if hasSuffix(object, slashSeparator) {
|
||||
if hasSuffix(object, SlashSeparator) {
|
||||
_, err = writer.Write([]byte(""))
|
||||
logger.LogIf(ctx, err)
|
||||
return toObjectErr(err, bucket, object)
|
||||
|
@ -690,11 +690,7 @@ func (fs *FSObjects) defaultFsJSON(object string) fsMetaV1 {
|
|||
// getObjectInfo - wrapper for reading object metadata and constructs ObjectInfo.
|
||||
func (fs *FSObjects) getObjectInfo(ctx context.Context, bucket, object string) (oi ObjectInfo, e error) {
|
||||
fsMeta := fsMetaV1{}
|
||||
if hasSuffix(object, slashSeparator) {
|
||||
// Since we support PUT of a "directory" object, we allow HEAD.
|
||||
if !fsIsDir(ctx, pathJoin(fs.fsPath, bucket, object)) {
|
||||
return oi, errFileNotFound
|
||||
}
|
||||
if hasSuffix(object, SlashSeparator) {
|
||||
fi, err := fsStatDir(ctx, pathJoin(fs.fsPath, bucket, object))
|
||||
if err != nil {
|
||||
return oi, err
|
||||
|
@ -754,7 +750,7 @@ func (fs *FSObjects) getObjectInfoWithLock(ctx context.Context, bucket, object s
|
|||
return oi, err
|
||||
}
|
||||
|
||||
if strings.HasSuffix(object, slashSeparator) && !fs.isObjectDir(bucket, object) {
|
||||
if strings.HasSuffix(object, SlashSeparator) && !fs.isObjectDir(bucket, object) {
|
||||
return oi, errFileNotFound
|
||||
}
|
||||
|
||||
|
@ -788,7 +784,7 @@ func (fs *FSObjects) GetObjectInfo(ctx context.Context, bucket, object string, o
|
|||
func (fs *FSObjects) parentDirIsObject(ctx context.Context, bucket, parent string) bool {
|
||||
var isParentDirObject func(string) bool
|
||||
isParentDirObject = func(p string) bool {
|
||||
if p == "." || p == "/" {
|
||||
if p == "." || p == SlashSeparator {
|
||||
return false
|
||||
}
|
||||
if fsIsFile(ctx, pathJoin(fs.fsPath, bucket, p)) {
|
||||
|
|
4 vendor/github.com/minio/minio/cmd/fs-v1_test.go generated vendored
|
@ -71,7 +71,7 @@ func TestFSParentDirIsObject(t *testing.T) {
|
|||
// Should not cause infinite loop.
|
||||
{
|
||||
parentIsObject: false,
|
||||
objectName: "/",
|
||||
objectName: SlashSeparator,
|
||||
},
|
||||
{
|
||||
parentIsObject: false,
|
||||
|
@ -214,7 +214,7 @@ func TestFSPutObject(t *testing.T) {
|
|||
}
|
||||
|
||||
// With a directory object.
|
||||
_, err = obj.PutObject(context.Background(), bucketName+"non-existent", objectName+"/", mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), 0, "", ""), ObjectOptions{})
|
||||
_, err = obj.PutObject(context.Background(), bucketName+"non-existent", objectName+SlashSeparator, mustGetPutObjReader(t, bytes.NewReader([]byte("abcd")), 0, "", ""), ObjectOptions{})
|
||||
if err == nil {
|
||||
t.Fatal("Unexpected should fail here, bucket doesn't exist")
|
||||
}
|
||||
|
|
25 vendor/github.com/minio/minio/cmd/gateway-main.go generated vendored
|
@ -158,7 +158,7 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
|
|||
registerSTSRouter(router)
|
||||
}
|
||||
|
||||
enableConfigOps := globalEtcdClient != nil && gatewayName == "nas"
|
||||
enableConfigOps := gatewayName == "nas"
|
||||
enableIAMOps := globalEtcdClient != nil
|
||||
|
||||
// Enable IAM admin APIs if etcd is enabled, if not just enable basic
|
||||
|
@ -236,6 +236,10 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
|
|||
|
||||
// Load globalServerConfig from etcd
|
||||
logger.LogIf(context.Background(), globalConfigSys.Init(newObject))
|
||||
|
||||
// Start watching disk for reloading config, this
|
||||
// is only enabled for "NAS" gateway.
|
||||
globalConfigSys.WatchConfigNASDisk(newObject)
|
||||
}
|
||||
|
||||
// Load logger subsystem
|
||||
|
@ -274,23 +278,14 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
|
|||
|
||||
// Create new notification system.
|
||||
globalNotificationSys = NewNotificationSys(globalServerConfig, globalEndpoints)
|
||||
if globalEtcdClient != nil && newObject.IsNotificationSupported() {
|
||||
if enableConfigOps && newObject.IsNotificationSupported() {
|
||||
logger.LogIf(context.Background(), globalNotificationSys.Init(newObject))
|
||||
}
|
||||
|
||||
// Encryption support checks in gateway mode.
|
||||
{
|
||||
|
||||
if (globalAutoEncryption || GlobalKMS != nil) && !newObject.IsEncryptionSupported() {
|
||||
logger.Fatal(errInvalidArgument,
|
||||
"Encryption support is requested but (%s) gateway does not support encryption", gw.Name())
|
||||
}
|
||||
|
||||
if GlobalGatewaySSE.IsSet() && GlobalKMS == nil {
|
||||
logger.Fatal(uiErrInvalidGWSSEEnvValue(nil).Msg("MINIO_GATEWAY_SSE set but KMS is not configured"),
|
||||
"Unable to start gateway with SSE")
|
||||
}
|
||||
}
|
||||
// Verify if object layer supports
|
||||
// - encryption
|
||||
// - compression
|
||||
verifyObjectLayerFeatures("gateway "+gatewayName, newObject)
|
||||
|
||||
// Once endpoints are finalized, initialize the new object api.
|
||||
globalObjLayerMutex.Lock()
|
||||
|
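The gateway start-up above now funnels its encryption and compression checks through verifyObjectLayerFeatures. A loose sketch of such a helper that returns errors instead of calling logger.Fatal, against a trimmed stand-in interface (not MinIO's ObjectLayer):

```go
package main

import "fmt"

// ObjectLayerFeatures is a trimmed stand-in for the capability methods the
// start-up check consults; it is not MinIO's ObjectLayer interface.
type ObjectLayerFeatures interface {
	IsEncryptionSupported() bool
	IsCompressionSupported() bool
}

func verifyFeatures(name string, wantEncryption, wantCompression bool, obj ObjectLayerFeatures) error {
	if wantEncryption && !obj.IsEncryptionSupported() {
		return fmt.Errorf("encryption support is requested but '%s' does not support encryption", name)
	}
	if wantCompression && !obj.IsCompressionSupported() {
		return fmt.Errorf("compression support is requested but '%s' does not support compression", name)
	}
	return nil
}

type fakeGateway struct{}

func (fakeGateway) IsEncryptionSupported() bool  { return false }
func (fakeGateway) IsCompressionSupported() bool { return true }

func main() {
	fmt.Println(verifyFeatures("gateway hdfs", true, false, fakeGateway{}))
}
```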
|
2 vendor/github.com/minio/minio/cmd/gateway/azure/gateway-azure.go generated vendored
|
@ -431,7 +431,7 @@ func checkAzureUploadID(ctx context.Context, uploadID string) (err error) {
|
|||
|
||||
// parses partID from part metadata file name
|
||||
func parseAzurePart(metaPartFileName, prefix string) (partID int, err error) {
|
||||
partStr := strings.TrimPrefix(metaPartFileName, prefix+"/")
|
||||
partStr := strings.TrimPrefix(metaPartFileName, prefix+minio.SlashSeparator)
|
||||
if partID, err = strconv.Atoi(partStr); err != nil || partID <= 0 {
|
||||
err = fmt.Errorf("invalid part number in block id '%s'", string(partID))
|
||||
return
|
||||
|
|
6 vendor/github.com/minio/minio/cmd/gateway/gcs/gateway-gcs.go generated vendored
|
@ -472,7 +472,7 @@ func (l *gcsGateway) ListBuckets(ctx context.Context) (buckets []minio.BucketInf
|
|||
// DeleteBucket delete a bucket on GCS.
|
||||
func (l *gcsGateway) DeleteBucket(ctx context.Context, bucket string) error {
|
||||
itObject := l.client.Bucket(bucket).Objects(ctx, &storage.Query{
|
||||
Delimiter: "/",
|
||||
Delimiter: minio.SlashSeparator,
|
||||
Versions: false,
|
||||
})
|
||||
// We list the bucket and if we find any objects we return BucketNotEmpty error. If we
|
||||
|
@ -1040,7 +1040,7 @@ func (l *gcsGateway) ListMultipartUploads(ctx context.Context, bucket string, pr
|
|||
if prefix == mpMeta.Object {
|
||||
// Extract uploadId
|
||||
// E.g minio.sys.tmp/multipart/v1/d063ad89-fdc4-4ea3-a99e-22dba98151f5/gcs.json
|
||||
components := strings.SplitN(attrs.Name, "/", 5)
|
||||
components := strings.SplitN(attrs.Name, minio.SlashSeparator, 5)
|
||||
if len(components) != 5 {
|
||||
compErr := errors.New("Invalid multipart upload format")
|
||||
logger.LogIf(ctx, compErr)
|
||||
|
@ -1114,7 +1114,7 @@ func (l *gcsGateway) PutObjectPart(ctx context.Context, bucket string, key strin
|
|||
|
||||
// gcsGetPartInfo returns PartInfo of a given object part
|
||||
func gcsGetPartInfo(ctx context.Context, attrs *storage.ObjectAttrs) (minio.PartInfo, error) {
|
||||
components := strings.SplitN(attrs.Name, "/", 5)
|
||||
components := strings.SplitN(attrs.Name, minio.SlashSeparator, 5)
|
||||
if len(components) != 5 {
|
||||
logger.LogIf(ctx, errors.New("Invalid multipart upload format"))
|
||||
return minio.PartInfo{}, errors.New("Invalid multipart upload format")
|
||||
|
|
2 vendor/github.com/minio/minio/cmd/gateway/hdfs/gateway-hdfs-utils.go generated vendored
|
@ -36,7 +36,7 @@ const (
|
|||
|
||||
// Ignores all reserved bucket names or invalid bucket names.
|
||||
func isReservedOrInvalidBucket(bucketEntry string, strict bool) bool {
|
||||
bucketEntry = strings.TrimSuffix(bucketEntry, "/")
|
||||
bucketEntry = strings.TrimSuffix(bucketEntry, minio.SlashSeparator)
|
||||
if strict {
|
||||
if err := s3utils.CheckValidBucketNameStrict(bucketEntry); err != nil {
|
||||
return true
|
||||
|
|
93 vendor/github.com/minio/minio/cmd/gateway/hdfs/gateway-hdfs.go generated vendored
|
@ -18,6 +18,7 @@ package hdfs
|
|||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"net/http"
|
||||
|
@ -30,7 +31,11 @@ import (
|
|||
"time"
|
||||
|
||||
"github.com/minio/cli"
|
||||
krb "github.com/minio/gokrb5/v7/client"
|
||||
"github.com/minio/gokrb5/v7/config"
|
||||
"github.com/minio/gokrb5/v7/credentials"
|
||||
"github.com/minio/hdfs/v3"
|
||||
"github.com/minio/hdfs/v3/hadoopconf"
|
||||
"github.com/minio/minio-go/v6/pkg/s3utils"
|
||||
minio "github.com/minio/minio/cmd"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
|
@ -41,7 +46,7 @@ import (
|
|||
const (
|
||||
hdfsBackend = "hdfs"
|
||||
|
||||
hdfsSeparator = "/"
|
||||
hdfsSeparator = minio.SlashSeparator
|
||||
)
|
||||
|
||||
func init() {
|
||||
|
@ -102,7 +107,7 @@ EXAMPLES:
|
|||
// Handler for 'minio gateway hdfs' command line.
|
||||
func hdfsGatewayMain(ctx *cli.Context) {
|
||||
// Validate gateway arguments.
|
||||
if !ctx.Args().Present() || ctx.Args().First() == "help" {
|
||||
if ctx.Args().First() == "help" {
|
||||
cli.ShowCommandHelpAndExit(ctx, hdfsBackend, 1)
|
||||
}
|
||||
|
||||
|
@ -119,6 +124,43 @@ func (g *HDFS) Name() string {
|
|||
return hdfsBackend
|
||||
}
|
||||
|
||||
func getKerberosClient() (*krb.Client, error) {
|
||||
configPath := os.Getenv("KRB5_CONFIG")
|
||||
if configPath == "" {
|
||||
configPath = "/etc/krb5.conf"
|
||||
}
|
||||
|
||||
cfg, err := config.Load(configPath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Determine the ccache location from the environment,
|
||||
// falling back to the default location.
|
||||
ccachePath := os.Getenv("KRB5CCNAME")
|
||||
if strings.Contains(ccachePath, ":") {
|
||||
if strings.HasPrefix(ccachePath, "FILE:") {
|
||||
ccachePath = strings.TrimPrefix(ccachePath, "FILE:")
|
||||
} else {
|
||||
return nil, fmt.Errorf("unable to use kerberos ccache: %s", ccachePath)
|
||||
}
|
||||
} else if ccachePath == "" {
|
||||
u, err := user.Current()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ccachePath = fmt.Sprintf("/tmp/krb5cc_%s", u.Uid)
|
||||
}
|
||||
|
||||
ccache, err := credentials.LoadCCache(ccachePath)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return krb.NewClientFromCCache(ccache, cfg)
|
||||
}
|
||||
|
||||
// NewGatewayLayer returns hdfs gatewaylayer.
|
||||
func (g *HDFS) NewGatewayLayer(creds auth.Credentials) (minio.ObjectLayer, error) {
|
||||
dialFunc := (&net.Dialer{
|
||||
|
@ -127,25 +169,42 @@ func (g *HDFS) NewGatewayLayer(creds auth.Credentials) (minio.ObjectLayer, error
|
|||
DualStack: true,
|
||||
}).DialContext
|
||||
|
||||
var addresses []string
|
||||
for _, s := range g.args {
|
||||
u, err := xnet.ParseURL(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
addresses = append(addresses, u.Host)
|
||||
}
|
||||
|
||||
user, err := user.Current()
|
||||
hconfig, err := hadoopconf.LoadFromEnvironment()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
opts := hdfs.ClientOptions{
|
||||
Addresses: addresses,
|
||||
User: user.Username,
|
||||
NamenodeDialFunc: dialFunc,
|
||||
DatanodeDialFunc: dialFunc,
|
||||
opts := hdfs.ClientOptionsFromConf(hconfig)
|
||||
opts.NamenodeDialFunc = dialFunc
|
||||
opts.DatanodeDialFunc = dialFunc
|
||||
|
||||
// Not addresses found, load it from command line.
|
||||
if len(opts.Addresses) == 0 {
|
||||
var addresses []string
|
||||
for _, s := range g.args {
|
||||
u, err := xnet.ParseURL(s)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
addresses = append(addresses, u.Host)
|
||||
}
|
||||
opts.Addresses = addresses
|
||||
}
|
||||
|
||||
if opts.KerberosClient != nil {
|
||||
opts.KerberosClient, err = getKerberosClient()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Unable to initialize kerberos client: %s", err)
|
||||
}
|
||||
} else {
|
||||
opts.User = os.Getenv("HADOOP_USER_NAME")
|
||||
if opts.User == "" {
|
||||
u, err := user.Current()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Unable to lookup local user: %s", err)
|
||||
}
|
||||
opts.User = u.Username
|
||||
}
|
||||
}
|
||||
|
||||
clnt, err := hdfs.NewClient(opts)
|
||||
|
|
13 vendor/github.com/minio/minio/cmd/gateway/s3/gateway-s3-sse.go generated vendored
|
@ -41,7 +41,6 @@ const (
|
|||
// custom multipart files are stored under the defaultMinioGWPrefix
|
||||
defaultMinioGWPrefix = ".minio"
|
||||
defaultGWContentFileName = "data"
|
||||
slashSeparator = "/"
|
||||
)
|
||||
|
||||
// s3EncObjects is a wrapper around s3Objects and implements gateway calls for
|
||||
|
@ -102,7 +101,7 @@ func (l *s3EncObjects) ListObjectsV2(ctx context.Context, bucket, prefix, contin
|
|||
}
|
||||
// get objectname and ObjectInfo from the custom metadata file
|
||||
if strings.HasSuffix(obj.Name, gwdareMetaJSON) {
|
||||
objSlice := strings.Split(obj.Name, slashSeparator+defaultMinioGWPrefix)
|
||||
objSlice := strings.Split(obj.Name, minio.SlashSeparator+defaultMinioGWPrefix)
|
||||
gwMeta, e := l.getGWMetadata(ctx, bucket, getDareMetaPath(objSlice[0]))
|
||||
if e != nil {
|
||||
continue
|
||||
|
@ -117,7 +116,7 @@ func (l *s3EncObjects) ListObjectsV2(ctx context.Context, bucket, prefix, contin
|
|||
}
|
||||
}
|
||||
for _, p := range loi.Prefixes {
|
||||
objName := strings.TrimSuffix(p, slashSeparator)
|
||||
objName := strings.TrimSuffix(p, minio.SlashSeparator)
|
||||
gm, err := l.getGWMetadata(ctx, bucket, getDareMetaPath(objName))
|
||||
// if prefix is actually a custom multi-part object, append it to objects
|
||||
if err == nil {
|
||||
|
@ -165,7 +164,7 @@ func isGWObject(objName string) bool {
|
|||
return false
|
||||
}
|
||||
|
||||
pfxSlice := strings.Split(objName, slashSeparator)
|
||||
pfxSlice := strings.Split(objName, minio.SlashSeparator)
|
||||
var i1, i2 int
|
||||
for i := len(pfxSlice) - 1; i >= 0; i-- {
|
||||
p := pfxSlice[i]
|
||||
|
@ -401,10 +400,10 @@ func (l *s3EncObjects) ListMultipartUploads(ctx context.Context, bucket string,
|
|||
if e != nil {
|
||||
return
|
||||
}
|
||||
lmi.KeyMarker = strings.TrimSuffix(lmi.KeyMarker, getGWContentPath("/"))
|
||||
lmi.NextKeyMarker = strings.TrimSuffix(lmi.NextKeyMarker, getGWContentPath("/"))
|
||||
lmi.KeyMarker = strings.TrimSuffix(lmi.KeyMarker, getGWContentPath(minio.SlashSeparator))
|
||||
lmi.NextKeyMarker = strings.TrimSuffix(lmi.NextKeyMarker, getGWContentPath(minio.SlashSeparator))
|
||||
for i := range lmi.Uploads {
|
||||
lmi.Uploads[i].Object = strings.TrimSuffix(lmi.Uploads[i].Object, getGWContentPath("/"))
|
||||
lmi.Uploads[i].Object = strings.TrimSuffix(lmi.Uploads[i].Object, getGWContentPath(minio.SlashSeparator))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
|
17 vendor/github.com/minio/minio/cmd/generic-handlers.go generated vendored
|
@ -153,7 +153,7 @@ func containsReservedMetadata(header http.Header) bool {
|
|||
// Reserved bucket.
|
||||
const (
|
||||
minioReservedBucket = "minio"
|
||||
minioReservedBucketPath = "/" + minioReservedBucket
|
||||
minioReservedBucketPath = SlashSeparator + minioReservedBucket
|
||||
)
|
||||
|
||||
// Adds redirect rules for incoming requests.
|
||||
|
@ -172,10 +172,10 @@ func setBrowserRedirectHandler(h http.Handler) http.Handler {
|
|||
// browser requests.
|
||||
func getRedirectLocation(urlPath string) (rLocation string) {
|
||||
if urlPath == minioReservedBucketPath {
|
||||
rLocation = minioReservedBucketPath + "/"
|
||||
rLocation = minioReservedBucketPath + SlashSeparator
|
||||
}
|
||||
if contains([]string{
|
||||
"/",
|
||||
SlashSeparator,
|
||||
"/webrpc",
|
||||
"/login",
|
||||
"/favicon.ico",
|
||||
|
@ -229,7 +229,7 @@ func guessIsRPCReq(req *http.Request) bool {
|
|||
return false
|
||||
}
|
||||
return req.Method == http.MethodPost &&
|
||||
strings.HasPrefix(req.URL.Path, minioReservedBucketPath+"/")
|
||||
strings.HasPrefix(req.URL.Path, minioReservedBucketPath+SlashSeparator)
|
||||
}
|
||||
|
||||
func (h redirectHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
|
@ -258,7 +258,7 @@ func setBrowserCacheControlHandler(h http.Handler) http.Handler {
|
|||
func (h cacheControlHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
if r.Method == http.MethodGet && guessIsBrowserReq(r) {
|
||||
// For all browser requests set appropriate Cache-Control policies
|
||||
if hasPrefix(r.URL.Path, minioReservedBucketPath+"/") {
|
||||
if hasPrefix(r.URL.Path, minioReservedBucketPath+SlashSeparator) {
|
||||
if hasSuffix(r.URL.Path, ".js") || r.URL.Path == minioReservedBucketPath+"/favicon.ico" {
|
||||
// For assets set cache expiry of one year. For each release, the name
|
||||
// of the asset name will change and hence it can not be served from cache.
|
||||
|
@ -276,7 +276,7 @@ func (h cacheControlHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
|||
// Check to allow access to the reserved "bucket" `/minio` for Admin
|
||||
// API requests.
|
||||
func isAdminReq(r *http.Request) bool {
|
||||
return strings.HasPrefix(r.URL.Path, adminAPIPathPrefix+"/")
|
||||
return strings.HasPrefix(r.URL.Path, adminAPIPathPrefix+SlashSeparator)
|
||||
}
|
||||
|
||||
// Adds verification for incoming paths.
|
||||
|
@ -596,7 +596,7 @@ const (
|
|||
// such as ".." and "."
|
||||
func hasBadPathComponent(path string) bool {
|
||||
path = strings.TrimSpace(path)
|
||||
for _, p := range strings.Split(path, slashSeparator) {
|
||||
for _, p := range strings.Split(path, SlashSeparator) {
|
||||
switch strings.TrimSpace(p) {
|
||||
case dotdotComponent:
|
||||
return true
|
||||
|
@ -746,6 +746,9 @@ func setBucketForwardingHandler(h http.Handler) http.Handler {
|
|||
fwd := handlers.NewForwarder(&handlers.Forwarder{
|
||||
PassHost: true,
|
||||
RoundTripper: NewCustomHTTPTransport(),
|
||||
Logger: func(err error) {
|
||||
logger.LogIf(context.Background(), err)
|
||||
},
|
||||
})
|
||||
return bucketForwardingHandler{fwd, h}
|
||||
}
|
||||
|
|
6
vendor/github.com/minio/minio/cmd/generic-handlers_test.go
generated
vendored
6
vendor/github.com/minio/minio/cmd/generic-handlers_test.go
generated
vendored
|
@ -35,12 +35,12 @@ func TestRedirectLocation(t *testing.T) {
|
|||
{
|
||||
// 1. When urlPath is '/minio'
|
||||
urlPath: minioReservedBucketPath,
|
||||
location: minioReservedBucketPath + "/",
|
||||
location: minioReservedBucketPath + SlashSeparator,
|
||||
},
|
||||
{
|
||||
// 2. When urlPath is '/'
|
||||
urlPath: "/",
|
||||
location: minioReservedBucketPath + "/",
|
||||
urlPath: SlashSeparator,
|
||||
location: minioReservedBucketPath + SlashSeparator,
|
||||
},
|
||||
{
|
||||
// 3. When urlPath is '/webrpc'
|
||||
|
|
23
vendor/github.com/minio/minio/cmd/handler-utils.go
generated
vendored
23
vendor/github.com/minio/minio/cmd/handler-utils.go
generated
vendored
|
@ -95,8 +95,8 @@ func isMetadataReplace(h http.Header) bool {
|
|||
// Splits an incoming path into bucket and object components.
|
||||
func path2BucketAndObject(path string) (bucket, object string) {
|
||||
// Skip the first element if it is '/', split the rest.
|
||||
path = strings.TrimPrefix(path, "/")
|
||||
pathComponents := strings.SplitN(path, "/", 2)
|
||||
path = strings.TrimPrefix(path, SlashSeparator)
|
||||
pathComponents := strings.SplitN(path, SlashSeparator, 2)
|
||||
|
||||
// Save the bucket and object extracted from path.
|
||||
switch len(pathComponents) {
|
||||
|
@ -370,7 +370,7 @@ func getResource(path string, host string, domains []string) (string, error) {
|
|||
continue
|
||||
}
|
||||
bucket := strings.TrimSuffix(host, "."+domain)
|
||||
return slashSeparator + pathJoin(bucket, path), nil
|
||||
return SlashSeparator + pathJoin(bucket, path), nil
|
||||
}
|
||||
return path, nil
|
||||
}
|
||||
|
@ -394,20 +394,3 @@ func getHostName(r *http.Request) (hostName string) {
|
|||
}
|
||||
return
|
||||
}
|
||||
|
||||
func isHTTPStatusOK(statusCode int) bool {
|
||||
// List of success status.
|
||||
var successStatus = []int{
|
||||
http.StatusOK,
|
||||
http.StatusCreated,
|
||||
http.StatusAccepted,
|
||||
http.StatusNoContent,
|
||||
http.StatusPartialContent,
|
||||
}
|
||||
for _, okstatus := range successStatus {
|
||||
if statusCode == okstatus {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
|
599
vendor/github.com/minio/minio/cmd/iam-etcd-store.go
generated
vendored
Normal file
599
vendor/github.com/minio/minio/cmd/iam-etcd-store.go
generated
vendored
Normal file
|
@ -0,0 +1,599 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
etcd "github.com/coreos/etcd/clientv3"
|
||||
"github.com/coreos/etcd/mvcc/mvccpb"
|
||||
"github.com/minio/minio-go/v6/pkg/set"
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/auth"
|
||||
iampolicy "github.com/minio/minio/pkg/iam/policy"
|
||||
)
|
||||
|
||||
var defaultContextTimeout = 30 * time.Second
|
||||
|
||||
func etcdKvsToSet(prefix string, kvs []*mvccpb.KeyValue) set.StringSet {
|
||||
users := set.NewStringSet()
|
||||
for _, kv := range kvs {
|
||||
// Extract user by stripping off the `prefix` value as suffix,
|
||||
// then strip off the remaining basename to obtain the prefix
|
||||
// value, usually in the following form.
|
||||
//
|
||||
// key := "config/iam/users/newuser/identity.json"
|
||||
// prefix := "config/iam/users/"
|
||||
// v := trim(trim(key, prefix), base(key)) == "newuser"
|
||||
//
|
||||
user := path.Clean(strings.TrimSuffix(strings.TrimPrefix(string(kv.Key), prefix), path.Base(string(kv.Key))))
|
||||
users.Add(user)
|
||||
}
|
||||
return users
|
||||
}
|
||||
|
||||
func etcdKvsToSetPolicyDB(prefix string, kvs []*mvccpb.KeyValue) set.StringSet {
|
||||
items := set.NewStringSet()
|
||||
for _, kv := range kvs {
|
||||
// Extract user item by stripping off prefix and then
|
||||
// stripping of ".json" suffix.
|
||||
//
|
||||
// key := "config/iam/policydb/users/myuser1.json"
|
||||
// prefix := "config/iam/policydb/users/"
|
||||
// v := trimSuffix(trimPrefix(key, prefix), ".json")
|
||||
key := string(kv.Key)
|
||||
item := path.Clean(strings.TrimSuffix(strings.TrimPrefix(key, prefix), ".json"))
|
||||
items.Add(item)
|
||||
}
|
||||
return items
|
||||
}
|
||||
|
||||
// IAMEtcdStore implements IAMStorageAPI
|
||||
type IAMEtcdStore struct {
|
||||
sync.RWMutex
|
||||
ctx context.Context
|
||||
|
||||
client *etcd.Client
|
||||
}
|
||||
|
||||
func newIAMEtcdStore() *IAMEtcdStore {
|
||||
return &IAMEtcdStore{client: globalEtcdClient}
|
||||
}
|
||||
|
||||
func (ies *IAMEtcdStore) getContext() context.Context {
|
||||
ies.RLock()
|
||||
defer ies.RUnlock()
|
||||
|
||||
if ies.ctx == nil {
|
||||
return context.Background()
|
||||
}
|
||||
return ies.ctx
|
||||
}
|
||||
|
||||
func (ies *IAMEtcdStore) setContext(ctx context.Context) {
|
||||
ies.Lock()
|
||||
defer ies.Unlock()
|
||||
|
||||
ies.ctx = ctx
|
||||
}
|
||||
|
||||
func (ies *IAMEtcdStore) clearContext() {
|
||||
ies.Lock()
|
||||
defer ies.Unlock()
|
||||
|
||||
ies.ctx = nil
|
||||
}
|
||||
|
||||
func (ies *IAMEtcdStore) saveIAMConfig(item interface{}, path string) error {
|
||||
data, err := json.Marshal(item)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return saveKeyEtcd(ies.getContext(), ies.client, path, data)
|
||||
}
|
||||
|
||||
func (ies *IAMEtcdStore) loadIAMConfig(item interface{}, path string) error {
|
||||
pdata, err := readKeyEtcd(ies.getContext(), ies.client, path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return json.Unmarshal(pdata, item)
|
||||
}
|
||||
|
||||
func (ies *IAMEtcdStore) deleteIAMConfig(path string) error {
|
||||
return deleteKeyEtcd(ies.getContext(), ies.client, path)
|
||||
}
|
||||
|
||||
func (ies *IAMEtcdStore) migrateUsersConfigToV1(isSTS bool) error {
|
||||
basePrefix := iamConfigUsersPrefix
|
||||
if isSTS {
|
||||
basePrefix = iamConfigSTSPrefix
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), defaultContextTimeout)
|
||||
defer cancel()
|
||||
ies.setContext(ctx)
|
||||
defer ies.clearContext()
|
||||
r, err := ies.client.Get(ctx, basePrefix, etcd.WithPrefix(), etcd.WithKeysOnly())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
users := etcdKvsToSet(basePrefix, r.Kvs)
|
||||
for _, user := range users.ToSlice() {
|
||||
{
|
||||
// 1. check if there is a policy file in the old loc.
|
||||
oldPolicyPath := pathJoin(basePrefix, user, iamPolicyFile)
|
||||
var policyName string
|
||||
err := ies.loadIAMConfig(&policyName, oldPolicyPath)
|
||||
if err != nil {
|
||||
switch err {
|
||||
case errConfigNotFound:
|
||||
// No mapped policy or already migrated.
|
||||
default:
|
||||
// corrupt data/read error, etc
|
||||
}
|
||||
goto next
|
||||
}
|
||||
|
||||
// 2. copy policy to new loc.
|
||||
mp := newMappedPolicy(policyName)
|
||||
path := getMappedPolicyPath(user, isSTS, false)
|
||||
if err := ies.saveIAMConfig(mp, path); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// 3. delete policy file in old loc.
|
||||
deleteKeyEtcd(ctx, ies.client, oldPolicyPath)
|
||||
}
|
||||
|
||||
next:
|
||||
// 4. check if user identity has old format.
|
||||
identityPath := pathJoin(basePrefix, user, iamIdentityFile)
|
||||
var cred auth.Credentials
|
||||
if err := ies.loadIAMConfig(&cred, identityPath); err != nil {
|
||||
switch err {
|
||||
case errConfigNotFound:
|
||||
// This case should not happen.
|
||||
default:
|
||||
// corrupt file or read error
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// If the file is already in the new format,
|
||||
// then the parsed auth.Credentials will have
|
||||
// the zero value for the struct.
|
||||
var zeroCred auth.Credentials
|
||||
if cred == zeroCred {
|
||||
// nothing to do
|
||||
continue
|
||||
}
|
||||
|
||||
// Found a id file in old format. Copy value
|
||||
// into new format and save it.
|
||||
cred.AccessKey = user
|
||||
u := newUserIdentity(cred)
|
||||
if err := ies.saveIAMConfig(u, identityPath); err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Nothing to delete as identity file location
|
||||
// has not changed.
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ies *IAMEtcdStore) migrateToV1() error {
|
||||
var iamFmt iamFormat
|
||||
path := getIAMFormatFilePath()
|
||||
if err := ies.loadIAMConfig(&iamFmt, path); err != nil {
|
||||
switch err {
|
||||
case errConfigNotFound:
|
||||
// Need to migrate to V1.
|
||||
default:
|
||||
return errors.New("corrupt IAM format file")
|
||||
}
|
||||
} else {
|
||||
if iamFmt.Version >= iamFormatVersion1 {
|
||||
// Already migrated to V1 of higher!
|
||||
return nil
|
||||
}
|
||||
// This case should not happen
|
||||
// (i.e. Version is 0 or negative.)
|
||||
return errors.New("got an invalid IAM format version")
|
||||
|
||||
}
|
||||
|
||||
// Migrate long-term users
|
||||
if err := ies.migrateUsersConfigToV1(false); err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
return err
|
||||
}
|
||||
// Migrate STS users
|
||||
if err := ies.migrateUsersConfigToV1(true); err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
return err
|
||||
}
|
||||
// Save iam version file.
|
||||
if err := ies.saveIAMConfig(newIAMFormatVersion1(), path); err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Should be called under config migration lock
|
||||
func (ies *IAMEtcdStore) migrateBackendFormat(objAPI ObjectLayer) error {
|
||||
if err := ies.migrateToV1(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ies *IAMEtcdStore) loadPolicyDoc(policy string, m map[string]iampolicy.Policy) error {
|
||||
var p iampolicy.Policy
|
||||
err := ies.loadIAMConfig(&p, getPolicyDocPath(policy))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m[policy] = p
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ies *IAMEtcdStore) loadPolicyDocs(m map[string]iampolicy.Policy) error {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), defaultContextTimeout)
|
||||
defer cancel()
|
||||
ies.setContext(ctx)
|
||||
defer ies.clearContext()
|
||||
r, err := ies.client.Get(ctx, iamConfigPoliciesPrefix, etcd.WithPrefix(), etcd.WithKeysOnly())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
policies := etcdKvsToSet(iamConfigPoliciesPrefix, r.Kvs)
|
||||
|
||||
// Reload config and policies for all policys.
|
||||
for _, policyName := range policies.ToSlice() {
|
||||
err = ies.loadPolicyDoc(policyName, m)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ies *IAMEtcdStore) loadUser(user string, isSTS bool, m map[string]auth.Credentials) error {
|
||||
var u UserIdentity
|
||||
err := ies.loadIAMConfig(&u, getUserIdentityPath(user, isSTS))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if u.Credentials.IsExpired() {
|
||||
// Delete expired identity.
|
||||
ctx := ies.getContext()
|
||||
deleteKeyEtcd(ctx, ies.client, getUserIdentityPath(user, isSTS))
|
||||
deleteKeyEtcd(ctx, ies.client, getMappedPolicyPath(user, isSTS, false))
|
||||
return nil
|
||||
}
|
||||
|
||||
if u.Credentials.AccessKey == "" {
|
||||
u.Credentials.AccessKey = user
|
||||
}
|
||||
m[user] = u.Credentials
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
func (ies *IAMEtcdStore) loadUsers(isSTS bool, m map[string]auth.Credentials) error {
|
||||
basePrefix := iamConfigUsersPrefix
|
||||
if isSTS {
|
||||
basePrefix = iamConfigSTSPrefix
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), defaultContextTimeout)
|
||||
defer cancel()
|
||||
ies.setContext(ctx)
|
||||
defer ies.clearContext()
|
||||
r, err := ies.client.Get(ctx, basePrefix, etcd.WithPrefix(), etcd.WithKeysOnly())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
users := etcdKvsToSet(basePrefix, r.Kvs)
|
||||
|
||||
// Reload config for all users.
|
||||
for _, user := range users.ToSlice() {
|
||||
if err = ies.loadUser(user, isSTS, m); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ies *IAMEtcdStore) loadGroup(group string, m map[string]GroupInfo) error {
|
||||
var gi GroupInfo
|
||||
err := ies.loadIAMConfig(&gi, getGroupInfoPath(group))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m[group] = gi
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
func (ies *IAMEtcdStore) loadGroups(m map[string]GroupInfo) error {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), defaultContextTimeout)
|
||||
defer cancel()
|
||||
ies.setContext(ctx)
|
||||
defer ies.clearContext()
|
||||
r, err := ies.client.Get(ctx, iamConfigGroupsPrefix, etcd.WithPrefix(), etcd.WithKeysOnly())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
groups := etcdKvsToSet(iamConfigGroupsPrefix, r.Kvs)
|
||||
|
||||
// Reload config for all groups.
|
||||
for _, group := range groups.ToSlice() {
|
||||
if err = ies.loadGroup(group, m); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
func (ies *IAMEtcdStore) loadMappedPolicy(name string, isSTS, isGroup bool, m map[string]MappedPolicy) error {
|
||||
var p MappedPolicy
|
||||
err := ies.loadIAMConfig(&p, getMappedPolicyPath(name, isSTS, isGroup))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m[name] = p
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
func (ies *IAMEtcdStore) loadMappedPolicies(isSTS, isGroup bool, m map[string]MappedPolicy) error {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), defaultContextTimeout)
|
||||
defer cancel()
|
||||
ies.setContext(ctx)
|
||||
defer ies.clearContext()
|
||||
var basePrefix string
|
||||
switch {
|
||||
case isSTS:
|
||||
basePrefix = iamConfigPolicyDBSTSUsersPrefix
|
||||
case isGroup:
|
||||
basePrefix = iamConfigPolicyDBGroupsPrefix
|
||||
default:
|
||||
basePrefix = iamConfigPolicyDBUsersPrefix
|
||||
}
|
||||
r, err := ies.client.Get(ctx, basePrefix, etcd.WithPrefix(), etcd.WithKeysOnly())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
users := etcdKvsToSetPolicyDB(basePrefix, r.Kvs)
|
||||
|
||||
// Reload config and policies for all users.
|
||||
for _, user := range users.ToSlice() {
|
||||
if err = ies.loadMappedPolicy(user, isSTS, isGroup, m); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
func (ies *IAMEtcdStore) loadAll(sys *IAMSys, objectAPI ObjectLayer) error {
|
||||
iamUsersMap := make(map[string]auth.Credentials)
|
||||
iamGroupsMap := make(map[string]GroupInfo)
|
||||
iamPolicyDocsMap := make(map[string]iampolicy.Policy)
|
||||
iamUserPolicyMap := make(map[string]MappedPolicy)
|
||||
iamGroupPolicyMap := make(map[string]MappedPolicy)
|
||||
|
||||
if err := ies.loadPolicyDocs(iamPolicyDocsMap); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := ies.loadUsers(false, iamUsersMap); err != nil {
|
||||
return err
|
||||
}
|
||||
// load STS temp users into the same map
|
||||
if err := ies.loadUsers(true, iamUsersMap); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := ies.loadGroups(iamGroupsMap); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := ies.loadMappedPolicies(false, false, iamUserPolicyMap); err != nil {
|
||||
return err
|
||||
}
|
||||
// load STS policy mappings into the same map
|
||||
if err := ies.loadMappedPolicies(true, false, iamUserPolicyMap); err != nil {
|
||||
return err
|
||||
}
|
||||
// load policies mapped to groups
|
||||
if err := ies.loadMappedPolicies(false, true, iamGroupPolicyMap); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Sets default canned policies, if none are set.
|
||||
setDefaultCannedPolicies(iamPolicyDocsMap)
|
||||
|
||||
sys.Lock()
|
||||
defer sys.Unlock()
|
||||
|
||||
sys.iamUsersMap = iamUsersMap
|
||||
sys.iamGroupsMap = iamGroupsMap
|
||||
sys.iamUserPolicyMap = iamUserPolicyMap
|
||||
sys.iamPolicyDocsMap = iamPolicyDocsMap
|
||||
sys.iamGroupPolicyMap = iamGroupPolicyMap
|
||||
sys.buildUserGroupMemberships()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (ies *IAMEtcdStore) savePolicyDoc(policyName string, p iampolicy.Policy) error {
|
||||
return ies.saveIAMConfig(&p, getPolicyDocPath(policyName))
|
||||
}
|
||||
|
||||
func (ies *IAMEtcdStore) saveMappedPolicy(name string, isSTS, isGroup bool, mp MappedPolicy) error {
|
||||
return ies.saveIAMConfig(mp, getMappedPolicyPath(name, isSTS, isGroup))
|
||||
}
|
||||
|
||||
func (ies *IAMEtcdStore) saveUserIdentity(name string, isSTS bool, u UserIdentity) error {
|
||||
return ies.saveIAMConfig(u, getUserIdentityPath(name, isSTS))
|
||||
}
|
||||
|
||||
func (ies *IAMEtcdStore) saveGroupInfo(name string, gi GroupInfo) error {
|
||||
return ies.saveIAMConfig(gi, getGroupInfoPath(name))
|
||||
}
|
||||
|
||||
func (ies *IAMEtcdStore) deletePolicyDoc(name string) error {
|
||||
return ies.deleteIAMConfig(getPolicyDocPath(name))
|
||||
}
|
||||
|
||||
func (ies *IAMEtcdStore) deleteMappedPolicy(name string, isSTS, isGroup bool) error {
|
||||
return ies.deleteIAMConfig(getMappedPolicyPath(name, isSTS, isGroup))
|
||||
}
|
||||
|
||||
func (ies *IAMEtcdStore) deleteUserIdentity(name string, isSTS bool) error {
|
||||
return ies.deleteIAMConfig(getUserIdentityPath(name, isSTS))
|
||||
}
|
||||
|
||||
func (ies *IAMEtcdStore) deleteGroupInfo(name string) error {
|
||||
return ies.deleteIAMConfig(getGroupInfoPath(name))
|
||||
}
|
||||
|
||||
func (ies *IAMEtcdStore) watch(sys *IAMSys) {
|
||||
watchEtcd := func() {
|
||||
// Refresh IAMSys with etcd watch.
|
||||
for {
|
||||
watchCh := ies.client.Watch(context.Background(),
|
||||
iamConfigPrefix, etcd.WithPrefix(), etcd.WithKeysOnly())
|
||||
select {
|
||||
case <-GlobalServiceDoneCh:
|
||||
return
|
||||
case watchResp, ok := <-watchCh:
|
||||
if !ok {
|
||||
time.Sleep(1 * time.Second)
|
||||
continue
|
||||
}
|
||||
if err := watchResp.Err(); err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
// log and retry.
|
||||
time.Sleep(1 * time.Second)
|
||||
continue
|
||||
}
|
||||
for _, event := range watchResp.Events {
|
||||
sys.Lock()
|
||||
ies.reloadFromEvent(sys, event)
|
||||
sys.Unlock()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
go watchEtcd()
|
||||
}
|
||||
|
||||
// sys.RLock is held by caller.
|
||||
func (ies *IAMEtcdStore) reloadFromEvent(sys *IAMSys, event *etcd.Event) {
|
||||
eventCreate := event.IsModify() || event.IsCreate()
|
||||
eventDelete := event.Type == etcd.EventTypeDelete
|
||||
usersPrefix := strings.HasPrefix(string(event.Kv.Key), iamConfigUsersPrefix)
|
||||
groupsPrefix := strings.HasPrefix(string(event.Kv.Key), iamConfigGroupsPrefix)
|
||||
stsPrefix := strings.HasPrefix(string(event.Kv.Key), iamConfigSTSPrefix)
|
||||
policyPrefix := strings.HasPrefix(string(event.Kv.Key), iamConfigPoliciesPrefix)
|
||||
policyDBUsersPrefix := strings.HasPrefix(string(event.Kv.Key), iamConfigPolicyDBUsersPrefix)
|
||||
policyDBSTSUsersPrefix := strings.HasPrefix(string(event.Kv.Key), iamConfigPolicyDBSTSUsersPrefix)
|
||||
|
||||
switch {
|
||||
case eventCreate:
|
||||
switch {
|
||||
case usersPrefix:
|
||||
accessKey := path.Dir(strings.TrimPrefix(string(event.Kv.Key),
|
||||
iamConfigUsersPrefix))
|
||||
ies.loadUser(accessKey, false, sys.iamUsersMap)
|
||||
case stsPrefix:
|
||||
accessKey := path.Dir(strings.TrimPrefix(string(event.Kv.Key),
|
||||
iamConfigSTSPrefix))
|
||||
ies.loadUser(accessKey, true, sys.iamUsersMap)
|
||||
case groupsPrefix:
|
||||
group := path.Dir(strings.TrimPrefix(string(event.Kv.Key),
|
||||
iamConfigGroupsPrefix))
|
||||
ies.loadGroup(group, sys.iamGroupsMap)
|
||||
gi := sys.iamGroupsMap[group]
|
||||
sys.removeGroupFromMembershipsMap(group)
|
||||
sys.updateGroupMembershipsMap(group, &gi)
|
||||
case policyPrefix:
|
||||
policyName := path.Dir(strings.TrimPrefix(string(event.Kv.Key),
|
||||
iamConfigPoliciesPrefix))
|
||||
ies.loadPolicyDoc(policyName, sys.iamPolicyDocsMap)
|
||||
case policyDBUsersPrefix:
|
||||
policyMapFile := strings.TrimPrefix(string(event.Kv.Key),
|
||||
iamConfigPolicyDBUsersPrefix)
|
||||
user := strings.TrimSuffix(policyMapFile, ".json")
|
||||
ies.loadMappedPolicy(user, false, false, sys.iamUserPolicyMap)
|
||||
case policyDBSTSUsersPrefix:
|
||||
policyMapFile := strings.TrimPrefix(string(event.Kv.Key),
|
||||
iamConfigPolicyDBSTSUsersPrefix)
|
||||
user := strings.TrimSuffix(policyMapFile, ".json")
|
||||
ies.loadMappedPolicy(user, true, false, sys.iamUserPolicyMap)
|
||||
}
|
||||
case eventDelete:
|
||||
switch {
|
||||
case usersPrefix:
|
||||
accessKey := path.Dir(strings.TrimPrefix(string(event.Kv.Key),
|
||||
iamConfigUsersPrefix))
|
||||
delete(sys.iamUsersMap, accessKey)
|
||||
case stsPrefix:
|
||||
accessKey := path.Dir(strings.TrimPrefix(string(event.Kv.Key),
|
||||
iamConfigSTSPrefix))
|
||||
delete(sys.iamUsersMap, accessKey)
|
||||
case groupsPrefix:
|
||||
group := path.Dir(strings.TrimPrefix(string(event.Kv.Key),
|
||||
iamConfigGroupsPrefix))
|
||||
sys.removeGroupFromMembershipsMap(group)
|
||||
delete(sys.iamGroupsMap, group)
|
||||
delete(sys.iamGroupPolicyMap, group)
|
||||
case policyPrefix:
|
||||
policyName := path.Dir(strings.TrimPrefix(string(event.Kv.Key),
|
||||
iamConfigPoliciesPrefix))
|
||||
delete(sys.iamPolicyDocsMap, policyName)
|
||||
case policyDBUsersPrefix:
|
||||
policyMapFile := strings.TrimPrefix(string(event.Kv.Key),
|
||||
iamConfigPolicyDBUsersPrefix)
|
||||
user := strings.TrimSuffix(policyMapFile, ".json")
|
||||
delete(sys.iamUserPolicyMap, user)
|
||||
case policyDBSTSUsersPrefix:
|
||||
policyMapFile := strings.TrimPrefix(string(event.Kv.Key),
|
||||
iamConfigPolicyDBSTSUsersPrefix)
|
||||
user := strings.TrimSuffix(policyMapFile, ".json")
|
||||
delete(sys.iamUserPolicyMap, user)
|
||||
}
|
||||
}
|
||||
}
|
582
vendor/github.com/minio/minio/cmd/iam-object-store.go
generated
vendored
Normal file
582
vendor/github.com/minio/minio/cmd/iam-object-store.go
generated
vendored
Normal file
|
@ -0,0 +1,582 @@
|
|||
/*
|
||||
* MinIO Cloud Storage, (C) 2019 MinIO, Inc.
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/minio/minio/cmd/logger"
|
||||
"github.com/minio/minio/pkg/auth"
|
||||
iampolicy "github.com/minio/minio/pkg/iam/policy"
|
||||
)
|
||||
|
||||
// IAMObjectStore implements IAMStorageAPI
|
||||
type IAMObjectStore struct {
|
||||
// Protect assignment to objAPI
|
||||
sync.RWMutex
|
||||
|
||||
objAPI ObjectLayer
|
||||
}
|
||||
|
||||
func newIAMObjectStore() *IAMObjectStore {
|
||||
return &IAMObjectStore{objAPI: nil}
|
||||
}
|
||||
|
||||
func (iamOS *IAMObjectStore) getObjectAPI() ObjectLayer {
|
||||
iamOS.RLock()
|
||||
defer iamOS.RUnlock()
|
||||
if iamOS.objAPI != nil {
|
||||
return iamOS.objAPI
|
||||
}
|
||||
return newObjectLayerFn()
|
||||
}
|
||||
|
||||
func (iamOS *IAMObjectStore) setObjectAPI(objAPI ObjectLayer) {
|
||||
iamOS.Lock()
|
||||
defer iamOS.Unlock()
|
||||
iamOS.objAPI = objAPI
|
||||
}
|
||||
|
||||
func (iamOS *IAMObjectStore) clearObjectAPI() {
|
||||
iamOS.Lock()
|
||||
defer iamOS.Unlock()
|
||||
iamOS.objAPI = nil
|
||||
}
|
||||
|
||||
// Migrate users directory in a single scan.
|
||||
//
|
||||
// 1. Migrate user policy from:
|
||||
//
|
||||
// `iamConfigUsersPrefix + "<username>/policy.json"`
|
||||
//
|
||||
// to:
|
||||
//
|
||||
// `iamConfigPolicyDBUsersPrefix + "<username>.json"`.
|
||||
//
|
||||
// 2. Add versioning to the policy json file in the new
|
||||
// location.
|
||||
//
|
||||
// 3. Migrate user identity json file to include version info.
|
||||
func (iamOS *IAMObjectStore) migrateUsersConfigToV1(isSTS bool) error {
|
||||
basePrefix := iamConfigUsersPrefix
|
||||
if isSTS {
|
||||
basePrefix = iamConfigSTSPrefix
|
||||
}
|
||||
|
||||
objAPI := iamOS.getObjectAPI()
|
||||
|
||||
doneCh := make(chan struct{})
|
||||
defer close(doneCh)
|
||||
for item := range listIAMConfigItems(objAPI, basePrefix, true, doneCh) {
|
||||
if item.Err != nil {
|
||||
return item.Err
|
||||
}
|
||||
|
||||
user := item.Item
|
||||
|
||||
{
|
||||
// 1. check if there is policy file in old location.
|
||||
oldPolicyPath := pathJoin(basePrefix, user, iamPolicyFile)
|
||||
var policyName string
|
||||
if err := iamOS.loadIAMConfig(&policyName, oldPolicyPath); err != nil {
|
||||
switch err {
|
||||
case errConfigNotFound:
|
||||
// This case means it is already
|
||||
// migrated or there is no policy on
|
||||
// user.
|
||||
default:
|
||||
// File may be corrupt or network error
|
||||
}
|
||||
|
||||
// Nothing to do on the policy file,
|
||||
// so move on to check the id file.
|
||||
goto next
|
||||
}
|
||||
|
||||
// 2. copy policy file to new location.
|
||||
mp := newMappedPolicy(policyName)
|
||||
if err := iamOS.saveMappedPolicy(user, isSTS, false, mp); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// 3. delete policy file from old
|
||||
// location. Ignore error.
|
||||
iamOS.deleteIAMConfig(oldPolicyPath)
|
||||
}
|
||||
next:
|
||||
// 4. check if user identity has old format.
|
||||
identityPath := pathJoin(basePrefix, user, iamIdentityFile)
|
||||
var cred auth.Credentials
|
||||
if err := iamOS.loadIAMConfig(&cred, identityPath); err != nil {
|
||||
switch err.(type) {
|
||||
case ObjectNotFound:
|
||||
// This should not happen.
|
||||
default:
|
||||
// File may be corrupt or network error
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// If the file is already in the new format,
|
||||
// then the parsed auth.Credentials will have
|
||||
// the zero value for the struct.
|
||||
var zeroCred auth.Credentials
|
||||
if cred == zeroCred {
|
||||
// nothing to do
|
||||
continue
|
||||
}
|
||||
|
||||
// Found a id file in old format. Copy value
|
||||
// into new format and save it.
|
||||
cred.AccessKey = user
|
||||
u := newUserIdentity(cred)
|
||||
if err := iamOS.saveIAMConfig(u, identityPath); err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
return err
|
||||
}
|
||||
|
||||
// Nothing to delete as identity file location
|
||||
// has not changed.
|
||||
}
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
func (iamOS *IAMObjectStore) migrateToV1() error {
|
||||
var iamFmt iamFormat
|
||||
path := getIAMFormatFilePath()
|
||||
if err := iamOS.loadIAMConfig(&iamFmt, path); err != nil {
|
||||
switch err {
|
||||
case errConfigNotFound:
|
||||
// Need to migrate to V1.
|
||||
default:
|
||||
return errors.New("corrupt IAM format file")
|
||||
}
|
||||
} else {
|
||||
if iamFmt.Version >= iamFormatVersion1 {
|
||||
// Nothing to do.
|
||||
return nil
|
||||
}
|
||||
// This case should not happen
|
||||
// (i.e. Version is 0 or negative.)
|
||||
return errors.New("got an invalid IAM format version")
|
||||
}
|
||||
|
||||
// Migrate long-term users
|
||||
if err := iamOS.migrateUsersConfigToV1(false); err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
return err
|
||||
}
|
||||
// Migrate STS users
|
||||
if err := iamOS.migrateUsersConfigToV1(true); err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
return err
|
||||
}
|
||||
// Save iam format to version 1.
|
||||
if err := iamOS.saveIAMConfig(newIAMFormatVersion1(), path); err != nil {
|
||||
logger.LogIf(context.Background(), err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Should be called under config migration lock
|
||||
func (iamOS *IAMObjectStore) migrateBackendFormat(objAPI ObjectLayer) error {
|
||||
iamOS.setObjectAPI(objAPI)
|
||||
defer iamOS.clearObjectAPI()
|
||||
if err := iamOS.migrateToV1(); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (iamOS *IAMObjectStore) saveIAMConfig(item interface{}, path string) error {
|
||||
objectAPI := iamOS.getObjectAPI()
|
||||
data, err := json.Marshal(item)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return saveConfig(context.Background(), objectAPI, path, data)
|
||||
}
|
||||
|
||||
func (iamOS *IAMObjectStore) loadIAMConfig(item interface{}, path string) error {
|
||||
objectAPI := iamOS.getObjectAPI()
|
||||
data, err := readConfig(context.Background(), objectAPI, path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return json.Unmarshal(data, item)
|
||||
}
|
||||
|
||||
func (iamOS *IAMObjectStore) deleteIAMConfig(path string) error {
|
||||
err := deleteConfig(context.Background(), iamOS.getObjectAPI(), path)
|
||||
if _, ok := err.(ObjectNotFound); ok {
|
||||
return errConfigNotFound
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func (iamOS *IAMObjectStore) loadPolicyDoc(policy string, m map[string]iampolicy.Policy) error {
|
||||
objectAPI := iamOS.getObjectAPI()
|
||||
if objectAPI == nil {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
|
||||
var p iampolicy.Policy
|
||||
err := iamOS.loadIAMConfig(&p, getPolicyDocPath(policy))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m[policy] = p
|
||||
return nil
|
||||
}
|
||||
|
||||
func (iamOS *IAMObjectStore) loadPolicyDocs(m map[string]iampolicy.Policy) error {
|
||||
objectAPI := iamOS.getObjectAPI()
|
||||
if objectAPI == nil {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
|
||||
doneCh := make(chan struct{})
|
||||
defer close(doneCh)
|
||||
for item := range listIAMConfigItems(objectAPI, iamConfigPoliciesPrefix, true, doneCh) {
|
||||
if item.Err != nil {
|
||||
return item.Err
|
||||
}
|
||||
|
||||
policyName := item.Item
|
||||
err := iamOS.loadPolicyDoc(policyName, m)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (iamOS *IAMObjectStore) loadUser(user string, isSTS bool, m map[string]auth.Credentials) error {
|
||||
objectAPI := iamOS.getObjectAPI()
|
||||
if objectAPI == nil {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
|
||||
var u UserIdentity
|
||||
err := iamOS.loadIAMConfig(&u, getUserIdentityPath(user, isSTS))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if u.Credentials.IsExpired() {
|
||||
// Delete expired identity - ignoring errors here.
|
||||
iamOS.deleteIAMConfig(getUserIdentityPath(user, isSTS))
|
||||
iamOS.deleteIAMConfig(getMappedPolicyPath(user, isSTS, false))
|
||||
return nil
|
||||
}
|
||||
|
||||
if u.Credentials.AccessKey == "" {
|
||||
u.Credentials.AccessKey = user
|
||||
}
|
||||
m[user] = u.Credentials
|
||||
return nil
|
||||
}
|
||||
|
||||
func (iamOS *IAMObjectStore) loadUsers(isSTS bool, m map[string]auth.Credentials) error {
|
||||
objectAPI := iamOS.getObjectAPI()
|
||||
if objectAPI == nil {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
|
||||
doneCh := make(chan struct{})
|
||||
defer close(doneCh)
|
||||
basePrefix := iamConfigUsersPrefix
|
||||
if isSTS {
|
||||
basePrefix = iamConfigSTSPrefix
|
||||
}
|
||||
for item := range listIAMConfigItems(objectAPI, basePrefix, true, doneCh) {
|
||||
if item.Err != nil {
|
||||
return item.Err
|
||||
}
|
||||
|
||||
userName := item.Item
|
||||
err := iamOS.loadUser(userName, isSTS, m)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (iamOS *IAMObjectStore) loadGroup(group string, m map[string]GroupInfo) error {
|
||||
objectAPI := iamOS.getObjectAPI()
|
||||
if objectAPI == nil {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
|
||||
var g GroupInfo
|
||||
err := iamOS.loadIAMConfig(&g, getGroupInfoPath(group))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m[group] = g
|
||||
return nil
|
||||
}
|
||||
|
||||
func (iamOS *IAMObjectStore) loadGroups(m map[string]GroupInfo) error {
|
||||
objectAPI := iamOS.getObjectAPI()
|
||||
if objectAPI == nil {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
|
||||
doneCh := make(chan struct{})
|
||||
defer close(doneCh)
|
||||
for item := range listIAMConfigItems(objectAPI, iamConfigGroupsPrefix, true, doneCh) {
|
||||
if item.Err != nil {
|
||||
return item.Err
|
||||
}
|
||||
|
||||
group := item.Item
|
||||
err := iamOS.loadGroup(group, m)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (iamOS *IAMObjectStore) loadMappedPolicy(name string, isSTS, isGroup bool,
|
||||
m map[string]MappedPolicy) error {
|
||||
|
||||
objectAPI := iamOS.getObjectAPI()
|
||||
if objectAPI == nil {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
|
||||
var p MappedPolicy
|
||||
err := iamOS.loadIAMConfig(&p, getMappedPolicyPath(name, isSTS, isGroup))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
m[name] = p
|
||||
return nil
|
||||
}
|
||||
|
||||
func (iamOS *IAMObjectStore) loadMappedPolicies(isSTS, isGroup bool, m map[string]MappedPolicy) error {
|
||||
objectAPI := iamOS.getObjectAPI()
|
||||
if objectAPI == nil {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
|
||||
doneCh := make(chan struct{})
|
||||
defer close(doneCh)
|
||||
var basePath string
|
||||
switch {
|
||||
case isSTS:
|
||||
basePath = iamConfigPolicyDBSTSUsersPrefix
|
||||
case isGroup:
|
||||
basePath = iamConfigPolicyDBGroupsPrefix
|
||||
default:
|
||||
basePath = iamConfigPolicyDBUsersPrefix
|
||||
}
|
||||
for item := range listIAMConfigItems(objectAPI, basePath, false, doneCh) {
|
||||
if item.Err != nil {
|
||||
return item.Err
|
||||
}
|
||||
|
||||
policyFile := item.Item
|
||||
userOrGroupName := strings.TrimSuffix(policyFile, ".json")
|
||||
err := iamOS.loadMappedPolicy(userOrGroupName, isSTS, isGroup, m)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Refresh IAMSys. If an object layer is passed in use that, otherwise
|
||||
// load from global.
|
||||
func (iamOS *IAMObjectStore) loadAll(sys *IAMSys, objectAPI ObjectLayer) error {
|
||||
if objectAPI == nil {
|
||||
objectAPI = iamOS.getObjectAPI()
|
||||
}
|
||||
if objectAPI == nil {
|
||||
return errServerNotInitialized
|
||||
}
|
||||
// cache object layer for other load* functions
|
||||
iamOS.setObjectAPI(objectAPI)
|
||||
defer iamOS.clearObjectAPI()
|
||||
|
||||
iamUsersMap := make(map[string]auth.Credentials)
|
||||
iamGroupsMap := make(map[string]GroupInfo)
|
||||
iamPolicyDocsMap := make(map[string]iampolicy.Policy)
|
||||
iamUserPolicyMap := make(map[string]MappedPolicy)
|
||||
iamGroupPolicyMap := make(map[string]MappedPolicy)
|
||||
|
||||
if err := iamOS.loadPolicyDocs(iamPolicyDocsMap); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := iamOS.loadUsers(false, iamUsersMap); err != nil {
|
||||
return err
|
||||
}
|
||||
// load STS temp users into the same map
|
||||
if err := iamOS.loadUsers(true, iamUsersMap); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := iamOS.loadGroups(iamGroupsMap); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := iamOS.loadMappedPolicies(false, false, iamUserPolicyMap); err != nil {
|
||||
return err
|
||||
}
|
||||
// load STS policy mappings into the same map
|
||||
if err := iamOS.loadMappedPolicies(true, false, iamUserPolicyMap); err != nil {
|
||||
return err
|
||||
}
|
||||
// load policies mapped to groups
|
||||
if err := iamOS.loadMappedPolicies(false, true, iamGroupPolicyMap); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Sets default canned policies, if none are set.
|
||||
setDefaultCannedPolicies(iamPolicyDocsMap)
|
||||
|
||||
sys.Lock()
|
||||
defer sys.Unlock()
|
||||
|
||||
sys.iamUsersMap = iamUsersMap
|
||||
sys.iamPolicyDocsMap = iamPolicyDocsMap
|
||||
sys.iamUserPolicyMap = iamUserPolicyMap
|
||||
sys.iamGroupPolicyMap = iamGroupPolicyMap
|
||||
sys.iamGroupsMap = iamGroupsMap
|
||||
sys.buildUserGroupMemberships()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (iamOS *IAMObjectStore) savePolicyDoc(policyName string, p iampolicy.Policy) error {
|
||||
return iamOS.saveIAMConfig(&p, getPolicyDocPath(policyName))
|
||||
}
|
||||
|
||||
func (iamOS *IAMObjectStore) saveMappedPolicy(name string, isSTS, isGroup bool, mp MappedPolicy) error {
|
||||
return iamOS.saveIAMConfig(mp, getMappedPolicyPath(name, isSTS, isGroup))
|
||||
}
|
||||
|
||||
func (iamOS *IAMObjectStore) saveUserIdentity(name string, isSTS bool, u UserIdentity) error {
|
||||
return iamOS.saveIAMConfig(u, getUserIdentityPath(name, isSTS))
|
||||
}
|
||||
|
||||
func (iamOS *IAMObjectStore) saveGroupInfo(name string, gi GroupInfo) error {
|
||||
return iamOS.saveIAMConfig(gi, getGroupInfoPath(name))
|
||||
}
|
||||
|
||||
func (iamOS *IAMObjectStore) deletePolicyDoc(name string) error {
|
||||
return iamOS.deleteIAMConfig(getPolicyDocPath(name))
|
||||
}
|
||||
|
||||
func (iamOS *IAMObjectStore) deleteMappedPolicy(name string, isSTS, isGroup bool) error {
|
||||
return iamOS.deleteIAMConfig(getMappedPolicyPath(name, isSTS, isGroup))
|
||||
}
|
||||
|
||||
func (iamOS *IAMObjectStore) deleteUserIdentity(name string, isSTS bool) error {
|
||||
return iamOS.deleteIAMConfig(getUserIdentityPath(name, isSTS))
|
||||
}
|
||||
|
||||
func (iamOS *IAMObjectStore) deleteGroupInfo(name string) error {
|
||||
return iamOS.deleteIAMConfig(getGroupInfoPath(name))
|
||||
}
|
||||
|
||||
// helper type for listIAMConfigItems
|
||||
type itemOrErr struct {
|
||||
Item string
|
||||
Err error
|
||||
}
|
||||
|
||||
// Lists files or dirs in the minioMetaBucket at the given path
|
||||
// prefix. If dirs is true, only directories are listed, otherwise
|
||||
// only objects are listed. All returned items have the pathPrefix
|
||||
// removed from their names.
|
||||
func listIAMConfigItems(objectAPI ObjectLayer, pathPrefix string, dirs bool,
|
||||
doneCh <-chan struct{}) <-chan itemOrErr {
|
||||
|
||||
ch := make(chan itemOrErr)
|
||||
dirList := func(lo ListObjectsInfo) []string {
|
||||
return lo.Prefixes
|
||||
}
|
||||
filesList := func(lo ListObjectsInfo) (r []string) {
|
||||
for _, o := range lo.Objects {
|
||||
r = append(r, o.Name)
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
go func() {
|
||||
marker := ""
|
||||
for {
|
||||
lo, err := objectAPI.ListObjects(context.Background(),
|
||||
minioMetaBucket, pathPrefix, marker, SlashSeparator, 1000)
|
||||
if err != nil {
|
||||
select {
|
||||
case ch <- itemOrErr{Err: err}:
|
||||
case <-doneCh:
|
||||
}
|
||||
close(ch)
|
||||
return
|
||||
}
|
||||
marker = lo.NextMarker
|
||||
lister := dirList(lo)
|
||||
if !dirs {
|
||||
lister = filesList(lo)
|
||||
}
|
||||
for _, itemPrefix := range lister {
|
||||
item := strings.TrimPrefix(itemPrefix, pathPrefix)
|
||||
item = strings.TrimSuffix(item, SlashSeparator)
|
||||
select {
|
||||
case ch <- itemOrErr{Item: item}:
|
||||
case <-doneCh:
|
||||
close(ch)
|
||||
return
|
||||
}
|
||||
}
|
||||
if !lo.IsTruncated {
|
||||
close(ch)
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
return ch
|
||||
}
|
||||
|
||||
func (iamOS *IAMObjectStore) watch(sys *IAMSys) {
|
||||
watchDisk := func() {
|
||||
ticker := time.NewTicker(globalRefreshIAMInterval)
|
||||
defer ticker.Stop()
|
||||
for {
|
||||
select {
|
||||
case <-GlobalServiceDoneCh:
|
||||
return
|
||||
case <-ticker.C:
|
||||
iamOS.loadAll(sys, nil)
|
||||
}
|
||||
}
|
||||
}
|
||||
// Refresh IAMSys in background.
|
||||
go watchDisk()
|
||||
}
|
1245
vendor/github.com/minio/minio/cmd/iam.go
generated
vendored
1245
vendor/github.com/minio/minio/cmd/iam.go
generated
vendored
File diff suppressed because it is too large
Load diff
34
vendor/github.com/minio/minio/cmd/lock-rest-client.go
generated
vendored
34
vendor/github.com/minio/minio/cmd/lock-rest-client.go
generated
vendored
|
@ -17,10 +17,8 @@
|
|||
package cmd
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"encoding/gob"
|
||||
"errors"
|
||||
"io"
|
||||
"sync"
|
||||
|
@ -131,32 +129,16 @@ func (client *lockRESTClient) Close() error {
|
|||
|
||||
// restCall makes a call to the lock REST server.
|
||||
func (client *lockRESTClient) restCall(call string, args dsync.LockArgs) (reply bool, err error) {
|
||||
values := url.Values{}
|
||||
values.Set(lockRESTUID, args.UID)
|
||||
values.Set(lockRESTSource, args.Source)
|
||||
values.Set(lockRESTResource, args.Resource)
|
||||
values.Set(lockRESTServerAddr, args.ServerAddr)
|
||||
values.Set(lockRESTServerEndpoint, args.ServiceEndpoint)
|
||||
|
||||
reader := bytes.NewBuffer(make([]byte, 0, 2048))
|
||||
err = gob.NewEncoder(reader).Encode(args)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
respBody, err := client.call(call, nil, reader, -1)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
var resp lockResponse
|
||||
respBody, err := client.call(call, values, nil, -1)
|
||||
defer http.DrainBody(respBody)
|
||||
err = gob.NewDecoder(respBody).Decode(&resp)
|
||||
|
||||
if err != nil || !resp.Success {
|
||||
reqInfo := &logger.ReqInfo{}
|
||||
reqInfo.AppendTags("resource", args.Resource)
|
||||
reqInfo.AppendTags("serveraddress", args.ServerAddr)
|
||||
reqInfo.AppendTags("serviceendpoint", args.ServiceEndpoint)
|
||||
reqInfo.AppendTags("source", args.Source)
|
||||
reqInfo.AppendTags("uid", args.UID)
|
||||
ctx := logger.SetReqInfo(context.Background(), reqInfo)
|
||||
logger.LogIf(ctx, err)
|
||||
}
|
||||
return resp.Success, err
|
||||
return err == nil, err
|
||||
}
|
||||
|
||||
// RLock calls read lock REST API.
|
||||
|
|
18
vendor/github.com/minio/minio/cmd/lock-rest-server-common.go
generated
vendored
18
vendor/github.com/minio/minio/cmd/lock-rest-server-common.go
generated
vendored
|
@ -21,7 +21,7 @@ import (
|
|||
"time"
|
||||
)
|
||||
|
||||
const lockRESTVersion = "v1"
|
||||
const lockRESTVersion = "v2"
|
||||
const lockRESTPath = minioReservedBucketPath + "/lock/" + lockRESTVersion
|
||||
|
||||
var lockServicePath = path.Join(minioReservedBucketPath, lockServiceSubPath)
|
||||
|
@ -33,6 +33,18 @@ const (
|
|||
lockRESTMethodRUnlock = "runlock"
|
||||
lockRESTMethodForceUnlock = "forceunlock"
|
||||
lockRESTMethodExpired = "expired"
|
||||
|
||||
// Unique ID of lock/unlock request.
|
||||
lockRESTUID = "uid"
|
||||
// Source contains the line number, function and file name of the code
|
||||
// on the client node that requested the lock.
|
||||
lockRESTSource = "source"
|
||||
// Resource contains a entity to be locked/unlocked.
|
||||
lockRESTResource = "resource"
|
||||
// ServerAddr contains the address of the server who requested lock/unlock of the above resource.
|
||||
lockRESTServerAddr = "serverAddr"
|
||||
// ServiceEndpoint contains the network path of above server to do lock/unlock.
|
||||
lockRESTServerEndpoint = "serverEndpoint"
|
||||
)
|
||||
|
||||
// nameLockRequesterInfoPair is a helper type for lock maintenance
|
||||
|
@ -41,10 +53,6 @@ type nameLockRequesterInfoPair struct {
|
|||
lri lockRequesterInfo
|
||||
}
|
||||
|
||||
type lockResponse struct {
|
||||
Success bool
|
||||
}
|
||||
|
||||
// Similar to removeEntry but only removes an entry only if the lock entry exists in map.
|
||||
func (l *localLocker) removeEntryIfExists(nlrip nameLockRequesterInfoPair) {
|
||||
// Check if entry is still in map (could have been removed altogether by 'concurrent' (R)Unlock of last entry)
|
||||
|
|
144
vendor/github.com/minio/minio/cmd/lock-rest-server.go
generated
vendored
144
vendor/github.com/minio/minio/cmd/lock-rest-server.go
generated
vendored
|
@ -18,7 +18,6 @@ package cmd
|
|||
|
||||
import (
|
||||
"context"
|
||||
"encoding/gob"
|
||||
"errors"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
|
@ -60,6 +59,15 @@ func (l *lockRESTServer) IsValid(w http.ResponseWriter, r *http.Request) bool {
|
|||
return true
|
||||
}
|
||||
|
||||
func getLockArgs(r *http.Request) dsync.LockArgs {
|
||||
return dsync.LockArgs{
|
||||
UID: r.URL.Query().Get(lockRESTUID),
|
||||
Resource: r.URL.Query().Get(lockRESTResource),
|
||||
ServerAddr: r.URL.Query().Get(lockRESTServerAddr),
|
||||
ServiceEndpoint: r.URL.Query().Get(lockRESTServerEndpoint),
|
||||
}
|
||||
}
|
||||
|
||||
// LockHandler - Acquires a lock.
|
||||
func (l *lockRESTServer) LockHandler(w http.ResponseWriter, r *http.Request) {
|
||||
if !l.IsValid(w, r) {
|
||||
|
@ -67,28 +75,10 @@ func (l *lockRESTServer) LockHandler(w http.ResponseWriter, r *http.Request) {
|
|||
return
|
||||
}
|
||||
|
||||
ctx := newContext(r, w, "Lock")
|
||||
|
||||
var lockArgs dsync.LockArgs
|
||||
if r.ContentLength < 0 {
|
||||
l.writeErrorResponse(w, errInvalidArgument)
|
||||
return
|
||||
}
|
||||
|
||||
err := gob.NewDecoder(r.Body).Decode(&lockArgs)
|
||||
if err != nil {
|
||||
if _, err := l.ll.Lock(getLockArgs(r)); err != nil {
|
||||
l.writeErrorResponse(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
success, err := l.ll.Lock(lockArgs)
|
||||
if err != nil {
|
||||
l.writeErrorResponse(w, err)
|
||||
return
|
||||
}
|
||||
resp := lockResponse{Success: success}
|
||||
logger.LogIf(ctx, gob.NewEncoder(w).Encode(resp))
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
// UnlockHandler - releases the acquired lock.
|
||||
|
@ -98,28 +88,10 @@ func (l *lockRESTServer) UnlockHandler(w http.ResponseWriter, r *http.Request) {
|
|||
return
|
||||
}
|
||||
|
||||
ctx := newContext(r, w, "Unlock")
|
||||
|
||||
var lockArgs dsync.LockArgs
|
||||
if r.ContentLength < 0 {
|
||||
l.writeErrorResponse(w, errInvalidArgument)
|
||||
return
|
||||
}
|
||||
|
||||
err := gob.NewDecoder(r.Body).Decode(&lockArgs)
|
||||
if err != nil {
|
||||
if _, err := l.ll.Unlock(getLockArgs(r)); err != nil {
|
||||
l.writeErrorResponse(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
success, err := l.ll.Unlock(lockArgs)
|
||||
if err != nil {
|
||||
l.writeErrorResponse(w, err)
|
||||
return
|
||||
}
|
||||
resp := lockResponse{Success: success}
|
||||
logger.LogIf(ctx, gob.NewEncoder(w).Encode(resp))
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
// LockHandler - Acquires an RLock.
|
||||
|
@ -129,27 +101,10 @@ func (l *lockRESTServer) RLockHandler(w http.ResponseWriter, r *http.Request) {
|
|||
return
|
||||
}
|
||||
|
||||
ctx := newContext(r, w, "RLock")
|
||||
var lockArgs dsync.LockArgs
|
||||
if r.ContentLength < 0 {
|
||||
l.writeErrorResponse(w, errInvalidArgument)
|
||||
return
|
||||
}
|
||||
|
||||
err := gob.NewDecoder(r.Body).Decode(&lockArgs)
|
||||
if err != nil {
|
||||
if _, err := l.ll.RLock(getLockArgs(r)); err != nil {
|
||||
l.writeErrorResponse(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
success, err := l.ll.RLock(lockArgs)
|
||||
if err != nil {
|
||||
l.writeErrorResponse(w, err)
|
||||
return
|
||||
}
|
||||
resp := lockResponse{Success: success}
|
||||
logger.LogIf(ctx, gob.NewEncoder(w).Encode(resp))
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
// RUnlockHandler - releases the acquired read lock.
|
||||
|
@ -159,27 +114,10 @@ func (l *lockRESTServer) RUnlockHandler(w http.ResponseWriter, r *http.Request)
|
|||
return
|
||||
}
|
||||
|
||||
ctx := newContext(r, w, "RUnlock")
|
||||
var lockArgs dsync.LockArgs
|
||||
if r.ContentLength < 0 {
|
||||
l.writeErrorResponse(w, errInvalidArgument)
|
||||
return
|
||||
}
|
||||
|
||||
err := gob.NewDecoder(r.Body).Decode(&lockArgs)
|
||||
if err != nil {
|
||||
if _, err := l.ll.RUnlock(getLockArgs(r)); err != nil {
|
||||
l.writeErrorResponse(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
success, err := l.ll.RUnlock(lockArgs)
|
||||
if err != nil {
|
||||
l.writeErrorResponse(w, err)
|
||||
return
|
||||
}
|
||||
resp := lockResponse{Success: success}
|
||||
logger.LogIf(ctx, gob.NewEncoder(w).Encode(resp))
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
// ForceUnlockHandler - force releases the acquired lock.
|
||||
|
@ -189,28 +127,10 @@ func (l *lockRESTServer) ForceUnlockHandler(w http.ResponseWriter, r *http.Reque
|
|||
return
|
||||
}
|
||||
|
||||
ctx := newContext(r, w, "ForceUnlock")
|
||||
|
||||
var lockArgs dsync.LockArgs
|
||||
if r.ContentLength < 0 {
|
||||
l.writeErrorResponse(w, errInvalidArgument)
|
||||
return
|
||||
}
|
||||
|
||||
err := gob.NewDecoder(r.Body).Decode(&lockArgs)
|
||||
if err != nil {
|
||||
if _, err := l.ll.ForceUnlock(getLockArgs(r)); err != nil {
|
||||
l.writeErrorResponse(w, err)
|
||||
return
|
||||
}
|
||||
|
||||
success, err := l.ll.ForceUnlock(lockArgs)
|
||||
if err != nil {
|
||||
l.writeErrorResponse(w, err)
|
||||
return
|
||||
}
|
||||
resp := lockResponse{Success: success}
|
||||
logger.LogIf(ctx, gob.NewEncoder(w).Encode(resp))
|
||||
w.(http.Flusher).Flush()
|
||||
}
|
||||
|
||||
// ExpiredHandler - query expired lock status.
|
||||
|
@ -220,19 +140,8 @@ func (l *lockRESTServer) ExpiredHandler(w http.ResponseWriter, r *http.Request)
|
|||
return
|
||||
}
|
||||
|
||||
ctx := newContext(r, w, "Expired")
|
||||
lockArgs := getLockArgs(r)
|
||||
|
||||
var lockArgs dsync.LockArgs
|
||||
if r.ContentLength < 0 {
|
||||
l.writeErrorResponse(w, errInvalidArgument)
|
||||
return
|
||||
}
|
||||
|
||||
err := gob.NewDecoder(r.Body).Decode(&lockArgs)
|
||||
if err != nil {
|
||||
l.writeErrorResponse(w, err)
|
||||
return
|
||||
}
|
||||
success := true
|
||||
l.ll.mutex.Lock()
|
||||
defer l.ll.mutex.Unlock()
|
||||
|
@ -246,11 +155,10 @@ func (l *lockRESTServer) ExpiredHandler(w http.ResponseWriter, r *http.Request)
|
|||
}
|
||||
}
|
||||
}
|
||||
// When we get here lock is no longer active due to either dsync.LockArgs.Resource
|
||||
// being absent from map or uid not found for given dsync.LockArgs.Resource
|
||||
resp := lockResponse{Success: success}
|
||||
logger.LogIf(ctx, gob.NewEncoder(w).Encode(resp))
|
||||
w.(http.Flusher).Flush()
|
||||
if !success {
|
||||
l.writeErrorResponse(w, errors.New("lock already expired"))
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// lockMaintenance loops over locks that have been active for some time and checks back
|
||||
|
@ -323,12 +231,14 @@ func startLockMaintenance(lkSrv *lockRESTServer) {
|
|||
// registerLockRESTHandlers - register lock rest router.
|
||||
func registerLockRESTHandlers(router *mux.Router) {
|
||||
subrouter := router.PathPrefix(lockRESTPath).Subrouter()
|
||||
subrouter.Methods(http.MethodPost).Path("/" + lockRESTMethodLock).HandlerFunc(httpTraceHdrs(globalLockServer.LockHandler))
|
||||
subrouter.Methods(http.MethodPost).Path("/" + lockRESTMethodRLock).HandlerFunc(httpTraceHdrs(globalLockServer.RLockHandler))
|
||||
subrouter.Methods(http.MethodPost).Path("/" + lockRESTMethodUnlock).HandlerFunc(httpTraceHdrs(globalLockServer.UnlockHandler))
|
||||
subrouter.Methods(http.MethodPost).Path("/" + lockRESTMethodRUnlock).HandlerFunc(httpTraceHdrs(globalLockServer.RUnlockHandler))
|
||||
subrouter.Methods(http.MethodPost).Path("/" + lockRESTMethodForceUnlock).HandlerFunc(httpTraceHdrs(globalLockServer.ForceUnlockHandler))
|
||||
subrouter.Methods(http.MethodPost).Path("/" + lockRESTMethodExpired).HandlerFunc(httpTraceAll(globalLockServer.ExpiredHandler))
|
||||
queries := restQueries(lockRESTUID, lockRESTSource, lockRESTResource, lockRESTServerAddr, lockRESTServerEndpoint)
|
||||
subrouter.Methods(http.MethodPost).Path(SlashSeparator + lockRESTMethodLock).HandlerFunc(httpTraceHdrs(globalLockServer.LockHandler)).Queries(queries...)
|
||||
subrouter.Methods(http.MethodPost).Path(SlashSeparator + lockRESTMethodRLock).HandlerFunc(httpTraceHdrs(globalLockServer.RLockHandler)).Queries(queries...)
|
||||
subrouter.Methods(http.MethodPost).Path(SlashSeparator + lockRESTMethodUnlock).HandlerFunc(httpTraceHdrs(globalLockServer.UnlockHandler)).Queries(queries...)
|
||||
subrouter.Methods(http.MethodPost).Path(SlashSeparator + lockRESTMethodRUnlock).HandlerFunc(httpTraceHdrs(globalLockServer.RUnlockHandler)).Queries(queries...)
|
||||
subrouter.Methods(http.MethodPost).Path(SlashSeparator + lockRESTMethodForceUnlock).HandlerFunc(httpTraceHdrs(globalLockServer.ForceUnlockHandler)).Queries(queries...)
|
||||
subrouter.Methods(http.MethodPost).Path(SlashSeparator + lockRESTMethodExpired).HandlerFunc(httpTraceAll(globalLockServer.ExpiredHandler)).Queries(queries...)
|
||||
|
||||
router.NotFoundHandler = http.HandlerFunc(httpTraceAll(notFoundHandler))
|
||||
|
||||
// Start lock maintenance from all lock servers.
|
||||
|
|
vendor/github.com/minio/minio/cmd/notification.go (generated, vendored; 13 lines changed)

@@ -232,6 +232,19 @@ func (sys *NotificationSys) LoadUsers() []NotificationPeerErr {
	return ng.Wait()
}

+// LoadGroup - loads a specific group on all peers.
+func (sys *NotificationSys) LoadGroup(group string) []NotificationPeerErr {
+	ng := WithNPeers(len(sys.peerClients))
+	for idx, client := range sys.peerClients {
+		if client == nil {
+			continue
+		}
+		client := client
+		ng.Go(context.Background(), func() error { return client.LoadGroup(group) }, idx, *client.host)
+	}
+	return ng.Wait()
+}
+
// BackgroundHealStatus - returns background heal status of all peers
func (sys *NotificationSys) BackgroundHealStatus() []madmin.BgHealState {
	states := make([]madmin.BgHealState, len(sys.peerClients))
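The new NotificationSys.LoadGroup above fans the group reload out to every peer and collects one error slot per peer. The standalone sketch below illustrates that fan-out/collect shape with plain goroutines; the peer type and its LoadGroup stub are hypothetical stand-ins, not the vendored WithNPeers helper.

package main

import (
	"fmt"
	"sync"
)

// peer is a stand-in for a remote peer client; the real code calls
// client.LoadGroup(group) over the peer REST API.
type peer struct{ host string }

func (p *peer) LoadGroup(group string) error {
	// Hypothetical remote call; always succeeds in this sketch.
	return nil
}

// loadGroupOnPeers reloads the group on all peers in parallel and returns
// one error slot per peer, mirroring the one-result-per-client shape above.
func loadGroupOnPeers(peers []*peer, group string) []error {
	errs := make([]error, len(peers))
	var wg sync.WaitGroup
	for i, p := range peers {
		if p == nil {
			continue // skip nil entries, as the vendored loop does
		}
		wg.Add(1)
		go func(i int, p *peer) {
			defer wg.Done()
			errs[i] = p.LoadGroup(group)
		}(i, p)
	}
	wg.Wait()
	return errs
}

func main() {
	peers := []*peer{{host: "node1:9000"}, nil, {host: "node3:9000"}}
	for i, err := range loadGroupOnPeers(peers, "staff") {
		fmt.Printf("peer %d: err=%v\n", i, err)
	}
}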
vendor/github.com/minio/minio/cmd/object-api-common.go (generated, vendored; 20 lines changed)

@@ -56,10 +56,10 @@ func init() {
}

// Checks if the object is a directory, this logic uses
-// if size == 0 and object ends with slashSeparator then
+// if size == 0 and object ends with SlashSeparator then
// returns true.
func isObjectDir(object string, size int64) bool {
-	return hasSuffix(object, slashSeparator) && size == 0
+	return hasSuffix(object, SlashSeparator) && size == 0
}

// Converts just bucket, object metadata into ObjectInfo datatype.

@@ -110,7 +110,7 @@ func cleanupDir(ctx context.Context, storage StorageAPI, volume, dirPath string)
	var delFunc func(string) error
	// Function to delete entries recursively.
	delFunc = func(entryPath string) error {
-		if !hasSuffix(entryPath, slashSeparator) {
+		if !hasSuffix(entryPath, SlashSeparator) {
			// Delete the file entry.
			err := storage.DeleteFile(volume, entryPath)
			logger.LogIf(ctx, err)

@@ -129,7 +129,7 @@ func cleanupDir(ctx context.Context, storage StorageAPI, volume, dirPath string)
		// Entry path is empty, just delete it.
		if len(entries) == 0 {
-			err = storage.DeleteFile(volume, path.Clean(entryPath))
+			err = storage.DeleteFile(volume, entryPath)
			logger.LogIf(ctx, err)
			return err
		}

@@ -157,7 +157,7 @@ func cleanupObjectsBulk(ctx context.Context, storage StorageAPI, volume string,
	var traverse func(string) ([]string, error)
	traverse = func(entryPath string) ([]string, error) {
		var output = make([]string, 0)
-		if !hasSuffix(entryPath, slashSeparator) {
+		if !hasSuffix(entryPath, SlashSeparator) {
			output = append(output, entryPath)
			return output, nil
		}

@@ -320,7 +320,7 @@ func listObjectsNonSlash(ctx context.Context, obj ObjectLayer, bucket, prefix, m
}

func listObjects(ctx context.Context, obj ObjectLayer, bucket, prefix, marker, delimiter string, maxKeys int, tpool *TreeWalkPool, listDir ListDirFunc, getObjInfo func(context.Context, string, string) (ObjectInfo, error), getObjectInfoDirs ...func(context.Context, string, string) (ObjectInfo, error)) (loi ListObjectsInfo, err error) {
-	if delimiter != slashSeparator && delimiter != "" {
+	if delimiter != SlashSeparator && delimiter != "" {
		return listObjectsNonSlash(ctx, obj, bucket, prefix, marker, delimiter, maxKeys, tpool, listDir, getObjInfo, getObjectInfoDirs...)
	}

@@ -346,7 +346,7 @@ func listObjects(ctx context.Context, obj ObjectLayer, bucket, prefix, marker, d
	// along // with the prefix. On a flat namespace with 'prefix'
	// as '/' we don't have any entries, since all the keys are
	// of form 'keyName/...'
-	if delimiter == slashSeparator && prefix == slashSeparator {
+	if delimiter == SlashSeparator && prefix == SlashSeparator {
		return loi, nil
	}

@@ -357,7 +357,7 @@ func listObjects(ctx context.Context, obj ObjectLayer, bucket, prefix, marker, d
	// Default is recursive, if delimiter is set then list non recursive.
	recursive := true
-	if delimiter == slashSeparator {
+	if delimiter == SlashSeparator {
		recursive = false
	}

@@ -382,7 +382,7 @@ func listObjects(ctx context.Context, obj ObjectLayer, bucket, prefix, marker, d
		var objInfo ObjectInfo
		var err error
-		if hasSuffix(walkResult.entry, slashSeparator) {
+		if hasSuffix(walkResult.entry, SlashSeparator) {
			for _, getObjectInfoDir := range getObjectInfoDirs {
				objInfo, err = getObjectInfoDir(ctx, bucket, walkResult.entry)
				if err == nil {

@@ -429,7 +429,7 @@ func listObjects(ctx context.Context, obj ObjectLayer, bucket, prefix, marker, d
	result := ListObjectsInfo{}
	for _, objInfo := range objInfos {
-		if objInfo.IsDir && delimiter == slashSeparator {
+		if objInfo.IsDir && delimiter == SlashSeparator {
			result.Prefixes = append(result.Prefixes, objInfo.Name)
			continue
		}
vendor/github.com/minio/minio/cmd/object-api-errors.go (generated, vendored; 4 lines changed)

@@ -203,14 +203,14 @@ func (e ObjectExistsAsDirectory) Error() string {
type PrefixAccessDenied GenericError

func (e PrefixAccessDenied) Error() string {
-	return "Prefix access is denied: " + e.Bucket + "/" + e.Object
+	return "Prefix access is denied: " + e.Bucket + SlashSeparator + e.Object
}

// ParentIsObject object access is denied.
type ParentIsObject GenericError

func (e ParentIsObject) Error() string {
-	return "Parent is object " + e.Bucket + "/" + path.Dir(e.Object)
+	return "Parent is object " + e.Bucket + SlashSeparator + path.Dir(e.Object)
}

// BucketExists bucket exists.
vendor/github.com/minio/minio/cmd/object-api-getobject_test.go (generated, vendored; 2 lines changed)

@@ -259,7 +259,7 @@ func testGetObjectPermissionDenied(obj ObjectLayer, instanceType string, disks [
	for i, testCase := range testCases {
		for _, d := range disks {
-			err = os.Chmod(d+"/"+testCase.bucketName+"/"+testCase.chmodPath, 0)
+			err = os.Chmod(d+SlashSeparator+testCase.bucketName+SlashSeparator+testCase.chmodPath, 0)
			if err != nil {
				t.Fatalf("Test %d, Unable to chmod: %v", i+1, err)
			}
vendor/github.com/minio/minio/cmd/object-api-input-checks.go (generated, vendored; 6 lines changed)

@@ -74,7 +74,7 @@ func checkListObjsArgs(ctx context.Context, bucket, prefix, marker, delimiter st
		}
	}
	// Verify if delimiter is anything other than '/', which we do not support.
-	if delimiter != "" && delimiter != slashSeparator {
+	if delimiter != "" && delimiter != SlashSeparator {
		logger.LogIf(ctx, UnsupportedDelimiter{
			Delimiter: delimiter,
		})

@@ -102,7 +102,7 @@ func checkListMultipartArgs(ctx context.Context, bucket, prefix, keyMarker, uplo
		return err
	}
	if uploadIDMarker != "" {
-		if hasSuffix(keyMarker, slashSeparator) {
+		if hasSuffix(keyMarker, SlashSeparator) {

			logger.LogIf(ctx, InvalidUploadIDKeyCombination{
				UploadIDMarker: uploadIDMarker,

@@ -196,7 +196,7 @@ func checkPutObjectArgs(ctx context.Context, bucket, object string, obj ObjectLa
		return err
	}
	if len(object) == 0 ||
-		(hasSuffix(object, slashSeparator) && size != 0) ||
+		(hasSuffix(object, SlashSeparator) && size != 0) ||
		!IsValidObjectPrefix(object) {
		return ObjectNameInvalid{
			Bucket: bucket,
vendor/github.com/minio/minio/cmd/object-api-listobjects_test.go (generated, vendored; 18 lines changed)

@@ -543,20 +543,20 @@ func testListObjects(obj ObjectLayer, instanceType string, t1 TestErrHandler) {
		{"test-bucket-list-object", "Asia", "", "", 10, resultCases[24], nil, true},
		// Tests with prefix and delimiter (55-57).
		// With delimeter the code should not recurse into the sub-directories of prefix Dir.
-		{"test-bucket-list-object", "Asia", "", "/", 10, resultCases[25], nil, true},
-		{"test-bucket-list-object", "new", "", "/", 10, resultCases[26], nil, true},
-		{"test-bucket-list-object", "Asia/India/", "", "/", 10, resultCases[27], nil, true},
+		{"test-bucket-list-object", "Asia", "", SlashSeparator, 10, resultCases[25], nil, true},
+		{"test-bucket-list-object", "new", "", SlashSeparator, 10, resultCases[26], nil, true},
+		{"test-bucket-list-object", "Asia/India/", "", SlashSeparator, 10, resultCases[27], nil, true},
		// Test with marker set as hierarhical value and with delimiter. (58-59)
-		{"test-bucket-list-object", "", "Asia/India/India-summer-photos-1", "/", 10, resultCases[28], nil, true},
-		{"test-bucket-list-object", "", "Asia/India/Karnataka/Bangalore/Koramangala/pics", "/", 10, resultCases[29], nil, true},
+		{"test-bucket-list-object", "", "Asia/India/India-summer-photos-1", SlashSeparator, 10, resultCases[28], nil, true},
+		{"test-bucket-list-object", "", "Asia/India/Karnataka/Bangalore/Koramangala/pics", SlashSeparator, 10, resultCases[29], nil, true},
		// Test with prefix and delimiter set to '/'. (60)
-		{"test-bucket-list-object", "/", "", "/", 10, resultCases[30], nil, true},
+		{"test-bucket-list-object", SlashSeparator, "", SlashSeparator, 10, resultCases[30], nil, true},
		// Test with invalid prefix (61)
-		{"test-bucket-list-object", "\\", "", "/", 10, ListObjectsInfo{}, ObjectNameInvalid{Bucket: "test-bucket-list-object", Object: "\\"}, false},
+		{"test-bucket-list-object", "\\", "", SlashSeparator, 10, ListObjectsInfo{}, ObjectNameInvalid{Bucket: "test-bucket-list-object", Object: "\\"}, false},
		// Test listing an empty directory in recursive mode (62)
		{"test-bucket-empty-dir", "", "", "", 10, resultCases[31], nil, true},
		// Test listing an empty directory in a non recursive mode (63)
-		{"test-bucket-empty-dir", "", "", "/", 10, resultCases[32], nil, true},
+		{"test-bucket-empty-dir", "", "", SlashSeparator, 10, resultCases[32], nil, true},
		// Test listing a directory which contains an empty directory (64)
		{"test-bucket-empty-dir", "", "temporary/", "", 10, resultCases[33], nil, true},
	}

@@ -595,7 +595,7 @@ func testListObjects(obj ObjectLayer, instanceType string, t1 TestErrHandler) {
				t.Errorf("Test %d: %s: Expected object name to be \"%s\", but found \"%s\" instead", i+1, instanceType, testCase.result.Objects[j].Name, result.Objects[j].Name)
			}
			// FIXME: we should always check for ETag
-			if result.Objects[j].ETag == "" && !strings.HasSuffix(result.Objects[j].Name, slashSeparator) {
+			if result.Objects[j].ETag == "" && !strings.HasSuffix(result.Objects[j].Name, SlashSeparator) {
				t.Errorf("Test %d: %s: Expected ETag to be not empty, but found empty instead (%v)", i+1, instanceType, result.Objects[j].Name)
			}
vendor/github.com/minio/minio/cmd/object-api-multipart_test.go (generated, vendored; 4 lines changed)

@@ -680,7 +680,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
		// Expecting the result to contain one MultipartInfo entry and IsTruncated to be false.
		{
			MaxUploads: 2,
-			Delimiter: "/",
+			Delimiter: SlashSeparator,
			Prefix: "",
			IsTruncated: false,
			Uploads: []MultipartInfo{

@@ -1170,7 +1170,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan
		{bucketNames[0], "orange", "", "", "", 2, listMultipartResults[12], nil, true},
		{bucketNames[0], "Asia", "", "", "", 2, listMultipartResults[13], nil, true},
		// setting delimiter (Test number 27).
-		{bucketNames[0], "", "", "", "/", 2, listMultipartResults[14], nil, true},
+		{bucketNames[0], "", "", "", SlashSeparator, 2, listMultipartResults[14], nil, true},
		//Test case with multiple uploadID listing for given object (Test number 28).
		{bucketNames[1], "", "", "", "", 100, listMultipartResults[15], nil, true},
		// Test case with multiple uploadID listing for given object, but uploadID marker set.
vendor/github.com/minio/minio/cmd/object-api-utils.go (generated, vendored; 24 lines changed)

@@ -51,7 +51,7 @@ const (
	// Multipart meta prefix.
	mpartMetaPrefix = "multipart"
	// MinIO Multipart meta prefix.
-	minioMetaMultipartBucket = minioMetaBucket + "/" + mpartMetaPrefix
+	minioMetaMultipartBucket = minioMetaBucket + SlashSeparator + mpartMetaPrefix
	// MinIO Tmp meta prefix.
	minioMetaTmpBucket = minioMetaBucket + "/tmp"
	// DNS separator (period), used for bucket name validation.

@@ -131,12 +131,12 @@ func IsValidBucketName(bucket string) bool {
//
// - Backslash ("\")
//
-// additionally minio does not support object names with trailing "/".
+// additionally minio does not support object names with trailing SlashSeparator.
func IsValidObjectName(object string) bool {
	if len(object) == 0 {
		return false
	}
-	if hasSuffix(object, slashSeparator) {
+	if hasSuffix(object, SlashSeparator) {
		return false
	}
	return IsValidObjectPrefix(object)

@@ -168,7 +168,7 @@ func checkObjectNameForLengthAndSlash(bucket, object string) error {
		}
	}
	// Check for slash as prefix in object name
-	if hasPrefix(object, slashSeparator) {
+	if hasPrefix(object, SlashSeparator) {
		return ObjectNamePrefixAsSlash{
			Bucket: bucket,
			Object: object,

@@ -177,20 +177,20 @@ func checkObjectNameForLengthAndSlash(bucket, object string) error {
	return nil
}

-// Slash separator.
-const slashSeparator = "/"
+// SlashSeparator - slash separator.
+const SlashSeparator = "/"

// retainSlash - retains slash from a path.
func retainSlash(s string) string {
-	return strings.TrimSuffix(s, slashSeparator) + slashSeparator
+	return strings.TrimSuffix(s, SlashSeparator) + SlashSeparator
}

-// pathJoin - like path.Join() but retains trailing "/" of the last element
+// pathJoin - like path.Join() but retains trailing SlashSeparator of the last element
func pathJoin(elem ...string) string {
	trailingSlash := ""
	if len(elem) > 0 {
-		if hasSuffix(elem[len(elem)-1], slashSeparator) {
-			trailingSlash = "/"
+		if hasSuffix(elem[len(elem)-1], SlashSeparator) {
+			trailingSlash = SlashSeparator
		}
	}
	return path.Join(elem...) + trailingSlash

@@ -292,7 +292,7 @@ func isStringEqual(s1 string, s2 string) bool {
// Ignores all reserved bucket names or invalid bucket names.
func isReservedOrInvalidBucket(bucketEntry string, strict bool) bool {
-	bucketEntry = strings.TrimSuffix(bucketEntry, slashSeparator)
+	bucketEntry = strings.TrimSuffix(bucketEntry, SlashSeparator)
	if strict {
		if err := s3utils.CheckValidBucketNameStrict(bucketEntry); err != nil {
			return true

@@ -319,7 +319,7 @@ func isMinioReservedBucket(bucketName string) bool {
func getHostsSlice(records []dns.SrvRecord) []string {
	var hosts []string
	for _, r := range records {
-		hosts = append(hosts, r.Host)
+		hosts = append(hosts, net.JoinHostPort(r.Host, fmt.Sprintf("%d", r.Port)))
	}
	return hosts
}
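The hunk above exports SlashSeparator and keeps retainSlash/pathJoin responsible for trailing separators on object keys. A minimal standalone sketch of those two helpers, using strings.HasSuffix in place of the unexported hasSuffix helper:

package main

import (
	"fmt"
	"path"
	"strings"
)

const SlashSeparator = "/"

// retainSlash ensures exactly one trailing separator, as in the vendored helper.
func retainSlash(s string) string {
	return strings.TrimSuffix(s, SlashSeparator) + SlashSeparator
}

// pathJoin joins like path.Join but keeps the trailing separator of the last element,
// which is how directory-style object keys stay distinguishable from plain objects.
func pathJoin(elem ...string) string {
	trailingSlash := ""
	if len(elem) > 0 {
		if strings.HasSuffix(elem[len(elem)-1], SlashSeparator) {
			trailingSlash = SlashSeparator
		}
	}
	return path.Join(elem...) + trailingSlash
}

func main() {
	fmt.Println(pathJoin("bucket", "prefix", "dir/")) // bucket/prefix/dir/
	fmt.Println(pathJoin("bucket", "object"))         // bucket/object
	fmt.Println(retainSlash("bucket"))                // bucket/
}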
vendor/github.com/minio/minio/cmd/object-handlers.go (generated, vendored; 2 lines changed)

@@ -1233,7 +1233,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req
	var objectEncryptionKey []byte
	if objectAPI.IsEncryptionSupported() {
-		if hasServerSideEncryptionHeader(r.Header) && !hasSuffix(object, slashSeparator) { // handle SSE requests
+		if hasServerSideEncryptionHeader(r.Header) && !hasSuffix(object, SlashSeparator) { // handle SSE requests
			reader, objectEncryptionKey, err = EncryptRequest(hashReader, r, bucket, object, metadata)
			if err != nil {
				writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r))
vendor/github.com/minio/minio/cmd/object-handlers_test.go (generated, vendored; 80 lines changed)

@@ -482,7 +482,7 @@ func testAPIGetObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
			expectedContent: encodeResponse(getAPIErrorResponse(ctx,
				getAPIError(ErrNoSuchKey),
-				"/"+bucketName+"/"+". ./. ./etc", "", "")),
+				SlashSeparator+bucketName+SlashSeparator+". ./. ./etc", "", "")),
			expectedRespStatus: http.StatusNotFound,
		},
		// Test case - 9.

@@ -496,7 +496,7 @@ func testAPIGetObjectHandler(obj ObjectLayer, instanceType, bucketName string, a
			expectedContent: encodeResponse(getAPIErrorResponse(ctx,
				getAPIError(ErrInvalidObjectName),
-				"/"+bucketName+"/"+". ./../etc", "", "")),
+				SlashSeparator+bucketName+SlashSeparator+". ./../etc", "", "")),
			expectedRespStatus: http.StatusBadRequest,
		},
		// Test case - 10.

@@ -1593,7 +1593,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
		{
			bucketName: bucketName,
			uploadID: uploadID,
-			copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+			copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
			accessKey: credentials.AccessKey,
			secretKey: credentials.SecretKey,
			expectedRespStatus: http.StatusOK,

@@ -1604,7 +1604,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
		{
			bucketName: bucketName,
			uploadID: uploadID,
-			copySourceHeader: url.QueryEscape("/"),
+			copySourceHeader: url.QueryEscape(SlashSeparator),
			accessKey: credentials.AccessKey,
			secretKey: credentials.SecretKey,

@@ -1617,7 +1617,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
		{
			bucketName: bucketName,
			uploadID: uploadID,
-			copySourceHeader: url.QueryEscape("/" + bucketName + "/" + testObject),
+			copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + testObject),
			accessKey: credentials.AccessKey,
			secretKey: credentials.SecretKey,

@@ -1629,7 +1629,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
		{
			bucketName: bucketName,
			uploadID: uploadID,
-			copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+			copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
			copySourceRange: "bytes=500-4096",
			accessKey: credentials.AccessKey,
			secretKey: credentials.SecretKey,

@@ -1642,7 +1642,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
		{
			bucketName: bucketName,
			uploadID: uploadID,
-			copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+			copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
			copySourceRange: "bytes=6145-",
			accessKey: credentials.AccessKey,
			secretKey: credentials.SecretKey,

@@ -1655,7 +1655,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
		{
			bucketName: bucketName,
			uploadID: uploadID,
-			copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+			copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
			copySourceRange: "bytes=0-6144",
			accessKey: credentials.AccessKey,
			secretKey: credentials.SecretKey,

@@ -1683,7 +1683,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
		{
			bucketName: bucketName,
			uploadID: uploadID,
-			copySourceHeader: url.QueryEscape("/" + bucketName + "/" + "non-existent-object"),
+			copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + "non-existent-object"),
			accessKey: credentials.AccessKey,
			secretKey: credentials.SecretKey,

@@ -1697,7 +1697,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
		{
			bucketName: "non-existent-destination-bucket",
			uploadID: uploadID,
-			copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+			copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
			accessKey: credentials.AccessKey,
			secretKey: credentials.SecretKey,

@@ -1709,7 +1709,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
		{
			bucketName: bucketName,
			uploadID: uploadID,
-			copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+			copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
			accessKey: "Invalid-AccessID",
			secretKey: credentials.SecretKey,

@@ -1721,7 +1721,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
		{
			bucketName: bucketName,
			uploadID: "-1",
-			copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+			copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
			accessKey: credentials.AccessKey,
			secretKey: credentials.SecretKey,

@@ -1732,7 +1732,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
		{
			bucketName: bucketName,
			uploadID: uploadID,
-			copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+			copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
			invalidPartNumber: true,
			accessKey: credentials.AccessKey,
			secretKey: credentials.SecretKey,

@@ -1743,7 +1743,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
		{
			bucketName: bucketName,
			uploadID: uploadID,
-			copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+			copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
			maximumPartNumber: true,
			accessKey: credentials.AccessKey,
			secretKey: credentials.SecretKey,

@@ -1753,7 +1753,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
		{
			bucketName: bucketName,
			uploadID: uploadID,
-			copySourceHeader: url.QueryEscape("/"+bucketName+"/"+objectName) + "?versionId=null",
+			copySourceHeader: url.QueryEscape(SlashSeparator+bucketName+SlashSeparator+objectName) + "?versionId=null",
			accessKey: credentials.AccessKey,
			secretKey: credentials.SecretKey,
			expectedRespStatus: http.StatusOK,

@@ -1762,7 +1762,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
		{
			bucketName: bucketName,
			uploadID: uploadID,
-			copySourceHeader: url.QueryEscape("/"+bucketName+"/"+objectName) + "?versionId=17",
+			copySourceHeader: url.QueryEscape(SlashSeparator+bucketName+SlashSeparator+objectName) + "?versionId=17",
			accessKey: credentials.AccessKey,
			secretKey: credentials.SecretKey,
			expectedRespStatus: http.StatusNotFound,

@@ -1771,7 +1771,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
		{
			bucketName: bucketName,
			uploadID: uploadID,
-			copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+			copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
			copySourceVersionID: "null",
			accessKey: credentials.AccessKey,
			secretKey: credentials.SecretKey,

@@ -1781,7 +1781,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
		{
			bucketName: bucketName,
			uploadID: uploadID,
-			copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+			copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
			copySourceVersionID: "17",
			accessKey: credentials.AccessKey,
			secretKey: credentials.SecretKey,

@@ -1852,7 +1852,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri
	// Below is how CopyObjectPartHandler is registered.
	// bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(api.CopyObjectPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}")
	// Its necessary to set the "X-Amz-Copy-Source" header for the request to be accepted by the handler.
-	nilReq.Header.Set("X-Amz-Copy-Source", url.QueryEscape("/"+nilBucket+"/"+nilObject))
+	nilReq.Header.Set("X-Amz-Copy-Source", url.QueryEscape(SlashSeparator+nilBucket+SlashSeparator+nilObject))

	// execute the object layer set to `nil` test.
	// `ExecObjectLayerAPINilTest` manages the operation.

@@ -1947,7 +1947,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
		{
			bucketName: bucketName,
			newObjectName: "newObject1",
-			copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+			copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
			accessKey: credentials.AccessKey,
			secretKey: credentials.SecretKey,
			metadata: map[string]string{

@@ -1961,7 +1961,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
		{
			bucketName: bucketName,
			newObjectName: "newObject1",
-			copySourceHeader: url.QueryEscape("/"),
+			copySourceHeader: url.QueryEscape(SlashSeparator),
			accessKey: credentials.AccessKey,
			secretKey: credentials.SecretKey,

@@ -1973,7 +1973,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
		{
			bucketName: bucketName,
			newObjectName: objectName,
-			copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+			copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
			accessKey: credentials.AccessKey,
			secretKey: credentials.SecretKey,

@@ -1986,7 +1986,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
		{
			bucketName: bucketName,
			newObjectName: objectName,
-			copySourceHeader: url.QueryEscape(bucketName + "/" + objectName),
+			copySourceHeader: url.QueryEscape(bucketName + SlashSeparator + objectName),
			accessKey: credentials.AccessKey,
			secretKey: credentials.SecretKey,

@@ -1999,7 +1999,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
		{
			bucketName: bucketName,
			newObjectName: objectName,
-			copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+			copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
			metadata: map[string]string{
				"Content-Type": "application/json",
			},

@@ -2015,7 +2015,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
		{
			bucketName: bucketName,
			newObjectName: "newObject1",
-			copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+			copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
			metadata: map[string]string{
				"Content-Type": "application/json",
			},

@@ -2032,7 +2032,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
		{
			bucketName: bucketName,
			newObjectName: objectName,
-			copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+			copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
			metadata: map[string]string{
				"Content-Type": "application/json",
			},

@@ -2050,7 +2050,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
		{
			bucketName: bucketName,
			newObjectName: objectName,
-			copySourceHeader: url.QueryEscape("/" + bucketName + "/" + "non-existent-object"),
+			copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + "non-existent-object"),
			accessKey: credentials.AccessKey,
			secretKey: credentials.SecretKey,

@@ -2064,7 +2064,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
		{
			bucketName: "non-existent-destination-bucket",
			newObjectName: objectName,
-			copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+			copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
			accessKey: credentials.AccessKey,
			secretKey: credentials.SecretKey,

@@ -2076,7 +2076,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
		{
			bucketName: bucketName,
			newObjectName: objectName,
-			copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+			copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
			accessKey: "Invalid-AccessID",
			secretKey: credentials.SecretKey,

@@ -2086,7 +2086,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
		{
			bucketName: bucketName,
			newObjectName: "newObject1",
-			copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+			copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
			copyModifiedHeader: "Mon, 02 Jan 2006 15:04:05 GMT",
			accessKey: credentials.AccessKey,
			secretKey: credentials.SecretKey,

@@ -2096,7 +2096,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
		{
			bucketName: bucketName,
			newObjectName: "newObject1",
-			copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+			copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
			copyModifiedHeader: "Mon, 02 Jan 2217 15:04:05 GMT",
			accessKey: credentials.AccessKey,
			secretKey: credentials.SecretKey,

@@ -2106,7 +2106,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
		{
			bucketName: bucketName,
			newObjectName: "newObject1",
-			copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+			copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
			copyModifiedHeader: "Mon, 02 Jan 2217 15:04:05 +00:00",
			accessKey: credentials.AccessKey,
			secretKey: credentials.SecretKey,

@@ -2116,7 +2116,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
		{
			bucketName: bucketName,
			newObjectName: "newObject1",
-			copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+			copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
			copyUnmodifiedHeader: "Mon, 02 Jan 2217 15:04:05 GMT",
			accessKey: credentials.AccessKey,
			secretKey: credentials.SecretKey,

@@ -2126,7 +2126,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
		{
			bucketName: bucketName,
			newObjectName: "newObject1",
-			copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+			copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
			copyUnmodifiedHeader: "Mon, 02 Jan 2007 15:04:05 GMT",
			accessKey: credentials.AccessKey,
			secretKey: credentials.SecretKey,

@@ -2136,7 +2136,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
		{
			bucketName: bucketName,
			newObjectName: "newObject1",
-			copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+			copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
			copyUnmodifiedHeader: "Mon, 02 Jan 2007 15:04:05 +00:00",
			accessKey: credentials.AccessKey,
			secretKey: credentials.SecretKey,

@@ -2146,7 +2146,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
		{
			bucketName: bucketName,
			newObjectName: "newObject1",
-			copySourceHeader: url.QueryEscape("/"+bucketName+"/"+objectName) + "?versionId=null",
+			copySourceHeader: url.QueryEscape(SlashSeparator+bucketName+SlashSeparator+objectName) + "?versionId=null",
			accessKey: credentials.AccessKey,
			secretKey: credentials.SecretKey,
			expectedRespStatus: http.StatusOK,

@@ -2155,7 +2155,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
		{
			bucketName: bucketName,
			newObjectName: "newObject1",
-			copySourceHeader: url.QueryEscape("/"+bucketName+"/"+objectName) + "?versionId=17",
+			copySourceHeader: url.QueryEscape(SlashSeparator+bucketName+SlashSeparator+objectName) + "?versionId=17",
			accessKey: credentials.AccessKey,
			secretKey: credentials.SecretKey,
			expectedRespStatus: http.StatusNotFound,

@@ -2164,7 +2164,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
		{
			bucketName: bucketName,
			newObjectName: "newObject1",
-			copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+			copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
			copySourceVersionID: "null",
			accessKey: credentials.AccessKey,
			secretKey: credentials.SecretKey,

@@ -2174,7 +2174,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
		{
			bucketName: bucketName,
			newObjectName: "newObject1",
-			copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName),
+			copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName),
			copySourceVersionID: "17",
			accessKey: credentials.AccessKey,
			secretKey: credentials.SecretKey,

@@ -2307,7 +2307,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string,
	// Below is how CopyObjectHandler is registered.
	// bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?")
	// Its necessary to set the "X-Amz-Copy-Source" header for the request to be accepted by the handler.
-	nilReq.Header.Set("X-Amz-Copy-Source", url.QueryEscape("/"+nilBucket+"/"+nilObject))
+	nilReq.Header.Set("X-Amz-Copy-Source", url.QueryEscape(SlashSeparator+nilBucket+SlashSeparator+nilObject))
	if err != nil {
		t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType)
	}
vendor/github.com/minio/minio/cmd/object_api_suite_test.go (generated, vendored; 4 lines changed)

@@ -340,7 +340,7 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) {
		if err != nil {
			t.Fatalf("%s: <ERROR> %s", instanceType, err)
		}
-		result, err = obj.ListObjects(context.Background(), "bucket", "this/is/", "", "/", 10)
+		result, err = obj.ListObjects(context.Background(), "bucket", "this/is/", "", SlashSeparator, 10)
		if err != nil {
			t.Fatalf("%s: <ERROR> %s", instanceType, err)
		}

@@ -354,7 +354,7 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) {
	// check delimited results with delimiter without prefix.
	{
-		result, err = obj.ListObjects(context.Background(), "bucket", "", "", "/", 1000)
+		result, err = obj.ListObjects(context.Background(), "bucket", "", "", SlashSeparator, 1000)
		if err != nil {
			t.Fatalf("%s: <ERROR> %s", instanceType, err)
		}
vendor/github.com/minio/minio/cmd/os-reliable.go (generated, vendored; 6 lines changed)

@@ -156,11 +156,11 @@ func renameAll(srcFilePath, dstFilePath string) (err error) {
// Reliably retries os.RenameAll if for some reason os.RenameAll returns
// syscall.ENOENT (parent does not exist).
func reliableRename(srcFilePath, dstFilePath string) (err error) {
-	if err = reliableMkdirAll(path.Dir(dstFilePath), 0777); err != nil {
-		return err
-	}
	i := 0
	for {
+		if err = reliableMkdirAll(path.Dir(dstFilePath), 0777); err != nil {
+			return err
+		}
		// After a successful parent directory create attempt a renameAll.
		if err = os.Rename(srcFilePath, dstFilePath); err != nil {
			// Retry only for the first retryable error.
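The reworked reliableRename above recreates the destination's parent directory on every attempt, so a rename that races with a directory removal can recover. A self-contained sketch of that retry shape, assuming a single retry on ENOENT as in the vendored helper:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// renameWithRetry re-creates the destination parent before each attempt and
// retries once when the rename fails because the parent vanished (ENOENT).
func renameWithRetry(src, dst string) error {
	retried := false
	for {
		if err := os.MkdirAll(filepath.Dir(dst), 0777); err != nil {
			return err
		}
		err := os.Rename(src, dst)
		if err == nil {
			return nil
		}
		// Retry only once, and only for the "parent does not exist" case.
		if os.IsNotExist(err) && !retried {
			retried = true
			continue
		}
		return err
	}
}

func main() {
	src, _ := os.CreateTemp("", "src")
	src.Close()
	dst := filepath.Join(os.TempDir(), "nested", "dir", "dst")
	fmt.Println(renameWithRetry(src.Name(), dst))
}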
vendor/github.com/minio/minio/cmd/peer-rest-client.go (generated, vendored; 22 lines changed)

@@ -443,6 +443,18 @@ func (client *peerRESTClient) LoadUsers() (err error) {
	return nil
}

+// LoadGroup - send load group command to peers.
+func (client *peerRESTClient) LoadGroup(group string) error {
+	values := make(url.Values)
+	values.Set(peerRESTGroup, group)
+	respBody, err := client.call(peerRESTMethodLoadGroup, values, nil, -1)
+	if err != nil {
+		return err
+	}
+	defer http.DrainBody(respBody)
+	return nil
+}
+
// SignalService - sends signal to peer nodes.
func (client *peerRESTClient) SignalService(sig serviceSignal) error {
	values := make(url.Values)

@@ -499,10 +511,12 @@ func (client *peerRESTClient) doTrace(traceCh chan interface{}, doneCh chan stru
		if err = dec.Decode(&info); err != nil {
			return
		}
-		select {
-		case traceCh <- info:
-		default:
-			// Do not block on slow receivers.
+		if len(info.NodeName) > 0 {
+			select {
+			case traceCh <- info:
+			default:
+				// Do not block on slow receivers.
+			}
		}
	}
}
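The doTrace change above skips keep-alive records (entries with an empty NodeName) and keeps the non-blocking send, so a slow trace consumer never stalls the decoder. A standalone sketch of that select/default pattern, with a hypothetical traceEntry type standing in for the vendored trace.Info:

package main

import "fmt"

type traceEntry struct {
	NodeName string
	Path     string
}

// publish forwards real entries without ever blocking: keep-alive records
// (empty NodeName) are dropped, and if the receiver's buffer is full the
// entry is skipped rather than stalling the sender.
func publish(ch chan<- traceEntry, e traceEntry) (sent bool) {
	if len(e.NodeName) == 0 {
		return false // keep-alive placeholder, not a real trace
	}
	select {
	case ch <- e:
		return true
	default:
		return false // slow receiver; drop instead of blocking
	}
}

func main() {
	ch := make(chan traceEntry, 1)
	fmt.Println(publish(ch, traceEntry{}))                              // false: keep-alive
	fmt.Println(publish(ch, traceEntry{NodeName: "node1", Path: "/x"})) // true
	fmt.Println(publish(ch, traceEntry{NodeName: "node1", Path: "/y"})) // false: buffer full
}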
vendor/github.com/minio/minio/cmd/peer-rest-common.go (generated, vendored; 2 lines changed)

@@ -34,6 +34,7 @@ const (
	peerRESTMethodLoadPolicy = "loadpolicy"
	peerRESTMethodDeletePolicy = "deletepolicy"
	peerRESTMethodLoadUsers = "loadusers"
+	peerRESTMethodLoadGroup = "loadgroup"
	peerRESTMethodStartProfiling = "startprofiling"
	peerRESTMethodDownloadProfilingData = "downloadprofilingdata"
	peerRESTMethodBucketPolicySet = "setbucketpolicy"

@@ -50,6 +51,7 @@ const (
const (
	peerRESTBucket = "bucket"
	peerRESTUser = "user"
+	peerRESTGroup = "group"
	peerRESTUserTemp = "user-temp"
	peerRESTPolicy = "policy"
	peerRESTSignal = "signal"
vendor/github.com/minio/minio/cmd/peer-rest-server.go (generated, vendored; 101 lines changed)

@@ -28,7 +28,6 @@ import (
	"time"

	"github.com/gorilla/mux"
-	xhttp "github.com/minio/minio/cmd/http"
	"github.com/minio/minio/cmd/logger"
	"github.com/minio/minio/pkg/event"
	"github.com/minio/minio/pkg/lifecycle"

@@ -246,13 +245,31 @@ func (s *peerRESTServer) LoadUsersHandler(w http.ResponseWriter, r *http.Request
		return
	}

+	err := globalIAMSys.Load()
+	if err != nil {
+		s.writeErrorResponse(w, err)
+		return
+	}
+
+	w.(http.Flusher).Flush()
+}
+
+// LoadGroupHandler - reloads group along with members list.
+func (s *peerRESTServer) LoadGroupHandler(w http.ResponseWriter, r *http.Request) {
+	if !s.IsValid(w, r) {
+		s.writeErrorResponse(w, errors.New("Invalid request"))
+		return
+	}
+
	objAPI := newObjectLayerFn()
	if objAPI == nil {
		s.writeErrorResponse(w, errServerNotInitialized)
		return
	}

-	err := globalIAMSys.Load(objAPI)
+	vars := mux.Vars(r)
+	group := vars[peerRESTGroup]
+	err := globalIAMSys.LoadGroup(objAPI, group)
	if err != nil {
		s.writeErrorResponse(w, err)
		return

@@ -719,30 +736,22 @@ func (s *peerRESTServer) TraceHandler(w http.ResponseWriter, r *http.Request) {
	trcAll := r.URL.Query().Get(peerRESTTraceAll) == "true"
	trcErr := r.URL.Query().Get(peerRESTTraceErr) == "true"

-	w.Header().Set(xhttp.Connection, "close")
	w.WriteHeader(http.StatusOK)
	w.(http.Flusher).Flush()

-	filter := func(entry interface{}) bool {
-		trcInfo := entry.(trace.Info)
-
-		if trcErr && isHTTPStatusOK(trcInfo.RespInfo.StatusCode) {
-			return false
-		}
-		if trcAll {
-			return true
-		}
-		return !strings.HasPrefix(trcInfo.ReqInfo.Path, minioReservedBucketPath)
-
-	}
-
	doneCh := make(chan struct{})
	defer close(doneCh)

+	// Trace Publisher uses nonblocking publish and hence does not wait for slow subscribers.
	// Use buffered channel to take care of burst sends or slow w.Write()
	ch := make(chan interface{}, 2000)
-	globalHTTPTrace.Subscribe(ch, doneCh, filter)
+
+	globalHTTPTrace.Subscribe(ch, doneCh, func(entry interface{}) bool {
+		return mustTrace(entry, trcAll, trcErr)
+	})

	keepAliveTicker := time.NewTicker(500 * time.Millisecond)
	defer keepAliveTicker.Stop()

	enc := gob.NewEncoder(w)
	for {

@@ -752,8 +761,11 @@ func (s *peerRESTServer) TraceHandler(w http.ResponseWriter, r *http.Request) {
				return
			}
			w.(http.Flusher).Flush()
		case <-r.Context().Done():
			return
+		case <-keepAliveTicker.C:
+			if err := enc.Encode(&trace.Info{}); err != nil {
+				return
+			}
+			w.(http.Flusher).Flush()
		}
	}
}

@@ -790,37 +802,38 @@ func (s *peerRESTServer) IsValid(w http.ResponseWriter, r *http.Request) bool {
func registerPeerRESTHandlers(router *mux.Router) {
	server := &peerRESTServer{}
	subrouter := router.PathPrefix(peerRESTPath).Subrouter()
-	subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodGetLocks).HandlerFunc(httpTraceHdrs(server.GetLocksHandler))
-	subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodServerInfo).HandlerFunc(httpTraceHdrs(server.ServerInfoHandler))
-	subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodCPULoadInfo).HandlerFunc(httpTraceHdrs(server.CPULoadInfoHandler))
-	subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodMemUsageInfo).HandlerFunc(httpTraceHdrs(server.MemUsageInfoHandler))
-	subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodDrivePerfInfo).HandlerFunc(httpTraceHdrs(server.DrivePerfInfoHandler))
-	subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodDeleteBucket).HandlerFunc(httpTraceHdrs(server.DeleteBucketHandler)).Queries(restQueries(peerRESTBucket)...)
-	subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodSignalService).HandlerFunc(httpTraceHdrs(server.SignalServiceHandler)).Queries(restQueries(peerRESTSignal)...)
+	subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodGetLocks).HandlerFunc(httpTraceHdrs(server.GetLocksHandler))
+	subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodServerInfo).HandlerFunc(httpTraceHdrs(server.ServerInfoHandler))
+	subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodCPULoadInfo).HandlerFunc(httpTraceHdrs(server.CPULoadInfoHandler))
+	subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodMemUsageInfo).HandlerFunc(httpTraceHdrs(server.MemUsageInfoHandler))
+	subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodDrivePerfInfo).HandlerFunc(httpTraceHdrs(server.DrivePerfInfoHandler))
+	subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodDeleteBucket).HandlerFunc(httpTraceHdrs(server.DeleteBucketHandler)).Queries(restQueries(peerRESTBucket)...)
+	subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodSignalService).HandlerFunc(httpTraceHdrs(server.SignalServiceHandler)).Queries(restQueries(peerRESTSignal)...)

-	subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodBucketPolicyRemove).HandlerFunc(httpTraceAll(server.RemoveBucketPolicyHandler)).Queries(restQueries(peerRESTBucket)...)
-	subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodBucketPolicySet).HandlerFunc(httpTraceHdrs(server.SetBucketPolicyHandler)).Queries(restQueries(peerRESTBucket)...)
+	subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodBucketPolicyRemove).HandlerFunc(httpTraceAll(server.RemoveBucketPolicyHandler)).Queries(restQueries(peerRESTBucket)...)
+	subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodBucketPolicySet).HandlerFunc(httpTraceHdrs(server.SetBucketPolicyHandler)).Queries(restQueries(peerRESTBucket)...)

-	subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodDeletePolicy).HandlerFunc(httpTraceAll(server.LoadPolicyHandler)).Queries(restQueries(peerRESTPolicy)...)
-	subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodLoadPolicy).HandlerFunc(httpTraceAll(server.LoadPolicyHandler)).Queries(restQueries(peerRESTPolicy)...)
-	subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodDeleteUser).HandlerFunc(httpTraceAll(server.LoadUserHandler)).Queries(restQueries(peerRESTUser)...)
-	subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodLoadUser).HandlerFunc(httpTraceAll(server.LoadUserHandler)).Queries(restQueries(peerRESTUser, peerRESTUserTemp)...)
-	subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodLoadUsers).HandlerFunc(httpTraceAll(server.LoadUsersHandler))
+	subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodDeletePolicy).HandlerFunc(httpTraceAll(server.LoadPolicyHandler)).Queries(restQueries(peerRESTPolicy)...)
+	subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodLoadPolicy).HandlerFunc(httpTraceAll(server.LoadPolicyHandler)).Queries(restQueries(peerRESTPolicy)...)
+	subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodDeleteUser).HandlerFunc(httpTraceAll(server.LoadUserHandler)).Queries(restQueries(peerRESTUser)...)
+	subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodLoadUser).HandlerFunc(httpTraceAll(server.LoadUserHandler)).Queries(restQueries(peerRESTUser, peerRESTUserTemp)...)
+	subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodLoadUsers).HandlerFunc(httpTraceAll(server.LoadUsersHandler))
+	subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodLoadGroup).HandlerFunc(httpTraceAll(server.LoadGroupHandler)).Queries(restQueries(peerRESTGroup)...)

-	subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodStartProfiling).HandlerFunc(httpTraceAll(server.StartProfilingHandler)).Queries(restQueries(peerRESTProfiler)...)
-	subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodDownloadProfilingData).HandlerFunc(httpTraceHdrs(server.DownloadProflingDataHandler))
+	subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodStartProfiling).HandlerFunc(httpTraceAll(server.StartProfilingHandler)).Queries(restQueries(peerRESTProfiler)...)
+	subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodDownloadProfilingData).HandlerFunc(httpTraceHdrs(server.DownloadProflingDataHandler))

-	subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodTargetExists).HandlerFunc(httpTraceHdrs(server.TargetExistsHandler)).Queries(restQueries(peerRESTBucket)...)
-	subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodSendEvent).HandlerFunc(httpTraceHdrs(server.SendEventHandler)).Queries(restQueries(peerRESTBucket)...)
-	subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodBucketNotificationPut).HandlerFunc(httpTraceHdrs(server.PutBucketNotificationHandler)).Queries(restQueries(peerRESTBucket)...)
-	subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodBucketNotificationListen).HandlerFunc(httpTraceHdrs(server.ListenBucketNotificationHandler)).Queries(restQueries(peerRESTBucket)...)
+	subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodTargetExists).HandlerFunc(httpTraceHdrs(server.TargetExistsHandler)).Queries(restQueries(peerRESTBucket)...)
+	subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodSendEvent).HandlerFunc(httpTraceHdrs(server.SendEventHandler)).Queries(restQueries(peerRESTBucket)...)
+	subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodBucketNotificationPut).HandlerFunc(httpTraceHdrs(server.PutBucketNotificationHandler)).Queries(restQueries(peerRESTBucket)...)
+	subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodBucketNotificationListen).HandlerFunc(httpTraceHdrs(server.ListenBucketNotificationHandler)).Queries(restQueries(peerRESTBucket)...)

-	subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodReloadFormat).HandlerFunc(httpTraceHdrs(server.ReloadFormatHandler)).Queries(restQueries(peerRESTDryRun)...)
-	subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodBucketLifecycleSet).HandlerFunc(httpTraceHdrs(server.SetBucketLifecycleHandler)).Queries(restQueries(peerRESTBucket)...)
-	subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodBucketLifecycleRemove).HandlerFunc(httpTraceHdrs(server.RemoveBucketLifecycleHandler)).Queries(restQueries(peerRESTBucket)...)
+	subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodReloadFormat).HandlerFunc(httpTraceHdrs(server.ReloadFormatHandler)).Queries(restQueries(peerRESTDryRun)...)
+	subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodBucketLifecycleSet).HandlerFunc(httpTraceHdrs(server.SetBucketLifecycleHandler)).Queries(restQueries(peerRESTBucket)...)
+	subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodBucketLifecycleRemove).HandlerFunc(httpTraceHdrs(server.RemoveBucketLifecycleHandler)).Queries(restQueries(peerRESTBucket)...)

-	subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodTrace).HandlerFunc(server.TraceHandler)
-	subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodBackgroundHealStatus).HandlerFunc(server.BackgroundHealStatusHandler)
+	subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodTrace).HandlerFunc(server.TraceHandler)
+	subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodBackgroundHealStatus).HandlerFunc(server.BackgroundHealStatusHandler)

	router.NotFoundHandler = http.HandlerFunc(httpTraceAll(notFoundHandler))
}
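The new loadgroup route above is matched on the path plus a required group query parameter. A minimal gorilla/mux sketch of that registration shape; the path prefix and handler here are illustrative, not the vendored ones:

package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/gorilla/mux"
)

func main() {
	router := mux.NewRouter()
	sub := router.PathPrefix("/minio/peer/v1").Subrouter()

	// The route only matches POSTs that carry a ?group=... query parameter;
	// mux.Vars exposes the captured value to the handler.
	sub.Methods(http.MethodPost).
		Path("/loadgroup").
		HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			fmt.Fprintf(w, "reloading group %q", mux.Vars(r)["group"])
		}).
		Queries("group", "{group:.*}")

	req := httptest.NewRequest(http.MethodPost, "/minio/peer/v1/loadgroup?group=staff", nil)
	rec := httptest.NewRecorder()
	router.ServeHTTP(rec, req)
	fmt.Println(rec.Body.String()) // reloading group "staff"
}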
vendor/github.com/minio/minio/cmd/posix-list-dir_other.go (generated, vendored; 6 lines changed)

@@ -86,7 +86,7 @@ func readDirN(dirPath string, count int) (entries []string, err error) {
		}
		// Append to entries if symbolic link exists and is valid.
		if st.IsDir() {
-			entries = append(entries, fi.Name()+slashSeparator)
+			entries = append(entries, fi.Name()+SlashSeparator)
		} else if st.Mode().IsRegular() {
			entries = append(entries, fi.Name())
		}

@@ -96,8 +96,8 @@ func readDirN(dirPath string, count int) (entries []string, err error) {
			continue
		}
		if fi.Mode().IsDir() {
-			// Append "/" instead of "\" so that sorting is achieved as expected.
-			entries = append(entries, fi.Name()+slashSeparator)
+			// Append SlashSeparator instead of "\" so that sorting is achieved as expected.
+			entries = append(entries, fi.Name()+SlashSeparator)
		} else if fi.Mode().IsRegular() {
			entries = append(entries, fi.Name())
		}
vendor/github.com/minio/minio/cmd/posix-list-dir_unix.go (generated, vendored; 4 lines changed)

@@ -69,7 +69,7 @@ func parseDirents(dirPath string, buf []byte) (entries []string, err error) {
	switch dirent.Type {
	case syscall.DT_DIR:
-		entries = append(entries, name+slashSeparator)
+		entries = append(entries, name+SlashSeparator)
	case syscall.DT_REG:
		entries = append(entries, name)
	case syscall.DT_LNK, syscall.DT_UNKNOWN:

@@ -89,7 +89,7 @@ func parseDirents(dirPath string, buf []byte) (entries []string, err error) {
			return nil, err
		}
		if fi.IsDir() {
-			entries = append(entries, name+slashSeparator)
+			entries = append(entries, name+SlashSeparator)
		} else if fi.Mode().IsRegular() {
			entries = append(entries, name)
		}
vendor/github.com/minio/minio/cmd/posix-list-dir_windows.go (generated, vendored; 4 lines changed)

@@ -92,12 +92,12 @@ func readDirN(dirPath string, count int) (entries []string, err error) {
			return nil, err
		}
		if fi.IsDir() {
-			entries = append(entries, name+slashSeparator)
+			entries = append(entries, name+SlashSeparator)
		} else if fi.Mode().IsRegular() {
			entries = append(entries, name)
		}
	case data.FileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0:
-		entries = append(entries, name+slashSeparator)
+		entries = append(entries, name+SlashSeparator)
	default:
		entries = append(entries, name)
	}
40
vendor/github.com/minio/minio/cmd/posix.go
generated
vendored
40
vendor/github.com/minio/minio/cmd/posix.go
generated
vendored
|
@ -95,7 +95,7 @@ func checkPathLength(pathName string) error {
|
|||
}
|
||||
|
||||
// Check each path segment length is > 255
|
||||
for len(pathName) > 0 && pathName != "." && pathName != "/" {
|
||||
for len(pathName) > 0 && pathName != "." && pathName != SlashSeparator {
|
||||
dir, file := slashpath.Dir(pathName), slashpath.Base(pathName)
|
||||
|
||||
if len(file) > 255 {
|
||||
|
@ -214,8 +214,13 @@ func getDiskInfo(diskPath string) (di disk.Info, err error) {
|
|||
di, err = disk.GetInfo(diskPath)
|
||||
}
|
||||
|
||||
if os.IsNotExist(err) {
|
||||
switch {
|
||||
case os.IsNotExist(err):
|
||||
err = errDiskNotFound
|
||||
case isSysErrTooLong(err):
|
||||
err = errFileNameTooLong
|
||||
case isSysErrIO(err):
|
||||
err = errFaultyDisk
|
||||
}
|
||||
|
||||
return di, err
|
||||
|
@ -285,6 +290,9 @@ func (s *posix) String() string {
|
|||
}
|
||||
|
||||
func (s *posix) LastError() error {
|
||||
if atomic.LoadInt32(&s.ioErrCount) > maxAllowedIOError {
|
||||
return errFaultyDisk
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@ -310,10 +318,21 @@ type DiskInfo struct {
|
|||
// DiskInfo provides current information about disk space usage,
|
||||
// total free inodes and underlying filesystem.
|
||||
func (s *posix) DiskInfo() (info DiskInfo, err error) {
|
||||
defer func() {
|
||||
if err == errFaultyDisk {
|
||||
atomic.AddInt32(&s.ioErrCount, 1)
|
||||
}
|
||||
}()
|
||||
|
||||
if atomic.LoadInt32(&s.ioErrCount) > maxAllowedIOError {
|
||||
return info, errFaultyDisk
|
||||
}
|
||||
|
||||
di, err := getDiskInfo(s.diskPath)
|
||||
if err != nil {
|
||||
return info, err
|
||||
}
|
||||
|
||||
used := di.Total - di.Free
|
||||
if !s.diskMount {
|
||||
used = atomic.LoadUint64(&s.totalUsed)
|
||||
|
@ -323,6 +342,7 @@ func (s *posix) DiskInfo() (info DiskInfo, err error) {
|
|||
if err != nil {
|
||||
return info, err
|
||||
}
|
||||
|
||||
return DiskInfo{
|
||||
Total: di.Total,
|
||||
Free: di.Free,
|
||||
|
@@ -538,7 +558,7 @@ func listVols(dirPath string) ([]VolInfo, error) {
 	}
 	var volsInfo []VolInfo
 	for _, entry := range entries {
-		if !hasSuffix(entry, slashSeparator) || !isValidVolname(slashpath.Clean(entry)) {
+		if !hasSuffix(entry, SlashSeparator) || !isValidVolname(slashpath.Clean(entry)) {
 			// Skip if entry is neither a directory not a valid volume name.
 			continue
 		}
@@ -698,7 +718,7 @@ func (s *posix) Walk(volume, dirPath, marker string, recursive bool, leafFile st
 				return
 			}
 			var fi FileInfo
-			if hasSuffix(walkResult.entry, slashSeparator) {
+			if hasSuffix(walkResult.entry, SlashSeparator) {
 				fi = FileInfo{
 					Volume: volume,
 					Name:   walkResult.entry,
@@ -723,7 +743,7 @@ func (s *posix) Walk(volume, dirPath, marker string, recursive bool, leafFile st
 }
 
 // ListDir - return all the entries at the given directory path.
-// If an entry is a directory it will be returned with a trailing "/".
+// If an entry is a directory it will be returned with a trailing SlashSeparator.
 func (s *posix) ListDir(volume, dirPath string, count int, leafFile string) (entries []string, err error) {
 	defer func() {
 		if err == errFaultyDisk {
@@ -766,7 +786,7 @@ func (s *posix) ListDir(volume, dirPath string, count int, leafFile string) (ent
 	if leafFile != "" {
 		for i, entry := range entries {
 			if _, serr := os.Stat(pathJoin(dirPath, entry, leafFile)); serr == nil {
-				entries[i] = strings.TrimSuffix(entry, slashSeparator)
+				entries[i] = strings.TrimSuffix(entry, SlashSeparator)
 			}
 		}
 	}
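The ListDir hunk above trims the trailing separator from an entry when that entry contains the given leaf file, turning a directory entry back into an object name. A rough sketch of that behaviour, with the leaf file name and directory layout invented for illustration:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

const slashSep = "/" // illustrative stand-in for minio's SlashSeparator

// collapseLeafDirs trims the trailing separator from entries that contain
// leafFile, mirroring the ListDir hunk above.
func collapseLeafDirs(dirPath, leafFile string, entries []string) []string {
	for i, entry := range entries {
		if _, err := os.Stat(filepath.Join(dirPath, entry, leafFile)); err == nil {
			entries[i] = strings.TrimSuffix(entry, slashSep)
		}
	}
	return entries
}

func main() {
	dir, err := os.MkdirTemp("", "listdir")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	// One "object" directory that carries a hypothetical leaf file.
	if err := os.MkdirAll(filepath.Join(dir, "obj1"), 0o755); err != nil {
		panic(err)
	}
	if err := os.WriteFile(filepath.Join(dir, "obj1", "leaf.json"), []byte("{}"), 0o644); err != nil {
		panic(err)
	}

	fmt.Println(collapseLeafDirs(dir, "leaf.json", []string{"obj1/", "obj2/"}))
	// [obj1 obj2/]
}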
@@ -1370,7 +1390,7 @@ func deleteFile(basePath, deletePath string) error {
 
 	// Trailing slash is removed when found to ensure
 	// slashpath.Dir() to work as intended.
-	deletePath = strings.TrimSuffix(deletePath, slashSeparator)
+	deletePath = strings.TrimSuffix(deletePath, SlashSeparator)
 	deletePath = slashpath.Dir(deletePath)
 
 	// Delete parent directory. Errors for parent directories shouldn't trickle down.
@@ -1410,7 +1430,7 @@ func (s *posix) DeleteFile(volume, path string) (err error) {
 		return err
 	}
 
-	// Following code is needed so that we retain "/" suffix if any in
+	// Following code is needed so that we retain SlashSeparator suffix if any in
 	// path argument.
 	filePath := pathJoin(volumeDir, path)
 	if err = checkPathLength((filePath)); err != nil {
@@ -1472,8 +1492,8 @@ func (s *posix) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) (err e
 		}
 	}
 
-	srcIsDir := hasSuffix(srcPath, slashSeparator)
-	dstIsDir := hasSuffix(dstPath, slashSeparator)
+	srcIsDir := hasSuffix(srcPath, SlashSeparator)
+	dstIsDir := hasSuffix(dstPath, SlashSeparator)
 	// Either src and dst have to be directories or files, else return error.
 	if !(srcIsDir && dstIsDir || !srcIsDir && !dstIsDir) {
 		return errFileAccessDenied
10 vendor/github.com/minio/minio/cmd/posix_test.go generated vendored
@@ -69,7 +69,7 @@ func TestIsValidVolname(t *testing.T) {
 		// cases for which test should fail.
 		// passing invalid bucket names.
 		{"", false},
-		{"/", false},
+		{SlashSeparator, false},
 		{"a", false},
 		{"ab", false},
 		{"ab/", true},
@@ -319,9 +319,9 @@ func TestPosixReadAll(t *testing.T) {
 // TestPosixNewPosix all the cases handled in posix storage layer initialization.
 func TestPosixNewPosix(t *testing.T) {
 	// Temporary dir name.
-	tmpDirName := globalTestTmpDir + "/" + "minio-" + nextSuffix()
+	tmpDirName := globalTestTmpDir + SlashSeparator + "minio-" + nextSuffix()
 	// Temporary file name.
-	tmpFileName := globalTestTmpDir + "/" + "minio-" + nextSuffix()
+	tmpFileName := globalTestTmpDir + SlashSeparator + "minio-" + nextSuffix()
 	f, _ := os.Create(tmpFileName)
 	f.Close()
 	defer os.Remove(tmpFileName)
@@ -1830,9 +1830,7 @@ func TestPosixVerifyFile(t *testing.T) {
 			if err == io.EOF {
 				break
 			}
-			if err != nil {
-				t.Fatal(err)
-			}
+			t.Fatal(err)
 		}
 		w.Close()
 		if err := posixStorage.VerifyFile(volName, fileName, false, algo, nil, shardSize); err != nil {
23 vendor/github.com/minio/minio/cmd/prepare-storage.go generated vendored
@@ -63,7 +63,7 @@ func formatXLMigrateLocalEndpoints(endpoints EndpointList) error {
 			if os.IsNotExist(err) {
 				return nil
 			}
-			return err
+			return fmt.Errorf("unable to access (%s) %s", formatPath, err)
 		}
 		return formatXLMigrate(epPath)
 	}, index)
@@ -92,11 +92,13 @@ func formatXLCleanupTmpLocalEndpoints(endpoints EndpointList) error {
 			if os.IsNotExist(err) {
 				return nil
 			}
-			return err
+			return fmt.Errorf("unable to access (%s) %s", formatPath, err)
 		}
 		if _, err := os.Stat(pathJoin(epPath, minioMetaTmpBucket+"-old")); err != nil {
 			if !os.IsNotExist(err) {
-				return err
+				return fmt.Errorf("unable to access (%s) %s",
+					pathJoin(epPath, minioMetaTmpBucket+"-old"),
+					err)
 			}
 		}
 
@@ -110,15 +112,24 @@ func formatXLCleanupTmpLocalEndpoints(endpoints EndpointList) error {
 		//
 		// In this example, `33a58b40-aecc-4c9f-a22f-ff17bfa33b62` directory contains
 		// temporary objects from one of the previous runs of minio server.
+		tmpOld := pathJoin(epPath, minioMetaTmpBucket+"-old", mustGetUUID())
 		if err := renameAll(pathJoin(epPath, minioMetaTmpBucket),
-			pathJoin(epPath, minioMetaTmpBucket+"-old", mustGetUUID())); err != nil {
-			return err
+			tmpOld); err != nil && err != errFileNotFound {
+			return fmt.Errorf("unable to rename (%s -> %s) %s",
+				pathJoin(epPath, minioMetaTmpBucket),
+				tmpOld,
+				err)
 		}
 
 		// Removal of tmp-old folder is backgrounded completely.
 		go removeAll(pathJoin(epPath, minioMetaTmpBucket+"-old"))
 
-		return mkdirAll(pathJoin(epPath, minioMetaTmpBucket), 0777)
+		if err := mkdirAll(pathJoin(epPath, minioMetaTmpBucket), 0777); err != nil {
+			return fmt.Errorf("unable to create (%s) %s",
+				pathJoin(epPath, minioMetaTmpBucket),
+				err)
+		}
+		return nil
 	}, index)
 }
 for _, err := range g.Wait() {
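The prepare-storage hunks replace bare `return err` with fmt.Errorf messages that carry the path that failed. A minimal sketch of that wrapping style follows; the path is illustrative, and the %s formatting (rather than %w) matches what the diff shows.

package main

import (
	"fmt"
	"os"
)

// statWithContext shows the pattern used above: instead of returning the raw
// error, annotate it with the path that failed so operators can act on it.
func statWithContext(path string) error {
	if _, err := os.Stat(path); err != nil {
		if os.IsNotExist(err) {
			return nil // a missing path is tolerated here, mirroring the diff
		}
		return fmt.Errorf("unable to access (%s) %s", path, err)
	}
	return nil
}

func main() {
	if err := statWithContext("/tmp/.minio.sys/tmp"); err != nil {
		fmt.Println(err)
	}
}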
8 vendor/github.com/minio/minio/cmd/rest/client.go generated vendored
@@ -52,9 +52,15 @@ type Client struct {
 	newAuthToken func() string
 }
 
+// URL query separator constants
+const (
+	resourceSep = "/"
+	querySep    = "?"
+)
+
 // CallWithContext - make a REST call with context.
 func (c *Client) CallWithContext(ctx context.Context, method string, values url.Values, body io.Reader, length int64) (reply io.ReadCloser, err error) {
-	req, err := http.NewRequest(http.MethodPost, c.url.String()+"/"+method+"?"+values.Encode(), body)
+	req, err := http.NewRequest(http.MethodPost, c.url.String()+resourceSep+method+querySep+values.Encode(), body)
 	if err != nil {
 		return nil, &NetworkError{err}
 	}
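The rest client now assembles its request URL from named separator constants instead of bare "/" and "?" literals. A small sketch of equivalent URL construction; the endpoint, method name, and query values here are made up for the example.

package main

import (
	"fmt"
	"net/url"
)

const (
	resourceSep = "/"
	querySep    = "?"
)

// buildCallURL mirrors the concatenation used in CallWithContext:
// <base><resourceSep><method><querySep><encoded-values>.
func buildCallURL(base *url.URL, method string, values url.Values) string {
	return base.String() + resourceSep + method + querySep + values.Encode()
}

func main() {
	base, _ := url.Parse("http://localhost:9000/minio/lock") // illustrative endpoint
	v := url.Values{}
	v.Set("uid", "1234")
	fmt.Println(buildCallURL(base, "lock", v))
	// http://localhost:9000/minio/lock/lock?uid=1234
}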
Some files were not shown because too many files have changed in this diff.