From c839149fb979cf47908fc11d36a2910270c50f17 Mon Sep 17 00:00:00 2001 From: Shuting Zhao Date: Thu, 8 Aug 2019 16:33:28 -0700 Subject: [PATCH] update vendor --- vendor/github.com/evanphx/json-patch/go.mod | 5 + vendor/github.com/evanphx/json-patch/go.sum | 2 + vendor/github.com/hashicorp/golang-lru/go.mod | 2 + vendor/github.com/hashicorp/golang-lru/lru.go | 26 +- .../hashicorp/golang-lru/lru_test.go | 39 + .../hashicorp/golang-lru/simplelru/lru.go | 16 + .../golang-lru/simplelru/lru_interface.go | 7 +- .../golang-lru/simplelru/lru_test.go | 39 + vendor/github.com/json-iterator/go/adapter.go | 2 +- vendor/github.com/json-iterator/go/go.mod | 11 + vendor/github.com/json-iterator/go/go.sum | 14 + .../github.com/json-iterator/go/iter_skip.go | 25 +- .../go/misc_tests/jsoniter_array_test.go | 21 + .../go/misc_tests/jsoniter_float_test.go | 21 + .../json-iterator/go/reflect_native.go | 14 +- .../go/reflect_struct_decoder.go | 2 +- .../go/skip_tests/jsoniter_skip_test.go | 9 + .../json-iterator/go/stream_float.go | 17 + vendor/github.com/minio/minio/.travis.yml | 2 + .../minio/minio/Dockerfile.simpleci | 1 + vendor/github.com/minio/minio/Makefile | 30 +- vendor/github.com/minio/minio/README.md | 9 +- .../minio/minio/buildscripts/cross-compile.sh | 1 + .../minio/minio/buildscripts/go-coverage.sh | 2 +- .../minio/minio/buildscripts/verify-build.sh | 1 + .../minio/minio/cmd/admin-handlers.go | 177 ++- .../minio/minio/cmd/admin-handlers_test.go | 18 +- .../minio/minio/cmd/admin-heal-ops.go | 6 +- .../minio/minio/cmd/admin-router.go | 12 + .../github.com/minio/minio/cmd/api-errors.go | 16 + .../minio/minio/cmd/api-resources_test.go | 18 +- .../minio/minio/cmd/api-response.go | 4 +- .../github.com/minio/minio/cmd/api-router.go | 4 +- .../minio/minio/cmd/auth-handler_test.go | 10 +- .../minio/minio/cmd/bucket-handlers.go | 2 +- .../minio/minio/cmd/bucket-handlers_test.go | 4 +- .../minio/cmd/bucket-notification-handlers.go | 35 +- vendor/github.com/minio/minio/cmd/certs.go | 2 
+- .../github.com/minio/minio/cmd/common-main.go | 19 + .../minio/minio/cmd/config-common.go | 53 - .../minio/minio/cmd/config-current.go | 57 +- .../minio/minio/cmd/config-current_test.go | 10 +- .../minio/minio/cmd/config-migrate.go | 2 +- .../minio/minio/cmd/config-migrate_test.go | 6 +- vendor/github.com/minio/minio/cmd/config.go | 20 + .../minio/minio/cmd/crypto/error.go | 2 + .../github.com/minio/minio/cmd/crypto/kms.go | 20 + .../minio/minio/cmd/crypto/kms_test.go | 11 +- .../minio/minio/cmd/crypto/vault.go | 27 + .../minio/minio/cmd/disk-cache-config.go | 2 +- .../minio/minio/cmd/disk-cache-fs.go | 2 +- .../github.com/minio/minio/cmd/disk-cache.go | 8 +- .../github.com/minio/minio/cmd/disk-usage.go | 4 +- vendor/github.com/minio/minio/cmd/endpoint.go | 21 +- .../minio/minio/cmd/endpoint_test.go | 43 +- vendor/github.com/minio/minio/cmd/etcd.go | 72 + .../github.com/minio/minio/cmd/format-xl.go | 16 + .../minio/minio/cmd/fs-v1-helpers.go | 36 +- .../minio/minio/cmd/fs-v1-helpers_test.go | 12 - .../minio/minio/cmd/fs-v1-metadata.go | 2 +- .../minio/minio/cmd/fs-v1-multipart.go | 10 +- vendor/github.com/minio/minio/cmd/fs-v1.go | 14 +- .../github.com/minio/minio/cmd/fs-v1_test.go | 4 +- .../minio/minio/cmd/gateway-main.go | 25 +- .../minio/cmd/gateway/azure/gateway-azure.go | 2 +- .../minio/cmd/gateway/gcs/gateway-gcs.go | 6 +- .../cmd/gateway/hdfs/gateway-hdfs-utils.go | 2 +- .../minio/cmd/gateway/hdfs/gateway-hdfs.go | 93 +- .../minio/cmd/gateway/s3/gateway-s3-sse.go | 13 +- .../minio/minio/cmd/generic-handlers.go | 17 +- .../minio/minio/cmd/generic-handlers_test.go | 6 +- .../minio/minio/cmd/handler-utils.go | 23 +- .../minio/minio/cmd/iam-etcd-store.go | 599 ++++++++ .../minio/minio/cmd/iam-object-store.go | 582 ++++++++ vendor/github.com/minio/minio/cmd/iam.go | 1245 +++++++++-------- .../minio/minio/cmd/lock-rest-client.go | 34 +- .../minio/cmd/lock-rest-server-common.go | 18 +- .../minio/minio/cmd/lock-rest-server.go | 144 +- 
.../minio/minio/cmd/notification.go | 13 + .../minio/minio/cmd/object-api-common.go | 20 +- .../minio/minio/cmd/object-api-errors.go | 4 +- .../minio/cmd/object-api-getobject_test.go | 2 +- .../minio/cmd/object-api-input-checks.go | 6 +- .../minio/cmd/object-api-listobjects_test.go | 18 +- .../minio/cmd/object-api-multipart_test.go | 4 +- .../minio/minio/cmd/object-api-utils.go | 24 +- .../minio/minio/cmd/object-handlers.go | 2 +- .../minio/minio/cmd/object-handlers_test.go | 80 +- .../minio/minio/cmd/object_api_suite_test.go | 4 +- .../github.com/minio/minio/cmd/os-reliable.go | 6 +- .../minio/minio/cmd/peer-rest-client.go | 22 +- .../minio/minio/cmd/peer-rest-common.go | 2 + .../minio/minio/cmd/peer-rest-server.go | 101 +- .../minio/minio/cmd/posix-list-dir_other.go | 6 +- .../minio/minio/cmd/posix-list-dir_unix.go | 4 +- .../minio/minio/cmd/posix-list-dir_windows.go | 4 +- vendor/github.com/minio/minio/cmd/posix.go | 40 +- .../github.com/minio/minio/cmd/posix_test.go | 10 +- .../minio/minio/cmd/prepare-storage.go | 23 +- .../github.com/minio/minio/cmd/rest/client.go | 8 +- .../github.com/minio/minio/cmd/server-main.go | 9 +- .../github.com/minio/minio/cmd/server_test.go | 6 +- .../minio/minio/cmd/signature-v2.go | 2 +- .../minio/minio/cmd/signature-v4-parser.go | 6 +- .../minio/cmd/signature-v4-parser_test.go | 6 +- .../minio/minio/cmd/signature-v4-utils.go | 3 +- .../minio/minio/cmd/signature-v4.go | 4 +- .../minio/minio/cmd/storage-rest-client.go | 4 +- .../minio/minio/cmd/storage-rest-common.go | 2 +- .../minio/minio/cmd/storage-rest-server.go | 38 +- .../minio/minio/cmd/sts-handlers.go | 11 +- .../minio/minio/cmd/test-utils_test.go | 18 +- .../github.com/minio/minio/cmd/tree-walk.go | 8 +- .../minio/minio/cmd/typed-errors.go | 7 + .../github.com/minio/minio/cmd/update-main.go | 2 +- vendor/github.com/minio/minio/cmd/utils.go | 44 +- .../github.com/minio/minio/cmd/utils_test.go | 2 +- .../minio/minio/cmd/web-handlers.go | 22 +- 
.../minio/minio/cmd/web-handlers_test.go | 4 +- .../github.com/minio/minio/cmd/web-router.go | 2 +- vendor/github.com/minio/minio/cmd/xl-sets.go | 32 +- .../minio/minio/cmd/xl-v1-common.go | 2 +- .../minio/minio/cmd/xl-v1-common_test.go | 2 +- .../minio/minio/cmd/xl-v1-healing.go | 11 +- .../minio/minio/cmd/xl-v1-healing_test.go | 76 + .../minio/minio/cmd/xl-v1-list-objects.go | 8 +- .../minio/minio/cmd/xl-v1-multipart.go | 2 +- .../minio/minio/cmd/xl-v1-object.go | 10 +- .../minio/minio/cmd/xl-v1-object_test.go | 2 +- .../minio/docs/bucket/notifications/README.md | 133 +- .../minio/minio/docs/config/README.md | 11 - .../minio/docs/config/config.sample.json | 18 +- .../minio/minio/docs/distributed/DESIGN.md | 97 +- .../minio/minio/docs/distributed/README.md | 1 - .../minio/minio/docs/docker/README.md | 36 +- .../minio/minio/docs/gateway/hdfs.md | 31 +- .../github.com/minio/minio/docs/kms/README.md | 4 +- .../docker-compose/docker-compose.yaml | 8 +- .../docker-swarm/docker-compose-secrets.yaml | 8 +- .../docker-swarm/docker-compose.yaml | 8 +- .../minio-distributed-daemonset.yaml | 2 +- .../minio-distributed-statefulset.yaml | 2 +- .../minio-gcs-gateway-deployment.yaml | 2 +- .../minio-standalone-deployment.yaml | 2 +- .../minio/minio/docs/sts/assume-role.md | 2 +- .../minio/minio/docs/sts/client-grants.md | 2 +- .../minio/minio/docs/sts/web-identity.md | 2 +- vendor/github.com/minio/minio/go.mod | 13 +- vendor/github.com/minio/minio/go.sum | 91 +- .../minio/mint/build/aws-sdk-go/install.sh | 2 +- .../minio/mint/build/healthcheck/install.sh | 2 +- .../minio/mint/build/minio-go/install.sh | 2 +- .../minio/mint/build/security/install.sh | 2 +- .../minio/minio/mint/build/worm/install.sh | 2 +- vendor/github.com/minio/minio/mint/mint.sh | 2 + .../minio/minio/pkg/auth/credentials.go | 2 +- .../minio/minio/pkg/disk/type_linux.go | 1 + .../minio/minio/pkg/dns/etcd_dns.go | 14 +- .../minio/minio/pkg/event/config.go | 15 +- .../minio/minio/pkg/event/target/amqp.go | 164 ++- 
.../minio/minio/pkg/event/target/mysql.go | 194 ++- .../minio/minio/pkg/event/target/nats.go | 172 ++- .../minio/minio/pkg/event/target/nsq.go | 22 +- .../minio/pkg/event/target/postgresql.go | 183 ++- .../minio/minio/pkg/event/target/redis.go | 197 ++- .../minio/minio/pkg/event/target/store.go | 14 + .../minio/minio/pkg/event/target/webhook.go | 15 - .../minio/minio/pkg/handlers/forwarder.go | 13 +- .../minio/minio/pkg/iam/policy/opa.go | 53 +- .../minio/minio/pkg/iam/validator/jwt.go | 26 +- .../minio/minio/pkg/madmin/group-commands.go | 164 +++ vendor/github.com/minio/minio/pkg/net/url.go | 6 + .../minio/minio/pkg/net/url_test.go | 5 +- vendor/github.com/tevino/abool/.gitignore | 24 + vendor/github.com/tevino/abool/LICENSE | 21 + vendor/github.com/tevino/abool/README.md | 49 + vendor/github.com/tevino/abool/bool.go | 63 + vendor/github.com/tevino/abool/bool_test.go | 176 +++ vendor/golang.org/x/net/http2/transport.go | 2 +- .../golang.org/x/sys/unix/affinity_linux.go | 8 +- vendor/golang.org/x/sys/unix/dirent.go | 2 +- vendor/golang.org/x/sys/unix/endian_little.go | 2 +- vendor/golang.org/x/sys/unix/sendfile_test.go | 7 +- .../x/sys/unix/syscall_linux_test.go | 4 +- vendor/golang.org/x/sys/unix/syscall_test.go | 15 +- vendor/golang.org/x/sys/windows/service.go | 4 + .../x/sys/windows/svc/mgr/config.go | 27 + .../golang.org/x/sys/windows/svc/mgr/mgr.go | 8 + .../x/sys/windows/svc/mgr/mgr_test.go | 12 + .../golang.org/x/sys/windows/types_windows.go | 7 +- vendor/golang.org/x/tools/go.mod | 1 + vendor/golang.org/x/tools/go.sum | 2 + .../x/tools/go/analysis/analysis.go | 6 +- .../x/tools/go/analysis/diagnostic.go | 32 +- .../go/analysis/diagnostic_experimental.go | 41 - .../tools/go/analysis/internal/facts/facts.go | 8 +- .../go/analysis/internal/facts/facts_test.go | 50 + .../tools/go/analysis/passes/printf/printf.go | 70 +- .../passes/printf/testdata/src/a/a.go | 15 +- .../tools/go/analysis/passes/printf/types.go | 6 + .../go/analysis/unitchecker/unitchecker.go | 
10 +- .../tools/go/internal/packagesdriver/sizes.go | 27 +- .../golang.org/x/tools/go/packages/golist.go | 97 +- .../x/tools/go/packages/golist_overlay.go | 13 +- .../x/tools/go/packages/packages_test.go | 46 +- vendor/golang.org/x/tools/gopls/go.mod | 2 +- vendor/golang.org/x/tools/gopls/go.sum | 5 +- .../x/tools/internal/gopathwalk/walk.go | 2 +- .../x/tools/internal/imports/fix.go | 167 +-- .../x/tools/internal/imports/fix_test.go | 5 - .../x/tools/internal/imports/imports.go | 97 +- .../x/tools/internal/lsp/cache/check.go | 46 +- .../x/tools/internal/lsp/cache/external.go | 3 +- .../x/tools/internal/lsp/cache/gofile.go | 10 +- .../x/tools/internal/lsp/cache/load.go | 16 +- .../x/tools/internal/lsp/cache/modfile.go | 5 +- .../x/tools/internal/lsp/cache/parse.go | 35 +- .../x/tools/internal/lsp/cache/session.go | 4 +- .../x/tools/internal/lsp/cache/sumfile.go | 5 +- .../x/tools/internal/lsp/cache/token.go | 4 +- .../x/tools/internal/lsp/cache/view.go | 76 +- .../x/tools/internal/lsp/cmd/check.go | 5 +- .../x/tools/internal/lsp/cmd/cmd.go | 29 +- .../x/tools/internal/lsp/cmd/cmd_race_test.go | 11 - .../x/tools/internal/lsp/cmd/cmd_test.go | 2 - .../x/tools/internal/lsp/cmd/definition.go | 17 +- .../x/tools/internal/lsp/cmd/format.go | 7 +- .../x/tools/internal/lsp/cmd/serve.go | 21 +- .../x/tools/internal/lsp/code_action.go | 141 +- .../x/tools/internal/lsp/definition.go | 4 +- .../x/tools/internal/lsp/general.go | 81 +- .../golang.org/x/tools/internal/lsp/hover.go | 5 +- .../golang.org/x/tools/internal/lsp/link.go | 4 +- .../x/tools/internal/lsp/lsp_test.go | 18 +- .../x/tools/internal/lsp/protocol/context.go | 2 +- .../x/tools/internal/lsp/protocol/enums.go | 6 +- .../x/tools/internal/lsp/protocol/span.go | 4 +- .../tools/internal/lsp/protocol/tsprotocol.go | 16 +- .../lsp/protocol/typescript/README.md | 8 +- .../internal/lsp/protocol/typescript/go.ts | 2 +- .../x/tools/internal/lsp/references.go | 2 +- .../golang.org/x/tools/internal/lsp/rename.go | 2 +- 
.../golang.org/x/tools/internal/lsp/server.go | 15 +- .../x/tools/internal/lsp/source/analysis.go | 7 +- .../x/tools/internal/lsp/source/completion.go | 12 +- .../internal/lsp/source/completion_format.go | 2 +- .../tools/internal/lsp/source/diagnostics.go | 11 +- .../x/tools/internal/lsp/source/format.go | 121 +- .../x/tools/internal/lsp/source/highlight.go | 6 +- .../x/tools/internal/lsp/source/hover.go | 17 +- .../x/tools/internal/lsp/source/identifier.go | 70 +- .../x/tools/internal/lsp/source/references.go | 8 +- .../x/tools/internal/lsp/source/rename.go | 18 +- .../internal/lsp/source/signature_help.go | 16 +- .../tools/internal/lsp/source/source_test.go | 13 +- .../internal/lsp/source/suggested_fix.go | 24 +- .../lsp/source/suggested_fix_experimental.go | 26 - .../x/tools/internal/lsp/source/symbols.go | 4 +- .../x/tools/internal/lsp/source/util.go | 35 + .../x/tools/internal/lsp/source/view.go | 11 +- .../internal/lsp/telemetry/ocagent/ocagent.go | 10 +- .../lsp/telemetry/ocagent/ocagent_test.go | 145 ++ .../lsp/testdata/generated/generated.go | 7 + .../lsp/testdata/generated/generator.go | 5 + .../tools/internal/lsp/testdata/good/good0.go | 2 +- .../tools/internal/lsp/testdata/good/good1.go | 2 +- .../lsp/testdata/nodisk/newdisk_exists.go | 1 - .../noparse_format/parse_format.go.golden | 9 + .../noparse_format/parse_format.go.in | 5 + .../x/tools/internal/lsp/tests/tests.go | 9 +- .../internal/lsp/text_synchronization.go | 14 +- .../golang.org/x/tools/internal/lsp/util.go | 4 +- .../x/tools/internal/lsp/workspace.go | 12 +- vendor/k8s.io/klog/.travis.yml | 3 +- vendor/k8s.io/klog/README.md | 2 +- vendor/k8s.io/klog/go.mod | 5 + vendor/k8s.io/klog/go.sum | 2 + .../klog/integration_tests/internal/main.go | 46 + .../klog/integration_tests/klog_test.go | 311 ++++ vendor/k8s.io/klog/klog.go | 92 +- vendor/k8s.io/klog/klog_test.go | 91 +- .../sample-controller/Godeps/Godeps.json | 22 +- vendor/k8s.io/sample-controller/go.mod | 16 +- 
vendor/k8s.io/sample-controller/go.sum | 24 +- vendor/k8s.io/utils/trace/trace.go | 52 +- vendor/k8s.io/utils/trace/trace_test.go | 78 +- 286 files changed, 6947 insertions(+), 2563 deletions(-) create mode 100644 vendor/github.com/evanphx/json-patch/go.mod create mode 100644 vendor/github.com/evanphx/json-patch/go.sum create mode 100644 vendor/github.com/json-iterator/go/go.mod create mode 100644 vendor/github.com/json-iterator/go/go.sum create mode 100644 vendor/github.com/minio/minio/cmd/etcd.go create mode 100644 vendor/github.com/minio/minio/cmd/iam-etcd-store.go create mode 100644 vendor/github.com/minio/minio/cmd/iam-object-store.go create mode 100644 vendor/github.com/minio/minio/pkg/madmin/group-commands.go create mode 100644 vendor/github.com/tevino/abool/.gitignore create mode 100644 vendor/github.com/tevino/abool/LICENSE create mode 100644 vendor/github.com/tevino/abool/README.md create mode 100644 vendor/github.com/tevino/abool/bool.go create mode 100644 vendor/github.com/tevino/abool/bool_test.go delete mode 100644 vendor/golang.org/x/tools/go/analysis/diagnostic_experimental.go delete mode 100644 vendor/golang.org/x/tools/internal/lsp/cmd/cmd_race_test.go delete mode 100644 vendor/golang.org/x/tools/internal/lsp/source/suggested_fix_experimental.go create mode 100644 vendor/golang.org/x/tools/internal/lsp/telemetry/ocagent/ocagent_test.go create mode 100644 vendor/golang.org/x/tools/internal/lsp/testdata/generated/generated.go create mode 100644 vendor/golang.org/x/tools/internal/lsp/testdata/generated/generator.go delete mode 100644 vendor/golang.org/x/tools/internal/lsp/testdata/nodisk/newdisk_exists.go create mode 100644 vendor/golang.org/x/tools/internal/lsp/testdata/noparse_format/parse_format.go.golden create mode 100644 vendor/golang.org/x/tools/internal/lsp/testdata/noparse_format/parse_format.go.in create mode 100644 vendor/k8s.io/klog/go.mod create mode 100644 vendor/k8s.io/klog/go.sum create mode 100644 
vendor/k8s.io/klog/integration_tests/internal/main.go create mode 100644 vendor/k8s.io/klog/integration_tests/klog_test.go diff --git a/vendor/github.com/evanphx/json-patch/go.mod b/vendor/github.com/evanphx/json-patch/go.mod new file mode 100644 index 0000000000..a858cab296 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/go.mod @@ -0,0 +1,5 @@ +module github.com/evanphx/json-patch + +go 1.12 + +require github.com/pkg/errors v0.8.1 diff --git a/vendor/github.com/evanphx/json-patch/go.sum b/vendor/github.com/evanphx/json-patch/go.sum new file mode 100644 index 0000000000..f29ab350a5 --- /dev/null +++ b/vendor/github.com/evanphx/json-patch/go.sum @@ -0,0 +1,2 @@ +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= diff --git a/vendor/github.com/hashicorp/golang-lru/go.mod b/vendor/github.com/hashicorp/golang-lru/go.mod index 824cb97e83..8ad8826b36 100644 --- a/vendor/github.com/hashicorp/golang-lru/go.mod +++ b/vendor/github.com/hashicorp/golang-lru/go.mod @@ -1 +1,3 @@ module github.com/hashicorp/golang-lru + +go 1.12 diff --git a/vendor/github.com/hashicorp/golang-lru/lru.go b/vendor/github.com/hashicorp/golang-lru/lru.go index 1cbe04b7d0..052a38b4c4 100644 --- a/vendor/github.com/hashicorp/golang-lru/lru.go +++ b/vendor/github.com/hashicorp/golang-lru/lru.go @@ -86,17 +86,35 @@ func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evicted bool) { } // Remove removes the provided key from the cache. -func (c *Cache) Remove(key interface{}) { +func (c *Cache) Remove(key interface{}) (present bool) { c.lock.Lock() - c.lru.Remove(key) + present = c.lru.Remove(key) c.lock.Unlock() + return +} + +// Resize changes the cache size. +func (c *Cache) Resize(size int) (evicted int) { + c.lock.Lock() + evicted = c.lru.Resize(size) + c.lock.Unlock() + return evicted } // RemoveOldest removes the oldest item from the cache. 
-func (c *Cache) RemoveOldest() { +func (c *Cache) RemoveOldest() (key interface{}, value interface{}, ok bool) { c.lock.Lock() - c.lru.RemoveOldest() + key, value, ok = c.lru.RemoveOldest() c.lock.Unlock() + return +} + +// GetOldest returns the oldest entry +func (c *Cache) GetOldest() (key interface{}, value interface{}, ok bool) { + c.lock.Lock() + key, value, ok = c.lru.GetOldest() + c.lock.Unlock() + return } // Keys returns a slice of the keys in the cache, from oldest to newest. diff --git a/vendor/github.com/hashicorp/golang-lru/lru_test.go b/vendor/github.com/hashicorp/golang-lru/lru_test.go index e7e23505e5..710b045334 100644 --- a/vendor/github.com/hashicorp/golang-lru/lru_test.go +++ b/vendor/github.com/hashicorp/golang-lru/lru_test.go @@ -219,3 +219,42 @@ func TestLRUPeek(t *testing.T) { t.Errorf("should not have updated recent-ness of 1") } } + +// test that Resize can upsize and downsize +func TestLRUResize(t *testing.T) { + onEvictCounter := 0 + onEvicted := func(k interface{}, v interface{}) { + onEvictCounter++ + } + l, err := NewWithEvict(2, onEvicted) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Downsize + l.Add(1, 1) + l.Add(2, 2) + evicted := l.Resize(1); + if evicted != 1 { + t.Errorf("1 element should have been evicted: %v", evicted) + } + if onEvictCounter != 1 { + t.Errorf("onEvicted should have been called 1 time: %v", onEvictCounter) + } + + l.Add(3, 3) + if l.Contains(1) { + t.Errorf("Element 1 should have been evicted") + } + + // Upsize + evicted = l.Resize(2); + if evicted != 0 { + t.Errorf("0 elements should have been evicted: %v", evicted) + } + + l.Add(4, 4) + if !l.Contains(3) || !l.Contains(4) { + t.Errorf("Cache should have contained 2 elements") + } +} diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go index 5673773b22..a86c8539e0 100644 --- a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go +++ 
b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go @@ -73,6 +73,9 @@ func (c *LRU) Add(key, value interface{}) (evicted bool) { func (c *LRU) Get(key interface{}) (value interface{}, ok bool) { if ent, ok := c.items[key]; ok { c.evictList.MoveToFront(ent) + if ent.Value.(*entry) == nil { + return nil, false + } return ent.Value.(*entry).value, true } return @@ -142,6 +145,19 @@ func (c *LRU) Len() int { return c.evictList.Len() } +// Resize changes the cache size. +func (c *LRU) Resize(size int) (evicted int) { + diff := c.Len() - size + if diff < 0 { + diff = 0 + } + for i := 0; i < diff; i++ { + c.removeOldest() + } + c.size = size + return diff +} + // removeOldest removes the oldest item from the cache. func (c *LRU) removeOldest() { ent := c.evictList.Back() diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go index 74c7077440..92d70934d6 100644 --- a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go @@ -10,7 +10,7 @@ type LRUCache interface { // updates the "recently used"-ness of the key. #value, isFound Get(key interface{}) (value interface{}, ok bool) - // Check if a key exsists in cache without updating the recent-ness. + // Checks if a key exists in cache without updating the recent-ness. Contains(key interface{}) (ok bool) // Returns key's value without updating the "recently used"-ness of the key. @@ -31,6 +31,9 @@ type LRUCache interface { // Returns the number of items in the cache. Len() int - // Clear all cache entries + // Clears all cache entries. 
Purge() + + // Resizes cache, returning number evicted + Resize(int) int } diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_test.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_test.go index ca5676e1ee..bc7f696cc7 100644 --- a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_test.go +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_test.go @@ -165,3 +165,42 @@ func TestLRU_Peek(t *testing.T) { t.Errorf("should not have updated recent-ness of 1") } } + +// Test that Resize can upsize and downsize +func TestLRU_Resize(t *testing.T) { + onEvictCounter := 0 + onEvicted := func(k interface{}, v interface{}) { + onEvictCounter++ + } + l, err := NewLRU(2, onEvicted) + if err != nil { + t.Fatalf("err: %v", err) + } + + // Downsize + l.Add(1, 1) + l.Add(2, 2) + evicted := l.Resize(1); + if evicted != 1 { + t.Errorf("1 element should have been evicted: %v", evicted) + } + if onEvictCounter != 1 { + t.Errorf("onEvicted should have been called 1 time: %v", onEvictCounter) + } + + l.Add(3, 3) + if l.Contains(1) { + t.Errorf("Element 1 should have been evicted") + } + + // Upsize + evicted = l.Resize(2); + if evicted != 0 { + t.Errorf("0 elements should have been evicted: %v", evicted) + } + + l.Add(4, 4) + if !l.Contains(3) || !l.Contains(4) { + t.Errorf("Cache should have contained 2 elements") + } +} diff --git a/vendor/github.com/json-iterator/go/adapter.go b/vendor/github.com/json-iterator/go/adapter.go index e674d0f397..92d2cc4a3d 100644 --- a/vendor/github.com/json-iterator/go/adapter.go +++ b/vendor/github.com/json-iterator/go/adapter.go @@ -16,7 +16,7 @@ func Unmarshal(data []byte, v interface{}) error { return ConfigDefault.Unmarshal(data, v) } -// UnmarshalFromString convenient method to read from string instead of []byte +// UnmarshalFromString is a convenient method to read from string instead of []byte func UnmarshalFromString(str string, v interface{}) error { return ConfigDefault.UnmarshalFromString(str, v) } diff --git 
a/vendor/github.com/json-iterator/go/go.mod b/vendor/github.com/json-iterator/go/go.mod new file mode 100644 index 0000000000..e05c42ff58 --- /dev/null +++ b/vendor/github.com/json-iterator/go/go.mod @@ -0,0 +1,11 @@ +module github.com/json-iterator/go + +go 1.12 + +require ( + github.com/davecgh/go-spew v1.1.1 + github.com/google/gofuzz v1.0.0 + github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 + github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 + github.com/stretchr/testify v1.3.0 +) diff --git a/vendor/github.com/json-iterator/go/go.sum b/vendor/github.com/json-iterator/go/go.sum new file mode 100644 index 0000000000..d778b5a14d --- /dev/null +++ b/vendor/github.com/json-iterator/go/go.sum @@ -0,0 +1,14 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 
h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git a/vendor/github.com/json-iterator/go/iter_skip.go b/vendor/github.com/json-iterator/go/iter_skip.go index f58beb9137..e91eefb15b 100644 --- a/vendor/github.com/json-iterator/go/iter_skip.go +++ b/vendor/github.com/json-iterator/go/iter_skip.go @@ -37,17 +37,24 @@ func (iter *Iterator) SkipAndReturnBytes() []byte { return iter.stopCapture() } -type captureBuffer struct { - startedAt int - captured []byte +// SkipAndAppendBytes skips next JSON element and appends its content to +// buffer, returning the result. +func (iter *Iterator) SkipAndAppendBytes(buf []byte) []byte { + iter.startCaptureTo(buf, iter.head) + iter.Skip() + return iter.stopCapture() } -func (iter *Iterator) startCapture(captureStartedAt int) { +func (iter *Iterator) startCaptureTo(buf []byte, captureStartedAt int) { if iter.captured != nil { panic("already in capture mode") } iter.captureStartedAt = captureStartedAt - iter.captured = make([]byte, 0, 32) + iter.captured = buf +} + +func (iter *Iterator) startCapture(captureStartedAt int) { + iter.startCaptureTo(make([]byte, 0, 32), captureStartedAt) } func (iter *Iterator) stopCapture() []byte { @@ -58,13 +65,7 @@ func (iter *Iterator) stopCapture() []byte { remaining := iter.buf[iter.captureStartedAt:iter.head] iter.captureStartedAt = -1 iter.captured = nil - if len(captured) == 0 { - copied := make([]byte, len(remaining)) - copy(copied, remaining) - return copied - } - captured = append(captured, remaining...) - return captured + return append(captured, remaining...) 
} // Skip skips a json object and positions to relatively the next json object diff --git a/vendor/github.com/json-iterator/go/misc_tests/jsoniter_array_test.go b/vendor/github.com/json-iterator/go/misc_tests/jsoniter_array_test.go index 56e3e12c03..ef60420daa 100644 --- a/vendor/github.com/json-iterator/go/misc_tests/jsoniter_array_test.go +++ b/vendor/github.com/json-iterator/go/misc_tests/jsoniter_array_test.go @@ -158,6 +158,27 @@ func Test_encode_byte_array(t *testing.T) { should.Equal(`"AQID"`, string(bytes)) } +func Test_encode_empty_byte_array(t *testing.T) { + should := require.New(t) + bytes, err := json.Marshal([]byte{}) + should.Nil(err) + should.Equal(`""`, string(bytes)) + bytes, err = jsoniter.Marshal([]byte{}) + should.Nil(err) + should.Equal(`""`, string(bytes)) +} + +func Test_encode_nil_byte_array(t *testing.T) { + should := require.New(t) + var nilSlice []byte + bytes, err := json.Marshal(nilSlice) + should.Nil(err) + should.Equal(`null`, string(bytes)) + bytes, err = jsoniter.Marshal(nilSlice) + should.Nil(err) + should.Equal(`null`, string(bytes)) +} + func Test_decode_byte_array_from_base64(t *testing.T) { should := require.New(t) data := []byte{} diff --git a/vendor/github.com/json-iterator/go/misc_tests/jsoniter_float_test.go b/vendor/github.com/json-iterator/go/misc_tests/jsoniter_float_test.go index de7bc965ea..34edd4af88 100644 --- a/vendor/github.com/json-iterator/go/misc_tests/jsoniter_float_test.go +++ b/vendor/github.com/json-iterator/go/misc_tests/jsoniter_float_test.go @@ -2,6 +2,7 @@ package misc_tests import ( "encoding/json" + "math" "testing" "github.com/json-iterator/go" @@ -77,6 +78,26 @@ func Test_read_number(t *testing.T) { should.Equal(`92233720368547758079223372036854775807`, string(val)) } +func Test_encode_inf(t *testing.T) { + should := require.New(t) + _, err := json.Marshal(math.Inf(1)) + should.Error(err) + _, err = jsoniter.Marshal(float32(math.Inf(1))) + should.Error(err) + _, err = jsoniter.Marshal(math.Inf(-1)) 
+ should.Error(err) +} + +func Test_encode_nan(t *testing.T) { + should := require.New(t) + _, err := json.Marshal(math.NaN()) + should.Error(err) + _, err = jsoniter.Marshal(float32(math.NaN())) + should.Error(err) + _, err = jsoniter.Marshal(math.NaN()) + should.Error(err) +} + func Benchmark_jsoniter_float(b *testing.B) { b.ReportAllocs() input := []byte(`1.1123,`) diff --git a/vendor/github.com/json-iterator/go/reflect_native.go b/vendor/github.com/json-iterator/go/reflect_native.go index 9042eb0cb9..f88722d14d 100644 --- a/vendor/github.com/json-iterator/go/reflect_native.go +++ b/vendor/github.com/json-iterator/go/reflect_native.go @@ -432,17 +432,19 @@ func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { } func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { - src := *((*[]byte)(ptr)) - if len(src) == 0 { + if codec.sliceType.UnsafeIsNil(ptr) { stream.WriteNil() return } + src := *((*[]byte)(ptr)) encoding := base64.StdEncoding stream.writeByte('"') - size := encoding.EncodedLen(len(src)) - buf := make([]byte, size) - encoding.Encode(buf, src) - stream.buf = append(stream.buf, buf...) + if len(src) != 0 { + size := encoding.EncodedLen(len(src)) + buf := make([]byte, size) + encoding.Encode(buf, src) + stream.buf = append(stream.buf, buf...) 
+ } stream.writeByte('"') } diff --git a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go index 355d2d116b..932641ac46 100644 --- a/vendor/github.com/json-iterator/go/reflect_struct_decoder.go +++ b/vendor/github.com/json-iterator/go/reflect_struct_decoder.go @@ -530,8 +530,8 @@ func (decoder *generalStructDecoder) decodeOneField(ptr unsafe.Pointer, iter *It } } if fieldDecoder == nil { - msg := "found unknown field: " + field if decoder.disallowUnknownFields { + msg := "found unknown field: " + field iter.ReportError("ReadObject", msg) } c := iter.nextToken() diff --git a/vendor/github.com/json-iterator/go/skip_tests/jsoniter_skip_test.go b/vendor/github.com/json-iterator/go/skip_tests/jsoniter_skip_test.go index bf054d1c30..785899a9e7 100644 --- a/vendor/github.com/json-iterator/go/skip_tests/jsoniter_skip_test.go +++ b/vendor/github.com/json-iterator/go/skip_tests/jsoniter_skip_test.go @@ -105,6 +105,15 @@ func Test_skip_and_return_bytes_with_reader(t *testing.T) { should.Equal(`{"a" : [{"stream": "c"}], "d": 102 }`, string(skipped)) } +func Test_append_skip_and_return_bytes_with_reader(t *testing.T) { + should := require.New(t) + iter := jsoniter.Parse(jsoniter.ConfigDefault, bytes.NewBufferString(`[ {"a" : [{"stream": "c"}], "d": 102 }, "stream"]`), 4) + iter.ReadArray() + buf := make([]byte, 0, 1024) + buf = iter.SkipAndAppendBytes(buf) + should.Equal(`{"a" : [{"stream": "c"}], "d": 102 }`, string(buf)) +} + func Test_skip_empty(t *testing.T) { should := require.New(t) should.NotNil(jsoniter.Get([]byte("")).LastError()) diff --git a/vendor/github.com/json-iterator/go/stream_float.go b/vendor/github.com/json-iterator/go/stream_float.go index f318d2c59d..826aa594ac 100644 --- a/vendor/github.com/json-iterator/go/stream_float.go +++ b/vendor/github.com/json-iterator/go/stream_float.go @@ -1,6 +1,7 @@ package jsoniter import ( + "fmt" "math" "strconv" ) @@ -13,6 +14,10 @@ func init() { // 
WriteFloat32 write float32 to stream func (stream *Stream) WriteFloat32(val float32) { + if math.IsInf(float64(val), 0) || math.IsNaN(float64(val)) { + stream.Error = fmt.Errorf("unsupported value: %f", val) + return + } abs := math.Abs(float64(val)) fmt := byte('f') // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. @@ -26,6 +31,10 @@ func (stream *Stream) WriteFloat32(val float32) { // WriteFloat32Lossy write float32 to stream with ONLY 6 digits precision although much much faster func (stream *Stream) WriteFloat32Lossy(val float32) { + if math.IsInf(float64(val), 0) || math.IsNaN(float64(val)) { + stream.Error = fmt.Errorf("unsupported value: %f", val) + return + } if val < 0 { stream.writeByte('-') val = -val @@ -54,6 +63,10 @@ func (stream *Stream) WriteFloat32Lossy(val float32) { // WriteFloat64 write float64 to stream func (stream *Stream) WriteFloat64(val float64) { + if math.IsInf(val, 0) || math.IsNaN(val) { + stream.Error = fmt.Errorf("unsupported value: %f", val) + return + } abs := math.Abs(val) fmt := byte('f') // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. 
@@ -67,6 +80,10 @@ func (stream *Stream) WriteFloat64(val float64) { // WriteFloat64Lossy write float64 to stream with ONLY 6 digits precision although much much faster func (stream *Stream) WriteFloat64Lossy(val float64) { + if math.IsInf(val, 0) || math.IsNaN(val) { + stream.Error = fmt.Errorf("unsupported value: %f", val) + return + } if val < 0 { stream.writeByte('-') val = -val diff --git a/vendor/github.com/minio/minio/.travis.yml b/vendor/github.com/minio/minio/.travis.yml index 9387386c5c..2f73884203 100644 --- a/vendor/github.com/minio/minio/.travis.yml +++ b/vendor/github.com/minio/minio/.travis.yml @@ -26,6 +26,7 @@ matrix: - ARCH=x86_64 - CGO_ENABLED=0 - GO111MODULE=on + - GOPROXY=https://proxy.golang.org # Enable build cache # https://restic.net/blog/2018-09-02/travis-build-cache cache: @@ -52,6 +53,7 @@ matrix: - ARCH=x86_64 - CGO_ENABLED=0 - GO111MODULE=on + - GOPROXY=https://proxy.golang.org go: 1.12.5 script: - go build --ldflags="$(go run buildscripts/gen-ldflags.go)" -o %GOPATH%\bin\minio.exe diff --git a/vendor/github.com/minio/minio/Dockerfile.simpleci b/vendor/github.com/minio/minio/Dockerfile.simpleci index 30bdad4beb..2c8bba41ad 100644 --- a/vendor/github.com/minio/minio/Dockerfile.simpleci +++ b/vendor/github.com/minio/minio/Dockerfile.simpleci @@ -8,6 +8,7 @@ WORKDIR /go/src/github.com/minio/minio RUN apt-get update && apt-get install -y jq ENV GO111MODULE=on +ENV GOPROXY=https://proxy.golang.org RUN git config --global http.cookiefile /gitcookie/.gitcookie diff --git a/vendor/github.com/minio/minio/Makefile b/vendor/github.com/minio/minio/Makefile index 1972c69edd..76f755f017 100644 --- a/vendor/github.com/minio/minio/Makefile +++ b/vendor/github.com/minio/minio/Makefile @@ -30,35 +30,35 @@ verifiers: getdeps vet fmt lint staticcheck spelling vet: @echo "Running $@" - @GO111MODULE=on go vet github.com/minio/minio/... + @GOPROXY=https://proxy.golang.org GO111MODULE=on go vet github.com/minio/minio/... 
fmt: @echo "Running $@" - @GO111MODULE=on gofmt -d cmd/ - @GO111MODULE=on gofmt -d pkg/ + @GOPROXY=https://proxy.golang.org GO111MODULE=on gofmt -d cmd/ + @GOPROXY=https://proxy.golang.org GO111MODULE=on gofmt -d pkg/ lint: @echo "Running $@" - @GO111MODULE=on ${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/cmd/... - @GO111MODULE=on ${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/pkg/... + @GOPROXY=https://proxy.golang.org GO111MODULE=on ${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/cmd/... + @GOPROXY=https://proxy.golang.org GO111MODULE=on ${GOPATH}/bin/golint -set_exit_status github.com/minio/minio/pkg/... staticcheck: @echo "Running $@" - @GO111MODULE=on ${GOPATH}/bin/staticcheck github.com/minio/minio/cmd/... - @GO111MODULE=on ${GOPATH}/bin/staticcheck github.com/minio/minio/pkg/... + @GOPROXY=https://proxy.golang.org GO111MODULE=on ${GOPATH}/bin/staticcheck github.com/minio/minio/cmd/... + @GOPROXY=https://proxy.golang.org GO111MODULE=on ${GOPATH}/bin/staticcheck github.com/minio/minio/pkg/... 
spelling: - @GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find cmd/` - @GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find pkg/` - @GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find docs/` - @GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find buildscripts/` - @GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find dockerscripts/` + @GOPROXY=https://proxy.golang.org GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find cmd/` + @GOPROXY=https://proxy.golang.org GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find pkg/` + @GOPROXY=https://proxy.golang.org GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find docs/` + @GOPROXY=https://proxy.golang.org GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find buildscripts/` + @GOPROXY=https://proxy.golang.org GO111MODULE=on ${GOPATH}/bin/misspell -locale US -error `find dockerscripts/` # Builds minio, runs the verifiers then runs the tests. check: test test: verifiers build @echo "Running unit tests" - @GO111MODULE=on CGO_ENABLED=0 go test -tags kqueue ./... 1>/dev/null + @GOPROXY=https://proxy.golang.org GO111MODULE=on CGO_ENABLED=0 go test -tags kqueue ./... 1>/dev/null verify: build @echo "Verifying build" @@ -71,8 +71,8 @@ coverage: build # Builds minio locally. 
build: checks @echo "Building minio binary to './minio'" - @GO111MODULE=on GOFLAGS="" CGO_ENABLED=0 go build -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio 1>/dev/null - @GO111MODULE=on GOFLAGS="" CGO_ENABLED=0 go build -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/dockerscripts/healthcheck $(PWD)/dockerscripts/healthcheck.go 1>/dev/null + @GOPROXY=https://proxy.golang.org GO111MODULE=on GOFLAGS="" CGO_ENABLED=0 go build -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/minio 1>/dev/null + @GOPROXY=https://proxy.golang.org GO111MODULE=on GOFLAGS="" CGO_ENABLED=0 go build -tags kqueue --ldflags $(BUILD_LDFLAGS) -o $(PWD)/dockerscripts/healthcheck $(PWD)/dockerscripts/healthcheck.go 1>/dev/null docker: build @docker build -t $(TAG) . -f Dockerfile.dev diff --git a/vendor/github.com/minio/minio/README.md b/vendor/github.com/minio/minio/README.md index 03a61bf7a5..4746e32f2f 100644 --- a/vendor/github.com/minio/minio/README.md +++ b/vendor/github.com/minio/minio/README.md @@ -1,10 +1,15 @@ # MinIO Quickstart Guide [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) [![Go Report Card](https://goreportcard.com/badge/minio/minio)](https://goreportcard.com/report/minio/minio) [![Docker Pulls](https://img.shields.io/docker/pulls/minio/minio.svg?maxAge=604800)](https://hub.docker.com/r/minio/minio/) -MinIO is an object storage server released under Apache License v2.0. It is compatible with Amazon S3 cloud storage service. It is best suited for storing unstructured data such as photos, videos, log files, backups and container / VM images. Size of an object can range from a few KBs to a maximum of 5TB. +MinIO is an object storage server released under Apache License v2.0. It is compatible[1] with Amazon S3 cloud storage service. It is best suited for storing unstructured data such as photos, videos, log files, backups and container / VM images. Size of an object can range from a few KBs to a maximum of 5TB. 
MinIO server is light enough to be bundled with the application stack, similar to NodeJS, Redis and MySQL. +[1]: MinIO in its default mode is faster and does not calculate MD5Sum unless passed by client. This may lead to incompatibility with few S3 clients like s3ql that heavily depend on MD5Sum. For full compatibility with Amazon S3 API, start MinIO with `--compat` option. +```sh +minio --compat server /data +``` + ## Docker Container ### Stable ``` @@ -86,7 +91,7 @@ service minio start Source installation is only intended for developers and advanced users. If you do not have a working Golang environment, please follow [How to install Golang](https://golang.org/doc/install). Minimum version required is [go1.12](https://golang.org/dl/#stable) ```sh -GO111MODULE=on go get github.com/minio/minio +GOPROXY=https://proxy.golang.org GO111MODULE=on go get github.com/minio/minio ``` ## Allow port access for Firewalls diff --git a/vendor/github.com/minio/minio/buildscripts/cross-compile.sh b/vendor/github.com/minio/minio/buildscripts/cross-compile.sh index edf1786b96..a094a65112 100755 --- a/vendor/github.com/minio/minio/buildscripts/cross-compile.sh +++ b/vendor/github.com/minio/minio/buildscripts/cross-compile.sh @@ -23,6 +23,7 @@ function _build() { export GOOS=$os export GOARCH=$arch export GO111MODULE=on + export GOPROXY=https://proxy.golang.org go build -tags kqueue -o /dev/null } diff --git a/vendor/github.com/minio/minio/buildscripts/go-coverage.sh b/vendor/github.com/minio/minio/buildscripts/go-coverage.sh index 5efcfbb03d..67599fd025 100755 --- a/vendor/github.com/minio/minio/buildscripts/go-coverage.sh +++ b/vendor/github.com/minio/minio/buildscripts/go-coverage.sh @@ -2,4 +2,4 @@ set -e -GO111MODULE=on CGO_ENABLED=0 go test -v -coverprofile=coverage.txt -covermode=atomic ./... +GOPROXY=https://proxy.golang.org GO111MODULE=on CGO_ENABLED=0 go test -v -coverprofile=coverage.txt -covermode=atomic ./... 
diff --git a/vendor/github.com/minio/minio/buildscripts/verify-build.sh b/vendor/github.com/minio/minio/buildscripts/verify-build.sh index fd06a7e61e..fc147a9175 100755 --- a/vendor/github.com/minio/minio/buildscripts/verify-build.sh +++ b/vendor/github.com/minio/minio/buildscripts/verify-build.sh @@ -33,6 +33,7 @@ export ACCESS_KEY="minio" export SECRET_KEY="minio123" export ENABLE_HTTPS=0 export GO111MODULE=on +export GOPROXY=https://proxy.golang.org MINIO_CONFIG_DIR="$WORK_DIR/.minio" MINIO=( "$PWD/minio" --config-dir "$MINIO_CONFIG_DIR" ) diff --git a/vendor/github.com/minio/minio/cmd/admin-handlers.go b/vendor/github.com/minio/minio/cmd/admin-handlers.go index 3a2e0934d8..46cba6872d 100644 --- a/vendor/github.com/minio/minio/cmd/admin-handlers.go +++ b/vendor/github.com/minio/minio/cmd/admin-handlers.go @@ -23,6 +23,7 @@ import ( "encoding/json" "errors" "io" + "io/ioutil" "net/http" "os" "sort" @@ -1019,6 +1020,130 @@ func (a adminAPIHandlers) ListUsers(w http.ResponseWriter, r *http.Request) { writeSuccessResponseJSON(w, econfigData) } +// UpdateGroupMembers - PUT /minio/admin/v1/update-group-members +func (a adminAPIHandlers) UpdateGroupMembers(w http.ResponseWriter, r *http.Request) { + ctx := newContext(r, w, "UpdateGroupMembers") + + objectAPI := validateAdminReq(ctx, w, r) + if objectAPI == nil { + return + } + + defer r.Body.Close() + data, err := ioutil.ReadAll(r.Body) + if err != nil { + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL) + return + } + + var updReq madmin.GroupAddRemove + err = json.Unmarshal(data, &updReq) + if err != nil { + writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrInvalidRequest), r.URL) + return + } + + if updReq.IsRemove { + err = globalIAMSys.RemoveUsersFromGroup(updReq.Group, updReq.Members) + } else { + err = globalIAMSys.AddUsersToGroup(updReq.Group, updReq.Members) + } + + if err != nil { + writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) + return + } + + // Notify all 
other MinIO peers to load group. + for _, nerr := range globalNotificationSys.LoadGroup(updReq.Group) { + if nerr.Err != nil { + logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String()) + logger.LogIf(ctx, nerr.Err) + } + } +} + +// GetGroup - /minio/admin/v1/group?group=mygroup1 +func (a adminAPIHandlers) GetGroup(w http.ResponseWriter, r *http.Request) { + ctx := newContext(r, w, "GetGroup") + + objectAPI := validateAdminReq(ctx, w, r) + if objectAPI == nil { + return + } + + vars := mux.Vars(r) + group := vars["group"] + + gdesc, err := globalIAMSys.GetGroupDescription(group) + if err != nil { + writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) + return + } + + body, err := json.Marshal(gdesc) + if err != nil { + writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) + return + } + + writeSuccessResponseJSON(w, body) +} + +// ListGroups - GET /minio/admin/v1/groups +func (a adminAPIHandlers) ListGroups(w http.ResponseWriter, r *http.Request) { + ctx := newContext(r, w, "ListGroups") + + objectAPI := validateAdminReq(ctx, w, r) + if objectAPI == nil { + return + } + + groups := globalIAMSys.ListGroups() + body, err := json.Marshal(groups) + if err != nil { + writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) + return + } + + writeSuccessResponseJSON(w, body) +} + +// SetGroupStatus - PUT /minio/admin/v1/set-group-status?group=mygroup1&status=enabled +func (a adminAPIHandlers) SetGroupStatus(w http.ResponseWriter, r *http.Request) { + ctx := newContext(r, w, "SetGroupStatus") + + objectAPI := validateAdminReq(ctx, w, r) + if objectAPI == nil { + return + } + + vars := mux.Vars(r) + group := vars["group"] + status := vars["status"] + + var err error + if status == statusEnabled { + err = globalIAMSys.SetGroupStatus(group, true) + } else if status == statusDisabled { + err = globalIAMSys.SetGroupStatus(group, false) + } else { + err = errInvalidArgument + } + if err != nil { + writeErrorResponseJSON(ctx, w, 
toAdminAPIErr(ctx, err), r.URL) + return + } + + // Notify all other MinIO peers to reload user. + for _, nerr := range globalNotificationSys.LoadGroup(group) { + if nerr.Err != nil { + logger.GetReqInfo(ctx).SetTags("peerAddress", nerr.Host.String()) + logger.LogIf(ctx, nerr.Err) + } + } +} + // SetUserStatus - PUT /minio/admin/v1/set-user-status?accessKey=&status=[enabled|disabled] func (a adminAPIHandlers) SetUserStatus(w http.ResponseWriter, r *http.Request) { ctx := newContext(r, w, "SetUserStatus") @@ -1253,7 +1378,7 @@ func (a adminAPIHandlers) SetUserPolicy(w http.ResponseWriter, r *http.Request) return } - if err := globalIAMSys.SetUserPolicy(accessKey, policyName); err != nil { + if err := globalIAMSys.PolicyDBSet(accessKey, policyName, false); err != nil { writeErrorResponseJSON(ctx, w, toAdminAPIErr(ctx, err), r.URL) } @@ -1459,6 +1584,23 @@ func (a adminAPIHandlers) SetConfigKeysHandler(w http.ResponseWriter, r *http.Re writeSuccessResponseHeadersOnly(w) } +// Returns true if the trace.Info should be traced, +// false if certain conditions are not met. +// - input entry is not of the type *trace.Info* +// - errOnly entries are to be traced, not status code 2xx, 3xx. +// - all entries to be traced, if not trace only S3 API requests. +func mustTrace(entry interface{}, trcAll, errOnly bool) bool { + trcInfo, ok := entry.(trace.Info) + if !ok { + return false + } + trace := trcAll || !hasPrefix(trcInfo.ReqInfo.Path, minioReservedBucketPath+SlashSeparator) + if errOnly { + return trace && trcInfo.RespInfo.StatusCode >= http.StatusBadRequest + } + return trace +} + // TraceHandler - POST /minio/admin/v1/trace // ---------- // The handler sends http trace to the connected HTTP client. 
@@ -1474,10 +1616,6 @@ func (a adminAPIHandlers) TraceHandler(w http.ResponseWriter, r *http.Request) { return } - // Avoid reusing tcp connection if read timeout is hit - // This is needed to make r.Context().Done() work as - // expected in case of read timeout - w.Header().Set(xhttp.Connection, "close") w.Header().Set(xhttp.ContentType, "text/event-stream") doneCh := make(chan struct{}) @@ -1487,28 +1625,22 @@ func (a adminAPIHandlers) TraceHandler(w http.ResponseWriter, r *http.Request) { // Use buffered channel to take care of burst sends or slow w.Write() traceCh := make(chan interface{}, 4000) - filter := func(entry interface{}) bool { - trcInfo := entry.(trace.Info) - if trcErr && isHTTPStatusOK(trcInfo.RespInfo.StatusCode) { - return false - } - if trcAll { - return true - } - return !strings.HasPrefix(trcInfo.ReqInfo.Path, minioReservedBucketPath) - - } - remoteHosts := getRemoteHosts(globalEndpoints) - peers, err := getRestClients(remoteHosts) + peers, err := getRestClients(getRemoteHosts(globalEndpoints)) if err != nil { return } - globalHTTPTrace.Subscribe(traceCh, doneCh, filter) + + globalHTTPTrace.Subscribe(traceCh, doneCh, func(entry interface{}) bool { + return mustTrace(entry, trcAll, trcErr) + }) for _, peer := range peers { peer.Trace(traceCh, doneCh, trcAll, trcErr) } + keepAliveTicker := time.NewTicker(500 * time.Millisecond) + defer keepAliveTicker.Stop() + enc := json.NewEncoder(w) for { select { @@ -1517,8 +1649,11 @@ func (a adminAPIHandlers) TraceHandler(w http.ResponseWriter, r *http.Request) { return } w.(http.Flusher).Flush() - case <-r.Context().Done(): - return + case <-keepAliveTicker.C: + if _, err := w.Write([]byte(" ")); err != nil { + return + } + w.(http.Flusher).Flush() case <-GlobalServiceDoneCh: return } diff --git a/vendor/github.com/minio/minio/cmd/admin-handlers_test.go b/vendor/github.com/minio/minio/cmd/admin-handlers_test.go index 81e95e632d..e06dda2002 100644 --- 
a/vendor/github.com/minio/minio/cmd/admin-handlers_test.go +++ b/vendor/github.com/minio/minio/cmd/admin-handlers_test.go @@ -83,7 +83,9 @@ var ( "durable": false, "internal": false, "noWait": false, - "autoDeleted": false + "autoDeleted": false, + "queueDir": "", + "queueLimit": 0 } }, "elasticsearch": { @@ -139,7 +141,9 @@ var ( "port": "", "user": "", "password": "", - "database": "" + "database": "", + "queueDir": "", + "queueLimit": 0 } }, "nats": { @@ -152,6 +156,8 @@ var ( "token": "", "secure": false, "pingInterval": 0, + "queueDir": "", + "queueLimit": 0, "streaming": { "enable": false, "clusterID": "", @@ -183,7 +189,9 @@ var ( "port": "", "user": "", "password": "", - "database": "" + "database": "", + "queueDir": "", + "queueLimit": 0 } }, "redis": { @@ -192,7 +200,9 @@ var ( "format": "namespace", "address": "", "password": "", - "key": "" + "key": "", + "queueDir": "", + "queueLimit": 0 } }, "webhook": { diff --git a/vendor/github.com/minio/minio/cmd/admin-heal-ops.go b/vendor/github.com/minio/minio/cmd/admin-heal-ops.go index 4935ea0e32..8617d2ffcd 100644 --- a/vendor/github.com/minio/minio/cmd/admin-heal-ops.go +++ b/vendor/github.com/minio/minio/cmd/admin-heal-ops.go @@ -587,9 +587,9 @@ func (h *healSequence) healItemsFromSourceCh() error { var itemType madmin.HealItemType switch { - case path == "/": + case path == SlashSeparator: itemType = madmin.HealItemMetadata - case !strings.Contains(path, "/"): + case !strings.Contains(path, SlashSeparator): itemType = madmin.HealItemBucket default: itemType = madmin.HealItemObject @@ -693,7 +693,7 @@ func (h *healSequence) healDiskFormat() error { return errServerNotInitialized } - return h.queueHealTask("/", madmin.HealItemMetadata) + return h.queueHealTask(SlashSeparator, madmin.HealItemMetadata) } // healBuckets - check for all buckets heal or just particular bucket. 
diff --git a/vendor/github.com/minio/minio/cmd/admin-router.go b/vendor/github.com/minio/minio/cmd/admin-router.go index 63bbaaaebf..9ff9d4eb36 100644 --- a/vendor/github.com/minio/minio/cmd/admin-router.go +++ b/vendor/github.com/minio/minio/cmd/admin-router.go @@ -110,6 +110,18 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool) // List users adminV1Router.Methods(http.MethodGet).Path("/list-users").HandlerFunc(httpTraceHdrs(adminAPI.ListUsers)) + // Add/Remove members from group + adminV1Router.Methods(http.MethodPut).Path("/update-group-members").HandlerFunc(httpTraceHdrs(adminAPI.UpdateGroupMembers)) + + // Get Group + adminV1Router.Methods(http.MethodGet).Path("/group").HandlerFunc(httpTraceHdrs(adminAPI.GetGroup)).Queries("group", "{group:.*}") + + // List Groups + adminV1Router.Methods(http.MethodGet).Path("/groups").HandlerFunc(httpTraceHdrs(adminAPI.ListGroups)) + + // Set Group Status + adminV1Router.Methods(http.MethodPut).Path("/set-group-status").HandlerFunc(httpTraceHdrs(adminAPI.SetGroupStatus)).Queries("group", "{group:.*}").Queries("status", "{status:.*}") + // List policies adminV1Router.Methods(http.MethodGet).Path("/list-canned-policies").HandlerFunc(httpTraceHdrs(adminAPI.ListCannedPolicies)) } diff --git a/vendor/github.com/minio/minio/cmd/api-errors.go b/vendor/github.com/minio/minio/cmd/api-errors.go index 2bac435360..a929da94dc 100644 --- a/vendor/github.com/minio/minio/cmd/api-errors.go +++ b/vendor/github.com/minio/minio/cmd/api-errors.go @@ -203,6 +203,8 @@ const ( ErrMalformedJSON ErrAdminNoSuchUser + ErrAdminNoSuchGroup + ErrAdminGroupNotEmpty ErrAdminNoSuchPolicy ErrAdminInvalidArgument ErrAdminInvalidAccessKey @@ -923,6 +925,16 @@ var errorCodes = errorCodeMap{ Description: "The specified user does not exist.", HTTPStatusCode: http.StatusNotFound, }, + ErrAdminNoSuchGroup: { + Code: "XMinioAdminNoSuchGroup", + Description: "The specified group does not exist.", + HTTPStatusCode: http.StatusNotFound, + 
}, + ErrAdminGroupNotEmpty: { + Code: "XMinioAdminGroupNotEmpty", + Description: "The specified group is not empty - cannot remove it.", + HTTPStatusCode: http.StatusBadRequest, + }, ErrAdminNoSuchPolicy: { Code: "XMinioAdminNoSuchPolicy", Description: "The canned policy does not exist.", @@ -1500,6 +1512,10 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) { apiErr = ErrAdminInvalidArgument case errNoSuchUser: apiErr = ErrAdminNoSuchUser + case errNoSuchGroup: + apiErr = ErrAdminNoSuchGroup + case errGroupNotEmpty: + apiErr = ErrAdminGroupNotEmpty case errNoSuchPolicy: apiErr = ErrAdminNoSuchPolicy case errSignatureMismatch: diff --git a/vendor/github.com/minio/minio/cmd/api-resources_test.go b/vendor/github.com/minio/minio/cmd/api-resources_test.go index 10140c2dfe..9b0c98799a 100644 --- a/vendor/github.com/minio/minio/cmd/api-resources_test.go +++ b/vendor/github.com/minio/minio/cmd/api-resources_test.go @@ -36,7 +36,7 @@ func TestListObjectsV2Resources(t *testing.T) { "prefix": []string{"photos/"}, "continuation-token": []string{"token"}, "start-after": []string{"start-after"}, - "delimiter": []string{"/"}, + "delimiter": []string{SlashSeparator}, "fetch-owner": []string{"true"}, "max-keys": []string{"100"}, "encoding-type": []string{"gzip"}, @@ -44,7 +44,7 @@ func TestListObjectsV2Resources(t *testing.T) { prefix: "photos/", token: "token", startAfter: "start-after", - delimiter: "/", + delimiter: SlashSeparator, fetchOwner: true, maxKeys: 100, encodingType: "gzip", @@ -55,14 +55,14 @@ func TestListObjectsV2Resources(t *testing.T) { "prefix": []string{"photos/"}, "continuation-token": []string{"token"}, "start-after": []string{"start-after"}, - "delimiter": []string{"/"}, + "delimiter": []string{SlashSeparator}, "fetch-owner": []string{"true"}, "encoding-type": []string{"gzip"}, }, prefix: "photos/", token: "token", startAfter: "start-after", - delimiter: "/", + delimiter: SlashSeparator, fetchOwner: true, maxKeys: 1000, 
encodingType: "gzip", @@ -73,7 +73,7 @@ func TestListObjectsV2Resources(t *testing.T) { "prefix": []string{"photos/"}, "continuation-token": []string{""}, "start-after": []string{"start-after"}, - "delimiter": []string{"/"}, + "delimiter": []string{SlashSeparator}, "fetch-owner": []string{"true"}, "encoding-type": []string{"gzip"}, }, @@ -130,13 +130,13 @@ func TestListObjectsV1Resources(t *testing.T) { values: url.Values{ "prefix": []string{"photos/"}, "marker": []string{"test"}, - "delimiter": []string{"/"}, + "delimiter": []string{SlashSeparator}, "max-keys": []string{"100"}, "encoding-type": []string{"gzip"}, }, prefix: "photos/", marker: "test", - delimiter: "/", + delimiter: SlashSeparator, maxKeys: 100, encodingType: "gzip", }, @@ -144,12 +144,12 @@ func TestListObjectsV1Resources(t *testing.T) { values: url.Values{ "prefix": []string{"photos/"}, "marker": []string{"test"}, - "delimiter": []string{"/"}, + "delimiter": []string{SlashSeparator}, "encoding-type": []string{"gzip"}, }, prefix: "photos/", marker: "test", - delimiter: "/", + delimiter: SlashSeparator, maxKeys: 1000, encodingType: "gzip", }, diff --git a/vendor/github.com/minio/minio/cmd/api-response.go b/vendor/github.com/minio/minio/cmd/api-response.go index d0964ff702..72fc017190 100644 --- a/vendor/github.com/minio/minio/cmd/api-response.go +++ b/vendor/github.com/minio/minio/cmd/api-response.go @@ -293,14 +293,14 @@ func getObjectLocation(r *http.Request, domains []string, bucket, object string) } u := &url.URL{ Host: r.Host, - Path: path.Join(slashSeparator, bucket, object), + Path: path.Join(SlashSeparator, bucket, object), Scheme: proto, } // If domain is set then we need to use bucket DNS style. for _, domain := range domains { if strings.Contains(r.Host, domain) { u.Host = bucket + "." 
+ r.Host - u.Path = path.Join(slashSeparator, object) + u.Path = path.Join(SlashSeparator, object) break } } diff --git a/vendor/github.com/minio/minio/cmd/api-router.go b/vendor/github.com/minio/minio/cmd/api-router.go index 328baa9057..18ead4f37a 100644 --- a/vendor/github.com/minio/minio/cmd/api-router.go +++ b/vendor/github.com/minio/minio/cmd/api-router.go @@ -48,7 +48,7 @@ func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool) } // API Router - apiRouter := router.PathPrefix("/").Subrouter() + apiRouter := router.PathPrefix(SlashSeparator).Subrouter() var routers []*mux.Router for _, domainName := range globalDomainNames { routers = append(routers, apiRouter.Host("{bucket:.+}."+domainName).Subrouter()) @@ -157,7 +157,7 @@ func registerAPIRouter(router *mux.Router, encryptionEnabled, allowSSEKMS bool) /// Root operation // ListBuckets - apiRouter.Methods(http.MethodGet).Path("/").HandlerFunc(httpTraceAll(api.ListBucketsHandler)) + apiRouter.Methods(http.MethodGet).Path(SlashSeparator).HandlerFunc(httpTraceAll(api.ListBucketsHandler)) // If none of the routes match. 
apiRouter.NotFoundHandler = http.HandlerFunc(httpTraceAll(notFoundHandler)) diff --git a/vendor/github.com/minio/minio/cmd/auth-handler_test.go b/vendor/github.com/minio/minio/cmd/auth-handler_test.go index 75712295ed..858f12b1c8 100644 --- a/vendor/github.com/minio/minio/cmd/auth-handler_test.go +++ b/vendor/github.com/minio/minio/cmd/auth-handler_test.go @@ -44,7 +44,7 @@ func TestGetRequestAuthType(t *testing.T) { URL: &url.URL{ Host: "127.0.0.1:9000", Scheme: httpScheme, - Path: "/", + Path: SlashSeparator, }, Header: http.Header{ "Authorization": []string{"AWS4-HMAC-SHA256 "}, @@ -62,7 +62,7 @@ func TestGetRequestAuthType(t *testing.T) { URL: &url.URL{ Host: "127.0.0.1:9000", Scheme: httpScheme, - Path: "/", + Path: SlashSeparator, }, Header: http.Header{ "Authorization": []string{"Bearer 12313123"}, @@ -77,7 +77,7 @@ func TestGetRequestAuthType(t *testing.T) { URL: &url.URL{ Host: "127.0.0.1:9000", Scheme: httpScheme, - Path: "/", + Path: SlashSeparator, }, Header: http.Header{ "Authorization": []string{""}, @@ -92,7 +92,7 @@ func TestGetRequestAuthType(t *testing.T) { URL: &url.URL{ Host: "127.0.0.1:9000", Scheme: httpScheme, - Path: "/", + Path: SlashSeparator, RawQuery: "X-Amz-Credential=EXAMPLEINVALIDEXAMPL%2Fs3%2F20160314%2Fus-east-1", }, }, @@ -105,7 +105,7 @@ func TestGetRequestAuthType(t *testing.T) { URL: &url.URL{ Host: "127.0.0.1:9000", Scheme: httpScheme, - Path: "/", + Path: SlashSeparator, }, Header: http.Header{ "Content-Type": []string{"multipart/form-data"}, diff --git a/vendor/github.com/minio/minio/cmd/bucket-handlers.go b/vendor/github.com/minio/minio/cmd/bucket-handlers.go index ea3892245e..62517756af 100644 --- a/vendor/github.com/minio/minio/cmd/bucket-handlers.go +++ b/vendor/github.com/minio/minio/cmd/bucket-handlers.go @@ -650,7 +650,7 @@ func (api objectAPIHandlers) PostPolicyBucketHandler(w http.ResponseWriter, r *h return } if objectAPI.IsEncryptionSupported() { - if hasServerSideEncryptionHeader(formValues) && !hasSuffix(object, 
slashSeparator) { // handle SSE-C and SSE-S3 requests + if hasServerSideEncryptionHeader(formValues) && !hasSuffix(object, SlashSeparator) { // handle SSE-C and SSE-S3 requests var reader io.Reader var key []byte if crypto.SSEC.IsRequested(formValues) { diff --git a/vendor/github.com/minio/minio/cmd/bucket-handlers_test.go b/vendor/github.com/minio/minio/cmd/bucket-handlers_test.go index 78ae30dee5..6971d9f17f 100644 --- a/vendor/github.com/minio/minio/cmd/bucket-handlers_test.go +++ b/vendor/github.com/minio/minio/cmd/bucket-handlers_test.go @@ -69,7 +69,7 @@ func testGetBucketLocationHandler(obj ObjectLayer, instanceType, bucketName stri expectedRespStatus: http.StatusForbidden, locationResponse: []byte(""), errorResponse: APIErrorResponse{ - Resource: "/" + bucketName + "/", + Resource: SlashSeparator + bucketName + SlashSeparator, Code: "InvalidAccessKeyId", Message: "The access key ID you provided does not exist in our records.", }, @@ -394,7 +394,7 @@ func testListMultipartUploadsHandler(obj ObjectLayer, instanceType, bucketName s prefix: "", keyMarker: "", uploadIDMarker: "", - delimiter: "/", + delimiter: SlashSeparator, maxUploads: "100", accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, diff --git a/vendor/github.com/minio/minio/cmd/bucket-notification-handlers.go b/vendor/github.com/minio/minio/cmd/bucket-notification-handlers.go index d339b21840..29120e758a 100644 --- a/vendor/github.com/minio/minio/cmd/bucket-notification-handlers.go +++ b/vendor/github.com/minio/minio/cmd/bucket-notification-handlers.go @@ -17,10 +17,12 @@ package cmd import ( + "bytes" "encoding/xml" "errors" "io" "net/http" + "path" "github.com/gorilla/mux" xhttp "github.com/minio/minio/cmd/http" @@ -49,6 +51,7 @@ func (api objectAPIHandlers) GetBucketNotificationHandler(w http.ResponseWriter, vars := mux.Vars(r) bucketName := vars["bucket"] + var config *event.Config objAPI := api.ObjectAPI() if objAPI == nil { @@ -72,24 +75,31 @@ func (api objectAPIHandlers) 
GetBucketNotificationHandler(w http.ResponseWriter, return } - // Attempt to successfully load notification config. - nConfig, err := readNotificationConfig(ctx, objAPI, bucketName) + // Construct path to notification.xml for the given bucket. + configFile := path.Join(bucketConfigPrefix, bucketName, bucketNotificationConfig) + + configData, err := readConfig(ctx, objAPI, configFile) if err != nil { - // Ignore errNoSuchNotifications to comply with AWS S3. - if err != errNoSuchNotifications { + if err != errConfigNotFound { + writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) + return + } + config = &event.Config{} + } else { + if err = xml.NewDecoder(bytes.NewReader(configData)).Decode(&config); err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) return } - - nConfig = &event.Config{} } + config.SetRegion(globalServerConfig.GetRegion()) + // If xml namespace is empty, set a default value before returning. - if nConfig.XMLNS == "" { - nConfig.XMLNS = "http://s3.amazonaws.com/doc/2006-03-01/" + if config.XMLNS == "" { + config.XMLNS = "http://s3.amazonaws.com/doc/2006-03-01/" } - notificationBytes, err := xml.Marshal(nConfig) + notificationBytes, err := xml.Marshal(config) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) return @@ -143,9 +153,10 @@ func (api objectAPIHandlers) PutBucketNotificationHandler(w http.ResponseWriter, if event.IsEventError(err) { apiErr = toAPIError(ctx, err) } - - writeErrorResponse(ctx, w, apiErr, r.URL, guessIsBrowserReq(r)) - return + if _, ok := err.(*event.ErrARNNotFound); !ok { + writeErrorResponse(ctx, w, apiErr, r.URL, guessIsBrowserReq(r)) + return + } } if err = saveNotificationConfig(ctx, objectAPI, bucketName, config); err != nil { diff --git a/vendor/github.com/minio/minio/cmd/certs.go b/vendor/github.com/minio/minio/cmd/certs.go index ef263e83b7..c886d72b7a 100644 --- a/vendor/github.com/minio/minio/cmd/certs.go +++ 
b/vendor/github.com/minio/minio/cmd/certs.go @@ -87,7 +87,7 @@ func getRootCAs(certsCAsDir string) (*x509.CertPool, error) { // Load all custom CA files. for _, fi := range fis { // Skip all directories. - if hasSuffix(fi, slashSeparator) { + if hasSuffix(fi, SlashSeparator) { continue } caCert, err := ioutil.ReadFile(pathJoin(certsCAsDir, fi)) diff --git a/vendor/github.com/minio/minio/cmd/common-main.go b/vendor/github.com/minio/minio/cmd/common-main.go index 9fb2fbd000..86baf12a51 100644 --- a/vendor/github.com/minio/minio/cmd/common-main.go +++ b/vendor/github.com/minio/minio/cmd/common-main.go @@ -38,6 +38,25 @@ import ( xnet "github.com/minio/minio/pkg/net" ) +func verifyObjectLayerFeatures(name string, objAPI ObjectLayer) { + if (globalAutoEncryption || GlobalKMS != nil) && !objAPI.IsEncryptionSupported() { + logger.Fatal(errInvalidArgument, + "Encryption support is requested but '%s' does not support encryption", name) + } + + if strings.HasPrefix(name, "gateway") { + if GlobalGatewaySSE.IsSet() && GlobalKMS == nil { + uiErr := uiErrInvalidGWSSEEnvValue(nil).Msg("MINIO_GATEWAY_SSE set but KMS is not configured") + logger.Fatal(uiErr, "Unable to start gateway with SSE") + } + } + + if globalIsCompressionEnabled && !objAPI.IsCompressionSupported() { + logger.Fatal(errInvalidArgument, + "Compression support is requested but '%s' does not support compression", name) + } +} + // Check for updates and print a notification message func checkUpdate(mode string) { // Its OK to ignore any errors during doUpdate() here. 
diff --git a/vendor/github.com/minio/minio/cmd/config-common.go b/vendor/github.com/minio/minio/cmd/config-common.go index 18040eccb5..8bb7dffe74 100644 --- a/vendor/github.com/minio/minio/cmd/config-common.go +++ b/vendor/github.com/minio/minio/cmd/config-common.go @@ -20,9 +20,7 @@ import ( "bytes" "context" "errors" - "fmt" - etcd "github.com/coreos/etcd/clientv3" "github.com/minio/minio/cmd/logger" "github.com/minio/minio/pkg/hash" ) @@ -51,38 +49,10 @@ func readConfig(ctx context.Context, objAPI ObjectLayer, configFile string) ([]b return buffer.Bytes(), nil } -func deleteConfigEtcd(ctx context.Context, client *etcd.Client, configFile string) error { - timeoutCtx, cancel := context.WithTimeout(ctx, defaultContextTimeout) - defer cancel() - - _, err := client.Delete(timeoutCtx, configFile) - if err != nil { - if err == context.DeadlineExceeded { - return fmt.Errorf("etcd setup is unreachable, please check your endpoints %s", - client.Endpoints()) - } - return fmt.Errorf("unexpected error %s returned by etcd setup, please check your endpoints %s", - err, client.Endpoints()) - } - return nil -} - func deleteConfig(ctx context.Context, objAPI ObjectLayer, configFile string) error { return objAPI.DeleteObject(ctx, minioMetaBucket, configFile) } -func saveConfigEtcd(ctx context.Context, client *etcd.Client, configFile string, data []byte) error { - timeoutCtx, cancel := context.WithTimeout(ctx, defaultContextTimeout) - defer cancel() - _, err := client.Put(timeoutCtx, configFile, string(data)) - if err == context.DeadlineExceeded { - return fmt.Errorf("etcd setup is unreachable, please check your endpoints %s", client.Endpoints()) - } else if err != nil { - return fmt.Errorf("unexpected error %s returned by etcd setup, please check your endpoints %s", err, client.Endpoints()) - } - return nil -} - func saveConfig(ctx context.Context, objAPI ObjectLayer, configFile string, data []byte) error { hashReader, err := hash.NewReader(bytes.NewReader(data), int64(len(data)), 
"", getSHA256Hash(data), int64(len(data)), globalCLIContext.StrictS3Compat) if err != nil { @@ -93,29 +63,6 @@ func saveConfig(ctx context.Context, objAPI ObjectLayer, configFile string, data return err } -func readConfigEtcd(ctx context.Context, client *etcd.Client, configFile string) ([]byte, error) { - timeoutCtx, cancel := context.WithTimeout(ctx, defaultContextTimeout) - defer cancel() - resp, err := client.Get(timeoutCtx, configFile) - if err != nil { - if err == context.DeadlineExceeded { - return nil, fmt.Errorf("etcd setup is unreachable, please check your endpoints %s", - client.Endpoints()) - } - return nil, fmt.Errorf("unexpected error %s returned by etcd setup, please check your endpoints %s", - err, client.Endpoints()) - } - if resp.Count == 0 { - return nil, errConfigNotFound - } - for _, ev := range resp.Kvs { - if string(ev.Key) == configFile { - return ev.Value, nil - } - } - return nil, errConfigNotFound -} - func checkConfig(ctx context.Context, objAPI ObjectLayer, configFile string) error { if _, err := objAPI.GetObjectInfo(ctx, minioMetaBucket, configFile, ObjectOptions{}); err != nil { // Treat object not found as config not found. 
diff --git a/vendor/github.com/minio/minio/cmd/config-current.go b/vendor/github.com/minio/minio/cmd/config-current.go index b9591dd3e3..e61aef2f0f 100644 --- a/vendor/github.com/minio/minio/cmd/config-current.go +++ b/vendor/github.com/minio/minio/cmd/config-current.go @@ -30,7 +30,7 @@ import ( "github.com/minio/minio/pkg/auth" "github.com/minio/minio/pkg/event" "github.com/minio/minio/pkg/event/target" - "github.com/minio/minio/pkg/iam/policy" + iampolicy "github.com/minio/minio/pkg/iam/policy" "github.com/minio/minio/pkg/iam/validator" xnet "github.com/minio/minio/pkg/net" ) @@ -281,17 +281,27 @@ func (s *serverConfig) loadFromEnvs() { } if jwksURL, ok := os.LookupEnv("MINIO_IAM_JWKS_URL"); ok { - if u, err := xnet.ParseURL(jwksURL); err == nil { - s.OpenID.JWKS.URL = u - logger.FatalIf(s.OpenID.JWKS.PopulatePublicKey(), "Unable to populate public key from JWKS URL") + u, err := xnet.ParseURL(jwksURL) + if err != nil { + logger.FatalIf(err, "Unable to parse MINIO_IAM_JWKS_URL %s", jwksURL) } + s.OpenID.JWKS.URL = u } if opaURL, ok := os.LookupEnv("MINIO_IAM_OPA_URL"); ok { - if u, err := xnet.ParseURL(opaURL); err == nil { - s.Policy.OPA.URL = u - s.Policy.OPA.AuthToken = os.Getenv("MINIO_IAM_OPA_AUTHTOKEN") + u, err := xnet.ParseURL(opaURL) + if err != nil { + logger.FatalIf(err, "Unable to parse MINIO_IAM_OPA_URL %s", opaURL) } + opaArgs := iampolicy.OpaArgs{ + URL: u, + AuthToken: os.Getenv("MINIO_IAM_OPA_AUTHTOKEN"), + Transport: NewCustomHTTPTransport(), + CloseRespFn: xhttp.DrainBody, + } + logger.FatalIf(opaArgs.Validate(), "Unable to reach MINIO_IAM_OPA_URL %s", opaURL) + s.Policy.OPA.URL = opaArgs.URL + s.Policy.OPA.AuthToken = opaArgs.AuthToken } } @@ -303,7 +313,7 @@ func (s *serverConfig) TestNotificationTargets() error { if !v.Enable { continue } - t, err := target.NewAMQPTarget(k, v) + t, err := target.NewAMQPTarget(k, v, GlobalServiceDoneCh) if err != nil { return fmt.Errorf("amqp(%s): %s", k, err.Error()) } @@ -347,7 +357,7 @@ func (s 
*serverConfig) TestNotificationTargets() error { if !v.Enable { continue } - t, err := target.NewMySQLTarget(k, v) + t, err := target.NewMySQLTarget(k, v, GlobalServiceDoneCh) if err != nil { return fmt.Errorf("mysql(%s): %s", k, err.Error()) } @@ -358,7 +368,7 @@ func (s *serverConfig) TestNotificationTargets() error { if !v.Enable { continue } - t, err := target.NewNATSTarget(k, v) + t, err := target.NewNATSTarget(k, v, GlobalServiceDoneCh) if err != nil { return fmt.Errorf("nats(%s): %s", k, err.Error()) } @@ -380,7 +390,7 @@ func (s *serverConfig) TestNotificationTargets() error { if !v.Enable { continue } - t, err := target.NewPostgreSQLTarget(k, v) + t, err := target.NewPostgreSQLTarget(k, v, GlobalServiceDoneCh) if err != nil { return fmt.Errorf("postgreSQL(%s): %s", k, err.Error()) } @@ -391,7 +401,7 @@ func (s *serverConfig) TestNotificationTargets() error { if !v.Enable { continue } - t, err := target.NewRedisTarget(k, v) + t, err := target.NewRedisTarget(k, v, GlobalServiceDoneCh) if err != nil { return fmt.Errorf("redis(%s): %s", k, err.Error()) } @@ -536,7 +546,7 @@ func (s *serverConfig) loadToCachedConfigs() { globalCacheMaxUse = cacheConf.MaxUse } if err := Environment.LookupKMSConfig(s.KMS); err != nil { - logger.FatalIf(err, "Unable to setup the KMS") + logger.FatalIf(err, "Unable to setup the KMS %s", s.KMS.Vault.Endpoint) } if !globalIsCompressionEnabled { @@ -546,15 +556,22 @@ func (s *serverConfig) loadToCachedConfigs() { globalIsCompressionEnabled = compressionConf.Enabled } + if s.OpenID.JWKS.URL != nil && s.OpenID.JWKS.URL.String() != "" { + logger.FatalIf(s.OpenID.JWKS.PopulatePublicKey(), + "Unable to populate public key from JWKS URL %s", s.OpenID.JWKS.URL) + } + globalIAMValidators = getAuthValidators(s) if s.Policy.OPA.URL != nil && s.Policy.OPA.URL.String() != "" { - globalPolicyOPA = iampolicy.NewOpa(iampolicy.OpaArgs{ + opaArgs := iampolicy.OpaArgs{ URL: s.Policy.OPA.URL, AuthToken: s.Policy.OPA.AuthToken, Transport: 
NewCustomHTTPTransport(), CloseRespFn: xhttp.DrainBody, - }) + } + logger.FatalIf(opaArgs.Validate(), "Unable to reach OPA URL %s", s.Policy.OPA.URL) + globalPolicyOPA = iampolicy.NewOpa(opaArgs) } } @@ -637,7 +654,7 @@ func getNotificationTargets(config *serverConfig) *event.TargetList { } for id, args := range config.Notify.AMQP { if args.Enable { - newTarget, err := target.NewAMQPTarget(id, args) + newTarget, err := target.NewAMQPTarget(id, args, GlobalServiceDoneCh) if err != nil { logger.LogIf(context.Background(), err) continue @@ -696,7 +713,7 @@ func getNotificationTargets(config *serverConfig) *event.TargetList { for id, args := range config.Notify.MySQL { if args.Enable { - newTarget, err := target.NewMySQLTarget(id, args) + newTarget, err := target.NewMySQLTarget(id, args, GlobalServiceDoneCh) if err != nil { logger.LogIf(context.Background(), err) continue @@ -710,7 +727,7 @@ func getNotificationTargets(config *serverConfig) *event.TargetList { for id, args := range config.Notify.NATS { if args.Enable { - newTarget, err := target.NewNATSTarget(id, args) + newTarget, err := target.NewNATSTarget(id, args, GlobalServiceDoneCh) if err != nil { logger.LogIf(context.Background(), err) continue @@ -738,7 +755,7 @@ func getNotificationTargets(config *serverConfig) *event.TargetList { for id, args := range config.Notify.PostgreSQL { if args.Enable { - newTarget, err := target.NewPostgreSQLTarget(id, args) + newTarget, err := target.NewPostgreSQLTarget(id, args, GlobalServiceDoneCh) if err != nil { logger.LogIf(context.Background(), err) continue @@ -752,7 +769,7 @@ func getNotificationTargets(config *serverConfig) *event.TargetList { for id, args := range config.Notify.Redis { if args.Enable { - newTarget, err := target.NewRedisTarget(id, args) + newTarget, err := target.NewRedisTarget(id, args, GlobalServiceDoneCh) if err != nil { logger.LogIf(context.Background(), err) continue diff --git a/vendor/github.com/minio/minio/cmd/config-current_test.go 
b/vendor/github.com/minio/minio/cmd/config-current_test.go index ec4e9c3bbf..c1d65ae35f 100644 --- a/vendor/github.com/minio/minio/cmd/config-current_test.go +++ b/vendor/github.com/minio/minio/cmd/config-current_test.go @@ -185,10 +185,10 @@ func TestValidateConfig(t *testing.T) { {`{"version": "` + v + `", "browser": "on", "browser": "on", "region":"us-east-1", "credential" : {"accessKey":"minio", "secretKey":"minio123"}}`, false}, // Test 11 - Test AMQP - {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "amqp": { "1": { "enable": true, "url": "", "exchange": "", "routingKey": "", "exchangeType": "", "mandatory": false, "immediate": false, "durable": false, "internal": false, "noWait": false, "autoDeleted": false }}}}`, false}, + {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "amqp": { "1": { "enable": true, "url": "", "exchange": "", "routingKey": "", "exchangeType": "", "mandatory": false, "immediate": false, "durable": false, "internal": false, "noWait": false, "autoDeleted": false, "queueDir": "", "queueLimit": 0}}}}`, false}, // Test 12 - Test NATS - {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "nats": { "1": { "enable": true, "address": "", "subject": "", "username": "", "password": "", "token": "", "secure": false, "pingInterval": 0, "streaming": { "enable": false, "clusterID": "", "async": false, "maxPubAcksInflight": 0 } } }}}`, false}, + {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "nats": { "1": { "enable": true, "address": "", "subject": "", "username": "", "password": "", "token": "", "secure": false, "pingInterval": 0, "queueDir": "", "queueLimit": 0, "streaming": { 
"enable": false, "clusterID": "", "async": false, "maxPubAcksInflight": 0 } } }}}`, false}, // Test 13 - Test ElasticSearch {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "url": "", "index": "" } }}}`, false}, @@ -197,7 +197,7 @@ func TestValidateConfig(t *testing.T) { {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "address": "", "password": "", "key": "" } }}}`, false}, // Test 15 - Test PostgreSQL - {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "postgresql": { "1": { "enable": true, "connectionString": "", "table": "", "host": "", "port": "", "user": "", "password": "", "database": "" }}}}`, false}, + {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "postgresql": { "1": { "enable": true, "connectionString": "", "table": "", "host": "", "port": "", "user": "", "password": "", "database": "", "queueDir": "", "queueLimit": 0 }}}}`, false}, // Test 16 - Test Kafka {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "kafka": { "1": { "enable": true, "brokers": null, "topic": "", "queueDir": "", "queueLimit": 0 } }}}`, false}, @@ -206,7 +206,7 @@ func TestValidateConfig(t *testing.T) { {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "webhook": { "1": { "enable": true, "endpoint": "", "queueDir": "", "queueLimit": 0} }}}`, false}, // Test 18 - Test MySQL - {`{"version": "` + v + `", "credential": { "accessKey": "minio", 
"secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mysql": { "1": { "enable": true, "dsnString": "", "table": "", "host": "", "port": "", "user": "", "password": "", "database": "" }}}}`, false}, + {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mysql": { "1": { "enable": true, "dsnString": "", "table": "", "host": "", "port": "", "user": "", "password": "", "database": "", "queueDir": "", "queueLimit": 0 }}}}`, false}, // Test 19 - Test Format for MySQL {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "mysql": { "1": { "enable": true, "dsnString": "", "format": "invalid", "table": "xxx", "host": "10.0.0.1", "port": "3306", "user": "abc", "password": "pqr", "database": "test1" }}}}`, false}, @@ -227,7 +227,7 @@ func TestValidateConfig(t *testing.T) { {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "elasticsearch": { "1": { "enable": true, "format": "namespace", "url": "example.com", "index": "myindex", "queueDir": "", "queueLimit": 0 } }}}`, true}, // Test 25 - Test Format for Redis - {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "format": "invalid", "address": "example.com:80", "password": "xxx", "key": "key1" } }}}`, false}, + {`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "format": "invalid", "address": "example.com:80", "password": "xxx", "key": "key1", "queueDir": "", "queueLimit": 0 } }}}`, false}, // Test 26 - Test valid Format for Redis {`{"version": "` + v + `", "credential": { 
"accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "redis": { "1": { "enable": true, "format": "namespace", "address": "example.com:80", "password": "xxx", "key": "key1" } }}}`, true}, diff --git a/vendor/github.com/minio/minio/cmd/config-migrate.go b/vendor/github.com/minio/minio/cmd/config-migrate.go index 028d22e0b7..8a4159763b 100644 --- a/vendor/github.com/minio/minio/cmd/config-migrate.go +++ b/vendor/github.com/minio/minio/cmd/config-migrate.go @@ -2426,7 +2426,7 @@ func migrateConfigToMinioSys(objAPI ObjectLayer) (err error) { defer func() { if err == nil { if globalEtcdClient != nil { - deleteConfigEtcd(context.Background(), globalEtcdClient, configFile) + deleteKeyEtcd(context.Background(), globalEtcdClient, configFile) } else { // Rename config.json to config.json.deprecated only upon // success of this function. diff --git a/vendor/github.com/minio/minio/cmd/config-migrate_test.go b/vendor/github.com/minio/minio/cmd/config-migrate_test.go index 46083322d3..53c53b2d37 100644 --- a/vendor/github.com/minio/minio/cmd/config-migrate_test.go +++ b/vendor/github.com/minio/minio/cmd/config-migrate_test.go @@ -175,7 +175,7 @@ func TestServerConfigMigrateV2toV33(t *testing.T) { } defer os.RemoveAll(fsDir) - configPath := rootPath + "/" + minioConfigFile + configPath := rootPath + SlashSeparator + minioConfigFile // Create a corrupted config file if err := ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\","), 0644); err != nil { @@ -238,7 +238,7 @@ func TestServerConfigMigrateFaultyConfig(t *testing.T) { defer os.RemoveAll(rootPath) globalConfigDir = &ConfigDir{path: rootPath} - configPath := rootPath + "/" + minioConfigFile + configPath := rootPath + SlashSeparator + minioConfigFile // Create a corrupted config file if err := ioutil.WriteFile(configPath, []byte("{ \"version\":\"2\", \"test\":"), 0644); err != nil { @@ -335,7 +335,7 @@ func TestServerConfigMigrateCorruptedConfig(t *testing.T) { defer 
os.RemoveAll(rootPath) globalConfigDir = &ConfigDir{path: rootPath} - configPath := rootPath + "/" + minioConfigFile + configPath := rootPath + SlashSeparator + minioConfigFile for i := 3; i <= 17; i++ { // Create a corrupted config file diff --git a/vendor/github.com/minio/minio/cmd/config.go b/vendor/github.com/minio/minio/cmd/config.go index e9948540f3..e6be1d82ef 100644 --- a/vendor/github.com/minio/minio/cmd/config.go +++ b/vendor/github.com/minio/minio/cmd/config.go @@ -23,6 +23,7 @@ import ( "path" "runtime" "strings" + "time" "github.com/minio/minio/cmd/logger" "github.com/minio/minio/pkg/quick" @@ -101,6 +102,25 @@ func (sys *ConfigSys) Load(objAPI ObjectLayer) error { return sys.Init(objAPI) } +// WatchConfigNASDisk - watches nas disk on periodic basis. +func (sys *ConfigSys) WatchConfigNASDisk(objAPI ObjectLayer) { + configInterval := globalRefreshIAMInterval + watchDisk := func() { + ticker := time.NewTicker(configInterval) + defer ticker.Stop() + for { + select { + case <-GlobalServiceDoneCh: + return + case <-ticker.C: + loadConfig(objAPI) + } + } + } + // Refresh configSys in background for NAS gateway. + go watchDisk() +} + // Init - initializes config system from config.json. 
func (sys *ConfigSys) Init(objAPI ObjectLayer) error { if objAPI == nil { diff --git a/vendor/github.com/minio/minio/cmd/crypto/error.go b/vendor/github.com/minio/minio/cmd/crypto/error.go index 5e35148c89..d3a3a2c6a5 100644 --- a/vendor/github.com/minio/minio/cmd/crypto/error.go +++ b/vendor/github.com/minio/minio/cmd/crypto/error.go @@ -62,6 +62,8 @@ var ( errInvalidInternalIV = Error{"The internal encryption IV is malformed"} errInvalidInternalSealAlgorithm = Error{"The internal seal algorithm is invalid and not supported"} + + errMissingUpdatedKey = Error{"The key update returned no error but also no sealed key"} ) var ( diff --git a/vendor/github.com/minio/minio/cmd/crypto/kms.go b/vendor/github.com/minio/minio/cmd/crypto/kms.go index 3eb9bf8ef4..53a27252b7 100644 --- a/vendor/github.com/minio/minio/cmd/crypto/kms.go +++ b/vendor/github.com/minio/minio/cmd/crypto/kms.go @@ -86,6 +86,19 @@ type KMS interface { // referenced by the keyID. The provided context must // match the context used to generate the sealed key. UnsealKey(keyID string, sealedKey []byte, context Context) (key [32]byte, err error) + + // UpdateKey re-wraps the sealedKey if the master key, referenced by + // `keyID`, has changed in the meantime. This usually happens when the + // KMS operator performs a key-rotation operation of the master key. + // UpdateKey fails if the provided sealedKey cannot be decrypted using + // the master key referenced by keyID. + // + // UpdateKey makes no guarantees whatsoever about whether the returned + // rotatedKey is actually different from the sealedKey. If nothing has + // changed at the KMS or if the KMS does not support updating generated + // keys this method may behave like a NOP and just return the sealedKey + // itself. 
+ UpdateKey(keyID string, sealedKey []byte, context Context) (rotatedKey []byte, err error) } type masterKeyKMS struct { @@ -126,6 +139,13 @@ func (kms *masterKeyKMS) UnsealKey(keyID string, sealedKey []byte, ctx Context) return key, nil } +func (kms *masterKeyKMS) UpdateKey(keyID string, sealedKey []byte, ctx Context) ([]byte, error) { + if _, err := kms.UnsealKey(keyID, sealedKey, ctx); err != nil { + return nil, err + } + return sealedKey, nil // The master key cannot update data keys -> Do nothing. +} + func (kms *masterKeyKMS) deriveKey(keyID string, context Context) (key [32]byte) { if context == nil { context = Context{} diff --git a/vendor/github.com/minio/minio/cmd/crypto/kms_test.go b/vendor/github.com/minio/minio/cmd/crypto/kms_test.go index 8884ee9d28..8e985d04af 100644 --- a/vendor/github.com/minio/minio/cmd/crypto/kms_test.go +++ b/vendor/github.com/minio/minio/cmd/crypto/kms_test.go @@ -51,11 +51,20 @@ func TestMasterKeyKMS(t *testing.T) { t.Errorf("Test %d: KMS failed to unseal the generated key: %v", i, err) } if err == nil && test.ShouldFail { - t.Errorf("Test %d: KMS unsealed the generated successfully but should have failed", i) + t.Errorf("Test %d: KMS unsealed the generated key successfully but should have failed", i) } if !test.ShouldFail && !bytes.Equal(key[:], unsealedKey[:]) { t.Errorf("Test %d: The generated and unsealed key differ", i) } + + rotatedKey, err := kms.UpdateKey(test.UnsealKeyID, sealedKey, test.UnsealContext) + if err == nil && test.ShouldFail { + t.Errorf("Test %d: KMS updated the generated key successfully but should have failed", i) + } + if !test.ShouldFail && !bytes.Equal(rotatedKey, sealedKey[:]) { + t.Errorf("Test %d: The updated and sealed key differ", i) + } + } } diff --git a/vendor/github.com/minio/minio/cmd/crypto/vault.go b/vendor/github.com/minio/minio/cmd/crypto/vault.go index fd651294af..1ccd397472 100644 --- a/vendor/github.com/minio/minio/cmd/crypto/vault.go +++ 
b/vendor/github.com/minio/minio/cmd/crypto/vault.go @@ -250,3 +250,30 @@ func (v *vaultService) UnsealKey(keyID string, sealedKey []byte, ctx Context) (k copy(key[:], []byte(plainKey)) return key, nil } + +// UpdateKey re-wraps the sealedKey if the master key referenced by the keyID +// has been changed by the KMS operator - i.e. the master key has been rotated. +// If the master key hasn't changed since the sealedKey has been created / updated +// it may return the same sealedKey as rotatedKey. +// +// The context must be same context as the one provided while +// generating the plaintext key / sealedKey. +func (v *vaultService) UpdateKey(keyID string, sealedKey []byte, ctx Context) (rotatedKey []byte, err error) { + var contextStream bytes.Buffer + ctx.WriteTo(&contextStream) + + payload := map[string]interface{}{ + "ciphertext": string(sealedKey), + "context": base64.StdEncoding.EncodeToString(contextStream.Bytes()), + } + s, err := v.client.Logical().Write(fmt.Sprintf("/transit/rewrap/%s", keyID), payload) + if err != nil { + return nil, err + } + ciphertext, ok := s.Data["ciphertext"] + if !ok { + return nil, errMissingUpdatedKey + } + rotatedKey = ciphertext.([]byte) + return rotatedKey, nil +} diff --git a/vendor/github.com/minio/minio/cmd/disk-cache-config.go b/vendor/github.com/minio/minio/cmd/disk-cache-config.go index 16d8292b3e..4297526039 100644 --- a/vendor/github.com/minio/minio/cmd/disk-cache-config.go +++ b/vendor/github.com/minio/minio/cmd/disk-cache-config.go @@ -109,7 +109,7 @@ func parseCacheExcludes(excludes []string) ([]string, error) { if len(e) == 0 { return nil, uiErrInvalidCacheExcludesValue(nil).Msg("cache exclude path (%s) cannot be empty", e) } - if hasPrefix(e, slashSeparator) { + if hasPrefix(e, SlashSeparator) { return nil, uiErrInvalidCacheExcludesValue(nil).Msg("cache exclude pattern (%s) cannot start with / as prefix", e) } } diff --git a/vendor/github.com/minio/minio/cmd/disk-cache-fs.go 
b/vendor/github.com/minio/minio/cmd/disk-cache-fs.go index 18aa2d1c89..36ed852455 100644 --- a/vendor/github.com/minio/minio/cmd/disk-cache-fs.go +++ b/vendor/github.com/minio/minio/cmd/disk-cache-fs.go @@ -182,7 +182,7 @@ func (cfs *cacheFSObjects) purgeTrash() { // Purge cache entries that were not accessed. func (cfs *cacheFSObjects) purge() { - delimiter := slashSeparator + delimiter := SlashSeparator maxKeys := 1000 ctx := context.Background() for { diff --git a/vendor/github.com/minio/minio/cmd/disk-cache.go b/vendor/github.com/minio/minio/cmd/disk-cache.go index c1ab3b398c..679f87b01b 100644 --- a/vendor/github.com/minio/minio/cmd/disk-cache.go +++ b/vendor/github.com/minio/minio/cmd/disk-cache.go @@ -395,7 +395,7 @@ func listDirCacheFactory(isLeaf func(string, string) bool, disks []*cacheFSObjec for i := range entries { if isLeaf(bucket, entries[i]) { - entries[i] = strings.TrimSuffix(entries[i], slashSeparator) + entries[i] = strings.TrimSuffix(entries[i], SlashSeparator) } } @@ -432,7 +432,7 @@ func (c cacheObjects) listCacheObjects(ctx context.Context, bucket, prefix, mark var nextMarker string recursive := true - if delimiter == slashSeparator { + if delimiter == SlashSeparator { recursive = false } walkResultCh, endWalkCh := c.listPool.Release(listParams{bucket, recursive, marker, prefix, false}) @@ -460,7 +460,7 @@ func (c cacheObjects) listCacheObjects(ctx context.Context, bucket, prefix, mark entry := walkResult.entry var objInfo ObjectInfo - if hasSuffix(entry, slashSeparator) { + if hasSuffix(entry, SlashSeparator) { // Object name needs to be full path. 
objInfo.Bucket = bucket objInfo.Name = entry @@ -502,7 +502,7 @@ func (c cacheObjects) listCacheObjects(ctx context.Context, bucket, prefix, mark result = ListObjectsInfo{IsTruncated: !eof} for _, objInfo := range objInfos { result.NextMarker = objInfo.Name - if objInfo.IsDir && delimiter == slashSeparator { + if objInfo.IsDir && delimiter == SlashSeparator { result.Prefixes = append(result.Prefixes, objInfo.Name) continue } diff --git a/vendor/github.com/minio/minio/cmd/disk-usage.go b/vendor/github.com/minio/minio/cmd/disk-usage.go index b14e19510b..c4522cabc0 100644 --- a/vendor/github.com/minio/minio/cmd/disk-usage.go +++ b/vendor/github.com/minio/minio/cmd/disk-usage.go @@ -23,7 +23,7 @@ import ( // getDiskUsage walks the file tree rooted at root, calling usageFn // for each file or directory in the tree, including root. func getDiskUsage(ctx context.Context, root string, usageFn usageFunc) error { - return walk(ctx, root+slashSeparator, usageFn) + return walk(ctx, root+SlashSeparator, usageFn) } type usageFunc func(ctx context.Context, entry string) error @@ -34,7 +34,7 @@ func walk(ctx context.Context, path string, usageFn usageFunc) error { return err } - if !hasSuffix(path, slashSeparator) { + if !hasSuffix(path, SlashSeparator) { return nil } diff --git a/vendor/github.com/minio/minio/cmd/endpoint.go b/vendor/github.com/minio/minio/cmd/endpoint.go index d7ec636fe6..c5f79d9d97 100644 --- a/vendor/github.com/minio/minio/cmd/endpoint.go +++ b/vendor/github.com/minio/minio/cmd/endpoint.go @@ -100,7 +100,7 @@ func (endpoint *Endpoint) UpdateIsLocal() error { func NewEndpoint(arg string) (ep Endpoint, e error) { // isEmptyPath - check whether given path is not empty. 
isEmptyPath := func(path string) bool { - return path == "" || path == "/" || path == `\` + return path == "" || path == SlashSeparator || path == `\` } if isEmptyPath(arg) { @@ -152,7 +152,7 @@ func NewEndpoint(arg string) (ep Endpoint, e error) { return ep, fmt.Errorf("empty or root path is not supported in URL endpoint") } - // On windows having a preceding "/" will cause problems, if the + // On windows having a preceding SlashSeparator will cause problems, if the // command line already has C:/= 0; i-- { p := pfxSlice[i] @@ -401,10 +400,10 @@ func (l *s3EncObjects) ListMultipartUploads(ctx context.Context, bucket string, if e != nil { return } - lmi.KeyMarker = strings.TrimSuffix(lmi.KeyMarker, getGWContentPath("/")) - lmi.NextKeyMarker = strings.TrimSuffix(lmi.NextKeyMarker, getGWContentPath("/")) + lmi.KeyMarker = strings.TrimSuffix(lmi.KeyMarker, getGWContentPath(minio.SlashSeparator)) + lmi.NextKeyMarker = strings.TrimSuffix(lmi.NextKeyMarker, getGWContentPath(minio.SlashSeparator)) for i := range lmi.Uploads { - lmi.Uploads[i].Object = strings.TrimSuffix(lmi.Uploads[i].Object, getGWContentPath("/")) + lmi.Uploads[i].Object = strings.TrimSuffix(lmi.Uploads[i].Object, getGWContentPath(minio.SlashSeparator)) } return } diff --git a/vendor/github.com/minio/minio/cmd/generic-handlers.go b/vendor/github.com/minio/minio/cmd/generic-handlers.go index 6fadccc2bb..f8766ed4c9 100644 --- a/vendor/github.com/minio/minio/cmd/generic-handlers.go +++ b/vendor/github.com/minio/minio/cmd/generic-handlers.go @@ -153,7 +153,7 @@ func containsReservedMetadata(header http.Header) bool { // Reserved bucket. const ( minioReservedBucket = "minio" - minioReservedBucketPath = "/" + minioReservedBucket + minioReservedBucketPath = SlashSeparator + minioReservedBucket ) // Adds redirect rules for incoming requests. @@ -172,10 +172,10 @@ func setBrowserRedirectHandler(h http.Handler) http.Handler { // browser requests. 
func getRedirectLocation(urlPath string) (rLocation string) { if urlPath == minioReservedBucketPath { - rLocation = minioReservedBucketPath + "/" + rLocation = minioReservedBucketPath + SlashSeparator } if contains([]string{ - "/", + SlashSeparator, "/webrpc", "/login", "/favicon.ico", @@ -229,7 +229,7 @@ func guessIsRPCReq(req *http.Request) bool { return false } return req.Method == http.MethodPost && - strings.HasPrefix(req.URL.Path, minioReservedBucketPath+"/") + strings.HasPrefix(req.URL.Path, minioReservedBucketPath+SlashSeparator) } func (h redirectHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { @@ -258,7 +258,7 @@ func setBrowserCacheControlHandler(h http.Handler) http.Handler { func (h cacheControlHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { if r.Method == http.MethodGet && guessIsBrowserReq(r) { // For all browser requests set appropriate Cache-Control policies - if hasPrefix(r.URL.Path, minioReservedBucketPath+"/") { + if hasPrefix(r.URL.Path, minioReservedBucketPath+SlashSeparator) { if hasSuffix(r.URL.Path, ".js") || r.URL.Path == minioReservedBucketPath+"/favicon.ico" { // For assets set cache expiry of one year. For each release, the name // of the asset name will change and hence it can not be served from cache. @@ -276,7 +276,7 @@ func (h cacheControlHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { // Check to allow access to the reserved "bucket" `/minio` for Admin // API requests. func isAdminReq(r *http.Request) bool { - return strings.HasPrefix(r.URL.Path, adminAPIPathPrefix+"/") + return strings.HasPrefix(r.URL.Path, adminAPIPathPrefix+SlashSeparator) } // Adds verification for incoming paths. @@ -596,7 +596,7 @@ const ( // such as ".." and "." 
func hasBadPathComponent(path string) bool { path = strings.TrimSpace(path) - for _, p := range strings.Split(path, slashSeparator) { + for _, p := range strings.Split(path, SlashSeparator) { switch strings.TrimSpace(p) { case dotdotComponent: return true @@ -746,6 +746,9 @@ func setBucketForwardingHandler(h http.Handler) http.Handler { fwd := handlers.NewForwarder(&handlers.Forwarder{ PassHost: true, RoundTripper: NewCustomHTTPTransport(), + Logger: func(err error) { + logger.LogIf(context.Background(), err) + }, }) return bucketForwardingHandler{fwd, h} } diff --git a/vendor/github.com/minio/minio/cmd/generic-handlers_test.go b/vendor/github.com/minio/minio/cmd/generic-handlers_test.go index 24f81334bc..a7f98d78f0 100644 --- a/vendor/github.com/minio/minio/cmd/generic-handlers_test.go +++ b/vendor/github.com/minio/minio/cmd/generic-handlers_test.go @@ -35,12 +35,12 @@ func TestRedirectLocation(t *testing.T) { { // 1. When urlPath is '/minio' urlPath: minioReservedBucketPath, - location: minioReservedBucketPath + "/", + location: minioReservedBucketPath + SlashSeparator, }, { // 2. When urlPath is '/' - urlPath: "/", - location: minioReservedBucketPath + "/", + urlPath: SlashSeparator, + location: minioReservedBucketPath + SlashSeparator, }, { // 3. When urlPath is '/webrpc' diff --git a/vendor/github.com/minio/minio/cmd/handler-utils.go b/vendor/github.com/minio/minio/cmd/handler-utils.go index 2cf381ccb5..981c739c41 100644 --- a/vendor/github.com/minio/minio/cmd/handler-utils.go +++ b/vendor/github.com/minio/minio/cmd/handler-utils.go @@ -95,8 +95,8 @@ func isMetadataReplace(h http.Header) bool { // Splits an incoming path into bucket and object components. func path2BucketAndObject(path string) (bucket, object string) { // Skip the first element if it is '/', split the rest. 
- path = strings.TrimPrefix(path, "/") - pathComponents := strings.SplitN(path, "/", 2) + path = strings.TrimPrefix(path, SlashSeparator) + pathComponents := strings.SplitN(path, SlashSeparator, 2) // Save the bucket and object extracted from path. switch len(pathComponents) { @@ -370,7 +370,7 @@ func getResource(path string, host string, domains []string) (string, error) { continue } bucket := strings.TrimSuffix(host, "."+domain) - return slashSeparator + pathJoin(bucket, path), nil + return SlashSeparator + pathJoin(bucket, path), nil } return path, nil } @@ -394,20 +394,3 @@ func getHostName(r *http.Request) (hostName string) { } return } - -func isHTTPStatusOK(statusCode int) bool { - // List of success status. - var successStatus = []int{ - http.StatusOK, - http.StatusCreated, - http.StatusAccepted, - http.StatusNoContent, - http.StatusPartialContent, - } - for _, okstatus := range successStatus { - if statusCode == okstatus { - return true - } - } - return false -} diff --git a/vendor/github.com/minio/minio/cmd/iam-etcd-store.go b/vendor/github.com/minio/minio/cmd/iam-etcd-store.go new file mode 100644 index 0000000000..b044491754 --- /dev/null +++ b/vendor/github.com/minio/minio/cmd/iam-etcd-store.go @@ -0,0 +1,599 @@ +/* + * MinIO Cloud Storage, (C) 2019 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package cmd + +import ( + "context" + "encoding/json" + "errors" + "path" + "strings" + "sync" + "time" + + etcd "github.com/coreos/etcd/clientv3" + "github.com/coreos/etcd/mvcc/mvccpb" + "github.com/minio/minio-go/v6/pkg/set" + "github.com/minio/minio/cmd/logger" + "github.com/minio/minio/pkg/auth" + iampolicy "github.com/minio/minio/pkg/iam/policy" +) + +var defaultContextTimeout = 30 * time.Second + +func etcdKvsToSet(prefix string, kvs []*mvccpb.KeyValue) set.StringSet { + users := set.NewStringSet() + for _, kv := range kvs { + // Extract user by stripping off the `prefix` value as suffix, + // then strip off the remaining basename to obtain the prefix + // value, usually in the following form. + // + // key := "config/iam/users/newuser/identity.json" + // prefix := "config/iam/users/" + // v := trim(trim(key, prefix), base(key)) == "newuser" + // + user := path.Clean(strings.TrimSuffix(strings.TrimPrefix(string(kv.Key), prefix), path.Base(string(kv.Key)))) + users.Add(user) + } + return users +} + +func etcdKvsToSetPolicyDB(prefix string, kvs []*mvccpb.KeyValue) set.StringSet { + items := set.NewStringSet() + for _, kv := range kvs { + // Extract user item by stripping off prefix and then + // stripping of ".json" suffix. 
+ // + // key := "config/iam/policydb/users/myuser1.json" + // prefix := "config/iam/policydb/users/" + // v := trimSuffix(trimPrefix(key, prefix), ".json") + key := string(kv.Key) + item := path.Clean(strings.TrimSuffix(strings.TrimPrefix(key, prefix), ".json")) + items.Add(item) + } + return items +} + +// IAMEtcdStore implements IAMStorageAPI +type IAMEtcdStore struct { + sync.RWMutex + ctx context.Context + + client *etcd.Client +} + +func newIAMEtcdStore() *IAMEtcdStore { + return &IAMEtcdStore{client: globalEtcdClient} +} + +func (ies *IAMEtcdStore) getContext() context.Context { + ies.RLock() + defer ies.RUnlock() + + if ies.ctx == nil { + return context.Background() + } + return ies.ctx +} + +func (ies *IAMEtcdStore) setContext(ctx context.Context) { + ies.Lock() + defer ies.Unlock() + + ies.ctx = ctx +} + +func (ies *IAMEtcdStore) clearContext() { + ies.Lock() + defer ies.Unlock() + + ies.ctx = nil +} + +func (ies *IAMEtcdStore) saveIAMConfig(item interface{}, path string) error { + data, err := json.Marshal(item) + if err != nil { + return err + } + return saveKeyEtcd(ies.getContext(), ies.client, path, data) +} + +func (ies *IAMEtcdStore) loadIAMConfig(item interface{}, path string) error { + pdata, err := readKeyEtcd(ies.getContext(), ies.client, path) + if err != nil { + return err + } + return json.Unmarshal(pdata, item) +} + +func (ies *IAMEtcdStore) deleteIAMConfig(path string) error { + return deleteKeyEtcd(ies.getContext(), ies.client, path) +} + +func (ies *IAMEtcdStore) migrateUsersConfigToV1(isSTS bool) error { + basePrefix := iamConfigUsersPrefix + if isSTS { + basePrefix = iamConfigSTSPrefix + } + + ctx, cancel := context.WithTimeout(context.Background(), defaultContextTimeout) + defer cancel() + ies.setContext(ctx) + defer ies.clearContext() + r, err := ies.client.Get(ctx, basePrefix, etcd.WithPrefix(), etcd.WithKeysOnly()) + if err != nil { + return err + } + + users := etcdKvsToSet(basePrefix, r.Kvs) + for _, user := range users.ToSlice() 
{ + { + // 1. check if there is a policy file in the old loc. + oldPolicyPath := pathJoin(basePrefix, user, iamPolicyFile) + var policyName string + err := ies.loadIAMConfig(&policyName, oldPolicyPath) + if err != nil { + switch err { + case errConfigNotFound: + // No mapped policy or already migrated. + default: + // corrupt data/read error, etc + } + goto next + } + + // 2. copy policy to new loc. + mp := newMappedPolicy(policyName) + path := getMappedPolicyPath(user, isSTS, false) + if err := ies.saveIAMConfig(mp, path); err != nil { + return err + } + + // 3. delete policy file in old loc. + deleteKeyEtcd(ctx, ies.client, oldPolicyPath) + } + + next: + // 4. check if user identity has old format. + identityPath := pathJoin(basePrefix, user, iamIdentityFile) + var cred auth.Credentials + if err := ies.loadIAMConfig(&cred, identityPath); err != nil { + switch err { + case errConfigNotFound: + // This case should not happen. + default: + // corrupt file or read error + } + continue + } + + // If the file is already in the new format, + // then the parsed auth.Credentials will have + // the zero value for the struct. + var zeroCred auth.Credentials + if cred == zeroCred { + // nothing to do + continue + } + + // Found a id file in old format. Copy value + // into new format and save it. + cred.AccessKey = user + u := newUserIdentity(cred) + if err := ies.saveIAMConfig(u, identityPath); err != nil { + logger.LogIf(context.Background(), err) + return err + } + + // Nothing to delete as identity file location + // has not changed. + } + return nil +} + +func (ies *IAMEtcdStore) migrateToV1() error { + var iamFmt iamFormat + path := getIAMFormatFilePath() + if err := ies.loadIAMConfig(&iamFmt, path); err != nil { + switch err { + case errConfigNotFound: + // Need to migrate to V1. + default: + return errors.New("corrupt IAM format file") + } + } else { + if iamFmt.Version >= iamFormatVersion1 { + // Already migrated to V1 of higher! 
+ return nil + } + // This case should not happen + // (i.e. Version is 0 or negative.) + return errors.New("got an invalid IAM format version") + + } + + // Migrate long-term users + if err := ies.migrateUsersConfigToV1(false); err != nil { + logger.LogIf(context.Background(), err) + return err + } + // Migrate STS users + if err := ies.migrateUsersConfigToV1(true); err != nil { + logger.LogIf(context.Background(), err) + return err + } + // Save iam version file. + if err := ies.saveIAMConfig(newIAMFormatVersion1(), path); err != nil { + logger.LogIf(context.Background(), err) + return err + } + return nil +} + +// Should be called under config migration lock +func (ies *IAMEtcdStore) migrateBackendFormat(objAPI ObjectLayer) error { + if err := ies.migrateToV1(); err != nil { + return err + } + return nil +} + +func (ies *IAMEtcdStore) loadPolicyDoc(policy string, m map[string]iampolicy.Policy) error { + var p iampolicy.Policy + err := ies.loadIAMConfig(&p, getPolicyDocPath(policy)) + if err != nil { + return err + } + m[policy] = p + return nil +} + +func (ies *IAMEtcdStore) loadPolicyDocs(m map[string]iampolicy.Policy) error { + ctx, cancel := context.WithTimeout(context.Background(), defaultContextTimeout) + defer cancel() + ies.setContext(ctx) + defer ies.clearContext() + r, err := ies.client.Get(ctx, iamConfigPoliciesPrefix, etcd.WithPrefix(), etcd.WithKeysOnly()) + if err != nil { + return err + } + + policies := etcdKvsToSet(iamConfigPoliciesPrefix, r.Kvs) + + // Reload config and policies for all policys. + for _, policyName := range policies.ToSlice() { + err = ies.loadPolicyDoc(policyName, m) + if err != nil { + return err + } + } + return nil +} + +func (ies *IAMEtcdStore) loadUser(user string, isSTS bool, m map[string]auth.Credentials) error { + var u UserIdentity + err := ies.loadIAMConfig(&u, getUserIdentityPath(user, isSTS)) + if err != nil { + return err + } + + if u.Credentials.IsExpired() { + // Delete expired identity. 
+ ctx := ies.getContext() + deleteKeyEtcd(ctx, ies.client, getUserIdentityPath(user, isSTS)) + deleteKeyEtcd(ctx, ies.client, getMappedPolicyPath(user, isSTS, false)) + return nil + } + + if u.Credentials.AccessKey == "" { + u.Credentials.AccessKey = user + } + m[user] = u.Credentials + return nil + +} + +func (ies *IAMEtcdStore) loadUsers(isSTS bool, m map[string]auth.Credentials) error { + basePrefix := iamConfigUsersPrefix + if isSTS { + basePrefix = iamConfigSTSPrefix + } + + ctx, cancel := context.WithTimeout(context.Background(), defaultContextTimeout) + defer cancel() + ies.setContext(ctx) + defer ies.clearContext() + r, err := ies.client.Get(ctx, basePrefix, etcd.WithPrefix(), etcd.WithKeysOnly()) + if err != nil { + return err + } + + users := etcdKvsToSet(basePrefix, r.Kvs) + + // Reload config for all users. + for _, user := range users.ToSlice() { + if err = ies.loadUser(user, isSTS, m); err != nil { + return err + } + } + return nil +} + +func (ies *IAMEtcdStore) loadGroup(group string, m map[string]GroupInfo) error { + var gi GroupInfo + err := ies.loadIAMConfig(&gi, getGroupInfoPath(group)) + if err != nil { + return err + } + m[group] = gi + return nil + +} + +func (ies *IAMEtcdStore) loadGroups(m map[string]GroupInfo) error { + ctx, cancel := context.WithTimeout(context.Background(), defaultContextTimeout) + defer cancel() + ies.setContext(ctx) + defer ies.clearContext() + r, err := ies.client.Get(ctx, iamConfigGroupsPrefix, etcd.WithPrefix(), etcd.WithKeysOnly()) + if err != nil { + return err + } + + groups := etcdKvsToSet(iamConfigGroupsPrefix, r.Kvs) + + // Reload config for all groups. 
+ for _, group := range groups.ToSlice() { + if err = ies.loadGroup(group, m); err != nil { + return err + } + } + return nil + +} + +func (ies *IAMEtcdStore) loadMappedPolicy(name string, isSTS, isGroup bool, m map[string]MappedPolicy) error { + var p MappedPolicy + err := ies.loadIAMConfig(&p, getMappedPolicyPath(name, isSTS, isGroup)) + if err != nil { + return err + } + m[name] = p + return nil + +} + +func (ies *IAMEtcdStore) loadMappedPolicies(isSTS, isGroup bool, m map[string]MappedPolicy) error { + ctx, cancel := context.WithTimeout(context.Background(), defaultContextTimeout) + defer cancel() + ies.setContext(ctx) + defer ies.clearContext() + var basePrefix string + switch { + case isSTS: + basePrefix = iamConfigPolicyDBSTSUsersPrefix + case isGroup: + basePrefix = iamConfigPolicyDBGroupsPrefix + default: + basePrefix = iamConfigPolicyDBUsersPrefix + } + r, err := ies.client.Get(ctx, basePrefix, etcd.WithPrefix(), etcd.WithKeysOnly()) + if err != nil { + return err + } + + users := etcdKvsToSetPolicyDB(basePrefix, r.Kvs) + + // Reload config and policies for all users. 
+ for _, user := range users.ToSlice() { + if err = ies.loadMappedPolicy(user, isSTS, isGroup, m); err != nil { + return err + } + } + return nil + +} + +func (ies *IAMEtcdStore) loadAll(sys *IAMSys, objectAPI ObjectLayer) error { + iamUsersMap := make(map[string]auth.Credentials) + iamGroupsMap := make(map[string]GroupInfo) + iamPolicyDocsMap := make(map[string]iampolicy.Policy) + iamUserPolicyMap := make(map[string]MappedPolicy) + iamGroupPolicyMap := make(map[string]MappedPolicy) + + if err := ies.loadPolicyDocs(iamPolicyDocsMap); err != nil { + return err + } + if err := ies.loadUsers(false, iamUsersMap); err != nil { + return err + } + // load STS temp users into the same map + if err := ies.loadUsers(true, iamUsersMap); err != nil { + return err + } + if err := ies.loadGroups(iamGroupsMap); err != nil { + return err + } + + if err := ies.loadMappedPolicies(false, false, iamUserPolicyMap); err != nil { + return err + } + // load STS policy mappings into the same map + if err := ies.loadMappedPolicies(true, false, iamUserPolicyMap); err != nil { + return err + } + // load policies mapped to groups + if err := ies.loadMappedPolicies(false, true, iamGroupPolicyMap); err != nil { + return err + } + + // Sets default canned policies, if none are set. 
+ setDefaultCannedPolicies(iamPolicyDocsMap) + + sys.Lock() + defer sys.Unlock() + + sys.iamUsersMap = iamUsersMap + sys.iamGroupsMap = iamGroupsMap + sys.iamUserPolicyMap = iamUserPolicyMap + sys.iamPolicyDocsMap = iamPolicyDocsMap + sys.iamGroupPolicyMap = iamGroupPolicyMap + sys.buildUserGroupMemberships() + + return nil +} + +func (ies *IAMEtcdStore) savePolicyDoc(policyName string, p iampolicy.Policy) error { + return ies.saveIAMConfig(&p, getPolicyDocPath(policyName)) +} + +func (ies *IAMEtcdStore) saveMappedPolicy(name string, isSTS, isGroup bool, mp MappedPolicy) error { + return ies.saveIAMConfig(mp, getMappedPolicyPath(name, isSTS, isGroup)) +} + +func (ies *IAMEtcdStore) saveUserIdentity(name string, isSTS bool, u UserIdentity) error { + return ies.saveIAMConfig(u, getUserIdentityPath(name, isSTS)) +} + +func (ies *IAMEtcdStore) saveGroupInfo(name string, gi GroupInfo) error { + return ies.saveIAMConfig(gi, getGroupInfoPath(name)) +} + +func (ies *IAMEtcdStore) deletePolicyDoc(name string) error { + return ies.deleteIAMConfig(getPolicyDocPath(name)) +} + +func (ies *IAMEtcdStore) deleteMappedPolicy(name string, isSTS, isGroup bool) error { + return ies.deleteIAMConfig(getMappedPolicyPath(name, isSTS, isGroup)) +} + +func (ies *IAMEtcdStore) deleteUserIdentity(name string, isSTS bool) error { + return ies.deleteIAMConfig(getUserIdentityPath(name, isSTS)) +} + +func (ies *IAMEtcdStore) deleteGroupInfo(name string) error { + return ies.deleteIAMConfig(getGroupInfoPath(name)) +} + +func (ies *IAMEtcdStore) watch(sys *IAMSys) { + watchEtcd := func() { + // Refresh IAMSys with etcd watch. + for { + watchCh := ies.client.Watch(context.Background(), + iamConfigPrefix, etcd.WithPrefix(), etcd.WithKeysOnly()) + select { + case <-GlobalServiceDoneCh: + return + case watchResp, ok := <-watchCh: + if !ok { + time.Sleep(1 * time.Second) + continue + } + if err := watchResp.Err(); err != nil { + logger.LogIf(context.Background(), err) + // log and retry. 
+ time.Sleep(1 * time.Second) + continue + } + for _, event := range watchResp.Events { + sys.Lock() + ies.reloadFromEvent(sys, event) + sys.Unlock() + } + } + } + } + go watchEtcd() +} + +// sys.RLock is held by caller. +func (ies *IAMEtcdStore) reloadFromEvent(sys *IAMSys, event *etcd.Event) { + eventCreate := event.IsModify() || event.IsCreate() + eventDelete := event.Type == etcd.EventTypeDelete + usersPrefix := strings.HasPrefix(string(event.Kv.Key), iamConfigUsersPrefix) + groupsPrefix := strings.HasPrefix(string(event.Kv.Key), iamConfigGroupsPrefix) + stsPrefix := strings.HasPrefix(string(event.Kv.Key), iamConfigSTSPrefix) + policyPrefix := strings.HasPrefix(string(event.Kv.Key), iamConfigPoliciesPrefix) + policyDBUsersPrefix := strings.HasPrefix(string(event.Kv.Key), iamConfigPolicyDBUsersPrefix) + policyDBSTSUsersPrefix := strings.HasPrefix(string(event.Kv.Key), iamConfigPolicyDBSTSUsersPrefix) + + switch { + case eventCreate: + switch { + case usersPrefix: + accessKey := path.Dir(strings.TrimPrefix(string(event.Kv.Key), + iamConfigUsersPrefix)) + ies.loadUser(accessKey, false, sys.iamUsersMap) + case stsPrefix: + accessKey := path.Dir(strings.TrimPrefix(string(event.Kv.Key), + iamConfigSTSPrefix)) + ies.loadUser(accessKey, true, sys.iamUsersMap) + case groupsPrefix: + group := path.Dir(strings.TrimPrefix(string(event.Kv.Key), + iamConfigGroupsPrefix)) + ies.loadGroup(group, sys.iamGroupsMap) + gi := sys.iamGroupsMap[group] + sys.removeGroupFromMembershipsMap(group) + sys.updateGroupMembershipsMap(group, &gi) + case policyPrefix: + policyName := path.Dir(strings.TrimPrefix(string(event.Kv.Key), + iamConfigPoliciesPrefix)) + ies.loadPolicyDoc(policyName, sys.iamPolicyDocsMap) + case policyDBUsersPrefix: + policyMapFile := strings.TrimPrefix(string(event.Kv.Key), + iamConfigPolicyDBUsersPrefix) + user := strings.TrimSuffix(policyMapFile, ".json") + ies.loadMappedPolicy(user, false, false, sys.iamUserPolicyMap) + case policyDBSTSUsersPrefix: + policyMapFile 
:= strings.TrimPrefix(string(event.Kv.Key), + iamConfigPolicyDBSTSUsersPrefix) + user := strings.TrimSuffix(policyMapFile, ".json") + ies.loadMappedPolicy(user, true, false, sys.iamUserPolicyMap) + } + case eventDelete: + switch { + case usersPrefix: + accessKey := path.Dir(strings.TrimPrefix(string(event.Kv.Key), + iamConfigUsersPrefix)) + delete(sys.iamUsersMap, accessKey) + case stsPrefix: + accessKey := path.Dir(strings.TrimPrefix(string(event.Kv.Key), + iamConfigSTSPrefix)) + delete(sys.iamUsersMap, accessKey) + case groupsPrefix: + group := path.Dir(strings.TrimPrefix(string(event.Kv.Key), + iamConfigGroupsPrefix)) + sys.removeGroupFromMembershipsMap(group) + delete(sys.iamGroupsMap, group) + delete(sys.iamGroupPolicyMap, group) + case policyPrefix: + policyName := path.Dir(strings.TrimPrefix(string(event.Kv.Key), + iamConfigPoliciesPrefix)) + delete(sys.iamPolicyDocsMap, policyName) + case policyDBUsersPrefix: + policyMapFile := strings.TrimPrefix(string(event.Kv.Key), + iamConfigPolicyDBUsersPrefix) + user := strings.TrimSuffix(policyMapFile, ".json") + delete(sys.iamUserPolicyMap, user) + case policyDBSTSUsersPrefix: + policyMapFile := strings.TrimPrefix(string(event.Kv.Key), + iamConfigPolicyDBSTSUsersPrefix) + user := strings.TrimSuffix(policyMapFile, ".json") + delete(sys.iamUserPolicyMap, user) + } + } +} diff --git a/vendor/github.com/minio/minio/cmd/iam-object-store.go b/vendor/github.com/minio/minio/cmd/iam-object-store.go new file mode 100644 index 0000000000..4ac39a1f65 --- /dev/null +++ b/vendor/github.com/minio/minio/cmd/iam-object-store.go @@ -0,0 +1,582 @@ +/* + * MinIO Cloud Storage, (C) 2019 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cmd + +import ( + "context" + "encoding/json" + "errors" + "strings" + "sync" + "time" + + "github.com/minio/minio/cmd/logger" + "github.com/minio/minio/pkg/auth" + iampolicy "github.com/minio/minio/pkg/iam/policy" +) + +// IAMObjectStore implements IAMStorageAPI +type IAMObjectStore struct { + // Protect assignment to objAPI + sync.RWMutex + + objAPI ObjectLayer +} + +func newIAMObjectStore() *IAMObjectStore { + return &IAMObjectStore{objAPI: nil} +} + +func (iamOS *IAMObjectStore) getObjectAPI() ObjectLayer { + iamOS.RLock() + defer iamOS.RUnlock() + if iamOS.objAPI != nil { + return iamOS.objAPI + } + return newObjectLayerFn() +} + +func (iamOS *IAMObjectStore) setObjectAPI(objAPI ObjectLayer) { + iamOS.Lock() + defer iamOS.Unlock() + iamOS.objAPI = objAPI +} + +func (iamOS *IAMObjectStore) clearObjectAPI() { + iamOS.Lock() + defer iamOS.Unlock() + iamOS.objAPI = nil +} + +// Migrate users directory in a single scan. +// +// 1. Migrate user policy from: +// +// `iamConfigUsersPrefix + "/policy.json"` +// +// to: +// +// `iamConfigPolicyDBUsersPrefix + ".json"`. +// +// 2. Add versioning to the policy json file in the new +// location. +// +// 3. Migrate user identity json file to include version info. 
+func (iamOS *IAMObjectStore) migrateUsersConfigToV1(isSTS bool) error { + basePrefix := iamConfigUsersPrefix + if isSTS { + basePrefix = iamConfigSTSPrefix + } + + objAPI := iamOS.getObjectAPI() + + doneCh := make(chan struct{}) + defer close(doneCh) + for item := range listIAMConfigItems(objAPI, basePrefix, true, doneCh) { + if item.Err != nil { + return item.Err + } + + user := item.Item + + { + // 1. check if there is policy file in old location. + oldPolicyPath := pathJoin(basePrefix, user, iamPolicyFile) + var policyName string + if err := iamOS.loadIAMConfig(&policyName, oldPolicyPath); err != nil { + switch err { + case errConfigNotFound: + // This case means it is already + // migrated or there is no policy on + // user. + default: + // File may be corrupt or network error + } + + // Nothing to do on the policy file, + // so move on to check the id file. + goto next + } + + // 2. copy policy file to new location. + mp := newMappedPolicy(policyName) + if err := iamOS.saveMappedPolicy(user, isSTS, false, mp); err != nil { + return err + } + + // 3. delete policy file from old + // location. Ignore error. + iamOS.deleteIAMConfig(oldPolicyPath) + } + next: + // 4. check if user identity has old format. + identityPath := pathJoin(basePrefix, user, iamIdentityFile) + var cred auth.Credentials + if err := iamOS.loadIAMConfig(&cred, identityPath); err != nil { + switch err.(type) { + case ObjectNotFound: + // This should not happen. + default: + // File may be corrupt or network error + } + continue + } + + // If the file is already in the new format, + // then the parsed auth.Credentials will have + // the zero value for the struct. + var zeroCred auth.Credentials + if cred == zeroCred { + // nothing to do + continue + } + + // Found a id file in old format. Copy value + // into new format and save it. 
+ cred.AccessKey = user + u := newUserIdentity(cred) + if err := iamOS.saveIAMConfig(u, identityPath); err != nil { + logger.LogIf(context.Background(), err) + return err + } + + // Nothing to delete as identity file location + // has not changed. + } + return nil + +} + +func (iamOS *IAMObjectStore) migrateToV1() error { + var iamFmt iamFormat + path := getIAMFormatFilePath() + if err := iamOS.loadIAMConfig(&iamFmt, path); err != nil { + switch err { + case errConfigNotFound: + // Need to migrate to V1. + default: + return errors.New("corrupt IAM format file") + } + } else { + if iamFmt.Version >= iamFormatVersion1 { + // Nothing to do. + return nil + } + // This case should not happen + // (i.e. Version is 0 or negative.) + return errors.New("got an invalid IAM format version") + } + + // Migrate long-term users + if err := iamOS.migrateUsersConfigToV1(false); err != nil { + logger.LogIf(context.Background(), err) + return err + } + // Migrate STS users + if err := iamOS.migrateUsersConfigToV1(true); err != nil { + logger.LogIf(context.Background(), err) + return err + } + // Save iam format to version 1. 
+ if err := iamOS.saveIAMConfig(newIAMFormatVersion1(), path); err != nil { + logger.LogIf(context.Background(), err) + return err + } + return nil +} + +// Should be called under config migration lock +func (iamOS *IAMObjectStore) migrateBackendFormat(objAPI ObjectLayer) error { + iamOS.setObjectAPI(objAPI) + defer iamOS.clearObjectAPI() + if err := iamOS.migrateToV1(); err != nil { + return err + } + return nil +} + +func (iamOS *IAMObjectStore) saveIAMConfig(item interface{}, path string) error { + objectAPI := iamOS.getObjectAPI() + data, err := json.Marshal(item) + if err != nil { + return err + } + return saveConfig(context.Background(), objectAPI, path, data) +} + +func (iamOS *IAMObjectStore) loadIAMConfig(item interface{}, path string) error { + objectAPI := iamOS.getObjectAPI() + data, err := readConfig(context.Background(), objectAPI, path) + if err != nil { + return err + } + return json.Unmarshal(data, item) +} + +func (iamOS *IAMObjectStore) deleteIAMConfig(path string) error { + err := deleteConfig(context.Background(), iamOS.getObjectAPI(), path) + if _, ok := err.(ObjectNotFound); ok { + return errConfigNotFound + } + return err +} + +func (iamOS *IAMObjectStore) loadPolicyDoc(policy string, m map[string]iampolicy.Policy) error { + objectAPI := iamOS.getObjectAPI() + if objectAPI == nil { + return errServerNotInitialized + } + + var p iampolicy.Policy + err := iamOS.loadIAMConfig(&p, getPolicyDocPath(policy)) + if err != nil { + return err + } + m[policy] = p + return nil +} + +func (iamOS *IAMObjectStore) loadPolicyDocs(m map[string]iampolicy.Policy) error { + objectAPI := iamOS.getObjectAPI() + if objectAPI == nil { + return errServerNotInitialized + } + + doneCh := make(chan struct{}) + defer close(doneCh) + for item := range listIAMConfigItems(objectAPI, iamConfigPoliciesPrefix, true, doneCh) { + if item.Err != nil { + return item.Err + } + + policyName := item.Item + err := iamOS.loadPolicyDoc(policyName, m) + if err != nil { + return err + } 
+ } + return nil +} + +func (iamOS *IAMObjectStore) loadUser(user string, isSTS bool, m map[string]auth.Credentials) error { + objectAPI := iamOS.getObjectAPI() + if objectAPI == nil { + return errServerNotInitialized + } + + var u UserIdentity + err := iamOS.loadIAMConfig(&u, getUserIdentityPath(user, isSTS)) + if err != nil { + return err + } + + if u.Credentials.IsExpired() { + // Delete expired identity - ignoring errors here. + iamOS.deleteIAMConfig(getUserIdentityPath(user, isSTS)) + iamOS.deleteIAMConfig(getMappedPolicyPath(user, isSTS, false)) + return nil + } + + if u.Credentials.AccessKey == "" { + u.Credentials.AccessKey = user + } + m[user] = u.Credentials + return nil +} + +func (iamOS *IAMObjectStore) loadUsers(isSTS bool, m map[string]auth.Credentials) error { + objectAPI := iamOS.getObjectAPI() + if objectAPI == nil { + return errServerNotInitialized + } + + doneCh := make(chan struct{}) + defer close(doneCh) + basePrefix := iamConfigUsersPrefix + if isSTS { + basePrefix = iamConfigSTSPrefix + } + for item := range listIAMConfigItems(objectAPI, basePrefix, true, doneCh) { + if item.Err != nil { + return item.Err + } + + userName := item.Item + err := iamOS.loadUser(userName, isSTS, m) + if err != nil { + return err + } + } + return nil +} + +func (iamOS *IAMObjectStore) loadGroup(group string, m map[string]GroupInfo) error { + objectAPI := iamOS.getObjectAPI() + if objectAPI == nil { + return errServerNotInitialized + } + + var g GroupInfo + err := iamOS.loadIAMConfig(&g, getGroupInfoPath(group)) + if err != nil { + return err + } + m[group] = g + return nil +} + +func (iamOS *IAMObjectStore) loadGroups(m map[string]GroupInfo) error { + objectAPI := iamOS.getObjectAPI() + if objectAPI == nil { + return errServerNotInitialized + } + + doneCh := make(chan struct{}) + defer close(doneCh) + for item := range listIAMConfigItems(objectAPI, iamConfigGroupsPrefix, true, doneCh) { + if item.Err != nil { + return item.Err + } + + group := item.Item + err := 
iamOS.loadGroup(group, m) + if err != nil { + return err + } + } + return nil +} + +func (iamOS *IAMObjectStore) loadMappedPolicy(name string, isSTS, isGroup bool, + m map[string]MappedPolicy) error { + + objectAPI := iamOS.getObjectAPI() + if objectAPI == nil { + return errServerNotInitialized + } + + var p MappedPolicy + err := iamOS.loadIAMConfig(&p, getMappedPolicyPath(name, isSTS, isGroup)) + if err != nil { + return err + } + m[name] = p + return nil +} + +func (iamOS *IAMObjectStore) loadMappedPolicies(isSTS, isGroup bool, m map[string]MappedPolicy) error { + objectAPI := iamOS.getObjectAPI() + if objectAPI == nil { + return errServerNotInitialized + } + + doneCh := make(chan struct{}) + defer close(doneCh) + var basePath string + switch { + case isSTS: + basePath = iamConfigPolicyDBSTSUsersPrefix + case isGroup: + basePath = iamConfigPolicyDBGroupsPrefix + default: + basePath = iamConfigPolicyDBUsersPrefix + } + for item := range listIAMConfigItems(objectAPI, basePath, false, doneCh) { + if item.Err != nil { + return item.Err + } + + policyFile := item.Item + userOrGroupName := strings.TrimSuffix(policyFile, ".json") + err := iamOS.loadMappedPolicy(userOrGroupName, isSTS, isGroup, m) + if err != nil { + return err + } + } + return nil +} + +// Refresh IAMSys. If an object layer is passed in use that, otherwise +// load from global. 
+func (iamOS *IAMObjectStore) loadAll(sys *IAMSys, objectAPI ObjectLayer) error { + if objectAPI == nil { + objectAPI = iamOS.getObjectAPI() + } + if objectAPI == nil { + return errServerNotInitialized + } + // cache object layer for other load* functions + iamOS.setObjectAPI(objectAPI) + defer iamOS.clearObjectAPI() + + iamUsersMap := make(map[string]auth.Credentials) + iamGroupsMap := make(map[string]GroupInfo) + iamPolicyDocsMap := make(map[string]iampolicy.Policy) + iamUserPolicyMap := make(map[string]MappedPolicy) + iamGroupPolicyMap := make(map[string]MappedPolicy) + + if err := iamOS.loadPolicyDocs(iamPolicyDocsMap); err != nil { + return err + } + if err := iamOS.loadUsers(false, iamUsersMap); err != nil { + return err + } + // load STS temp users into the same map + if err := iamOS.loadUsers(true, iamUsersMap); err != nil { + return err + } + if err := iamOS.loadGroups(iamGroupsMap); err != nil { + return err + } + + if err := iamOS.loadMappedPolicies(false, false, iamUserPolicyMap); err != nil { + return err + } + // load STS policy mappings into the same map + if err := iamOS.loadMappedPolicies(true, false, iamUserPolicyMap); err != nil { + return err + } + // load policies mapped to groups + if err := iamOS.loadMappedPolicies(false, true, iamGroupPolicyMap); err != nil { + return err + } + + // Sets default canned policies, if none are set. 
+ setDefaultCannedPolicies(iamPolicyDocsMap) + + sys.Lock() + defer sys.Unlock() + + sys.iamUsersMap = iamUsersMap + sys.iamPolicyDocsMap = iamPolicyDocsMap + sys.iamUserPolicyMap = iamUserPolicyMap + sys.iamGroupPolicyMap = iamGroupPolicyMap + sys.iamGroupsMap = iamGroupsMap + sys.buildUserGroupMemberships() + + return nil +} + +func (iamOS *IAMObjectStore) savePolicyDoc(policyName string, p iampolicy.Policy) error { + return iamOS.saveIAMConfig(&p, getPolicyDocPath(policyName)) +} + +func (iamOS *IAMObjectStore) saveMappedPolicy(name string, isSTS, isGroup bool, mp MappedPolicy) error { + return iamOS.saveIAMConfig(mp, getMappedPolicyPath(name, isSTS, isGroup)) +} + +func (iamOS *IAMObjectStore) saveUserIdentity(name string, isSTS bool, u UserIdentity) error { + return iamOS.saveIAMConfig(u, getUserIdentityPath(name, isSTS)) +} + +func (iamOS *IAMObjectStore) saveGroupInfo(name string, gi GroupInfo) error { + return iamOS.saveIAMConfig(gi, getGroupInfoPath(name)) +} + +func (iamOS *IAMObjectStore) deletePolicyDoc(name string) error { + return iamOS.deleteIAMConfig(getPolicyDocPath(name)) +} + +func (iamOS *IAMObjectStore) deleteMappedPolicy(name string, isSTS, isGroup bool) error { + return iamOS.deleteIAMConfig(getMappedPolicyPath(name, isSTS, isGroup)) +} + +func (iamOS *IAMObjectStore) deleteUserIdentity(name string, isSTS bool) error { + return iamOS.deleteIAMConfig(getUserIdentityPath(name, isSTS)) +} + +func (iamOS *IAMObjectStore) deleteGroupInfo(name string) error { + return iamOS.deleteIAMConfig(getGroupInfoPath(name)) +} + +// helper type for listIAMConfigItems +type itemOrErr struct { + Item string + Err error +} + +// Lists files or dirs in the minioMetaBucket at the given path +// prefix. If dirs is true, only directories are listed, otherwise +// only objects are listed. All returned items have the pathPrefix +// removed from their names. 
+func listIAMConfigItems(objectAPI ObjectLayer, pathPrefix string, dirs bool, + doneCh <-chan struct{}) <-chan itemOrErr { + + ch := make(chan itemOrErr) + dirList := func(lo ListObjectsInfo) []string { + return lo.Prefixes + } + filesList := func(lo ListObjectsInfo) (r []string) { + for _, o := range lo.Objects { + r = append(r, o.Name) + } + return r + } + + go func() { + marker := "" + for { + lo, err := objectAPI.ListObjects(context.Background(), + minioMetaBucket, pathPrefix, marker, SlashSeparator, 1000) + if err != nil { + select { + case ch <- itemOrErr{Err: err}: + case <-doneCh: + } + close(ch) + return + } + marker = lo.NextMarker + lister := dirList(lo) + if !dirs { + lister = filesList(lo) + } + for _, itemPrefix := range lister { + item := strings.TrimPrefix(itemPrefix, pathPrefix) + item = strings.TrimSuffix(item, SlashSeparator) + select { + case ch <- itemOrErr{Item: item}: + case <-doneCh: + close(ch) + return + } + } + if !lo.IsTruncated { + close(ch) + return + } + } + }() + return ch +} + +func (iamOS *IAMObjectStore) watch(sys *IAMSys) { + watchDisk := func() { + ticker := time.NewTicker(globalRefreshIAMInterval) + defer ticker.Stop() + for { + select { + case <-GlobalServiceDoneCh: + return + case <-ticker.C: + iamOS.loadAll(sys, nil) + } + } + } + // Refresh IAMSys in background. + go watchDisk() +} diff --git a/vendor/github.com/minio/minio/cmd/iam.go b/vendor/github.com/minio/minio/cmd/iam.go index eb23cc5509..9c35a4bd20 100644 --- a/vendor/github.com/minio/minio/cmd/iam.go +++ b/vendor/github.com/minio/minio/cmd/iam.go @@ -1,5 +1,5 @@ /* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. + * MinIO Cloud Storage, (C) 2018-2019 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. 
@@ -20,13 +20,9 @@ import ( "bytes" "context" "encoding/json" - "path" "strings" "sync" - "time" - etcd "github.com/coreos/etcd/clientv3" - "github.com/coreos/etcd/mvcc/mvccpb" "github.com/minio/minio-go/v6/pkg/set" "github.com/minio/minio/cmd/logger" "github.com/minio/minio/pkg/auth" @@ -41,25 +37,209 @@ const ( // IAM users directory. iamConfigUsersPrefix = iamConfigPrefix + "/users/" + // IAM groups directory. + iamConfigGroupsPrefix = iamConfigPrefix + "/groups/" + // IAM policies directory. iamConfigPoliciesPrefix = iamConfigPrefix + "/policies/" // IAM sts directory. iamConfigSTSPrefix = iamConfigPrefix + "/sts/" + // IAM Policy DB prefixes. + iamConfigPolicyDBPrefix = iamConfigPrefix + "/policydb/" + iamConfigPolicyDBUsersPrefix = iamConfigPolicyDBPrefix + "users/" + iamConfigPolicyDBSTSUsersPrefix = iamConfigPolicyDBPrefix + "sts-users/" + iamConfigPolicyDBGroupsPrefix = iamConfigPolicyDBPrefix + "groups/" + // IAM identity file which captures identity credentials. iamIdentityFile = "identity.json" // IAM policy file which provides policies for each users. 
iamPolicyFile = "policy.json" + + // IAM group members file + iamGroupMembersFile = "members.json" + + // IAM format file + iamFormatFile = "format.json" + + iamFormatVersion1 = 1 ) +const ( + statusEnabled = "enabled" + statusDisabled = "disabled" +) + +type iamFormat struct { + Version int `json:"version"` +} + +func newIAMFormatVersion1() iamFormat { + return iamFormat{Version: iamFormatVersion1} +} + +func getIAMFormatFilePath() string { + return iamConfigPrefix + SlashSeparator + iamFormatFile +} + +func getUserIdentityPath(user string, isSTS bool) string { + basePath := iamConfigUsersPrefix + if isSTS { + basePath = iamConfigSTSPrefix + } + return pathJoin(basePath, user, iamIdentityFile) +} + +func getGroupInfoPath(group string) string { + return pathJoin(iamConfigGroupsPrefix, group, iamGroupMembersFile) +} + +func getPolicyDocPath(name string) string { + return pathJoin(iamConfigPoliciesPrefix, name, iamPolicyFile) +} + +func getMappedPolicyPath(name string, isSTS, isGroup bool) string { + switch { + case isSTS: + return pathJoin(iamConfigPolicyDBSTSUsersPrefix, name+".json") + case isGroup: + return pathJoin(iamConfigPolicyDBGroupsPrefix, name+".json") + default: + return pathJoin(iamConfigPolicyDBUsersPrefix, name+".json") + } +} + +// UserIdentity represents a user's secret key and their status +type UserIdentity struct { + Version int `json:"version"` + Credentials auth.Credentials `json:"credentials"` +} + +func newUserIdentity(creds auth.Credentials) UserIdentity { + return UserIdentity{Version: 1, Credentials: creds} +} + +// GroupInfo contains info about a group +type GroupInfo struct { + Version int `json:"version"` + Status string `json:"status"` + Members []string `json:"members"` +} + +func newGroupInfo(members []string) GroupInfo { + return GroupInfo{Version: 1, Status: statusEnabled, Members: members} +} + +// MappedPolicy represents a policy name mapped to a user or group +type MappedPolicy struct { + Version int `json:"version"` + Policy 
string `json:"policy"` +} + +func newMappedPolicy(policy string) MappedPolicy { + return MappedPolicy{Version: 1, Policy: policy} +} + // IAMSys - config system. type IAMSys struct { sync.RWMutex - iamUsersMap map[string]auth.Credentials - iamPolicyMap map[string]string - iamCannedPolicyMap map[string]iampolicy.Policy + // map of policy names to policy definitions + iamPolicyDocsMap map[string]iampolicy.Policy + // map of usernames to credentials + iamUsersMap map[string]auth.Credentials + // map of group names to group info + iamGroupsMap map[string]GroupInfo + // map of user names to groups they are a member of + iamUserGroupMemberships map[string]set.StringSet + // map of usernames/temporary access keys to policy names + iamUserPolicyMap map[string]MappedPolicy + // map of group names to policy names + iamGroupPolicyMap map[string]MappedPolicy + + // Persistence layer for IAM subsystem + store IAMStorageAPI +} + +// IAMStorageAPI defines an interface for the IAM persistence layer +type IAMStorageAPI interface { + migrateBackendFormat(ObjectLayer) error + + loadPolicyDoc(policy string, m map[string]iampolicy.Policy) error + loadPolicyDocs(m map[string]iampolicy.Policy) error + + loadUser(user string, isSTS bool, m map[string]auth.Credentials) error + loadUsers(isSTS bool, m map[string]auth.Credentials) error + + loadGroup(group string, m map[string]GroupInfo) error + loadGroups(m map[string]GroupInfo) error + + loadMappedPolicy(name string, isSTS, isGroup bool, m map[string]MappedPolicy) error + loadMappedPolicies(isSTS, isGroup bool, m map[string]MappedPolicy) error + + loadAll(*IAMSys, ObjectLayer) error + + saveIAMConfig(item interface{}, path string) error + loadIAMConfig(item interface{}, path string) error + deleteIAMConfig(path string) error + + savePolicyDoc(policyName string, p iampolicy.Policy) error + saveMappedPolicy(name string, isSTS, isGroup bool, mp MappedPolicy) error + saveUserIdentity(name string, isSTS bool, u UserIdentity) error + 
saveGroupInfo(group string, gi GroupInfo) error + + deletePolicyDoc(policyName string) error + deleteMappedPolicy(name string, isSTS, isGroup bool) error + deleteUserIdentity(name string, isSTS bool) error + deleteGroupInfo(name string) error + + watch(*IAMSys) +} + +// LoadGroup - loads a specific group from storage, and updates the +// memberships cache. If the specified group does not exist in +// storage, it is removed from in-memory maps as well - this +// simplifies the implementation for group removal. This is called +// only via IAM notifications. +func (sys *IAMSys) LoadGroup(objAPI ObjectLayer, group string) error { + if objAPI == nil { + return errInvalidArgument + } + + sys.Lock() + defer sys.Unlock() + + if globalEtcdClient != nil { + // Watch APIs cover this case, so nothing to do. + return nil + } + + err := sys.store.loadGroup(group, sys.iamGroupsMap) + if err != nil && err != errConfigNotFound { + return err + } + + if err == errConfigNotFound { + // group does not exist - so remove from memory. + sys.removeGroupFromMembershipsMap(group) + delete(sys.iamGroupsMap, group) + delete(sys.iamGroupPolicyMap, group) + return nil + } + + gi := sys.iamGroupsMap[group] + + // Updating the group memberships cache happens in two steps: + // + // 1. Remove the group from each user's list of memberships. + // 2. Add the group to each member's list of memberships. + // + // This ensures that regardless of members being added or + // removed, the cache stays current. + sys.removeGroupFromMembershipsMap(group) + sys.updateGroupMembershipsMap(group, &gi) + return nil } // LoadPolicy - reloads a specific canned policy from backend disks or etcd. 
@@ -71,9 +251,8 @@ func (sys *IAMSys) LoadPolicy(objAPI ObjectLayer, policyName string) error { sys.Lock() defer sys.Unlock() - prefix := iamConfigPoliciesPrefix if globalEtcdClient == nil { - return reloadPolicy(context.Background(), objAPI, prefix, policyName, sys.iamCannedPolicyMap) + return sys.store.loadPolicyDoc(policyName, sys.iamPolicyDocsMap) } // When etcd is set, we use watch APIs so this code is not needed. @@ -81,7 +260,7 @@ func (sys *IAMSys) LoadPolicy(objAPI ObjectLayer, policyName string) error { } // LoadUser - reloads a specific user from backend disks or etcd. -func (sys *IAMSys) LoadUser(objAPI ObjectLayer, accessKey string, temp bool) error { +func (sys *IAMSys) LoadUser(objAPI ObjectLayer, accessKey string, isSTS bool) error { if objAPI == nil { return errInvalidArgument } @@ -89,123 +268,39 @@ func (sys *IAMSys) LoadUser(objAPI ObjectLayer, accessKey string, temp bool) err sys.Lock() defer sys.Unlock() - prefix := iamConfigUsersPrefix - if temp { - prefix = iamConfigSTSPrefix - } - if globalEtcdClient == nil { - return reloadUser(context.Background(), objAPI, prefix, accessKey, sys.iamUsersMap, sys.iamPolicyMap) + err := sys.store.loadUser(accessKey, isSTS, sys.iamUsersMap) + if err != nil { + return err + } + err = sys.store.loadMappedPolicy(accessKey, isSTS, false, sys.iamUserPolicyMap) + // Ignore policy not mapped error + if err != nil && err != errConfigNotFound { + return err + } } // When etcd is set, we use watch APIs so this code is not needed. return nil } // Load - loads iam subsystem -func (sys *IAMSys) Load(objAPI ObjectLayer) error { - if globalEtcdClient != nil { - return sys.refreshEtcd() - } - return sys.refresh(objAPI) +func (sys *IAMSys) Load() error { + // Pass nil objectlayer here - it will be loaded internally + // from the IAMStorageAPI. 
+ return sys.store.loadAll(sys, nil) } -func (sys *IAMSys) reloadFromEvent(event *etcd.Event) { - eventCreate := event.IsModify() || event.IsCreate() - eventDelete := event.Type == etcd.EventTypeDelete - usersPrefix := strings.HasPrefix(string(event.Kv.Key), iamConfigUsersPrefix) - stsPrefix := strings.HasPrefix(string(event.Kv.Key), iamConfigSTSPrefix) - policyPrefix := strings.HasPrefix(string(event.Kv.Key), iamConfigPoliciesPrefix) - - ctx, cancel := context.WithTimeout(context.Background(), - defaultContextTimeout) - defer cancel() - - switch { - case eventCreate: - switch { - case usersPrefix: - accessKey := path.Dir(strings.TrimPrefix(string(event.Kv.Key), - iamConfigUsersPrefix)) - reloadEtcdUser(ctx, iamConfigUsersPrefix, accessKey, - sys.iamUsersMap, sys.iamPolicyMap) - case stsPrefix: - accessKey := path.Dir(strings.TrimPrefix(string(event.Kv.Key), - iamConfigSTSPrefix)) - reloadEtcdUser(ctx, iamConfigSTSPrefix, accessKey, - sys.iamUsersMap, sys.iamPolicyMap) - case policyPrefix: - policyName := path.Dir(strings.TrimPrefix(string(event.Kv.Key), - iamConfigPoliciesPrefix)) - reloadEtcdPolicy(ctx, iamConfigPoliciesPrefix, - policyName, sys.iamCannedPolicyMap) - } - case eventDelete: - switch { - case usersPrefix: - accessKey := path.Dir(strings.TrimPrefix(string(event.Kv.Key), - iamConfigUsersPrefix)) - delete(sys.iamUsersMap, accessKey) - delete(sys.iamPolicyMap, accessKey) - case stsPrefix: - accessKey := path.Dir(strings.TrimPrefix(string(event.Kv.Key), - iamConfigSTSPrefix)) - delete(sys.iamUsersMap, accessKey) - delete(sys.iamPolicyMap, accessKey) - case policyPrefix: - policyName := path.Dir(strings.TrimPrefix(string(event.Kv.Key), - iamConfigPoliciesPrefix)) - delete(sys.iamCannedPolicyMap, policyName) - } +// Perform IAM configuration migration. 
+func (sys *IAMSys) doIAMConfigMigration(objAPI ObjectLayer) error { + // Take IAM configuration migration lock + lockPath := iamConfigPrefix + "/migration.lock" + objLock := globalNSMutex.NewNSLock(context.Background(), minioMetaBucket, lockPath) + if err := objLock.GetLock(globalOperationTimeout); err != nil { + return err } -} + defer objLock.Unlock() -// Watch etcd entries for IAM -func (sys *IAMSys) watchIAMEtcd() { - watchEtcd := func() { - // Refresh IAMSys with etcd watch. - for { - watchCh := globalEtcdClient.Watch(context.Background(), - iamConfigPrefix, etcd.WithPrefix(), etcd.WithKeysOnly()) - select { - case <-GlobalServiceDoneCh: - return - case watchResp, ok := <-watchCh: - if !ok { - time.Sleep(1 * time.Second) - continue - } - if err := watchResp.Err(); err != nil { - logger.LogIf(context.Background(), err) - // log and retry. - time.Sleep(1 * time.Second) - continue - } - for _, event := range watchResp.Events { - sys.Lock() - sys.reloadFromEvent(event) - sys.Unlock() - } - } - } - } - go watchEtcd() -} - -func (sys *IAMSys) watchIAMDisk(objAPI ObjectLayer) { - watchDisk := func() { - ticker := time.NewTicker(globalRefreshIAMInterval) - defer ticker.Stop() - for { - select { - case <-GlobalServiceDoneCh: - return - case <-ticker.C: - sys.refresh(objAPI) - } - } - } - // Refresh IAMSys in background. 
- go watchDisk() + return sys.store.migrateBackendFormat(objAPI) } // Init - initializes config system from iam.json @@ -214,25 +309,22 @@ func (sys *IAMSys) Init(objAPI ObjectLayer) error { return errInvalidArgument } - if globalEtcdClient != nil { - defer sys.watchIAMEtcd() + if globalEtcdClient == nil { + sys.store = newIAMObjectStore() } else { - defer sys.watchIAMDisk(objAPI) + sys.store = newIAMEtcdStore() } doneCh := make(chan struct{}) defer close(doneCh) - // Initializing IAM needs a retry mechanism for + // Migrating IAM needs a retry mechanism for // the following reasons: // - Read quorum is lost just after the initialization // of the object layer. for range newRetryTimerSimple(doneCh) { - if globalEtcdClient != nil { - return sys.refreshEtcd() - } - // Load IAMSys once during boot. - if err := sys.refresh(objAPI); err != nil { + // Migrate IAM configuration + if err := sys.doIAMConfigMigration(objAPI); err != nil { if err == errDiskNotFound || strings.Contains(err.Error(), InsufficientReadQuorum{}.Error()) || strings.Contains(err.Error(), InsufficientWriteQuorum{}.Error()) { @@ -243,6 +335,28 @@ func (sys *IAMSys) Init(objAPI ObjectLayer) error { } break } + + sys.store.watch(sys) + + // Initializing IAM needs a retry mechanism for + // the following reasons: + // - Read quorum is lost just after the initialization + // of the object layer. + for range newRetryTimerSimple(doneCh) { + // Load IAMSys once during boot. Need to pass in + // objAPI as server has not yet initialized. 
+ if err := sys.store.loadAll(sys, objAPI); err != nil { + if err == errDiskNotFound || + strings.Contains(err.Error(), InsufficientReadQuorum{}.Error()) || + strings.Contains(err.Error(), InsufficientWriteQuorum{}.Error()) { + logger.Info("Waiting for IAM subsystem to be initialized..") + continue + } + return err + } + break + } + return nil } @@ -257,14 +371,7 @@ func (sys *IAMSys) DeletePolicy(policyName string) error { return errInvalidArgument } - var err error - pFile := pathJoin(iamConfigPoliciesPrefix, policyName, iamPolicyFile) - if globalEtcdClient != nil { - err = deleteConfigEtcd(context.Background(), globalEtcdClient, pFile) - } else { - err = deleteConfig(context.Background(), objectAPI, pFile) - } - + err := sys.store.deletePolicyDoc(policyName) switch err.(type) { case ObjectNotFound: // Ignore error if policy is already deleted. @@ -274,7 +381,7 @@ func (sys *IAMSys) DeletePolicy(policyName string) error { sys.Lock() defer sys.Unlock() - delete(sys.iamCannedPolicyMap, policyName) + delete(sys.iamPolicyDocsMap, policyName) return err } @@ -285,23 +392,23 @@ func (sys *IAMSys) ListPolicies() (map[string][]byte, error) { return nil, errServerNotInitialized } - var cannedPolicyMap = make(map[string][]byte) + var policyDocsMap = make(map[string][]byte) sys.RLock() defer sys.RUnlock() - for k, v := range sys.iamCannedPolicyMap { + for k, v := range sys.iamPolicyDocsMap { data, err := json.Marshal(v) if err != nil { return nil, err } - cannedPolicyMap[k] = data + policyDocsMap[k] = data } - return cannedPolicyMap, nil + return policyDocsMap, nil } -// SetPolicy - sets a new canned policy. +// SetPolicy - sets a new name policy. 
func (sys *IAMSys) SetPolicy(policyName string, p iampolicy.Policy) error { objectAPI := newObjectLayerFn() if objectAPI == nil { @@ -312,87 +419,26 @@ func (sys *IAMSys) SetPolicy(policyName string, p iampolicy.Policy) error { return errInvalidArgument } - configFile := pathJoin(iamConfigPoliciesPrefix, policyName, iamPolicyFile) - data, err := json.Marshal(p) - if err != nil { - return err - } - - if globalEtcdClient != nil { - err = saveConfigEtcd(context.Background(), globalEtcdClient, configFile, data) - } else { - err = saveConfig(context.Background(), objectAPI, configFile, data) - } - if err != nil { + if err := sys.store.savePolicyDoc(policyName, p); err != nil { return err } sys.Lock() defer sys.Unlock() - - sys.iamCannedPolicyMap[policyName] = p - + sys.iamPolicyDocsMap[policyName] = p return nil } -// SetUserPolicy - sets policy to given user name. -func (sys *IAMSys) SetUserPolicy(accessKey, policyName string) error { - objectAPI := newObjectLayerFn() - if objectAPI == nil { - return errServerNotInitialized - } - - sys.Lock() - defer sys.Unlock() - - if _, ok := sys.iamUsersMap[accessKey]; !ok { - return errNoSuchUser - } - - if _, ok := sys.iamCannedPolicyMap[policyName]; !ok { - return errNoSuchPolicy - } - - data, err := json.Marshal(policyName) - if err != nil { - return err - } - - configFile := pathJoin(iamConfigUsersPrefix, accessKey, iamPolicyFile) - if globalEtcdClient != nil { - err = saveConfigEtcd(context.Background(), globalEtcdClient, configFile, data) - } else { - err = saveConfig(context.Background(), objectAPI, configFile, data) - } - if err != nil { - return err - } - - sys.iamPolicyMap[accessKey] = policyName - return nil -} - -// DeleteUser - set user credentials. +// DeleteUser - delete user (only for long-term users not STS users). 
func (sys *IAMSys) DeleteUser(accessKey string) error { objectAPI := newObjectLayerFn() if objectAPI == nil { return errServerNotInitialized } - var err error - pFile := pathJoin(iamConfigUsersPrefix, accessKey, iamPolicyFile) - iFile := pathJoin(iamConfigUsersPrefix, accessKey, iamIdentityFile) - if globalEtcdClient != nil { - // It is okay to ignore errors when deleting policy.json for the user. - deleteConfigEtcd(context.Background(), globalEtcdClient, pFile) - err = deleteConfigEtcd(context.Background(), globalEtcdClient, iFile) - } else { - // It is okay to ignore errors when deleting policy.json for the user. - _ = deleteConfig(context.Background(), objectAPI, pFile) - err = deleteConfig(context.Background(), objectAPI, iFile) - } - - // + // It is ok to ignore deletion error on the mapped policy + sys.store.deleteMappedPolicy(accessKey, false, false) + err := sys.store.deleteUserIdentity(accessKey, false) switch err.(type) { case ObjectNotFound: // ignore if user is already deleted. @@ -403,7 +449,7 @@ func (sys *IAMSys) DeleteUser(accessKey string) error { defer sys.Unlock() delete(sys.iamUsersMap, accessKey) - delete(sys.iamPolicyMap, accessKey) + delete(sys.iamUserPolicyMap, accessKey) return err } @@ -422,46 +468,25 @@ func (sys *IAMSys) SetTempUser(accessKey string, cred auth.Credentials, policyNa // temporary user which match with pre-configured canned // policies for this server. 
if globalPolicyOPA == nil && policyName != "" { - p, ok := sys.iamCannedPolicyMap[policyName] + p, ok := sys.iamPolicyDocsMap[policyName] if !ok { return errInvalidArgument } if p.IsEmpty() { - delete(sys.iamPolicyMap, accessKey) + delete(sys.iamUserPolicyMap, accessKey) return nil } - data, err := json.Marshal(policyName) - if err != nil { + mp := newMappedPolicy(policyName) + if err := sys.store.saveMappedPolicy(accessKey, true, false, mp); err != nil { return err } - configFile := pathJoin(iamConfigSTSPrefix, accessKey, iamPolicyFile) - if globalEtcdClient != nil { - err = saveConfigEtcd(context.Background(), globalEtcdClient, configFile, data) - } else { - err = saveConfig(context.Background(), objectAPI, configFile, data) - } - if err != nil { - return err - } - - sys.iamPolicyMap[accessKey] = policyName + sys.iamUserPolicyMap[accessKey] = mp } - configFile := pathJoin(iamConfigSTSPrefix, accessKey, iamIdentityFile) - data, err := json.Marshal(cred) - if err != nil { - return err - } - - if globalEtcdClient != nil { - err = saveConfigEtcd(context.Background(), globalEtcdClient, configFile, data) - } else { - err = saveConfig(context.Background(), objectAPI, configFile, data) - } - - if err != nil { + u := newUserIdentity(cred) + if err := sys.store.saveUserIdentity(accessKey, true, u); err != nil { return err } @@ -469,27 +494,6 @@ func (sys *IAMSys) SetTempUser(accessKey string, cred auth.Credentials, policyNa return nil } -// GetUserPolicy - returns canned policy name associated with a user. -func (sys *IAMSys) GetUserPolicy(accessKey string) (policyName string, err error) { - objectAPI := newObjectLayerFn() - if objectAPI == nil { - return "", errServerNotInitialized - } - - sys.RLock() - defer sys.RUnlock() - - if _, ok := sys.iamUsersMap[accessKey]; !ok { - return "", errNoSuchUser - } - - if _, ok := sys.iamPolicyMap[accessKey]; !ok { - return "", errNoSuchUser - } - - return sys.iamPolicyMap[accessKey], nil -} - // ListUsers - list all users. 
func (sys *IAMSys) ListUsers() (map[string]madmin.UserInfo, error) { objectAPI := newObjectLayerFn() @@ -504,7 +508,7 @@ func (sys *IAMSys) ListUsers() (map[string]madmin.UserInfo, error) { for k, v := range sys.iamUsersMap { users[k] = madmin.UserInfo{ - PolicyName: sys.iamPolicyMap[k], + PolicyName: sys.iamUserPolicyMap[k].Policy, Status: madmin.AccountStatus(v.Status), } } @@ -531,68 +535,44 @@ func (sys *IAMSys) SetUserStatus(accessKey string, status madmin.AccountStatus) return errNoSuchUser } - uinfo := madmin.UserInfo{ - SecretKey: cred.SecretKey, - Status: status, - } - - configFile := pathJoin(iamConfigUsersPrefix, accessKey, iamIdentityFile) - data, err := json.Marshal(uinfo) - if err != nil { - return err - } - - if globalEtcdClient != nil { - err = saveConfigEtcd(context.Background(), globalEtcdClient, configFile, data) - } else { - err = saveConfig(context.Background(), objectAPI, configFile, data) - } - - if err != nil { - return err - } - - sys.iamUsersMap[accessKey] = auth.Credentials{ + uinfo := newUserIdentity(auth.Credentials{ AccessKey: accessKey, - SecretKey: uinfo.SecretKey, - Status: string(uinfo.Status), + SecretKey: cred.SecretKey, + Status: string(status), + }) + if err := sys.store.saveUserIdentity(accessKey, false, uinfo); err != nil { + return err } + sys.iamUsersMap[accessKey] = uinfo.Credentials return nil } -// SetUser - set user credentials. +// SetUser - set user credentials and policy. 
func (sys *IAMSys) SetUser(accessKey string, uinfo madmin.UserInfo) error { objectAPI := newObjectLayerFn() if objectAPI == nil { return errServerNotInitialized } - configFile := pathJoin(iamConfigUsersPrefix, accessKey, iamIdentityFile) - data, err := json.Marshal(uinfo) - if err != nil { - return err - } - - if globalEtcdClient != nil { - err = saveConfigEtcd(context.Background(), globalEtcdClient, configFile, data) - } else { - err = saveConfig(context.Background(), objectAPI, configFile, data) - } - - if err != nil { - return err - } + u := newUserIdentity(auth.Credentials{ + AccessKey: accessKey, + SecretKey: uinfo.SecretKey, + Status: string(uinfo.Status), + }) sys.Lock() defer sys.Unlock() - sys.iamUsersMap[accessKey] = auth.Credentials{ - AccessKey: accessKey, - SecretKey: uinfo.SecretKey, - Status: string(uinfo.Status), + if err := sys.store.saveUserIdentity(accessKey, false, u); err != nil { + return err } + sys.iamUsersMap[accessKey] = u.Credentials + // Set policy if specified. 
+ if uinfo.PolicyName != "" { + return sys.policyDBSet(objectAPI, accessKey, uinfo.PolicyName, false, false) + } return nil } @@ -611,33 +591,13 @@ func (sys *IAMSys) SetUserSecretKey(accessKey string, secretKey string) error { return errNoSuchUser } - uinfo := madmin.UserInfo{ - SecretKey: secretKey, - Status: madmin.AccountStatus(cred.Status), - } - - configFile := pathJoin(iamConfigUsersPrefix, accessKey, iamIdentityFile) - data, err := json.Marshal(uinfo) - if err != nil { + cred.SecretKey = secretKey + u := newUserIdentity(cred) + if err := sys.store.saveUserIdentity(accessKey, false, u); err != nil { return err } - if globalEtcdClient != nil { - err = saveConfigEtcd(context.Background(), globalEtcdClient, configFile, data) - } else { - err = saveConfig(context.Background(), objectAPI, configFile, data) - } - - if err != nil { - return err - } - - sys.iamUsersMap[accessKey] = auth.Credentials{ - AccessKey: accessKey, - SecretKey: secretKey, - Status: string(uinfo.Status), - } - + sys.iamUsersMap[accessKey] = cred return nil } @@ -650,6 +610,304 @@ func (sys *IAMSys) GetUser(accessKey string) (cred auth.Credentials, ok bool) { return cred, ok && cred.IsValid() } +// AddUsersToGroup - adds users to a group, creating the group if +// needed. No error if user(s) already are in the group. +func (sys *IAMSys) AddUsersToGroup(group string, members []string) error { + objectAPI := newObjectLayerFn() + if objectAPI == nil { + return errServerNotInitialized + } + + if group == "" { + return errInvalidArgument + } + + sys.Lock() + defer sys.Unlock() + + // Validate that all members exist. + for _, member := range members { + _, ok := sys.iamUsersMap[member] + if !ok { + return errNoSuchUser + } + } + + gi, ok := sys.iamGroupsMap[group] + if !ok { + // Set group as enabled by default when it doesn't + // exist. + gi = newGroupInfo(members) + } else { + mergedMembers := append(gi.Members, members...) 
+ uniqMembers := set.CreateStringSet(mergedMembers...).ToSlice() + gi.Members = uniqMembers + } + + if err := sys.store.saveGroupInfo(group, gi); err != nil { + return err + } + + sys.iamGroupsMap[group] = gi + + // update user-group membership map + for _, member := range members { + gset := sys.iamUserGroupMemberships[member] + if gset == nil { + gset = set.CreateStringSet(group) + } else { + gset.Add(group) + } + sys.iamUserGroupMemberships[member] = gset + } + + return nil +} + +// RemoveUsersFromGroup - remove users from group. If no users are +// given, and the group is empty, deletes the group as well. +func (sys *IAMSys) RemoveUsersFromGroup(group string, members []string) error { + objectAPI := newObjectLayerFn() + if objectAPI == nil { + return errServerNotInitialized + } + + if group == "" { + return errInvalidArgument + } + + sys.Lock() + defer sys.Unlock() + + // Validate that all members exist. + for _, member := range members { + _, ok := sys.iamUsersMap[member] + if !ok { + return errNoSuchUser + } + } + + gi, ok := sys.iamGroupsMap[group] + if !ok { + return errNoSuchGroup + } + + // Check if attempting to delete a non-empty group. + if len(members) == 0 && len(gi.Members) != 0 { + return errGroupNotEmpty + } + + if len(members) == 0 { + // len(gi.Members) == 0 here. + + // Remove the group from storage. First delete the + // mapped policy. + err := sys.store.deleteMappedPolicy(group, false, true) + // No-mapped-policy case is ignored. + if err != nil && err != errConfigNotFound { + return err + } + err = sys.store.deleteGroupInfo(group) + if err != nil { + return err + } + + // Delete from server memory + delete(sys.iamGroupsMap, group) + delete(sys.iamGroupPolicyMap, group) + return nil + } + + // Only removing members. + s := set.CreateStringSet(gi.Members...) + d := set.CreateStringSet(members...) 
+ gi.Members = s.Difference(d).ToSlice() + + err := sys.store.saveGroupInfo(group, gi) + if err != nil { + return err + } + sys.iamGroupsMap[group] = gi + + // update user-group membership map + for _, member := range members { + gset := sys.iamUserGroupMemberships[member] + if gset == nil { + continue + } + gset.Remove(group) + sys.iamUserGroupMemberships[member] = gset + } + + return nil +} + +// SetGroupStatus - enable/disabled a group +func (sys *IAMSys) SetGroupStatus(group string, enabled bool) error { + objectAPI := newObjectLayerFn() + if objectAPI == nil { + return errServerNotInitialized + } + + sys.Lock() + defer sys.Unlock() + + if group == "" { + return errInvalidArgument + } + + gi, ok := sys.iamGroupsMap[group] + if !ok { + return errNoSuchGroup + } + + if enabled { + gi.Status = statusEnabled + } else { + gi.Status = statusDisabled + } + + if err := sys.store.saveGroupInfo(group, gi); err != nil { + return err + } + sys.iamGroupsMap[group] = gi + return nil +} + +// GetGroupDescription - builds up group description +func (sys *IAMSys) GetGroupDescription(group string) (gd madmin.GroupDesc, err error) { + sys.RLock() + defer sys.RUnlock() + + gi, ok := sys.iamGroupsMap[group] + if !ok { + return gd, errNoSuchGroup + } + + var p []string + p, err = sys.policyDBGet(group, true) + if err != nil { + return gd, err + } + + policy := "" + if len(p) > 0 { + policy = p[0] + } + + return madmin.GroupDesc{ + Name: group, + Status: gi.Status, + Members: gi.Members, + Policy: policy, + }, nil +} + +// ListGroups - lists groups +func (sys *IAMSys) ListGroups() (r []string) { + sys.RLock() + defer sys.RUnlock() + + for k := range sys.iamGroupsMap { + r = append(r, k) + } + return r +} + +// PolicyDBSet - sets a policy for a user or group in the +// PolicyDB. This function applies only long-term users. For STS +// users, policy is set directly by called sys.policyDBSet(). 
+func (sys *IAMSys) PolicyDBSet(name, policy string, isGroup bool) error { + objectAPI := newObjectLayerFn() + if objectAPI == nil { + return errServerNotInitialized + } + + sys.Lock() + defer sys.Unlock() + + // isSTS is always false when called via PolicyDBSet as policy + // is never set by an external API call for STS users. + return sys.policyDBSet(objectAPI, name, policy, false, isGroup) +} + +// policyDBSet - sets a policy for user in the policy db. Assumes that +// caller has sys.Lock(). +func (sys *IAMSys) policyDBSet(objectAPI ObjectLayer, name, policy string, isSTS, isGroup bool) error { + if name == "" || policy == "" { + return errInvalidArgument + } + if _, ok := sys.iamUsersMap[name]; !ok { + return errNoSuchUser + } + if _, ok := sys.iamPolicyDocsMap[policy]; !ok { + return errNoSuchPolicy + } + if _, ok := sys.iamUsersMap[name]; !ok { + return errNoSuchUser + } + + mp := newMappedPolicy(policy) + if err := sys.store.saveMappedPolicy(name, isSTS, isGroup, mp); err != nil { + return err + } + sys.iamUserPolicyMap[name] = mp + return nil +} + +// PolicyDBGet - gets policy set on a user or group. Since a user may +// be a member of multiple groups, this function returns an array of +// applicable policies (each group is mapped to at most one policy). 
+func (sys *IAMSys) PolicyDBGet(name string, isGroup bool) ([]string, error) { + if name == "" { + return nil, errInvalidArgument + } + + objectAPI := newObjectLayerFn() + if objectAPI == nil { + return nil, errServerNotInitialized + } + + sys.RLock() + defer sys.RUnlock() + + return sys.policyDBGet(name, isGroup) +} + +// This call assumes that caller has the sys.RLock() +func (sys *IAMSys) policyDBGet(name string, isGroup bool) ([]string, error) { + if isGroup { + if _, ok := sys.iamGroupsMap[name]; !ok { + return nil, errNoSuchGroup + } + + policy := sys.iamGroupPolicyMap[name] + // returned policy could be empty + if policy.Policy == "" { + return nil, nil + } + return []string{policy.Policy}, nil + } + + if _, ok := sys.iamUsersMap[name]; !ok { + return nil, errNoSuchUser + } + + result := []string{} + policy := sys.iamUserPolicyMap[name] + // returned policy could be empty + if policy.Policy != "" { + result = append(result, policy.Policy) + } + for _, group := range sys.iamUserGroupMemberships[name].ToSlice() { + p, ok := sys.iamGroupPolicyMap[group] + if ok && p.Policy != "" { + result = append(result, p.Policy) + } + } + return result, nil +} + // IsAllowedSTS is meant for STS based temporary credentials, // which implements claims validation and verification other than // applying policies. @@ -669,11 +927,12 @@ func (sys *IAMSys) IsAllowedSTS(args iampolicy.Args) bool { defer sys.RUnlock() // If policy is available for given user, check the policy. - name, ok := sys.iamPolicyMap[args.AccountName] + mp, ok := sys.iamUserPolicyMap[args.AccountName] if !ok { // No policy available reject. return false } + name := mp.Policy if pnameStr != name { // When claims has a policy, it should match the @@ -687,7 +946,7 @@ func (sys *IAMSys) IsAllowedSTS(args iampolicy.Args) bool { if !ok { // Sub policy not set, this is most common since subPolicy // is optional, use the top level policy only. 
- p, ok := sys.iamCannedPolicyMap[pnameStr] + p, ok := sys.iamPolicyDocsMap[pnameStr] return ok && p.IsAllowed(args) } @@ -712,7 +971,7 @@ func (sys *IAMSys) IsAllowedSTS(args iampolicy.Args) bool { } // Sub policy is set and valid. - p, ok := sys.iamCannedPolicyMap[pnameStr] + p, ok := sys.iamPolicyDocsMap[pnameStr] return ok && p.IsAllowed(args) && subPolicy.IsAllowed(args) } @@ -720,7 +979,11 @@ func (sys *IAMSys) IsAllowedSTS(args iampolicy.Args) bool { func (sys *IAMSys) IsAllowed(args iampolicy.Args) bool { // If opa is configured, use OPA always. if globalPolicyOPA != nil { - return globalPolicyOPA.IsAllowed(args) + ok, err := globalPolicyOPA.IsAllowed(args) + if err != nil { + logger.LogIf(context.Background(), err) + } + return ok } // With claims set, we should do STS related checks and validation. @@ -732,241 +995,16 @@ func (sys *IAMSys) IsAllowed(args iampolicy.Args) bool { defer sys.RUnlock() // If policy is available for given user, check the policy. - if name, found := sys.iamPolicyMap[args.AccountName]; found { - p, ok := sys.iamCannedPolicyMap[name] + if mp, found := sys.iamUserPolicyMap[args.AccountName]; found { + p, ok := sys.iamPolicyDocsMap[mp.Policy] return ok && p.IsAllowed(args) } - // As policy is not available and OPA is not configured, return the owner value. + // As policy is not available and OPA is not configured, + // return the owner value. return args.IsOwner } -var defaultContextTimeout = 30 * time.Second - -func etcdKvsToSet(prefix string, kvs []*mvccpb.KeyValue) set.StringSet { - users := set.NewStringSet() - for _, kv := range kvs { - // Extract user by stripping off the `prefix` value as suffix, - // then strip off the remaining basename to obtain the prefix - // value, usually in the following form. 
- // - // key := "config/iam/users/newuser/identity.json" - // prefix := "config/iam/users/" - // v := trim(trim(key, prefix), base(key)) == "newuser" - // - user := path.Clean(strings.TrimSuffix(strings.TrimPrefix(string(kv.Key), prefix), path.Base(string(kv.Key)))) - if !users.Contains(user) { - users.Add(user) - } - } - return users -} - -// Similar to reloadUsers but updates users, policies maps from etcd server, -func reloadEtcdUsers(prefix string, usersMap map[string]auth.Credentials, policyMap map[string]string) error { - ctx, cancel := context.WithTimeout(context.Background(), defaultContextTimeout) - defer cancel() - r, err := globalEtcdClient.Get(ctx, prefix, etcd.WithPrefix(), etcd.WithKeysOnly()) - if err != nil { - return err - } - // No users are created yet. - if r.Count == 0 { - return nil - } - - users := etcdKvsToSet(prefix, r.Kvs) - - // Reload config and policies for all users. - for _, user := range users.ToSlice() { - if err = reloadEtcdUser(ctx, prefix, user, usersMap, policyMap); err != nil { - return err - } - } - return nil -} - -func reloadEtcdPolicy(ctx context.Context, prefix string, policyName string, - cannedPolicyMap map[string]iampolicy.Policy) error { - pFile := pathJoin(prefix, policyName, iamPolicyFile) - pdata, err := readConfigEtcd(ctx, globalEtcdClient, pFile) - if err != nil { - return err - } - var p iampolicy.Policy - if err = json.Unmarshal(pdata, &p); err != nil { - return err - } - cannedPolicyMap[policyName] = p - return nil -} - -func reloadEtcdPolicies(prefix string, cannedPolicyMap map[string]iampolicy.Policy) error { - ctx, cancel := context.WithTimeout(context.Background(), defaultContextTimeout) - defer cancel() - r, err := globalEtcdClient.Get(ctx, prefix, etcd.WithPrefix(), etcd.WithKeysOnly()) - if err != nil { - return err - } - // No users are created yet. - if r.Count == 0 { - return nil - } - - policies := etcdKvsToSet(prefix, r.Kvs) - - // Reload config and policies for all policys. 
- for _, policyName := range policies.ToSlice() { - if err = reloadEtcdPolicy(ctx, prefix, policyName, cannedPolicyMap); err != nil { - return err - } - } - return nil -} - -func reloadPolicy(ctx context.Context, objectAPI ObjectLayer, prefix string, - policyName string, cannedPolicyMap map[string]iampolicy.Policy) error { - pFile := pathJoin(prefix, policyName, iamPolicyFile) - pdata, err := readConfig(context.Background(), objectAPI, pFile) - if err != nil { - return err - } - var p iampolicy.Policy - if err = json.Unmarshal(pdata, &p); err != nil { - return err - } - cannedPolicyMap[policyName] = p - return nil -} - -func reloadPolicies(objectAPI ObjectLayer, prefix string, cannedPolicyMap map[string]iampolicy.Policy) error { - marker := "" - for { - var lo ListObjectsInfo - var err error - lo, err = objectAPI.ListObjects(context.Background(), minioMetaBucket, prefix, marker, "/", 1000) - if err != nil { - return err - } - marker = lo.NextMarker - for _, prefix := range lo.Prefixes { - if err = reloadPolicy(context.Background(), objectAPI, iamConfigPoliciesPrefix, - path.Base(prefix), cannedPolicyMap); err != nil { - return err - } - } - if !lo.IsTruncated { - break - } - } - return nil - -} - -func reloadEtcdUser(ctx context.Context, prefix string, accessKey string, - usersMap map[string]auth.Credentials, policyMap map[string]string) error { - idFile := pathJoin(prefix, accessKey, iamIdentityFile) - pFile := pathJoin(prefix, accessKey, iamPolicyFile) - cdata, cerr := readConfigEtcd(ctx, globalEtcdClient, idFile) - pdata, perr := readConfigEtcd(ctx, globalEtcdClient, pFile) - if cerr != nil && cerr != errConfigNotFound { - return cerr - } - if perr != nil && perr != errConfigNotFound { - return perr - } - if cerr == errConfigNotFound && perr == errConfigNotFound { - return nil - } - if cerr == nil { - var cred auth.Credentials - if err := json.Unmarshal(cdata, &cred); err != nil { - return err - } - cred.AccessKey = path.Base(accessKey) - if cred.IsExpired() { - 
// Delete expired identity. - deleteConfigEtcd(ctx, globalEtcdClient, idFile) - // Delete expired identity policy. - deleteConfigEtcd(ctx, globalEtcdClient, pFile) - return nil - } - usersMap[cred.AccessKey] = cred - } - if perr == nil { - var policyName string - if err := json.Unmarshal(pdata, &policyName); err != nil { - return err - } - policyMap[path.Base(accessKey)] = policyName - } - return nil -} - -func reloadUser(ctx context.Context, objectAPI ObjectLayer, prefix string, accessKey string, - usersMap map[string]auth.Credentials, policyMap map[string]string) error { - idFile := pathJoin(prefix, accessKey, iamIdentityFile) - pFile := pathJoin(prefix, accessKey, iamPolicyFile) - cdata, cerr := readConfig(ctx, objectAPI, idFile) - pdata, perr := readConfig(ctx, objectAPI, pFile) - if cerr != nil && cerr != errConfigNotFound { - return cerr - } - if perr != nil && perr != errConfigNotFound { - return perr - } - if cerr == errConfigNotFound && perr == errConfigNotFound { - return nil - } - if cerr == nil { - var cred auth.Credentials - if err := json.Unmarshal(cdata, &cred); err != nil { - return err - } - cred.AccessKey = path.Base(accessKey) - if cred.IsExpired() { - // Delete expired identity. - objectAPI.DeleteObject(context.Background(), minioMetaBucket, idFile) - // Delete expired identity policy. - objectAPI.DeleteObject(context.Background(), minioMetaBucket, pFile) - return nil - } - usersMap[cred.AccessKey] = cred - } - if perr == nil { - var policyName string - if err := json.Unmarshal(pdata, &policyName); err != nil { - return err - } - policyMap[path.Base(accessKey)] = policyName - } - return nil -} - -// reloadUsers reads an updates users, policies from object layer into user and policy maps. 
-func reloadUsers(objectAPI ObjectLayer, prefix string, usersMap map[string]auth.Credentials, policyMap map[string]string) error { - marker := "" - for { - var lo ListObjectsInfo - var err error - lo, err = objectAPI.ListObjects(context.Background(), minioMetaBucket, prefix, marker, "/", 1000) - if err != nil { - return err - } - marker = lo.NextMarker - for _, prefix := range lo.Prefixes { - // Prefix is empty because prefix is already part of the List output. - if err = reloadUser(context.Background(), objectAPI, "", prefix, usersMap, policyMap); err != nil { - return err - } - } - if !lo.IsTruncated { - break - } - } - return nil -} - // Set default canned policies only if not already overridden by users. func setDefaultCannedPolicies(policies map[string]iampolicy.Policy) { _, ok := policies["writeonly"] @@ -983,68 +1021,53 @@ func setDefaultCannedPolicies(policies map[string]iampolicy.Policy) { } } -func (sys *IAMSys) refreshEtcd() error { - iamUsersMap := make(map[string]auth.Credentials) - iamPolicyMap := make(map[string]string) - iamCannedPolicyMap := make(map[string]iampolicy.Policy) - - if err := reloadEtcdPolicies(iamConfigPoliciesPrefix, iamCannedPolicyMap); err != nil { - return err +// buildUserGroupMemberships - builds the memberships map. IMPORTANT: +// Assumes that sys.Lock is held by caller. +func (sys *IAMSys) buildUserGroupMemberships() { + for group, gi := range sys.iamGroupsMap { + sys.updateGroupMembershipsMap(group, &gi) } - if err := reloadEtcdUsers(iamConfigUsersPrefix, iamUsersMap, iamPolicyMap); err != nil { - return err - } - if err := reloadEtcdUsers(iamConfigSTSPrefix, iamUsersMap, iamPolicyMap); err != nil { - return err - } - - // Sets default canned policies, if none are set. - setDefaultCannedPolicies(iamCannedPolicyMap) - - sys.Lock() - defer sys.Unlock() - - sys.iamUsersMap = iamUsersMap - sys.iamPolicyMap = iamPolicyMap - sys.iamCannedPolicyMap = iamCannedPolicyMap - - return nil } -// Refresh IAMSys. 
-func (sys *IAMSys) refresh(objAPI ObjectLayer) error { - iamUsersMap := make(map[string]auth.Credentials) - iamPolicyMap := make(map[string]string) - iamCannedPolicyMap := make(map[string]iampolicy.Policy) - - if err := reloadPolicies(objAPI, iamConfigPoliciesPrefix, iamCannedPolicyMap); err != nil { - return err +// updateGroupMembershipsMap - updates the memberships map for a +// group. IMPORTANT: Assumes sys.Lock() is held by caller. +func (sys *IAMSys) updateGroupMembershipsMap(group string, gi *GroupInfo) { + if gi == nil { + return } - if err := reloadUsers(objAPI, iamConfigUsersPrefix, iamUsersMap, iamPolicyMap); err != nil { - return err + for _, member := range gi.Members { + v := sys.iamUserGroupMemberships[member] + if v == nil { + v = set.CreateStringSet(group) + } else { + v.Add(group) + } + sys.iamUserGroupMemberships[member] = v } - if err := reloadUsers(objAPI, iamConfigSTSPrefix, iamUsersMap, iamPolicyMap); err != nil { - return err +} + +// removeGroupFromMembershipsMap - removes the group from every member +// in the cache. IMPORTANT: Assumes sys.Lock() is held by caller. +func (sys *IAMSys) removeGroupFromMembershipsMap(group string) { + if _, ok := sys.iamUserGroupMemberships[group]; !ok { + return + } + for member, groups := range sys.iamUserGroupMemberships { + if !groups.Contains(group) { + continue + } + groups.Remove(group) + sys.iamUserGroupMemberships[member] = groups } - - // Sets default canned policies, if none are set. - setDefaultCannedPolicies(iamCannedPolicyMap) - - sys.Lock() - defer sys.Unlock() - - sys.iamUsersMap = iamUsersMap - sys.iamPolicyMap = iamPolicyMap - sys.iamCannedPolicyMap = iamCannedPolicyMap - - return nil } // NewIAMSys - creates new config system object. 
func NewIAMSys() *IAMSys { return &IAMSys{ - iamUsersMap: make(map[string]auth.Credentials), - iamPolicyMap: make(map[string]string), - iamCannedPolicyMap: make(map[string]iampolicy.Policy), + iamUsersMap: make(map[string]auth.Credentials), + iamPolicyDocsMap: make(map[string]iampolicy.Policy), + iamUserPolicyMap: make(map[string]MappedPolicy), + iamGroupsMap: make(map[string]GroupInfo), + iamUserGroupMemberships: make(map[string]set.StringSet), } } diff --git a/vendor/github.com/minio/minio/cmd/lock-rest-client.go b/vendor/github.com/minio/minio/cmd/lock-rest-client.go index bdac5daee1..4e4cd6633f 100644 --- a/vendor/github.com/minio/minio/cmd/lock-rest-client.go +++ b/vendor/github.com/minio/minio/cmd/lock-rest-client.go @@ -17,10 +17,8 @@ package cmd import ( - "bytes" "context" "crypto/tls" - "encoding/gob" "errors" "io" "sync" @@ -131,32 +129,16 @@ func (client *lockRESTClient) Close() error { // restCall makes a call to the lock REST server. func (client *lockRESTClient) restCall(call string, args dsync.LockArgs) (reply bool, err error) { + values := url.Values{} + values.Set(lockRESTUID, args.UID) + values.Set(lockRESTSource, args.Source) + values.Set(lockRESTResource, args.Resource) + values.Set(lockRESTServerAddr, args.ServerAddr) + values.Set(lockRESTServerEndpoint, args.ServiceEndpoint) - reader := bytes.NewBuffer(make([]byte, 0, 2048)) - err = gob.NewEncoder(reader).Encode(args) - if err != nil { - return false, err - } - respBody, err := client.call(call, nil, reader, -1) - if err != nil { - return false, err - } - - var resp lockResponse + respBody, err := client.call(call, values, nil, -1) defer http.DrainBody(respBody) - err = gob.NewDecoder(respBody).Decode(&resp) - - if err != nil || !resp.Success { - reqInfo := &logger.ReqInfo{} - reqInfo.AppendTags("resource", args.Resource) - reqInfo.AppendTags("serveraddress", args.ServerAddr) - reqInfo.AppendTags("serviceendpoint", args.ServiceEndpoint) - reqInfo.AppendTags("source", args.Source) - 
 reqInfo.AppendTags("uid", args.UID) - ctx := logger.SetReqInfo(context.Background(), reqInfo) - logger.LogIf(ctx, err) - } - return resp.Success, err + return err == nil, err } // RLock calls read lock REST API. diff --git a/vendor/github.com/minio/minio/cmd/lock-rest-server-common.go b/vendor/github.com/minio/minio/cmd/lock-rest-server-common.go index ce2d74c566..cb933df097 100644 --- a/vendor/github.com/minio/minio/cmd/lock-rest-server-common.go +++ b/vendor/github.com/minio/minio/cmd/lock-rest-server-common.go @@ -21,7 +21,7 @@ import ( "time" ) -const lockRESTVersion = "v1" +const lockRESTVersion = "v2" const lockRESTPath = minioReservedBucketPath + "/lock/" + lockRESTVersion var lockServicePath = path.Join(minioReservedBucketPath, lockServiceSubPath) @@ -33,6 +33,18 @@ const ( lockRESTMethodRUnlock = "runlock" lockRESTMethodForceUnlock = "forceunlock" lockRESTMethodExpired = "expired" + + // Unique ID of lock/unlock request. + lockRESTUID = "uid" + // Source contains the line number, function and file name of the code + // on the client node that requested the lock. + lockRESTSource = "source" + // Resource contains an entity to be locked/unlocked. + lockRESTResource = "resource" + // ServerAddr contains the address of the server who requested lock/unlock of the above resource. + lockRESTServerAddr = "serverAddr" + // ServiceEndpoint contains the network path of above server to do lock/unlock. + lockRESTServerEndpoint = "serverEndpoint" ) // nameLockRequesterInfoPair is a helper type for lock maintenance @@ -41,10 +53,6 @@ type nameLockRequesterInfoPair struct { lri lockRequesterInfo } -type lockResponse struct { - Success bool - } - // Similar to removeEntry but only removes an entry only if the lock entry exists in map. 
func (l *localLocker) removeEntryIfExists(nlrip nameLockRequesterInfoPair) { // Check if entry is still in map (could have been removed altogether by 'concurrent' (R)Unlock of last entry) diff --git a/vendor/github.com/minio/minio/cmd/lock-rest-server.go b/vendor/github.com/minio/minio/cmd/lock-rest-server.go index 1ce80e34b2..b1b2b6b4f2 100644 --- a/vendor/github.com/minio/minio/cmd/lock-rest-server.go +++ b/vendor/github.com/minio/minio/cmd/lock-rest-server.go @@ -18,7 +18,6 @@ package cmd import ( "context" - "encoding/gob" "errors" "math/rand" "net/http" @@ -60,6 +59,15 @@ func (l *lockRESTServer) IsValid(w http.ResponseWriter, r *http.Request) bool { return true } +func getLockArgs(r *http.Request) dsync.LockArgs { + return dsync.LockArgs{ + UID: r.URL.Query().Get(lockRESTUID), + Resource: r.URL.Query().Get(lockRESTResource), + ServerAddr: r.URL.Query().Get(lockRESTServerAddr), + ServiceEndpoint: r.URL.Query().Get(lockRESTServerEndpoint), + } +} + // LockHandler - Acquires a lock. func (l *lockRESTServer) LockHandler(w http.ResponseWriter, r *http.Request) { if !l.IsValid(w, r) { @@ -67,28 +75,10 @@ func (l *lockRESTServer) LockHandler(w http.ResponseWriter, r *http.Request) { return } - ctx := newContext(r, w, "Lock") - - var lockArgs dsync.LockArgs - if r.ContentLength < 0 { - l.writeErrorResponse(w, errInvalidArgument) - return - } - - err := gob.NewDecoder(r.Body).Decode(&lockArgs) - if err != nil { + if _, err := l.ll.Lock(getLockArgs(r)); err != nil { l.writeErrorResponse(w, err) return } - - success, err := l.ll.Lock(lockArgs) - if err != nil { - l.writeErrorResponse(w, err) - return - } - resp := lockResponse{Success: success} - logger.LogIf(ctx, gob.NewEncoder(w).Encode(resp)) - w.(http.Flusher).Flush() } // UnlockHandler - releases the acquired lock. 
@@ -98,28 +88,10 @@ func (l *lockRESTServer) UnlockHandler(w http.ResponseWriter, r *http.Request) { return } - ctx := newContext(r, w, "Unlock") - - var lockArgs dsync.LockArgs - if r.ContentLength < 0 { - l.writeErrorResponse(w, errInvalidArgument) - return - } - - err := gob.NewDecoder(r.Body).Decode(&lockArgs) - if err != nil { + if _, err := l.ll.Unlock(getLockArgs(r)); err != nil { l.writeErrorResponse(w, err) return } - - success, err := l.ll.Unlock(lockArgs) - if err != nil { - l.writeErrorResponse(w, err) - return - } - resp := lockResponse{Success: success} - logger.LogIf(ctx, gob.NewEncoder(w).Encode(resp)) - w.(http.Flusher).Flush() } // LockHandler - Acquires an RLock. @@ -129,27 +101,10 @@ func (l *lockRESTServer) RLockHandler(w http.ResponseWriter, r *http.Request) { return } - ctx := newContext(r, w, "RLock") - var lockArgs dsync.LockArgs - if r.ContentLength < 0 { - l.writeErrorResponse(w, errInvalidArgument) - return - } - - err := gob.NewDecoder(r.Body).Decode(&lockArgs) - if err != nil { + if _, err := l.ll.RLock(getLockArgs(r)); err != nil { l.writeErrorResponse(w, err) return } - - success, err := l.ll.RLock(lockArgs) - if err != nil { - l.writeErrorResponse(w, err) - return - } - resp := lockResponse{Success: success} - logger.LogIf(ctx, gob.NewEncoder(w).Encode(resp)) - w.(http.Flusher).Flush() } // RUnlockHandler - releases the acquired read lock. 
@@ -159,27 +114,10 @@ func (l *lockRESTServer) RUnlockHandler(w http.ResponseWriter, r *http.Request) return } - ctx := newContext(r, w, "RUnlock") - var lockArgs dsync.LockArgs - if r.ContentLength < 0 { - l.writeErrorResponse(w, errInvalidArgument) - return - } - - err := gob.NewDecoder(r.Body).Decode(&lockArgs) - if err != nil { + if _, err := l.ll.RUnlock(getLockArgs(r)); err != nil { l.writeErrorResponse(w, err) return } - - success, err := l.ll.RUnlock(lockArgs) - if err != nil { - l.writeErrorResponse(w, err) - return - } - resp := lockResponse{Success: success} - logger.LogIf(ctx, gob.NewEncoder(w).Encode(resp)) - w.(http.Flusher).Flush() } // ForceUnlockHandler - force releases the acquired lock. @@ -189,28 +127,10 @@ func (l *lockRESTServer) ForceUnlockHandler(w http.ResponseWriter, r *http.Reque return } - ctx := newContext(r, w, "ForceUnlock") - - var lockArgs dsync.LockArgs - if r.ContentLength < 0 { - l.writeErrorResponse(w, errInvalidArgument) - return - } - - err := gob.NewDecoder(r.Body).Decode(&lockArgs) - if err != nil { + if _, err := l.ll.ForceUnlock(getLockArgs(r)); err != nil { l.writeErrorResponse(w, err) return } - - success, err := l.ll.ForceUnlock(lockArgs) - if err != nil { - l.writeErrorResponse(w, err) - return - } - resp := lockResponse{Success: success} - logger.LogIf(ctx, gob.NewEncoder(w).Encode(resp)) - w.(http.Flusher).Flush() } // ExpiredHandler - query expired lock status. 
@@ -220,19 +140,8 @@ func (l *lockRESTServer) ExpiredHandler(w http.ResponseWriter, r *http.Request) return } - ctx := newContext(r, w, "Expired") + lockArgs := getLockArgs(r) - var lockArgs dsync.LockArgs - if r.ContentLength < 0 { - l.writeErrorResponse(w, errInvalidArgument) - return - } - - err := gob.NewDecoder(r.Body).Decode(&lockArgs) - if err != nil { - l.writeErrorResponse(w, err) - return - } success := true l.ll.mutex.Lock() defer l.ll.mutex.Unlock() @@ -246,11 +155,10 @@ func (l *lockRESTServer) ExpiredHandler(w http.ResponseWriter, r *http.Request) } } } - // When we get here lock is no longer active due to either dsync.LockArgs.Resource - // being absent from map or uid not found for given dsync.LockArgs.Resource - resp := lockResponse{Success: success} - logger.LogIf(ctx, gob.NewEncoder(w).Encode(resp)) - w.(http.Flusher).Flush() + if !success { + l.writeErrorResponse(w, errors.New("lock already expired")) + return + } } // lockMaintenance loops over locks that have been active for some time and checks back @@ -323,12 +231,14 @@ func startLockMaintenance(lkSrv *lockRESTServer) { // registerLockRESTHandlers - register lock rest router. 
func registerLockRESTHandlers(router *mux.Router) { subrouter := router.PathPrefix(lockRESTPath).Subrouter() - subrouter.Methods(http.MethodPost).Path("/" + lockRESTMethodLock).HandlerFunc(httpTraceHdrs(globalLockServer.LockHandler)) - subrouter.Methods(http.MethodPost).Path("/" + lockRESTMethodRLock).HandlerFunc(httpTraceHdrs(globalLockServer.RLockHandler)) - subrouter.Methods(http.MethodPost).Path("/" + lockRESTMethodUnlock).HandlerFunc(httpTraceHdrs(globalLockServer.UnlockHandler)) - subrouter.Methods(http.MethodPost).Path("/" + lockRESTMethodRUnlock).HandlerFunc(httpTraceHdrs(globalLockServer.RUnlockHandler)) - subrouter.Methods(http.MethodPost).Path("/" + lockRESTMethodForceUnlock).HandlerFunc(httpTraceHdrs(globalLockServer.ForceUnlockHandler)) - subrouter.Methods(http.MethodPost).Path("/" + lockRESTMethodExpired).HandlerFunc(httpTraceAll(globalLockServer.ExpiredHandler)) + queries := restQueries(lockRESTUID, lockRESTSource, lockRESTResource, lockRESTServerAddr, lockRESTServerEndpoint) + subrouter.Methods(http.MethodPost).Path(SlashSeparator + lockRESTMethodLock).HandlerFunc(httpTraceHdrs(globalLockServer.LockHandler)).Queries(queries...) + subrouter.Methods(http.MethodPost).Path(SlashSeparator + lockRESTMethodRLock).HandlerFunc(httpTraceHdrs(globalLockServer.RLockHandler)).Queries(queries...) + subrouter.Methods(http.MethodPost).Path(SlashSeparator + lockRESTMethodUnlock).HandlerFunc(httpTraceHdrs(globalLockServer.UnlockHandler)).Queries(queries...) + subrouter.Methods(http.MethodPost).Path(SlashSeparator + lockRESTMethodRUnlock).HandlerFunc(httpTraceHdrs(globalLockServer.RUnlockHandler)).Queries(queries...) + subrouter.Methods(http.MethodPost).Path(SlashSeparator + lockRESTMethodForceUnlock).HandlerFunc(httpTraceHdrs(globalLockServer.ForceUnlockHandler)).Queries(queries...) + subrouter.Methods(http.MethodPost).Path(SlashSeparator + lockRESTMethodExpired).HandlerFunc(httpTraceAll(globalLockServer.ExpiredHandler)).Queries(queries...) 
+ router.NotFoundHandler = http.HandlerFunc(httpTraceAll(notFoundHandler)) // Start lock maintenance from all lock servers. diff --git a/vendor/github.com/minio/minio/cmd/notification.go b/vendor/github.com/minio/minio/cmd/notification.go index 9d2c5b5a6c..8907d61259 100644 --- a/vendor/github.com/minio/minio/cmd/notification.go +++ b/vendor/github.com/minio/minio/cmd/notification.go @@ -232,6 +232,19 @@ func (sys *NotificationSys) LoadUsers() []NotificationPeerErr { return ng.Wait() } +// LoadGroup - loads a specific group on all peers. +func (sys *NotificationSys) LoadGroup(group string) []NotificationPeerErr { + ng := WithNPeers(len(sys.peerClients)) + for idx, client := range sys.peerClients { + if client == nil { + continue + } + client := client + ng.Go(context.Background(), func() error { return client.LoadGroup(group) }, idx, *client.host) + } + return ng.Wait() +} + // BackgroundHealStatus - returns background heal status of all peers func (sys *NotificationSys) BackgroundHealStatus() []madmin.BgHealState { states := make([]madmin.BgHealState, len(sys.peerClients)) diff --git a/vendor/github.com/minio/minio/cmd/object-api-common.go b/vendor/github.com/minio/minio/cmd/object-api-common.go index d94af00b96..4ba3603d09 100644 --- a/vendor/github.com/minio/minio/cmd/object-api-common.go +++ b/vendor/github.com/minio/minio/cmd/object-api-common.go @@ -56,10 +56,10 @@ func init() { } // Checks if the object is a directory, this logic uses -// if size == 0 and object ends with slashSeparator then +// if size == 0 and object ends with SlashSeparator then // returns true. func isObjectDir(object string, size int64) bool { - return hasSuffix(object, slashSeparator) && size == 0 + return hasSuffix(object, SlashSeparator) && size == 0 } // Converts just bucket, object metadata into ObjectInfo datatype. 
@@ -110,7 +110,7 @@ func cleanupDir(ctx context.Context, storage StorageAPI, volume, dirPath string) var delFunc func(string) error // Function to delete entries recursively. delFunc = func(entryPath string) error { - if !hasSuffix(entryPath, slashSeparator) { + if !hasSuffix(entryPath, SlashSeparator) { // Delete the file entry. err := storage.DeleteFile(volume, entryPath) logger.LogIf(ctx, err) @@ -129,7 +129,7 @@ func cleanupDir(ctx context.Context, storage StorageAPI, volume, dirPath string) // Entry path is empty, just delete it. if len(entries) == 0 { - err = storage.DeleteFile(volume, path.Clean(entryPath)) + err = storage.DeleteFile(volume, entryPath) logger.LogIf(ctx, err) return err } @@ -157,7 +157,7 @@ func cleanupObjectsBulk(ctx context.Context, storage StorageAPI, volume string, var traverse func(string) ([]string, error) traverse = func(entryPath string) ([]string, error) { var output = make([]string, 0) - if !hasSuffix(entryPath, slashSeparator) { + if !hasSuffix(entryPath, SlashSeparator) { output = append(output, entryPath) return output, nil } @@ -320,7 +320,7 @@ func listObjectsNonSlash(ctx context.Context, obj ObjectLayer, bucket, prefix, m } func listObjects(ctx context.Context, obj ObjectLayer, bucket, prefix, marker, delimiter string, maxKeys int, tpool *TreeWalkPool, listDir ListDirFunc, getObjInfo func(context.Context, string, string) (ObjectInfo, error), getObjectInfoDirs ...func(context.Context, string, string) (ObjectInfo, error)) (loi ListObjectsInfo, err error) { - if delimiter != slashSeparator && delimiter != "" { + if delimiter != SlashSeparator && delimiter != "" { return listObjectsNonSlash(ctx, obj, bucket, prefix, marker, delimiter, maxKeys, tpool, listDir, getObjInfo, getObjectInfoDirs...) } @@ -346,7 +346,7 @@ func listObjects(ctx context.Context, obj ObjectLayer, bucket, prefix, marker, d // along // with the prefix. 
On a flat namespace with 'prefix' // as '/' we don't have any entries, since all the keys are // of form 'keyName/...' - if delimiter == slashSeparator && prefix == slashSeparator { + if delimiter == SlashSeparator && prefix == SlashSeparator { return loi, nil } @@ -357,7 +357,7 @@ func listObjects(ctx context.Context, obj ObjectLayer, bucket, prefix, marker, d // Default is recursive, if delimiter is set then list non recursive. recursive := true - if delimiter == slashSeparator { + if delimiter == SlashSeparator { recursive = false } @@ -382,7 +382,7 @@ func listObjects(ctx context.Context, obj ObjectLayer, bucket, prefix, marker, d var objInfo ObjectInfo var err error - if hasSuffix(walkResult.entry, slashSeparator) { + if hasSuffix(walkResult.entry, SlashSeparator) { for _, getObjectInfoDir := range getObjectInfoDirs { objInfo, err = getObjectInfoDir(ctx, bucket, walkResult.entry) if err == nil { @@ -429,7 +429,7 @@ func listObjects(ctx context.Context, obj ObjectLayer, bucket, prefix, marker, d result := ListObjectsInfo{} for _, objInfo := range objInfos { - if objInfo.IsDir && delimiter == slashSeparator { + if objInfo.IsDir && delimiter == SlashSeparator { result.Prefixes = append(result.Prefixes, objInfo.Name) continue } diff --git a/vendor/github.com/minio/minio/cmd/object-api-errors.go b/vendor/github.com/minio/minio/cmd/object-api-errors.go index 461e5df46b..35695a0db3 100644 --- a/vendor/github.com/minio/minio/cmd/object-api-errors.go +++ b/vendor/github.com/minio/minio/cmd/object-api-errors.go @@ -203,14 +203,14 @@ func (e ObjectExistsAsDirectory) Error() string { type PrefixAccessDenied GenericError func (e PrefixAccessDenied) Error() string { - return "Prefix access is denied: " + e.Bucket + "/" + e.Object + return "Prefix access is denied: " + e.Bucket + SlashSeparator + e.Object } // ParentIsObject object access is denied. 
type ParentIsObject GenericError func (e ParentIsObject) Error() string { - return "Parent is object " + e.Bucket + "/" + path.Dir(e.Object) + return "Parent is object " + e.Bucket + SlashSeparator + path.Dir(e.Object) } // BucketExists bucket exists. diff --git a/vendor/github.com/minio/minio/cmd/object-api-getobject_test.go b/vendor/github.com/minio/minio/cmd/object-api-getobject_test.go index d4d8788199..17ae841781 100644 --- a/vendor/github.com/minio/minio/cmd/object-api-getobject_test.go +++ b/vendor/github.com/minio/minio/cmd/object-api-getobject_test.go @@ -259,7 +259,7 @@ func testGetObjectPermissionDenied(obj ObjectLayer, instanceType string, disks [ for i, testCase := range testCases { for _, d := range disks { - err = os.Chmod(d+"/"+testCase.bucketName+"/"+testCase.chmodPath, 0) + err = os.Chmod(d+SlashSeparator+testCase.bucketName+SlashSeparator+testCase.chmodPath, 0) if err != nil { t.Fatalf("Test %d, Unable to chmod: %v", i+1, err) } diff --git a/vendor/github.com/minio/minio/cmd/object-api-input-checks.go b/vendor/github.com/minio/minio/cmd/object-api-input-checks.go index 09bb7afc4f..da52945218 100644 --- a/vendor/github.com/minio/minio/cmd/object-api-input-checks.go +++ b/vendor/github.com/minio/minio/cmd/object-api-input-checks.go @@ -74,7 +74,7 @@ func checkListObjsArgs(ctx context.Context, bucket, prefix, marker, delimiter st } } // Verify if delimiter is anything other than '/', which we do not support. 
- if delimiter != "" && delimiter != slashSeparator { + if delimiter != "" && delimiter != SlashSeparator { logger.LogIf(ctx, UnsupportedDelimiter{ Delimiter: delimiter, }) @@ -102,7 +102,7 @@ func checkListMultipartArgs(ctx context.Context, bucket, prefix, keyMarker, uplo return err } if uploadIDMarker != "" { - if hasSuffix(keyMarker, slashSeparator) { + if hasSuffix(keyMarker, SlashSeparator) { logger.LogIf(ctx, InvalidUploadIDKeyCombination{ UploadIDMarker: uploadIDMarker, @@ -196,7 +196,7 @@ func checkPutObjectArgs(ctx context.Context, bucket, object string, obj ObjectLa return err } if len(object) == 0 || - (hasSuffix(object, slashSeparator) && size != 0) || + (hasSuffix(object, SlashSeparator) && size != 0) || !IsValidObjectPrefix(object) { return ObjectNameInvalid{ Bucket: bucket, diff --git a/vendor/github.com/minio/minio/cmd/object-api-listobjects_test.go b/vendor/github.com/minio/minio/cmd/object-api-listobjects_test.go index 31ae30460b..de6e282484 100644 --- a/vendor/github.com/minio/minio/cmd/object-api-listobjects_test.go +++ b/vendor/github.com/minio/minio/cmd/object-api-listobjects_test.go @@ -543,20 +543,20 @@ func testListObjects(obj ObjectLayer, instanceType string, t1 TestErrHandler) { {"test-bucket-list-object", "Asia", "", "", 10, resultCases[24], nil, true}, // Tests with prefix and delimiter (55-57). // With delimeter the code should not recurse into the sub-directories of prefix Dir. 
- {"test-bucket-list-object", "Asia", "", "/", 10, resultCases[25], nil, true}, - {"test-bucket-list-object", "new", "", "/", 10, resultCases[26], nil, true}, - {"test-bucket-list-object", "Asia/India/", "", "/", 10, resultCases[27], nil, true}, + {"test-bucket-list-object", "Asia", "", SlashSeparator, 10, resultCases[25], nil, true}, + {"test-bucket-list-object", "new", "", SlashSeparator, 10, resultCases[26], nil, true}, + {"test-bucket-list-object", "Asia/India/", "", SlashSeparator, 10, resultCases[27], nil, true}, // Test with marker set as hierarhical value and with delimiter. (58-59) - {"test-bucket-list-object", "", "Asia/India/India-summer-photos-1", "/", 10, resultCases[28], nil, true}, - {"test-bucket-list-object", "", "Asia/India/Karnataka/Bangalore/Koramangala/pics", "/", 10, resultCases[29], nil, true}, + {"test-bucket-list-object", "", "Asia/India/India-summer-photos-1", SlashSeparator, 10, resultCases[28], nil, true}, + {"test-bucket-list-object", "", "Asia/India/Karnataka/Bangalore/Koramangala/pics", SlashSeparator, 10, resultCases[29], nil, true}, // Test with prefix and delimiter set to '/'. 
(60) - {"test-bucket-list-object", "/", "", "/", 10, resultCases[30], nil, true}, + {"test-bucket-list-object", SlashSeparator, "", SlashSeparator, 10, resultCases[30], nil, true}, // Test with invalid prefix (61) - {"test-bucket-list-object", "\\", "", "/", 10, ListObjectsInfo{}, ObjectNameInvalid{Bucket: "test-bucket-list-object", Object: "\\"}, false}, + {"test-bucket-list-object", "\\", "", SlashSeparator, 10, ListObjectsInfo{}, ObjectNameInvalid{Bucket: "test-bucket-list-object", Object: "\\"}, false}, // Test listing an empty directory in recursive mode (62) {"test-bucket-empty-dir", "", "", "", 10, resultCases[31], nil, true}, // Test listing an empty directory in a non recursive mode (63) - {"test-bucket-empty-dir", "", "", "/", 10, resultCases[32], nil, true}, + {"test-bucket-empty-dir", "", "", SlashSeparator, 10, resultCases[32], nil, true}, // Test listing a directory which contains an empty directory (64) {"test-bucket-empty-dir", "", "temporary/", "", 10, resultCases[33], nil, true}, } @@ -595,7 +595,7 @@ func testListObjects(obj ObjectLayer, instanceType string, t1 TestErrHandler) { t.Errorf("Test %d: %s: Expected object name to be \"%s\", but found \"%s\" instead", i+1, instanceType, testCase.result.Objects[j].Name, result.Objects[j].Name) } // FIXME: we should always check for ETag - if result.Objects[j].ETag == "" && !strings.HasSuffix(result.Objects[j].Name, slashSeparator) { + if result.Objects[j].ETag == "" && !strings.HasSuffix(result.Objects[j].Name, SlashSeparator) { t.Errorf("Test %d: %s: Expected ETag to be not empty, but found empty instead (%v)", i+1, instanceType, result.Objects[j].Name) } diff --git a/vendor/github.com/minio/minio/cmd/object-api-multipart_test.go b/vendor/github.com/minio/minio/cmd/object-api-multipart_test.go index aa346e099d..1386da39de 100644 --- a/vendor/github.com/minio/minio/cmd/object-api-multipart_test.go +++ b/vendor/github.com/minio/minio/cmd/object-api-multipart_test.go @@ -680,7 +680,7 @@ func 
testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan // Expecting the result to contain one MultipartInfo entry and IsTruncated to be false. { MaxUploads: 2, - Delimiter: "/", + Delimiter: SlashSeparator, Prefix: "", IsTruncated: false, Uploads: []MultipartInfo{ @@ -1170,7 +1170,7 @@ func testListMultipartUploads(obj ObjectLayer, instanceType string, t TestErrHan {bucketNames[0], "orange", "", "", "", 2, listMultipartResults[12], nil, true}, {bucketNames[0], "Asia", "", "", "", 2, listMultipartResults[13], nil, true}, // setting delimiter (Test number 27). - {bucketNames[0], "", "", "", "/", 2, listMultipartResults[14], nil, true}, + {bucketNames[0], "", "", "", SlashSeparator, 2, listMultipartResults[14], nil, true}, //Test case with multiple uploadID listing for given object (Test number 28). {bucketNames[1], "", "", "", "", 100, listMultipartResults[15], nil, true}, // Test case with multiple uploadID listing for given object, but uploadID marker set. diff --git a/vendor/github.com/minio/minio/cmd/object-api-utils.go b/vendor/github.com/minio/minio/cmd/object-api-utils.go index 7810b6721b..dcbb77952a 100644 --- a/vendor/github.com/minio/minio/cmd/object-api-utils.go +++ b/vendor/github.com/minio/minio/cmd/object-api-utils.go @@ -51,7 +51,7 @@ const ( // Multipart meta prefix. mpartMetaPrefix = "multipart" // MinIO Multipart meta prefix. - minioMetaMultipartBucket = minioMetaBucket + "/" + mpartMetaPrefix + minioMetaMultipartBucket = minioMetaBucket + SlashSeparator + mpartMetaPrefix // MinIO Tmp meta prefix. minioMetaTmpBucket = minioMetaBucket + "/tmp" // DNS separator (period), used for bucket name validation. @@ -131,12 +131,12 @@ func IsValidBucketName(bucket string) bool { // // - Backslash ("\") // -// additionally minio does not support object names with trailing "/". +// additionally minio does not support object names with trailing SlashSeparator. 
func IsValidObjectName(object string) bool { if len(object) == 0 { return false } - if hasSuffix(object, slashSeparator) { + if hasSuffix(object, SlashSeparator) { return false } return IsValidObjectPrefix(object) @@ -168,7 +168,7 @@ func checkObjectNameForLengthAndSlash(bucket, object string) error { } } // Check for slash as prefix in object name - if hasPrefix(object, slashSeparator) { + if hasPrefix(object, SlashSeparator) { return ObjectNamePrefixAsSlash{ Bucket: bucket, Object: object, @@ -177,20 +177,20 @@ func checkObjectNameForLengthAndSlash(bucket, object string) error { return nil } -// Slash separator. -const slashSeparator = "/" +// SlashSeparator - slash separator. +const SlashSeparator = "/" // retainSlash - retains slash from a path. func retainSlash(s string) string { - return strings.TrimSuffix(s, slashSeparator) + slashSeparator + return strings.TrimSuffix(s, SlashSeparator) + SlashSeparator } -// pathJoin - like path.Join() but retains trailing "/" of the last element +// pathJoin - like path.Join() but retains trailing SlashSeparator of the last element func pathJoin(elem ...string) string { trailingSlash := "" if len(elem) > 0 { - if hasSuffix(elem[len(elem)-1], slashSeparator) { - trailingSlash = "/" + if hasSuffix(elem[len(elem)-1], SlashSeparator) { + trailingSlash = SlashSeparator } } return path.Join(elem...) + trailingSlash @@ -292,7 +292,7 @@ func isStringEqual(s1 string, s2 string) bool { // Ignores all reserved bucket names or invalid bucket names. 
func isReservedOrInvalidBucket(bucketEntry string, strict bool) bool { - bucketEntry = strings.TrimSuffix(bucketEntry, slashSeparator) + bucketEntry = strings.TrimSuffix(bucketEntry, SlashSeparator) if strict { if err := s3utils.CheckValidBucketNameStrict(bucketEntry); err != nil { return true @@ -319,7 +319,7 @@ func isMinioReservedBucket(bucketName string) bool { func getHostsSlice(records []dns.SrvRecord) []string { var hosts []string for _, r := range records { - hosts = append(hosts, r.Host) + hosts = append(hosts, net.JoinHostPort(r.Host, fmt.Sprintf("%d", r.Port))) } return hosts } diff --git a/vendor/github.com/minio/minio/cmd/object-handlers.go b/vendor/github.com/minio/minio/cmd/object-handlers.go index c63661cf53..43059da226 100644 --- a/vendor/github.com/minio/minio/cmd/object-handlers.go +++ b/vendor/github.com/minio/minio/cmd/object-handlers.go @@ -1233,7 +1233,7 @@ func (api objectAPIHandlers) PutObjectHandler(w http.ResponseWriter, r *http.Req var objectEncryptionKey []byte if objectAPI.IsEncryptionSupported() { - if hasServerSideEncryptionHeader(r.Header) && !hasSuffix(object, slashSeparator) { // handle SSE requests + if hasServerSideEncryptionHeader(r.Header) && !hasSuffix(object, SlashSeparator) { // handle SSE requests reader, objectEncryptionKey, err = EncryptRequest(hashReader, r, bucket, object, metadata) if err != nil { writeErrorResponse(ctx, w, toAPIError(ctx, err), r.URL, guessIsBrowserReq(r)) diff --git a/vendor/github.com/minio/minio/cmd/object-handlers_test.go b/vendor/github.com/minio/minio/cmd/object-handlers_test.go index 9e83789eba..39bdb7993a 100644 --- a/vendor/github.com/minio/minio/cmd/object-handlers_test.go +++ b/vendor/github.com/minio/minio/cmd/object-handlers_test.go @@ -482,7 +482,7 @@ func testAPIGetObjectHandler(obj ObjectLayer, instanceType, bucketName string, a expectedContent: encodeResponse(getAPIErrorResponse(ctx, getAPIError(ErrNoSuchKey), - "/"+bucketName+"/"+". ./. 
./etc", "", "")), + SlashSeparator+bucketName+SlashSeparator+". ./. ./etc", "", "")), expectedRespStatus: http.StatusNotFound, }, // Test case - 9. @@ -496,7 +496,7 @@ func testAPIGetObjectHandler(obj ObjectLayer, instanceType, bucketName string, a expectedContent: encodeResponse(getAPIErrorResponse(ctx, getAPIError(ErrInvalidObjectName), - "/"+bucketName+"/"+". ./../etc", "", "")), + SlashSeparator+bucketName+SlashSeparator+". ./../etc", "", "")), expectedRespStatus: http.StatusBadRequest, }, // Test case - 10. @@ -1593,7 +1593,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri { bucketName: bucketName, uploadID: uploadID, - copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName), + copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName), accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, expectedRespStatus: http.StatusOK, @@ -1604,7 +1604,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri { bucketName: bucketName, uploadID: uploadID, - copySourceHeader: url.QueryEscape("/"), + copySourceHeader: url.QueryEscape(SlashSeparator), accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, @@ -1617,7 +1617,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri { bucketName: bucketName, uploadID: uploadID, - copySourceHeader: url.QueryEscape("/" + bucketName + "/" + testObject), + copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + testObject), accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, @@ -1629,7 +1629,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri { bucketName: bucketName, uploadID: uploadID, - copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName), + copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName), copySourceRange: "bytes=500-4096", 
accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, @@ -1642,7 +1642,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri { bucketName: bucketName, uploadID: uploadID, - copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName), + copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName), copySourceRange: "bytes=6145-", accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, @@ -1655,7 +1655,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri { bucketName: bucketName, uploadID: uploadID, - copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName), + copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName), copySourceRange: "bytes=0-6144", accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, @@ -1683,7 +1683,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri { bucketName: bucketName, uploadID: uploadID, - copySourceHeader: url.QueryEscape("/" + bucketName + "/" + "non-existent-object"), + copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + "non-existent-object"), accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, @@ -1697,7 +1697,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri { bucketName: "non-existent-destination-bucket", uploadID: uploadID, - copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName), + copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName), accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, @@ -1709,7 +1709,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri { bucketName: bucketName, uploadID: uploadID, - copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName), + copySourceHeader: url.QueryEscape(SlashSeparator 
+ bucketName + SlashSeparator + objectName), accessKey: "Invalid-AccessID", secretKey: credentials.SecretKey, @@ -1721,7 +1721,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri { bucketName: bucketName, uploadID: "-1", - copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName), + copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName), accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, @@ -1732,7 +1732,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri { bucketName: bucketName, uploadID: uploadID, - copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName), + copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName), invalidPartNumber: true, accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, @@ -1743,7 +1743,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri { bucketName: bucketName, uploadID: uploadID, - copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName), + copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName), maximumPartNumber: true, accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, @@ -1753,7 +1753,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri { bucketName: bucketName, uploadID: uploadID, - copySourceHeader: url.QueryEscape("/"+bucketName+"/"+objectName) + "?versionId=null", + copySourceHeader: url.QueryEscape(SlashSeparator+bucketName+SlashSeparator+objectName) + "?versionId=null", accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, expectedRespStatus: http.StatusOK, @@ -1762,7 +1762,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri { bucketName: bucketName, uploadID: uploadID, - copySourceHeader: url.QueryEscape("/"+bucketName+"/"+objectName) + 
"?versionId=17", + copySourceHeader: url.QueryEscape(SlashSeparator+bucketName+SlashSeparator+objectName) + "?versionId=17", accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, expectedRespStatus: http.StatusNotFound, @@ -1771,7 +1771,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri { bucketName: bucketName, uploadID: uploadID, - copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName), + copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName), copySourceVersionID: "null", accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, @@ -1781,7 +1781,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri { bucketName: bucketName, uploadID: uploadID, - copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName), + copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName), copySourceVersionID: "17", accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, @@ -1852,7 +1852,7 @@ func testAPICopyObjectPartHandler(obj ObjectLayer, instanceType, bucketName stri // Below is how CopyObjectPartHandler is registered. // bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?").HandlerFunc(api.CopyObjectPartHandler).Queries("partNumber", "{partNumber:[0-9]+}", "uploadId", "{uploadId:.*}") // Its necessary to set the "X-Amz-Copy-Source" header for the request to be accepted by the handler. - nilReq.Header.Set("X-Amz-Copy-Source", url.QueryEscape("/"+nilBucket+"/"+nilObject)) + nilReq.Header.Set("X-Amz-Copy-Source", url.QueryEscape(SlashSeparator+nilBucket+SlashSeparator+nilObject)) // execute the object layer set to `nil` test. // `ExecObjectLayerAPINilTest` manages the operation. 
@@ -1947,7 +1947,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, { bucketName: bucketName, newObjectName: "newObject1", - copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName), + copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName), accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, metadata: map[string]string{ @@ -1961,7 +1961,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, { bucketName: bucketName, newObjectName: "newObject1", - copySourceHeader: url.QueryEscape("/"), + copySourceHeader: url.QueryEscape(SlashSeparator), accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, @@ -1973,7 +1973,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, { bucketName: bucketName, newObjectName: objectName, - copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName), + copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName), accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, @@ -1986,7 +1986,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, { bucketName: bucketName, newObjectName: objectName, - copySourceHeader: url.QueryEscape(bucketName + "/" + objectName), + copySourceHeader: url.QueryEscape(bucketName + SlashSeparator + objectName), accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, @@ -1999,7 +1999,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, { bucketName: bucketName, newObjectName: objectName, - copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName), + copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName), metadata: map[string]string{ "Content-Type": "application/json", }, @@ -2015,7 +2015,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName 
string, { bucketName: bucketName, newObjectName: "newObject1", - copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName), + copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName), metadata: map[string]string{ "Content-Type": "application/json", }, @@ -2032,7 +2032,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, { bucketName: bucketName, newObjectName: objectName, - copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName), + copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName), metadata: map[string]string{ "Content-Type": "application/json", }, @@ -2050,7 +2050,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, { bucketName: bucketName, newObjectName: objectName, - copySourceHeader: url.QueryEscape("/" + bucketName + "/" + "non-existent-object"), + copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + "non-existent-object"), accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, @@ -2064,7 +2064,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, { bucketName: "non-existent-destination-bucket", newObjectName: objectName, - copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName), + copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName), accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, @@ -2076,7 +2076,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, { bucketName: bucketName, newObjectName: objectName, - copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName), + copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName), accessKey: "Invalid-AccessID", secretKey: credentials.SecretKey, @@ -2086,7 +2086,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, 
bucketName string, { bucketName: bucketName, newObjectName: "newObject1", - copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName), + copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName), copyModifiedHeader: "Mon, 02 Jan 2006 15:04:05 GMT", accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, @@ -2096,7 +2096,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, { bucketName: bucketName, newObjectName: "newObject1", - copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName), + copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName), copyModifiedHeader: "Mon, 02 Jan 2217 15:04:05 GMT", accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, @@ -2106,7 +2106,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, { bucketName: bucketName, newObjectName: "newObject1", - copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName), + copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName), copyModifiedHeader: "Mon, 02 Jan 2217 15:04:05 +00:00", accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, @@ -2116,7 +2116,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, { bucketName: bucketName, newObjectName: "newObject1", - copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName), + copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName), copyUnmodifiedHeader: "Mon, 02 Jan 2217 15:04:05 GMT", accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, @@ -2126,7 +2126,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, { bucketName: bucketName, newObjectName: "newObject1", - copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName), + copySourceHeader: url.QueryEscape(SlashSeparator + 
bucketName + SlashSeparator + objectName), copyUnmodifiedHeader: "Mon, 02 Jan 2007 15:04:05 GMT", accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, @@ -2136,7 +2136,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, { bucketName: bucketName, newObjectName: "newObject1", - copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName), + copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName), copyUnmodifiedHeader: "Mon, 02 Jan 2007 15:04:05 +00:00", accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, @@ -2146,7 +2146,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, { bucketName: bucketName, newObjectName: "newObject1", - copySourceHeader: url.QueryEscape("/"+bucketName+"/"+objectName) + "?versionId=null", + copySourceHeader: url.QueryEscape(SlashSeparator+bucketName+SlashSeparator+objectName) + "?versionId=null", accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, expectedRespStatus: http.StatusOK, @@ -2155,7 +2155,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, { bucketName: bucketName, newObjectName: "newObject1", - copySourceHeader: url.QueryEscape("/"+bucketName+"/"+objectName) + "?versionId=17", + copySourceHeader: url.QueryEscape(SlashSeparator+bucketName+SlashSeparator+objectName) + "?versionId=17", accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, expectedRespStatus: http.StatusNotFound, @@ -2164,7 +2164,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, { bucketName: bucketName, newObjectName: "newObject1", - copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName), + copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName), copySourceVersionID: "null", accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, @@ -2174,7 +2174,7 @@ func 
testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, { bucketName: bucketName, newObjectName: "newObject1", - copySourceHeader: url.QueryEscape("/" + bucketName + "/" + objectName), + copySourceHeader: url.QueryEscape(SlashSeparator + bucketName + SlashSeparator + objectName), copySourceVersionID: "17", accessKey: credentials.AccessKey, secretKey: credentials.SecretKey, @@ -2307,7 +2307,7 @@ func testAPICopyObjectHandler(obj ObjectLayer, instanceType, bucketName string, // Below is how CopyObjectHandler is registered. // bucket.Methods("PUT").Path("/{object:.+}").HeadersRegexp("X-Amz-Copy-Source", ".*?(\\/|%2F).*?") // Its necessary to set the "X-Amz-Copy-Source" header for the request to be accepted by the handler. - nilReq.Header.Set("X-Amz-Copy-Source", url.QueryEscape("/"+nilBucket+"/"+nilObject)) + nilReq.Header.Set("X-Amz-Copy-Source", url.QueryEscape(SlashSeparator+nilBucket+SlashSeparator+nilObject)) if err != nil { t.Errorf("MinIO %s: Failed to create HTTP request for testing the response when object Layer is set to `nil`.", instanceType) } diff --git a/vendor/github.com/minio/minio/cmd/object_api_suite_test.go b/vendor/github.com/minio/minio/cmd/object_api_suite_test.go index b4e83fe4f5..36a30670b8 100644 --- a/vendor/github.com/minio/minio/cmd/object_api_suite_test.go +++ b/vendor/github.com/minio/minio/cmd/object_api_suite_test.go @@ -340,7 +340,7 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) { if err != nil { t.Fatalf("%s: %s", instanceType, err) } - result, err = obj.ListObjects(context.Background(), "bucket", "this/is/", "", "/", 10) + result, err = obj.ListObjects(context.Background(), "bucket", "this/is/", "", SlashSeparator, 10) if err != nil { t.Fatalf("%s: %s", instanceType, err) } @@ -354,7 +354,7 @@ func testPaging(obj ObjectLayer, instanceType string, t TestErrHandler) { // check delimited results with delimiter without prefix. 
{ - result, err = obj.ListObjects(context.Background(), "bucket", "", "", "/", 1000) + result, err = obj.ListObjects(context.Background(), "bucket", "", "", SlashSeparator, 1000) if err != nil { t.Fatalf("%s: %s", instanceType, err) } diff --git a/vendor/github.com/minio/minio/cmd/os-reliable.go b/vendor/github.com/minio/minio/cmd/os-reliable.go index 669bce6ca2..410bcc2d32 100644 --- a/vendor/github.com/minio/minio/cmd/os-reliable.go +++ b/vendor/github.com/minio/minio/cmd/os-reliable.go @@ -156,11 +156,11 @@ func renameAll(srcFilePath, dstFilePath string) (err error) { // Reliably retries os.RenameAll if for some reason os.RenameAll returns // syscall.ENOENT (parent does not exist). func reliableRename(srcFilePath, dstFilePath string) (err error) { + if err = reliableMkdirAll(path.Dir(dstFilePath), 0777); err != nil { + return err + } i := 0 for { - if err = reliableMkdirAll(path.Dir(dstFilePath), 0777); err != nil { - return err - } // After a successful parent directory create attempt a renameAll. if err = os.Rename(srcFilePath, dstFilePath); err != nil { // Retry only for the first retryable error. diff --git a/vendor/github.com/minio/minio/cmd/peer-rest-client.go b/vendor/github.com/minio/minio/cmd/peer-rest-client.go index 33da7efee1..b381499ca5 100644 --- a/vendor/github.com/minio/minio/cmd/peer-rest-client.go +++ b/vendor/github.com/minio/minio/cmd/peer-rest-client.go @@ -443,6 +443,18 @@ func (client *peerRESTClient) LoadUsers() (err error) { return nil } +// LoadGroup - send load group command to peers. +func (client *peerRESTClient) LoadGroup(group string) error { + values := make(url.Values) + values.Set(peerRESTGroup, group) + respBody, err := client.call(peerRESTMethodLoadGroup, values, nil, -1) + if err != nil { + return err + } + defer http.DrainBody(respBody) + return nil +} + // SignalService - sends signal to peer nodes. 
func (client *peerRESTClient) SignalService(sig serviceSignal) error { values := make(url.Values) @@ -499,10 +511,12 @@ func (client *peerRESTClient) doTrace(traceCh chan interface{}, doneCh chan stru if err = dec.Decode(&info); err != nil { return } - select { - case traceCh <- info: - default: - // Do not block on slow receivers. + if len(info.NodeName) > 0 { + select { + case traceCh <- info: + default: + // Do not block on slow receivers. + } } } } diff --git a/vendor/github.com/minio/minio/cmd/peer-rest-common.go b/vendor/github.com/minio/minio/cmd/peer-rest-common.go index 2408d7311b..2afe55c62b 100644 --- a/vendor/github.com/minio/minio/cmd/peer-rest-common.go +++ b/vendor/github.com/minio/minio/cmd/peer-rest-common.go @@ -34,6 +34,7 @@ const ( peerRESTMethodLoadPolicy = "loadpolicy" peerRESTMethodDeletePolicy = "deletepolicy" peerRESTMethodLoadUsers = "loadusers" + peerRESTMethodLoadGroup = "loadgroup" peerRESTMethodStartProfiling = "startprofiling" peerRESTMethodDownloadProfilingData = "downloadprofilingdata" peerRESTMethodBucketPolicySet = "setbucketpolicy" @@ -50,6 +51,7 @@ const ( const ( peerRESTBucket = "bucket" peerRESTUser = "user" + peerRESTGroup = "group" peerRESTUserTemp = "user-temp" peerRESTPolicy = "policy" peerRESTSignal = "signal" diff --git a/vendor/github.com/minio/minio/cmd/peer-rest-server.go b/vendor/github.com/minio/minio/cmd/peer-rest-server.go index 2e96e2f095..604c0f9f5a 100644 --- a/vendor/github.com/minio/minio/cmd/peer-rest-server.go +++ b/vendor/github.com/minio/minio/cmd/peer-rest-server.go @@ -28,7 +28,6 @@ import ( "time" "github.com/gorilla/mux" - xhttp "github.com/minio/minio/cmd/http" "github.com/minio/minio/cmd/logger" "github.com/minio/minio/pkg/event" "github.com/minio/minio/pkg/lifecycle" @@ -246,13 +245,31 @@ func (s *peerRESTServer) LoadUsersHandler(w http.ResponseWriter, r *http.Request return } + err := globalIAMSys.Load() + if err != nil { + s.writeErrorResponse(w, err) + return + } + + w.(http.Flusher).Flush() +} 
+ +// LoadGroupHandler - reloads group along with members list. +func (s *peerRESTServer) LoadGroupHandler(w http.ResponseWriter, r *http.Request) { + if !s.IsValid(w, r) { + s.writeErrorResponse(w, errors.New("Invalid request")) + return + } + objAPI := newObjectLayerFn() if objAPI == nil { s.writeErrorResponse(w, errServerNotInitialized) return } - err := globalIAMSys.Load(objAPI) + vars := mux.Vars(r) + group := vars[peerRESTGroup] + err := globalIAMSys.LoadGroup(objAPI, group) if err != nil { s.writeErrorResponse(w, err) return @@ -719,30 +736,22 @@ func (s *peerRESTServer) TraceHandler(w http.ResponseWriter, r *http.Request) { trcAll := r.URL.Query().Get(peerRESTTraceAll) == "true" trcErr := r.URL.Query().Get(peerRESTTraceErr) == "true" - w.Header().Set(xhttp.Connection, "close") w.WriteHeader(http.StatusOK) w.(http.Flusher).Flush() - filter := func(entry interface{}) bool { - trcInfo := entry.(trace.Info) - - if trcErr && isHTTPStatusOK(trcInfo.RespInfo.StatusCode) { - return false - } - if trcAll { - return true - } - return !strings.HasPrefix(trcInfo.ReqInfo.Path, minioReservedBucketPath) - - } - doneCh := make(chan struct{}) defer close(doneCh) // Trace Publisher uses nonblocking publish and hence does not wait for slow subscribers. 
// Use buffered channel to take care of burst sends or slow w.Write() ch := make(chan interface{}, 2000) - globalHTTPTrace.Subscribe(ch, doneCh, filter) + + globalHTTPTrace.Subscribe(ch, doneCh, func(entry interface{}) bool { + return mustTrace(entry, trcAll, trcErr) + }) + + keepAliveTicker := time.NewTicker(500 * time.Millisecond) + defer keepAliveTicker.Stop() enc := gob.NewEncoder(w) for { @@ -752,8 +761,11 @@ func (s *peerRESTServer) TraceHandler(w http.ResponseWriter, r *http.Request) { return } w.(http.Flusher).Flush() - case <-r.Context().Done(): - return + case <-keepAliveTicker.C: + if err := enc.Encode(&trace.Info{}); err != nil { + return + } + w.(http.Flusher).Flush() } } } @@ -790,37 +802,38 @@ func (s *peerRESTServer) IsValid(w http.ResponseWriter, r *http.Request) bool { func registerPeerRESTHandlers(router *mux.Router) { server := &peerRESTServer{} subrouter := router.PathPrefix(peerRESTPath).Subrouter() - subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodGetLocks).HandlerFunc(httpTraceHdrs(server.GetLocksHandler)) - subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodServerInfo).HandlerFunc(httpTraceHdrs(server.ServerInfoHandler)) - subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodCPULoadInfo).HandlerFunc(httpTraceHdrs(server.CPULoadInfoHandler)) - subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodMemUsageInfo).HandlerFunc(httpTraceHdrs(server.MemUsageInfoHandler)) - subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodDrivePerfInfo).HandlerFunc(httpTraceHdrs(server.DrivePerfInfoHandler)) - subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodDeleteBucket).HandlerFunc(httpTraceHdrs(server.DeleteBucketHandler)).Queries(restQueries(peerRESTBucket)...) - subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodSignalService).HandlerFunc(httpTraceHdrs(server.SignalServiceHandler)).Queries(restQueries(peerRESTSignal)...) 
+ subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodGetLocks).HandlerFunc(httpTraceHdrs(server.GetLocksHandler)) + subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodServerInfo).HandlerFunc(httpTraceHdrs(server.ServerInfoHandler)) + subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodCPULoadInfo).HandlerFunc(httpTraceHdrs(server.CPULoadInfoHandler)) + subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodMemUsageInfo).HandlerFunc(httpTraceHdrs(server.MemUsageInfoHandler)) + subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodDrivePerfInfo).HandlerFunc(httpTraceHdrs(server.DrivePerfInfoHandler)) + subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodDeleteBucket).HandlerFunc(httpTraceHdrs(server.DeleteBucketHandler)).Queries(restQueries(peerRESTBucket)...) + subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodSignalService).HandlerFunc(httpTraceHdrs(server.SignalServiceHandler)).Queries(restQueries(peerRESTSignal)...) - subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodBucketPolicyRemove).HandlerFunc(httpTraceAll(server.RemoveBucketPolicyHandler)).Queries(restQueries(peerRESTBucket)...) - subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodBucketPolicySet).HandlerFunc(httpTraceHdrs(server.SetBucketPolicyHandler)).Queries(restQueries(peerRESTBucket)...) + subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodBucketPolicyRemove).HandlerFunc(httpTraceAll(server.RemoveBucketPolicyHandler)).Queries(restQueries(peerRESTBucket)...) + subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodBucketPolicySet).HandlerFunc(httpTraceHdrs(server.SetBucketPolicyHandler)).Queries(restQueries(peerRESTBucket)...) - subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodDeletePolicy).HandlerFunc(httpTraceAll(server.LoadPolicyHandler)).Queries(restQueries(peerRESTPolicy)...) 
- subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodLoadPolicy).HandlerFunc(httpTraceAll(server.LoadPolicyHandler)).Queries(restQueries(peerRESTPolicy)...) - subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodDeleteUser).HandlerFunc(httpTraceAll(server.LoadUserHandler)).Queries(restQueries(peerRESTUser)...) - subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodLoadUser).HandlerFunc(httpTraceAll(server.LoadUserHandler)).Queries(restQueries(peerRESTUser, peerRESTUserTemp)...) - subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodLoadUsers).HandlerFunc(httpTraceAll(server.LoadUsersHandler)) + subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodDeletePolicy).HandlerFunc(httpTraceAll(server.LoadPolicyHandler)).Queries(restQueries(peerRESTPolicy)...) + subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodLoadPolicy).HandlerFunc(httpTraceAll(server.LoadPolicyHandler)).Queries(restQueries(peerRESTPolicy)...) + subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodDeleteUser).HandlerFunc(httpTraceAll(server.LoadUserHandler)).Queries(restQueries(peerRESTUser)...) + subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodLoadUser).HandlerFunc(httpTraceAll(server.LoadUserHandler)).Queries(restQueries(peerRESTUser, peerRESTUserTemp)...) + subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodLoadUsers).HandlerFunc(httpTraceAll(server.LoadUsersHandler)) + subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodLoadGroup).HandlerFunc(httpTraceAll(server.LoadGroupHandler)).Queries(restQueries(peerRESTGroup)...) - subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodStartProfiling).HandlerFunc(httpTraceAll(server.StartProfilingHandler)).Queries(restQueries(peerRESTProfiler)...) 
- subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodDownloadProfilingData).HandlerFunc(httpTraceHdrs(server.DownloadProflingDataHandler)) + subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodStartProfiling).HandlerFunc(httpTraceAll(server.StartProfilingHandler)).Queries(restQueries(peerRESTProfiler)...) + subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodDownloadProfilingData).HandlerFunc(httpTraceHdrs(server.DownloadProflingDataHandler)) - subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodTargetExists).HandlerFunc(httpTraceHdrs(server.TargetExistsHandler)).Queries(restQueries(peerRESTBucket)...) - subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodSendEvent).HandlerFunc(httpTraceHdrs(server.SendEventHandler)).Queries(restQueries(peerRESTBucket)...) - subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodBucketNotificationPut).HandlerFunc(httpTraceHdrs(server.PutBucketNotificationHandler)).Queries(restQueries(peerRESTBucket)...) - subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodBucketNotificationListen).HandlerFunc(httpTraceHdrs(server.ListenBucketNotificationHandler)).Queries(restQueries(peerRESTBucket)...) + subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodTargetExists).HandlerFunc(httpTraceHdrs(server.TargetExistsHandler)).Queries(restQueries(peerRESTBucket)...) + subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodSendEvent).HandlerFunc(httpTraceHdrs(server.SendEventHandler)).Queries(restQueries(peerRESTBucket)...) + subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodBucketNotificationPut).HandlerFunc(httpTraceHdrs(server.PutBucketNotificationHandler)).Queries(restQueries(peerRESTBucket)...) + subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodBucketNotificationListen).HandlerFunc(httpTraceHdrs(server.ListenBucketNotificationHandler)).Queries(restQueries(peerRESTBucket)...) 
- subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodReloadFormat).HandlerFunc(httpTraceHdrs(server.ReloadFormatHandler)).Queries(restQueries(peerRESTDryRun)...) - subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodBucketLifecycleSet).HandlerFunc(httpTraceHdrs(server.SetBucketLifecycleHandler)).Queries(restQueries(peerRESTBucket)...) - subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodBucketLifecycleRemove).HandlerFunc(httpTraceHdrs(server.RemoveBucketLifecycleHandler)).Queries(restQueries(peerRESTBucket)...) + subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodReloadFormat).HandlerFunc(httpTraceHdrs(server.ReloadFormatHandler)).Queries(restQueries(peerRESTDryRun)...) + subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodBucketLifecycleSet).HandlerFunc(httpTraceHdrs(server.SetBucketLifecycleHandler)).Queries(restQueries(peerRESTBucket)...) + subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodBucketLifecycleRemove).HandlerFunc(httpTraceHdrs(server.RemoveBucketLifecycleHandler)).Queries(restQueries(peerRESTBucket)...) 
- subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodTrace).HandlerFunc(server.TraceHandler) - subrouter.Methods(http.MethodPost).Path("/" + peerRESTMethodBackgroundHealStatus).HandlerFunc(server.BackgroundHealStatusHandler) + subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodTrace).HandlerFunc(server.TraceHandler) + subrouter.Methods(http.MethodPost).Path(SlashSeparator + peerRESTMethodBackgroundHealStatus).HandlerFunc(server.BackgroundHealStatusHandler) router.NotFoundHandler = http.HandlerFunc(httpTraceAll(notFoundHandler)) } diff --git a/vendor/github.com/minio/minio/cmd/posix-list-dir_other.go b/vendor/github.com/minio/minio/cmd/posix-list-dir_other.go index ae2e04bae0..97a1f7ad1a 100644 --- a/vendor/github.com/minio/minio/cmd/posix-list-dir_other.go +++ b/vendor/github.com/minio/minio/cmd/posix-list-dir_other.go @@ -86,7 +86,7 @@ func readDirN(dirPath string, count int) (entries []string, err error) { } // Append to entries if symbolic link exists and is valid. if st.IsDir() { - entries = append(entries, fi.Name()+slashSeparator) + entries = append(entries, fi.Name()+SlashSeparator) } else if st.Mode().IsRegular() { entries = append(entries, fi.Name()) } @@ -96,8 +96,8 @@ func readDirN(dirPath string, count int) (entries []string, err error) { continue } if fi.Mode().IsDir() { - // Append "/" instead of "\" so that sorting is achieved as expected. - entries = append(entries, fi.Name()+slashSeparator) + // Append SlashSeparator instead of "\" so that sorting is achieved as expected. 
+ entries = append(entries, fi.Name()+SlashSeparator) } else if fi.Mode().IsRegular() { entries = append(entries, fi.Name()) } diff --git a/vendor/github.com/minio/minio/cmd/posix-list-dir_unix.go b/vendor/github.com/minio/minio/cmd/posix-list-dir_unix.go index a247fafd27..8433191ba5 100644 --- a/vendor/github.com/minio/minio/cmd/posix-list-dir_unix.go +++ b/vendor/github.com/minio/minio/cmd/posix-list-dir_unix.go @@ -69,7 +69,7 @@ func parseDirents(dirPath string, buf []byte) (entries []string, err error) { switch dirent.Type { case syscall.DT_DIR: - entries = append(entries, name+slashSeparator) + entries = append(entries, name+SlashSeparator) case syscall.DT_REG: entries = append(entries, name) case syscall.DT_LNK, syscall.DT_UNKNOWN: @@ -89,7 +89,7 @@ func parseDirents(dirPath string, buf []byte) (entries []string, err error) { return nil, err } if fi.IsDir() { - entries = append(entries, name+slashSeparator) + entries = append(entries, name+SlashSeparator) } else if fi.Mode().IsRegular() { entries = append(entries, name) } diff --git a/vendor/github.com/minio/minio/cmd/posix-list-dir_windows.go b/vendor/github.com/minio/minio/cmd/posix-list-dir_windows.go index 5676c27904..b6b98434b6 100644 --- a/vendor/github.com/minio/minio/cmd/posix-list-dir_windows.go +++ b/vendor/github.com/minio/minio/cmd/posix-list-dir_windows.go @@ -92,12 +92,12 @@ func readDirN(dirPath string, count int) (entries []string, err error) { return nil, err } if fi.IsDir() { - entries = append(entries, name+slashSeparator) + entries = append(entries, name+SlashSeparator) } else if fi.Mode().IsRegular() { entries = append(entries, name) } case data.FileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0: - entries = append(entries, name+slashSeparator) + entries = append(entries, name+SlashSeparator) default: entries = append(entries, name) } diff --git a/vendor/github.com/minio/minio/cmd/posix.go b/vendor/github.com/minio/minio/cmd/posix.go index 30586c2c12..027384666a 100644 --- 
a/vendor/github.com/minio/minio/cmd/posix.go +++ b/vendor/github.com/minio/minio/cmd/posix.go @@ -95,7 +95,7 @@ func checkPathLength(pathName string) error { } // Check each path segment length is > 255 - for len(pathName) > 0 && pathName != "." && pathName != "/" { + for len(pathName) > 0 && pathName != "." && pathName != SlashSeparator { dir, file := slashpath.Dir(pathName), slashpath.Base(pathName) if len(file) > 255 { @@ -214,8 +214,13 @@ func getDiskInfo(diskPath string) (di disk.Info, err error) { di, err = disk.GetInfo(diskPath) } - if os.IsNotExist(err) { + switch { + case os.IsNotExist(err): err = errDiskNotFound + case isSysErrTooLong(err): + err = errFileNameTooLong + case isSysErrIO(err): + err = errFaultyDisk } return di, err @@ -285,6 +290,9 @@ func (s *posix) String() string { } func (s *posix) LastError() error { + if atomic.LoadInt32(&s.ioErrCount) > maxAllowedIOError { + return errFaultyDisk + } return nil } @@ -310,10 +318,21 @@ type DiskInfo struct { // DiskInfo provides current information about disk space usage, // total free inodes and underlying filesystem. 
func (s *posix) DiskInfo() (info DiskInfo, err error) { + defer func() { + if err == errFaultyDisk { + atomic.AddInt32(&s.ioErrCount, 1) + } + }() + + if atomic.LoadInt32(&s.ioErrCount) > maxAllowedIOError { + return info, errFaultyDisk + } + di, err := getDiskInfo(s.diskPath) if err != nil { return info, err } + used := di.Total - di.Free if !s.diskMount { used = atomic.LoadUint64(&s.totalUsed) @@ -323,6 +342,7 @@ func (s *posix) DiskInfo() (info DiskInfo, err error) { if err != nil { return info, err } + return DiskInfo{ Total: di.Total, Free: di.Free, @@ -538,7 +558,7 @@ func listVols(dirPath string) ([]VolInfo, error) { } var volsInfo []VolInfo for _, entry := range entries { - if !hasSuffix(entry, slashSeparator) || !isValidVolname(slashpath.Clean(entry)) { + if !hasSuffix(entry, SlashSeparator) || !isValidVolname(slashpath.Clean(entry)) { // Skip if entry is neither a directory not a valid volume name. continue } @@ -698,7 +718,7 @@ func (s *posix) Walk(volume, dirPath, marker string, recursive bool, leafFile st return } var fi FileInfo - if hasSuffix(walkResult.entry, slashSeparator) { + if hasSuffix(walkResult.entry, SlashSeparator) { fi = FileInfo{ Volume: volume, Name: walkResult.entry, @@ -723,7 +743,7 @@ func (s *posix) Walk(volume, dirPath, marker string, recursive bool, leafFile st } // ListDir - return all the entries at the given directory path. -// If an entry is a directory it will be returned with a trailing "/". +// If an entry is a directory it will be returned with a trailing SlashSeparator. 
func (s *posix) ListDir(volume, dirPath string, count int, leafFile string) (entries []string, err error) { defer func() { if err == errFaultyDisk { @@ -766,7 +786,7 @@ func (s *posix) ListDir(volume, dirPath string, count int, leafFile string) (ent if leafFile != "" { for i, entry := range entries { if _, serr := os.Stat(pathJoin(dirPath, entry, leafFile)); serr == nil { - entries[i] = strings.TrimSuffix(entry, slashSeparator) + entries[i] = strings.TrimSuffix(entry, SlashSeparator) } } } @@ -1370,7 +1390,7 @@ func deleteFile(basePath, deletePath string) error { // Trailing slash is removed when found to ensure // slashpath.Dir() to work as intended. - deletePath = strings.TrimSuffix(deletePath, slashSeparator) + deletePath = strings.TrimSuffix(deletePath, SlashSeparator) deletePath = slashpath.Dir(deletePath) // Delete parent directory. Errors for parent directories shouldn't trickle down. @@ -1410,7 +1430,7 @@ func (s *posix) DeleteFile(volume, path string) (err error) { return err } - // Following code is needed so that we retain "/" suffix if any in + // Following code is needed so that we retain SlashSeparator suffix if any in // path argument. filePath := pathJoin(volumeDir, path) if err = checkPathLength((filePath)); err != nil { @@ -1472,8 +1492,8 @@ func (s *posix) RenameFile(srcVolume, srcPath, dstVolume, dstPath string) (err e } } - srcIsDir := hasSuffix(srcPath, slashSeparator) - dstIsDir := hasSuffix(dstPath, slashSeparator) + srcIsDir := hasSuffix(srcPath, SlashSeparator) + dstIsDir := hasSuffix(dstPath, SlashSeparator) // Either src and dst have to be directories or files, else return error. 
if !(srcIsDir && dstIsDir || !srcIsDir && !dstIsDir) { return errFileAccessDenied diff --git a/vendor/github.com/minio/minio/cmd/posix_test.go b/vendor/github.com/minio/minio/cmd/posix_test.go index 9f82719e69..b27e85dca7 100644 --- a/vendor/github.com/minio/minio/cmd/posix_test.go +++ b/vendor/github.com/minio/minio/cmd/posix_test.go @@ -69,7 +69,7 @@ func TestIsValidVolname(t *testing.T) { // cases for which test should fail. // passing invalid bucket names. {"", false}, - {"/", false}, + {SlashSeparator, false}, {"a", false}, {"ab", false}, {"ab/", true}, @@ -319,9 +319,9 @@ func TestPosixReadAll(t *testing.T) { // TestPosixNewPosix all the cases handled in posix storage layer initialization. func TestPosixNewPosix(t *testing.T) { // Temporary dir name. - tmpDirName := globalTestTmpDir + "/" + "minio-" + nextSuffix() + tmpDirName := globalTestTmpDir + SlashSeparator + "minio-" + nextSuffix() // Temporary file name. - tmpFileName := globalTestTmpDir + "/" + "minio-" + nextSuffix() + tmpFileName := globalTestTmpDir + SlashSeparator + "minio-" + nextSuffix() f, _ := os.Create(tmpFileName) f.Close() defer os.Remove(tmpFileName) @@ -1830,9 +1830,7 @@ func TestPosixVerifyFile(t *testing.T) { if err == io.EOF { break } - if err != nil { - t.Fatal(err) - } + t.Fatal(err) } w.Close() if err := posixStorage.VerifyFile(volName, fileName, false, algo, nil, shardSize); err != nil { diff --git a/vendor/github.com/minio/minio/cmd/prepare-storage.go b/vendor/github.com/minio/minio/cmd/prepare-storage.go index d3af02314d..ea863d1025 100644 --- a/vendor/github.com/minio/minio/cmd/prepare-storage.go +++ b/vendor/github.com/minio/minio/cmd/prepare-storage.go @@ -63,7 +63,7 @@ func formatXLMigrateLocalEndpoints(endpoints EndpointList) error { if os.IsNotExist(err) { return nil } - return err + return fmt.Errorf("unable to access (%s) %s", formatPath, err) } return formatXLMigrate(epPath) }, index) @@ -92,11 +92,13 @@ func formatXLCleanupTmpLocalEndpoints(endpoints EndpointList) 
error { if os.IsNotExist(err) { return nil } - return err + return fmt.Errorf("unable to access (%s) %s", formatPath, err) } if _, err := os.Stat(pathJoin(epPath, minioMetaTmpBucket+"-old")); err != nil { if !os.IsNotExist(err) { - return err + return fmt.Errorf("unable to access (%s) %s", + pathJoin(epPath, minioMetaTmpBucket+"-old"), + err) } } @@ -110,15 +112,24 @@ func formatXLCleanupTmpLocalEndpoints(endpoints EndpointList) error { // // In this example, `33a58b40-aecc-4c9f-a22f-ff17bfa33b62` directory contains // temporary objects from one of the previous runs of minio server. + tmpOld := pathJoin(epPath, minioMetaTmpBucket+"-old", mustGetUUID()) if err := renameAll(pathJoin(epPath, minioMetaTmpBucket), - pathJoin(epPath, minioMetaTmpBucket+"-old", mustGetUUID())); err != nil { - return err + tmpOld); err != nil && err != errFileNotFound { + return fmt.Errorf("unable to rename (%s -> %s) %s", + pathJoin(epPath, minioMetaTmpBucket), + tmpOld, + err) } // Removal of tmp-old folder is backgrounded completely. go removeAll(pathJoin(epPath, minioMetaTmpBucket+"-old")) - return mkdirAll(pathJoin(epPath, minioMetaTmpBucket), 0777) + if err := mkdirAll(pathJoin(epPath, minioMetaTmpBucket), 0777); err != nil { + return fmt.Errorf("unable to create (%s) %s", + pathJoin(epPath, minioMetaTmpBucket), + err) + } + return nil }, index) } for _, err := range g.Wait() { diff --git a/vendor/github.com/minio/minio/cmd/rest/client.go b/vendor/github.com/minio/minio/cmd/rest/client.go index fe435e0fe9..e6cbccbe28 100644 --- a/vendor/github.com/minio/minio/cmd/rest/client.go +++ b/vendor/github.com/minio/minio/cmd/rest/client.go @@ -52,9 +52,15 @@ type Client struct { newAuthToken func() string } +// URL query separator constants +const ( + resourceSep = "/" + querySep = "?" +) + // CallWithContext - make a REST call with context. 
func (c *Client) CallWithContext(ctx context.Context, method string, values url.Values, body io.Reader, length int64) (reply io.ReadCloser, err error) { - req, err := http.NewRequest(http.MethodPost, c.url.String()+"/"+method+"?"+values.Encode(), body) + req, err := http.NewRequest(http.MethodPost, c.url.String()+resourceSep+method+querySep+values.Encode(), body) if err != nil { return nil, &NetworkError{err} } diff --git a/vendor/github.com/minio/minio/cmd/server-main.go b/vendor/github.com/minio/minio/cmd/server-main.go index 7d8da3e795..b0b455453b 100644 --- a/vendor/github.com/minio/minio/cmd/server-main.go +++ b/vendor/github.com/minio/minio/cmd/server-main.go @@ -19,7 +19,6 @@ package cmd import ( "context" "encoding/gob" - "errors" "fmt" "net/http" "os" @@ -380,9 +379,11 @@ func serverMain(ctx *cli.Context) { if err = globalNotificationSys.Init(newObject); err != nil { logger.LogIf(context.Background(), err) } - if globalAutoEncryption && !newObject.IsEncryptionSupported() { - logger.Fatal(errors.New("Invalid KMS configuration"), "auto-encryption is enabled but server does not support encryption") - } + + // Verify if object layer supports + // - encryption + // - compression + verifyObjectLayerFeatures("server", newObject) if globalIsXL { initBackgroundHealing() diff --git a/vendor/github.com/minio/minio/cmd/server_test.go b/vendor/github.com/minio/minio/cmd/server_test.go index 9d59c25dac..bb57d9d653 100644 --- a/vendor/github.com/minio/minio/cmd/server_test.go +++ b/vendor/github.com/minio/minio/cmd/server_test.go @@ -1000,7 +1000,7 @@ func (s *TestSuiteCommon) TestMultipleObjects(c *check) { func (s *TestSuiteCommon) TestNotImplemented(c *check) { // Generate a random bucket name. 
bucketName := getRandomBucketName() - request, err := newTestSignedRequest("GET", s.endPoint+"/"+bucketName+"/object?policy", + request, err := newTestSignedRequest("GET", s.endPoint+SlashSeparator+bucketName+"/object?policy", 0, nil, s.accessKey, s.secretKey, s.signer) c.Assert(err, nil) @@ -1111,7 +1111,7 @@ func (s *TestSuiteCommon) TestCopyObject(c *check) { request, err = newTestRequest("PUT", getPutObjectURL(s.endPoint, bucketName, objectName2), 0, nil) c.Assert(err, nil) // setting the "X-Amz-Copy-Source" to allow copying the content of previously uploaded object. - request.Header.Set("X-Amz-Copy-Source", url.QueryEscape("/"+bucketName+"/"+objectName)) + request.Header.Set("X-Amz-Copy-Source", url.QueryEscape(SlashSeparator+bucketName+SlashSeparator+objectName)) if s.signer == signerV4 { err = signRequestV4(request, s.accessKey, s.secretKey) } else { @@ -1821,7 +1821,7 @@ func (s *TestSuiteCommon) TestPutBucketErrors(c *check) { // request for ACL. // Since MinIO server doesn't support ACL's the request is expected to fail with "NotImplemented" error message. - request, err = newTestSignedRequest("PUT", s.endPoint+"/"+bucketName+"?acl", + request, err = newTestSignedRequest("PUT", s.endPoint+SlashSeparator+bucketName+"?acl", 0, nil, s.accessKey, s.secretKey, s.signer) c.Assert(err, nil) diff --git a/vendor/github.com/minio/minio/cmd/signature-v2.go b/vendor/github.com/minio/minio/cmd/signature-v2.go index e1c861056b..56be06a9b3 100644 --- a/vendor/github.com/minio/minio/cmd/signature-v2.go +++ b/vendor/github.com/minio/minio/cmd/signature-v2.go @@ -209,7 +209,7 @@ func getReqAccessKeyV2(r *http.Request) (auth.Credentials, bool, APIErrorCode) { // CanonicalizedProtocolHeaders + // CanonicalizedResource; // -// CanonicalizedResource = [ "/" + Bucket ] + +// CanonicalizedResource = [ SlashSeparator + Bucket ] + // + // [ subresource, if present. 
For example "?acl", "?location", "?logging", or "?torrent"]; // diff --git a/vendor/github.com/minio/minio/cmd/signature-v4-parser.go b/vendor/github.com/minio/minio/cmd/signature-v4-parser.go index e9a6836228..7655bcb96f 100644 --- a/vendor/github.com/minio/minio/cmd/signature-v4-parser.go +++ b/vendor/github.com/minio/minio/cmd/signature-v4-parser.go @@ -44,7 +44,7 @@ func (c credentialHeader) getScope() string { c.scope.region, c.scope.service, c.scope.request, - }, "/") + }, SlashSeparator) } func getReqAccessKeyV4(r *http.Request, region string, stype serviceType) (auth.Credentials, bool, APIErrorCode) { @@ -73,11 +73,11 @@ func parseCredentialHeader(credElement string, region string, stype serviceType) if creds[0] != "Credential" { return ch, ErrMissingCredTag } - credElements := strings.Split(strings.TrimSpace(creds[1]), "/") + credElements := strings.Split(strings.TrimSpace(creds[1]), SlashSeparator) if len(credElements) < 5 { return ch, ErrCredMalformed } - accessKey := strings.Join(credElements[:len(credElements)-4], "/") // The access key may contain one or more `/` + accessKey := strings.Join(credElements[:len(credElements)-4], SlashSeparator) // The access key may contain one or more `/` if !auth.IsAccessKeyValid(accessKey) { return ch, ErrInvalidAccessKeyID } diff --git a/vendor/github.com/minio/minio/cmd/signature-v4-parser_test.go b/vendor/github.com/minio/minio/cmd/signature-v4-parser_test.go index bd0a7d6545..b64c517073 100644 --- a/vendor/github.com/minio/minio/cmd/signature-v4-parser_test.go +++ b/vendor/github.com/minio/minio/cmd/signature-v4-parser_test.go @@ -36,7 +36,7 @@ func joinWithSlash(accessKey, date, region, service, requestVersion string) stri date, region, service, - requestVersion}, "/") + requestVersion}, SlashSeparator) } // generate CredentialHeader from its fields. 
@@ -79,12 +79,12 @@ func validateCredentialfields(t *testing.T, testNum int, expectedCredentials cre // TestParseCredentialHeader - validates the format validator and extractor for the Credential header in an aws v4 request. // A valid format of creadential should be of the following format. -// Credential = accessKey + "/"+ scope +// Credential = accessKey + SlashSeparator+ scope // where scope = string.Join([]string{ currTime.Format(yyyymmdd), // globalMinioDefaultRegion, // "s3", // "aws4_request", -// },"/") +// },SlashSeparator) func TestParseCredentialHeader(t *testing.T) { sampleTimeStr := UTCNow().Format(yyyymmdd) diff --git a/vendor/github.com/minio/minio/cmd/signature-v4-utils.go b/vendor/github.com/minio/minio/cmd/signature-v4-utils.go index 01b12dff2a..d0e58ed03f 100644 --- a/vendor/github.com/minio/minio/cmd/signature-v4-utils.go +++ b/vendor/github.com/minio/minio/cmd/signature-v4-utils.go @@ -21,6 +21,7 @@ import ( "context" "crypto/hmac" "encoding/hex" + "io" "io/ioutil" "net/http" "strconv" @@ -61,7 +62,7 @@ func skipContentSha256Cksum(r *http.Request) bool { // Returns SHA256 for calculating canonical-request. 
func getContentSha256Cksum(r *http.Request, stype serviceType) string { if stype == serviceSTS { - payload, err := ioutil.ReadAll(r.Body) + payload, err := ioutil.ReadAll(io.LimitReader(r.Body, stsRequestBodyLimit)) if err != nil { logger.CriticalIf(context.Background(), err) } diff --git a/vendor/github.com/minio/minio/cmd/signature-v4.go b/vendor/github.com/minio/minio/cmd/signature-v4.go index 06689517e1..97be5db95f 100644 --- a/vendor/github.com/minio/minio/cmd/signature-v4.go +++ b/vendor/github.com/minio/minio/cmd/signature-v4.go @@ -120,7 +120,7 @@ func getScope(t time.Time, region string) string { region, string(serviceS3), "aws4_request", - }, "/") + }, SlashSeparator) return scope } @@ -248,7 +248,7 @@ func doesPresignedSignatureMatch(hashedPayload string, r *http.Request, region s query.Set(xhttp.AmzDate, t.Format(iso8601Format)) query.Set(xhttp.AmzExpires, strconv.Itoa(expireSeconds)) query.Set(xhttp.AmzSignedHeaders, getSignedHeaders(extractedSignedHeaders)) - query.Set(xhttp.AmzCredential, cred.AccessKey+"/"+pSignValues.Credential.getScope()) + query.Set(xhttp.AmzCredential, cred.AccessKey+SlashSeparator+pSignValues.Credential.getScope()) // Save other headers available in the request parameters. 
for k, v := range req.URL.Query() { diff --git a/vendor/github.com/minio/minio/cmd/storage-rest-client.go b/vendor/github.com/minio/minio/cmd/storage-rest-client.go index 71933dd180..f74a781d19 100644 --- a/vendor/github.com/minio/minio/cmd/storage-rest-client.go +++ b/vendor/github.com/minio/minio/cmd/storage-rest-client.go @@ -44,8 +44,8 @@ func isNetworkError(err error) bool { if err.Error() == errConnectionStale.Error() { return true } - if _, ok := err.(*rest.NetworkError); ok { - return true + if nerr, ok := err.(*rest.NetworkError); ok { + return isNetworkOrHostDown(nerr.Err) } return false } diff --git a/vendor/github.com/minio/minio/cmd/storage-rest-common.go b/vendor/github.com/minio/minio/cmd/storage-rest-common.go index 488b2b9374..281744b367 100644 --- a/vendor/github.com/minio/minio/cmd/storage-rest-common.go +++ b/vendor/github.com/minio/minio/cmd/storage-rest-common.go @@ -17,7 +17,7 @@ package cmd const storageRESTVersion = "v8" -const storageRESTPath = minioReservedBucketPath + "/storage/" + storageRESTVersion + "/" +const storageRESTPath = minioReservedBucketPath + "/storage/" + storageRESTVersion + SlashSeparator const ( storageRESTMethodDiskInfo = "diskinfo" diff --git a/vendor/github.com/minio/minio/cmd/storage-rest-server.go b/vendor/github.com/minio/minio/cmd/storage-rest-server.go index 35e1827776..5ac06658f4 100644 --- a/vendor/github.com/minio/minio/cmd/storage-rest-server.go +++ b/vendor/github.com/minio/minio/cmd/storage-rest-server.go @@ -567,41 +567,41 @@ func registerStorageRESTHandlers(router *mux.Router, endpoints EndpointList) { subrouter := router.PathPrefix(path.Join(storageRESTPath, endpoint.Path)).Subrouter() - subrouter.Methods(http.MethodPost).Path("/" + storageRESTMethodDiskInfo).HandlerFunc(httpTraceHdrs(server.DiskInfoHandler)) - subrouter.Methods(http.MethodPost).Path("/" + storageRESTMethodMakeVol).HandlerFunc(httpTraceHdrs(server.MakeVolHandler)).Queries(restQueries(storageRESTVolume)...) 
- subrouter.Methods(http.MethodPost).Path("/" + storageRESTMethodStatVol).HandlerFunc(httpTraceHdrs(server.StatVolHandler)).Queries(restQueries(storageRESTVolume)...) - subrouter.Methods(http.MethodPost).Path("/" + storageRESTMethodDeleteVol).HandlerFunc(httpTraceHdrs(server.DeleteVolHandler)).Queries(restQueries(storageRESTVolume)...) - subrouter.Methods(http.MethodPost).Path("/" + storageRESTMethodListVols).HandlerFunc(httpTraceHdrs(server.ListVolsHandler)) + subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodDiskInfo).HandlerFunc(httpTraceHdrs(server.DiskInfoHandler)) + subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodMakeVol).HandlerFunc(httpTraceHdrs(server.MakeVolHandler)).Queries(restQueries(storageRESTVolume)...) + subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodStatVol).HandlerFunc(httpTraceHdrs(server.StatVolHandler)).Queries(restQueries(storageRESTVolume)...) + subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodDeleteVol).HandlerFunc(httpTraceHdrs(server.DeleteVolHandler)).Queries(restQueries(storageRESTVolume)...) + subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodListVols).HandlerFunc(httpTraceHdrs(server.ListVolsHandler)) - subrouter.Methods(http.MethodPost).Path("/" + storageRESTMethodAppendFile).HandlerFunc(httpTraceHdrs(server.AppendFileHandler)). + subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodAppendFile).HandlerFunc(httpTraceHdrs(server.AppendFileHandler)). Queries(restQueries(storageRESTVolume, storageRESTFilePath)...) - subrouter.Methods(http.MethodPost).Path("/" + storageRESTMethodWriteAll).HandlerFunc(httpTraceHdrs(server.WriteAllHandler)). + subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodWriteAll).HandlerFunc(httpTraceHdrs(server.WriteAllHandler)). Queries(restQueries(storageRESTVolume, storageRESTFilePath)...) 
- subrouter.Methods(http.MethodPost).Path("/" + storageRESTMethodCreateFile).HandlerFunc(httpTraceHdrs(server.CreateFileHandler)). + subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodCreateFile).HandlerFunc(httpTraceHdrs(server.CreateFileHandler)). Queries(restQueries(storageRESTVolume, storageRESTFilePath, storageRESTLength)...) - subrouter.Methods(http.MethodPost).Path("/" + storageRESTMethodStatFile).HandlerFunc(httpTraceHdrs(server.StatFileHandler)). + subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodStatFile).HandlerFunc(httpTraceHdrs(server.StatFileHandler)). Queries(restQueries(storageRESTVolume, storageRESTFilePath)...) - subrouter.Methods(http.MethodPost).Path("/" + storageRESTMethodReadAll).HandlerFunc(httpTraceHdrs(server.ReadAllHandler)). + subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodReadAll).HandlerFunc(httpTraceHdrs(server.ReadAllHandler)). Queries(restQueries(storageRESTVolume, storageRESTFilePath)...) - subrouter.Methods(http.MethodPost).Path("/" + storageRESTMethodReadFile).HandlerFunc(httpTraceHdrs(server.ReadFileHandler)). + subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodReadFile).HandlerFunc(httpTraceHdrs(server.ReadFileHandler)). Queries(restQueries(storageRESTVolume, storageRESTFilePath, storageRESTOffset, storageRESTLength, storageRESTBitrotAlgo, storageRESTBitrotHash)...) - subrouter.Methods(http.MethodPost).Path("/" + storageRESTMethodReadFileStream).HandlerFunc(httpTraceHdrs(server.ReadFileStreamHandler)). + subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodReadFileStream).HandlerFunc(httpTraceHdrs(server.ReadFileStreamHandler)). Queries(restQueries(storageRESTVolume, storageRESTFilePath, storageRESTOffset, storageRESTLength)...) - subrouter.Methods(http.MethodPost).Path("/" + storageRESTMethodListDir).HandlerFunc(httpTraceHdrs(server.ListDirHandler)). 
+ subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodListDir).HandlerFunc(httpTraceHdrs(server.ListDirHandler)). Queries(restQueries(storageRESTVolume, storageRESTDirPath, storageRESTCount, storageRESTLeafFile)...) - subrouter.Methods(http.MethodPost).Path("/" + storageRESTMethodWalk).HandlerFunc(httpTraceHdrs(server.WalkHandler)). + subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodWalk).HandlerFunc(httpTraceHdrs(server.WalkHandler)). Queries(restQueries(storageRESTVolume, storageRESTDirPath, storageRESTMarkerPath, storageRESTRecursive, storageRESTLeafFile)...) - subrouter.Methods(http.MethodPost).Path("/" + storageRESTMethodDeleteFile).HandlerFunc(httpTraceHdrs(server.DeleteFileHandler)). + subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodDeleteFile).HandlerFunc(httpTraceHdrs(server.DeleteFileHandler)). Queries(restQueries(storageRESTVolume, storageRESTFilePath)...) - subrouter.Methods(http.MethodPost).Path("/" + storageRESTMethodDeleteFileBulk).HandlerFunc(httpTraceHdrs(server.DeleteFileBulkHandler)). + subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodDeleteFileBulk).HandlerFunc(httpTraceHdrs(server.DeleteFileBulkHandler)). Queries(restQueries(storageRESTVolume, storageRESTFilePath)...) - subrouter.Methods(http.MethodPost).Path("/" + storageRESTMethodRenameFile).HandlerFunc(httpTraceHdrs(server.RenameFileHandler)). + subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodRenameFile).HandlerFunc(httpTraceHdrs(server.RenameFileHandler)). Queries(restQueries(storageRESTSrcVolume, storageRESTSrcPath, storageRESTDstVolume, storageRESTDstPath)...) - subrouter.Methods(http.MethodPost).Path("/" + storageRESTMethodVerifyFile).HandlerFunc(httpTraceHdrs(server.VerifyFile)). + subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodVerifyFile).HandlerFunc(httpTraceHdrs(server.VerifyFile)). 
Queries(restQueries(storageRESTVolume, storageRESTFilePath, storageRESTBitrotAlgo, storageRESTLength)...) - subrouter.Methods(http.MethodPost).Path("/" + storageRESTMethodGetInstanceID).HandlerFunc(httpTraceAll(server.GetInstanceID)) + subrouter.Methods(http.MethodPost).Path(SlashSeparator + storageRESTMethodGetInstanceID).HandlerFunc(httpTraceAll(server.GetInstanceID)) } router.NotFoundHandler = http.HandlerFunc(httpTraceAll(notFoundHandler)) diff --git a/vendor/github.com/minio/minio/cmd/sts-handlers.go b/vendor/github.com/minio/minio/cmd/sts-handlers.go index f1522347e1..aa56b06ede 100644 --- a/vendor/github.com/minio/minio/cmd/sts-handlers.go +++ b/vendor/github.com/minio/minio/cmd/sts-handlers.go @@ -40,6 +40,8 @@ const ( clientGrants = "AssumeRoleWithClientGrants" webIdentity = "AssumeRoleWithWebIdentity" assumeRole = "AssumeRole" + + stsRequestBodyLimit = 10 * (1 << 20) // 10 MiB ) // stsAPIHandlers implements and provides http handlers for AWS STS API. @@ -51,7 +53,7 @@ func registerSTSRouter(router *mux.Router) { sts := &stsAPIHandlers{} // STS Router - stsRouter := router.NewRoute().PathPrefix("/").Subrouter() + stsRouter := router.NewRoute().PathPrefix(SlashSeparator).Subrouter() // Assume roles with no JWT, handles AssumeRole. stsRouter.Methods(http.MethodPost).MatcherFunc(func(r *http.Request, rm *mux.RouteMatch) bool { @@ -183,13 +185,18 @@ func (sts *stsAPIHandlers) AssumeRole(w http.ResponseWriter, r *http.Request) { return } - policyName, err := globalIAMSys.GetUserPolicy(user.AccessKey) + policies, err := globalIAMSys.PolicyDBGet(user.AccessKey, false) if err != nil { logger.LogIf(ctx, err) writeSTSErrorResponse(w, stsErrCodes.ToSTSErr(ErrSTSInvalidParameterValue)) return } + policyName := "" + if len(policies) > 0 { + policyName = policies[0] + } + // This policy is the policy associated with the user // requesting for temporary credentials. The temporary // credentials will inherit the same policy requirements. 
diff --git a/vendor/github.com/minio/minio/cmd/test-utils_test.go b/vendor/github.com/minio/minio/cmd/test-utils_test.go index e5125e4435..b9c8d60569 100644 --- a/vendor/github.com/minio/minio/cmd/test-utils_test.go +++ b/vendor/github.com/minio/minio/cmd/test-utils_test.go @@ -707,7 +707,7 @@ func signStreamingRequest(req *http.Request, accessKey, secretKey string, currTi globalMinioDefaultRegion, string(serviceS3), "aws4_request", - }, "/") + }, SlashSeparator) stringToSign := "AWS4-HMAC-SHA256" + "\n" + currTime.Format(iso8601Format) + "\n" stringToSign = stringToSign + scope + "\n" @@ -722,7 +722,7 @@ func signStreamingRequest(req *http.Request, accessKey, secretKey string, currTi // final Authorization header parts := []string{ - "AWS4-HMAC-SHA256" + " Credential=" + accessKey + "/" + scope, + "AWS4-HMAC-SHA256" + " Credential=" + accessKey + SlashSeparator + scope, "SignedHeaders=" + signedHeaders, "Signature=" + signature, } @@ -787,7 +787,7 @@ func assembleStreamingChunks(req *http.Request, body io.ReadSeeker, chunkSize in regionStr, string(serviceS3), "aws4_request", - }, "/") + }, SlashSeparator) stringToSign := "AWS4-HMAC-SHA256-PAYLOAD" + "\n" stringToSign = stringToSign + currTime.Format(iso8601Format) + "\n" @@ -1062,7 +1062,7 @@ func signRequestV4(req *http.Request, accessKey, secretKey string) error { region, string(serviceS3), "aws4_request", - }, "/") + }, SlashSeparator) stringToSign := "AWS4-HMAC-SHA256" + "\n" + currTime.Format(iso8601Format) + "\n" stringToSign = stringToSign + scope + "\n" @@ -1077,7 +1077,7 @@ func signRequestV4(req *http.Request, accessKey, secretKey string) error { // final Authorization header parts := []string{ - "AWS4-HMAC-SHA256" + " Credential=" + accessKey + "/" + scope, + "AWS4-HMAC-SHA256" + " Credential=" + accessKey + SlashSeparator + scope, "SignedHeaders=" + signedHeaders, "Signature=" + signature, } @@ -1089,7 +1089,7 @@ func signRequestV4(req *http.Request, accessKey, secretKey string) error { // 
getCredentialString generate a credential string. func getCredentialString(accessKeyID, location string, t time.Time) string { - return accessKeyID + "/" + getScope(t, location) + return accessKeyID + SlashSeparator + getScope(t, location) } // getMD5HashBase64 returns MD5 hash in base64 encoding of given data. @@ -1360,9 +1360,9 @@ func (t *EOFWriter) Write(p []byte) (n int, err error) { // construct URL for http requests for bucket operations. func makeTestTargetURL(endPoint, bucketName, objectName string, queryValues url.Values) string { - urlStr := endPoint + "/" + urlStr := endPoint + SlashSeparator if bucketName != "" { - urlStr = urlStr + bucketName + "/" + urlStr = urlStr + bucketName + SlashSeparator } if objectName != "" { urlStr = urlStr + s3utils.EncodePath(objectName) @@ -2142,7 +2142,7 @@ func registerAPIFunctions(muxRouter *mux.Router, objLayer ObjectLayer, apiFuncti return } // API Router. - apiRouter := muxRouter.PathPrefix("/").Subrouter() + apiRouter := muxRouter.PathPrefix(SlashSeparator).Subrouter() // Bucket router. 
bucketRouter := apiRouter.PathPrefix("/{bucket}").Subrouter() diff --git a/vendor/github.com/minio/minio/cmd/tree-walk.go b/vendor/github.com/minio/minio/cmd/tree-walk.go index f396ca1317..ad062eb81b 100644 --- a/vendor/github.com/minio/minio/cmd/tree-walk.go +++ b/vendor/github.com/minio/minio/cmd/tree-walk.go @@ -67,10 +67,10 @@ func doTreeWalk(ctx context.Context, bucket, prefixDir, entryPrefixMatch, marker var markerBase, markerDir string if marker != "" { // Ex: if marker="four/five.txt", markerDir="four/" markerBase="five.txt" - markerSplit := strings.SplitN(marker, slashSeparator, 2) + markerSplit := strings.SplitN(marker, SlashSeparator, 2) markerDir = markerSplit[0] if len(markerSplit) == 2 { - markerDir += slashSeparator + markerDir += SlashSeparator markerBase = markerSplit[1] } } @@ -95,7 +95,7 @@ func doTreeWalk(ctx context.Context, bucket, prefixDir, entryPrefixMatch, marker for i, entry := range entries { pentry := pathJoin(prefixDir, entry) - isDir := hasSuffix(pentry, slashSeparator) + isDir := hasSuffix(pentry, SlashSeparator) if i == 0 && markerDir == entry { if !recursive { @@ -165,7 +165,7 @@ func startTreeWalk(ctx context.Context, bucket, prefix, marker string, recursive resultCh := make(chan TreeWalkResult, maxObjectList) entryPrefixMatch := prefix prefixDir := "" - lastIndex := strings.LastIndex(prefix, slashSeparator) + lastIndex := strings.LastIndex(prefix, SlashSeparator) if lastIndex != -1 { entryPrefixMatch = prefix[lastIndex+1:] prefixDir = prefix[:lastIndex+1] diff --git a/vendor/github.com/minio/minio/cmd/typed-errors.go b/vendor/github.com/minio/minio/cmd/typed-errors.go index 9bdbe086f1..82ae36b491 100644 --- a/vendor/github.com/minio/minio/cmd/typed-errors.go +++ b/vendor/github.com/minio/minio/cmd/typed-errors.go @@ -80,6 +80,13 @@ var errInvalidDecompressedSize = errors.New("Invalid Decompressed Size") // error returned in IAM subsystem when user doesn't exist. 
var errNoSuchUser = errors.New("Specified user does not exist") +// error returned in IAM subsystem when groups doesn't exist. +var errNoSuchGroup = errors.New("Specified group does not exist") + +// error returned in IAM subsystem when a non-empty group needs to be +// deleted. +var errGroupNotEmpty = errors.New("Specified group is not empty - cannot remove it") + // error returned in IAM subsystem when policy doesn't exist. var errNoSuchPolicy = errors.New("Specified canned policy does not exist") diff --git a/vendor/github.com/minio/minio/cmd/update-main.go b/vendor/github.com/minio/minio/cmd/update-main.go index 43b149b6a8..2ba1b0fca2 100644 --- a/vendor/github.com/minio/minio/cmd/update-main.go +++ b/vendor/github.com/minio/minio/cmd/update-main.go @@ -72,7 +72,7 @@ EXAMPLES: const ( minioReleaseTagTimeLayout = "2006-01-02T15-04-05Z" minioOSARCH = runtime.GOOS + "-" + runtime.GOARCH - minioReleaseURL = "https://dl.min.io/server/minio/release/" + minioOSARCH + "/" + minioReleaseURL = "https://dl.min.io/server/minio/release/" + minioOSARCH + SlashSeparator ) var ( diff --git a/vendor/github.com/minio/minio/cmd/utils.go b/vendor/github.com/minio/minio/cmd/utils.go index 7a786591df..0d72472b28 100644 --- a/vendor/github.com/minio/minio/cmd/utils.go +++ b/vendor/github.com/minio/minio/cmd/utils.go @@ -29,7 +29,6 @@ import ( "io/ioutil" "net" "net/http" - "net/url" "os" "path/filepath" "reflect" @@ -82,16 +81,16 @@ func request2BucketObjectName(r *http.Request) (bucketName, objectName string) { // Convert url path into bucket and object name. func urlPath2BucketObjectName(path string) (bucketName, objectName string) { - if path == "" || path == slashSeparator { + if path == "" || path == SlashSeparator { return "", "" } // Trim any preceding slash separator. - urlPath := strings.TrimPrefix(path, slashSeparator) + urlPath := strings.TrimPrefix(path, SlashSeparator) // Split urlpath using slash separator into a given number of // expected tokens. 
- tokens := strings.SplitN(urlPath, slashSeparator, 2) + tokens := strings.SplitN(urlPath, SlashSeparator, 2) bucketName = tokens[0] if len(tokens) == 2 { objectName = tokens[1] @@ -443,30 +442,21 @@ func isNetworkOrHostDown(err error) bool { if err == nil { return false } - switch err.(type) { - case *net.DNSError, *net.OpError, net.UnknownNetworkError: - return true - case *url.Error: - // For a URL error, where it replies back "connection closed" - if strings.Contains(err.Error(), "Connection closed by foreign host") { - return true - } - return true - default: - if strings.Contains(err.Error(), "net/http: TLS handshake timeout") { - // If error is - tlsHandshakeTimeoutError,. - return true - } else if strings.Contains(err.Error(), "i/o timeout") { - // If error is - tcp timeoutError. - return true - } else if strings.Contains(err.Error(), "connection timed out") { - // If err is a net.Dial timeout. - return true - } else if strings.Contains(err.Error(), "net/http: HTTP/1.x transport connection broken") { - return true - } + // We need to figure if the error either a timeout + // or a non-temporary error. + e, ok := err.(net.Error) + if ok { + return e.Timeout() } - return false + // Fallback to other mechanisms. + if strings.Contains(err.Error(), "i/o timeout") { + // If error is - tcp timeoutError. + ok = true + } else if strings.Contains(err.Error(), "connection timed out") { + // If err is a net.Dial timeout. + ok = true + } + return ok } // Used for registering with rest handlers (have a look at registerStorageRESTHandlers for usage example) diff --git a/vendor/github.com/minio/minio/cmd/utils_test.go b/vendor/github.com/minio/minio/cmd/utils_test.go index 1cd8bbe1d1..ac9ebd1686 100644 --- a/vendor/github.com/minio/minio/cmd/utils_test.go +++ b/vendor/github.com/minio/minio/cmd/utils_test.go @@ -148,7 +148,7 @@ func TestURL2BucketObjectName(t *testing.T) { // Test case 2 where url only has separator. 
{ u: &url.URL{ - Path: "/", + Path: SlashSeparator, }, bucket: "", object: "", diff --git a/vendor/github.com/minio/minio/cmd/web-handlers.go b/vendor/github.com/minio/minio/cmd/web-handlers.go index d7c59fc9af..7032325fe7 100644 --- a/vendor/github.com/minio/minio/cmd/web-handlers.go +++ b/vendor/github.com/minio/minio/cmd/web-handlers.go @@ -315,7 +315,7 @@ func (web *webAPIHandlers) ListBuckets(r *http.Request, args *WebGenericArgs, re r.Header.Set("prefix", "") // Set delimiter value for "s3:delimiter" policy conditionals. - r.Header.Set("delimiter", slashSeparator) + r.Header.Set("delimiter", SlashSeparator) // If etcd, dns federation configured list buckets from etcd. if globalDNSConfig != nil { @@ -429,7 +429,7 @@ func (web *webAPIHandlers) ListObjects(r *http.Request, args *ListObjectsArgs, r nextMarker := "" // Fetch all the objects for { - result, err := core.ListObjects(args.BucketName, args.Prefix, nextMarker, slashSeparator, 1000) + result, err := core.ListObjects(args.BucketName, args.Prefix, nextMarker, SlashSeparator, 1000) if err != nil { return toJSONError(ctx, err, args.BucketName) } @@ -464,7 +464,7 @@ func (web *webAPIHandlers) ListObjects(r *http.Request, args *ListObjectsArgs, r r.Header.Set("prefix", args.Prefix) // Set delimiter value for "s3:delimiter" policy conditionals. - r.Header.Set("delimiter", slashSeparator) + r.Header.Set("delimiter", SlashSeparator) // Check if anonymous (non-owner) has access to download objects. 
readable := globalPolicySys.IsAllowed(policy.Args{ @@ -480,7 +480,7 @@ func (web *webAPIHandlers) ListObjects(r *http.Request, args *ListObjectsArgs, r BucketName: args.BucketName, ConditionValues: getConditionValues(r, "", ""), IsOwner: false, - ObjectName: args.Prefix + "/", + ObjectName: args.Prefix + SlashSeparator, }) reply.Writable = writable @@ -503,7 +503,7 @@ func (web *webAPIHandlers) ListObjects(r *http.Request, args *ListObjectsArgs, r r.Header.Set("prefix", args.Prefix) // Set delimiter value for "s3:delimiter" policy conditionals. - r.Header.Set("delimiter", slashSeparator) + r.Header.Set("delimiter", SlashSeparator) readable := globalIAMSys.IsAllowed(iampolicy.Args{ AccountName: claims.Subject, @@ -519,7 +519,7 @@ func (web *webAPIHandlers) ListObjects(r *http.Request, args *ListObjectsArgs, r BucketName: args.BucketName, ConditionValues: getConditionValues(r, "", claims.Subject), IsOwner: owner, - ObjectName: args.Prefix + "/", + ObjectName: args.Prefix + SlashSeparator, }) reply.Writable = writable @@ -541,7 +541,7 @@ func (web *webAPIHandlers) ListObjects(r *http.Request, args *ListObjectsArgs, r nextMarker := "" // Fetch all the objects for { - lo, err := listObjects(ctx, args.BucketName, args.Prefix, nextMarker, slashSeparator, 1000) + lo, err := listObjects(ctx, args.BucketName, args.Prefix, nextMarker, SlashSeparator, 1000) if err != nil { return &json2.Error{Message: err.Error()} } @@ -671,7 +671,7 @@ func (web *webAPIHandlers) RemoveObject(r *http.Request, args *RemoveObjectArgs, next: for _, objectName := range args.Objects { // If not a directory, remove the object. 
- if !hasSuffix(objectName, slashSeparator) && objectName != "" { + if !hasSuffix(objectName, SlashSeparator) && objectName != "" { // Deny if WORM is enabled if globalWORMEnabled { if _, err = objectAPI.GetObjectInfo(ctx, args.BucketName, objectName, ObjectOptions{}); err == nil { @@ -1034,7 +1034,7 @@ func (web *webAPIHandlers) Upload(w http.ResponseWriter, r *http.Request) { return } if objectAPI.IsEncryptionSupported() { - if hasServerSideEncryptionHeader(r.Header) && !hasSuffix(object, slashSeparator) { // handle SSE requests + if hasServerSideEncryptionHeader(r.Header) && !hasSuffix(object, SlashSeparator) { // handle SSE requests rawReader := hashReader var objectEncryptionKey []byte reader, objectEncryptionKey, err = EncryptRequest(hashReader, r, bucket, object, metadata) @@ -1436,7 +1436,7 @@ func (web *webAPIHandlers) DownloadZip(w http.ResponseWriter, r *http.Request) { return nil } - if !hasSuffix(object, slashSeparator) { + if !hasSuffix(object, SlashSeparator) { // If not a directory, compress the file and write it to response. err := zipit(pathJoin(args.Prefix, object)) if err != nil { @@ -1873,7 +1873,7 @@ func presignedGet(host, bucket, object string, expiry int64, creds auth.Credenti query.Set(xhttp.AmzSignedHeaders, "host") queryStr := s3utils.QueryEncode(query) - path := "/" + path.Join(bucket, object) + path := SlashSeparator + path.Join(bucket, object) // "host" is the only header required to be signed for Presigned URLs. 
extractedSignedHeaders := make(http.Header) diff --git a/vendor/github.com/minio/minio/cmd/web-handlers_test.go b/vendor/github.com/minio/minio/cmd/web-handlers_test.go index 4eb94d4ffe..aeecf18fb3 100644 --- a/vendor/github.com/minio/minio/cmd/web-handlers_test.go +++ b/vendor/github.com/minio/minio/cmd/web-handlers_test.go @@ -824,7 +824,7 @@ func testUploadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler test := func(token string, sendContentLength bool) int { rec := httptest.NewRecorder() - req, rErr := http.NewRequest("PUT", "/minio/upload/"+bucketName+"/"+objectName, nil) + req, rErr := http.NewRequest("PUT", "/minio/upload/"+bucketName+SlashSeparator+objectName, nil) if rErr != nil { t.Fatalf("Cannot create upload request, %v", rErr) } @@ -926,7 +926,7 @@ func testDownloadWebHandler(obj ObjectLayer, instanceType string, t TestErrHandl test := func(token string) (int, []byte) { rec := httptest.NewRecorder() - path := "/minio/download/" + bucketName + "/" + objectName + "?token=" + path := "/minio/download/" + bucketName + SlashSeparator + objectName + "?token=" if token != "" { path = path + token } diff --git a/vendor/github.com/minio/minio/cmd/web-router.go b/vendor/github.com/minio/minio/cmd/web-router.go index 1eaaa6b403..dcf8576f8b 100644 --- a/vendor/github.com/minio/minio/cmd/web-router.go +++ b/vendor/github.com/minio/minio/cmd/web-router.go @@ -40,7 +40,7 @@ type indexHandler struct { } func (h indexHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - r.URL.Path = minioReservedBucketPath + "/" + r.URL.Path = minioReservedBucketPath + SlashSeparator h.handler.ServeHTTP(w, r) } diff --git a/vendor/github.com/minio/minio/cmd/xl-sets.go b/vendor/github.com/minio/minio/cmd/xl-sets.go index 780a743df8..32f04d93f2 100644 --- a/vendor/github.com/minio/minio/cmd/xl-sets.go +++ b/vendor/github.com/minio/minio/cmd/xl-sets.go @@ -327,15 +327,22 @@ func (s *xlSets) StorageInfo(ctx context.Context) StorageInfo { 
storageInfo.Backend.Sets[i] = make([]madmin.DriveInfo, s.drivesPerSet) } - storageDisks, err := initStorageDisks(s.endpoints) - if err != nil { - return storageInfo - } + storageDisks, dErrs := initDisksWithErrors(s.endpoints) defer closeStorageDisks(storageDisks) formats, sErrs := loadFormatXLAll(storageDisks) - drivesInfo := formatsToDrivesInfo(s.endpoints, formats, sErrs) + combineStorageErrors := func(diskErrs []error, storageErrs []error) []error { + for index, err := range diskErrs { + if err != nil { + storageErrs[index] = err + } + } + return storageErrs + } + + errs := combineStorageErrors(dErrs, sErrs) + drivesInfo := formatsToDrivesInfo(s.endpoints, formats, errs) refFormat, err := getFormatXLInQuorum(formats) if err != nil { // Ignore errors here, since this call cannot do anything at @@ -356,7 +363,6 @@ func (s *xlSets) StorageInfo(ctx context.Context) StorageInfo { } } } - // fill all the offline, missing endpoints as well. for _, drive := range drivesInfo { if drive.UUID == "" { @@ -1021,7 +1027,7 @@ func (s *xlSets) listObjectsNonSlash(ctx context.Context, bucket, prefix, marker // walked and merged at this layer. Resulting value through the merge process sends // the data in lexically sorted order. func (s *xlSets) listObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int, heal bool) (loi ListObjectsInfo, err error) { - if delimiter != slashSeparator && delimiter != "" { + if delimiter != SlashSeparator && delimiter != "" { // "heal" option passed can be ignored as the heal-listing does not send non-standard delimiter. return s.listObjectsNonSlash(ctx, bucket, prefix, marker, delimiter, maxKeys) } @@ -1048,7 +1054,7 @@ func (s *xlSets) listObjects(ctx context.Context, bucket, prefix, marker, delimi // along // with the prefix. On a flat namespace with 'prefix' // as '/' we don't have any entries, since all the keys are // of form 'keyName/...' 
- if delimiter == slashSeparator && prefix == slashSeparator { + if delimiter == SlashSeparator && prefix == SlashSeparator { return loi, nil } @@ -1059,7 +1065,7 @@ func (s *xlSets) listObjects(ctx context.Context, bucket, prefix, marker, delimi // Default is recursive, if delimiter is set then list non recursive. recursive := true - if delimiter == slashSeparator { + if delimiter == SlashSeparator { recursive = false } @@ -1086,7 +1092,7 @@ func (s *xlSets) listObjects(ctx context.Context, bucket, prefix, marker, delimi for _, entry := range entries.Files { var objInfo ObjectInfo - if hasSuffix(entry.Name, slashSeparator) { + if hasSuffix(entry.Name, SlashSeparator) { if !recursive { loi.Prefixes = append(loi.Prefixes, entry.Name) continue @@ -1250,17 +1256,17 @@ func formatsToDrivesInfo(endpoints EndpointList, formats []*formatXLV3, sErrs [] Endpoint: drive, State: madmin.DriveStateMissing, }) - case sErrs[i] == errCorruptedFormat: + case sErrs[i] == errDiskNotFound: beforeDrives = append(beforeDrives, madmin.DriveInfo{ UUID: "", Endpoint: drive, - State: madmin.DriveStateCorrupt, + State: madmin.DriveStateOffline, }) default: beforeDrives = append(beforeDrives, madmin.DriveInfo{ UUID: "", Endpoint: drive, - State: madmin.DriveStateOffline, + State: madmin.DriveStateCorrupt, }) } } diff --git a/vendor/github.com/minio/minio/cmd/xl-v1-common.go b/vendor/github.com/minio/minio/cmd/xl-v1-common.go index 65c1c903d5..0df3ef764c 100644 --- a/vendor/github.com/minio/minio/cmd/xl-v1-common.go +++ b/vendor/github.com/minio/minio/cmd/xl-v1-common.go @@ -37,7 +37,7 @@ func (xl xlObjects) getLoadBalancedDisks() (disks []StorageAPI) { func (xl xlObjects) parentDirIsObject(ctx context.Context, bucket, parent string) bool { var isParentDirObject func(string) bool isParentDirObject = func(p string) bool { - if p == "." || p == "/" { + if p == "." 
|| p == SlashSeparator { return false } if xl.isObject(bucket, p) { diff --git a/vendor/github.com/minio/minio/cmd/xl-v1-common_test.go b/vendor/github.com/minio/minio/cmd/xl-v1-common_test.go index 601d468d7e..70c64ac4b3 100644 --- a/vendor/github.com/minio/minio/cmd/xl-v1-common_test.go +++ b/vendor/github.com/minio/minio/cmd/xl-v1-common_test.go @@ -72,7 +72,7 @@ func TestXLParentDirIsObject(t *testing.T) { // Should not cause infinite loop. { parentIsObject: false, - objectName: "/", + objectName: SlashSeparator, }, { parentIsObject: false, diff --git a/vendor/github.com/minio/minio/cmd/xl-v1-healing.go b/vendor/github.com/minio/minio/cmd/xl-v1-healing.go index a8df3f94dd..7fc391db1a 100644 --- a/vendor/github.com/minio/minio/cmd/xl-v1-healing.go +++ b/vendor/github.com/minio/minio/cmd/xl-v1-healing.go @@ -501,10 +501,14 @@ func (xl xlObjects) healObjectDir(ctx context.Context, bucket, object string, dr drive = storageDisks[i].String() } switch err { + case nil: + hr.Before.Drives[i] = madmin.HealDriveInfo{State: madmin.DriveStateOk} + hr.After.Drives[i] = madmin.HealDriveInfo{State: madmin.DriveStateOk} case errDiskNotFound: hr.Before.Drives[i] = madmin.HealDriveInfo{State: madmin.DriveStateOffline} hr.After.Drives[i] = madmin.HealDriveInfo{State: madmin.DriveStateOffline} - case errVolumeNotFound: + case errVolumeNotFound, errFileNotFound: + // Bucket or prefix/directory not found hr.Before.Drives[i] = madmin.HealDriveInfo{Endpoint: drive, State: madmin.DriveStateMissing} hr.After.Drives[i] = madmin.HealDriveInfo{Endpoint: drive, State: madmin.DriveStateMissing} default: @@ -517,7 +521,8 @@ func (xl xlObjects) healObjectDir(ctx context.Context, bucket, object string, dr } for i, err := range errs { switch err { - case errVolumeNotFound: + case errVolumeNotFound, errFileNotFound: + // Bucket or prefix/directory not found merr := storageDisks[i].MakeVol(pathJoin(bucket, object)) switch merr { case nil, errVolumeExists: @@ -692,7 +697,7 @@ func (xl xlObjects) 
HealObject(ctx context.Context, bucket, object string, dryRu healCtx := logger.SetReqInfo(context.Background(), newReqInfo) // Healing directories handle it separately. - if hasSuffix(object, slashSeparator) { + if hasSuffix(object, SlashSeparator) { return xl.healObjectDir(healCtx, bucket, object, dryRun) } diff --git a/vendor/github.com/minio/minio/cmd/xl-v1-healing_test.go b/vendor/github.com/minio/minio/cmd/xl-v1-healing_test.go index 70a968f157..8939326705 100644 --- a/vendor/github.com/minio/minio/cmd/xl-v1-healing_test.go +++ b/vendor/github.com/minio/minio/cmd/xl-v1-healing_test.go @@ -250,3 +250,79 @@ func TestHealObjectXL(t *testing.T) { t.Errorf("Expected %v but received %v", InsufficientReadQuorum{}, err) } } + +// Tests healing of empty directories +func TestHealEmptyDirectoryXL(t *testing.T) { + nDisks := 16 + fsDirs, err := getRandomDisks(nDisks) + if err != nil { + t.Fatal(err) + } + defer removeRoots(fsDirs) + + // Everything is fine, should return nil + obj, _, err := initObjectLayer(mustGetNewEndpointList(fsDirs...)) + if err != nil { + t.Fatal(err) + } + + bucket := "bucket" + object := "empty-dir/" + var opts ObjectOptions + + err = obj.MakeBucketWithLocation(context.Background(), bucket, "") + if err != nil { + t.Fatalf("Failed to make a bucket - %v", err) + } + + // Upload an empty directory + _, err = obj.PutObject(context.Background(), bucket, object, mustGetPutObjReader(t, bytes.NewReader([]byte{}), 0, "", ""), opts) + if err != nil { + t.Fatal(err) + } + + // Remove the object backend files from the first disk. 
+ xl := obj.(*xlObjects) + firstDisk := xl.storageDisks[0] + err = firstDisk.DeleteFile(bucket, object) + if err != nil { + t.Fatalf("Failed to delete a file - %v", err) + } + + // Heal the object + hr, err := obj.HealObject(context.Background(), bucket, object, false, false, madmin.HealNormalScan) + if err != nil { + t.Fatalf("Failed to heal object - %v", err) + } + + // Check if the empty directory is restored in the first disk + _, err = firstDisk.StatVol(pathJoin(bucket, object)) + if err != nil { + t.Fatalf("Expected object to be present but stat failed - %v", err) + } + + // Check the state of the object in the first disk (should be missing) + if hr.Before.Drives[0].State != madmin.DriveStateMissing { + t.Fatalf("Unexpected drive state: %v", hr.Before.Drives[0].State) + } + + // Check the state of all other disks (should be ok) + for i, h := range append(hr.Before.Drives[1:], hr.After.Drives...) { + if h.State != madmin.DriveStateOk { + t.Fatalf("Unexpected drive state (%d): %v", i+1, h.State) + } + } + + // Heal the same object again + hr, err = obj.HealObject(context.Background(), bucket, object, false, false, madmin.HealNormalScan) + if err != nil { + t.Fatalf("Failed to heal object - %v", err) + } + + // Check that Before & After states are all okay + for i, h := range append(hr.Before.Drives, hr.After.Drives...) 
{ + if h.State != madmin.DriveStateOk { + t.Fatalf("Unexpected drive state (%d): %v", i+1, h.State) + } + } +} diff --git a/vendor/github.com/minio/minio/cmd/xl-v1-list-objects.go b/vendor/github.com/minio/minio/cmd/xl-v1-list-objects.go index c6d0d34ae2..be046699f8 100644 --- a/vendor/github.com/minio/minio/cmd/xl-v1-list-objects.go +++ b/vendor/github.com/minio/minio/cmd/xl-v1-list-objects.go @@ -63,7 +63,7 @@ func listDirFactory(ctx context.Context, disks ...StorageAPI) ListDirFunc { func (xl xlObjects) listObjects(ctx context.Context, bucket, prefix, marker, delimiter string, maxKeys int) (loi ListObjectsInfo, e error) { // Default is recursive, if delimiter is set then list non recursive. recursive := true - if delimiter == slashSeparator { + if delimiter == SlashSeparator { recursive = false } @@ -87,7 +87,7 @@ func (xl xlObjects) listObjects(ctx context.Context, bucket, prefix, marker, del } entry := walkResult.entry var objInfo ObjectInfo - if hasSuffix(entry, slashSeparator) { + if hasSuffix(entry, SlashSeparator) { // Object name needs to be full path. objInfo.Bucket = bucket objInfo.Name = entry @@ -125,7 +125,7 @@ func (xl xlObjects) listObjects(ctx context.Context, bucket, prefix, marker, del result := ListObjectsInfo{} for _, objInfo := range objInfos { - if objInfo.IsDir && delimiter == slashSeparator { + if objInfo.IsDir && delimiter == SlashSeparator { result.Prefixes = append(result.Prefixes, objInfo.Name) continue } @@ -165,7 +165,7 @@ func (xl xlObjects) ListObjects(ctx context.Context, bucket, prefix, marker, del // since according to s3 spec we stop at the 'delimiter' along // with the prefix. On a flat namespace with 'prefix' as '/' // we don't have any entries, since all the keys are of form 'keyName/...' 
- if delimiter == slashSeparator && prefix == slashSeparator { + if delimiter == SlashSeparator && prefix == SlashSeparator { return loi, nil } diff --git a/vendor/github.com/minio/minio/cmd/xl-v1-multipart.go b/vendor/github.com/minio/minio/cmd/xl-v1-multipart.go index 2f9b352cf4..8d167760ce 100644 --- a/vendor/github.com/minio/minio/cmd/xl-v1-multipart.go +++ b/vendor/github.com/minio/minio/cmd/xl-v1-multipart.go @@ -169,7 +169,7 @@ func (xl xlObjects) ListMultipartUploads(ctx context.Context, bucket, object, ke return result, err } for i := range uploadIDs { - uploadIDs[i] = strings.TrimSuffix(uploadIDs[i], slashSeparator) + uploadIDs[i] = strings.TrimSuffix(uploadIDs[i], SlashSeparator) } sort.Strings(uploadIDs) for _, uploadID := range uploadIDs { diff --git a/vendor/github.com/minio/minio/cmd/xl-v1-object.go b/vendor/github.com/minio/minio/cmd/xl-v1-object.go index be54d30836..85fadfda4a 100644 --- a/vendor/github.com/minio/minio/cmd/xl-v1-object.go +++ b/vendor/github.com/minio/minio/cmd/xl-v1-object.go @@ -147,7 +147,7 @@ func (xl xlObjects) GetObjectNInfo(ctx context.Context, bucket, object string, r // Handler directory request by returning a reader that // returns no bytes. - if hasSuffix(object, slashSeparator) { + if hasSuffix(object, SlashSeparator) { var objInfo ObjectInfo if objInfo, err = xl.getObjectInfoDir(ctx, bucket, object); err != nil { nsUnlocker() @@ -216,7 +216,7 @@ func (xl xlObjects) getObject(ctx context.Context, bucket, object string, startO } // If its a directory request, we return an empty body. 
- if hasSuffix(object, slashSeparator) { + if hasSuffix(object, SlashSeparator) { _, err := writer.Write([]byte("")) logger.LogIf(ctx, err) return toObjectErr(err, bucket, object) @@ -379,7 +379,7 @@ func (xl xlObjects) GetObjectInfo(ctx context.Context, bucket, object string, op return oi, err } - if hasSuffix(object, slashSeparator) { + if hasSuffix(object, SlashSeparator) { info, err := xl.getObjectInfoDir(ctx, bucket, object) if err != nil { return oi, toObjectErr(err, bucket, object) @@ -865,7 +865,7 @@ func (xl xlObjects) deleteObjects(ctx context.Context, bucket string, objects [] } for i, object := range objects { - isObjectDirs[i] = hasSuffix(object, slashSeparator) + isObjectDirs[i] = hasSuffix(object, SlashSeparator) } for i, object := range objects { @@ -972,7 +972,7 @@ func (xl xlObjects) DeleteObject(ctx context.Context, bucket, object string) (er } var writeQuorum int - var isObjectDir = hasSuffix(object, slashSeparator) + var isObjectDir = hasSuffix(object, SlashSeparator) if isObjectDir { _, err = xl.getObjectInfoDir(ctx, bucket, object) diff --git a/vendor/github.com/minio/minio/cmd/xl-v1-object_test.go b/vendor/github.com/minio/minio/cmd/xl-v1-object_test.go index 23dc3c37ec..dfbb90ee59 100644 --- a/vendor/github.com/minio/minio/cmd/xl-v1-object_test.go +++ b/vendor/github.com/minio/minio/cmd/xl-v1-object_test.go @@ -180,7 +180,7 @@ func TestXLDeleteObjectsXLSet(t *testing.T) { switch statErr.(type) { case ObjectNotFound: default: - t.Fatalf("Object %s is not removed", test.bucket+"/"+test.object) + t.Fatalf("Object %s is not removed", test.bucket+SlashSeparator+test.object) } } } diff --git a/vendor/github.com/minio/minio/docs/bucket/notifications/README.md b/vendor/github.com/minio/minio/docs/bucket/notifications/README.md index 805df0db45..d7c04f9b14 100644 --- a/vendor/github.com/minio/minio/docs/bucket/notifications/README.md +++ b/vendor/github.com/minio/minio/docs/bucket/notifications/README.md @@ -34,20 +34,22 @@ Install RabbitMQ from 
[here](https://www.rabbitmq.com/). The MinIO server configuration file is stored on the backend in json format. The AMQP configuration is located in the `amqp` key under the `notify` top-level key. Create a configuration key-value pair here for your AMQP instance. The key is a name for your AMQP endpoint, and the value is a collection of key-value parameters described in the table below. -| Parameter | Type | Description | -| :------------- | :------- | :------------------------------------------------------------------------------ | -| `enable` | _bool_ | (Required) Is this server endpoint configuration active/enabled? | -| `url` | _string_ | (Required) AMQP server endpoint, e.g. `amqp://myuser:mypassword@localhost:5672` | -| `exchange` | _string_ | Name of the exchange. | -| `routingKey` | _string_ | Routing key for publishing. | -| `exchangeType` | _string_ | Kind of exchange. | -| `deliveryMode` | _uint8_ | Delivery mode for publishing. 0 or 1 - transient; 2 - persistent. | -| `mandatory` | _bool_ | Publishing related bool. | -| `immediate` | _bool_ | Publishing related bool. | -| `durable` | _bool_ | Exchange declaration related bool. | -| `internal` | _bool_ | Exchange declaration related bool. | -| `noWait` | _bool_ | Exchange declaration related bool. | -| `autoDeleted` | _bool_ | Exchange declaration related bool. | +| Parameter | Type | Description | +| :------------- | :------- | :------------------------------------------------------------------------------- | +| `enable` | _bool_ | (Required) Is this server endpoint configuration active/enabled? | +| `url` | _string_ | (Required) AMQP server endpoint, e.g. `amqp://myuser:mypassword@localhost:5672` | +| `exchange` | _string_ | Name of the exchange. | +| `routingKey` | _string_ | Routing key for publishing. | +| `exchangeType` | _string_ | Kind of exchange. | +| `deliveryMode` | _uint8_ | Delivery mode for publishing. 0 or 1 - transient; 2 - persistent. 
| +| `mandatory` | _bool_ | Publishing related bool. | +| `immediate` | _bool_ | Publishing related bool. | +| `durable` | _bool_ | Exchange declaration related bool. | +| `internal` | _bool_ | Exchange declaration related bool. | +| `noWait` | _bool_ | Exchange declaration related bool. | +| `autoDeleted` | _bool_ | Exchange declaration related bool. | +| `queueDir` | _string_ | Persistent store for events when AMQP broker is offline | +| `queueLimit` | _int_ | Set the maximum event limit for the persistent store. The default limit is 10000 | An example configuration for RabbitMQ is shown below: @@ -65,10 +67,13 @@ An example configuration for RabbitMQ is shown below: "durable": false, "internal": false, "noWait": false, - "autoDeleted": false + "autoDeleted": false, + "queueDir": "", + "queueLimit": 0 } } ``` +MinIO supports persistent event store. The persistent store will backup events when the AMQP broker goes offline and replays it when the broker comes back online. The event store can be configured by setting the directory path in `queueDir` field and the maximum limit of events in the queueDir in `queueLimit` field. For eg, the `queueDir` can be `/home/events` and `queueLimit` can be `1000`. By default, the `queueLimit` is set to 10000. To update the configuration, use `mc admin config get` command to get the current configuration file for the minio deployment in json format, and save it locally. @@ -310,7 +315,7 @@ An example of Elasticsearch configuration is as follows: }, ``` -Minio supports persistent event store. The persistent store will backup events when the Elasticsearch broker goes offline and replays it when the broker comes back online. The event store can be configured by setting the directory path in `queueDir` field and the maximum limit of events in the queueDir in `queueLimit` field. For eg, the `queueDir` can be `/home/events` and `queueLimit` can be `1000`. By default, the `queueLimit` is set to 10000. 
+MinIO supports persistent event store. The persistent store will backup events when the Elasticsearch broker goes offline and replays it when the broker comes back online. The event store can be configured by setting the directory path in `queueDir` field and the maximum limit of events in the queueDir in `queueLimit` field. For eg, the `queueDir` can be `/home/events` and `queueLimit` can be `1000`. By default, the `queueLimit` is set to 10000. If Elasticsearch has authentication enabled, the credentials can be supplied to MinIO via the `url` parameter formatted as `PROTO://USERNAME:PASSWORD@ELASTICSEARCH_HOST:PORT`. @@ -460,11 +465,15 @@ An example of Redis configuration is as follows: "format": "namespace", "address": "127.0.0.1:6379", "password": "yoursecret", - "key": "bucketevents" + "key": "bucketevents", + "queueDir": "", + "queueLimit": 0 } } ``` +MinIO supports persistent event store. The persistent store will backup events when the Redis broker goes offline and replays it when the broker comes back online. The event store can be configured by setting the directory path in `queueDir` field and the maximum limit of events in the queueDir in `queueLimit` field. For eg, the `queueDir` can be `/home/events` and `queueLimit` can be `1000`. By default, the `queueLimit` is set to 10000. + To update the configuration, use `mc admin config get` command to get the current configuration file for the minio deployment in json format, and save it locally. ```sh @@ -543,11 +552,12 @@ The NATS configuration block in `config.json` is as follows: "password": "yoursecret", "token": "", "secure": false, - "pingInterval": 0 + "pingInterval": 0, + "queueDir": "", + "queueLimit": 0, "streaming": { "enable": false, "clusterID": "", - "clientID": "", "async": false, "maxPubAcksInflight": 0 } @@ -555,6 +565,8 @@ The NATS configuration block in `config.json` is as follows: }, ``` +MinIO supports persistent event store. 
The persistent store will backup events when the NATS broker goes offline and replays it when the broker comes back online. The event store can be configured by setting the directory path in `queueDir` field and the maximum limit of events in the queueDir in `queueLimit` field. For eg, the `queueDir` can be `/home/events` and `queueLimit` can be `1000`. By default, the `queueLimit` is set to 10000. + To update the configuration, use `mc admin config get` command to get the current configuration file for the minio deployment in json format, and save it locally. ```sh @@ -567,7 +579,7 @@ After updating the NATS configuration in /tmp/myconfig , use `mc admin config se $ mc admin config set myminio < /tmp/myconfig ``` -MinIO server also supports [NATS Streaming mode](http://nats.io/documentation/streaming/nats-streaming-intro/) that offers additional functionality like `Message/event persistence`, `At-least-once-delivery`, and `Publisher rate limiting`. To configure MinIO server to send notifications to NATS Streaming server, update the MinIO server configuration file as follows: +MinIO server also supports [NATS Streaming mode](http://nats.io/documentation/streaming/nats-streaming-intro/) that offers additional functionality like `At-least-once-delivery`, and `Publisher rate limiting`. 
To configure MinIO server to send notifications to NATS Streaming server, update the MinIO server configuration file as follows: ``` "nats": { @@ -580,10 +592,11 @@ MinIO server also supports [NATS Streaming mode](http://nats.io/documentation/st "token": "", "secure": false, "pingInterval": 0, + "queueDir": "", + "queueLimit": 0, "streaming": { "enable": true, "clusterID": "test-cluster", - "clientID": "minio-client", "async": true, "maxPubAcksInflight": 10 } @@ -673,20 +686,47 @@ import ( ) func main() { - natsConnection, _ := stan.Connect("test-cluster", "test-client") - log.Println("Connected") + + var stanConnection stan.Conn + + subscribe := func() { + fmt.Printf("Subscribing to subject 'bucketevents'\n") + stanConnection.Subscribe("bucketevents", func(m *stan.Msg) { + + // Handle the message + fmt.Printf("Received a message: %s\n", string(m.Data)) + }) + } + + + stanConnection, _ = stan.Connect("test-cluster", "test-client", stan.NatsURL("nats://yourusername:yoursecret@0.0.0.0:4222"), stan.SetConnectionLostHandler(func(c stan.Conn, _ error) { + go func() { + for { + // Reconnect if the connection is lost. 
+ if stanConnection == nil || stanConnection.NatsConn() == nil || !stanConnection.NatsConn().IsConnected() { + stanConnection, _ = stan.Connect("test-cluster", "test-client", stan.NatsURL("nats://yourusername:yoursecret@0.0.0.0:4222"), stan.SetConnectionLostHandler(func(c stan.Conn, _ error) { + if c.NatsConn() != nil { + c.NatsConn().Close() + } + _ = c.Close() + })) + if stanConnection != nil { + subscribe() + } + + } + } + + }() + })) // Subscribe to subject - log.Printf("Subscribing to subject 'bucketevents'\n") - natsConnection.Subscribe("bucketevents", func(m *stan.Msg) { - - // Handle the message - fmt.Printf("Received a message: %s\n", string(m.Data)) - }) + subscribe() // Keep the connection alive runtime.Goexit() } + ``` ``` @@ -754,11 +794,15 @@ An example of PostgreSQL configuration is as follows: "port": "5432", "user": "postgres", "password": "password", - "database": "minio_events" + "database": "minio_events", + "queueDir": "", + "queueLimit": 0 } } ``` +MinIO supports persistent event store. The persistent store will backup events when the PostgreSQL connection goes offline and replays it when the broker comes back online. The event store can be configured by setting the directory path in `queueDir` field and the maximum limit of events in the queueDir in `queueLimit` field. For eg, the `queueDir` can be `/home/events` and `queueLimit` can be `1000`. By default, the `queueLimit` is set to 10000. + Note that for illustration here, we have disabled SSL. In the interest of security, for production this is not recommended. To update the configuration, use `mc admin config get` command to get the current configuration file for the minio deployment in json format, and save it locally. @@ -849,21 +893,27 @@ The MinIO server configuration file is stored on the backend in json format. 
The An example of MySQL configuration is as follows: -``` +```json "mysql": { - "1": { - "enable": true, - "dsnString": "", - "table": "minio_images", - "host": "172.17.0.1", - "port": "3306", - "user": "root", - "password": "password", - "database": "miniodb" - } + "1": { + "enable": true, + "dsnString": "", + "format": "namespace", + "table": "minio_images", + "host": "172.17.0.1", + "port": "3306", + "user": "root", + "password": "password", + "database": "miniodb", + "queueDir": "", + "queueLimit": 0 + } } ``` + +MinIO supports persistent event store. The persistent store will backup events when the MySQL connection goes offline and replays it when the broker comes back online. The event store can be configured by setting the directory path in `queueDir` field and the maximum limit of events in the queueDir in `queueLimit` field. For eg, the `queueDir` can be `/home/events` and `queueLimit` can be `1000`. By default, the `queueLimit` is set to 10000. + To update the configuration, use `mc admin config get` command to get the current configuration file for the minio deployment in json format, and save it locally. ```sh @@ -953,6 +1003,7 @@ The MinIO server configuration file is stored on the backend in json format. Upd } } ``` + MinIO supports persistent event store. The persistent store will backup events when the kafka broker goes offline and replays it when the broker comes back online. The event store can be configured by setting the directory path in `queueDir` field and the maximum limit of events in the queueDir in `queueLimit` field. For eg, the `queueDir` can be `/home/events` and `queueLimit` can be `1000`. By default, the `queueLimit` is set to 10000. To update the configuration, use `mc admin config get` command to get the current configuration file for the minio deployment in json format, and save it locally. 
diff --git a/vendor/github.com/minio/minio/docs/config/README.md b/vendor/github.com/minio/minio/docs/config/README.md index e3ef48f0e3..c584b62192 100644 --- a/vendor/github.com/minio/minio/docs/config/README.md +++ b/vendor/github.com/minio/minio/docs/config/README.md @@ -162,17 +162,6 @@ export MINIO_DOMAIN=sub1.mydomain.com,sub2.mydomain.com minio server /data ``` -### Drive Sync - -By default, MinIO writes to disk in synchronous mode for all metadata operations. Set `MINIO_DRIVE_SYNC` environment variable to enable synchronous mode for all data operations as well. - -Example: - -```sh -export MINIO_DRIVE_SYNC=on -minio server /data -``` - ### HTTP Trace HTTP tracing can be enabled by using [`mc admin trace`](https://github.com/minio/mc/blob/master/docs/minio-admin-complete-guide.md#command-trace---display-minio-server-http-trace) command. diff --git a/vendor/github.com/minio/minio/docs/config/config.sample.json b/vendor/github.com/minio/minio/docs/config/config.sample.json index be22b956c5..5ba269b9ec 100644 --- a/vendor/github.com/minio/minio/docs/config/config.sample.json +++ b/vendor/github.com/minio/minio/docs/config/config.sample.json @@ -48,7 +48,9 @@ "durable": false, "internal": false, "noWait": false, - "autoDeleted": false + "autoDeleted": false, + "queueDir": "", + "queueLimit": 0 } }, "elasticsearch": { @@ -104,7 +106,9 @@ "port": "", "user": "", "password": "", - "database": "" + "database": "", + "queueDir": "", + "queueLimit": 0 } }, "nats": { @@ -117,6 +121,8 @@ "token": "", "secure": false, "pingInterval": 0, + "queueDir": "", + "queueLimit": 0, "streaming": { "enable": false, "clusterID": "", @@ -148,7 +154,9 @@ "port": "", "user": "", "password": "", - "database": "" + "database": "", + "queueDir": "", + "queueLimit": 0 } }, "redis": { @@ -157,7 +165,9 @@ "format": "", "address": "", "password": "", - "key": "" + "key": "", + "queueDir": "", + "queueLimit": 0 } }, "webhook": { diff --git 
a/vendor/github.com/minio/minio/docs/distributed/DESIGN.md b/vendor/github.com/minio/minio/docs/distributed/DESIGN.md index d3cac04875..8609c64b8f 100644 --- a/vendor/github.com/minio/minio/docs/distributed/DESIGN.md +++ b/vendor/github.com/minio/minio/docs/distributed/DESIGN.md @@ -1,28 +1,26 @@ -# Large Bucket Support Design Guide [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) - -This document explains the design approach, advanced use cases and limits of the large bucket feature. If you're looking to get started with large bucket support, we suggest you go through the [getting started document](https://github.com/minio/minio/blob/master/docs/large-bucket/README.md) first. +# Distributed Server Design Guide [![Slack](https://slack.min.io/slack?type=svg)](https://slack.min.io) +This document explains the design approach, advanced use cases and limits of the MinIO distributed server. ## Command-line - ``` NAME: - minio server - Start object storage server. + minio server - start object storage server. USAGE: minio server [FLAGS] DIR1 [DIR2..] minio server [FLAGS] DIR{1...64} DIR: - DIR points to a directory on a filesystem. When you want to combine multiple drives - into a single large system, pass one directory per filesystem separated by space. - You may also use a `...` convention to abbreviate the directory arguments. Remote - directories in a distributed setup are encoded as HTTP(s) URIs. + DIR points to a directory on a filesystem. When you want to combine + multiple drives into a single large system, pass one directory per + filesystem separated by space. You may also use a '...' convention + to abbreviate the directory arguments. Remote directories in a + distributed setup are encoded as HTTP(s) URIs. ``` ## Common usage Standalone erasure coded configuration with 4 sets with 16 disks each. - ``` minio server dir{1...64} ``` @@ -33,38 +31,21 @@ Distributed erasure coded configuration with 64 sets with 16 disks each. 
minio server http://host{1...16}/export{1...64} ``` -## Other usages +## Architecture -### Advanced use cases with multiple ellipses +Expansion of ellipses and choice of erasure sets based on this expansion is an automated process in MinIO. Here are some of the details of our underlying erasure coding behavior. -Standalone erasure coded configuration with 4 sets with 16 disks each, which spawns disks across controllers. -``` -minio server /mnt/controller{1...4}/data{1...16} -``` +- Erasure coding used by MinIO is [Reed-Solomon](https://github.com/klauspost/reedsolomon) erasure coding scheme, which has a total shard maximum of 256 i.e 128 data and 128 parity. MinIO design goes beyond this limitation by doing some practical architecture choices. -Standalone erasure coded configuration with 16 sets 16 disks per set, across mnts, across controllers. -``` -minio server /mnt{1..4}/controller{1...4}/data{1...16} -``` +- Erasure set is a single erasure coding unit within a MinIO deployment. An object is sharded within an erasure set. Erasure set size is automatically calculated based on the number of disks. MinIO supports unlimited number of disks but each erasure set can be upto 16 disks and a minimum of 4 disks. -Distributed erasure coded configuration with 2 sets 16 disks per set across hosts. -``` -minio server http://host{1...32}/disk1 -``` +- We limited the number of drives to 16 for erasure set because, erasure code shards more than 16 can become chatty and do not have any performance advantages. Additionally since 16 drive erasure set gives you tolerance of 8 disks per object by default which is plenty in any practical scenario. -Distributed erasure coded configuration with rack level redundancy 32 sets in total, 16 disks per set. 
-``` -minio server http://rack{1...4}-host{1...8}.example.net/export{1...16} -``` +- Choice of erasure set size is automatic based on the number of disks available, let's say for example if there are 32 servers and 32 disks which is a total of 1024 disks. In this scenario 16 becomes the erasure set size. This is decided based on the greatest common divisor (GCD) of acceptable erasure set sizes ranging from *4, 6, 8, 10, 12, 14, 16*. -Distributed erasure coded configuration with no rack level redundancy but redundancy with in the rack we split the arguments, 32 sets in total, 16 disks per set. -``` -minio server http://rack1-host{1...8}.example.net/export{1...16} http://rack2-host{1...8}.example.net/export{1...16} http://rack3-host{1...8}.example.net/export{1...16} http://rack4-host{1...8}.example.net/export{1...16} -``` +- *If total disks has many common divisors the algorithm chooses the minimum amounts of erasure sets possible for a erasure set size of any N*. In the example with 1024 disks - 4, 8, 16 are GCD factors. With 16 disks we get a total of 64 possible sets, with 8 disks we get a total of 128 possible sets, with 4 disks we get a total of 256 possible sets. So algorithm automatically chooses 64 sets, which is *16 * 64 = 1024* disks in total. -### Expected expansion for double ellipses - -MinIO server internally expands ellipses passed as arguments. Here is a sample expansion to demonstrate the process +- In this algorithm, we also make sure that we spread the disks out evenly. MinIO server expands ellipses passed as arguments. Here is a sample expansion to demonstrate the process. ``` minio server http://host{1...4}/export{1...8} @@ -106,6 +87,50 @@ Expected expansion > http://host4/export8 ``` +A noticeable trait of this expansion is that it chooses unique hosts such that the erasure code is efficient across drives and hosts. 
+ +- Choosing an erasure set for the object is decided during `PutObject()`, object names are used to find the right erasure set using the following pseudo code. +```go +// hashes the key returning an integer. +func crcHashMod(key string, cardinality int) int { + keyCrc := crc32.Checksum([]byte(key), crc32.IEEETable) + return int(keyCrc % uint32(cardinality)) +} +``` +Input for the key is the object name specified in `PutObject()`, returns a unique index. This index is one of the erasure sets where the object will reside. This function is a consistent hash for a given object name i.e for a given object name the index returned is always the same. + +- Write and Read quorum are required to be satisfied only across the erasure set for an object. Healing is also done per object within the erasure set which contains the object. + +- MinIO does erasure coding at the object level not at the volume level, unlike other object storage vendors. This allows applications to choose different storage class by setting `x-amz-storage-class=STANDARD/REDUCED_REDUNDANCY` for each object uploads so effectively utilizing the capacity of the cluster. Additionally these can also be enforced using IAM policies to make sure the client uploads with correct HTTP headers. + +## Other usages + +### Advanced use cases with multiple ellipses + +Standalone erasure coded configuration with 4 sets with 16 disks each, which spawns disks across controllers. +``` +minio server /mnt/controller{1...4}/data{1...16} +``` + +Standalone erasure coded configuration with 16 sets, 16 disks per set, across mounts and controllers. +``` +minio server /mnt{1..4}/controller{1...4}/data{1...16} +``` + +Distributed erasure coded configuration with 2 sets, 16 disks per set across hosts. +``` +minio server http://host{1...32}/disk1 +``` + +Distributed erasure coded configuration with rack level redundancy 32 sets in total, 16 disks per set. 
+``` +minio server http://rack{1...4}-host{1...8}.example.net/export{1...16} +``` + +Distributed erasure coded configuration with no rack level redundancy but redundancy with in the rack we split the arguments, 32 sets in total, 16 disks per set. +``` +minio server http://rack1-host{1...8}.example.net/export{1...16} http://rack2-host{1...8}.example.net/export{1...16} http://rack3-host{1...8}.example.net/export{1...16} http://rack4-host{1...8}.example.net/export{1...16} +``` ## Backend `format.json` changes `format.json` has new fields @@ -186,5 +211,5 @@ type formatXLV2 struct { ## Limits -- Minimum of 4 disks are needed for erasure coded configuration. +- Minimum of 4 disks are needed for any erasure coded configuration. - Maximum of 32 distinct nodes are supported in distributed configuration. diff --git a/vendor/github.com/minio/minio/docs/distributed/README.md b/vendor/github.com/minio/minio/docs/distributed/README.md index a5cfa2e869..4f6fc320e9 100644 --- a/vendor/github.com/minio/minio/docs/distributed/README.md +++ b/vendor/github.com/minio/minio/docs/distributed/README.md @@ -67,7 +67,6 @@ __NOTE:__ `{1...n}` shown have 3 dots! Using only 2 dots `{1..32}` will be inter To test this setup, access the MinIO server via browser or [`mc`](https://docs.min.io/docs/minio-client-quickstart-guide). 
## Explore Further -- [MinIO Large Bucket Support Guide](https://docs.min.io/docs/minio-large-bucket-support-quickstart-guide) - [MinIO Erasure Code QuickStart Guide](https://docs.min.io/docs/minio-erasure-code-quickstart-guide) - [Use `mc` with MinIO Server](https://docs.min.io/docs/minio-client-quickstart-guide) - [Use `aws-cli` with MinIO Server](https://docs.min.io/docs/aws-cli-with-minio) diff --git a/vendor/github.com/minio/minio/docs/docker/README.md b/vendor/github.com/minio/minio/docs/docker/README.md index da1d5a8777..503d3edce5 100644 --- a/vendor/github.com/minio/minio/docs/docker/README.md +++ b/vendor/github.com/minio/minio/docs/docker/README.md @@ -16,7 +16,6 @@ To create a MinIO container with persistent storage, you need to map local persi ```sh docker run -p 9000:9000 --name minio1 \ -v /mnt/data:/data \ - -v /mnt/config:/root/.minio \ minio/minio server /data ``` @@ -24,7 +23,6 @@ docker run -p 9000:9000 --name minio1 \ ```sh docker run -p 9000:9000 --name minio1 \ -v D:\data:/data \ - -v D:\minio\config:/root/.minio \ minio/minio server /data ``` @@ -44,7 +42,6 @@ docker run -p 9000:9000 --name minio1 \ -e "MINIO_ACCESS_KEY=AKIAIOSFODNN7EXAMPLE" \ -e "MINIO_SECRET_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \ -v /mnt/data:/data \ - -v /mnt/config:/root/.minio \ minio/minio server /data ``` @@ -54,33 +51,38 @@ docker run -p 9000:9000 --name minio1 \ -e "MINIO_ACCESS_KEY=AKIAIOSFODNN7EXAMPLE" \ -e "MINIO_SECRET_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \ -v D:\data:/data \ - -v D:\minio\config:/root/.minio \ minio/minio server /data ``` -### Run MinIO Docker as regular user -MinIO server doesn't run as a regular user by default in docker containers. To run MinIO container as regular user use environment variables `MINIO_USERNAME` and `MINIO_GROUPNAME`. - -> NOTE: If you are upgrading from existing deployments, you need to make sure this user has write access to previous persistent volumes. MinIO will not migrate the content automatically. 
+### Run MinIO Docker as a regular user +Docker provides standardized mechanisms to run docker containers as non-root users. #### GNU/Linux and macOS +On Linux and macOS you can use `--user` to run the container as a regular user. + +> NOTE: make sure `--user` has write permission to *${HOME}/data* prior to using `--user`. ```sh -docker run -p 9000:9000 --name minio1 \ +mkdir -p ${HOME}/data +docker run -p 9000:9000 \ + --user $(id -u):$(id -g) \ + --name minio1 \ -e "MINIO_ACCESS_KEY=AKIAIOSFODNN7EXAMPLE" \ - -e "MINIO_SECRET_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \ - -e "MINIO_USERNAME=minio-user" \ - -e "MINIO_GROUPNAME=minio-user" \ - -v /mnt/data:/data \ + -e "MINIO_SECRET_KEY=wJalrXUtnFEMIK7MDENGbPxRfiCYEXAMPLEKEY" \ + -v ${HOME}/data:/data \ minio/minio server /data ``` #### Windows +On Windows you would need to use [Docker integrated windows authentication](https://success.docker.com/article/modernizing-traditional-dot-net-applications#integratedwindowsauthentication) and [Create a container with Active Directory Support](https://blogs.msdn.microsoft.com/containerstuff/2017/01/30/create-a-container-with-active-directory-support/) + +> NOTE: make sure your AD/Windows user has write permissions to *D:\data* prior to using `credentialspec=`. 
+ ```powershell -docker run -p 9000:9000 --name minio1 \ +docker run -p 9000:9000 \ + --name minio1 \ + --security-opt "credentialspec=file://myuser.json" \ -e "MINIO_ACCESS_KEY=AKIAIOSFODNN7EXAMPLE" \ - -e "MINIO_SECRET_KEY=wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY" \ - -e "MINIO_USERNAME=minio-user" \ - -e "MINIO_GROUPNAME=minio-user" \ + -e "MINIO_SECRET_KEY=wJalrXUtnFEMIK7MDENGbPxRfiCYEXAMPLEKEY" \ -v D:\data:/data \ minio/minio server /data ``` diff --git a/vendor/github.com/minio/minio/docs/gateway/hdfs.md b/vendor/github.com/minio/minio/docs/gateway/hdfs.md index 9ca2664b9d..41c25effe1 100644 --- a/vendor/github.com/minio/minio/docs/gateway/hdfs.md +++ b/vendor/github.com/minio/minio/docs/gateway/hdfs.md @@ -3,27 +3,32 @@ MinIO HDFS gateway adds Amazon S3 API support to Hadoop HDFS filesystem. Applica ## Run MinIO Gateway for HDFS Storage -### Using Docker - -Please ensure to replace `hdfs://namenode:8200` with *an* actual name node ip/hostname and port. - -``` -docker run -p 9000:9000 --name hdfs-s3 \ - -e "MINIO_ACCESS_KEY=minio" \ - -e "MINIO_SECRET_KEY=minio123" \ - minio/minio gateway hdfs hdfs://namenode:8200 -``` - ### Using Binary +Namenode information is obtained by reading `core-site.xml` automatically from your hadoop environment variables *$HADOOP_HOME* +``` +export MINIO_ACCESS_KEY=minio +export MINIO_SECRET_KEY=minio123 +minio gateway hdfs +``` +You can also override the namenode endpoint as shown below. ``` export MINIO_ACCESS_KEY=minio export MINIO_SECRET_KEY=minio123 minio gateway hdfs hdfs://namenode:8200 ``` -## Test using MinIO Browser +### Using Docker +Using docker is experimental, most Hadoop environments are not dockerized and may require additional steps in getting this to work properly. You are better off just using the binary in this situation. 
+``` +docker run -p 9000:9000 \ + --name hdfs-s3 \ + -e "MINIO_ACCESS_KEY=minio" \ + -e "MINIO_SECRET_KEY=minio123" \ + minio/minio gateway hdfs hdfs://namenode:8200 +``` +## Test using MinIO Browser *MinIO gateway* comes with an embedded web based object browser. Point your web browser to http://127.0.0.1:9000 to ensure that your server has started successfully. ![Screenshot](https://raw.githubusercontent.com/minio/minio/master/docs/screenshots/minio-browser-gateway.png) @@ -59,6 +64,8 @@ Gateway inherits the following limitations of HDFS storage layer: - Additional metadata support for Multipart operations - Background append to provide concurrency support for multipart operations +Please open a GitHub issue if you wish these to be fixed https://github.com/minio/minio/issues + ## Explore Further - [`mc` command-line interface](https://docs.minio.io/docs/minio-client-quickstart-guide) - [`aws` command-line interface](https://docs.minio.io/docs/aws-cli-with-minio) diff --git a/vendor/github.com/minio/minio/docs/kms/README.md b/vendor/github.com/minio/minio/docs/kms/README.md index 6bf4235b42..a19bc916fa 100644 --- a/vendor/github.com/minio/minio/docs/kms/README.md +++ b/vendor/github.com/minio/minio/docs/kms/README.md @@ -141,8 +141,8 @@ path "transit/datakey/plaintext/my-minio-key" { path "transit/decrypt/my-minio-key" { capabilities = [ "read", "update"] } -path "transit/encrypt/my-minio-key" { - capabilities = [ "read", "update"] +path "transit/rewrap/my-minio-key" { + capabilities = ["update"] } EOF diff --git a/vendor/github.com/minio/minio/docs/orchestration/docker-compose/docker-compose.yaml b/vendor/github.com/minio/minio/docs/orchestration/docker-compose/docker-compose.yaml index 698ea82cd7..33ecf05662 100644 --- a/vendor/github.com/minio/minio/docs/orchestration/docker-compose/docker-compose.yaml +++ b/vendor/github.com/minio/minio/docs/orchestration/docker-compose/docker-compose.yaml @@ -5,7 +5,7 @@ version: '2' # 9001 through 9004. 
services: minio1: - image: minio/minio:RELEASE.2019-07-17T22-54-12Z + image: minio/minio:RELEASE.2019-08-07T01-59-21Z volumes: - data1:/data ports: @@ -15,7 +15,7 @@ services: MINIO_SECRET_KEY: minio123 command: server http://minio1/data http://minio2/data http://minio3/data http://minio4/data minio2: - image: minio/minio:RELEASE.2019-07-17T22-54-12Z + image: minio/minio:RELEASE.2019-08-07T01-59-21Z volumes: - data2:/data ports: @@ -25,7 +25,7 @@ services: MINIO_SECRET_KEY: minio123 command: server http://minio1/data http://minio2/data http://minio3/data http://minio4/data minio3: - image: minio/minio:RELEASE.2019-07-17T22-54-12Z + image: minio/minio:RELEASE.2019-08-07T01-59-21Z volumes: - data3:/data ports: @@ -35,7 +35,7 @@ services: MINIO_SECRET_KEY: minio123 command: server http://minio1/data http://minio2/data http://minio3/data http://minio4/data minio4: - image: minio/minio:RELEASE.2019-07-17T22-54-12Z + image: minio/minio:RELEASE.2019-08-07T01-59-21Z volumes: - data4:/data ports: diff --git a/vendor/github.com/minio/minio/docs/orchestration/docker-swarm/docker-compose-secrets.yaml b/vendor/github.com/minio/minio/docs/orchestration/docker-swarm/docker-compose-secrets.yaml index cdb382a43f..225e18fb5d 100644 --- a/vendor/github.com/minio/minio/docs/orchestration/docker-swarm/docker-compose-secrets.yaml +++ b/vendor/github.com/minio/minio/docs/orchestration/docker-swarm/docker-compose-secrets.yaml @@ -2,7 +2,7 @@ version: '3.1' services: minio1: - image: minio/minio:RELEASE.2019-07-17T22-54-12Z + image: minio/minio:RELEASE.2019-08-07T01-59-21Z hostname: minio1 volumes: - minio1-data:/export @@ -24,7 +24,7 @@ services: - access_key minio2: - image: minio/minio:RELEASE.2019-07-17T22-54-12Z + image: minio/minio:RELEASE.2019-08-07T01-59-21Z hostname: minio2 volumes: - minio2-data:/export @@ -46,7 +46,7 @@ services: - access_key minio3: - image: minio/minio:RELEASE.2019-07-17T22-54-12Z + image: minio/minio:RELEASE.2019-08-07T01-59-21Z hostname: minio3 volumes: - 
minio3-data:/export @@ -68,7 +68,7 @@ services: - access_key minio4: - image: minio/minio:RELEASE.2019-07-17T22-54-12Z + image: minio/minio:RELEASE.2019-08-07T01-59-21Z hostname: minio4 volumes: - minio4-data:/export diff --git a/vendor/github.com/minio/minio/docs/orchestration/docker-swarm/docker-compose.yaml b/vendor/github.com/minio/minio/docs/orchestration/docker-swarm/docker-compose.yaml index 9d6453240d..d0f6abc3cb 100644 --- a/vendor/github.com/minio/minio/docs/orchestration/docker-swarm/docker-compose.yaml +++ b/vendor/github.com/minio/minio/docs/orchestration/docker-swarm/docker-compose.yaml @@ -2,7 +2,7 @@ version: '3' services: minio1: - image: minio/minio:RELEASE.2019-07-17T22-54-12Z + image: minio/minio:RELEASE.2019-08-07T01-59-21Z hostname: minio1 volumes: - minio1-data:/export @@ -24,7 +24,7 @@ services: command: server http://minio1/export http://minio2/export http://minio3/export http://minio4/export minio2: - image: minio/minio:RELEASE.2019-07-17T22-54-12Z + image: minio/minio:RELEASE.2019-08-07T01-59-21Z hostname: minio2 volumes: - minio2-data:/export @@ -46,7 +46,7 @@ services: command: server http://minio1/export http://minio2/export http://minio3/export http://minio4/export minio3: - image: minio/minio:RELEASE.2019-07-17T22-54-12Z + image: minio/minio:RELEASE.2019-08-07T01-59-21Z hostname: minio3 volumes: - minio3-data:/export @@ -68,7 +68,7 @@ services: command: server http://minio1/export http://minio2/export http://minio3/export http://minio4/export minio4: - image: minio/minio:RELEASE.2019-07-17T22-54-12Z + image: minio/minio:RELEASE.2019-08-07T01-59-21Z hostname: minio4 volumes: - minio4-data:/export diff --git a/vendor/github.com/minio/minio/docs/orchestration/kubernetes/minio-distributed-daemonset.yaml b/vendor/github.com/minio/minio/docs/orchestration/kubernetes/minio-distributed-daemonset.yaml index 31bbd5cc88..3f67e681a9 100644 --- a/vendor/github.com/minio/minio/docs/orchestration/kubernetes/minio-distributed-daemonset.yaml +++ 
b/vendor/github.com/minio/minio/docs/orchestration/kubernetes/minio-distributed-daemonset.yaml @@ -30,7 +30,7 @@ spec: value: "minio" - name: MINIO_SECRET_KEY value: "minio123" - image: minio/minio:RELEASE.2019-07-17T22-54-12Z + image: minio/minio:RELEASE.2019-08-07T01-59-21Z # Unfortunately you must manually define each server. Perhaps autodiscovery via DNS can be implemented in the future. args: - server diff --git a/vendor/github.com/minio/minio/docs/orchestration/kubernetes/minio-distributed-statefulset.yaml b/vendor/github.com/minio/minio/docs/orchestration/kubernetes/minio-distributed-statefulset.yaml index fbc5d59777..bfa8f559b3 100644 --- a/vendor/github.com/minio/minio/docs/orchestration/kubernetes/minio-distributed-statefulset.yaml +++ b/vendor/github.com/minio/minio/docs/orchestration/kubernetes/minio-distributed-statefulset.yaml @@ -21,7 +21,7 @@ spec: value: "minio" - name: MINIO_SECRET_KEY value: "minio123" - image: minio/minio:RELEASE.2019-07-17T22-54-12Z + image: minio/minio:RELEASE.2019-08-07T01-59-21Z args: - server - http://minio-0.minio.default.svc.cluster.local/data diff --git a/vendor/github.com/minio/minio/docs/orchestration/kubernetes/minio-gcs-gateway-deployment.yaml b/vendor/github.com/minio/minio/docs/orchestration/kubernetes/minio-gcs-gateway-deployment.yaml index e3a0358dbb..3b07b2f24f 100644 --- a/vendor/github.com/minio/minio/docs/orchestration/kubernetes/minio-gcs-gateway-deployment.yaml +++ b/vendor/github.com/minio/minio/docs/orchestration/kubernetes/minio-gcs-gateway-deployment.yaml @@ -21,7 +21,7 @@ spec: containers: - name: minio # Pulls the default Minio image from Docker Hub - image: minio/minio:RELEASE.2019-07-17T22-54-12Z + image: minio/minio:RELEASE.2019-08-07T01-59-21Z args: - gateway - gcs diff --git a/vendor/github.com/minio/minio/docs/orchestration/kubernetes/minio-standalone-deployment.yaml b/vendor/github.com/minio/minio/docs/orchestration/kubernetes/minio-standalone-deployment.yaml index 229d87dbc1..726cf71475 100644 
--- a/vendor/github.com/minio/minio/docs/orchestration/kubernetes/minio-standalone-deployment.yaml +++ b/vendor/github.com/minio/minio/docs/orchestration/kubernetes/minio-standalone-deployment.yaml @@ -29,7 +29,7 @@ spec: - name: data mountPath: "/data" # Pulls the lastest Minio image from Docker Hub - image: minio/minio:RELEASE.2019-07-17T22-54-12Z + image: minio/minio:RELEASE.2019-08-07T01-59-21Z args: - server - /data diff --git a/vendor/github.com/minio/minio/docs/sts/assume-role.md b/vendor/github.com/minio/minio/docs/sts/assume-role.md index ee19345b60..62c4fba792 100644 --- a/vendor/github.com/minio/minio/docs/sts/assume-role.md +++ b/vendor/github.com/minio/minio/docs/sts/assume-role.md @@ -59,7 +59,7 @@ http://minio:9000/?Action=AssumeRole&DurationSeconds=3600&Version=2011-06-15&Pol Y4RJU1RNFGK48LGO9I2S sYLRKS1Z7hSjluf6gEbb9066hnx315wHTiACPAjg - 2018-11-09T16:51:11-08:00 + 2019-08-08T20:26:12Z eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJhY2Nlc3NLZXkiOiJZNFJKVTFSTkZHSzQ4TEdPOUkyUyIsImF1ZCI6IlBvRWdYUDZ1Vk80NUlzRU5SbmdEWGo1QXU1WWEiLCJhenAiOiJQb0VnWFA2dVZPNDVJc0VOUm5nRFhqNUF1NVlhIiwiZXhwIjoxNTQxODExMDcxLCJpYXQiOjE1NDE4MDc0NzEsImlzcyI6Imh0dHBzOi8vbG9jYWxob3N0Ojk0NDMvb2F1dGgyL3Rva2VuIiwianRpIjoiYTBiMjc2MjktZWUxYS00M2JmLTg3MzktZjMzNzRhNGNkYmMwIn0.ewHqKVFTaP-j_kgZrcOEKroNUjk10GEp8bqQjxBbYVovV0nHO985VnRESFbcT6XMDDKHZiWqN2vi_ETX_u3Q-w diff --git a/vendor/github.com/minio/minio/docs/sts/client-grants.md b/vendor/github.com/minio/minio/docs/sts/client-grants.md index 99c7a0f681..6edc1f7484 100644 --- a/vendor/github.com/minio/minio/docs/sts/client-grants.md +++ b/vendor/github.com/minio/minio/docs/sts/client-grants.md @@ -64,7 +64,7 @@ http://minio.cluster:9000?Action=AssumeRoleWithClientGrants&DurationSeconds=3600 Y4RJU1RNFGK48LGO9I2S sYLRKS1Z7hSjluf6gEbb9066hnx315wHTiACPAjg - 2018-11-09T16:51:11-08:00 + 2019-08-08T20:26:12Z 
eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJhY2Nlc3NLZXkiOiJZNFJKVTFSTkZHSzQ4TEdPOUkyUyIsImF1ZCI6IlBvRWdYUDZ1Vk80NUlzRU5SbmdEWGo1QXU1WWEiLCJhenAiOiJQb0VnWFA2dVZPNDVJc0VOUm5nRFhqNUF1NVlhIiwiZXhwIjoxNTQxODExMDcxLCJpYXQiOjE1NDE4MDc0NzEsImlzcyI6Imh0dHBzOi8vbG9jYWxob3N0Ojk0NDMvb2F1dGgyL3Rva2VuIiwianRpIjoiYTBiMjc2MjktZWUxYS00M2JmLTg3MzktZjMzNzRhNGNkYmMwIn0.ewHqKVFTaP-j_kgZrcOEKroNUjk10GEp8bqQjxBbYVovV0nHO985VnRESFbcT6XMDDKHZiWqN2vi_ETX_u3Q-w diff --git a/vendor/github.com/minio/minio/docs/sts/web-identity.md b/vendor/github.com/minio/minio/docs/sts/web-identity.md index 1f5bcf3c36..eb79b34c74 100644 --- a/vendor/github.com/minio/minio/docs/sts/web-identity.md +++ b/vendor/github.com/minio/minio/docs/sts/web-identity.md @@ -62,7 +62,7 @@ http://minio.cluster:9000?Action=AssumeRoleWithWebIdentity&DurationSeconds=3600& Y4RJU1RNFGK48LGO9I2S sYLRKS1Z7hSjluf6gEbb9066hnx315wHTiACPAjg - 2018-11-09T16:51:11-08:00 + 2019-08-08T20:26:12Z eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJhY2Nlc3NLZXkiOiJZNFJKVTFSTkZHSzQ4TEdPOUkyUyIsImF1ZCI6IlBvRWdYUDZ1Vk80NUlzRU5SbmdEWGo1QXU1WWEiLCJhenAiOiJQb0VnWFA2dVZPNDVJc0VOUm5nRFhqNUF1NVlhIiwiZXhwIjoxNTQxODExMDcxLCJpYXQiOjE1NDE4MDc0NzEsImlzcyI6Imh0dHBzOi8vbG9jYWxob3N0Ojk0NDMvb2F1dGgyL3Rva2VuIiwianRpIjoiYTBiMjc2MjktZWUxYS00M2JmLTg3MzktZjMzNzRhNGNkYmMwIn0.ewHqKVFTaP-j_kgZrcOEKroNUjk10GEp8bqQjxBbYVovV0nHO985VnRESFbcT6XMDDKHZiWqN2vi_ETX_u3Q-w diff --git a/vendor/github.com/minio/minio/go.mod b/vendor/github.com/minio/minio/go.mod index 84158d8082..2fad7b8935 100644 --- a/vendor/github.com/minio/minio/go.mod +++ b/vendor/github.com/minio/minio/go.mod @@ -37,7 +37,6 @@ require ( github.com/grpc-ecosystem/grpc-gateway v1.9.0 // indirect github.com/hashicorp/go-rootcerts v1.0.1 // indirect github.com/hashicorp/go-sockaddr v1.0.2 // indirect - github.com/hashicorp/go-uuid v1.0.1 // indirect github.com/hashicorp/golang-lru v0.5.1 // indirect github.com/hashicorp/hcl v1.0.0 // indirect github.com/hashicorp/raft v1.1.0 // indirect @@ -56,7 +55,8 @@ require ( 
github.com/miekg/dns v1.1.8 github.com/minio/cli v1.21.0 github.com/minio/dsync/v2 v2.0.0 - github.com/minio/hdfs/v3 v3.0.0 + github.com/minio/gokrb5/v7 v7.2.5 + github.com/minio/hdfs/v3 v3.0.1 github.com/minio/highwayhash v1.0.0 github.com/minio/lsync v1.0.1 github.com/minio/mc v0.0.0-20190529152718-f4bb0b8850cb @@ -94,11 +94,14 @@ require ( github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a go.etcd.io/bbolt v1.3.3 // indirect go.uber.org/atomic v1.3.2 - golang.org/x/crypto v0.0.0-20190621222207-cc06ce4a13d4 - golang.org/x/net v0.0.0-20190620200207-3b0461eec859 // indirect - golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb + golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 + golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 // indirect + golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa google.golang.org/api v0.4.0 gopkg.in/Shopify/sarama.v1 v1.20.0 gopkg.in/olivere/elastic.v5 v5.0.80 gopkg.in/yaml.v2 v2.2.2 ) + +// Added for go1.13 migration https://github.com/golang/go/issues/32805 +replace github.com/gorilla/rpc v1.2.0+incompatible => github.com/gorilla/rpc v1.2.0 diff --git a/vendor/github.com/minio/minio/go.sum b/vendor/github.com/minio/minio/go.sum index 32eb52e3d1..b0e8e58cce 100644 --- a/vendor/github.com/minio/minio/go.sum +++ b/vendor/github.com/minio/minio/go.sum @@ -5,7 +5,6 @@ cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.37.2 h1:4y4L7BdHenTfZL0HervofNTHh9Ad6mNX72cQvl+5eH0= cloud.google.com/go v0.37.2/go.mod h1:H8IAquKe2L30IxoupDgqTaQvKSwF/c8prYHynGIWQbA= contrib.go.opencensus.io/exporter/ocagent v0.4.7/go.mod h1:+KkYrcvvEN0E5ls626sqMv8PdMx2931feKtzIwP01qI= -contrib.go.opencensus.io/exporter/ocagent v0.4.10 h1:Trr4zF3bbDxrde1svPSW0PkGwCzoHY7f3JL8g5Gl+hM= contrib.go.opencensus.io/exporter/ocagent v0.4.10/go.mod h1:ueLzZcP7LPhPulEBukGn4aLh7Mx9YJwpVJ9nL2FYltw= contrib.go.opencensus.io/exporter/ocagent v0.5.0 h1:TKXjQSRS0/cCDrP7KvkgU6SmILtF/yV2TOs/02K/WZQ= 
contrib.go.opencensus.io/exporter/ocagent v0.5.0/go.mod h1:ImxhfLRpxoYiSq891pBrLVhN+qmP8BTVvdH2YLs7Gl0= @@ -22,7 +21,6 @@ github.com/Azure/go-autorest v11.7.0+incompatible h1:gzma19dc9ejB75D90E5S+/wXouz github.com/Azure/go-autorest v11.7.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= -github.com/DataDog/zstd v1.3.5 h1:DtpNbljikUepEPD16hD4LvIcmhnhdLTiW/5pHgbmp14= github.com/DataDog/zstd v1.3.5/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/DataDog/zstd v1.4.0 h1:vhoV+DUHnRZdKW1i5UMjAk2G4JY8wN4ayRfYDNdEhwo= github.com/DataDog/zstd v1.4.0/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= @@ -49,7 +47,6 @@ github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYU github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/araddon/gou v0.0.0-20190110011759-c797efecbb61/go.mod h1:ikc1XA58M+Rx7SEbf0bLJCfBkwayZ8T5jBo5FXK8Uz8= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= -github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da h1:8GUt8eRujhVEGZFFEjBj46YV4rDjvGrNxb0KMWYkL2I= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878 h1:EFSB7Zo9Eg91v7MJPVsifUysc/wPdN+NOnVe6bWbdBM= github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg= @@ -86,7 +83,6 @@ github.com/containerd/continuity v0.0.0-20181203112020-004b46473808/go.mod h1:GL github.com/coredns/coredns v0.0.0-20180121192821-d4bf076ccf4e/go.mod h1:zASH/MVDgR6XZTbxvOnsZfffS+31vg6Ackf/wo1+AM0= github.com/coredns/coredns v1.4.0 
h1:RubBkYmkByUqZWWkjRHvNLnUHgkRVqAWgSMmRFvpE1A= github.com/coredns/coredns v1.4.0/go.mod h1:zASH/MVDgR6XZTbxvOnsZfffS+31vg6Ackf/wo1+AM0= -github.com/coreos/bbolt v1.3.2 h1:wZwiHHUieZCquLkDL0B8UhzreNWsPHooDAG3q34zk0s= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/bbolt v1.3.3 h1:n6AiVyVRKQFNb6mJlwESEvvLoDyiTzXX7ORAUlkeBdY= github.com/coreos/bbolt v1.3.3/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= @@ -96,7 +92,6 @@ github.com/coreos/etcd v3.3.12+incompatible h1:pAWNwdf7QiT1zfaWyqCtNZQWCLByQyA3J github.com/coreos/etcd v3.3.12+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-oidc v2.0.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.0.0-20180108230905-e214231b295a/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= -github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -128,7 +123,6 @@ github.com/dustin/go-humanize v0.0.0-20170228161531-259d2a102b87/go.mod h1:Htrtb github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eapache/go-resiliency v0.0.0-20160104191539-b86b1ec0dd42/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-resiliency v1.1.0 h1:1NtRmCAqadE2FN4ZcN6g90TP3uk8cg9rn9eNK2197aU= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q= github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= 
@@ -162,7 +156,6 @@ github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeME github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q= github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= -github.com/go-ini/ini v1.42.0 h1:TWr1wGj35+UiWHlBA8er89seFXxzwFn11spilrrj+38= github.com/go-ini/ini v1.42.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= @@ -180,7 +173,6 @@ github.com/gogo/protobuf v1.2.1 h1:/s5zKNz0uPFCZ5hddgPdo2TK2TVrUNMn0OOX8/aZMTE= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef h1:veQD95Isof8w9/WXiA+pa3tz3fJXkt5B7QaRBrM62gk= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -189,7 +181,6 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.0 
h1:kbxbvI4Un1LUWKxufD+BiE6AEExYYgkQLQmLFqA1LFk= github.com/golang/protobuf v1.3.0/go.mod h1:Qd/q+1AKNOZr9uGQzbzCmRO6sUih6GTPZv6a1/R87v0= github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -209,7 +200,6 @@ github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57 h1:eqyIo2HjKhKe/mJzTG8n4VqvLXIOEG+SLdDqX7xGtkY= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go v2.0.0+incompatible h1:j0GKcs05QVmm7yesiZq2+9cxHkNK9YM6zKx4D2qucQU= @@ -218,7 +208,6 @@ github.com/googleapis/gax-go/v2 v2.0.4 h1:hU4mGcQI4DaAYW+IbTun+2qEZVFxK0ySjQLTbS github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20181103185306-d547d1d9531e/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gopherjs/gopherjs v0.0.0-20190309154008-847fc94819f9 h1:Z0f701LpR4dqO92bP6TnIe3ZURClzJtBhds8R8u1HBE= github.com/gopherjs/gopherjs v0.0.0-20190309154008-847fc94819f9/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gopherjs/gopherjs v0.0.0-20190328170749-bb2674552d8f h1:4Gslotqbs16iAg+1KR/XdabIfq8TlAWHdwS5QJFksLc= github.com/gopherjs/gopherjs v0.0.0-20190328170749-bb2674552d8f/go.mod 
h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -233,8 +222,8 @@ github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z github.com/gorilla/mux v1.7.0 h1:tOSd0UKHQd6urX6ApfOn4XdBMY6Sh1MfxV3kmaazO+U= github.com/gorilla/mux v1.7.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/rpc v0.0.0-20160517062331-bd3317b8f670/go.mod h1:V4h9r+4sF5HnzqbwIez0fKSpANP0zlYd3qR7p36jkTQ= -github.com/gorilla/rpc v1.2.0+incompatible h1:V3Dz9mWwCvHKm0N+mVM2A/hShV+hLUMUdzoyHQjr1NA= -github.com/gorilla/rpc v1.2.0+incompatible/go.mod h1:V4h9r+4sF5HnzqbwIez0fKSpANP0zlYd3qR7p36jkTQ= +github.com/gorilla/rpc v1.2.0 h1:WvvdC2lNeT1SP32zrIce5l0ECBfbAlmrmSBsuc57wfk= +github.com/gorilla/rpc v1.2.0/go.mod h1:V4h9r+4sF5HnzqbwIez0fKSpANP0zlYd3qR7p36jkTQ= github.com/gorilla/websocket v1.4.0 h1:WDFjx/TMzVgy9VdMMQi2K2Emtwi2QcUQsztZ/zLaH/Q= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= @@ -246,7 +235,6 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.6.2/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.7.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= -github.com/grpc-ecosystem/grpc-gateway v1.8.5 h1:2+KSC78XiO6Qy0hIjfc1OD9H+hsaJdJlb8Kqsd41CTE= github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.0 h1:bM6ZAFZmc/wPFaRDi0d5L7hGEZEx/2u+Tmr2evNHDiI= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= @@ -266,9 +254,7 @@ github.com/hashicorp/go-hclog v0.9.1/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrj 
github.com/hashicorp/go-immutable-radix v1.0.0 h1:AKDB1HM5PWEA7i4nhcpwOrO2byshxBjXVn/J/3+z5/0= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-memdb v0.0.0-20190306140544-eea0b16292ad/go.mod h1:kbfItVoBJwCfKXDXN4YoAXjxcFVZ7MRrJzyTX6H4giE= -github.com/hashicorp/go-msgpack v0.5.3 h1:zKjpN5BK/P5lMYrLmBHdBULWbJ0XpYR+7NGzqkZzoD4= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= -github.com/hashicorp/go-msgpack v0.5.4 h1:SFT72YqIkOcLdWJUYcriVX7hbrZpwc/f7h8aW2NUqrA= github.com/hashicorp/go-msgpack v0.5.4/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-msgpack v0.5.5 h1:i9R9JSrqIz0QVLz3sz+i3YJdT7TTSLcfLLzJi9aZTuI= github.com/hashicorp/go-msgpack v0.5.5/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= @@ -279,7 +265,6 @@ github.com/hashicorp/go-retryablehttp v0.0.0-20180718195005-e651d75abec6/go.mod github.com/hashicorp/go-retryablehttp v0.5.3 h1:QlWt0KvWT0lq8MFppF9tsJGF+ynG7ztc2KIPhzRGk7s= github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es7NDiJgTc6Er0maI1Xs= github.com/hashicorp/go-rootcerts v0.0.0-20160503143440-6bb64b370b90/go.mod h1:o4zcYY1e0GEZI6eSEr+43QDYmuGglw1qSO6qdHUHCgg= -github.com/hashicorp/go-rootcerts v1.0.0 h1:Rqb66Oo1X/eSV1x66xbDccZjhJigjg0+e82kpwzSwCI= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-rootcerts v1.0.1 h1:DMo4fmknnz0E0evoNYnV48RjWndOsmd6OW+09R3cEP8= github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= @@ -292,7 +277,6 @@ github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/b github.com/hashicorp/go-uuid v1.0.1 h1:fv1ep09latC32wFoVwnqcnKJGnMSdBanPczbHAYm1BE= github.com/hashicorp/go-uuid v1.0.1/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v0.0.0-20160214002439-2e7f5ea8e27b/go.mod 
h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/go-version v1.1.0 h1:bPIoEKD27tNdebFGGxxYwcL4nepeY4j1QP23PFRGzg0= github.com/hashicorp/go-version v1.1.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= @@ -304,9 +288,7 @@ github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/nomad v0.8.7/go.mod h1:WRaKjdO1G2iqi86TvTjIYtKTyxg4pl7NLr9InxtWaI0= -github.com/hashicorp/raft v1.0.0 h1:htBVktAOtGs4Le5Z7K8SF5H2+oWsQFYVmOgH5loro7Y= github.com/hashicorp/raft v1.0.0/go.mod h1:DVSAWItjLjTOkVbSpWQ0j0kUADIvDaCtBxIcbNAQLkI= -github.com/hashicorp/raft v1.0.1 h1:94uRdS11oEneUkxmXq6Vg9shNhBILh2UTb9crQjJWl0= github.com/hashicorp/raft v1.0.1/go.mod h1:DVSAWItjLjTOkVbSpWQ0j0kUADIvDaCtBxIcbNAQLkI= github.com/hashicorp/raft v1.1.0 h1:qPMePEczgbkiQsqCsRfuHRqvDUO+zmAInDaD5ptXlq0= github.com/hashicorp/raft v1.1.0/go.mod h1:4Ak7FSPnuvmb0GV6vgIAJ4vYT4bek9bb6Q+7HVbyzqM= @@ -327,12 +309,10 @@ github.com/hashicorp/vault-plugin-secrets-gcp v0.0.0-20190311200649-621231cb86fe github.com/hashicorp/vault-plugin-secrets-gcpkms v0.0.0-20190116164938-d6b25b0b4a39/go.mod h1:2n62quNV4DvfMY5Lxx82NJmx+9pYtv4RltLIFKxEO4E= github.com/hashicorp/vault-plugin-secrets-kv v0.0.0-20190315192709-dccffee64925/go.mod h1:VJHHT2SC1tAPrfENQeBhLlb5FbZoKZM+oC/ROmEftz0= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= -github.com/howeyc/gopass v0.0.0-20170109162249-bf9dde6d0d2c h1:kQWxfPIHVLbgLzphqk3QUflDy9QdksZR4ygR807bpy0= github.com/howeyc/gopass v0.0.0-20170109162249-bf9dde6d0d2c/go.mod 
h1:lADxMC39cJJqL93Duh1xhAs4I2Zs8mKS89XWXFGp9cs= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/inconshreveable/go-update v0.0.0-20160112193335-8152e7eb6ccf h1:WfD7VjIE6z8dIvMsI4/s+1qr5EL+zoIGev1BQj1eoJ8= github.com/inconshreveable/go-update v0.0.0-20160112193335-8152e7eb6ccf/go.mod h1:hyb9oH7vZsitZCiBt0ZvifOrB+qc8PS5IiilCIb87rg= -github.com/jcmturner/gofork v0.0.0-20180107083740-2aebee971930 h1:v4CYlQ+HeysPHsr2QFiEO60gKqnvn1xwvuKhhAhuEkk= github.com/jcmturner/gofork v0.0.0-20180107083740-2aebee971930/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03 h1:FUwcHNlEqkqLjLBdCp5PRlCFijNjvcYANOZXzCfXwCM= github.com/jcmturner/gofork v0.0.0-20190328161633-dc7c13fece03/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= @@ -355,12 +335,10 @@ github.com/keybase/go-crypto v0.0.0-20190312101036-b475f2ecc1fe/go.mod h1:ghbZsc github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.3.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.4.1 h1:8VMb5+0wMgdBykOV96DwNwKFQ+WTI4pzYURP99CcB9E= github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.5.0 h1:iDac0ZKbmSA4PRrRuXXjZL8C7UoJan8oBYxXkMzEQrI= github.com/klauspost/compress v1.5.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/cpuid v0.0.0-20160106104451-349c67577817/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= -github.com/klauspost/cpuid v1.2.0 h1:NMpwD2G9JSFOE1/TJjGSo5zG7Yb2bTe7eq1jH+irmeE= github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.2.1 h1:vJi+O/nMdFt0vqm8NZBI6wzALWdA2X+egi0ogNyrC/w= github.com/klauspost/cpuid v1.2.1/go.mod 
h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= @@ -409,31 +387,28 @@ github.com/michaelklishin/rabbit-hole v1.5.0/go.mod h1:vvI1uOitYZi0O5HEGXhaWC1XT github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.8 h1:1QYRAKU3lN5cRfLCkPU08hwvLJFhvjP6MqNMmQz6ZVI= github.com/miekg/dns v1.1.8/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/minio/blazer v0.0.0-20171126203752-2081f5bf0465 h1:rV63TOug3ph7F/cpkDSXdZNxyH3LCta+Lk5A/IYkt/0= github.com/minio/blazer v0.0.0-20171126203752-2081f5bf0465/go.mod h1:ChYiRE5crAmaOo0dQth8FJCtLTiPH9QEMVlDcMp+938= github.com/minio/cli v0.0.0-20170227073228-b8ae5507c0ce/go.mod h1:hLsWNQy2wIf3FKFnMlH69f4RdEyn8nbRA2shaulTjGY= -github.com/minio/cli v1.3.0 h1:vB0iUpmyaH54+1jJJj62Aa0qFF3xO3i0J3IcKiM6bHM= github.com/minio/cli v1.3.0/go.mod h1:hLsWNQy2wIf3FKFnMlH69f4RdEyn8nbRA2shaulTjGY= -github.com/minio/cli v1.20.0 h1:OVNIt8Rg5+mpYb8siWT2gBV5hvUyFbRvBikC+Ytvf5A= github.com/minio/cli v1.20.0/go.mod h1:bYxnK0uS629N3Bq+AOZZ+6lwF77Sodk4+UL9vNuXhOY= github.com/minio/cli v1.21.0 h1:8gE8iZc0ONOhHy/T28tCsNew5f5VzWU558U9Myjfq50= github.com/minio/cli v1.21.0/go.mod h1:bYxnK0uS629N3Bq+AOZZ+6lwF77Sodk4+UL9vNuXhOY= github.com/minio/dsync v0.0.0-20190104003057-61c41ffdeea2/go.mod h1:eLQe3mXL0h02kNpPtBJiLr1fIEIJftgXRAjncjQbxJo= -github.com/minio/dsync v1.0.0 h1:l6pQgUPBM41idlR0UOcpAP+EYim9MCwIAUh6sQQI1gk= github.com/minio/dsync v1.0.0/go.mod h1:eLQe3mXL0h02kNpPtBJiLr1fIEIJftgXRAjncjQbxJo= github.com/minio/dsync/v2 v2.0.0 h1:p353BZ9od4xgHSXHn5GQ9V3WcnsxqH6aaShy0jDSX54= github.com/minio/dsync/v2 v2.0.0/go.mod h1:kxZSSQoDZa5OAsfgM8JJ0iRQOkGsg0op9unAnQVMm7o= -github.com/minio/hdfs/v3 v3.0.0 h1:yHa9ugB2UeazkiphO6Q2dmbqiKR5lZssN/1vda4gwoY= +github.com/minio/gokrb5/v7 v7.2.5 h1:GPnlzrvEol/uJHouCEQ382Gd+w4in5s4wCTQ4umDki8= +github.com/minio/gokrb5/v7 v7.2.5/go.mod h1:z6fE6twrvMN004M+KRTHnmtfpxsBIztP0PVsak0/4f8= github.com/minio/hdfs/v3 v3.0.0/go.mod h1:k04lEYpgeojX3o1vQep6rQs4MCTD+qlh2xHFEa/BH6A= 
+github.com/minio/hdfs/v3 v3.0.1 h1:MMpqqS9CtuBTYrsMYZMWfDPmWemRf11zhcvj+mbKUYc= +github.com/minio/hdfs/v3 v3.0.1/go.mod h1:6ALh9HsAwG9xAXdpdrZJcSY0vR6z3K+9XIz6Y9pQG/c= github.com/minio/highwayhash v0.0.0-20181220011308-93ed73d64169/go.mod h1:NL8wme5P5MoscwAkXfGroz3VgpCdhBw3KYOu5mEsvpU= github.com/minio/highwayhash v1.0.0 h1:iMSDhgUILCr0TNm8LWlSjF8N0ZIj2qbO8WHp6Q/J2BA= github.com/minio/highwayhash v1.0.0/go.mod h1:xQboMTeM9nY9v/LlAOxFctujiv5+Aq2hR5dxBpaMbdc= -github.com/minio/lsync v0.0.0-20190207022115-a4e43e3d0887 h1:MIpCDz3d2FR2a+FjdizuFdjsoeHuLlSkl3YNQJ55jV8= github.com/minio/lsync v0.0.0-20190207022115-a4e43e3d0887/go.mod h1:ni10+iSX7FO8N2rv41XM444V6w4rYO0dZo5KIkbn/YA= github.com/minio/lsync v1.0.1 h1:AVvILxA976xc27hstd1oR+X9PQG0sPSom1MNb1ImfUs= github.com/minio/lsync v1.0.1/go.mod h1:tCFzfo0dlvdGl70IT4IAK/5Wtgb0/BrTmo/jE8pArKA= github.com/minio/mc v0.0.0-20190311071728-2e612b23d665/go.mod h1:7qLZXNjCD55DJ3iqe1uWoUh1MASRVd1M6wnqSdyhx7Y= -github.com/minio/mc v0.0.0-20190401030144-a1355e50e2e8 h1:npKHywsxVECDu+YHfR8Sswm3giEdRbCcLRSYudE3UxQ= github.com/minio/mc v0.0.0-20190401030144-a1355e50e2e8/go.mod h1:rnJByweU1h98rGmAcWcKen1sCAlekF38kbSrq6OLmAg= github.com/minio/mc v0.0.0-20190529152718-f4bb0b8850cb h1:GrlwnnSxGA0c6pgGn0elnX7kbPMhggeISQIachf1DOQ= github.com/minio/mc v0.0.0-20190529152718-f4bb0b8850cb/go.mod h1:GMIQKmYuGc7q10DsJJO8peBXCTRx5itwIHheVo+bKso= @@ -441,11 +416,9 @@ github.com/minio/minio v0.0.0-20190206103305-fd4e15c11641/go.mod h1:lXcp05uxYaW9 github.com/minio/minio v0.0.0-20190325204105-0250f7de678b/go.mod h1:6ODmvb06uOpNy0IM+3pJRTHaauOMpLJ51jLhipbcifI= github.com/minio/minio v0.0.0-20190510004154-ac3b59645e92/go.mod h1:yFbQSwuA61mB/SDurPvsaSydqDyJdfAlBYpMiEe1lz8= github.com/minio/minio-go v0.0.0-20190227180923-59af836a7e6d/go.mod h1:/haSOWG8hQNx2+JOfLJ9GKp61EAmgPwRVw/Sac0NzaM= -github.com/minio/minio-go v0.0.0-20190313212832-5d20267d970d h1:a12xhHWsc1raqp9S4j14W1kTnXtmAroJhqF9NeK1WdU= github.com/minio/minio-go 
v0.0.0-20190313212832-5d20267d970d/go.mod h1:/haSOWG8hQNx2+JOfLJ9GKp61EAmgPwRVw/Sac0NzaM= github.com/minio/minio-go v0.0.0-20190327203652-5325257a208f h1:u+iNxfkLrfyWp7KxSTV+ZhO4SMHT6qUFxSZ6yhYMQ0Q= github.com/minio/minio-go v0.0.0-20190327203652-5325257a208f/go.mod h1:/haSOWG8hQNx2+JOfLJ9GKp61EAmgPwRVw/Sac0NzaM= -github.com/minio/minio-go/v6 v6.0.26 h1:nHLr1A+sJBv/sQu6zc5BrHLFAStCXxloC+jmZp4FtW0= github.com/minio/minio-go/v6 v6.0.26/go.mod h1:vaNT59cWULS37E+E9zkuN/BVnKHyXtVGS+b04Boc66Y= github.com/minio/minio-go/v6 v6.0.29 h1:p4YPxK1beY13reFJjCE5QwCnXUMT9D5sV5wl0BSy5Xo= github.com/minio/minio-go/v6 v6.0.29/go.mod h1:vaNT59cWULS37E+E9zkuN/BVnKHyXtVGS+b04Boc66Y= @@ -501,7 +474,6 @@ github.com/nats-io/nats.go v1.8.0 h1:PXePcr71qzI9MMvQFfV0OBuNItkRQyyZowPfXzvdmVI github.com/nats-io/nats.go v1.8.0/go.mod h1:BrFz9vVn0fU3AcH9Vn4Kd7W0NpJ651tD5omQ3M8LwxM= github.com/nats-io/nkeys v0.0.2 h1:+qM7QpgXnvDDixitZtQUBDY9w/s9mu1ghS+JIbsrx6M= github.com/nats-io/nkeys v0.0.2/go.mod h1:dab7URMsZm6Z/jp9Z5UGa87Uutgc2mVpXLC4B7TDb/4= -github.com/nats-io/nuid v1.0.0 h1:44QGdhbiANq8ZCbUkdn6W5bqtg+mHuDE4wOUuxxndFs= github.com/nats-io/nuid v1.0.0/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= @@ -543,18 +515,15 @@ github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6J github.com/pkg/profile v1.3.0 h1:OQIvuDgm00gWVWGTf4m4mCt6W1/0YqU7Ntg0mySWgaI= github.com/pkg/profile v1.3.0/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pkg/xattr v0.0.0-20170808190211-56ed87199eba/go.mod h1:wuo6utqb0b/WNJYm0fQyg57cKpORNfpX2lY6Ew6+Grg= -github.com/pkg/xattr v0.4.1 h1:dhclzL6EqOXNaPDWqoeb9tIxATfBSmjqL0b4DpSjwRw= github.com/pkg/xattr v0.4.1/go.mod h1:W2cGD0TBEus7MkUgv0tNZ9JutLtVO3cXu+IBRuHqnFs= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= 
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= -github.com/posener/complete v1.2.1 h1:LrvDIY//XNo65Lq84G/akBuMGlawHvGBABv8f/ZN6DI= github.com/posener/complete v1.2.1/go.mod h1:6gapUrK/U1TAN7ciCoNRIdVC5sbdBTUh1DKN0g6uH7E= github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA= github.com/pquerna/otp v1.1.0/go.mod h1:Zad1CMQfSQZI5KLpahDiSUX4tMMREnXw98IvL1nhgMk= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 h1:D+CiwcpGTW6pL6bv6KI3KbyEyCKyS+1JWS2h8PNDnGA= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= @@ -571,7 +540,6 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 h1:/K3IL0Z1quvmJ7X0A1AwNEK7CRkVK3YwfOU/QAL4WGg= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rcrowley/go-metrics v0.0.0-20190704165056-9c2d0518ed81 h1:zQTtDd7fQiF9e80lbl+ShnD9/5NSq5r1EhcS8955ECg= 
github.com/rcrowley/go-metrics v0.0.0-20190704165056-9c2d0518ed81/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= @@ -590,11 +558,9 @@ github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/segmentio/go-prompt v0.0.0-20161017233205-f0d19b6901ad/go.mod h1:B3ehdD1xPoWDKgrQgUaGk+m8H1xb1J5TyYDfKpKNeEE= -github.com/segmentio/go-prompt v1.2.1-0.20161017233205-f0d19b6901ad h1:EqOdoSJGI7CsBQczPcIgmpm3hJE7X8Hj3jrgI002whs= github.com/segmentio/go-prompt v1.2.1-0.20161017233205-f0d19b6901ad/go.mod h1:B3ehdD1xPoWDKgrQgUaGk+m8H1xb1J5TyYDfKpKNeEE= github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.3.0 h1:hI/7Q+DtNZ2kINb6qt/lS+IyXnHQe9e90POfeewL/ME= github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= @@ -603,14 +569,12 @@ github.com/skyrings/skyring-common v0.0.0-20160929130248-d1c0bb1cbd5e h1:jrZSSgP github.com/skyrings/skyring-common v0.0.0-20160929130248-d1c0bb1cbd5e/go.mod h1:d8hQseuYt4rJoOo21lFzYJdhMjmDqLY++ayArbgYjWI= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v0.0.0-20190116191733-b6c0e53d7304/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/assertions v0.0.0-20190215210624-980c5ac6f3ac h1:wbW+Bybf9pXxnCFAOWZTqkRjAc7rAIwo2e1ArUhiHxg= github.com/smartystreets/assertions v0.0.0-20190215210624-980c5ac6f3ac/go.mod 
h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v0.0.0-20190401211740-f487f9de1cd3 h1:hBSHahWMEgzwRyS6dRpxY0XyjZsHyQ61s084wo5PJe0= github.com/smartystreets/assertions v0.0.0-20190401211740-f487f9de1cd3/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM= github.com/smartystreets/goconvey v0.0.0-20181108003508-044398e4856c/go.mod h1:XDJAKZRPZ1CvBcN2aX5YOUTYGHki24fSF0Iv48Ibg0s= github.com/smartystreets/goconvey v0.0.0-20190222223459-a17d461953aa/go.mod h1:2RVY1rIf+2J2o/IM9+vPq9RzmHDSseB7FoXiSNIUsoU= -github.com/smartystreets/goconvey v0.0.0-20190306220146-200a235640ff h1:86HlEv0yBCry9syNuylzqznKXDK11p6D0DT596yNMys= github.com/smartystreets/goconvey v0.0.0-20190306220146-200a235640ff/go.mod h1:KSQcGKpxUMHk3nbYzs/tIBAM2iDooCn0BmttHOJEbLs= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a h1:pa8hGb/2YqsZKovtsgrwcDH1RZhVbTKCjLp47XpqCDs= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= @@ -633,7 +597,6 @@ github.com/tidwall/gjson v1.2.1/go.mod h1:c/nTNbUr0E0OrXEhq1pwa8iEgc2DOt4ZZqAt1H github.com/tidwall/match v1.0.0/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc= github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= -github.com/tidwall/pretty v0.0.0-20190325153808-1166b9ac2b65 h1:rQ229MBgvW68s1/g6f1/63TgYwYxfF4E+bi/KC19P8g= github.com/tidwall/pretty v0.0.0-20190325153808-1166b9ac2b65/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= @@ -643,17 +606,14 @@ github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5 
h1:LnC5Kc github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= github.com/ugorji/go v0.0.0-20180628102755-7d51bbe6161d/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ= -github.com/ugorji/go v1.1.2 h1:JON3E2/GPW2iDNGoSAusl1KDf5TRQ8k8q7Tp097pZGs= github.com/ugorji/go v1.1.2/go.mod h1:hnLbHMwcvSihnDhEfx2/BzKp2xb0Y+ErdfYcrs9tkJQ= github.com/ugorji/go v1.1.4 h1:j4s+tAvLfL3bZyefP2SEWmhBzmuIlH/eqNuPdFPgngw= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/ugorji/go/codec v0.0.0-20190320090025-2dc34c0b8780 h1:vG/gY/PxA3v3l04qxe3tDjXyu3bozii8ulSlIPOYKhI= github.com/ugorji/go/codec v0.0.0-20190320090025-2dc34c0b8780/go.mod h1:iT03XoTwV7xq/+UGwKO3UbC1nNNlopQiY61beSdrtOA= github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a h1:0R4NLDRDZX6JcmhJgXi5E4b8Wg84ihbmUKp/GvSPEzc= github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -go.etcd.io/bbolt v1.3.2 h1:Z/90sZLPOeCy2PwprqkFa25PdkusRzaj9P8zm/KNyvk= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= @@ -661,7 +621,6 @@ go.opencensus.io v0.15.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.opencensus.io v0.18.1-0.20181204023538-aab39bd6a98b/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.opencensus.io v0.19.1/go.mod 
h1:gug0GbSHa8Pafr0d2urOSgoXHZ6x/RUlaiT0d9pqb4A= -go.opencensus.io v0.19.2 h1:ZZpq6xI6kv/LuE/5s5UQvBU5vMjvRnPb8PvJrIntAnc= go.opencensus.io v0.19.2/go.mod h1:NO/8qkisMZLZ1FCsKNqtJPwc8/TaclWyY0B6wcYNg9M= go.opencensus.io v0.21.0 h1:mU6zScU4U1YAFPHEHYk+3JC4SY7JxgkqS10ZOSyksNg= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -669,7 +628,6 @@ go.uber.org/atomic v1.3.2 h1:2Oa65PReHzfn29GpvgsYwloV9AVFHPDk8tYxt2c2tr4= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= -go.uber.org/zap v1.9.1 h1:XCJQEf3W6eZaVwhRBof6ImoYGJSITeKWsyeh3HFu/5o= go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= @@ -688,20 +646,17 @@ golang.org/x/crypto v0.0.0-20190131182504-b8fe1690c613/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190228161510-8dd112bcdc25/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c h1:Vj5n4GlwjmQteupaxJ9+0FNOmBrHfq7vN4btdGoDZgI= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f h1:R423Cnkcp5JABoeemiGEPlt9tHXFfw5kvc0yqlxRPWo= golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto 
v0.0.0-20190621222207-cc06ce4a13d4 h1:ydJNl0ENAG67pFbB+9tfhiL2pYqLhfoaZFw/cjLhY4A= -golang.org/x/crypto v0.0.0-20190621222207-cc06ce4a13d4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4 h1:HuIa8hRrWRSrqYzx1qI49NNxhdi2PrY7gxVSq1JjLDc= +golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -720,13 +675,11 @@ golang.org/x/net v0.0.0-20190301231341-16b79f2e4e95/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190318221613-d196dffd7c2b/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190324223953-e3b2ff56ed87/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3 
h1:0GoQqolDA55aaLxZyTzK/Y2ePZzZTUrRacwib7cNsYQ= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190424112056-4829fb13d2c6/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092 h1:4QSRKanuywn15aTZvI/mIDEgPQpswuFndXpOj3rKEco= golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80 h1:Ao/3l156eZf2AW5wK8a7/smtodRU+gha3+BeqJ69lRk= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 v0.0.0-20180603041954-1e0a3fa8ba9a/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -737,7 +690,6 @@ golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5Tlb golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync 
v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -767,10 +719,9 @@ golang.org/x/sys v0.0.0-20190322080309-f49334f85ddc/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190429190828-d89cdac9e872/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb h1:fgwFCsaw9buMuxNd6+DQfAuSFqbNiQZpcgJQAgJsK6k= -golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa h1:KIDDMLT1O0Nr7TSxp8xM5tJcdn8tgyAONntO829og1M= +golang.org/x/sys v0.0.0-20190804053845-51ab0e2deafa/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2 h1:z99zHgr7hKfrUcX/KsoJk5FJfjTceCKIp96+biqP4To= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= @@ -787,7 +738,6 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3 golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190318200714-bb1270c20edf/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190328211700-ab21143f2384 h1:TFlARGu6Czu1z7q93HTxcP1P+/ZFC/IKythI5RzrnRg= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod 
h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= google.golang.org/api v0.0.0-20180603000442-8e296ef26005/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= @@ -795,7 +745,6 @@ google.golang.org/api v0.0.0-20180916000451-19ff8768a5c0/go.mod h1:4mhQ8q/RsB7i+ google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20181220000619-583d854617af/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.2.0/go.mod h1:IfRCZScioGtypHNTlz3gFk67J8uePVW7uDTBzXuIkhU= -google.golang.org/api v0.3.0 h1:UIJY20OEo3+tK5MBlcdx37kmdH6EnRjGkW78mc6+EeA= google.golang.org/api v0.3.0/go.mod h1:IuvZyQh8jgscv8qWfQ4ABd8m7hEudgBFM/EdhA3BnXw= google.golang.org/api v0.4.0 h1:KKgc1aqhV8wDPbDzlDtpvyjZFY3vjz85FP7p4wcQUyI= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= @@ -819,16 +768,13 @@ google.golang.org/grpc v1.15.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9M google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.18.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= -google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -google.golang.org/grpc v1.19.1 h1:TrBcJ1yqAl1G++wO39nD/qtgpsW9/1+QGrluyMGEYgM= google.golang.org/grpc v1.19.1/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= gopkg.in/Shopify/sarama.v1 v1.10.1/go.mod h1:AxnvoaevB2nBjNK17cG61A3LleFcWFwVBHBt+cot4Oc= gopkg.in/Shopify/sarama.v1 v1.20.0 
h1:DrCuMOhmuaUwb5o4aL9JJnW+whbEnuuL6AZ99ySMoQA= gopkg.in/Shopify/sarama.v1 v1.20.0/go.mod h1:AxnvoaevB2nBjNK17cG61A3LleFcWFwVBHBt+cot4Oc= -gopkg.in/VividCortex/ewma.v1 v1.1.1 h1:tWHEKkKq802K/JT9RiqGCBU5fW3raAPnJGTE9ostZvg= gopkg.in/VividCortex/ewma.v1 v1.1.1/go.mod h1:TekXuFipeiHWiAlO1+wSS23vTcyFau5u3rxXUSXj710= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw= @@ -838,15 +784,12 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/cheggaaa/pb.v1 v1.0.27/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/cheggaaa/pb.v1 v1.0.28 h1:n1tBJnnK2r7g9OW2btFH91V92STTUevLXYFb8gy9EMk= gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/cheggaaa/pb.v2 v2.0.6 h1:L2KAo2l2ZQTzxmh8b9RdQpzgLpK2mX3paGCMJSUugBk= gopkg.in/cheggaaa/pb.v2 v2.0.6/go.mod h1:0CiZ1p8pvtxBlQpLXkHuUTpdJ1shm3OqCF1QugkjHL4= -gopkg.in/fatih/color.v1 v1.7.0 h1:bYGjb+HezBM6j/QmgBfgm1adxHpzzrss6bj4r9ROppk= gopkg.in/fatih/color.v1 v1.7.0/go.mod h1:P7yosIhqIl/sX8J8UypY5M+dDpD2KmyfP5IRs5v/fo0= gopkg.in/fatih/pool.v2 v2.0.0/go.mod h1:8xVGeu1/2jr2wm5V9SPuMht2H5AEmf5aFMGSQixtjTY= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gorethink/gorethink.v4 v4.1.0/go.mod h1:M7JgwrUAmshJ3iUbEK0Pt049MPyPK+CYDGGaEjdZb/c= gopkg.in/h2non/filetype.v1 v1.0.3/go.mod h1:M0yem4rwSX5lLVrkEuRRp2/NinFMD5vgJ4DlAhZcfNo= -gopkg.in/h2non/filetype.v1 v1.0.5 h1:CC1jjJjoEhNVbMhXYalmGBhOBK2V70Q1N850wt/98/Y= gopkg.in/h2non/filetype.v1 v1.0.5/go.mod h1:M0yem4rwSX5lLVrkEuRRp2/NinFMD5vgJ4DlAhZcfNo= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.41.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= @@ -856,17 +799,15 @@ gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hr 
gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= gopkg.in/jcmturner/dnsutils.v1 v1.0.1 h1:cIuC1OLRGZrld+16ZJvvZxVJeKPsvd5eUIvxfoN5hSM= gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= -gopkg.in/jcmturner/goidentity.v2 v2.0.0 h1:6Bmcdaxb0dD3HyHbo/MtJ2Q1wXLDuZJFwXZmuZvM+zw= gopkg.in/jcmturner/goidentity.v2 v2.0.0/go.mod h1:vCwK9HeXksMeUmQ4SxDd1tRz4LejrKh3KRVjQWhjvZI= -gopkg.in/jcmturner/gokrb5.v5 v5.3.0 h1:RS1MYApX27Hx1Xw7NECs7XxGxxrm69/4OmaRuX9kwec= +gopkg.in/jcmturner/goidentity.v3 v3.0.0 h1:1duIyWiTaYvVx3YX2CYtpJbUFd7/UuPYCfgXtQ3VTbI= +gopkg.in/jcmturner/goidentity.v3 v3.0.0/go.mod h1:oG2kH0IvSYNIu80dVAyu/yoefjq1mNfM5bm88whjWx4= gopkg.in/jcmturner/gokrb5.v5 v5.3.0/go.mod h1:oQz8Wc5GsctOTgCVyKad1Vw4TCWz5G6gfIQr88RPv4k= -gopkg.in/jcmturner/rpc.v0 v0.0.2 h1:wBTgrbL1qmLBUPsYVCqdJiI5aJgQhexmK+JkTHPUNJI= gopkg.in/jcmturner/rpc.v0 v0.0.2/go.mod h1:NzMq6cRzR9lipgw7WxRBHNx5N8SifBuaCQsOT1kWY/E= -gopkg.in/mattn/go-colorable.v0 v0.1.0 h1:WYuADWvfvYC07fm8ygYB3LMcsc5CunpxfMGKawHkAos= +gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU= +gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= gopkg.in/mattn/go-colorable.v0 v0.1.0/go.mod h1:BVJlBXzARQxdi3nZo6f6bnl5yR20/tOL6p+V0KejgSY= -gopkg.in/mattn/go-isatty.v0 v0.0.4 h1:NtS1rQGQr4IaFWBGz4Cz4BhB///gyys4gDVtKA7hIsc= gopkg.in/mattn/go-isatty.v0 v0.0.4/go.mod h1:wt691ab7g0X4ilKZNmMII3egK0bTxl37fEn/Fwbd8gc= -gopkg.in/mattn/go-runewidth.v0 v0.0.4 h1:r0P71TnzQDlNIcizCqvPSSANoFa3WVGtcNJf3TWurcY= gopkg.in/mattn/go-runewidth.v0 v0.0.4/go.mod h1:BmXejnxvhwdaATwiJbB1vZ2dtXkQKZGu9yLFCZb4msQ= gopkg.in/mgo.v2 v2.0.0-20180705113604-9856a29383ce/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= gopkg.in/olivere/elastic.v5 v5.0.31/go.mod h1:FylZT6jQWtfHsicejzOm3jIMVPOAksa80i3o+6qtQRk= diff --git a/vendor/github.com/minio/minio/mint/build/aws-sdk-go/install.sh 
b/vendor/github.com/minio/minio/mint/build/aws-sdk-go/install.sh index d73162840e..0c5529078a 100755 --- a/vendor/github.com/minio/minio/mint/build/aws-sdk-go/install.sh +++ b/vendor/github.com/minio/minio/mint/build/aws-sdk-go/install.sh @@ -16,4 +16,4 @@ # test_run_dir="$MINT_RUN_CORE_DIR/aws-sdk-go" -GO111MODULE=on go build -o "$test_run_dir/aws-sdk-go" "$test_run_dir/quick-tests.go" +GOPROXY=https://proxy.golang.org GO111MODULE=on go build -o "$test_run_dir/aws-sdk-go" "$test_run_dir/quick-tests.go" diff --git a/vendor/github.com/minio/minio/mint/build/healthcheck/install.sh b/vendor/github.com/minio/minio/mint/build/healthcheck/install.sh index 71ffe48238..906a62e679 100755 --- a/vendor/github.com/minio/minio/mint/build/healthcheck/install.sh +++ b/vendor/github.com/minio/minio/mint/build/healthcheck/install.sh @@ -16,4 +16,4 @@ # test_run_dir="$MINT_RUN_CORE_DIR/healthcheck" -GO111MODULE=on go build -o "$test_run_dir/healthcheck" "$test_run_dir/healthcheck.go" +GOPROXY=https://proxy.golang.org GO111MODULE=on go build -o "$test_run_dir/healthcheck" "$test_run_dir/healthcheck.go" diff --git a/vendor/github.com/minio/minio/mint/build/minio-go/install.sh b/vendor/github.com/minio/minio/mint/build/minio-go/install.sh index 5e30051996..1debb922bc 100755 --- a/vendor/github.com/minio/minio/mint/build/minio-go/install.sh +++ b/vendor/github.com/minio/minio/mint/build/minio-go/install.sh @@ -23,5 +23,5 @@ fi test_run_dir="$MINT_RUN_CORE_DIR/minio-go" (git clone https://github.com/minio/minio-go && cd minio-go && git checkout --quiet "tags/$MINIO_GO_VERSION") -GO111MODULE=on CGO_ENABLED=0 go build -o "$test_run_dir/minio-go" "minio-go/functional_tests.go" +GOPROXY=https://proxy.golang.org GO111MODULE=on CGO_ENABLED=0 go build -o "$test_run_dir/minio-go" "minio-go/functional_tests.go" rm -rf minio-go diff --git a/vendor/github.com/minio/minio/mint/build/security/install.sh b/vendor/github.com/minio/minio/mint/build/security/install.sh index a517bfb18a..e45c6789a1 100755 
--- a/vendor/github.com/minio/minio/mint/build/security/install.sh +++ b/vendor/github.com/minio/minio/mint/build/security/install.sh @@ -16,4 +16,4 @@ # test_run_dir="$MINT_RUN_CORE_DIR/security" -GO111MODULE=on go build -o "$test_run_dir/tls-tests" "$test_run_dir/tls-tests.go" +GOPROXY=https://proxy.golang.org GO111MODULE=on go build -o "$test_run_dir/tls-tests" "$test_run_dir/tls-tests.go" diff --git a/vendor/github.com/minio/minio/mint/build/worm/install.sh b/vendor/github.com/minio/minio/mint/build/worm/install.sh index 7cdf1fd0ff..49f3191bfc 100755 --- a/vendor/github.com/minio/minio/mint/build/worm/install.sh +++ b/vendor/github.com/minio/minio/mint/build/worm/install.sh @@ -16,4 +16,4 @@ # test_run_dir="$MINT_RUN_CORE_DIR/worm" -GO111MODULE=on CGO_ENABLED=0 go build -o "$test_run_dir/worm" "$test_run_dir/quick-worm-tests.go" +GOPROXY=https://proxy.golang.org GO111MODULE=on CGO_ENABLED=0 go build -o "$test_run_dir/worm" "$test_run_dir/quick-worm-tests.go" diff --git a/vendor/github.com/minio/minio/mint/mint.sh b/vendor/github.com/minio/minio/mint/mint.sh index 953c3f0966..5d9e9972cb 100755 --- a/vendor/github.com/minio/minio/mint/mint.sh +++ b/vendor/github.com/minio/minio/mint/mint.sh @@ -22,6 +22,7 @@ SERVER_REGION=${SERVER_REGION:-us-east-1} ENABLE_HTTPS=${ENABLE_HTTPS:-0} ENABLE_VIRTUAL_STYLE=${ENABLE_VIRTUAL_STYLE:-0} GO111MODULE=on +GOPROXY=https://proxy.golang.org if [ -z "$SERVER_ENDPOINT" ]; then SERVER_ENDPOINT="play.minio.io:9000" @@ -137,6 +138,7 @@ function main() export SERVER_REGION export ENABLE_VIRTUAL_STYLE export GO111MODULE + export GOPROXY echo "Running with" echo "SERVER_ENDPOINT: $SERVER_ENDPOINT" diff --git a/vendor/github.com/minio/minio/pkg/auth/credentials.go b/vendor/github.com/minio/minio/pkg/auth/credentials.go index 1be04c6373..04f5b9619d 100644 --- a/vendor/github.com/minio/minio/pkg/auth/credentials.go +++ b/vendor/github.com/minio/minio/pkg/auth/credentials.go @@ -170,7 +170,7 @@ func GetNewCredentialsWithMetadata(m 
map[string]interface{}, tokenSecret string) m["accessKey"] = cred.AccessKey jwt := jwtgo.NewWithClaims(jwtgo.SigningMethodHS512, jwtgo.MapClaims(m)) - cred.Expiration = time.Unix(expiry, 0) + cred.Expiration = time.Unix(expiry, 0).UTC() cred.SessionToken, err = jwt.SignedString([]byte(tokenSecret)) if err != nil { return cred, err diff --git a/vendor/github.com/minio/minio/pkg/disk/type_linux.go b/vendor/github.com/minio/minio/pkg/disk/type_linux.go index a2c6483cd2..2c4e0b48d6 100644 --- a/vendor/github.com/minio/minio/pkg/disk/type_linux.go +++ b/vendor/github.com/minio/minio/pkg/disk/type_linux.go @@ -37,6 +37,7 @@ var fsType2StringMap = map[string]string{ "794c7630": "overlayfs", "2fc12fc1": "zfs", "ff534d42": "cifs", + "53464846": "wslfs", } // getFSType returns the filesystem type of the underlying mounted filesystem diff --git a/vendor/github.com/minio/minio/pkg/dns/etcd_dns.go b/vendor/github.com/minio/minio/pkg/dns/etcd_dns.go index 44802c0a80..91e1ea90d4 100644 --- a/vendor/github.com/minio/minio/pkg/dns/etcd_dns.go +++ b/vendor/github.com/minio/minio/pkg/dns/etcd_dns.go @@ -21,6 +21,7 @@ import ( "encoding/json" "errors" "fmt" + "net" "sort" "strconv" "strings" @@ -215,9 +216,20 @@ func NewCoreDNS(domainNames []string, domainIPs set.StringSet, domainPort string return nil, err } + // strip ports off of domainIPs + domainIPsWithoutPorts := domainIPs.ApplyFunc(func(ip string) string { + host, _, err := net.SplitHostPort(ip) + if err != nil { + if strings.Contains(err.Error(), "missing port in address") { + host = ip + } + } + return host + }) + return &coreDNS{ domainNames: domainNames, - domainIPs: domainIPs, + domainIPs: domainIPsWithoutPorts, domainPort: port, etcdClient: etcdClient, }, nil diff --git a/vendor/github.com/minio/minio/pkg/event/config.go b/vendor/github.com/minio/minio/pkg/event/config.go index 50fb359950..49efc2dab6 100644 --- a/vendor/github.com/minio/minio/pkg/event/config.go +++ b/vendor/github.com/minio/minio/pkg/event/config.go @@ 
-276,15 +276,20 @@ func (conf *Config) ToRulesMap() RulesMap { // ParseConfig - parses data in reader to notification configuration. func ParseConfig(reader io.Reader, region string, targetList *TargetList) (*Config, error) { var config Config - if err := xml.NewDecoder(reader).Decode(&config); err != nil { + var err error + + if err = xml.NewDecoder(reader).Decode(&config); err != nil { return nil, err } - if err := config.Validate(region, targetList); err != nil { - return nil, err - } + err = config.Validate(region, targetList) config.SetRegion(region) - return &config, nil + // If xml namespace is empty, set a default value before returning. + if config.XMLNS == "" { + config.XMLNS = "http://s3.amazonaws.com/doc/2006-03-01/" + } + + return &config, err } diff --git a/vendor/github.com/minio/minio/pkg/event/target/amqp.go b/vendor/github.com/minio/minio/pkg/event/target/amqp.go index 8322557bdd..4b72d4d64e 100644 --- a/vendor/github.com/minio/minio/pkg/event/target/amqp.go +++ b/vendor/github.com/minio/minio/pkg/event/target/amqp.go @@ -17,12 +17,16 @@ package target import ( + "context" "encoding/json" + "errors" "net" "net/url" + "os" + "path/filepath" "sync" - "time" + "github.com/minio/minio/cmd/logger" "github.com/minio/minio/pkg/event" xnet "github.com/minio/minio/pkg/net" "github.com/streadway/amqp" @@ -42,6 +46,8 @@ type AMQPArgs struct { Internal bool `json:"internal"` NoWait bool `json:"noWait"` AutoDeleted bool `json:"autoDeleted"` + QueueDir string `json:"queueDir"` + QueueLimit uint64 `json:"queueLimit"` } // Validate AMQP arguments @@ -52,6 +58,15 @@ func (a *AMQPArgs) Validate() error { if _, err := amqp.ParseURI(a.URL.String()); err != nil { return err } + if a.QueueDir != "" { + if !filepath.IsAbs(a.QueueDir) { + return errors.New("queueDir path should be absolute") + } + } + if a.QueueLimit > 10000 { + return errors.New("queueLimit should not exceed 10000") + } + return nil } @@ -61,6 +76,7 @@ type AMQPTarget struct { args AMQPArgs conn 
*amqp.Connection connMutex sync.Mutex + store Store } // ID - returns TargetID. @@ -69,6 +85,10 @@ func (target *AMQPTarget) ID() event.TargetID { } func (target *AMQPTarget) channel() (*amqp.Channel, error) { + var err error + var conn *amqp.Connection + var ch *amqp.Channel + isAMQPClosedErr := func(err error) bool { if err == amqp.ErrClosed { return true @@ -84,21 +104,27 @@ func (target *AMQPTarget) channel() (*amqp.Channel, error) { target.connMutex.Lock() defer target.connMutex.Unlock() - ch, err := target.conn.Channel() - if err == nil { - return ch, nil + if target.conn != nil { + ch, err = target.conn.Channel() + if err == nil { + return ch, nil + } + + if !isAMQPClosedErr(err) { + return nil, err + } } - if !isAMQPClosedErr(err) { + conn, err = amqp.Dial(target.args.URL.String()) + if err != nil { + if IsConnRefusedErr(err) { + return nil, errNotConnected + } return nil, err } - var conn *amqp.Connection - if conn, err = amqp.Dial(target.args.URL.String()); err != nil { - return nil, err - } - - if ch, err = conn.Channel(); err != nil { + ch, err = conn.Channel() + if err != nil { return nil, err } @@ -107,21 +133,8 @@ func (target *AMQPTarget) channel() (*amqp.Channel, error) { return ch, nil } -// Save - Sends event directly without persisting. -func (target *AMQPTarget) Save(eventData event.Event) error { - return target.send(eventData) -} - -func (target *AMQPTarget) send(eventData event.Event) error { - ch, err := target.channel() - if err != nil { - return err - } - defer func() { - // FIXME: log returned error. ignore time being. - _ = ch.Close() - }() - +// send - sends an event to the AMQP. 
+func (target *AMQPTarget) send(eventData event.Event, ch *amqp.Channel) error { objectName, err := url.QueryUnescape(eventData.S3.Object.Key) if err != nil { return err @@ -138,17 +151,62 @@ func (target *AMQPTarget) send(eventData event.Event) error { return err } - return ch.Publish(target.args.Exchange, target.args.RoutingKey, target.args.Mandatory, + if err := ch.Publish(target.args.Exchange, target.args.RoutingKey, target.args.Mandatory, target.args.Immediate, amqp.Publishing{ ContentType: "application/json", DeliveryMode: target.args.DeliveryMode, Body: data, - }) + }); err != nil { + return err + } + + return nil } -// Send - interface compatible method does no-op. +// Save - saves the events to the store which will be replayed when the amqp connection is active. +func (target *AMQPTarget) Save(eventData event.Event) error { + if target.store != nil { + return target.store.Put(eventData) + } + ch, err := target.channel() + if err != nil { + return err + } + defer func() { + cErr := ch.Close() + logger.LogOnceIf(context.Background(), cErr, target.ID()) + }() + + return target.send(eventData, ch) +} + +// Send - sends event to AMQP. func (target *AMQPTarget) Send(eventKey string) error { - return nil + ch, err := target.channel() + if err != nil { + return err + } + defer func() { + cErr := ch.Close() + logger.LogOnceIf(context.Background(), cErr, target.ID()) + }() + + eventData, eErr := target.store.Get(eventKey) + if eErr != nil { + // The last event key in a successful batch will be sent in the channel atmost once by the replayEvents() + // Such events will not exist and wouldve been already been sent successfully. + if os.IsNotExist(eErr) { + return nil + } + return eErr + } + + if err := target.send(eventData, ch); err != nil { + return err + } + + // Delete the event from store. + return target.store.Del(eventKey) } // Close - does nothing and available for interface compatibility. 
@@ -157,24 +215,40 @@ func (target *AMQPTarget) Close() error { } // NewAMQPTarget - creates new AMQP target. -func NewAMQPTarget(id string, args AMQPArgs) (*AMQPTarget, error) { +func NewAMQPTarget(id string, args AMQPArgs, doneCh <-chan struct{}) (*AMQPTarget, error) { var conn *amqp.Connection var err error - // Retry 5 times with time interval of 2 seconds. - for i := 1; i <= 5; i++ { - conn, err = amqp.Dial(args.URL.String()) - if err == nil { - break + + var store Store + + if args.QueueDir != "" { + queueDir := filepath.Join(args.QueueDir, storePrefix+"-amqp-"+id) + store = NewQueueStore(queueDir, args.QueueLimit) + if oErr := store.Open(); oErr != nil { + return nil, oErr } - if err != nil && i == 5 { - return nil, err - } - time.Sleep(2 * time.Second) } - return &AMQPTarget{ - id: event.TargetID{ID: id, Name: "amqp"}, - args: args, - conn: conn, - }, nil + conn, err = amqp.Dial(args.URL.String()) + if err != nil { + if store == nil || !IsConnRefusedErr(err) { + return nil, err + } + } + + target := &AMQPTarget{ + id: event.TargetID{ID: id, Name: "amqp"}, + args: args, + conn: conn, + store: store, + } + + if target.store != nil { + // Replays the events from the store. + eventKeyCh := replayEvents(target.store, doneCh) + // Start replaying events from the store. + go sendEvents(target, eventKeyCh, doneCh) + } + + return target, nil } diff --git a/vendor/github.com/minio/minio/pkg/event/target/mysql.go b/vendor/github.com/minio/minio/pkg/event/target/mysql.go index cbe01c2350..ad5611c866 100644 --- a/vendor/github.com/minio/minio/pkg/event/target/mysql.go +++ b/vendor/github.com/minio/minio/pkg/event/target/mysql.go @@ -56,8 +56,11 @@ package target import ( "database/sql" "encoding/json" + "errors" "fmt" "net/url" + "os" + "path/filepath" "strconv" "strings" "time" @@ -79,15 +82,17 @@ const ( // MySQLArgs - MySQL target arguments. 
type MySQLArgs struct { - Enable bool `json:"enable"` - Format string `json:"format"` - DSN string `json:"dsnString"` - Table string `json:"table"` - Host xnet.URL `json:"host"` - Port string `json:"port"` - User string `json:"user"` - Password string `json:"password"` - Database string `json:"database"` + Enable bool `json:"enable"` + Format string `json:"format"` + DSN string `json:"dsnString"` + Table string `json:"table"` + Host xnet.URL `json:"host"` + Port string `json:"port"` + User string `json:"user"` + Password string `json:"password"` + Database string `json:"database"` + QueueDir string `json:"queueDir"` + QueueLimit uint64 `json:"queueLimit"` } // Validate MySQLArgs fields @@ -123,6 +128,16 @@ func (m MySQLArgs) Validate() error { return fmt.Errorf("database unspecified") } } + + if m.QueueDir != "" { + if !filepath.IsAbs(m.QueueDir) { + return errors.New("queueDir path should be absolute") + } + } + if m.QueueLimit > 10000 { + return errors.New("queueLimit should not exceed 10000") + } + return nil } @@ -134,6 +149,8 @@ type MySQLTarget struct { deleteStmt *sql.Stmt insertStmt *sql.Stmt db *sql.DB + store Store + firstPing bool } // ID - returns target ID. @@ -141,11 +158,21 @@ func (target *MySQLTarget) ID() event.TargetID { return target.id } -// Save - Sends event directly without persisting. +// Save - saves the events to the store which will be replayed when the SQL connection is active. func (target *MySQLTarget) Save(eventData event.Event) error { + if target.store != nil { + return target.store.Put(eventData) + } + if err := target.db.Ping(); err != nil { + if IsConnErr(err) { + return errNotConnected + } + return err + } return target.send(eventData) } +// send - sends an event to the mysql. 
func (target *MySQLTarget) send(eventData event.Event) error { if target.args.Format == event.NamespaceFormat { objectName, err := url.QueryUnescape(eventData.S3.Object.Key) @@ -164,6 +191,7 @@ func (target *MySQLTarget) send(eventData event.Event) error { _, err = target.updateStmt.Exec(key, data) } + return err } @@ -179,15 +207,51 @@ func (target *MySQLTarget) send(eventData event.Event) error { } _, err = target.insertStmt.Exec(eventTime, data) + return err } return nil } -// Send - interface compatible method does no-op. +// Send - reads an event from store and sends it to MySQL. func (target *MySQLTarget) Send(eventKey string) error { - return nil + + if err := target.db.Ping(); err != nil { + if IsConnErr(err) { + return errNotConnected + } + return err + } + + if !target.firstPing { + if err := target.executeStmts(); err != nil { + if IsConnErr(err) { + return errNotConnected + } + return err + } + } + + eventData, eErr := target.store.Get(eventKey) + if eErr != nil { + // The last event key in a successful batch will be sent in the channel atmost once by the replayEvents() + // Such events will not exist and wouldve been already been sent successfully. + if os.IsNotExist(eErr) { + return nil + } + return eErr + } + + if err := target.send(eventData); err != nil { + if IsConnErr(err) { + return errNotConnected + } + return err + } + + // Delete the event from store. + return target.store.Del(eventKey) } // Close - closes underneath connections to MySQL database. @@ -210,8 +274,45 @@ func (target *MySQLTarget) Close() error { return target.db.Close() } +// Executes the table creation statements. 
+func (target *MySQLTarget) executeStmts() error { + + _, err := target.db.Exec(fmt.Sprintf(mysqlTableExists, target.args.Table)) + if err != nil { + createStmt := mysqlCreateNamespaceTable + if target.args.Format == event.AccessFormat { + createStmt = mysqlCreateAccessTable + } + + if _, dbErr := target.db.Exec(fmt.Sprintf(createStmt, target.args.Table)); dbErr != nil { + return dbErr + } + } + + switch target.args.Format { + case event.NamespaceFormat: + // insert or update statement + if target.updateStmt, err = target.db.Prepare(fmt.Sprintf(mysqlUpdateRow, target.args.Table)); err != nil { + return err + } + // delete statement + if target.deleteStmt, err = target.db.Prepare(fmt.Sprintf(mysqlDeleteRow, target.args.Table)); err != nil { + return err + } + case event.AccessFormat: + // insert statement + if target.insertStmt, err = target.db.Prepare(fmt.Sprintf(mysqlInsertRow, target.args.Table)); err != nil { + return err + } + } + + return nil + +} + // NewMySQLTarget - creates new MySQL target. 
-func NewMySQLTarget(id string, args MySQLArgs) (*MySQLTarget, error) { +func NewMySQLTarget(id string, args MySQLArgs, doneCh <-chan struct{}) (*MySQLTarget, error) { + var firstPing bool if args.DSN == "" { config := mysql.Config{ User: args.User, @@ -230,45 +331,42 @@ func NewMySQLTarget(id string, args MySQLArgs) (*MySQLTarget, error) { return nil, err } - if err = db.Ping(); err != nil { - return nil, err - } + var store Store - if _, err = db.Exec(fmt.Sprintf(mysqlTableExists, args.Table)); err != nil { - createStmt := mysqlCreateNamespaceTable - if args.Format == event.AccessFormat { - createStmt = mysqlCreateAccessTable - } - - if _, err = db.Exec(fmt.Sprintf(createStmt, args.Table)); err != nil { - return nil, err + if args.QueueDir != "" { + queueDir := filepath.Join(args.QueueDir, storePrefix+"-mysql-"+id) + store = NewQueueStore(queueDir, args.QueueLimit) + if oErr := store.Open(); oErr != nil { + return nil, oErr } } - var updateStmt, deleteStmt, insertStmt *sql.Stmt - switch args.Format { - case event.NamespaceFormat: - // insert or update statement - if updateStmt, err = db.Prepare(fmt.Sprintf(mysqlUpdateRow, args.Table)); err != nil { - return nil, err - } - // delete statement - if deleteStmt, err = db.Prepare(fmt.Sprintf(mysqlDeleteRow, args.Table)); err != nil { - return nil, err - } - case event.AccessFormat: - // insert statement - if insertStmt, err = db.Prepare(fmt.Sprintf(mysqlInsertRow, args.Table)); err != nil { - return nil, err - } + target := &MySQLTarget{ + id: event.TargetID{ID: id, Name: "mysql"}, + args: args, + db: db, + store: store, + firstPing: firstPing, } - return &MySQLTarget{ - id: event.TargetID{ID: id, Name: "mysql"}, - args: args, - updateStmt: updateStmt, - deleteStmt: deleteStmt, - insertStmt: insertStmt, - db: db, - }, nil + err = target.db.Ping() + if err != nil { + if target.store == nil || !IsConnRefusedErr(err) { + return nil, err + } + } else { + if err = target.executeStmts(); err != nil { + return nil, err + } + 
target.firstPing = true + } + + if target.store != nil { + // Replays the events from the store. + eventKeyCh := replayEvents(target.store, doneCh) + // Start replaying events from the store. + go sendEvents(target, eventKeyCh, doneCh) + } + + return target, nil } diff --git a/vendor/github.com/minio/minio/pkg/event/target/nats.go b/vendor/github.com/minio/minio/pkg/event/target/nats.go index 32d8d1c344..65ebf0852d 100644 --- a/vendor/github.com/minio/minio/pkg/event/target/nats.go +++ b/vendor/github.com/minio/minio/pkg/event/target/nats.go @@ -20,6 +20,8 @@ import ( "encoding/json" "errors" "net/url" + "os" + "path/filepath" "github.com/minio/minio/pkg/event" xnet "github.com/minio/minio/pkg/net" @@ -37,6 +39,8 @@ type NATSArgs struct { Token string `json:"token"` Secure bool `json:"secure"` PingInterval int64 `json:"pingInterval"` + QueueDir string `json:"queueDir"` + QueueLimit uint64 `json:"queueLimit"` Streaming struct { Enable bool `json:"enable"` ClusterID string `json:"clusterID"` @@ -65,15 +69,57 @@ func (n NATSArgs) Validate() error { } } + if n.QueueDir != "" { + if !filepath.IsAbs(n.QueueDir) { + return errors.New("queueDir path should be absolute") + } + } + if n.QueueLimit > 10000 { + return errors.New("queueLimit should not exceed 10000") + } + return nil } +// To obtain a nats connection from args. +func (n NATSArgs) connectNats() (*nats.Conn, error) { + options := nats.DefaultOptions + options.Url = "nats://" + n.Address.String() + options.User = n.Username + options.Password = n.Password + options.Token = n.Token + options.Secure = n.Secure + return options.Connect() +} + +// To obtain a streaming connection from args. 
+func (n NATSArgs) connectStan() (stan.Conn, error) { + scheme := "nats" + if n.Secure { + scheme = "tls" + } + addressURL := scheme + "://" + n.Username + ":" + n.Password + "@" + n.Address.String() + + clientID, err := getNewUUID() + if err != nil { + return nil, err + } + + connOpts := []stan.Option{stan.NatsURL(addressURL)} + if n.Streaming.MaxPubAcksInflight > 0 { + connOpts = append(connOpts, stan.MaxPubAcksInflight(n.Streaming.MaxPubAcksInflight)) + } + + return stan.Connect(n.Streaming.ClusterID, clientID, connOpts...) +} + // NATSTarget - NATS target. type NATSTarget struct { id event.TargetID args NATSArgs natsConn *nats.Conn stanConn stan.Conn + store Store } // ID - returns target ID. @@ -81,11 +127,24 @@ func (target *NATSTarget) ID() event.TargetID { return target.id } -// Save - Sends event directly without persisting. +// Save - saves the events to the store which will be replayed when the Nats connection is active. func (target *NATSTarget) Save(eventData event.Event) error { + if target.store != nil { + return target.store.Put(eventData) + } + if target.args.Streaming.Enable { + if !target.stanConn.NatsConn().IsConnected() { + return errNotConnected + } + } else { + if !target.natsConn.IsConnected() { + return errNotConnected + } + } return target.send(eventData) } +// send - sends an event to the Nats. func (target *NATSTarget) send(eventData event.Event) error { objectName, err := url.QueryUnescape(eventData.S3.Object.Key) if err != nil { @@ -107,18 +166,62 @@ func (target *NATSTarget) send(eventData event.Event) error { } else { err = target.natsConn.Publish(target.args.Subject, data) } - return err } -// Send - interface compatible method does no-op. +// Send - sends event to Nats. 
func (target *NATSTarget) Send(eventKey string) error { - return nil + var connErr error + + if target.args.Streaming.Enable { + if target.stanConn == nil || target.stanConn.NatsConn() == nil { + target.stanConn, connErr = target.args.connectStan() + } else { + if !target.stanConn.NatsConn().IsConnected() { + return errNotConnected + } + } + } else { + if target.natsConn == nil { + target.natsConn, connErr = target.args.connectNats() + } else { + if !target.natsConn.IsConnected() { + return errNotConnected + } + } + } + + if connErr != nil { + if connErr.Error() == nats.ErrNoServers.Error() { + return errNotConnected + } + return connErr + } + + eventData, eErr := target.store.Get(eventKey) + if eErr != nil { + // The last event key in a successful batch will be sent in the channel atmost once by the replayEvents() + // Such events will not exist and wouldve been already been sent successfully. + if os.IsNotExist(eErr) { + return nil + } + return eErr + } + + if err := target.send(eventData); err != nil { + return err + } + + return target.store.Del(eventKey) } // Close - closes underneath connections to NATS server. func (target *NATSTarget) Close() (err error) { if target.stanConn != nil { + // closing the streaming connection does not close the provided NATS connection. + if target.stanConn.NatsConn() != nil { + target.stanConn.NatsConn().Close() + } err = target.stanConn.Close() } @@ -130,47 +233,48 @@ func (target *NATSTarget) Close() (err error) { } // NewNATSTarget - creates new NATS target. 
-func NewNATSTarget(id string, args NATSArgs) (*NATSTarget, error) { +func NewNATSTarget(id string, args NATSArgs, doneCh <-chan struct{}) (*NATSTarget, error) { var natsConn *nats.Conn var stanConn stan.Conn - var clientID string + var err error - if args.Streaming.Enable { - scheme := "nats" - if args.Secure { - scheme = "tls" - } - addressURL := scheme + "://" + args.Username + ":" + args.Password + "@" + args.Address.String() + var store Store - clientID, err = getNewUUID() - if err != nil { + if args.QueueDir != "" { + queueDir := filepath.Join(args.QueueDir, storePrefix+"-nats-"+id) + store = NewQueueStore(queueDir, args.QueueLimit) + if oErr := store.Open(); oErr != nil { + return nil, oErr + } + } + + if args.Streaming.Enable { + stanConn, err = args.connectStan() + } else { + natsConn, err = args.connectNats() + } + + if err != nil { + if store == nil || err.Error() != nats.ErrNoServers.Error() { return nil, err } - - connOpts := []stan.Option{stan.NatsURL(addressURL)} - if args.Streaming.MaxPubAcksInflight > 0 { - connOpts = append(connOpts, stan.MaxPubAcksInflight(args.Streaming.MaxPubAcksInflight)) - } - - stanConn, err = stan.Connect(args.Streaming.ClusterID, clientID, connOpts...) - } else { - options := nats.DefaultOptions - options.Url = "nats://" + args.Address.String() - options.User = args.Username - options.Password = args.Password - options.Token = args.Token - options.Secure = args.Secure - natsConn, err = options.Connect() - } - if err != nil { - return nil, err } - return &NATSTarget{ + target := &NATSTarget{ id: event.TargetID{ID: id, Name: "nats"}, args: args, stanConn: stanConn, natsConn: natsConn, - }, nil + store: store, + } + + if target.store != nil { + // Replays the events from the store. + eventKeyCh := replayEvents(target.store, doneCh) + // Start replaying events from the store. 
+ go sendEvents(target, eventKeyCh, doneCh) + } + + return target, nil } diff --git a/vendor/github.com/minio/minio/pkg/event/target/nsq.go b/vendor/github.com/minio/minio/pkg/event/target/nsq.go index b2ae34f0c2..1bb92cd289 100644 --- a/vendor/github.com/minio/minio/pkg/event/target/nsq.go +++ b/vendor/github.com/minio/minio/pkg/event/target/nsq.go @@ -20,11 +20,9 @@ import ( "crypto/tls" "encoding/json" "errors" - "net" "net/url" "os" "path/filepath" - "syscall" "github.com/nsqio/go-nsq" @@ -90,7 +88,7 @@ func (target *NSQTarget) Save(eventData event.Event) error { } if err := target.producer.Ping(); err != nil { // To treat "connection refused" errors as errNotConnected. - if isConnRefusedErr(err) { + if IsConnRefusedErr(err) { return errNotConnected } return err @@ -98,20 +96,6 @@ func (target *NSQTarget) Save(eventData event.Event) error { return target.send(eventData) } -// isConnRefusedErr - To check fot "connection refused" error. -func isConnRefusedErr(err error) bool { - if opErr, ok := err.(*net.OpError); ok { - if sysErr, ok := opErr.Err.(*os.SyscallError); ok { - if errno, ok := sysErr.Err.(syscall.Errno); ok { - if errno == syscall.ECONNREFUSED { - return true - } - } - } - } - return false -} - // send - sends an event to the NSQ. func (target *NSQTarget) send(eventData event.Event) error { objectName, err := url.QueryUnescape(eventData.S3.Object.Key) @@ -133,7 +117,7 @@ func (target *NSQTarget) Send(eventKey string) error { if err := target.producer.Ping(); err != nil { // To treat "connection refused" errors as errNotConnected. - if isConnRefusedErr(err) { + if IsConnRefusedErr(err) { return errNotConnected } return err @@ -198,7 +182,7 @@ func NewNSQTarget(id string, args NSQArgs, doneCh <-chan struct{}) (*NSQTarget, if err := target.producer.Ping(); err != nil { // To treat "connection refused" errors as errNotConnected. 
- if target.store == nil || !isConnRefusedErr(err) { + if target.store == nil || !IsConnRefusedErr(err) { return nil, err } } diff --git a/vendor/github.com/minio/minio/pkg/event/target/postgresql.go b/vendor/github.com/minio/minio/pkg/event/target/postgresql.go index bea633de82..839a017fb1 100644 --- a/vendor/github.com/minio/minio/pkg/event/target/postgresql.go +++ b/vendor/github.com/minio/minio/pkg/event/target/postgresql.go @@ -56,8 +56,11 @@ package target import ( "database/sql" "encoding/json" + "errors" "fmt" "net/url" + "os" + "path/filepath" "strconv" "strings" "time" @@ -89,6 +92,8 @@ type PostgreSQLArgs struct { User string `json:"user"` // default: user running minio Password string `json:"password"` // default: no password Database string `json:"database"` // default: same as user + QueueDir string `json:"queueDir"` + QueueLimit uint64 `json:"queueLimit"` } // Validate PostgreSQLArgs fields @@ -122,6 +127,15 @@ func (p PostgreSQLArgs) Validate() error { } } + if p.QueueDir != "" { + if !filepath.IsAbs(p.QueueDir) { + return errors.New("queueDir path should be absolute") + } + } + if p.QueueLimit > 10000 { + return errors.New("queueLimit should not exceed 10000") + } + return nil } @@ -133,6 +147,8 @@ type PostgreSQLTarget struct { deleteStmt *sql.Stmt insertStmt *sql.Stmt db *sql.DB + store Store + firstPing bool } // ID - returns target ID. @@ -140,11 +156,26 @@ func (target *PostgreSQLTarget) ID() event.TargetID { return target.id } -// Save - Sends event directly without persisting. +// Save - saves the events to the store if questore is configured, which will be replayed when the PostgreSQL connection is active. func (target *PostgreSQLTarget) Save(eventData event.Event) error { + if target.store != nil { + return target.store.Put(eventData) + } + if err := target.db.Ping(); err != nil { + if IsConnErr(err) { + return errNotConnected + } + return err + } return target.send(eventData) } +// IsConnErr - To detect a connection error. 
+func IsConnErr(err error) bool { + return IsConnRefusedErr(err) || err.Error() == "sql: database is closed" || err.Error() == "sql: statement is closed" || err.Error() == "invalid connection" +} + +// send - sends an event to the PostgreSQL. func (target *PostgreSQLTarget) send(eventData event.Event) error { if target.args.Format == event.NamespaceFormat { objectName, err := url.QueryUnescape(eventData.S3.Object.Key) @@ -177,16 +208,52 @@ func (target *PostgreSQLTarget) send(eventData event.Event) error { return err } - _, err = target.insertStmt.Exec(eventTime, data) - return err + if _, err = target.insertStmt.Exec(eventTime, data); err != nil { + return err + } } return nil } -// Send - interface compatible method does no-op. +// Send - reads an event from store and sends it to PostgreSQL. func (target *PostgreSQLTarget) Send(eventKey string) error { - return nil + + if err := target.db.Ping(); err != nil { + if IsConnErr(err) { + return errNotConnected + } + return err + } + + if !target.firstPing { + if err := target.executeStmts(); err != nil { + if IsConnErr(err) { + return errNotConnected + } + return err + } + } + + eventData, eErr := target.store.Get(eventKey) + if eErr != nil { + // The last event key in a successful batch will be sent in the channel atmost once by the replayEvents() + // Such events will not exist and wouldve been already been sent successfully. + if os.IsNotExist(eErr) { + return nil + } + return eErr + } + + if err := target.send(eventData); err != nil { + if IsConnErr(err) { + return errNotConnected + } + return err + } + + // Delete the event from store. + return target.store.Del(eventKey) } // Close - closes underneath connections to PostgreSQL database. @@ -209,8 +276,45 @@ func (target *PostgreSQLTarget) Close() error { return target.db.Close() } +// Executes the table creation statements. 
+func (target *PostgreSQLTarget) executeStmts() error { + + _, err := target.db.Exec(fmt.Sprintf(psqlTableExists, target.args.Table)) + if err != nil { + createStmt := psqlCreateNamespaceTable + if target.args.Format == event.AccessFormat { + createStmt = psqlCreateAccessTable + } + + if _, dbErr := target.db.Exec(fmt.Sprintf(createStmt, target.args.Table)); dbErr != nil { + return dbErr + } + } + + switch target.args.Format { + case event.NamespaceFormat: + // insert or update statement + if target.updateStmt, err = target.db.Prepare(fmt.Sprintf(psqlUpdateRow, target.args.Table)); err != nil { + return err + } + // delete statement + if target.deleteStmt, err = target.db.Prepare(fmt.Sprintf(psqlDeleteRow, target.args.Table)); err != nil { + return err + } + case event.AccessFormat: + // insert statement + if target.insertStmt, err = target.db.Prepare(fmt.Sprintf(psqlInsertRow, target.args.Table)); err != nil { + return err + } + } + + return nil +} + // NewPostgreSQLTarget - creates new PostgreSQL target. 
-func NewPostgreSQLTarget(id string, args PostgreSQLArgs) (*PostgreSQLTarget, error) { +func NewPostgreSQLTarget(id string, args PostgreSQLArgs, doneCh <-chan struct{}) (*PostgreSQLTarget, error) { + var firstPing bool + params := []string{args.ConnectionString} if !args.Host.IsEmpty() { params = append(params, "host="+args.Host.String()) @@ -234,45 +338,42 @@ func NewPostgreSQLTarget(id string, args PostgreSQLArgs) (*PostgreSQLTarget, err return nil, err } - if err = db.Ping(); err != nil { - return nil, err - } + var store Store - if _, err = db.Exec(fmt.Sprintf(psqlTableExists, args.Table)); err != nil { - createStmt := psqlCreateNamespaceTable - if args.Format == event.AccessFormat { - createStmt = psqlCreateAccessTable - } - - if _, err = db.Exec(fmt.Sprintf(createStmt, args.Table)); err != nil { - return nil, err + if args.QueueDir != "" { + queueDir := filepath.Join(args.QueueDir, storePrefix+"-postgresql-"+id) + store = NewQueueStore(queueDir, args.QueueLimit) + if oErr := store.Open(); oErr != nil { + return nil, oErr } } - var updateStmt, deleteStmt, insertStmt *sql.Stmt - switch args.Format { - case event.NamespaceFormat: - // insert or update statement - if updateStmt, err = db.Prepare(fmt.Sprintf(psqlUpdateRow, args.Table)); err != nil { - return nil, err - } - // delete statement - if deleteStmt, err = db.Prepare(fmt.Sprintf(psqlDeleteRow, args.Table)); err != nil { - return nil, err - } - case event.AccessFormat: - // insert statement - if insertStmt, err = db.Prepare(fmt.Sprintf(psqlInsertRow, args.Table)); err != nil { - return nil, err - } + target := &PostgreSQLTarget{ + id: event.TargetID{ID: id, Name: "postgresql"}, + args: args, + db: db, + store: store, + firstPing: firstPing, } - return &PostgreSQLTarget{ - id: event.TargetID{ID: id, Name: "postgresql"}, - args: args, - updateStmt: updateStmt, - deleteStmt: deleteStmt, - insertStmt: insertStmt, - db: db, - }, nil + err = target.db.Ping() + if err != nil { + if target.store == nil || 
!IsConnRefusedErr(err) { + return nil, err + } + } else { + if err = target.executeStmts(); err != nil { + return nil, err + } + target.firstPing = true + } + + if target.store != nil { + // Replays the events from the store. + eventKeyCh := replayEvents(target.store, doneCh) + // Start replaying events from the store. + go sendEvents(target, eventKeyCh, doneCh) + } + + return target, nil } diff --git a/vendor/github.com/minio/minio/pkg/event/target/redis.go b/vendor/github.com/minio/minio/pkg/event/target/redis.go index 2db62ba975..259b81ded7 100644 --- a/vendor/github.com/minio/minio/pkg/event/target/redis.go +++ b/vendor/github.com/minio/minio/pkg/event/target/redis.go @@ -17,24 +17,31 @@ package target import ( + "context" "encoding/json" + "errors" "fmt" "net/url" + "os" + "path/filepath" "strings" "time" "github.com/gomodule/redigo/redis" + "github.com/minio/minio/cmd/logger" "github.com/minio/minio/pkg/event" xnet "github.com/minio/minio/pkg/net" ) // RedisArgs - Redis target arguments. 
type RedisArgs struct { - Enable bool `json:"enable"` - Format string `json:"format"` - Addr xnet.Host `json:"address"` - Password string `json:"password"` - Key string `json:"key"` + Enable bool `json:"enable"` + Format string `json:"format"` + Addr xnet.Host `json:"address"` + Password string `json:"password"` + Key string `json:"key"` + QueueDir string `json:"queueDir"` + QueueLimit uint64 `json:"queueLimit"` } // Validate RedisArgs fields @@ -54,14 +61,45 @@ func (r RedisArgs) Validate() error { return fmt.Errorf("empty key") } + if r.QueueDir != "" { + if !filepath.IsAbs(r.QueueDir) { + return errors.New("queueDir path should be absolute") + } + } + if r.QueueLimit > 10000 { + return errors.New("queueLimit should not exceed 10000") + } + + return nil +} + +func (r RedisArgs) validateFormat(c redis.Conn) error { + typeAvailable, err := redis.String(c.Do("TYPE", r.Key)) + if err != nil { + return err + } + + if typeAvailable != "none" { + expectedType := "hash" + if r.Format == event.AccessFormat { + expectedType = "list" + } + + if typeAvailable != expectedType { + return fmt.Errorf("expected type %v does not match with available type %v", expectedType, typeAvailable) + } + } + return nil } // RedisTarget - Redis target. type RedisTarget struct { - id event.TargetID - args RedisArgs - pool *redis.Pool + id event.TargetID + args RedisArgs + pool *redis.Pool + store Store + firstPing bool } // ID - returns target ID. @@ -69,16 +107,32 @@ func (target *RedisTarget) ID() event.TargetID { return target.id } -// Save - Sends event directly without persisting. +// Save - saves the events to the store if questore is configured, which will be replayed when the redis connection is active. 
func (target *RedisTarget) Save(eventData event.Event) error { + if target.store != nil { + return target.store.Put(eventData) + } + conn := target.pool.Get() + defer func() { + cErr := conn.Close() + logger.LogOnceIf(context.Background(), cErr, target.ID()) + }() + _, pingErr := conn.Do("PING") + if pingErr != nil { + if IsConnRefusedErr(pingErr) { + return errNotConnected + } + return pingErr + } return target.send(eventData) } +// send - sends an event to the redis. func (target *RedisTarget) send(eventData event.Event) error { conn := target.pool.Get() defer func() { - // FIXME: log returned error. ignore time being. - _ = conn.Close() + cErr := conn.Close() + logger.LogOnceIf(context.Background(), cErr, target.ID()) }() if target.args.Format == event.NamespaceFormat { @@ -98,7 +152,9 @@ func (target *RedisTarget) send(eventData event.Event) error { _, err = conn.Do("HSET", target.args.Key, key, data) } - return err + if err != nil { + return err + } } if target.args.Format == event.AccessFormat { @@ -106,16 +162,58 @@ func (target *RedisTarget) send(eventData event.Event) error { if err != nil { return err } - _, err = conn.Do("RPUSH", target.args.Key, data) - return err + if _, err := conn.Do("RPUSH", target.args.Key, data); err != nil { + return err + } } return nil } -// Send - interface compatible method does no-op. +// Send - reads an event from store and sends it to redis. 
func (target *RedisTarget) Send(eventKey string) error { - return nil + conn := target.pool.Get() + defer func() { + cErr := conn.Close() + logger.LogOnceIf(context.Background(), cErr, target.ID()) + }() + _, pingErr := conn.Do("PING") + if pingErr != nil { + if IsConnRefusedErr(pingErr) { + return errNotConnected + } + return pingErr + } + + if !target.firstPing { + if err := target.args.validateFormat(conn); err != nil { + if IsConnRefusedErr(err) { + return errNotConnected + } + return err + } + target.firstPing = true + } + + eventData, eErr := target.store.Get(eventKey) + if eErr != nil { + // The last event key in a successful batch will be sent in the channel at most once by the replayEvents() + // Such events will not exist and would already have been sent successfully. + if os.IsNotExist(eErr) { + return nil + } + return eErr + } + + if err := target.send(eventData); err != nil { + if IsConnRefusedErr(err) { + return errNotConnected + } + return err + } + + // Delete the event from store. + return target.store.Del(eventKey) } // Close - does nothing and available for interface compatibility. @@ -124,7 +222,7 @@ func (target *RedisTarget) Close() error { return nil } // NewRedisTarget - creates new Redis target. -func NewRedisTarget(id string, args RedisArgs) (*RedisTarget, error) { +func NewRedisTarget(id string, args RedisArgs, doneCh <-chan struct{}) (*RedisTarget, error) { pool := &redis.Pool{ MaxIdle: 3, IdleTimeout: 2 * 60 * time.Second, @@ -139,8 +237,9 @@ func NewRedisTarget(id string, args RedisArgs) (*RedisTarget, error) { } if _, err = conn.Do("AUTH", args.Password); err != nil { - // FIXME: log returned error. ignore time being. 
- _ = conn.Close() + cErr := conn.Close() + targetID := event.TargetID{ID: id, Name: "redis"} + logger.LogOnceIf(context.Background(), cErr, targetID.String()) return nil, err } @@ -152,35 +251,47 @@ func NewRedisTarget(id string, args RedisArgs) (*RedisTarget, error) { }, } - conn := pool.Get() + var store Store + + if args.QueueDir != "" { + queueDir := filepath.Join(args.QueueDir, storePrefix+"-redis-"+id) + store = NewQueueStore(queueDir, args.QueueLimit) + if oErr := store.Open(); oErr != nil { + return nil, oErr + } + } + + target := &RedisTarget{ + id: event.TargetID{ID: id, Name: "redis"}, + args: args, + pool: pool, + store: store, + } + + conn := target.pool.Get() defer func() { - // FIXME: log returned error. ignore time being. - _ = conn.Close() + cErr := conn.Close() + logger.LogOnceIf(context.Background(), cErr, target.ID()) }() - if _, err := conn.Do("PING"); err != nil { - return nil, err - } - - typeAvailable, err := redis.String(conn.Do("TYPE", args.Key)) - if err != nil { - return nil, err - } - - if typeAvailable != "none" { - expectedType := "hash" - if args.Format == event.AccessFormat { - expectedType = "list" + _, pingErr := conn.Do("PING") + if pingErr != nil { + if target.store == nil || !IsConnRefusedErr(pingErr) { + return nil, pingErr } - - if typeAvailable != expectedType { - return nil, fmt.Errorf("expected type %v does not match with available type %v", expectedType, typeAvailable) + } else { + if err := target.args.validateFormat(conn); err != nil { + return nil, err } + target.firstPing = true } - return &RedisTarget{ - id: event.TargetID{ID: id, Name: "redis"}, - args: args, - pool: pool, - }, nil + if target.store != nil { + // Replays the events from the store. + eventKeyCh := replayEvents(target.store, doneCh) + // Start replaying events from the store. 
+ go sendEvents(target, eventKeyCh, doneCh) + } + + return target, nil } diff --git a/vendor/github.com/minio/minio/pkg/event/target/store.go b/vendor/github.com/minio/minio/pkg/event/target/store.go index 1b94ca7e10..ae30683e13 100644 --- a/vendor/github.com/minio/minio/pkg/event/target/store.go +++ b/vendor/github.com/minio/minio/pkg/event/target/store.go @@ -79,6 +79,20 @@ func replayEvents(store Store, doneCh <-chan struct{}) <-chan string { return eventKeyCh } +// IsConnRefusedErr - To check for "connection refused" error. +func IsConnRefusedErr(err error) bool { + if opErr, ok := err.(*net.OpError); ok { + if sysErr, ok := opErr.Err.(*os.SyscallError); ok { + if errno, ok := sysErr.Err.(syscall.Errno); ok { + if errno == syscall.ECONNREFUSED { + return true + } + } + } + } + return false +} + // isConnResetErr - Checks for connection reset errors. func isConnResetErr(err error) bool { if opErr, ok := err.(*net.OpError); ok { diff --git a/vendor/github.com/minio/minio/pkg/event/target/webhook.go b/vendor/github.com/minio/minio/pkg/event/target/webhook.go index 2f2a4a0b34..c95bdab1de 100644 --- a/vendor/github.com/minio/minio/pkg/event/target/webhook.go +++ b/vendor/github.com/minio/minio/pkg/event/target/webhook.go @@ -30,7 +30,6 @@ import ( "net/url" "os" "path/filepath" - "syscall" "time" "github.com/minio/minio/pkg/event" @@ -134,20 +133,6 @@ func (target *WebhookTarget) send(eventData event.Event) error { return nil } -// IsConnRefusedErr - To check for "connection refused" errors. -func IsConnRefusedErr(err error) bool { - if opErr, ok := err.(*net.OpError); ok { - if sysErr, ok := opErr.Err.(*os.SyscallError); ok { - if errno, ok := sysErr.Err.(syscall.Errno); ok { - if errno == syscall.ECONNREFUSED { - return true - } - } - } - } - return false -} - // Send - reads an event from store and sends it to webhook. 
func (target *WebhookTarget) Send(eventKey string) error { diff --git a/vendor/github.com/minio/minio/pkg/handlers/forwarder.go b/vendor/github.com/minio/minio/pkg/handlers/forwarder.go index 1065e19116..9c0eb2ad5f 100644 --- a/vendor/github.com/minio/minio/pkg/handlers/forwarder.go +++ b/vendor/github.com/minio/minio/pkg/handlers/forwarder.go @@ -1,5 +1,5 @@ /* - * MinIO Cloud Storage, (C) 2018 MinIO, Inc. + * MinIO Cloud Storage, (C) 2018-2019 MinIO, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. @@ -32,6 +32,7 @@ const defaultFlushInterval = time.Duration(100) * time.Millisecond type Forwarder struct { RoundTripper http.RoundTripper PassHost bool + Logger func(error) // internal variables rewriter *headerRewriter @@ -58,10 +59,20 @@ func (f *Forwarder) ServeHTTP(w http.ResponseWriter, inReq *http.Request) { }, Transport: f.RoundTripper, FlushInterval: defaultFlushInterval, + ErrorHandler: f.customErrHandler, } revproxy.ServeHTTP(w, outReq) } +// customErrHandler is originally implemented to avoid having the following error +// `http: proxy error: context canceled` printed by Golang +func (f *Forwarder) customErrHandler(w http.ResponseWriter, r *http.Request, err error) { + if f.Logger != nil && err != context.Canceled { + f.Logger(err) + } + w.WriteHeader(http.StatusBadGateway) +} + func (f *Forwarder) getURLFromRequest(req *http.Request) *url.URL { // If the Request was created by Go via a real HTTP request, RequestURI will // contain the original query string. 
If the Request was created in code, RequestURI diff --git a/vendor/github.com/minio/minio/pkg/iam/policy/opa.go b/vendor/github.com/minio/minio/pkg/iam/policy/opa.go index 49c4d41cbb..d8b3faa8f5 100644 --- a/vendor/github.com/minio/minio/pkg/iam/policy/opa.go +++ b/vendor/github.com/minio/minio/pkg/iam/policy/opa.go @@ -22,7 +22,6 @@ import ( "io" "io/ioutil" "net/http" - "os" xnet "github.com/minio/minio/pkg/net" ) @@ -37,6 +36,23 @@ type OpaArgs struct { // Validate - validate opa configuration params. func (a *OpaArgs) Validate() error { + req, err := http.NewRequest("POST", a.URL.String(), bytes.NewReader([]byte(""))) + if err != nil { + return err + } + + req.Header.Set("Content-Type", "application/json") + if a.AuthToken != "" { + req.Header.Set("Authorization", a.AuthToken) + } + + client := &http.Client{Transport: a.Transport} + resp, err := client.Do(req) + if err != nil { + return err + } + defer a.CloseRespFn(resp.Body) + return nil } @@ -46,17 +62,8 @@ func (a *OpaArgs) UnmarshalJSON(data []byte) error { type subOpaArgs OpaArgs var so subOpaArgs - if opaURL, ok := os.LookupEnv("MINIO_IAM_OPA_URL"); ok { - u, err := xnet.ParseURL(opaURL) - if err != nil { - return err - } - so.URL = u - so.AuthToken = os.Getenv("MINIO_IAM_OPA_AUTHTOKEN") - } else { - if err := json.Unmarshal(data, &so); err != nil { - return err - } + if err := json.Unmarshal(data, &so); err != nil { + return err } oa := OpaArgs(so) @@ -65,10 +72,6 @@ func (a *OpaArgs) UnmarshalJSON(data []byte) error { return nil } - if err := oa.Validate(); err != nil { - return err - } - *a = oa return nil } @@ -92,9 +95,9 @@ func NewOpa(args OpaArgs) *Opa { } // IsAllowed - checks given policy args is allowed to continue the REST API. 
-func (o *Opa) IsAllowed(args Args) bool { +func (o *Opa) IsAllowed(args Args) (bool, error) { if o == nil { - return false + return false, nil } // OPA input @@ -103,12 +106,12 @@ func (o *Opa) IsAllowed(args Args) bool { inputBytes, err := json.Marshal(body) if err != nil { - return false + return false, err } req, err := http.NewRequest("POST", o.args.URL.String(), bytes.NewReader(inputBytes)) if err != nil { - return false + return false, err } req.Header.Set("Content-Type", "application/json") @@ -118,14 +121,14 @@ func (o *Opa) IsAllowed(args Args) bool { resp, err := o.client.Do(req) if err != nil { - return false + return false, err } defer o.args.CloseRespFn(resp.Body) // Read the body to be saved later. opaRespBytes, err := ioutil.ReadAll(resp.Body) if err != nil { - return false + return false, err } // Handle large OPA responses when OPA URL is of @@ -149,9 +152,9 @@ func (o *Opa) IsAllowed(args Args) bool { respBody.Seek(0, 0) var resultAllow opaResultAllow if err = json.NewDecoder(respBody).Decode(&resultAllow); err != nil { - return false + return false, err } - return resultAllow.Result.Allow + return resultAllow.Result.Allow, nil } - return result.Result + return result.Result, nil } diff --git a/vendor/github.com/minio/minio/pkg/iam/validator/jwt.go b/vendor/github.com/minio/minio/pkg/iam/validator/jwt.go index 13c2abb798..6afed4e5fe 100644 --- a/vendor/github.com/minio/minio/pkg/iam/validator/jwt.go +++ b/vendor/github.com/minio/minio/pkg/iam/validator/jwt.go @@ -24,7 +24,6 @@ import ( "fmt" "net" "net/http" - "os" "strconv" "time" @@ -38,11 +37,6 @@ type JWKSArgs struct { publicKeys map[string]crypto.PublicKey } -// Validate JWT authentication target arguments -func (r *JWKSArgs) Validate() error { - return nil -} - // PopulatePublicKey - populates a new publickey from the JWKS URL. 
func (r *JWKSArgs) PopulatePublicKey() error { insecureClient := &http.Client{Transport: newCustomHTTPTransport(true)} @@ -83,17 +77,8 @@ func (r *JWKSArgs) UnmarshalJSON(data []byte) error { type subJWKSArgs JWKSArgs var sr subJWKSArgs - // IAM related envs. - if jwksURL, ok := os.LookupEnv("MINIO_IAM_JWKS_URL"); ok { - u, err := xnet.ParseURL(jwksURL) - if err != nil { - return err - } - sr.URL = u - } else { - if err := json.Unmarshal(data, &sr); err != nil { - return err - } + if err := json.Unmarshal(data, &sr); err != nil { + return err } ar := JWKSArgs(sr) @@ -101,13 +86,6 @@ func (r *JWKSArgs) UnmarshalJSON(data []byte) error { *r = ar return nil } - if err := ar.Validate(); err != nil { - return err - } - - if err := ar.PopulatePublicKey(); err != nil { - return err - } *r = ar return nil diff --git a/vendor/github.com/minio/minio/pkg/madmin/group-commands.go b/vendor/github.com/minio/minio/pkg/madmin/group-commands.go new file mode 100644 index 0000000000..84377fdb8f --- /dev/null +++ b/vendor/github.com/minio/minio/pkg/madmin/group-commands.go @@ -0,0 +1,164 @@ +/* + * MinIO Cloud Storage, (C) 2018-2019 MinIO, Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package madmin + +import ( + "encoding/json" + "io/ioutil" + "net/http" + "net/url" +) + +// GroupAddRemove is type for adding/removing members to/from a group. 
+type GroupAddRemove struct { + Group string `json:"group"` + Members []string `json:"members"` + IsRemove bool `json:"isRemove"` +} + +// UpdateGroupMembers - adds/removes users to/from a group. Server +// creates the group as needed. Group is removed if remove request is +// made on empty group. +func (adm *AdminClient) UpdateGroupMembers(g GroupAddRemove) error { + data, err := json.Marshal(g) + if err != nil { + return err + } + + reqData := requestData{ + relPath: "/v1/update-group-members", + content: data, + } + + // Execute PUT on /minio/admin/v1/update-group-members + resp, err := adm.executeMethod("PUT", reqData) + + defer closeResponse(resp) + if err != nil { + return err + } + + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp) + } + + return nil +} + +// GroupDesc is a type that holds group info along with the policy +// attached to it. +type GroupDesc struct { + Name string `json:"name"` + Status string `json:"status"` + Members []string `json:"members"` + Policy string `json:"policy"` +} + +// GetGroupDescription - fetches information on a group. +func (adm *AdminClient) GetGroupDescription(group string) (*GroupDesc, error) { + v := url.Values{} + v.Set("group", group) + reqData := requestData{ + relPath: "/v1/group", + queryValues: v, + } + + resp, err := adm.executeMethod("GET", reqData) + defer closeResponse(resp) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp) + } + + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + gd := GroupDesc{} + if err = json.Unmarshal(data, &gd); err != nil { + return nil, err + } + + return &gd, nil +} + +// ListGroups - lists all groups names present on the server. 
+func (adm *AdminClient) ListGroups() ([]string, error) { + reqData := requestData{ + relPath: "/v1/groups", + } + + resp, err := adm.executeMethod("GET", reqData) + defer closeResponse(resp) + if err != nil { + return nil, err + } + + if resp.StatusCode != http.StatusOK { + return nil, httpRespToErrorResponse(resp) + } + + data, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, err + } + + groups := []string{} + if err = json.Unmarshal(data, &groups); err != nil { + return nil, err + } + + return groups, nil +} + +// GroupStatus - group status. +type GroupStatus string + +// GroupStatus values. +const ( + GroupEnabled GroupStatus = "enabled" + GroupDisabled GroupStatus = "disabled" +) + +// SetGroupStatus - sets the status of a group. +func (adm *AdminClient) SetGroupStatus(group string, status GroupStatus) error { + v := url.Values{} + v.Set("group", group) + v.Set("status", string(status)) + + reqData := requestData{ + relPath: "/v1/set-group-status", + queryValues: v, + } + + resp, err := adm.executeMethod("PUT", reqData) + defer closeResponse(resp) + if err != nil { + return err + } + + if resp.StatusCode != http.StatusOK { + return httpRespToErrorResponse(resp) + } + + return nil +} diff --git a/vendor/github.com/minio/minio/pkg/net/url.go b/vendor/github.com/minio/minio/pkg/net/url.go index 49549ba50c..8ec122df4a 100644 --- a/vendor/github.com/minio/minio/pkg/net/url.go +++ b/vendor/github.com/minio/minio/pkg/net/url.go @@ -21,6 +21,7 @@ import ( "errors" "net/url" "path" + "strings" ) // URL - improved JSON friendly url.URL. @@ -100,6 +101,11 @@ func ParseURL(s string) (u *URL, err error) { uu.Path = path.Clean(uu.Path) } + // path.Clean removes the trailing '/' and converts '//' to '/'. 
+ if strings.HasSuffix(s, "/") && !strings.HasSuffix(uu.Path, "/") { + uu.Path += "/" + } + v := URL(*uu) u = &v return u, nil diff --git a/vendor/github.com/minio/minio/pkg/net/url_test.go b/vendor/github.com/minio/minio/pkg/net/url_test.go index 96b8421db4..49dcd4339a 100644 --- a/vendor/github.com/minio/minio/pkg/net/url_test.go +++ b/vendor/github.com/minio/minio/pkg/net/url_test.go @@ -107,8 +107,9 @@ func TestURLUnmarshalJSON(t *testing.T) { {[]byte(`"https://play.min.io:0"`), &URL{Scheme: "https", Host: "play.min.io:0"}, false}, {[]byte(`"https://147.75.201.93:9000/"`), &URL{Scheme: "https", Host: "147.75.201.93:9000", Path: "/"}, false}, {[]byte(`"https://s3.amazonaws.com/?location"`), &URL{Scheme: "https", Host: "s3.amazonaws.com", Path: "/", RawQuery: "location"}, false}, - {[]byte(`"http://myminio:10000/mybucket//myobject/"`), &URL{Scheme: "http", Host: "myminio:10000", Path: "/mybucket/myobject"}, false}, + {[]byte(`"http://myminio:10000/mybucket/myobject//"`), &URL{Scheme: "http", Host: "myminio:10000", Path: "/mybucket/myobject/"}, false}, {[]byte(`"ftp://myftp.server:10000/myuser"`), &URL{Scheme: "ftp", Host: "myftp.server:10000", Path: "/myuser"}, false}, + {[]byte(`"http://webhook.server:10000/mywebhook/"`), &URL{Scheme: "http", Host: "webhook.server:10000", Path: "/mywebhook/"}, false}, {[]byte(`"myserver:1000"`), nil, true}, {[]byte(`"http://:1000/mybucket"`), nil, true}, {[]byte(`"https://147.75.201.93:90000/"`), nil, true}, @@ -142,7 +143,7 @@ func TestParseURL(t *testing.T) { {"https://play.min.io:0", &URL{Scheme: "https", Host: "play.min.io:0"}, false}, {"https://147.75.201.93:9000/", &URL{Scheme: "https", Host: "147.75.201.93:9000", Path: "/"}, false}, {"https://s3.amazonaws.com/?location", &URL{Scheme: "https", Host: "s3.amazonaws.com", Path: "/", RawQuery: "location"}, false}, - {"http://myminio:10000/mybucket//myobject/", &URL{Scheme: "http", Host: "myminio:10000", Path: "/mybucket/myobject"}, false}, + 
{"http://myminio:10000/mybucket//myobject/", &URL{Scheme: "http", Host: "myminio:10000", Path: "/mybucket/myobject/"}, false}, {"ftp://myftp.server:10000/myuser", &URL{Scheme: "ftp", Host: "myftp.server:10000", Path: "/myuser"}, false}, {"myserver:1000", nil, true}, {"http://:1000/mybucket", nil, true}, diff --git a/vendor/github.com/tevino/abool/.gitignore b/vendor/github.com/tevino/abool/.gitignore new file mode 100644 index 0000000000..daf913b1b3 --- /dev/null +++ b/vendor/github.com/tevino/abool/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/tevino/abool/LICENSE b/vendor/github.com/tevino/abool/LICENSE new file mode 100644 index 0000000000..f20dac8ac9 --- /dev/null +++ b/vendor/github.com/tevino/abool/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Tevin Zhang + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/tevino/abool/README.md b/vendor/github.com/tevino/abool/README.md new file mode 100644 index 0000000000..042d1cadd9 --- /dev/null +++ b/vendor/github.com/tevino/abool/README.md @@ -0,0 +1,49 @@ +# ABool :bulb: +[![Go Report Card](https://goreportcard.com/badge/github.com/tevino/abool)](https://goreportcard.com/report/github.com/tevino/abool) +[![GoDoc](https://godoc.org/github.com/tevino/abool?status.svg)](https://godoc.org/github.com/tevino/abool) + +Atomic Boolean library for Go, optimized for performance yet simple to use. + +Use this for cleaner code. + +## Usage + +```go +import "github.com/tevino/abool" + +cond := abool.New() // default to false + +cond.Set() // Set to true +cond.IsSet() // Returns true +cond.UnSet() // Set to false +cond.SetTo(true) // Set to whatever you want +cond.SetToIf(false, true) // Set to true if it is false, returns false(not set) + + +// embedding +type Foo struct { + cond *abool.AtomicBool // always use pointer to avoid copy +} +``` + +## Benchmark: + +- Go 1.6.2 +- OS X 10.11.4 + +```shell +# Read +BenchmarkMutexRead-4 100000000 21.0 ns/op +BenchmarkAtomicValueRead-4 200000000 6.30 ns/op +BenchmarkAtomicBoolRead-4 300000000 4.21 ns/op # <--- This package + +# Write +BenchmarkMutexWrite-4 100000000 21.6 ns/op +BenchmarkAtomicValueWrite-4 30000000 43.4 ns/op +BenchmarkAtomicBoolWrite-4 200000000 9.87 ns/op # <--- This package + +# CAS +BenchmarkMutexCAS-4 30000000 44.9 ns/op +BenchmarkAtomicBoolCAS-4 100000000 11.7 ns/op # <--- This package +``` + diff --git a/vendor/github.com/tevino/abool/bool.go b/vendor/github.com/tevino/abool/bool.go new file mode 100644 index 0000000000..fdda210365 --- /dev/null +++ 
b/vendor/github.com/tevino/abool/bool.go @@ -0,0 +1,63 @@ +// Package abool provides atomic Boolean type for cleaner code and +// better performance. +package abool + +import "sync/atomic" + +// New creates an AtomicBool with default to false +func New() *AtomicBool { + return new(AtomicBool) +} + +// NewBool creates an AtomicBool with given default value +func NewBool(ok bool) *AtomicBool { + ab := New() + if ok { + ab.Set() + } + return ab +} + +// AtomicBool is an atomic Boolean +// Its methods are all atomic, thus safe to be called by +// multiple goroutines simultaneously +// Note: When embedding into a struct, one should always use +// *AtomicBool to avoid copy +type AtomicBool int32 + +// Set sets the Boolean to true +func (ab *AtomicBool) Set() { + atomic.StoreInt32((*int32)(ab), 1) +} + +// UnSet sets the Boolean to false +func (ab *AtomicBool) UnSet() { + atomic.StoreInt32((*int32)(ab), 0) +} + +// IsSet returns whether the Boolean is true +func (ab *AtomicBool) IsSet() bool { + return atomic.LoadInt32((*int32)(ab)) == 1 +} + +// SetTo sets the boolean with given Boolean +func (ab *AtomicBool) SetTo(yes bool) { + if yes { + atomic.StoreInt32((*int32)(ab), 1) + } else { + atomic.StoreInt32((*int32)(ab), 0) + } +} + +// SetToIf sets the Boolean to new only if the Boolean matches the old +// Returns whether the set was done +func (ab *AtomicBool) SetToIf(old, new bool) (set bool) { + var o, n int32 + if old { + o = 1 + } + if new { + n = 1 + } + return atomic.CompareAndSwapInt32((*int32)(ab), o, n) +} diff --git a/vendor/github.com/tevino/abool/bool_test.go b/vendor/github.com/tevino/abool/bool_test.go new file mode 100644 index 0000000000..d63517f515 --- /dev/null +++ b/vendor/github.com/tevino/abool/bool_test.go @@ -0,0 +1,176 @@ +package abool + +import ( + "sync" + "sync/atomic" + "testing" +) + +func TestBool(t *testing.T) { + v := NewBool(true) + if !v.IsSet() { + t.Fatal("NewValue(true) failed") + } + + v = NewBool(false) + if v.IsSet() { + 
t.Fatal("NewValue(false) failed") + } + + v = New() + if v.IsSet() { + t.Fatal("Empty value of AtomicBool should be false") + } + + v.Set() + if !v.IsSet() { + t.Fatal("AtomicBool.Set() failed") + } + + v.UnSet() + if v.IsSet() { + t.Fatal("AtomicBool.UnSet() failed") + } + + v.SetTo(true) + if !v.IsSet() { + t.Fatal("AtomicBool.SetTo(true) failed") + } + + v.SetTo(false) + if v.IsSet() { + t.Fatal("AtomicBool.SetTo(false) failed") + } + + if set := v.SetToIf(true, false); set || v.IsSet() { + t.Fatal("AtomicBool.SetTo(true, false) failed") + } + + if set := v.SetToIf(false, true); !set || !v.IsSet() { + t.Fatal("AtomicBool.SetTo(false, true) failed") + } +} + +func TestRace(t *testing.T) { + repeat := 10000 + var wg sync.WaitGroup + wg.Add(repeat * 3) + v := New() + + // Writer + go func() { + for i := 0; i < repeat; i++ { + v.Set() + wg.Done() + } + }() + + // Reader + go func() { + for i := 0; i < repeat; i++ { + v.IsSet() + wg.Done() + } + }() + + // Writer + go func() { + for i := 0; i < repeat; i++ { + v.UnSet() + wg.Done() + } + }() + wg.Wait() +} + +func ExampleAtomicBool() { + cond := New() // default to false + cond.Set() // set to true + cond.IsSet() // returns true + cond.UnSet() // set to false + cond.SetTo(true) // set to whatever you want +} + +// Benchmark Read + +func BenchmarkMutexRead(b *testing.B) { + var m sync.RWMutex + var v bool + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.RLock() + _ = v + m.RUnlock() + } +} + +func BenchmarkAtomicValueRead(b *testing.B) { + var v atomic.Value + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = v.Load() != nil + } +} + +func BenchmarkAtomicBoolRead(b *testing.B) { + v := New() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _ = v.IsSet() + } +} + +// Benchmark Write + +func BenchmarkMutexWrite(b *testing.B) { + var m sync.RWMutex + var v bool + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.RLock() + v = true + m.RUnlock() + } + b.StopTimer() + _ = v +} + +func BenchmarkAtomicValueWrite(b *testing.B) 
{ + var v atomic.Value + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.Store(true) + } +} + +func BenchmarkAtomicBoolWrite(b *testing.B) { + v := New() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.Set() + } +} + +// Benchmark CAS + +func BenchmarkMutexCAS(b *testing.B) { + var m sync.RWMutex + var v bool + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.Lock() + if !v { + v = true + } + m.Unlock() + } + b.StopTimer() +} + +func BenchmarkAtomicBoolCAS(b *testing.B) { + v := New() + b.ResetTimer() + for i := 0; i < b.N; i++ { + v.SetToIf(false, true) + } +} diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index c0c80d8930..aeac7d8a51 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -992,7 +992,7 @@ func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAf req.Method != "HEAD" { // Request gzip only, not deflate. Deflate is ambiguous and // not as universally supported anyway. - // See: http://www.gzip.org/zlib/zlib_faq.html#faq38 + // See: https://zlib.net/zlib_faq.html#faq39 // // Note that we don't request this for HEAD requests, // due to a bug in nginx: diff --git a/vendor/golang.org/x/sys/unix/affinity_linux.go b/vendor/golang.org/x/sys/unix/affinity_linux.go index 72afe3338c..14e4d5caa3 100644 --- a/vendor/golang.org/x/sys/unix/affinity_linux.go +++ b/vendor/golang.org/x/sys/unix/affinity_linux.go @@ -91,9 +91,13 @@ func onesCount64(x uint64) int { const m0 = 0x5555555555555555 // 01010101 ... const m1 = 0x3333333333333333 // 00110011 ... const m2 = 0x0f0f0f0f0f0f0f0f // 00001111 ... - const m3 = 0x00ff00ff00ff00ff // etc. - const m4 = 0x0000ffff0000ffff + // Unused in this function, but definitions preserved for + // documentation purposes: + // + // const m3 = 0x00ff00ff00ff00ff // etc. + // const m4 = 0x0000ffff0000ffff + // // Implementation: Parallel summing of adjacent bits. // See "Hacker's Delight", Chap. 
5: Counting Bits. // The following pattern shows the general approach: diff --git a/vendor/golang.org/x/sys/unix/dirent.go b/vendor/golang.org/x/sys/unix/dirent.go index 6f3460e69c..304016b688 100644 --- a/vendor/golang.org/x/sys/unix/dirent.go +++ b/vendor/golang.org/x/sys/unix/dirent.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// +build aix darwin dragonfly freebsd linux nacl netbsd openbsd solaris +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris package unix diff --git a/vendor/golang.org/x/sys/unix/endian_little.go b/vendor/golang.org/x/sys/unix/endian_little.go index 085df2d8dd..bcdb5d30eb 100644 --- a/vendor/golang.org/x/sys/unix/endian_little.go +++ b/vendor/golang.org/x/sys/unix/endian_little.go @@ -2,7 +2,7 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // -// +build 386 amd64 amd64p32 arm arm64 ppc64le mipsle mips64le +// +build 386 amd64 amd64p32 arm arm64 ppc64le mipsle mips64le riscv64 package unix diff --git a/vendor/golang.org/x/sys/unix/sendfile_test.go b/vendor/golang.org/x/sys/unix/sendfile_test.go index d41fb93c8f..814b4d91ae 100644 --- a/vendor/golang.org/x/sys/unix/sendfile_test.go +++ b/vendor/golang.org/x/sys/unix/sendfile_test.go @@ -41,10 +41,15 @@ func TestSendfile(t *testing.T) { go func() { conn, err := ln.Accept() if err != nil { - t.Fatal(err) + t.Errorf("failed to accept: %v", err) + return } defer conn.Close() b, err := ioutil.ReadAll(conn) + if err != nil { + t.Errorf("failed to read: %v", err) + return + } if string(b) != contents { t.Errorf("contents not transmitted: got %s (len=%d), want %s", string(b), len(b), contents) } diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_test.go b/vendor/golang.org/x/sys/unix/syscall_linux_test.go index 00aa6556b5..6ceeb426eb 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_test.go +++ 
b/vendor/golang.org/x/sys/unix/syscall_linux_test.go @@ -516,7 +516,7 @@ func TestClockNanosleep(t *testing.T) { t.Skip("clock_nanosleep syscall is not available, skipping test") } else if err != nil { t.Errorf("ClockNanosleep(CLOCK_MONOTONIC, 0, %#v, nil) = %v", &rel, err) - } else if slept := time.Now().Sub(start); slept < delay { + } else if slept := time.Since(start); slept < delay { t.Errorf("ClockNanosleep(CLOCK_MONOTONIC, 0, %#v, nil) slept only %v", &rel, slept) } @@ -527,7 +527,7 @@ func TestClockNanosleep(t *testing.T) { err = unix.ClockNanosleep(unix.CLOCK_REALTIME, unix.TIMER_ABSTIME, &abs, nil) if err != nil { t.Errorf("ClockNanosleep(CLOCK_REALTIME, TIMER_ABSTIME, %#v (=%v), nil) = %v", &abs, until, err) - } else if slept := time.Now().Sub(start); slept < delay { + } else if slept := time.Since(start); slept < delay { t.Errorf("ClockNanosleep(CLOCK_REALTIME, TIMER_ABSTIME, %#v (=%v), nil) slept only %v", &abs, until, slept) } diff --git a/vendor/golang.org/x/sys/unix/syscall_test.go b/vendor/golang.org/x/sys/unix/syscall_test.go index e0ecfa706b..1538c720bb 100644 --- a/vendor/golang.org/x/sys/unix/syscall_test.go +++ b/vendor/golang.org/x/sys/unix/syscall_test.go @@ -62,13 +62,10 @@ func TestUname(t *testing.T) { // Test that this compiles. 
(Issue #31735) func TestStatFieldNames(t *testing.T) { var st unix.Stat_t - var ts *unix.Timespec - ts = &st.Atim - ts = &st.Mtim - ts = &st.Ctim - _ = ts - secs := int64(st.Mtim.Sec) - nsecs := int64(st.Mtim.Nsec) - _ = secs - _ = nsecs + var _ *unix.Timespec + _ = &st.Atim + _ = &st.Mtim + _ = &st.Ctim + _ = int64(st.Mtim.Sec) + _ = int64(st.Mtim.Nsec) } diff --git a/vendor/golang.org/x/sys/windows/service.go b/vendor/golang.org/x/sys/windows/service.go index 03383f1dfd..847e00bc99 100644 --- a/vendor/golang.org/x/sys/windows/service.go +++ b/vendor/golang.org/x/sys/windows/service.go @@ -159,6 +159,10 @@ type SERVICE_DESCRIPTION struct { Description *uint16 } +type SERVICE_DELAYED_AUTO_START_INFO struct { + IsDelayedAutoStartUp uint32 +} + type SERVICE_STATUS_PROCESS struct { ServiceType uint32 CurrentState uint32 diff --git a/vendor/golang.org/x/sys/windows/svc/mgr/config.go b/vendor/golang.org/x/sys/windows/svc/mgr/config.go index 61447a58f7..8431edbe7e 100644 --- a/vendor/golang.org/x/sys/windows/svc/mgr/config.go +++ b/vendor/golang.org/x/sys/windows/svc/mgr/config.go @@ -43,6 +43,7 @@ type Config struct { Password string Description string SidType uint32 // one of SERVICE_SID_TYPE, the type of sid to use for the service + DelayedAutoStart bool // the service is started after other auto-start services are started plus a short delay } func toString(p *uint16) string { @@ -95,6 +96,16 @@ func (s *Service) Config() (Config, error) { } p2 := (*windows.SERVICE_DESCRIPTION)(unsafe.Pointer(&b[0])) + b, err = s.queryServiceConfig2(windows.SERVICE_CONFIG_DELAYED_AUTO_START_INFO) + if err != nil { + return Config{}, err + } + p3 := (*windows.SERVICE_DELAYED_AUTO_START_INFO)(unsafe.Pointer(&b[0])) + delayedStart := false + if p3.IsDelayedAutoStartUp != 0 { + delayedStart = true + } + return Config{ ServiceType: p.ServiceType, StartType: p.StartType, @@ -106,6 +117,7 @@ func (s *Service) Config() (Config, error) { ServiceStartName: toString(p.ServiceStartName), 
DisplayName: toString(p.DisplayName), Description: toString(p2.Description), + DelayedAutoStart: delayedStart, }, nil } @@ -119,6 +131,15 @@ func updateSidType(handle windows.Handle, sidType uint32) error { return windows.ChangeServiceConfig2(handle, windows.SERVICE_CONFIG_SERVICE_SID_INFO, (*byte)(unsafe.Pointer(&sidType))) } +func updateStartUp(handle windows.Handle, isDelayed bool) error { + var d windows.SERVICE_DELAYED_AUTO_START_INFO + if isDelayed { + d.IsDelayedAutoStartUp = 1 + } + return windows.ChangeServiceConfig2(handle, + windows.SERVICE_CONFIG_DELAYED_AUTO_START_INFO, (*byte)(unsafe.Pointer(&d))) +} + // UpdateConfig updates service s configuration parameters. func (s *Service) UpdateConfig(c Config) error { err := windows.ChangeServiceConfig(s.Handle, c.ServiceType, c.StartType, @@ -132,6 +153,12 @@ func (s *Service) UpdateConfig(c Config) error { if err != nil { return err } + + err = updateStartUp(s.Handle, c.DelayedAutoStart) + if err != nil { + return err + } + return updateDescription(s.Handle, c.Description) } diff --git a/vendor/golang.org/x/sys/windows/svc/mgr/mgr.go b/vendor/golang.org/x/sys/windows/svc/mgr/mgr.go index ad4cd6b6f0..8d1cfd8bf4 100644 --- a/vendor/golang.org/x/sys/windows/svc/mgr/mgr.go +++ b/vendor/golang.org/x/sys/windows/svc/mgr/mgr.go @@ -149,6 +149,14 @@ func (m *Mgr) CreateService(name, exepath string, c Config, args ...string) (*Se return nil, err } } + if c.DelayedAutoStart { + err = updateStartUp(h, c.DelayedAutoStart) + if err != nil { + windows.DeleteService(h) + windows.CloseHandle(h) + return nil, err + } + } return &Service{Name: name, Handle: h}, nil } diff --git a/vendor/golang.org/x/sys/windows/svc/mgr/mgr_test.go b/vendor/golang.org/x/sys/windows/svc/mgr/mgr_test.go index 9171f5bcf1..750ffe89e8 100644 --- a/vendor/golang.org/x/sys/windows/svc/mgr/mgr_test.go +++ b/vendor/golang.org/x/sys/windows/svc/mgr/mgr_test.go @@ -80,6 +80,9 @@ func testConfig(t *testing.T, s *mgr.Service, should mgr.Config) mgr.Config 
{ if err != nil { t.Fatalf("Config failed: %s", err) } + if should.DelayedAutoStart != is.DelayedAutoStart { + t.Fatalf("config mismatch: DelayedAutoStart is %v, but should have %v", is.DelayedAutoStart, should.DelayedAutoStart) + } if should.DisplayName != is.DisplayName { t.Fatalf("config mismatch: DisplayName is %q, but should have %q", is.DisplayName, should.DisplayName) } @@ -257,6 +260,15 @@ func TestMyService(t *testing.T) { testConfig(t, s, c) + c.StartType = mgr.StartAutomatic + c.DelayedAutoStart = true + err = s.UpdateConfig(c) + if err != nil { + t.Fatalf("UpdateConfig failed: %v", err) + } + + testConfig(t, s, c) + svcnames, err := m.ListServices() if err != nil { t.Fatalf("ListServices failed: %v", err) diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go index 8a563f92bc..1e3947f0f6 100644 --- a/vendor/golang.org/x/sys/windows/types_windows.go +++ b/vendor/golang.org/x/sys/windows/types_windows.go @@ -197,8 +197,11 @@ const ( FILE_MAP_READ = 0x04 FILE_MAP_EXECUTE = 0x20 - CTRL_C_EVENT = 0 - CTRL_BREAK_EVENT = 1 + CTRL_C_EVENT = 0 + CTRL_BREAK_EVENT = 1 + CTRL_CLOSE_EVENT = 2 + CTRL_LOGOFF_EVENT = 5 + CTRL_SHUTDOWN_EVENT = 6 // Windows reserves errors >= 1<<29 for application use. 
APPLICATION_ERROR = 1 << 29 diff --git a/vendor/golang.org/x/tools/go.mod b/vendor/golang.org/x/tools/go.mod index 4c386cde2a..026a263884 100644 --- a/vendor/golang.org/x/tools/go.mod +++ b/vendor/golang.org/x/tools/go.mod @@ -5,4 +5,5 @@ go 1.11 require ( golang.org/x/net v0.0.0-20190620200207-3b0461eec859 golang.org/x/sync v0.0.0-20190423024810-112230192c58 + golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 ) diff --git a/vendor/golang.org/x/tools/go.sum b/vendor/golang.org/x/tools/go.sum index 22bbdd2b36..c4cc4a6d32 100644 --- a/vendor/golang.org/x/tools/go.sum +++ b/vendor/golang.org/x/tools/go.sum @@ -5,3 +5,5 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEha golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/golang.org/x/tools/go/analysis/analysis.go b/vendor/golang.org/x/tools/go/analysis/analysis.go index e375484fa6..bc58c31c9f 100644 --- a/vendor/golang.org/x/tools/go/analysis/analysis.go +++ b/vendor/golang.org/x/tools/go/analysis/analysis.go @@ -128,11 +128,13 @@ type Pass struct { // See comments for ExportObjectFact. ExportPackageFact func(fact Fact) - // AllPackageFacts returns a new slice containing all package facts in unspecified order. + // AllPackageFacts returns a new slice containing all package facts of the analysis's FactTypes + // in unspecified order. // WARNING: This is an experimental API and may change in the future. 
AllPackageFacts func() []PackageFact - // AllObjectFacts returns a new slice containing all object facts in unspecified order. + // AllObjectFacts returns a new slice containing all object facts of the analysis's FactTypes + // in unspecified order. // WARNING: This is an experimental API and may change in the future. AllObjectFacts func() []ObjectFact diff --git a/vendor/golang.org/x/tools/go/analysis/diagnostic.go b/vendor/golang.org/x/tools/go/analysis/diagnostic.go index e7debe93e9..744072cd79 100644 --- a/vendor/golang.org/x/tools/go/analysis/diagnostic.go +++ b/vendor/golang.org/x/tools/go/analysis/diagnostic.go @@ -1,5 +1,3 @@ -// +build !experimental - package analysis import "go/token" @@ -17,4 +15,34 @@ type Diagnostic struct { End token.Pos // optional Category string // optional Message string + + // SuggestedFixes contains suggested fixes for a diagnostic which can be used to perform + // edits to a file that address the diagnostic. + // TODO(matloob): Should multiple SuggestedFixes be allowed for a diagnostic? + // Diagnostics should not contain SuggestedFixes that overlap. + // Experimental: This API is experimental and may change in the future. + SuggestedFixes []SuggestedFix // optional +} + +// A SuggestedFix is a code change associated with a Diagnostic that a user can choose +// to apply to their code. Usually the SuggestedFix is meant to fix the issue flagged +// by the diagnostic. +// TextEdits for a SuggestedFix should not overlap. TextEdits for a SuggestedFix +// should not contain edits for other packages. +// Experimental: This API is experimental and may change in the future. +type SuggestedFix struct { + // A description for this suggested fix to be shown to a user deciding + // whether to accept it. + Message string + TextEdits []TextEdit +} + +// A TextEdit represents the replacement of the code between Pos and End with the new text. +// Each TextEdit should apply to a single file. End should not be earlier in the file than Pos. 
+// Experimental: This API is experimental and may change in the future. +type TextEdit struct { + // For a pure insertion, End can either be set to Pos or token.NoPos. + Pos token.Pos + End token.Pos + NewText []byte } diff --git a/vendor/golang.org/x/tools/go/analysis/diagnostic_experimental.go b/vendor/golang.org/x/tools/go/analysis/diagnostic_experimental.go deleted file mode 100644 index 2e9ebb202c..0000000000 --- a/vendor/golang.org/x/tools/go/analysis/diagnostic_experimental.go +++ /dev/null @@ -1,41 +0,0 @@ -// +build experimental - -package analysis - -import "go/token" - -// A Diagnostic is a message associated with a source location or range. -// -// An Analyzer may return a variety of diagnostics; the optional Category, -// which should be a constant, may be used to classify them. -// It is primarily intended to make it easy to look up documentation. -// -// If End is provided, the diagnostic is specified to apply to the range between -// Pos and End. -type Diagnostic struct { - Pos token.Pos - End token.Pos // optional - Category string // optional - Message string - - // TODO(matloob): Should multiple SuggestedFixes be allowed for a diagnostic? - SuggestedFixes []SuggestedFix // optional -} - -// A SuggestedFix is a code change associated with a Diagnostic that a user can choose -// to apply to their code. Usually the SuggestedFix is meant to fix the issue flagged -// by the diagnostic. -type SuggestedFix struct { - // A description for this suggested fix to be shown to a user deciding - // whether to accept it. - Message string - TextEdits []TextEdit -} - -// A TextEdit represents the replacement of the code between Pos and End with the new text. -type TextEdit struct { - // For a pure insertion, End can either be set to Pos or token.NoPos. 
- Pos token.Pos - End token.Pos - NewText []byte -} diff --git a/vendor/golang.org/x/tools/go/analysis/internal/facts/facts.go b/vendor/golang.org/x/tools/go/analysis/internal/facts/facts.go index 86f1ce84a7..dcd4f4da81 100644 --- a/vendor/golang.org/x/tools/go/analysis/internal/facts/facts.go +++ b/vendor/golang.org/x/tools/go/analysis/internal/facts/facts.go @@ -99,10 +99,10 @@ func (s *Set) ExportObjectFact(obj types.Object, fact analysis.Fact) { s.mu.Unlock() } -func (s *Set) AllObjectFacts() []analysis.ObjectFact { +func (s *Set) AllObjectFacts(filter map[reflect.Type]bool) []analysis.ObjectFact { var facts []analysis.ObjectFact for k, v := range s.m { - if k.obj != nil { + if k.obj != nil && filter[k.t] { facts = append(facts, analysis.ObjectFact{k.obj, v}) } } @@ -132,10 +132,10 @@ func (s *Set) ExportPackageFact(fact analysis.Fact) { s.mu.Unlock() } -func (s *Set) AllPackageFacts() []analysis.PackageFact { +func (s *Set) AllPackageFacts(filter map[reflect.Type]bool) []analysis.PackageFact { var facts []analysis.PackageFact for k, v := range s.m { - if k.obj == nil { + if k.obj == nil && filter[k.t] { facts = append(facts, analysis.PackageFact{k.pkg, v}) } } diff --git a/vendor/golang.org/x/tools/go/analysis/internal/facts/facts_test.go b/vendor/golang.org/x/tools/go/analysis/internal/facts/facts_test.go index e21a4982ba..c345a12c04 100644 --- a/vendor/golang.org/x/tools/go/analysis/internal/facts/facts_test.go +++ b/vendor/golang.org/x/tools/go/analysis/internal/facts/facts_test.go @@ -10,6 +10,7 @@ import ( "go/token" "go/types" "os" + "reflect" "testing" "golang.org/x/tools/go/analysis/analysistest" @@ -172,3 +173,52 @@ func load(dir string, path string) (*types.Package, error) { } return pkgs[0].Types, nil } + +type otherFact struct { + S string +} + +func (f *otherFact) String() string { return fmt.Sprintf("otherFact(%s)", f.S) } +func (f *otherFact) AFact() {} + +func TestFactFilter(t *testing.T) { + files := map[string]string{ + "a/a.go": `package a; 
type A int`, + } + dir, cleanup, err := analysistest.WriteFiles(files) + if err != nil { + t.Fatal(err) + } + defer cleanup() + + pkg, err := load(dir, "a") + if err != nil { + t.Fatal(err) + } + + obj := pkg.Scope().Lookup("A") + s, err := facts.Decode(pkg, func(string) ([]byte, error) { return nil, nil }) + if err != nil { + t.Fatal(err) + } + s.ExportObjectFact(obj, &myFact{"good object fact"}) + s.ExportPackageFact(&myFact{"good package fact"}) + s.ExportObjectFact(obj, &otherFact{"bad object fact"}) + s.ExportPackageFact(&otherFact{"bad package fact"}) + + filter := map[reflect.Type]bool{ + reflect.TypeOf(&myFact{}): true, + } + + pkgFacts := s.AllPackageFacts(filter) + wantPkgFacts := `[{package a ("a") myFact(good package fact)}]` + if got := fmt.Sprintf("%v", pkgFacts); got != wantPkgFacts { + t.Errorf("AllPackageFacts: got %v, want %v", got, wantPkgFacts) + } + + objFacts := s.AllObjectFacts(filter) + wantObjFacts := "[{type a.A int myFact(good object fact)}]" + if got := fmt.Sprintf("%v", objFacts); got != wantObjFacts { + t.Errorf("AllObjectFacts: got %v, want %v", got, wantObjFacts) + } +} diff --git a/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go b/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go index f59e95dc21..52de8b03f4 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/printf/printf.go @@ -67,15 +67,20 @@ of arguments with no format string. ` // isWrapper is a fact indicating that a function is a print or printf wrapper. 
-type isWrapper struct{ Printf bool } +type isWrapper struct{ Kind funcKind } func (f *isWrapper) AFact() {} func (f *isWrapper) String() string { - if f.Printf { + switch f.Kind { + case kindPrintf: return "printfWrapper" - } else { + case kindPrint: return "printWrapper" + case kindErrorf: + return "errorfWrapper" + default: + return "unknownWrapper" } } @@ -223,16 +228,20 @@ func match(info *types.Info, arg ast.Expr, param *types.Var) bool { return ok && info.ObjectOf(id) == param } +type funcKind int + const ( - kindPrintf = 1 - kindPrint = 2 + kindUnknown funcKind = iota + kindPrintf = iota + kindPrint + kindErrorf ) // checkPrintfFwd checks that a printf-forwarding wrapper is forwarding correctly. // It diagnoses writing fmt.Printf(format, args) instead of fmt.Printf(format, args...). -func checkPrintfFwd(pass *analysis.Pass, w *printfWrapper, call *ast.CallExpr, kind int) { +func checkPrintfFwd(pass *analysis.Pass, w *printfWrapper, call *ast.CallExpr, kind funcKind) { matched := kind == kindPrint || - kind == kindPrintf && len(call.Args) >= 2 && match(pass.TypesInfo, call.Args[len(call.Args)-2], w.format) + kind != kindUnknown && len(call.Args) >= 2 && match(pass.TypesInfo, call.Args[len(call.Args)-2], w.format) if !matched { return } @@ -262,7 +271,7 @@ func checkPrintfFwd(pass *analysis.Pass, w *printfWrapper, call *ast.CallExpr, k fn := w.obj var fact isWrapper if !pass.ImportObjectFact(fn, &fact) { - fact.Printf = kind == kindPrintf + fact.Kind = kind pass.ExportObjectFact(fn, &fact) for _, caller := range w.callers { checkPrintfFwd(pass, caller.w, caller.call, kind) @@ -414,42 +423,42 @@ func checkCall(pass *analysis.Pass) { call := n.(*ast.CallExpr) fn, kind := printfNameAndKind(pass, call) switch kind { - case kindPrintf: - checkPrintf(pass, call, fn) + case kindPrintf, kindErrorf: + checkPrintf(pass, kind, call, fn) case kindPrint: checkPrint(pass, call, fn) } }) } -func printfNameAndKind(pass *analysis.Pass, call *ast.CallExpr) (fn *types.Func, 
kind int) { +func printfNameAndKind(pass *analysis.Pass, call *ast.CallExpr) (fn *types.Func, kind funcKind) { fn, _ = typeutil.Callee(pass.TypesInfo, call).(*types.Func) if fn == nil { return nil, 0 } - var fact isWrapper - if pass.ImportObjectFact(fn, &fact) { - if fact.Printf { - return fn, kindPrintf - } else { - return fn, kindPrint - } - } - _, ok := isPrint[fn.FullName()] if !ok { // Next look up just "printf", for use with -printf.funcs. _, ok = isPrint[strings.ToLower(fn.Name())] } if ok { - if strings.HasSuffix(fn.Name(), "f") { + if fn.Name() == "Errorf" { + kind = kindErrorf + } else if strings.HasSuffix(fn.Name(), "f") { kind = kindPrintf } else { kind = kindPrint } + return fn, kind } - return fn, kind + + var fact isWrapper + if pass.ImportObjectFact(fn, &fact) { + return fn, fact.Kind + } + + return fn, kindUnknown } // isFormatter reports whether t satisfies fmt.Formatter. @@ -491,7 +500,7 @@ type formatState struct { } // checkPrintf checks a call to a formatted print routine such as Printf. -func checkPrintf(pass *analysis.Pass, call *ast.CallExpr, fn *types.Func) { +func checkPrintf(pass *analysis.Pass, kind funcKind, call *ast.CallExpr, fn *types.Func) { format, idx := formatString(pass, call) if idx < 0 { if false { @@ -511,6 +520,7 @@ func checkPrintf(pass *analysis.Pass, call *ast.CallExpr, fn *types.Func) { argNum := firstArg maxArgNum := firstArg anyIndex := false + anyW := false for i, w := 0, 0; i < len(format); i += w { w = 1 if format[i] != '%' { @@ -527,6 +537,17 @@ func checkPrintf(pass *analysis.Pass, call *ast.CallExpr, fn *types.Func) { if state.hasIndex { anyIndex = true } + if state.verb == 'w' { + if kind != kindErrorf { + pass.Reportf(call.Pos(), "%s call has error-wrapping directive %%w", state.name) + return + } + if anyW { + pass.Reportf(call.Pos(), "%s call has more than one error-wrapping directive %%w", state.name) + return + } + anyW = true + } if len(state.argNums) > 0 { // Continue with the next sequential argument. 
argNum = state.argNums[len(state.argNums)-1] + 1 @@ -697,6 +718,7 @@ const ( argFloat argComplex argPointer + argError anyType printfArgType = ^0 ) @@ -739,7 +761,7 @@ var printVerbs = []printVerb{ {'T', "-", anyType}, {'U', "-#", argRune | argInt}, {'v', allFlags, anyType}, - {'w', noFlag, anyType}, + {'w', allFlags, argError}, {'x', sharpNumFlag, argRune | argInt | argString | argPointer}, {'X', sharpNumFlag, argRune | argInt | argString | argPointer}, } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/printf/testdata/src/a/a.go b/vendor/golang.org/x/tools/go/analysis/passes/printf/testdata/src/a/a.go index b783f10be9..69951760ef 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/printf/testdata/src/a/a.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/printf/testdata/src/a/a.go @@ -97,7 +97,6 @@ func PrintfTests() { fmt.Printf("%T", notstringerv) fmt.Printf("%q", stringerarrayv) fmt.Printf("%v", stringerarrayv) - fmt.Printf("%w", err) fmt.Printf("%s", stringerarrayv) fmt.Printf("%v", notstringerarrayv) fmt.Printf("%T", notstringerarrayv) @@ -323,6 +322,16 @@ func PrintfTests() { // Issue 26486 dbg("", 1) // no error "call has arguments but no formatting directive" + + // %w + _ = fmt.Errorf("%w", err) + _ = fmt.Errorf("%#w", err) + _ = fmt.Errorf("%[2]w %[1]s", "x", err) + _ = fmt.Errorf("%[2]w %[1]s", e, "x") // want `Errorf format %\[2\]w has arg "x" of wrong type string` + _ = fmt.Errorf("%w", "x") // want `Errorf format %w has arg "x" of wrong type string` + _ = fmt.Errorf("%w %w", err, err) // want `Errorf call has more than one error-wrapping directive %w` + fmt.Printf("%w", err) // want `Printf call has error-wrapping directive %w` + Errorf(0, "%w", err) } func someString() string { return "X" } @@ -367,13 +376,13 @@ func printf(format string, args ...interface{}) { // want printf:"printfWrapper" // Errorf is used by the test for a case in which the first parameter // is not a format string. 
-func Errorf(i int, format string, args ...interface{}) { // want Errorf:"printfWrapper" +func Errorf(i int, format string, args ...interface{}) { // want Errorf:"errorfWrapper" _ = fmt.Errorf(format, args...) } // errorf is used by the test for a case in which the function accepts multiple // string parameters before variadic arguments -func errorf(level, format string, args ...interface{}) { // want errorf:"printfWrapper" +func errorf(level, format string, args ...interface{}) { // want errorf:"errorfWrapper" _ = fmt.Errorf(format, args...) } diff --git a/vendor/golang.org/x/tools/go/analysis/passes/printf/types.go b/vendor/golang.org/x/tools/go/analysis/passes/printf/types.go index 12286fd5df..5000d9acc9 100644 --- a/vendor/golang.org/x/tools/go/analysis/passes/printf/types.go +++ b/vendor/golang.org/x/tools/go/analysis/passes/printf/types.go @@ -37,6 +37,12 @@ func matchArgTypeInternal(pass *analysis.Pass, t printfArgType, typ types.Type, return true // probably a type check problem } } + + // %w accepts only errors. + if t == argError { + return types.ConvertibleTo(typ, errorType) + } + // If the type implements fmt.Formatter, we have nothing to check. 
if isFormatter(typ) { return true diff --git a/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go b/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go index 87c3160847..2ed274949b 100644 --- a/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go +++ b/vendor/golang.org/x/tools/go/analysis/unitchecker/unitchecker.go @@ -42,6 +42,7 @@ import ( "log" "os" "path/filepath" + "reflect" "sort" "strings" "sync" @@ -322,6 +323,11 @@ func run(fset *token.FileSet, cfg *Config, analyzers []*analysis.Analyzer) ([]re return } + factFilter := make(map[reflect.Type]bool) + for _, f := range a.FactTypes { + factFilter[reflect.TypeOf(f)] = true + } + pass := &analysis.Pass{ Analyzer: a, Fset: fset, @@ -334,10 +340,10 @@ func run(fset *token.FileSet, cfg *Config, analyzers []*analysis.Analyzer) ([]re Report: func(d analysis.Diagnostic) { act.diagnostics = append(act.diagnostics, d) }, ImportObjectFact: facts.ImportObjectFact, ExportObjectFact: facts.ExportObjectFact, - AllObjectFacts: facts.AllObjectFacts, + AllObjectFacts: func() []analysis.ObjectFact { return facts.AllObjectFacts(factFilter) }, ImportPackageFact: facts.ImportPackageFact, ExportPackageFact: facts.ExportPackageFact, - AllPackageFacts: facts.AllPackageFacts, + AllPackageFacts: func() []analysis.PackageFact { return facts.AllPackageFacts(factFilter) }, } t0 := time.Now() diff --git a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go index fdc7da0568..ea15d57be1 100644 --- a/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go +++ b/vendor/golang.org/x/tools/go/internal/packagesdriver/sizes.go @@ -82,15 +82,28 @@ func GetSizesGolist(ctx context.Context, buildFlags, env []string, dir string, u args = append(args, buildFlags...) args = append(args, "--", "unsafe") stdout, err := InvokeGo(ctx, env, dir, usesExportData, args...) 
+ var goarch, compiler string if err != nil { - return nil, err + if strings.Contains(err.Error(), "cannot find main module") { + // User's running outside of a module. All bets are off. Get GOARCH and guess compiler is gc. + // TODO(matloob): Is this a problem in practice? + envout, enverr := InvokeGo(ctx, env, dir, usesExportData, "env", "GOARCH") + if enverr != nil { + return nil, err + } + goarch = strings.TrimSpace(envout.String()) + compiler = "gc" + } else { + return nil, err + } + } else { + fields := strings.Fields(stdout.String()) + if len(fields) < 2 { + return nil, fmt.Errorf("could not determine GOARCH and Go compiler") + } + goarch = fields[0] + compiler = fields[1] } - fields := strings.Fields(stdout.String()) - if len(fields) < 2 { - return nil, fmt.Errorf("could not determine GOARCH and Go compiler") - } - goarch := fields[0] - compiler := fields[1] return types.SizesFor(compiler, goarch), nil } diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index 44df8210d7..9f8d4ced77 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -13,6 +13,7 @@ import ( "log" "os" "os/exec" + "path" "path/filepath" "reflect" "regexp" @@ -86,6 +87,23 @@ func goListDriver(cfg *Config, patterns ...string) (*driverResponse, error) { }() } + // start fetching rootDirs + var rootDirs map[string]string + var rootDirsReady = make(chan struct{}) + go func() { + rootDirs = determineRootDirs(cfg) + close(rootDirsReady) + }() + getRootDirs := func() map[string]string { + <-rootDirsReady + return rootDirs + } + + // always pass getRootDirs to golistDriver + golistDriver := func(cfg *Config, patterns ...string) (*driverResponse, error) { + return golistDriver(cfg, getRootDirs, patterns...) 
+ } + // Determine files requested in contains patterns var containFiles []string var packagesNamed []string @@ -147,7 +165,7 @@ extractQueries: var containsCandidates []string if len(containFiles) != 0 { - if err := runContainsQueries(cfg, golistDriver, response, containFiles); err != nil { + if err := runContainsQueries(cfg, golistDriver, response, containFiles, getRootDirs); err != nil { return nil, err } } @@ -158,7 +176,7 @@ extractQueries: } } - modifiedPkgs, needPkgs, err := processGolistOverlay(cfg, response) + modifiedPkgs, needPkgs, err := processGolistOverlay(cfg, response, getRootDirs) if err != nil { return nil, err } @@ -166,7 +184,7 @@ extractQueries: containsCandidates = append(containsCandidates, modifiedPkgs...) containsCandidates = append(containsCandidates, needPkgs...) } - if err := addNeededOverlayPackages(cfg, golistDriver, response, needPkgs); err != nil { + if err := addNeededOverlayPackages(cfg, golistDriver, response, needPkgs, getRootDirs); err != nil { return nil, err } // Check candidate packages for containFiles. 
@@ -198,7 +216,7 @@ extractQueries: return response.dr, nil } -func addNeededOverlayPackages(cfg *Config, driver driver, response *responseDeduper, pkgs []string) error { +func addNeededOverlayPackages(cfg *Config, driver driver, response *responseDeduper, pkgs []string, getRootDirs func() map[string]string) error { if len(pkgs) == 0 { return nil } @@ -209,17 +227,17 @@ func addNeededOverlayPackages(cfg *Config, driver driver, response *responseDedu for _, pkg := range dr.Packages { response.addPackage(pkg) } - _, needPkgs, err := processGolistOverlay(cfg, response) + _, needPkgs, err := processGolistOverlay(cfg, response, getRootDirs) if err != nil { return err } - if err := addNeededOverlayPackages(cfg, driver, response, needPkgs); err != nil { + if err := addNeededOverlayPackages(cfg, driver, response, needPkgs, getRootDirs); err != nil { return err } return nil } -func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, queries []string) error { +func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, queries []string, rootDirs func() map[string]string) error { for _, query := range queries { // TODO(matloob): Do only one query per directory. fdir := filepath.Dir(query) @@ -240,6 +258,21 @@ func runContainsQueries(cfg *Config, driver driver, response *responseDeduper, q // Return the original error if the attempt to fall back failed. return err } + // Special case to handle issue #33482: + // If this is a file= query for ad-hoc packages where the file only exists on an overlay, + // and exists outside of a module, add the file in for the package. + if len(dirResponse.Packages) == 1 && len(dirResponse.Packages) == 1 && + dirResponse.Packages[0].ID == "command-line-arguments" && len(dirResponse.Packages[0].GoFiles) == 0 { + filename := filepath.Join(pattern, filepath.Base(query)) // avoid recomputing abspath + // TODO(matloob): check if the file is outside of a root dir? 
+ for path := range cfg.Overlay { + if path == filename { + dirResponse.Packages[0].Errors = nil + dirResponse.Packages[0].GoFiles = []string{path} + dirResponse.Packages[0].CompiledGoFiles = []string{path} + } + } + } } isRoot := make(map[string]bool, len(dirResponse.Roots)) for _, root := range dirResponse.Roots { @@ -567,7 +600,7 @@ func otherFiles(p *jsonPackage) [][]string { // golistDriver uses the "go list" command to expand the pattern // words and return metadata for the specified packages. dir may be // "" and env may be nil, as per os/exec.Command. -func golistDriver(cfg *Config, words ...string) (*driverResponse, error) { +func golistDriver(cfg *Config, rootsDirs func() map[string]string, words ...string) (*driverResponse, error) { // go list uses the following identifiers in ImportPath and Imports: // // "p" -- importable package or main (command) @@ -608,6 +641,20 @@ func golistDriver(cfg *Config, words ...string) (*driverResponse, error) { return nil, fmt.Errorf("package missing import path: %+v", p) } + // Work around https://golang.org/issue/33157: + // go list -e, when given an absolute path, will find the package contained at + // that directory. But when no package exists there, it will return a fake package + // with an error and the ImportPath set to the absolute path provided to go list. + // Try toto convert that absolute path to what its package path would be if it's + // contained in a known module or GOPATH entry. This will allow the package to be + // properly "reclaimed" when overlays are processed. 
+ if filepath.IsAbs(p.ImportPath) && p.Error != nil { + pkgPath, ok := getPkgPath(p.ImportPath, rootsDirs) + if ok { + p.ImportPath = pkgPath + } + } + if old, found := seen[p.ImportPath]; found { if !reflect.DeepEqual(p, old) { return nil, fmt.Errorf("internal error: go list gives conflicting information for package %v", p.ImportPath) @@ -711,6 +758,27 @@ func golistDriver(cfg *Config, words ...string) (*driverResponse, error) { return &response, nil } +// getPkgPath finds the package path of a directory if it's relative to a root directory. +func getPkgPath(dir string, rootDirs func() map[string]string) (string, bool) { + for rdir, rpath := range rootDirs() { + // TODO(matloob): This doesn't properly handle symlinks. + r, err := filepath.Rel(rdir, dir) + if err != nil { + continue + } + if rpath != "" { + // We choose only ore root even though the directory even it can belong in multiple modules + // or GOPATH entries. This is okay because we only need to work with absolute dirs when a + // file is missing from disk, for instance when gopls calls go/packages in an overlay. + // Once the file is saved, gopls, or the next invocation of the tool will get the correct + // result straight from golist. + // TODO(matloob): Implement module tiebreaking? + return path.Join(rpath, filepath.ToSlash(r)), true + } + } + return "", false +} + // absJoin absolutizes and flattens the lists of files. func absJoin(dir string, fileses ...[]string) (res []string) { for _, files := range fileses { @@ -788,7 +856,20 @@ func invokeGo(cfg *Config, args ...string) (*bytes.Buffer, error) { return bytes.NewBufferString(output), nil } + // Backwards compatibility for Go 1.11 because 1.12 and 1.13 put the directory in the ImportPath. + // If the package doesn't exist, put the absolute path of the directory into the error message, + // as Go 1.13 list does. 
+ const noSuchDirectory = "no such directory" + if len(stderr.String()) > 0 && strings.Contains(stderr.String(), noSuchDirectory) { + errstr := stderr.String() + abspath := strings.TrimSpace(errstr[strings.Index(errstr, noSuchDirectory)+len(noSuchDirectory):]) + output := fmt.Sprintf(`{"ImportPath": %q,"Incomplete": true,"Error": {"Pos": "","Err": %q}}`, + abspath, strings.Trim(stderr.String(), "\n")) + return bytes.NewBufferString(output), nil + } + // Workaround for #29280: go list -e has incorrect behavior when an ad-hoc package doesn't exist. + // Note that the error message we look for in this case is different that the one looked for above. if len(stderr.String()) > 0 && strings.Contains(stderr.String(), "no such file or directory") { output := fmt.Sprintf(`{"ImportPath": "command-line-arguments","Incomplete": true,"Error": {"Pos": "","Err": %q}}`, strings.Trim(stderr.String(), "\n")) diff --git a/vendor/golang.org/x/tools/go/packages/golist_overlay.go b/vendor/golang.org/x/tools/go/packages/golist_overlay.go index ffc7a367f6..b051327df6 100644 --- a/vendor/golang.org/x/tools/go/packages/golist_overlay.go +++ b/vendor/golang.org/x/tools/go/packages/golist_overlay.go @@ -10,7 +10,6 @@ import ( "path/filepath" "strconv" "strings" - "sync" ) // processGolistOverlay provides rudimentary support for adding @@ -18,7 +17,7 @@ import ( // sometimes incorrect. 
// TODO(matloob): Handle unsupported cases, including the following: // - determining the correct package to add given a new import path -func processGolistOverlay(cfg *Config, response *responseDeduper) (modifiedPkgs, needPkgs []string, err error) { +func processGolistOverlay(cfg *Config, response *responseDeduper, rootDirs func() map[string]string) (modifiedPkgs, needPkgs []string, err error) { havePkgs := make(map[string]string) // importPath -> non-test package ID needPkgsSet := make(map[string]bool) modifiedPkgsSet := make(map[string]bool) @@ -29,9 +28,6 @@ func processGolistOverlay(cfg *Config, response *responseDeduper) (modifiedPkgs, havePkgs[pkg.PkgPath] = pkg.ID } - var rootDirs map[string]string - var onceGetRootDirs sync.Once - // If no new imports are added, it is safe to avoid loading any needPkgs. // Otherwise, it's hard to tell which package is actually being loaded // (due to vendoring) and whether any modified package will show up @@ -54,7 +50,7 @@ func processGolistOverlay(cfg *Config, response *responseDeduper) (modifiedPkgs, } nextPackage: for _, p := range response.dr.Packages { - if pkgName != p.Name { + if pkgName != p.Name && p.ID != "command-line-arguments" { continue } for _, f := range p.GoFiles { @@ -76,13 +72,10 @@ func processGolistOverlay(cfg *Config, response *responseDeduper) (modifiedPkgs, } // The overlay could have included an entirely new package. if pkg == nil { - onceGetRootDirs.Do(func() { - rootDirs = determineRootDirs(cfg) - }) // Try to find the module or gopath dir the file is contained in. // Then for modules, add the module opath to the beginning. var pkgPath string - for rdir, rpath := range rootDirs { + for rdir, rpath := range rootDirs() { // TODO(matloob): This doesn't properly handle symlinks. 
r, err := filepath.Rel(rdir, dir) if err != nil { diff --git a/vendor/golang.org/x/tools/go/packages/packages_test.go b/vendor/golang.org/x/tools/go/packages/packages_test.go index c9d07926bd..9f81b4aa9a 100644 --- a/vendor/golang.org/x/tools/go/packages/packages_test.go +++ b/vendor/golang.org/x/tools/go/packages/packages_test.go @@ -930,12 +930,6 @@ func testNewPackagesInOverlay(t *testing.T, exporter packagestest.Exporter) { "b/b.go": `package b; import "golang.org/fake/c"; const B = "b" + c.C`, "c/c.go": `package c; const C = "c"`, "d/d.go": `package d; const D = "d"`, - - // TODO: Remove these temporary files when golang.org/issue/33157 is resolved. - filepath.Join("e/e_temp.go"): ``, - filepath.Join("f/f_temp.go"): ``, - filepath.Join("g/g_temp.go"): ``, - filepath.Join("h/h_temp.go"): ``, }}}) defer exported.Cleanup() @@ -1016,6 +1010,46 @@ func testNewPackagesInOverlay(t *testing.T, exporter packagestest.Exporter) { } } +func TestAdHocOverlays(t *testing.T) { + // This test doesn't use packagestest because we are testing ad-hoc packages, + // which are outside of $GOPATH and outside of a module. + tmp, err := ioutil.TempDir("", "a") + if err != nil { + t.Fatal(err) + } + defer os.Remove(tmp) + + filename := filepath.Join(tmp, "a.go") + content := []byte(`package a +const A = 1 +`) + config := &packages.Config{ + Dir: tmp, + Mode: packages.LoadAllSyntax, + Overlay: map[string][]byte{ + filename: content, + }, + } + initial, err := packages.Load(config, fmt.Sprintf("file=%s", filename)) + if err != nil { + t.Error(err) + } + // Check value of a.A. 
+ a := initial[0] + if a.Errors != nil { + t.Fatalf("a: got errors %+v, want no error", err) + } + aA := constant(a, "A") + if aA == nil { + t.Errorf("a.A: got nil") + return + } + got := aA.Val().String() + if want := "1"; got != want { + t.Errorf("a.A: got %s, want %s", got, want) + } +} + func TestLoadAllSyntaxImportErrors(t *testing.T) { packagestest.TestAll(t, testLoadAllSyntaxImportErrors) } diff --git a/vendor/golang.org/x/tools/gopls/go.mod b/vendor/golang.org/x/tools/gopls/go.mod index 6d506314b7..9a8d42d483 100644 --- a/vendor/golang.org/x/tools/gopls/go.mod +++ b/vendor/golang.org/x/tools/gopls/go.mod @@ -2,6 +2,6 @@ module golang.org/x/tools/gopls go 1.11 -require golang.org/x/tools v0.0.0-20190710153321-831012c29e42 +require golang.org/x/tools v0.0.0-20190723021737-8bb11ff117ca replace golang.org/x/tools => ../ diff --git a/vendor/golang.org/x/tools/gopls/go.sum b/vendor/golang.org/x/tools/gopls/go.sum index e215e05278..6adfd65905 100644 --- a/vendor/golang.org/x/tools/gopls/go.sum +++ b/vendor/golang.org/x/tools/gopls/go.sum @@ -1,8 +1,9 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190723021737-8bb11ff117ca 
h1:SqwJrz6xPBlCUltcEHz2/p01HRPR+VGD+aYLikk8uas= +golang.org/x/tools v0.0.0-20190723021737-8bb11ff117ca/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go index 04bb96a362..9c07e4c2d2 100644 --- a/vendor/golang.org/x/tools/internal/gopathwalk/walk.go +++ b/vendor/golang.org/x/tools/internal/gopathwalk/walk.go @@ -67,7 +67,7 @@ func Walk(roots []Root, add func(root Root, dir string), opts Options) { func walkDir(root Root, add func(Root, string), opts Options) { if _, err := os.Stat(root.Path); os.IsNotExist(err) { if opts.Debug { - log.Printf("skipping nonexistant directory: %v", root.Path) + log.Printf("skipping nonexistent directory: %v", root.Path) } return } diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go index 72323f5859..4066565192 100644 --- a/vendor/golang.org/x/tools/internal/imports/fix.go +++ b/vendor/golang.org/x/tools/internal/imports/fix.go @@ -67,23 +67,27 @@ func importGroup(env *ProcessEnv, importPath string) int { return 0 } -type importFixType int +type ImportFixType int const ( - addImport importFixType = iota - deleteImport - setImportName + AddImport ImportFixType = iota + DeleteImport + SetImportName ) -type importFix struct { - info importInfo - fixType importFixType +type ImportFix struct { + // StmtInfo represents the import statement this fix will add, remove, or change. + StmtInfo ImportInfo + // IdentName is the identifier that this fix will add or remove. + IdentName string + // FixType is the type of fix this is (AddImport, DeleteImport, SetImportName). + FixType ImportFixType } -// An importInfo represents a single import statement. -type importInfo struct { - importPath string // import path, e.g. "crypto/rand". 
- name string // import name, e.g. "crand", or "" if none. +// An ImportInfo represents a single import statement. +type ImportInfo struct { + ImportPath string // import path, e.g. "crypto/rand". + Name string // import name, e.g. "crand", or "" if none. } // A packageInfo represents what's known about a package. @@ -181,10 +185,10 @@ func collectReferences(f *ast.File) references { return refs } -// collectImports returns all the imports in f, keyed by their package name as -// determined by pathToName. Unnamed imports (., _) and "C" are ignored. -func collectImports(f *ast.File) []*importInfo { - var imports []*importInfo +// collectImports returns all the imports in f. +// Unnamed imports (., _) and "C" are ignored. +func collectImports(f *ast.File) []*ImportInfo { + var imports []*ImportInfo for _, imp := range f.Imports { var name string if imp.Name != nil { @@ -194,9 +198,9 @@ func collectImports(f *ast.File) []*importInfo { continue } path := strings.Trim(imp.Path.Value, `"`) - imports = append(imports, &importInfo{ - name: name, - importPath: path, + imports = append(imports, &ImportInfo{ + Name: name, + ImportPath: path, }) } return imports @@ -204,9 +208,9 @@ func collectImports(f *ast.File) []*importInfo { // findMissingImport searches pass's candidates for an import that provides // pkg, containing all of syms. -func (p *pass) findMissingImport(pkg string, syms map[string]bool) *importInfo { +func (p *pass) findMissingImport(pkg string, syms map[string]bool) *ImportInfo { for _, candidate := range p.candidates { - pkgInfo, ok := p.knownPackages[candidate.importPath] + pkgInfo, ok := p.knownPackages[candidate.ImportPath] if !ok { continue } @@ -246,18 +250,18 @@ type pass struct { otherFiles []*ast.File // sibling files. // Intermediate state, generated by load. - existingImports map[string]*importInfo + existingImports map[string]*ImportInfo allRefs references missingRefs references // Inputs to fix. These can be augmented between successive fix calls. 
lastTry bool // indicates that this is the last call and fix should clean up as best it can. - candidates []*importInfo // candidate imports in priority order. + candidates []*ImportInfo // candidate imports in priority order. knownPackages map[string]*packageInfo // information about all known packages. } // loadPackageNames saves the package names for everything referenced by imports. -func (p *pass) loadPackageNames(imports []*importInfo) error { +func (p *pass) loadPackageNames(imports []*ImportInfo) error { if p.env.Debug { p.env.Logf("loading package names for %v packages", len(imports)) defer func() { @@ -266,10 +270,10 @@ func (p *pass) loadPackageNames(imports []*importInfo) error { } var unknown []string for _, imp := range imports { - if _, ok := p.knownPackages[imp.importPath]; ok { + if _, ok := p.knownPackages[imp.ImportPath]; ok { continue } - unknown = append(unknown, imp.importPath) + unknown = append(unknown, imp.ImportPath) } names, err := p.env.GetResolver().loadPackageNames(unknown, p.srcDir) @@ -289,24 +293,24 @@ func (p *pass) loadPackageNames(imports []*importInfo) error { // importIdentifier returns the identifier that imp will introduce. It will // guess if the package name has not been loaded, e.g. because the source // is not available. -func (p *pass) importIdentifier(imp *importInfo) string { - if imp.name != "" { - return imp.name +func (p *pass) importIdentifier(imp *ImportInfo) string { + if imp.Name != "" { + return imp.Name } - known := p.knownPackages[imp.importPath] + known := p.knownPackages[imp.ImportPath] if known != nil && known.name != "" { return known.name } - return importPathToAssumedName(imp.importPath) + return importPathToAssumedName(imp.ImportPath) } // load reads in everything necessary to run a pass, and reports whether the // file already has all the imports it needs. It fills in p.missingRefs with the // file's missing symbols, if any, or removes unused imports if not. 
-func (p *pass) load() ([]*importFix, bool) { +func (p *pass) load() ([]*ImportFix, bool) { p.knownPackages = map[string]*packageInfo{} p.missingRefs = references{} - p.existingImports = map[string]*importInfo{} + p.existingImports = map[string]*ImportInfo{} // Load basic information about the file in question. p.allRefs = collectReferences(p.f) @@ -361,9 +365,9 @@ func (p *pass) load() ([]*importFix, bool) { // fix attempts to satisfy missing imports using p.candidates. If it finds // everything, or if p.lastTry is true, it updates fixes to add the imports it found, // delete anything unused, and update import names, and returns true. -func (p *pass) fix() ([]*importFix, bool) { +func (p *pass) fix() ([]*ImportFix, bool) { // Find missing imports. - var selected []*importInfo + var selected []*ImportInfo for left, rights := range p.missingRefs { if imp := p.findMissingImport(left, rights); imp != nil { selected = append(selected, imp) @@ -375,7 +379,7 @@ func (p *pass) fix() ([]*importFix, bool) { } // Found everything, or giving up. Add the new imports and remove any unused. - var fixes []*importFix + var fixes []*ImportFix for _, imp := range p.existingImports { // We deliberately ignore globals here, because we can't be sure // they're in the same package. People do things like put multiple @@ -383,32 +387,35 @@ func (p *pass) fix() ([]*importFix, bool) { // remove imports if they happen to have the same name as a var in // a different package. if _, ok := p.allRefs[p.importIdentifier(imp)]; !ok { - fixes = append(fixes, &importFix{ - info: *imp, - fixType: deleteImport, + fixes = append(fixes, &ImportFix{ + StmtInfo: *imp, + IdentName: p.importIdentifier(imp), + FixType: DeleteImport, }) continue } // An existing import may need to update its import name to be correct. 
- if name := p.importSpecName(imp); name != imp.name { - fixes = append(fixes, &importFix{ - info: importInfo{ - name: name, - importPath: imp.importPath, + if name := p.importSpecName(imp); name != imp.Name { + fixes = append(fixes, &ImportFix{ + StmtInfo: ImportInfo{ + Name: name, + ImportPath: imp.ImportPath, }, - fixType: setImportName, + IdentName: p.importIdentifier(imp), + FixType: SetImportName, }) } } for _, imp := range selected { - fixes = append(fixes, &importFix{ - info: importInfo{ - name: p.importSpecName(imp), - importPath: imp.importPath, + fixes = append(fixes, &ImportFix{ + StmtInfo: ImportInfo{ + Name: p.importSpecName(imp), + ImportPath: imp.ImportPath, }, - fixType: addImport, + IdentName: p.importIdentifier(imp), + FixType: AddImport, }) } @@ -419,42 +426,41 @@ func (p *pass) fix() ([]*importFix, bool) { // // When the import identifier matches the assumed import name, the import name does // not appear in the import spec. -func (p *pass) importSpecName(imp *importInfo) string { +func (p *pass) importSpecName(imp *ImportInfo) string { // If we did not load the real package names, or the name is already set, // we just return the existing name. - if !p.loadRealPackageNames || imp.name != "" { - return imp.name + if !p.loadRealPackageNames || imp.Name != "" { + return imp.Name } ident := p.importIdentifier(imp) - if ident == importPathToAssumedName(imp.importPath) { + if ident == importPathToAssumedName(imp.ImportPath) { return "" // ident not needed since the assumed and real names are the same. } return ident } // apply will perform the fixes on f in order. 
-func apply(fset *token.FileSet, f *ast.File, fixes []*importFix) bool { +func apply(fset *token.FileSet, f *ast.File, fixes []*ImportFix) { for _, fix := range fixes { - switch fix.fixType { - case deleteImport: - astutil.DeleteNamedImport(fset, f, fix.info.name, fix.info.importPath) - case addImport: - astutil.AddNamedImport(fset, f, fix.info.name, fix.info.importPath) - case setImportName: + switch fix.FixType { + case DeleteImport: + astutil.DeleteNamedImport(fset, f, fix.StmtInfo.Name, fix.StmtInfo.ImportPath) + case AddImport: + astutil.AddNamedImport(fset, f, fix.StmtInfo.Name, fix.StmtInfo.ImportPath) + case SetImportName: // Find the matching import path and change the name. for _, spec := range f.Imports { - path := strings.Trim(spec.Path.Value, `""`) - if path == fix.info.importPath { + path := strings.Trim(spec.Path.Value, `"`) + if path == fix.StmtInfo.ImportPath { spec.Name = &ast.Ident{ - Name: fix.info.name, + Name: fix.StmtInfo.Name, NamePos: spec.Pos(), } } } } } - return true } // assumeSiblingImportsValid assumes that siblings' use of packages is valid, @@ -463,15 +469,15 @@ func (p *pass) assumeSiblingImportsValid() { for _, f := range p.otherFiles { refs := collectReferences(f) imports := collectImports(f) - importsByName := map[string]*importInfo{} + importsByName := map[string]*ImportInfo{} for _, imp := range imports { importsByName[p.importIdentifier(imp)] = imp } for left, rights := range refs { if imp, ok := importsByName[left]; ok { - if _, ok := stdlib[imp.importPath]; ok { + if _, ok := stdlib[imp.ImportPath]; ok { // We have the stdlib in memory; no need to guess. - rights = stdlib[imp.importPath] + rights = stdlib[imp.ImportPath] } p.addCandidate(imp, &packageInfo{ // no name; we already know it. @@ -484,9 +490,9 @@ func (p *pass) assumeSiblingImportsValid() { // addCandidate adds a candidate import to p, and merges in the information // in pkg. 
-func (p *pass) addCandidate(imp *importInfo, pkg *packageInfo) { +func (p *pass) addCandidate(imp *ImportInfo, pkg *packageInfo) { p.candidates = append(p.candidates, imp) - if existing, ok := p.knownPackages[imp.importPath]; ok { + if existing, ok := p.knownPackages[imp.ImportPath]; ok { if existing.name == "" { existing.name = pkg.name } @@ -494,7 +500,7 @@ func (p *pass) addCandidate(imp *importInfo, pkg *packageInfo) { existing.exports[export] = true } } else { - p.knownPackages[imp.importPath] = pkg + p.knownPackages[imp.ImportPath] = pkg } } @@ -514,9 +520,9 @@ func fixImportsDefault(fset *token.FileSet, f *ast.File, filename string, env *P return err } -// getFixes gets the getFixes that need to be made to f in order to fix the imports. +// getFixes gets the import fixes that need to be made to f in order to fix the imports. // It does not modify the ast. -func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) ([]*importFix, error) { +func getFixes(fset *token.FileSet, f *ast.File, filename string, env *ProcessEnv) ([]*ImportFix, error) { abs, err := filepath.Abs(filename) if err != nil { return nil, err @@ -682,7 +688,7 @@ func cmdDebugStr(cmd *exec.Cmd) string { func addStdlibCandidates(pass *pass, refs references) { add := func(pkg string) { pass.addCandidate( - &importInfo{importPath: pkg}, + &ImportInfo{ImportPath: pkg}, &packageInfo{name: path.Base(pkg), exports: stdlib[pkg]}) } for left := range refs { @@ -714,6 +720,9 @@ type goPackagesResolver struct { } func (r *goPackagesResolver) loadPackageNames(importPaths []string, srcDir string) (map[string]string, error) { + if len(importPaths) == 0 { + return nil, nil + } cfg := r.env.newPackagesConfig(packages.LoadFiles) pkgs, err := packages.Load(cfg, importPaths...) if err != nil { @@ -765,7 +774,7 @@ func addExternalCandidates(pass *pass, refs references, filename string) error { // Search for imports matching potential package references. 
type result struct { - imp *importInfo + imp *ImportInfo pkg *packageInfo } results := make(chan result, len(refs)) @@ -799,8 +808,8 @@ func addExternalCandidates(pass *pass, refs references, filename string) error { return // No matching package. } - imp := &importInfo{ - importPath: found.importPathShort, + imp := &ImportInfo{ + ImportPath: found.importPathShort, } pkg := &packageInfo{ @@ -867,7 +876,7 @@ func (r *gopathResolver) loadPackageNames(importPaths []string, srcDir string) ( return names, nil } -// importPathToNameGoPath finds out the actual package name, as declared in its .go files. +// importPathToName finds out the actual package name, as declared in its .go files. // If there's a problem, it returns "". func importPathToName(env *ProcessEnv, importPath, srcDir string) (packageName string) { // Fast path for standard library without going to disk. @@ -887,8 +896,8 @@ func importPathToName(env *ProcessEnv, importPath, srcDir string) (packageName s } // packageDirToName is a faster version of build.Import if -// the only thing desired is the package name. It uses build.FindOnly -// to find the directory and then only parses one file in the package, +// the only thing desired is the package name. Given a directory, +// packageDirToName then only parses one file in the package, // trusting that the files in the directory are consistent. 
func packageDirToName(dir string) (packageName string, err error) { d, err := os.Open(dir) diff --git a/vendor/golang.org/x/tools/internal/imports/fix_test.go b/vendor/golang.org/x/tools/internal/imports/fix_test.go index 46145cf5e1..d15b6dd248 100644 --- a/vendor/golang.org/x/tools/internal/imports/fix_test.go +++ b/vendor/golang.org/x/tools/internal/imports/fix_test.go @@ -1306,7 +1306,6 @@ var ( "myotherpackage/toformat.go": input, }, }, - goPackagesIncompatible: true, // https://golang.org/issue/33175 }.processTest(t, "golang.org/fake", "myotherpackage/toformat.go", nil, nil, want) } @@ -1435,7 +1434,6 @@ var _ = race.Acquire "bar/x.go": input, }, }, - goPackagesIncompatible: true, // https://golang.org/issue/33175 }.processTest(t, "foo.com", "bar/x.go", nil, nil, importAdded) // Packages outside the same directory should not. @@ -1648,7 +1646,6 @@ const Y = bar.X "test/t.go": input, }, }, - goPackagesIncompatible: true, // https://golang.org/issue/33175 }.processTest(t, "foo.com", "test/t.go", nil, nil, want) } @@ -1836,7 +1833,6 @@ const Y = foo.X "x/x.go": input, }, }, - goPackagesIncompatible: true, // https://golang.org/issue/33175 }.processTest(t, "foo.com", "x/x.go", nil, nil, want) } @@ -2143,7 +2139,6 @@ var _ = pkg.DoIt "pkg/x_test.go": input, }, }, - goPackagesIncompatible: true, // https://golang.org/issue/33175 }.processTest(t, "foo.com", "pkg/x_test.go", nil, nil, want) } diff --git a/vendor/golang.org/x/tools/internal/imports/imports.go b/vendor/golang.org/x/tools/internal/imports/imports.go index a47a815f58..acf1461b03 100644 --- a/vendor/golang.org/x/tools/internal/imports/imports.go +++ b/vendor/golang.org/x/tools/internal/imports/imports.go @@ -13,6 +13,7 @@ import ( "bytes" "fmt" "go/ast" + "go/build" "go/format" "go/parser" "go/printer" @@ -42,18 +43,10 @@ type Options struct { } // Process implements golang.org/x/tools/imports.Process with explicit context in env. 
-func Process(filename string, src []byte, opt *Options) ([]byte, error) { - if src == nil { - b, err := ioutil.ReadFile(filename) - if err != nil { - return nil, err - } - src = b - } - - // Set the logger if the user has not provided it. - if opt.Env.Logf == nil { - opt.Env.Logf = log.Printf +func Process(filename string, src []byte, opt *Options) (formatted []byte, err error) { + src, err = initialize(filename, src, opt) + if err != nil { + return nil, err } fileSet := token.NewFileSet() @@ -67,7 +60,85 @@ func Process(filename string, src []byte, opt *Options) ([]byte, error) { return nil, err } } + return formatFile(fileSet, file, src, adjust, opt) +} +// FixImports returns a list of fixes to the imports that, when applied, +// will leave the imports in the same state as Process. +// +// Note that filename's directory influences which imports can be chosen, +// so it is important that filename be accurate. +func FixImports(filename string, src []byte, opt *Options) (fixes []*ImportFix, err error) { + src, err = initialize(filename, src, opt) + if err != nil { + return nil, err + } + + fileSet := token.NewFileSet() + file, _, err := parse(fileSet, filename, src, opt) + if err != nil { + return nil, err + } + + return getFixes(fileSet, file, filename, opt.Env) +} + +// ApplyFix will apply all of the fixes to the file and format it. +func ApplyFixes(fixes []*ImportFix, filename string, src []byte, opt *Options) (formatted []byte, err error) { + src, err = initialize(filename, src, opt) + if err != nil { + return nil, err + } + + fileSet := token.NewFileSet() + file, adjust, err := parse(fileSet, filename, src, opt) + if err != nil { + return nil, err + } + + // Apply the fixes to the file. + apply(fileSet, file, fixes) + + return formatFile(fileSet, file, src, adjust, opt) +} + +// initialize sets the values for opt and src. +// If they are provided, they are not changed. Otherwise opt is set to the +// default values and src is read from the file system. 
+func initialize(filename string, src []byte, opt *Options) ([]byte, error) { + // Use defaults if opt is nil. + if opt == nil { + opt = &Options{ + Env: &ProcessEnv{ + GOPATH: build.Default.GOPATH, + GOROOT: build.Default.GOROOT, + }, + AllErrors: opt.AllErrors, + Comments: opt.Comments, + FormatOnly: opt.FormatOnly, + Fragment: opt.Fragment, + TabIndent: opt.TabIndent, + TabWidth: opt.TabWidth, + } + } + + // Set the logger if the user has not provided it. + if opt.Env.Logf == nil { + opt.Env.Logf = log.Printf + } + + if src == nil { + b, err := ioutil.ReadFile(filename) + if err != nil { + return nil, err + } + src = b + } + + return src, nil +} + +func formatFile(fileSet *token.FileSet, file *ast.File, src []byte, adjust func(orig []byte, src []byte) []byte, opt *Options) ([]byte, error) { sortImports(opt.Env, fileSet, file) imps := astutil.Imports(fileSet, file) var spacesBefore []string // import paths we need spaces before @@ -95,7 +166,7 @@ func Process(filename string, src []byte, opt *Options) ([]byte, error) { printConfig := &printer.Config{Mode: printerMode, Tabwidth: opt.TabWidth} var buf bytes.Buffer - err = printConfig.Fprint(&buf, fileSet, file) + err := printConfig.Fprint(&buf, fileSet, file) if err != nil { return nil, err } diff --git a/vendor/golang.org/x/tools/internal/lsp/cache/check.go b/vendor/golang.org/x/tools/internal/lsp/cache/check.go index 67e29a9289..3bc6f05ad2 100644 --- a/vendor/golang.org/x/tools/internal/lsp/cache/check.go +++ b/vendor/golang.org/x/tools/internal/lsp/cache/check.go @@ -6,7 +6,6 @@ package cache import ( "context" - "fmt" "go/ast" "go/scanner" "go/token" @@ -20,6 +19,7 @@ import ( "golang.org/x/tools/internal/lsp/telemetry/log" "golang.org/x/tools/internal/lsp/telemetry/trace" "golang.org/x/tools/internal/span" + errors "golang.org/x/xerrors" ) type importer struct { @@ -40,7 +40,7 @@ func (imp *importer) Import(pkgPath string) (*types.Package, error) { ctx := imp.ctx id, ok := 
imp.view.mcache.ids[packagePath(pkgPath)] if !ok { - return nil, fmt.Errorf("no known ID for %s", pkgPath) + return nil, errors.Errorf("no known ID for %s", pkgPath) } pkg, err := imp.getPkg(ctx, id) if err != nil { @@ -51,7 +51,7 @@ func (imp *importer) Import(pkgPath string) (*types.Package, error) { func (imp *importer) getPkg(ctx context.Context, id packageID) (*pkg, error) { if _, ok := imp.seen[id]; ok { - return nil, fmt.Errorf("circular import detected") + return nil, errors.Errorf("circular import detected") } imp.view.pcache.mu.Lock() e, ok := imp.view.pcache.packages[id] @@ -95,11 +95,11 @@ func (imp *importer) getPkg(ctx context.Context, id packageID) (*pkg, error) { } func (imp *importer) typeCheck(ctx context.Context, id packageID) (*pkg, error) { - ctx, done := trace.StartSpan(ctx, "cache.importer.typeCheck") + ctx, done := trace.StartSpan(ctx, "cache.importer.typeCheck", telemetry.Package.Of(id)) defer done() meta, ok := imp.view.mcache.packages[id] if !ok { - return nil, fmt.Errorf("no metadata for %v", id) + return nil, errors.Errorf("no metadata for %v", id) } pkg := &pkg{ id: meta.id, @@ -123,9 +123,9 @@ func (imp *importer) typeCheck(ctx context.Context, id packageID) (*pkg, error) mode = source.ParseExported } var ( - files = make([]*ast.File, len(meta.files)) - errors = make([]error, len(meta.files)) - wg sync.WaitGroup + files = make([]*ast.File, len(meta.files)) + parseErrors = make([]error, len(meta.files)) + wg sync.WaitGroup ) for _, filename := range meta.files { uri := span.FileURI(filename) @@ -141,19 +141,12 @@ func (imp *importer) typeCheck(ctx context.Context, id packageID) (*pkg, error) go func(i int, ph source.ParseGoHandle) { defer wg.Done() - files[i], errors[i] = ph.Parse(ctx) + files[i], parseErrors[i] = ph.Parse(ctx) }(i, ph) } wg.Wait() - var i int - for _, f := range files { - if f != nil { - files[i] = f - i++ - } - } - for _, err := range errors { + for _, err := range parseErrors { if err == context.Canceled { return 
nil, err } @@ -162,11 +155,20 @@ func (imp *importer) typeCheck(ctx context.Context, id packageID) (*pkg, error) } } + var i int + for _, f := range files { + if f != nil { + files[i] = f + i++ + } + } + files = files[:i] + // Use the default type information for the unsafe package. if meta.pkgPath == "unsafe" { pkg.types = types.Unsafe } else if len(files) == 0 { // not the unsafe package, no parsed files - return nil, fmt.Errorf("no parsed files for package %s", pkg.pkgPath) + return nil, errors.Errorf("no parsed files for package %s", pkg.pkgPath) } else { pkg.types = types.NewPackage(string(meta.pkgPath), meta.name) } @@ -209,14 +211,14 @@ func (imp *importer) cachePackage(ctx context.Context, pkg *pkg, meta *metadata, uri := ph.File().Identity().URI f, err := imp.view.getFile(ctx, uri) if err != nil { - return fmt.Errorf("no such file %s: %v", uri, err) + return errors.Errorf("no such file %s: %v", uri, err) } gof, ok := f.(*goFile) if !ok { - return fmt.Errorf("non Go file %s", uri) + return errors.Errorf("non Go file %s", uri) } if err := imp.cachePerFile(gof, ph, pkg); err != nil { - return fmt.Errorf("failed to cache file %s: %v", gof.URI(), err) + return errors.Errorf("failed to cache file %s: %v", gof.URI(), err) } } @@ -246,7 +248,7 @@ func (imp *importer) cachePerFile(gof *goFile, ph source.ParseGoHandle, p *pkg) file, err := ph.Parse(imp.ctx) if file == nil { - return fmt.Errorf("no AST for %s: %v", ph.File().Identity().URI, err) + return errors.Errorf("no AST for %s: %v", ph.File().Identity().URI, err) } gof.imports = file.Imports return nil diff --git a/vendor/golang.org/x/tools/internal/lsp/cache/external.go b/vendor/golang.org/x/tools/internal/lsp/cache/external.go index 357a1dad9d..b52899ba7f 100644 --- a/vendor/golang.org/x/tools/internal/lsp/cache/external.go +++ b/vendor/golang.org/x/tools/internal/lsp/cache/external.go @@ -10,6 +10,7 @@ import ( "os" "golang.org/x/tools/internal/lsp/source" + "golang.org/x/tools/internal/lsp/telemetry" 
"golang.org/x/tools/internal/lsp/telemetry/trace" "golang.org/x/tools/internal/span" ) @@ -51,7 +52,7 @@ func (h *nativeFileHandle) Kind() source.FileKind { } func (h *nativeFileHandle) Read(ctx context.Context) ([]byte, string, error) { - ctx, done := trace.StartSpan(ctx, "cache.nativeFileHandle.Read") + ctx, done := trace.StartSpan(ctx, "cache.nativeFileHandle.Read", telemetry.File.Of(h.identity.URI.Filename())) defer done() //TODO: this should fail if the version is not the same as the handle data, err := ioutil.ReadFile(h.identity.URI.Filename()) diff --git a/vendor/golang.org/x/tools/internal/lsp/cache/gofile.go b/vendor/golang.org/x/tools/internal/lsp/cache/gofile.go index a347a6716f..0aae9c9ef7 100644 --- a/vendor/golang.org/x/tools/internal/lsp/cache/gofile.go +++ b/vendor/golang.org/x/tools/internal/lsp/cache/gofile.go @@ -6,7 +6,6 @@ package cache import ( "context" - "fmt" "go/ast" "go/token" "sync" @@ -15,6 +14,7 @@ import ( "golang.org/x/tools/internal/lsp/telemetry" "golang.org/x/tools/internal/lsp/telemetry/log" "golang.org/x/tools/internal/span" + errors "golang.org/x/xerrors" ) // goFile holds all of the information we know about a Go file. 
@@ -53,7 +53,11 @@ func (f *goFile) GetToken(ctx context.Context) (*token.File, error) { if file == nil { return nil, err } - return f.view.session.cache.fset.File(file.Pos()), nil + tok := f.view.session.cache.fset.File(file.Pos()) + if tok == nil { + return nil, errors.Errorf("no token.File for %s", f.URI()) + } + return tok, nil } func (f *goFile) GetAST(ctx context.Context, mode source.ParseMode) (*ast.File, error) { @@ -63,7 +67,7 @@ func (f *goFile) GetAST(ctx context.Context, mode source.ParseMode) (*ast.File, if f.isDirty(ctx) || f.wrongParseMode(ctx, mode) { if _, err := f.view.loadParseTypecheck(ctx, f); err != nil { - return nil, fmt.Errorf("GetAST: unable to check package for %s: %v", f.URI(), err) + return nil, errors.Errorf("GetAST: unable to check package for %s: %v", f.URI(), err) } } fh := f.Handle(ctx) diff --git a/vendor/golang.org/x/tools/internal/lsp/cache/load.go b/vendor/golang.org/x/tools/internal/lsp/cache/load.go index 614c9d6bbd..2fee6666f3 100644 --- a/vendor/golang.org/x/tools/internal/lsp/cache/load.go +++ b/vendor/golang.org/x/tools/internal/lsp/cache/load.go @@ -13,7 +13,9 @@ import ( "golang.org/x/tools/internal/lsp/telemetry" "golang.org/x/tools/internal/lsp/telemetry/log" "golang.org/x/tools/internal/lsp/telemetry/tag" + "golang.org/x/tools/internal/lsp/telemetry/trace" "golang.org/x/tools/internal/span" + errors "golang.org/x/xerrors" ) func (v *view) loadParseTypecheck(ctx context.Context, f *goFile) ([]packages.Error, error) { @@ -54,11 +56,11 @@ func (v *view) loadParseTypecheck(ctx context.Context, f *goFile) ([]packages.Er return nil, err } if pkg == nil || pkg.IsIllTyped() { - return nil, fmt.Errorf("loadParseTypecheck: %s is ill typed", m.pkgPath) + return nil, errors.Errorf("loadParseTypecheck: %s is ill typed", m.pkgPath) } } if len(f.pkgs) == 0 { - return nil, fmt.Errorf("no packages found for %s", f.URI()) + return nil, errors.Errorf("no packages found for %s", f.URI()) } return nil, nil } @@ -87,10 +89,12 @@ func (v 
*view) checkMetadata(ctx context.Context, f *goFile) (map[packageID]*met return nil, nil, ctx.Err() } + ctx, done := trace.StartSpan(ctx, "packages.Load", telemetry.File.Of(f.filename())) + defer done() pkgs, err := packages.Load(v.Config(ctx), fmt.Sprintf("file=%s", f.filename())) if len(pkgs) == 0 { if err == nil { - err = fmt.Errorf("go/packages.Load: no packages found for %s", f.filename()) + err = errors.Errorf("go/packages.Load: no packages found for %s", f.filename()) } // Return this error as a diagnostic to the user. return nil, []packages.Error{ @@ -109,7 +113,7 @@ func (v *view) checkMetadata(ctx context.Context, f *goFile) (map[packageID]*met // If the package comes back with errors from `go list`, // don't bother type-checking it. if len(pkg.Errors) > 0 { - return nil, pkg.Errors, fmt.Errorf("package %s has errors, skipping type-checking", pkg.PkgPath) + return nil, pkg.Errors, errors.Errorf("package %s has errors, skipping type-checking", pkg.PkgPath) } // Build the import graph for this package. if err := v.link(ctx, packagePath(pkg.PkgPath), pkg, nil, missingImports); err != nil { @@ -129,7 +133,7 @@ func validateMetadata(ctx context.Context, missingImports map[packagePath]struct // If `go list` failed to get data for the file in question (this should never happen). if len(f.meta) == 0 { - return nil, fmt.Errorf("loadParseTypecheck: no metadata found for %v", f.filename()) + return nil, errors.Errorf("loadParseTypecheck: no metadata found for %v", f.filename()) } // If we have already seen these missing imports before, and we have type information, @@ -251,7 +255,7 @@ func (v *view) link(ctx context.Context, pkgPath packagePath, pkg *packages.Pack for importPath, importPkg := range pkg.Imports { importPkgPath := packagePath(importPath) if importPkgPath == pkgPath { - return fmt.Errorf("cycle detected in %s", importPath) + return errors.Errorf("cycle detected in %s", importPath) } // Don't remember any imports with significant errors. 
if importPkgPath != "unsafe" && len(pkg.CompiledGoFiles) == 0 { diff --git a/vendor/golang.org/x/tools/internal/lsp/cache/modfile.go b/vendor/golang.org/x/tools/internal/lsp/cache/modfile.go index a86a3daf80..883dba1882 100644 --- a/vendor/golang.org/x/tools/internal/lsp/cache/modfile.go +++ b/vendor/golang.org/x/tools/internal/lsp/cache/modfile.go @@ -6,8 +6,9 @@ package cache import ( "context" - "fmt" "go/token" + + errors "golang.org/x/xerrors" ) // modFile holds all of the information we know about a mod file. @@ -16,7 +17,7 @@ type modFile struct { } func (*modFile) GetToken(context.Context) (*token.File, error) { - return nil, fmt.Errorf("GetToken: not implemented") + return nil, errors.Errorf("GetToken: not implemented") } func (*modFile) setContent(content []byte) {} diff --git a/vendor/golang.org/x/tools/internal/lsp/cache/parse.go b/vendor/golang.org/x/tools/internal/lsp/cache/parse.go index 7b547f844c..b842f8452c 100644 --- a/vendor/golang.org/x/tools/internal/lsp/cache/parse.go +++ b/vendor/golang.org/x/tools/internal/lsp/cache/parse.go @@ -6,19 +6,20 @@ package cache import ( "context" - "fmt" "go/ast" "go/parser" "go/scanner" "go/token" "golang.org/x/tools/internal/lsp/source" + "golang.org/x/tools/internal/lsp/telemetry" "golang.org/x/tools/internal/lsp/telemetry/trace" "golang.org/x/tools/internal/memoize" + errors "golang.org/x/xerrors" ) -// Limits the number of parallel parser calls per process. -var parseLimit = make(chan bool, 20) +// Limits the number of parallel file reads per process. +var ioLimit = make(chan struct{}, 20) // parseKey uniquely identifies a parsed Go file. 
type parseKey struct { @@ -74,17 +75,19 @@ func (h *parseGoHandle) Parse(ctx context.Context) (*ast.File, error) { } func parseGo(ctx context.Context, c *cache, fh source.FileHandle, mode source.ParseMode) (*ast.File, error) { - ctx, done := trace.StartSpan(ctx, "cache.parseGo") + ctx, done := trace.StartSpan(ctx, "cache.parseGo", telemetry.File.Of(fh.Identity().URI.Filename())) defer done() + + ioLimit <- struct{}{} buf, _, err := fh.Read(ctx) + <-ioLimit // Make sure to release the token, even when an error is returned. if err != nil { return nil, err } - parseLimit <- true - defer func() { <-parseLimit }() + parserMode := parser.AllErrors | parser.ParseComments if mode == source.ParseHeader { - parserMode = parser.ImportsOnly + parserMode = parser.ImportsOnly | parser.ParseComments } ast, err := parser.ParseFile(c.fset, fh.Identity().URI.Filename(), buf, parserMode) if ast != nil { @@ -140,8 +143,8 @@ func isEllipsisArray(n ast.Expr) bool { return ok } -// fix inspects and potentially modifies any *ast.BadStmts or *ast.BadExprs in the AST. -// We attempt to modify the AST such that we can type-check it more effectively. +// fix inspects the AST and potentially modifies any *ast.BadStmts so that it can be +// type-checked more effectively. 
func fix(ctx context.Context, file *ast.File, tok *token.File, src []byte) error { var parent ast.Node var err error @@ -153,7 +156,7 @@ func fix(ctx context.Context, file *ast.File, tok *token.File, src []byte) error case *ast.BadStmt: err = parseDeferOrGoStmt(n, parent, tok, src) // don't shadow err if err != nil { - err = fmt.Errorf("unable to parse defer or go from *ast.BadStmt: %v", err) + err = errors.Errorf("unable to parse defer or go from *ast.BadStmt: %v", err) } return false default: @@ -181,7 +184,7 @@ func parseDeferOrGoStmt(bad *ast.BadStmt, parent ast.Node, tok *token.File, src var lit string for { if tkn == token.EOF { - return fmt.Errorf("reached the end of the file") + return errors.Errorf("reached the end of the file") } if pos >= bad.From { break @@ -199,7 +202,7 @@ func parseDeferOrGoStmt(bad *ast.BadStmt, parent ast.Node, tok *token.File, src Go: pos, } default: - return fmt.Errorf("no defer or go statement found") + return errors.Errorf("no defer or go statement found") } // The expression after the "defer" or "go" starts at this position. @@ -207,7 +210,7 @@ func parseDeferOrGoStmt(bad *ast.BadStmt, parent ast.Node, tok *token.File, src var to, curr token.Pos FindTo: for { - curr, tkn, lit = s.Scan() + curr, tkn, _ = s.Scan() // TODO(rstambler): This still needs more handling to work correctly. 
// We encounter a specific issue with code that looks like this: // @@ -224,15 +227,15 @@ FindTo: to = curr } if !from.IsValid() || tok.Offset(from) >= len(src) { - return fmt.Errorf("invalid from position") + return errors.Errorf("invalid from position") } if !to.IsValid() || tok.Offset(to)+1 >= len(src) { - return fmt.Errorf("invalid to position") + return errors.Errorf("invalid to position") } exprstr := string(src[tok.Offset(from) : tok.Offset(to)+1]) expr, err := parser.ParseExpr(exprstr) if expr == nil { - return fmt.Errorf("no expr in %s: %v", exprstr, err) + return errors.Errorf("no expr in %s: %v", exprstr, err) } // parser.ParseExpr returns undefined positions. // Adjust them for the current file. diff --git a/vendor/golang.org/x/tools/internal/lsp/cache/session.go b/vendor/golang.org/x/tools/internal/lsp/cache/session.go index 2eacabb83b..3511bb3fec 100644 --- a/vendor/golang.org/x/tools/internal/lsp/cache/session.go +++ b/vendor/golang.org/x/tools/internal/lsp/cache/session.go @@ -6,7 +6,6 @@ package cache import ( "context" - "fmt" "os" "sort" "strconv" @@ -21,6 +20,7 @@ import ( "golang.org/x/tools/internal/lsp/telemetry/trace" "golang.org/x/tools/internal/span" "golang.org/x/tools/internal/xcontext" + errors "golang.org/x/xerrors" ) type session struct { @@ -178,7 +178,7 @@ func (s *session) removeView(ctx context.Context, view *view) error { return nil } } - return fmt.Errorf("view %s for %v not found", view.Name(), view.Folder()) + return errors.Errorf("view %s for %v not found", view.Name(), view.Folder()) } // TODO: Propagate the language ID through to the view. 
diff --git a/vendor/golang.org/x/tools/internal/lsp/cache/sumfile.go b/vendor/golang.org/x/tools/internal/lsp/cache/sumfile.go index 4dd7822cdb..21d313cb50 100644 --- a/vendor/golang.org/x/tools/internal/lsp/cache/sumfile.go +++ b/vendor/golang.org/x/tools/internal/lsp/cache/sumfile.go @@ -6,8 +6,9 @@ package cache import ( "context" - "fmt" "go/token" + + errors "golang.org/x/xerrors" ) // sumFile holds all of the information we know about a sum file. @@ -16,7 +17,7 @@ type sumFile struct { } func (*sumFile) GetToken(context.Context) (*token.File, error) { - return nil, fmt.Errorf("GetToken: not implemented") + return nil, errors.Errorf("GetToken: not implemented") } func (*sumFile) setContent(content []byte) {} diff --git a/vendor/golang.org/x/tools/internal/lsp/cache/token.go b/vendor/golang.org/x/tools/internal/lsp/cache/token.go index 930a4d1ee9..aa6abc1ebc 100644 --- a/vendor/golang.org/x/tools/internal/lsp/cache/token.go +++ b/vendor/golang.org/x/tools/internal/lsp/cache/token.go @@ -6,11 +6,11 @@ package cache import ( "context" - "fmt" "go/token" "golang.org/x/tools/internal/lsp/source" "golang.org/x/tools/internal/memoize" + errors "golang.org/x/xerrors" ) type tokenKey struct { @@ -87,7 +87,7 @@ func tokenFile(ctx context.Context, c *cache, fh source.FileHandle) (*token.File } tok := c.FileSet().AddFile(fh.Identity().URI.Filename(), -1, len(buf)) if tok == nil { - return nil, fmt.Errorf("no token.File for %s", fh.Identity().URI) + return nil, errors.Errorf("no token.File for %s", fh.Identity().URI) } tok.SetLinesForContent(buf) return tok, nil diff --git a/vendor/golang.org/x/tools/internal/lsp/cache/view.go b/vendor/golang.org/x/tools/internal/lsp/cache/view.go index 14687d27f1..4f529f1605 100644 --- a/vendor/golang.org/x/tools/internal/lsp/cache/view.go +++ b/vendor/golang.org/x/tools/internal/lsp/cache/view.go @@ -55,8 +55,17 @@ type view struct { // process is the process env for this view. // Note: this contains cached module and filesystem state. 
+ // + // TODO(suzmue): the state cached in the process env is specific to each view, + // however, there is state that can be shared between views that is not currently + // cached, like the module cache. processEnv *imports.ProcessEnv + // modFileVersions stores the last seen versions of the module files that are used + // by processEnvs resolver. + // TODO(suzmue): These versions may not actually be on disk. + modFileVersions map[string]string + // buildFlags is the build flags to use when invoking underlying tools. buildFlags []string @@ -144,14 +153,37 @@ func (v *view) Config(ctx context.Context) *packages.Config { } } -func (v *view) ProcessEnv(ctx context.Context) *imports.ProcessEnv { +func (v *view) RunProcessEnvFunc(ctx context.Context, fn func(*imports.Options) error, opts *imports.Options) error { v.mu.Lock() defer v.mu.Unlock() - if v.processEnv == nil { v.processEnv = v.buildProcessEnv(ctx) } - return v.processEnv + + // Before running the user provided function, clear caches in the resolver. + if v.modFilesChanged() { + if r, ok := v.processEnv.GetResolver().(*imports.ModuleResolver); ok { + // Clear the resolver cache and set Initialized to false. + r.Initialized = false + r.Main = nil + r.ModsByModPath = nil + r.ModsByDir = nil + // Reset the modFileVersions. + v.modFileVersions = nil + } + } + + // Run the user function. + opts.Env = v.processEnv + if err := fn(opts); err != nil { + return err + } + + // If applicable, store the file versions of the 'go.mod' files that are + // looked at by the resolver. + v.storeModFileVersions() + + return nil } func (v *view) buildProcessEnv(ctx context.Context) *imports.ProcessEnv { @@ -185,6 +217,41 @@ func (v *view) buildProcessEnv(ctx context.Context) *imports.ProcessEnv { return env } +func (v *view) modFilesChanged() bool { + // Check the versions of the 'go.mod' files of the main module + // and modules included by a replace directive. Return true if + // any of these file versions do not match. 
+ for filename, version := range v.modFileVersions { + if version != v.fileVersion(filename) { + return true + } + } + return false +} + +func (v *view) storeModFileVersions() { + // Store the mod files versions, if we are using a ModuleResolver. + r, moduleMode := v.processEnv.GetResolver().(*imports.ModuleResolver) + if !moduleMode || !r.Initialized { + return + } + v.modFileVersions = make(map[string]string) + + // Get the file versions of the 'go.mod' files of the main module + // and modules included by a replace directive in the resolver. + for _, mod := range r.ModsByModPath { + if (mod.Main || mod.Replace != nil) && mod.GoMod != "" { + v.modFileVersions[mod.GoMod] = v.fileVersion(mod.GoMod) + } + } +} + +func (v *view) fileVersion(filename string) string { + uri := span.FileURI(filename) + f := v.session.GetFile(uri) + return f.Identity().Version +} + func (v *view) Env() []string { v.mu.Lock() defer v.mu.Unlock() @@ -442,7 +509,8 @@ func (v *view) findFile(uri span.URI) (viewFile, error) { pathStat, err := os.Stat(fname) if os.IsNotExist(err) { return nil, err - } else if err != nil { + } + if err != nil { return nil, nil // the file may exist, return without an error } for _, c := range candidates { diff --git a/vendor/golang.org/x/tools/internal/lsp/cmd/check.go b/vendor/golang.org/x/tools/internal/lsp/cmd/check.go index 0aa07faf89..f7786648be 100644 --- a/vendor/golang.org/x/tools/internal/lsp/cmd/check.go +++ b/vendor/golang.org/x/tools/internal/lsp/cmd/check.go @@ -11,6 +11,7 @@ import ( "time" "golang.org/x/tools/internal/span" + errors "golang.org/x/xerrors" ) // check implements the check verb for gopls. 
@@ -60,14 +61,14 @@ func (c *check) Run(ctx context.Context, args ...string) error { select { case <-file.hasDiagnostics: case <-time.After(30 * time.Second): - return fmt.Errorf("timed out waiting for results from %v", file.uri) + return errors.Errorf("timed out waiting for results from %v", file.uri) } file.diagnosticsMu.Lock() defer file.diagnosticsMu.Unlock() for _, d := range file.diagnostics { spn, err := file.mapper.RangeSpan(d.Range) if err != nil { - return fmt.Errorf("Could not convert position %v for %q", d.Range, d.Message) + return errors.Errorf("Could not convert position %v for %q", d.Range, d.Message) } fmt.Printf("%v: %v\n", spn, d.Message) } diff --git a/vendor/golang.org/x/tools/internal/lsp/cmd/cmd.go b/vendor/golang.org/x/tools/internal/lsp/cmd/cmd.go index 66ac3d96ae..dbc0ad5e07 100644 --- a/vendor/golang.org/x/tools/internal/lsp/cmd/cmd.go +++ b/vendor/golang.org/x/tools/internal/lsp/cmd/cmd.go @@ -28,6 +28,7 @@ import ( "golang.org/x/tools/internal/span" "golang.org/x/tools/internal/tool" "golang.org/x/tools/internal/xcontext" + errors "golang.org/x/xerrors" ) // Application is the main application as passed to tool.Main @@ -324,7 +325,7 @@ func (c *cmdClient) PublishDiagnostics(ctx context.Context, p *protocol.PublishD func (c *cmdClient) getFile(ctx context.Context, uri span.URI) *cmdFile { file, found := c.files[uri] - if !found { + if !found || file.err != nil { file = &cmdFile{ uri: uri, hasDiagnostics: make(chan struct{}), @@ -335,7 +336,7 @@ func (c *cmdClient) getFile(ctx context.Context, uri span.URI) *cmdFile { fname := uri.Filename() content, err := ioutil.ReadFile(fname) if err != nil { - file.err = fmt.Errorf("%v: %v", uri, err) + file.err = errors.Errorf("getFile: %v: %v", uri, err) return file } f := c.fset.AddFile(fname, -1, len(content)) @@ -350,16 +351,24 @@ func (c *connection) AddFile(ctx context.Context, uri span.URI) *cmdFile { defer c.Client.filesMu.Unlock() file := c.Client.getFile(ctx, uri) - if !file.added { - 
file.added = true - p := &protocol.DidOpenTextDocumentParams{} - p.TextDocument.URI = string(uri) - p.TextDocument.Text = string(file.mapper.Content) - p.TextDocument.LanguageID = source.DetectLanguage("", file.uri.Filename()).String() - if err := c.Server.DidOpen(ctx, p); err != nil { - file.err = fmt.Errorf("%v: %v", uri, err) + // This should never happen. + if file == nil { + return &cmdFile{ + uri: uri, + err: fmt.Errorf("no file found for %s", uri), } } + if file.err != nil || file.added { + return file + } + file.added = true + p := &protocol.DidOpenTextDocumentParams{} + p.TextDocument.URI = string(uri) + p.TextDocument.Text = string(file.mapper.Content) + p.TextDocument.LanguageID = source.DetectLanguage("", file.uri.Filename()).String() + if err := c.Server.DidOpen(ctx, p); err != nil { + file.err = errors.Errorf("%v: %v", uri, err) + } return file } diff --git a/vendor/golang.org/x/tools/internal/lsp/cmd/cmd_race_test.go b/vendor/golang.org/x/tools/internal/lsp/cmd/cmd_race_test.go deleted file mode 100644 index dca3df421a..0000000000 --- a/vendor/golang.org/x/tools/internal/lsp/cmd/cmd_race_test.go +++ /dev/null @@ -1,11 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build race - -package cmd_test - -func init() { - isRace = true -} diff --git a/vendor/golang.org/x/tools/internal/lsp/cmd/cmd_test.go b/vendor/golang.org/x/tools/internal/lsp/cmd/cmd_test.go index 64d74ebe6b..3abc05c730 100644 --- a/vendor/golang.org/x/tools/internal/lsp/cmd/cmd_test.go +++ b/vendor/golang.org/x/tools/internal/lsp/cmd/cmd_test.go @@ -19,8 +19,6 @@ import ( "golang.org/x/tools/internal/lsp/tests" ) -var isRace = false - type runner struct { exporter packagestest.Exporter data *tests.Data diff --git a/vendor/golang.org/x/tools/internal/lsp/cmd/definition.go b/vendor/golang.org/x/tools/internal/lsp/cmd/definition.go index 4c78327459..4a2e6774e2 100644 --- a/vendor/golang.org/x/tools/internal/lsp/cmd/definition.go +++ b/vendor/golang.org/x/tools/internal/lsp/cmd/definition.go @@ -16,6 +16,7 @@ import ( "golang.org/x/tools/internal/lsp/protocol" "golang.org/x/tools/internal/span" "golang.org/x/tools/internal/tool" + errors "golang.org/x/xerrors" ) // A Definition is the result of a 'definition' query. 
@@ -79,26 +80,26 @@ func (d *definition) Run(ctx context.Context, args ...string) error { } locs, err := conn.Definition(ctx, &p) if err != nil { - return fmt.Errorf("%v: %v", from, err) + return errors.Errorf("%v: %v", from, err) } if len(locs) == 0 { - return fmt.Errorf("%v: not an identifier", from) + return errors.Errorf("%v: not an identifier", from) } hover, err := conn.Hover(ctx, &p) if err != nil { - return fmt.Errorf("%v: %v", from, err) + return errors.Errorf("%v: %v", from, err) } if hover == nil { - return fmt.Errorf("%v: not an identifier", from) + return errors.Errorf("%v: not an identifier", from) } file = conn.AddFile(ctx, span.NewURI(locs[0].URI)) if file.err != nil { - return fmt.Errorf("%v: %v", from, file.err) + return errors.Errorf("%v: %v", from, file.err) } definition, err := file.mapper.Span(locs[0]) if err != nil { - return fmt.Errorf("%v: %v", from, err) + return errors.Errorf("%v: %v", from, err) } description := strings.TrimSpace(hover.Contents.Value) var result interface{} @@ -115,7 +116,7 @@ func (d *definition) Run(ctx context.Context, args ...string) error { Desc: description, } default: - return fmt.Errorf("unknown emulation for definition: %s", d.query.Emulate) + return errors.Errorf("unknown emulation for definition: %s", d.query.Emulate) } if err != nil { return err @@ -131,7 +132,7 @@ func (d *definition) Run(ctx context.Context, args ...string) error { case *guru.Definition: fmt.Printf("%s: defined here as %s", d.ObjPos, d.Desc) default: - return fmt.Errorf("no printer for type %T", result) + return errors.Errorf("no printer for type %T", result) } return nil } diff --git a/vendor/golang.org/x/tools/internal/lsp/cmd/format.go b/vendor/golang.org/x/tools/internal/lsp/cmd/format.go index 64375527c2..93d041dbc5 100644 --- a/vendor/golang.org/x/tools/internal/lsp/cmd/format.go +++ b/vendor/golang.org/x/tools/internal/lsp/cmd/format.go @@ -16,6 +16,7 @@ import ( "golang.org/x/tools/internal/lsp/protocol" 
"golang.org/x/tools/internal/lsp/source" "golang.org/x/tools/internal/span" + errors "golang.org/x/xerrors" ) // format implements the format verb for gopls. @@ -68,18 +69,18 @@ func (f *format) Run(ctx context.Context, args ...string) error { return err } if loc.Range.Start != loc.Range.End { - return fmt.Errorf("only full file formatting supported") + return errors.Errorf("only full file formatting supported") } p := protocol.DocumentFormattingParams{ TextDocument: protocol.TextDocumentIdentifier{URI: loc.URI}, } edits, err := conn.Formatting(ctx, &p) if err != nil { - return fmt.Errorf("%v: %v", spn, err) + return errors.Errorf("%v: %v", spn, err) } sedits, err := lsp.FromProtocolEdits(file.mapper, edits) if err != nil { - return fmt.Errorf("%v: %v", spn, err) + return errors.Errorf("%v: %v", spn, err) } ops := source.EditsToDiff(sedits) lines := diff.SplitLines(string(file.mapper.Content)) diff --git a/vendor/golang.org/x/tools/internal/lsp/cmd/serve.go b/vendor/golang.org/x/tools/internal/lsp/cmd/serve.go index 39cf087eda..67d26d232d 100644 --- a/vendor/golang.org/x/tools/internal/lsp/cmd/serve.go +++ b/vendor/golang.org/x/tools/internal/lsp/cmd/serve.go @@ -24,6 +24,7 @@ import ( "golang.org/x/tools/internal/lsp/telemetry/tag" "golang.org/x/tools/internal/lsp/telemetry/trace" "golang.org/x/tools/internal/tool" + errors "golang.org/x/xerrors" ) // Serve is a struct that exposes the configurable parts of the LSP server as @@ -68,7 +69,7 @@ func (s *Serve) Run(ctx context.Context, args ...string) error { } f, err := os.Create(filename) if err != nil { - return fmt.Errorf("Unable to create log file: %v", err) + return errors.Errorf("Unable to create log file: %v", err) } defer f.Close() log.SetOutput(io.MultiWriter(os.Stderr, f)) @@ -124,12 +125,13 @@ type handler struct { } type rpcStats struct { - method string - direction jsonrpc2.Direction - id *jsonrpc2.ID - payload *json.RawMessage - start time.Time - close func() + method string + direction 
jsonrpc2.Direction + id *jsonrpc2.ID + payload *json.RawMessage + start time.Time + delivering func() + close func() } type statsKeyType int @@ -137,6 +139,10 @@ type statsKeyType int const statsKey = statsKeyType(0) func (h *handler) Deliver(ctx context.Context, r *jsonrpc2.Request, delivered bool) bool { + stats := h.getStats(ctx) + if stats != nil { + stats.delivering() + } return false } @@ -165,6 +171,7 @@ func (h *handler) Request(ctx context.Context, direction jsonrpc2.Direction, r * tag.Tag{Key: telemetry.RPCID, Value: r.ID}, ) telemetry.Started.Record(ctx, 1) + _, stats.delivering = trace.StartSpan(ctx, "queued") return ctx } diff --git a/vendor/golang.org/x/tools/internal/lsp/code_action.go b/vendor/golang.org/x/tools/internal/lsp/code_action.go index 9e2a2ecb53..89b9b73955 100644 --- a/vendor/golang.org/x/tools/internal/lsp/code_action.go +++ b/vendor/golang.org/x/tools/internal/lsp/code_action.go @@ -9,37 +9,45 @@ import ( "fmt" "strings" + "golang.org/x/tools/internal/imports" "golang.org/x/tools/internal/lsp/protocol" "golang.org/x/tools/internal/lsp/source" "golang.org/x/tools/internal/lsp/telemetry" "golang.org/x/tools/internal/lsp/telemetry/log" "golang.org/x/tools/internal/span" + errors "golang.org/x/xerrors" ) func (s *Server) codeAction(ctx context.Context, params *protocol.CodeActionParams) ([]protocol.CodeAction, error) { + uri := span.NewURI(params.TextDocument.URI) + view := s.session.ViewOf(uri) + f, m, err := getSourceFile(ctx, view, uri) + if err != nil { + return nil, err + } + + // Determine the supported actions for this file kind. + fileKind := f.Handle(ctx).Kind() + supportedCodeActions, ok := s.supportedCodeActions[fileKind] + if !ok { + return nil, fmt.Errorf("no supported code actions for %v file kind", fileKind) + } // The Only field of the context specifies which code actions the client wants. // If Only is empty, assume that the client wants all of the possible code actions. 
var wanted map[protocol.CodeActionKind]bool if len(params.Context.Only) == 0 { - wanted = s.supportedCodeActions + wanted = supportedCodeActions } else { wanted = make(map[protocol.CodeActionKind]bool) for _, only := range params.Context.Only { - wanted[only] = s.supportedCodeActions[only] + wanted[only] = supportedCodeActions[only] } } - - uri := span.NewURI(params.TextDocument.URI) if len(wanted) == 0 { - return nil, fmt.Errorf("no supported code action to execute for %s, wanted %v", uri, params.Context.Only) + return nil, errors.Errorf("no supported code action to execute for %s, wanted %v", uri, params.Context.Only) } - view := s.session.ViewOf(uri) - gof, m, err := getGoFile(ctx, view, uri) - if err != nil { - return nil, err - } spn, err := m.RangeSpan(params.Range) if err != nil { return nil, err @@ -47,7 +55,7 @@ func (s *Server) codeAction(ctx context.Context, params *protocol.CodeActionPara var codeActions []protocol.CodeAction - edits, err := organizeImports(ctx, view, spn) + edits, editsPerFix, err := organizeImports(ctx, view, spn) if err != nil { return nil, err } @@ -57,6 +65,10 @@ func (s *Server) codeAction(ctx context.Context, params *protocol.CodeActionPara // First, add the quick fixes reported by go/analysis. // TODO: Enable this when this actually works. For now, it's needless work. if s.wantSuggestedFixes { + gof, ok := f.(source.GoFile) + if !ok { + return nil, fmt.Errorf("%s is not a Go file", f.URI()) + } qf, err := quickFixes(ctx, view, gof) if err != nil { log.Error(ctx, "quick fixes failed", err, telemetry.File.Of(uri)) @@ -66,18 +78,23 @@ func (s *Server) codeAction(ctx context.Context, params *protocol.CodeActionPara // If we also have diagnostics for missing imports, we can associate them with quick fixes. if findImportErrors(params.Context.Diagnostics) { - // TODO(rstambler): Separate this into a set of codeActions per diagnostic, - // where each action is the addition or removal of one import. 
- // This can only be done when https://golang.org/issue/31493 is resolved. - codeActions = append(codeActions, protocol.CodeAction{ - Title: "Organize All Imports", // clarify that all imports will change - Kind: protocol.QuickFix, - Edit: &protocol.WorkspaceEdit{ - Changes: &map[string][]protocol.TextEdit{ - string(uri): edits, - }, - }, - }) + // Separate this into a set of codeActions per diagnostic, where + // each action is the addition, removal, or renaming of one import. + for _, importFix := range editsPerFix { + // Get the diagnostics this fix would affect. + if fixDiagnostics := importDiagnostics(importFix.fix, params.Context.Diagnostics); len(fixDiagnostics) > 0 { + codeActions = append(codeActions, protocol.CodeAction{ + Title: importFixTitle(importFix.fix), + Kind: protocol.QuickFix, + Edit: &protocol.WorkspaceEdit{ + Changes: &map[string][]protocol.TextEdit{ + string(uri): importFix.edits, + }, + }, + Diagnostics: fixDiagnostics, + }) + } + } } } @@ -97,16 +114,38 @@ func (s *Server) codeAction(ctx context.Context, params *protocol.CodeActionPara return codeActions, nil } -func organizeImports(ctx context.Context, view source.View, s span.Span) ([]protocol.TextEdit, error) { +type protocolImportFix struct { + fix *imports.ImportFix + edits []protocol.TextEdit +} + +func organizeImports(ctx context.Context, view source.View, s span.Span) ([]protocol.TextEdit, []*protocolImportFix, error) { f, m, rng, err := spanToRange(ctx, view, s) if err != nil { - return nil, err + return nil, nil, err } - edits, err := source.Imports(ctx, view, f, rng) + edits, editsPerFix, err := source.AllImportsFixes(ctx, view, f, rng) if err != nil { - return nil, err + return nil, nil, err } - return ToProtocolEdits(m, edits) + // Convert all source edits to protocol edits. 
+ pEdits, err := ToProtocolEdits(m, edits) + if err != nil { + return nil, nil, err + } + + pEditsPerFix := make([]*protocolImportFix, len(editsPerFix)) + for i, fix := range editsPerFix { + pEdits, err := ToProtocolEdits(m, fix.Edits) + if err != nil { + return nil, nil, err + } + pEditsPerFix[i] = &protocolImportFix{ + fix: fix.Fix, + edits: pEdits, + } + } + return pEdits, pEditsPerFix, nil } // findImports determines if a given diagnostic represents an error that could @@ -123,13 +162,57 @@ func findImportErrors(diagnostics []protocol.Diagnostic) bool { return true } // "X imported but not used" is an unused import. - if strings.HasSuffix(diagnostic.Message, " imported but not used") { + // "X imported but not used as Y" is an unused import. + if strings.Contains(diagnostic.Message, " imported but not used") { return true } } return false } +func importFixTitle(fix *imports.ImportFix) string { + var str string + switch fix.FixType { + case imports.AddImport: + str = fmt.Sprintf("Add import: %s %q", fix.StmtInfo.Name, fix.StmtInfo.ImportPath) + case imports.DeleteImport: + str = fmt.Sprintf("Delete import: %s %q", fix.StmtInfo.Name, fix.StmtInfo.ImportPath) + case imports.SetImportName: + str = fmt.Sprintf("Rename import: %s %q", fix.StmtInfo.Name, fix.StmtInfo.ImportPath) + } + return str +} + +func importDiagnostics(fix *imports.ImportFix, diagnostics []protocol.Diagnostic) (results []protocol.Diagnostic) { + for _, diagnostic := range diagnostics { + switch { + // "undeclared name: X" may be an unresolved import. + case strings.HasPrefix(diagnostic.Message, "undeclared name: "): + ident := strings.TrimPrefix(diagnostic.Message, "undeclared name: ") + if ident == fix.IdentName { + results = append(results, diagnostic) + } + // "could not import: X" may be an invalid import. 
+ case strings.HasPrefix(diagnostic.Message, "could not import: "): + ident := strings.TrimPrefix(diagnostic.Message, "could not import: ") + if ident == fix.IdentName { + results = append(results, diagnostic) + } + // "X imported but not used" is an unused import. + // "X imported but not used as Y" is an unused import. + case strings.Contains(diagnostic.Message, " imported but not used"): + idx := strings.Index(diagnostic.Message, " imported but not used") + importPath := diagnostic.Message[:idx] + if importPath == fmt.Sprintf("%q", fix.StmtInfo.ImportPath) { + results = append(results, diagnostic) + } + } + + } + + return results +} + func quickFixes(ctx context.Context, view source.View, gof source.GoFile) ([]protocol.CodeAction, error) { var codeActions []protocol.CodeAction diff --git a/vendor/golang.org/x/tools/internal/lsp/definition.go b/vendor/golang.org/x/tools/internal/lsp/definition.go index d90385fdf0..52ae133073 100644 --- a/vendor/golang.org/x/tools/internal/lsp/definition.go +++ b/vendor/golang.org/x/tools/internal/lsp/definition.go @@ -27,7 +27,7 @@ func (s *Server) definition(ctx context.Context, params *protocol.TextDocumentPo if err != nil { return nil, err } - ident, err := source.Identifier(ctx, view, f, rng.Start) + ident, err := source.Identifier(ctx, f, rng.Start) if err != nil { return nil, err } @@ -61,7 +61,7 @@ func (s *Server) typeDefinition(ctx context.Context, params *protocol.TextDocume if err != nil { return nil, err } - ident, err := source.Identifier(ctx, view, f, rng.Start) + ident, err := source.Identifier(ctx, f, rng.Start) if err != nil { return nil, err } diff --git a/vendor/golang.org/x/tools/internal/lsp/general.go b/vendor/golang.org/x/tools/internal/lsp/general.go index eea37a5d68..207aa8e668 100644 --- a/vendor/golang.org/x/tools/internal/lsp/general.go +++ b/vendor/golang.org/x/tools/internal/lsp/general.go @@ -18,15 +18,19 @@ import ( "golang.org/x/tools/internal/lsp/telemetry/log" 
"golang.org/x/tools/internal/lsp/telemetry/tag" "golang.org/x/tools/internal/span" + errors "golang.org/x/xerrors" ) func (s *Server) initialize(ctx context.Context, params *protocol.InitializeParams) (*protocol.InitializeResult, error) { - s.initializedMu.Lock() - defer s.initializedMu.Unlock() - if s.isInitialized { + s.stateMu.Lock() + state := s.state + s.stateMu.Unlock() + if state >= serverInitializing { return nil, jsonrpc2.NewErrorf(jsonrpc2.CodeInvalidRequest, "server already initialized") } - s.isInitialized = true // mark server as initialized now + s.stateMu.Lock() + s.state = serverInitializing + s.stateMu.Unlock() // TODO: Remove the option once we are certain there are no issues here. s.textDocumentSyncKind = protocol.Incremental @@ -39,9 +43,13 @@ func (s *Server) initialize(ctx context.Context, params *protocol.InitializePara // Default to using synopsis as a default for hover information. s.hoverKind = source.SynopsisDocumentation - s.supportedCodeActions = map[protocol.CodeActionKind]bool{ - protocol.SourceOrganizeImports: true, - protocol.QuickFix: true, + s.supportedCodeActions = map[source.FileKind]map[protocol.CodeActionKind]bool{ + source.Go: { + protocol.SourceOrganizeImports: true, + protocol.QuickFix: true, + }, + source.Mod: {}, + source.Sum: {}, } s.setClientCapabilities(params.Capabilities) @@ -57,7 +65,7 @@ func (s *Server) initialize(ctx context.Context, params *protocol.InitializePara // no folders and no root, single file mode //TODO(iancottrell): not sure how to do single file mode yet //issue: golang.org/issue/31168 - return nil, fmt.Errorf("single file mode not supported yet") + return nil, errors.Errorf("single file mode not supported yet") } } @@ -127,6 +135,10 @@ func (s *Server) setClientCapabilities(caps protocol.ClientCapabilities) { } func (s *Server) initialized(ctx context.Context, params *protocol.InitializedParams) error { + s.stateMu.Lock() + s.state = serverInitialized + s.stateMu.Unlock() + if 
s.configurationSupported { if s.dynamicConfigurationSupported { s.client.RegisterCapability(ctx, &protocol.RegistrationParams{ @@ -140,16 +152,7 @@ func (s *Server) initialized(ctx context.Context, params *protocol.InitializedPa }) } for _, view := range s.session.Views() { - config, err := s.client.Configuration(ctx, &protocol.ConfigurationParams{ - Items: []protocol.ConfigurationItem{{ - ScopeURI: protocol.NewURI(view.Folder()), - Section: "gopls", - }}, - }) - if err != nil { - return err - } - if err := s.processConfig(ctx, view, config[0]); err != nil { + if err := s.fetchConfig(ctx, view); err != nil { return err } } @@ -160,6 +163,28 @@ func (s *Server) initialized(ctx context.Context, params *protocol.InitializedPa return nil } +func (s *Server) fetchConfig(ctx context.Context, view source.View) error { + configs, err := s.client.Configuration(ctx, &protocol.ConfigurationParams{ + Items: []protocol.ConfigurationItem{{ + ScopeURI: protocol.NewURI(view.Folder()), + Section: "gopls", + }, { + ScopeURI: protocol.NewURI(view.Folder()), + Section: view.Name(), + }, + }, + }) + if err != nil { + return err + } + for _, config := range configs { + if err := s.processConfig(ctx, view, config); err != nil { + return err + } + } + return nil +} + func (s *Server) processConfig(ctx context.Context, view source.View, config interface{}) error { // TODO: We should probably store and process more of the config. if config == nil { @@ -167,13 +192,13 @@ func (s *Server) processConfig(ctx context.Context, view source.View, config int } c, ok := config.(map[string]interface{}) if !ok { - return fmt.Errorf("invalid config gopls type %T", config) + return errors.Errorf("invalid config gopls type %T", config) } // Get the environment for the go/packages config. 
if env := c["env"]; env != nil { menv, ok := env.(map[string]interface{}) if !ok { - return fmt.Errorf("invalid config gopls.env type %T", env) + return errors.Errorf("invalid config gopls.env type %T", env) } env := view.Env() for k, v := range menv { @@ -185,7 +210,7 @@ func (s *Server) processConfig(ctx context.Context, view source.View, config int if buildFlags := c["buildFlags"]; buildFlags != nil { iflags, ok := buildFlags.([]interface{}) if !ok { - return fmt.Errorf("invalid config gopls.buildFlags type %T", buildFlags) + return errors.Errorf("invalid config gopls.buildFlags type %T", buildFlags) } flags := make([]string, 0, len(iflags)) for _, flag := range iflags { @@ -206,6 +231,8 @@ func (s *Server) processConfig(ctx context.Context, view source.View, config int switch hoverKind { case "NoDocumentation": s.hoverKind = source.NoDocumentation + case "SingleLine": + s.hoverKind = source.SingleLine case "SynopsisDocumentation": s.hoverKind = source.SynopsisDocumentation case "FullDocumentation": @@ -236,19 +263,21 @@ func (s *Server) processConfig(ctx context.Context, view source.View, config int } func (s *Server) shutdown(ctx context.Context) error { - s.initializedMu.Lock() - defer s.initializedMu.Unlock() - if !s.isInitialized { + s.stateMu.Lock() + defer s.stateMu.Unlock() + if s.state < serverInitialized { return jsonrpc2.NewErrorf(jsonrpc2.CodeInvalidRequest, "server not initialized") } // drop all the active views s.session.Shutdown(ctx) - s.isInitialized = false + s.state = serverShutDown return nil } func (s *Server) exit(ctx context.Context) error { - if s.isInitialized { + s.stateMu.Lock() + defer s.stateMu.Unlock() + if s.state != serverShutDown { os.Exit(1) } os.Exit(0) diff --git a/vendor/golang.org/x/tools/internal/lsp/hover.go b/vendor/golang.org/x/tools/internal/lsp/hover.go index 5f0be1172b..e6147e3791 100644 --- a/vendor/golang.org/x/tools/internal/lsp/hover.go +++ b/vendor/golang.org/x/tools/internal/lsp/hover.go @@ -6,6 +6,7 @@ package 
lsp import ( "context" + "golang.org/x/tools/internal/lsp/protocol" "golang.org/x/tools/internal/lsp/source" "golang.org/x/tools/internal/span" @@ -26,9 +27,9 @@ func (s *Server) hover(ctx context.Context, params *protocol.TextDocumentPositio if err != nil { return nil, err } - ident, err := source.Identifier(ctx, view, f, identRange.Start) + ident, err := source.Identifier(ctx, f, identRange.Start) if err != nil { - return nil, err + return nil, nil } hover, err := ident.Hover(ctx, s.preferredContentFormat == protocol.Markdown, s.hoverKind) if err != nil { diff --git a/vendor/golang.org/x/tools/internal/lsp/link.go b/vendor/golang.org/x/tools/internal/lsp/link.go index a251e52b96..616384ace2 100644 --- a/vendor/golang.org/x/tools/internal/lsp/link.go +++ b/vendor/golang.org/x/tools/internal/lsp/link.go @@ -6,7 +6,6 @@ package lsp import ( "context" - "fmt" "go/ast" "go/token" "regexp" @@ -18,6 +17,7 @@ import ( "golang.org/x/tools/internal/lsp/telemetry/log" "golang.org/x/tools/internal/lsp/telemetry/tag" "golang.org/x/tools/internal/span" + errors "golang.org/x/xerrors" ) func (s *Server) documentLink(ctx context.Context, params *protocol.DocumentLinkParams) ([]protocol.DocumentLink, error) { @@ -82,7 +82,7 @@ func findLinksInString(src string, pos token.Pos, view source.View, mapper *prot var links []protocol.DocumentLink re, err := getURLRegexp() if err != nil { - return nil, fmt.Errorf("cannot create regexp for links: %s", err.Error()) + return nil, errors.Errorf("cannot create regexp for links: %s", err.Error()) } for _, urlIndex := range re.FindAllIndex([]byte(src), -1) { start := urlIndex[0] diff --git a/vendor/golang.org/x/tools/internal/lsp/lsp_test.go b/vendor/golang.org/x/tools/internal/lsp/lsp_test.go index 5b563cd507..a1588d46fb 100644 --- a/vendor/golang.org/x/tools/internal/lsp/lsp_test.go +++ b/vendor/golang.org/x/tools/internal/lsp/lsp_test.go @@ -52,10 +52,13 @@ func testLSP(t *testing.T, exporter packagestest.Exporter) { server: &Server{ 
session: session, undelivered: make(map[span.URI][]source.Diagnostic), - supportedCodeActions: map[protocol.CodeActionKind]bool{ - protocol.SourceOrganizeImports: true, - protocol.QuickFix: true, - }, + supportedCodeActions: map[source.FileKind]map[protocol.CodeActionKind]bool{ + source.Go: { + protocol.SourceOrganizeImports: true, + protocol.QuickFix: true, + }, + source.Mod: {}, + source.Sum: {}}, hoverKind: source.SynopsisDocumentation, }, data: data, @@ -81,6 +84,13 @@ func (r *runner) Diagnostics(t *testing.T, data tests.Diagnostics) { t.Fatal(err) } got := results[uri] + // A special case to test that there are no diagnostics for a file. + if len(want) == 1 && want[0].Source == "no_diagnostics" { + if len(got) != 0 { + t.Errorf("expected no diagnostics for %s, got %v", uri, got) + } + continue + } if diff := diffDiagnostics(uri, want, got); diff != "" { t.Error(diff) } diff --git a/vendor/golang.org/x/tools/internal/lsp/protocol/context.go b/vendor/golang.org/x/tools/internal/lsp/protocol/context.go index 7a30c1dd60..748edaf04c 100644 --- a/vendor/golang.org/x/tools/internal/lsp/protocol/context.go +++ b/vendor/golang.org/x/tools/internal/lsp/protocol/context.go @@ -35,6 +35,6 @@ func logger(ctx context.Context, at time.Time, tags tag.List) bool { if entry.Error != nil { msg.Type = Error } - client.LogMessage(xcontext.Detach(ctx), msg) + go client.LogMessage(xcontext.Detach(ctx), msg) return true } diff --git a/vendor/golang.org/x/tools/internal/lsp/protocol/enums.go b/vendor/golang.org/x/tools/internal/lsp/protocol/enums.go index ecb8af4d70..c2ee277a89 100644 --- a/vendor/golang.org/x/tools/internal/lsp/protocol/enums.go +++ b/vendor/golang.org/x/tools/internal/lsp/protocol/enums.go @@ -13,7 +13,7 @@ var ( namesInitializeError [int(UnknownProtocolVersion) + 1]string namesMessageType [int(Log) + 1]string namesFileChangeType [int(Deleted) + 1]string - namesWatchKind [int(Change) + 1]string + namesWatchKind [int(WatchDelete) + 1]string 
namesCompletionTriggerKind [int(TriggerForIncompleteCompletions) + 1]string namesDiagnosticSeverity [int(SeverityHint) + 1]string namesDiagnosticTag [int(Unnecessary) + 1]string @@ -40,7 +40,9 @@ func init() { namesFileChangeType[int(Changed)] = "Changed" namesFileChangeType[int(Deleted)] = "Deleted" - namesWatchKind[int(Change)] = "Change" + namesWatchKind[int(WatchCreate)] = "WatchCreate" + namesWatchKind[int(WatchChange)] = "WatchChange" + namesWatchKind[int(WatchDelete)] = "WatchDelete" namesCompletionTriggerKind[int(Invoked)] = "Invoked" namesCompletionTriggerKind[int(TriggerCharacter)] = "TriggerCharacter" diff --git a/vendor/golang.org/x/tools/internal/lsp/protocol/span.go b/vendor/golang.org/x/tools/internal/lsp/protocol/span.go index 36ea0b55d1..d0e4e84c2f 100644 --- a/vendor/golang.org/x/tools/internal/lsp/protocol/span.go +++ b/vendor/golang.org/x/tools/internal/lsp/protocol/span.go @@ -7,10 +7,10 @@ package protocol import ( - "fmt" "go/token" "golang.org/x/tools/internal/span" + errors "golang.org/x/xerrors" ) type ColumnMapper struct { @@ -47,7 +47,7 @@ func (m *ColumnMapper) Location(s span.Span) (Location, error) { func (m *ColumnMapper) Range(s span.Span) (Range, error) { if span.CompareURI(m.URI, s.URI()) != 0 { - return Range{}, fmt.Errorf("column mapper is for file %q instead of %q", m.URI, s.URI()) + return Range{}, errors.Errorf("column mapper is for file %q instead of %q", m.URI, s.URI()) } s, err := s.WithAll(m.Converter) if err != nil { diff --git a/vendor/golang.org/x/tools/internal/lsp/protocol/tsprotocol.go b/vendor/golang.org/x/tools/internal/lsp/protocol/tsprotocol.go index 8e45d59496..b9e81ebe20 100644 --- a/vendor/golang.org/x/tools/internal/lsp/protocol/tsprotocol.go +++ b/vendor/golang.org/x/tools/internal/lsp/protocol/tsprotocol.go @@ -1,7 +1,7 @@ // Package protocol contains data types and code for LSP jsonrpcs // generated automatically from vscode-languageserver-node // commit: 8801c20b667945f455d7e023c71d2f741caeda25 -// last 
fetched Thu Jul 11 2019 13:43:41 GMT-0400 (Eastern Daylight Time) +// last fetched Sat Jul 13 2019 18:33:10 GMT-0700 (Pacific Daylight Time) package protocol // Code generated (see typescript/README.md) DO NOT EDIT. @@ -3611,10 +3611,20 @@ const ( */ Deleted FileChangeType = 3 - /*Change defined: + /*WatchCreate defined: + * Interested in create events. + */ + WatchCreate WatchKind = 1 + + /*WatchChange defined: * Interested in change events */ - Change WatchKind = 2 + WatchChange WatchKind = 2 + + /*WatchDelete defined: + * Interested in delete events + */ + WatchDelete WatchKind = 4 /*Invoked defined: * Completion was triggered by typing an identifier (24x7 code diff --git a/vendor/golang.org/x/tools/internal/lsp/protocol/typescript/README.md b/vendor/golang.org/x/tools/internal/lsp/protocol/typescript/README.md index 4084c4f627..486e09dbd6 100644 --- a/vendor/golang.org/x/tools/internal/lsp/protocol/typescript/README.md +++ b/vendor/golang.org/x/tools/internal/lsp/protocol/typescript/README.md @@ -4,10 +4,10 @@ 1. Make sure `node` is installed. As explained at the [node site]( Node) - you may need `node install @types/node` for the node runtime types -2. Install the typescript compiler, with `node install typescript`. + you may need `npm install @types/node` for the node runtime types +2. Install the typescript compiler, with `npm install typescript`. 3. Make sure `tsc` and `node` are in your execution path. -4. Get the typescript code for the jsonrpc protocol with `git clone vscode-lanuageserver-node.git` +4. Get the typescript code for the jsonrpc protocol with `git clone git@github.com:microsoft/vscode-languageserver-node.git` ## Usage @@ -31,4 +31,4 @@ To generate the client and server boilerplate (tsclient.go and tsserver.go) ## Note -`go.ts` uses the Typescript compiler's API, which is [introduced]( API) in their wiki. \ No newline at end of file +`go.ts` uses the Typescript compiler's API, which is [introduced]( API) in their wiki. 
diff --git a/vendor/golang.org/x/tools/internal/lsp/protocol/typescript/go.ts b/vendor/golang.org/x/tools/internal/lsp/protocol/typescript/go.ts index b1b1d588f1..558cc7ca69 100644 --- a/vendor/golang.org/x/tools/internal/lsp/protocol/typescript/go.ts +++ b/vendor/golang.org/x/tools/internal/lsp/protocol/typescript/go.ts @@ -927,7 +927,7 @@ let byName = new Map(); // consts are unique. (Go consts are package-level, but Typescript's are // not.) Use suffixes to minimize changes to gopls. let pref = new Map( - [['DiagnosticSeverity', 'Severity']]) // typeName->prefix + [['DiagnosticSeverity', 'Severity'], ['WatchKind', 'Watch']]) // typeName->prefix let suff = new Map([ ['CompletionItemKind', 'Completion'], ['InsertTextFormat', 'TextFormat'] ]) diff --git a/vendor/golang.org/x/tools/internal/lsp/references.go b/vendor/golang.org/x/tools/internal/lsp/references.go index 6be637566b..b179da931d 100644 --- a/vendor/golang.org/x/tools/internal/lsp/references.go +++ b/vendor/golang.org/x/tools/internal/lsp/references.go @@ -30,7 +30,7 @@ func (s *Server) references(ctx context.Context, params *protocol.ReferenceParam return nil, err } // Find all references to the identifier at the position. 
- ident, err := source.Identifier(ctx, view, f, rng.Start) + ident, err := source.Identifier(ctx, f, rng.Start) if err != nil { return nil, err } diff --git a/vendor/golang.org/x/tools/internal/lsp/rename.go b/vendor/golang.org/x/tools/internal/lsp/rename.go index 4d1e9b010c..e2f0ecb78d 100644 --- a/vendor/golang.org/x/tools/internal/lsp/rename.go +++ b/vendor/golang.org/x/tools/internal/lsp/rename.go @@ -27,7 +27,7 @@ func (s *Server) rename(ctx context.Context, params *protocol.RenameParams) (*pr if err != nil { return nil, err } - ident, err := source.Identifier(ctx, view, f, rng.Start) + ident, err := source.Identifier(ctx, f, rng.Start) if err != nil { return nil, err } diff --git a/vendor/golang.org/x/tools/internal/lsp/server.go b/vendor/golang.org/x/tools/internal/lsp/server.go index 088bdcaed2..6ab758b6c0 100644 --- a/vendor/golang.org/x/tools/internal/lsp/server.go +++ b/vendor/golang.org/x/tools/internal/lsp/server.go @@ -60,12 +60,21 @@ func (s *Server) Run(ctx context.Context) error { return s.Conn.Run(ctx) } +type serverState int + +const ( + serverCreated = serverState(iota) + serverInitializing // set once the server has received "initialize" request + serverInitialized // set once the server has received "initialized" request + serverShutDown +) + type Server struct { Conn *jsonrpc2.Conn client protocol.Client - initializedMu sync.Mutex - isInitialized bool // set once the server has received "initialize" request + stateMu sync.Mutex + state serverState // Configurations. // TODO(rstambler): Separate these into their own struct? 
@@ -80,7 +89,7 @@ type Server struct { disabledAnalyses map[string]struct{} wantSuggestedFixes bool - supportedCodeActions map[protocol.CodeActionKind]bool + supportedCodeActions map[source.FileKind]map[protocol.CodeActionKind]bool textDocumentSyncKind protocol.TextDocumentSyncKind diff --git a/vendor/golang.org/x/tools/internal/lsp/source/analysis.go b/vendor/golang.org/x/tools/internal/lsp/source/analysis.go index 8e7eeb7ca7..f08a86d8b7 100644 --- a/vendor/golang.org/x/tools/internal/lsp/source/analysis.go +++ b/vendor/golang.org/x/tools/internal/lsp/source/analysis.go @@ -20,6 +20,7 @@ import ( "golang.org/x/sync/errgroup" "golang.org/x/tools/go/analysis" "golang.org/x/tools/internal/lsp/telemetry/trace" + errors "golang.org/x/xerrors" ) func analyze(ctx context.Context, v View, pkgs []Package, analyzers []*analysis.Analyzer) ([]*Action, error) { @@ -119,7 +120,7 @@ func (act *Action) execOnce(ctx context.Context, fset *token.FileSet) error { } if failed != nil { sort.Strings(failed) - act.err = fmt.Errorf("failed prerequisites: %s", strings.Join(failed, ", ")) + act.err = errors.Errorf("failed prerequisites: %s", strings.Join(failed, ", ")) return act.err } @@ -163,12 +164,12 @@ func (act *Action) execOnce(ctx context.Context, fset *token.FileSet) error { act.pass = pass if act.Pkg.IsIllTyped() && !pass.Analyzer.RunDespiteErrors { - act.err = fmt.Errorf("analysis skipped due to errors in package: %v", act.Pkg.GetErrors()) + act.err = errors.Errorf("analysis skipped due to errors in package: %v", act.Pkg.GetErrors()) } else { act.result, act.err = pass.Analyzer.Run(pass) if act.err == nil { if got, want := reflect.TypeOf(act.result), pass.Analyzer.ResultType; got != want { - act.err = fmt.Errorf( + act.err = errors.Errorf( "internal error: on package %s, analyzer %s returned a result of type %v, but declared ResultType %v", pass.Pkg.Path(), pass.Analyzer, got, want) } diff --git a/vendor/golang.org/x/tools/internal/lsp/source/completion.go 
b/vendor/golang.org/x/tools/internal/lsp/source/completion.go index f7a7b99cad..a39571cc77 100644 --- a/vendor/golang.org/x/tools/internal/lsp/source/completion.go +++ b/vendor/golang.org/x/tools/internal/lsp/source/completion.go @@ -6,7 +6,6 @@ package source import ( "context" - "fmt" "go/ast" "go/token" "go/types" @@ -16,6 +15,7 @@ import ( "golang.org/x/tools/internal/lsp/snippet" "golang.org/x/tools/internal/lsp/telemetry/trace" "golang.org/x/tools/internal/span" + errors "golang.org/x/xerrors" ) type CompletionItem struct { @@ -212,7 +212,7 @@ func (c *completer) setSurrounding(ident *ast.Ident) { // members for more candidates. func (c *completer) found(obj types.Object, score float64) error { if obj.Pkg() != nil && obj.Pkg() != c.types && !obj.Exported() { - return fmt.Errorf("%s is inaccessible from %s", obj.Name(), c.types.Path()) + return errors.Errorf("%s is inaccessible from %s", obj.Name(), c.types.Path()) } if c.inDeepCompletion() { @@ -287,14 +287,14 @@ func Completion(ctx context.Context, view View, f GoFile, pos token.Pos, opts Co } pkg := f.GetPackage(ctx) if pkg == nil || pkg.IsIllTyped() { - return nil, nil, fmt.Errorf("package for %s is ill typed", f.URI()) + return nil, nil, errors.Errorf("package for %s is ill typed", f.URI()) } // Completion is based on what precedes the cursor. // Find the path to the position before pos. path, _ := astutil.PathEnclosingInterval(file, pos-1, pos-1) if path == nil { - return nil, nil, fmt.Errorf("cannot find node enclosing position") + return nil, nil, errors.Errorf("cannot find node enclosing position") } // Skip completion inside comments. 
for _, g := range file.Comments { @@ -358,7 +358,7 @@ func Completion(ctx context.Context, view View, f GoFile, pos token.Pos, opts Co qual := types.RelativeTo(pkg.GetTypes()) of += ", of " + types.ObjectString(obj, qual) } - return nil, nil, fmt.Errorf("this is a definition%s", of) + return nil, nil, errors.Errorf("this is a definition%s", of) } } if err := c.lexical(); err != nil { @@ -423,7 +423,7 @@ func (c *completer) selector(sel *ast.SelectorExpr) error { // Invariant: sel is a true selector. tv, ok := c.info.Types[sel.X] if !ok { - return fmt.Errorf("cannot resolve %s", sel.X) + return errors.Errorf("cannot resolve %s", sel.X) } return c.methodsAndFields(tv.Type, tv.Addressable()) diff --git a/vendor/golang.org/x/tools/internal/lsp/source/completion_format.go b/vendor/golang.org/x/tools/internal/lsp/source/completion_format.go index a7550a80b2..a07d8091cb 100644 --- a/vendor/golang.org/x/tools/internal/lsp/source/completion_format.go +++ b/vendor/golang.org/x/tools/internal/lsp/source/completion_format.go @@ -118,7 +118,7 @@ func (c *completer) item(cand candidate) (CompletionItem, error) { log.Error(c.ctx, "declaration in a Go file", err, tag.Of("Label", item.Label)) goto Return } - ident, err := Identifier(c.ctx, c.view, gof, declRange.Start) + ident, err := Identifier(c.ctx, gof, declRange.Start) if err != nil { log.Error(c.ctx, "no identifier", err, tag.Of("Name", obj.Name())) goto Return diff --git a/vendor/golang.org/x/tools/internal/lsp/source/diagnostics.go b/vendor/golang.org/x/tools/internal/lsp/source/diagnostics.go index befbf85111..51cc41dbcc 100644 --- a/vendor/golang.org/x/tools/internal/lsp/source/diagnostics.go +++ b/vendor/golang.org/x/tools/internal/lsp/source/diagnostics.go @@ -36,6 +36,7 @@ import ( "golang.org/x/tools/go/packages" "golang.org/x/tools/internal/lsp/telemetry" "golang.org/x/tools/internal/lsp/telemetry/log" + "golang.org/x/tools/internal/lsp/telemetry/trace" "golang.org/x/tools/internal/span" ) @@ -61,7 +62,8 @@ const ( ) 
func Diagnostics(ctx context.Context, view View, f GoFile, disabledAnalyses map[string]struct{}) (map[span.URI][]Diagnostic, error) { - ctx = telemetry.File.With(ctx, f.URI()) + ctx, done := trace.StartSpan(ctx, "source.Diagnostics", telemetry.File.Of(f.URI())) + defer done() pkg := f.GetPackage(ctx) if pkg == nil { return singleDiagnostic(f.URI(), "%s is not part of a package", f.URI()), nil @@ -106,7 +108,10 @@ type diagnosticSet struct { listErrors, parseErrors, typeErrors []Diagnostic } -func diagnostics(ctx context.Context, v View, pkg Package, reports map[span.URI][]Diagnostic) bool { +func diagnostics(ctx context.Context, view View, pkg Package, reports map[span.URI][]Diagnostic) bool { + ctx, done := trace.StartSpan(ctx, "source.diagnostics", telemetry.Package.Of(pkg.ID())) + defer done() + diagSets := make(map[span.URI]*diagnosticSet) for _, err := range pkg.GetErrors() { diag := Diagnostic{ @@ -125,7 +130,7 @@ func diagnostics(ctx context.Context, v View, pkg Package, reports map[span.URI] set.parseErrors = append(set.parseErrors, diag) case packages.TypeError: if diag.Span.IsPoint() { - diag.Span = pointToSpan(ctx, v, diag.Span) + diag.Span = pointToSpan(ctx, view, diag.Span) } set.typeErrors = append(set.typeErrors, diag) default: diff --git a/vendor/golang.org/x/tools/internal/lsp/source/format.go b/vendor/golang.org/x/tools/internal/lsp/source/format.go index e9f12d2c61..16cb258952 100644 --- a/vendor/golang.org/x/tools/internal/lsp/source/format.go +++ b/vendor/golang.org/x/tools/internal/lsp/source/format.go @@ -8,7 +8,6 @@ package source import ( "bytes" "context" - "fmt" "go/format" "golang.org/x/tools/go/ast/astutil" @@ -18,6 +17,7 @@ import ( "golang.org/x/tools/internal/lsp/telemetry/log" "golang.org/x/tools/internal/lsp/telemetry/trace" "golang.org/x/tools/internal/span" + errors "golang.org/x/xerrors" ) // Format formats a file with a given range. 
@@ -30,12 +30,20 @@ func Format(ctx context.Context, f GoFile, rng span.Range) ([]TextEdit, error) { return nil, err } pkg := f.GetPackage(ctx) - if hasListErrors(pkg.GetErrors()) || hasParseErrors(pkg.GetErrors()) { - return nil, fmt.Errorf("%s has parse errors, not formatting", f.URI()) + if hasListErrors(pkg.GetErrors()) || hasParseErrors(pkg, f.URI()) { + // Even if this package has list or parse errors, this file may not + // have any parse errors and can still be formatted. Using format.Node + // on an ast with errors may result in code being added or removed. + // Attempt to format the source of this file instead. + formatted, err := formatSource(ctx, f) + if err != nil { + return nil, err + } + return computeTextEdits(ctx, f, string(formatted)), nil } path, exact := astutil.PathEnclosingInterval(file, rng.Start, rng.End) if !exact || len(path) == 0 { - return nil, fmt.Errorf("no exact AST node matching the specified range") + return nil, errors.Errorf("no exact AST node matching the specified range") } node := path[0] @@ -52,6 +60,16 @@ func Format(ctx context.Context, f GoFile, rng span.Range) ([]TextEdit, error) { return computeTextEdits(ctx, f, buf.String()), nil } +func formatSource(ctx context.Context, file File) ([]byte, error) { + ctx, done := trace.StartSpan(ctx, "source.formatSource") + defer done() + data, _, err := file.Handle(ctx).Read(ctx) + if err != nil { + return nil, err + } + return format.Source(data) +} + // Imports formats a file using the goimports tool. 
func Imports(ctx context.Context, view View, f GoFile, rng span.Range) ([]TextEdit, error) { ctx, done := trace.StartSpan(ctx, "source.Imports") @@ -62,23 +80,13 @@ func Imports(ctx context.Context, view View, f GoFile, rng span.Range) ([]TextEd } pkg := f.GetPackage(ctx) if pkg == nil || pkg.IsIllTyped() { - return nil, fmt.Errorf("no package for file %s", f.URI()) + return nil, errors.Errorf("no package for file %s", f.URI()) } if hasListErrors(pkg.GetErrors()) { - return nil, fmt.Errorf("%s has list errors, not running goimports", f.URI()) + return nil, errors.Errorf("%s has list errors, not running goimports", f.URI()) } - if resolver, ok := view.ProcessEnv(ctx).GetResolver().(*imports.ModuleResolver); ok && resolver.Initialized { - // TODO(suzmue): only reset this state when necessary (eg when the go.mod files of this - // module or modules with replace directive changes). - resolver.Initialized = false - resolver.Main = nil - resolver.ModsByModPath = nil - resolver.ModsByDir = nil - resolver.ModCachePkgs = nil - } options := &imports.Options{ - Env: view.ProcessEnv(ctx), // Defaults. AllErrors: true, Comments: true, @@ -87,16 +95,89 @@ func Imports(ctx context.Context, view View, f GoFile, rng span.Range) ([]TextEd TabIndent: true, TabWidth: 8, } - formatted, err := imports.Process(f.URI().Filename(), data, options) + var formatted []byte + importFn := func(opts *imports.Options) error { + formatted, err = imports.Process(f.URI().Filename(), data, opts) + return err + } + err = view.RunProcessEnvFunc(ctx, importFn, options) if err != nil { return nil, err } return computeTextEdits(ctx, f, string(formatted)), nil } -func hasParseErrors(errors []packages.Error) bool { - for _, err := range errors { - if err.Kind == packages.ParseError { +type ImportFix struct { + Fix *imports.ImportFix + Edits []TextEdit +} + +// AllImportsFixes formats f for each possible fix to the imports. 
+// In addition to returning the result of applying all edits, +// it returns a list of fixes that could be applied to the file, with the +// corresponding TextEdits that would be needed to apply that fix. +func AllImportsFixes(ctx context.Context, view View, f GoFile, rng span.Range) (edits []TextEdit, editsPerFix []*ImportFix, err error) { + ctx, done := trace.StartSpan(ctx, "source.AllImportsFixes") + defer done() + data, _, err := f.Handle(ctx).Read(ctx) + if err != nil { + return nil, nil, err + } + pkg := f.GetPackage(ctx) + if pkg == nil || pkg.IsIllTyped() { + return nil, nil, errors.Errorf("no package for file %s", f.URI()) + } + if hasListErrors(pkg.GetErrors()) { + return nil, nil, errors.Errorf("%s has list errors, not running goimports", f.URI()) + } + + options := &imports.Options{ + // Defaults. + AllErrors: true, + Comments: true, + Fragment: true, + FormatOnly: false, + TabIndent: true, + TabWidth: 8, + } + importFn := func(opts *imports.Options) error { + fixes, err := imports.FixImports(f.URI().Filename(), data, opts) + if err != nil { + return err + } + // Apply all of the import fixes to the file. + formatted, err := imports.ApplyFixes(fixes, f.URI().Filename(), data, options) + if err != nil { + return err + } + edits = computeTextEdits(ctx, f, string(formatted)) + // Add the edits for each fix to the result. + editsPerFix = make([]*ImportFix, len(fixes)) + for i, fix := range fixes { + formatted, err := imports.ApplyFixes([]*imports.ImportFix{fix}, f.URI().Filename(), data, options) + if err != nil { + return err + } + editsPerFix[i] = &ImportFix{ + Fix: fix, + Edits: computeTextEdits(ctx, f, string(formatted)), + } + } + return err + } + err = view.RunProcessEnvFunc(ctx, importFn, options) + if err != nil { + return nil, nil, err + } + + return edits, editsPerFix, nil +} + +// hasParseErrors returns true if the given file has parse errors. 
+func hasParseErrors(pkg Package, uri span.URI) bool { + for _, err := range pkg.GetErrors() { + spn := packagesErrorSpan(err) + if spn.URI() == uri && err.Kind == packages.ParseError { return true } } diff --git a/vendor/golang.org/x/tools/internal/lsp/source/highlight.go b/vendor/golang.org/x/tools/internal/lsp/source/highlight.go index a5a3358b94..97b9457ac6 100644 --- a/vendor/golang.org/x/tools/internal/lsp/source/highlight.go +++ b/vendor/golang.org/x/tools/internal/lsp/source/highlight.go @@ -6,13 +6,13 @@ package source import ( "context" - "fmt" "go/ast" "go/token" "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/internal/lsp/telemetry/trace" "golang.org/x/tools/internal/span" + errors "golang.org/x/xerrors" ) func Highlight(ctx context.Context, f GoFile, pos token.Pos) ([]span.Span, error) { @@ -26,11 +26,11 @@ func Highlight(ctx context.Context, f GoFile, pos token.Pos) ([]span.Span, error fset := f.FileSet() path, _ := astutil.PathEnclosingInterval(file, pos, pos) if len(path) == 0 { - return nil, fmt.Errorf("no enclosing position found for %s", fset.Position(pos)) + return nil, errors.Errorf("no enclosing position found for %s", fset.Position(pos)) } id, ok := path[0].(*ast.Ident) if !ok { - return nil, fmt.Errorf("%s is not an identifier", fset.Position(pos)) + return nil, errors.Errorf("%s is not an identifier", fset.Position(pos)) } var result []span.Span if id.Obj != nil { diff --git a/vendor/golang.org/x/tools/internal/lsp/source/hover.go b/vendor/golang.org/x/tools/internal/lsp/source/hover.go index e46fe35932..45667031d1 100644 --- a/vendor/golang.org/x/tools/internal/lsp/source/hover.go +++ b/vendor/golang.org/x/tools/internal/lsp/source/hover.go @@ -6,7 +6,6 @@ package source import ( "context" - "fmt" "go/ast" "go/doc" "go/format" @@ -14,6 +13,7 @@ import ( "strings" "golang.org/x/tools/internal/lsp/telemetry/trace" + errors "golang.org/x/xerrors" ) type documentation struct { @@ -25,16 +25,21 @@ type HoverKind int const ( 
NoDocumentation = HoverKind(iota) + SingleLine SynopsisDocumentation FullDocumentation - - // TODO: Support a single-line hover mode for clients like Vim. - singleLine ) func (i *IdentifierInfo) Hover(ctx context.Context, markdownSupported bool, hoverKind HoverKind) (string, error) { ctx, done := trace.StartSpan(ctx, "source.Hover") defer done() + + // If the user has explicitly requested a single line of hover information, + // fall back to using types.ObjectString. + if hoverKind == SingleLine { + return types.ObjectString(i.decl.obj, i.qf), nil + } + h, err := i.decl.hover(ctx) if err != nil { return "", err @@ -124,7 +129,7 @@ func formatGenDecl(node *ast.GenDecl, obj types.Object, typ types.Type) (*docume } } if spec == nil { - return nil, fmt.Errorf("no spec for node %v at position %v", node, obj.Pos()) + return nil, errors.Errorf("no spec for node %v at position %v", node, obj.Pos()) } // If we have a field or method. switch obj.(type) { @@ -145,7 +150,7 @@ func formatGenDecl(node *ast.GenDecl, obj types.Object, typ types.Type) (*docume case *ast.ImportSpec: return &documentation{spec, spec.Doc}, nil } - return nil, fmt.Errorf("unable to format spec %v (%T)", spec, spec) + return nil, errors.Errorf("unable to format spec %v (%T)", spec, spec) } func formatVar(node ast.Spec, obj types.Object) (*documentation, error) { diff --git a/vendor/golang.org/x/tools/internal/lsp/source/identifier.go b/vendor/golang.org/x/tools/internal/lsp/source/identifier.go index 65724e6b29..42bb93bb75 100644 --- a/vendor/golang.org/x/tools/internal/lsp/source/identifier.go +++ b/vendor/golang.org/x/tools/internal/lsp/source/identifier.go @@ -6,7 +6,6 @@ package source import ( "context" - "fmt" "go/ast" "go/token" "go/types" @@ -15,6 +14,7 @@ import ( "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/internal/lsp/telemetry/trace" "golang.org/x/tools/internal/span" + errors "golang.org/x/xerrors" ) // IdentifierInfo holds information about an identifier in Go source. 
@@ -47,40 +47,46 @@ func (i *IdentifierInfo) DeclarationRange() span.Range { // Identifier returns identifier information for a position // in a file, accounting for a potentially incomplete selector. -func Identifier(ctx context.Context, view View, f GoFile, pos token.Pos) (*IdentifierInfo, error) { - if result, err := identifier(ctx, view, f, pos); err != nil || result != nil { - return result, err - } - // If the position is not an identifier but immediately follows - // an identifier or selector period (as is common when - // requesting a completion), use the path to the preceding node. - result, err := identifier(ctx, view, f, pos-1) - if result == nil && err == nil { - err = fmt.Errorf("no identifier found") - } - return result, err -} - -// identifier checks a single position for a potential identifier. -func identifier(ctx context.Context, view View, f GoFile, pos token.Pos) (*IdentifierInfo, error) { - ctx, done := trace.StartSpan(ctx, "source.identifier") - defer done() - +func Identifier(ctx context.Context, f GoFile, pos token.Pos) (*IdentifierInfo, error) { file, err := f.GetAST(ctx, ParseFull) if file == nil { return nil, err } pkg := f.GetPackage(ctx) if pkg == nil || pkg.IsIllTyped() { - return nil, fmt.Errorf("pkg for %s is ill-typed", f.URI()) + return nil, errors.Errorf("pkg for %s is ill-typed", f.URI()) } + return findIdentifier(ctx, f, pkg, file, pos) +} + +func findIdentifier(ctx context.Context, f GoFile, pkg Package, file *ast.File, pos token.Pos) (*IdentifierInfo, error) { + if result, err := identifier(ctx, f, pkg, file, pos); err != nil || result != nil { + return result, err + } + // If the position is not an identifier but immediately follows + // an identifier or selector period (as is common when + // requesting a completion), use the path to the preceding node. 
+ result, err := identifier(ctx, f, pkg, file, pos-1) + if result == nil && err == nil { + err = errors.Errorf("no identifier found") + } + return result, err +} + +// identifier checks a single position for a potential identifier. +func identifier(ctx context.Context, f GoFile, pkg Package, file *ast.File, pos token.Pos) (*IdentifierInfo, error) { + ctx, done := trace.StartSpan(ctx, "source.identifier") + defer done() + + var err error + // Handle import specs separately, as there is no formal position for a package declaration. if result, err := importSpec(ctx, f, file, pkg, pos); result != nil || err != nil { return result, err } path, _ := astutil.PathEnclosingInterval(file, pos, pos) if path == nil { - return nil, fmt.Errorf("can't find node enclosing position") + return nil, errors.Errorf("can't find node enclosing position") } result := &IdentifierInfo{ File: f, @@ -118,7 +124,7 @@ func identifier(ctx context.Context, view View, f GoFile, pos token.Pos) (*Ident result.decl.wasImplicit = true } else { // Probably a type error. 
- return nil, fmt.Errorf("no object for ident %v", result.Name) + return nil, errors.Errorf("no object for ident %v", result.Name) } } @@ -126,7 +132,7 @@ func identifier(ctx context.Context, view View, f GoFile, pos token.Pos) (*Ident if result.decl.obj.Parent() == types.Universe { decl, ok := lookupBuiltinDecl(f.View(), result.Name).(ast.Node) if !ok { - return nil, fmt.Errorf("no declaration for %s", result.Name) + return nil, errors.Errorf("no declaration for %s", result.Name) } result.decl.node = decl if result.decl.rng, err = posToRange(ctx, f.FileSet(), result.Name, decl.Pos()); err != nil { @@ -157,7 +163,7 @@ func identifier(ctx context.Context, view View, f GoFile, pos token.Pos) (*Ident if result.decl.rng, err = objToRange(ctx, f.FileSet(), result.decl.obj); err != nil { return nil, err } - if result.decl.node, err = objToNode(ctx, view, pkg.GetTypes(), result.decl.obj, result.decl.rng); err != nil { + if result.decl.node, err = objToNode(ctx, f.View(), pkg.GetTypes(), result.decl.obj, result.decl.rng); err != nil { return nil, err } typ := pkg.GetTypesInfo().TypeOf(result.ident) @@ -214,7 +220,7 @@ func objToRange(ctx context.Context, fset *token.FileSet, obj types.Object) (spa func posToRange(ctx context.Context, fset *token.FileSet, name string, pos token.Pos) (span.Range, error) { if !pos.IsValid() { - return span.Range{}, fmt.Errorf("invalid position for %v", name) + return span.Range{}, errors.Errorf("invalid position for %v", name) } return span.NewRange(fset, pos, pos+token.Pos(len(name))), nil } @@ -230,7 +236,7 @@ func objToNode(ctx context.Context, view View, originPkg *types.Package, obj typ } declFile, ok := f.(GoFile) if !ok { - return nil, fmt.Errorf("%s is not a Go file", s.URI()) + return nil, errors.Errorf("%s is not a Go file", s.URI()) } // If the object is exported from a different package, // we don't need its full AST to find the definition. 
@@ -244,7 +250,7 @@ func objToNode(ctx context.Context, view View, originPkg *types.Package, obj typ } path, _ := astutil.PathEnclosingInterval(declAST, rng.Start, rng.End) if path == nil { - return nil, fmt.Errorf("no path for range %v", rng) + return nil, errors.Errorf("no path for range %v", rng) } for _, node := range path { switch node := node.(type) { @@ -277,7 +283,7 @@ func importSpec(ctx context.Context, f GoFile, fAST *ast.File, pkg Package, pos } importPath, err := strconv.Unquote(imp.Path.Value) if err != nil { - return nil, fmt.Errorf("import path not quoted: %s (%v)", imp.Path.Value, err) + return nil, errors.Errorf("import path not quoted: %s (%v)", imp.Path.Value, err) } result := &IdentifierInfo{ File: f, @@ -288,10 +294,10 @@ func importSpec(ctx context.Context, f GoFile, fAST *ast.File, pkg Package, pos // Consider the "declaration" of an import spec to be the imported package. importedPkg := pkg.GetImport(importPath) if importedPkg == nil { - return nil, fmt.Errorf("no import for %q", importPath) + return nil, errors.Errorf("no import for %q", importPath) } if importedPkg.GetSyntax(ctx) == nil { - return nil, fmt.Errorf("no syntax for for %q", importPath) + return nil, errors.Errorf("no syntax for for %q", importPath) } // Heuristic: Jump to the longest (most "interesting") file of the package. 
var dest *ast.File @@ -301,7 +307,7 @@ func importSpec(ctx context.Context, f GoFile, fAST *ast.File, pkg Package, pos } } if dest == nil { - return nil, fmt.Errorf("package %q has no files", importPath) + return nil, errors.Errorf("package %q has no files", importPath) } result.decl.rng = span.NewRange(f.FileSet(), dest.Name.Pos(), dest.Name.End()) result.decl.node = imp diff --git a/vendor/golang.org/x/tools/internal/lsp/source/references.go b/vendor/golang.org/x/tools/internal/lsp/source/references.go index 34f5160fa1..dc5d38a156 100644 --- a/vendor/golang.org/x/tools/internal/lsp/source/references.go +++ b/vendor/golang.org/x/tools/internal/lsp/source/references.go @@ -6,12 +6,12 @@ package source import ( "context" - "fmt" "go/ast" "go/types" "golang.org/x/tools/internal/lsp/telemetry/trace" "golang.org/x/tools/internal/span" + errors "golang.org/x/xerrors" ) // ReferenceInfo holds information about reference to an identifier in Go source. @@ -33,17 +33,17 @@ func (i *IdentifierInfo) References(ctx context.Context) ([]*ReferenceInfo, erro // If the object declaration is nil, assume it is an import spec and do not look for references. 
if i.decl.obj == nil { - return nil, fmt.Errorf("no references for an import spec") + return nil, errors.Errorf("no references for an import spec") } pkgs := i.File.GetPackages(ctx) for _, pkg := range pkgs { if pkg == nil || pkg.IsIllTyped() { - return nil, fmt.Errorf("package for %s is ill typed", i.File.URI()) + return nil, errors.Errorf("package for %s is ill typed", i.File.URI()) } info := pkg.GetTypesInfo() if info == nil { - return nil, fmt.Errorf("package %s has no types info", pkg.PkgPath()) + return nil, errors.Errorf("package %s has no types info", pkg.PkgPath()) } if i.decl.wasImplicit { diff --git a/vendor/golang.org/x/tools/internal/lsp/source/rename.go b/vendor/golang.org/x/tools/internal/lsp/source/rename.go index 3964fa4eb5..02c9bf2ecb 100644 --- a/vendor/golang.org/x/tools/internal/lsp/source/rename.go +++ b/vendor/golang.org/x/tools/internal/lsp/source/rename.go @@ -7,7 +7,6 @@ package source import ( "bytes" "context" - "fmt" "go/ast" "go/format" "go/token" @@ -18,6 +17,7 @@ import ( "golang.org/x/tools/internal/lsp/telemetry/trace" "golang.org/x/tools/internal/span" "golang.org/x/tools/refactor/satisfy" + errors "golang.org/x/xerrors" ) type renamer struct { @@ -41,22 +41,22 @@ func (i *IdentifierInfo) Rename(ctx context.Context, newName string) (map[span.U defer done() if i.Name == newName { - return nil, fmt.Errorf("old and new names are the same: %s", newName) + return nil, errors.Errorf("old and new names are the same: %s", newName) } if !isValidIdentifier(i.Name) { - return nil, fmt.Errorf("invalid identifier to rename: %q", i.Name) + return nil, errors.Errorf("invalid identifier to rename: %q", i.Name) } if i.pkg == nil || i.pkg.IsIllTyped() { - return nil, fmt.Errorf("package for %s is ill typed", i.File.URI()) + return nil, errors.Errorf("package for %s is ill typed", i.File.URI()) } // Do not rename builtin identifiers. 
if i.decl.obj.Parent() == types.Universe { - return nil, fmt.Errorf("cannot rename builtin %q", i.Name) + return nil, errors.Errorf("cannot rename builtin %q", i.Name) } // Do not rename identifiers declared in another package. if i.pkg.GetTypes() != i.decl.obj.Pkg() { - return nil, fmt.Errorf("failed to rename because %q is declared in package %q", i.Name, i.decl.obj.Pkg().Name()) + return nil, errors.Errorf("failed to rename because %q is declared in package %q", i.Name, i.decl.obj.Pkg().Name()) } refs, err := i.References(ctx) @@ -83,7 +83,7 @@ func (i *IdentifierInfo) Rename(ctx context.Context, newName string) (map[span.U r.check(from.obj) } if r.hadConflicts { - return nil, fmt.Errorf(r.errors) + return nil, errors.Errorf(r.errors) } changes, err := r.update() @@ -200,11 +200,11 @@ func (r *renamer) updatePkgName(pkgName *types.PkgName) (*TextEdit, error) { _, path, _ := pathEnclosingInterval(r.ctx, r.fset, pkg, pkgName.Pos(), pkgName.Pos()) if len(path) < 2 { - return nil, fmt.Errorf("failed to update PkgName for %s", pkgName.Name()) + return nil, errors.Errorf("failed to update PkgName for %s", pkgName.Name()) } spec, ok := path[1].(*ast.ImportSpec) if !ok { - return nil, fmt.Errorf("failed to update PkgName for %s", pkgName.Name()) + return nil, errors.Errorf("failed to update PkgName for %s", pkgName.Name()) } var astIdent *ast.Ident // will be nil if ident is removed diff --git a/vendor/golang.org/x/tools/internal/lsp/source/signature_help.go b/vendor/golang.org/x/tools/internal/lsp/source/signature_help.go index 69786a49b1..ffbb3d58b0 100644 --- a/vendor/golang.org/x/tools/internal/lsp/source/signature_help.go +++ b/vendor/golang.org/x/tools/internal/lsp/source/signature_help.go @@ -6,13 +6,13 @@ package source import ( "context" - "fmt" "go/ast" "go/token" "go/types" "golang.org/x/tools/go/ast/astutil" "golang.org/x/tools/internal/lsp/telemetry/trace" + errors "golang.org/x/xerrors" ) type SignatureInformation struct { @@ -35,14 +35,14 @@ func 
SignatureHelp(ctx context.Context, f GoFile, pos token.Pos) (*SignatureInfo } pkg := f.GetPackage(ctx) if pkg == nil || pkg.IsIllTyped() { - return nil, fmt.Errorf("package for %s is ill typed", f.URI()) + return nil, errors.Errorf("package for %s is ill typed", f.URI()) } // Find a call expression surrounding the query position. var callExpr *ast.CallExpr path, _ := astutil.PathEnclosingInterval(file, pos, pos) if path == nil { - return nil, fmt.Errorf("cannot find node enclosing position") + return nil, errors.Errorf("cannot find node enclosing position") } FindCall: for _, node := range path { @@ -56,11 +56,11 @@ FindCall: // The user is within an anonymous function, // which may be the parameter to the *ast.CallExpr. // Don't show signature help in this case. - return nil, fmt.Errorf("no signature help within a function declaration") + return nil, errors.Errorf("no signature help within a function declaration") } } if callExpr == nil || callExpr.Fun == nil { - return nil, fmt.Errorf("cannot find an enclosing function") + return nil, errors.Errorf("cannot find an enclosing function") } // Get the object representing the function, if available. @@ -82,12 +82,12 @@ FindCall: // Get the type information for the function being called. 
sigType := pkg.GetTypesInfo().TypeOf(callExpr.Fun) if sigType == nil { - return nil, fmt.Errorf("cannot get type for Fun %[1]T (%[1]v)", callExpr.Fun) + return nil, errors.Errorf("cannot get type for Fun %[1]T (%[1]v)", callExpr.Fun) } sig, _ := sigType.Underlying().(*types.Signature) if sig == nil { - return nil, fmt.Errorf("cannot find signature for Fun %[1]T (%[1]v)", callExpr.Fun) + return nil, errors.Errorf("cannot find signature for Fun %[1]T (%[1]v)", callExpr.Fun) } qf := qualifier(file, pkg.GetTypes(), pkg.GetTypesInfo()) @@ -128,7 +128,7 @@ FindCall: func builtinSignature(ctx context.Context, v View, callExpr *ast.CallExpr, name string, pos token.Pos) (*SignatureInformation, error) { decl, ok := lookupBuiltinDecl(v, name).(*ast.FuncDecl) if !ok { - return nil, fmt.Errorf("no function declaration for builtin: %s", name) + return nil, errors.Errorf("no function declaration for builtin: %s", name) } params, _ := formatFieldList(ctx, v, decl.Type.Params) results, writeResultParens := formatFieldList(ctx, v, decl.Type.Results) diff --git a/vendor/golang.org/x/tools/internal/lsp/source/source_test.go b/vendor/golang.org/x/tools/internal/lsp/source/source_test.go index b74ec42a5c..8da36c9852 100644 --- a/vendor/golang.org/x/tools/internal/lsp/source/source_test.go +++ b/vendor/golang.org/x/tools/internal/lsp/source/source_test.go @@ -62,6 +62,13 @@ func (r *runner) Diagnostics(t *testing.T, data tests.Diagnostics) { t.Fatal(err) } got := results[uri] + // A special case to test that there are no diagnostics for a file. 
+ if len(want) == 1 && want[0].Source == "no_diagnostics" { + if len(got) != 0 { + t.Errorf("expected no diagnostics for %s, got %v", uri, got) + } + continue + } if diff := diffDiagnostics(uri, want, got); diff != "" { t.Error(diff) } @@ -388,7 +395,7 @@ func (r *runner) Definition(t *testing.T, data tests.Definitions) { t.Fatalf("failed to get token for %s: %v", d.Src.URI(), err) } pos := tok.Pos(d.Src.Start().Offset()) - ident, err := source.Identifier(ctx, r.view, f.(source.GoFile), pos) + ident, err := source.Identifier(ctx, f.(source.GoFile), pos) if err != nil { t.Fatalf("failed for %v: %v", d.Src, err) } @@ -461,7 +468,7 @@ func (r *runner) Reference(t *testing.T, data tests.References) { t.Fatalf("failed to get token for %s: %v", src.URI(), err) } pos := tok.Pos(src.Start().Offset()) - ident, err := source.Identifier(ctx, r.view, f.(source.GoFile), pos) + ident, err := source.Identifier(ctx, f.(source.GoFile), pos) if err != nil { t.Fatalf("failed for %v: %v", src, err) } @@ -512,7 +519,7 @@ func (r *runner) Rename(t *testing.T, data tests.Renames) { } pos := tok.Pos(spn.Start().Offset()) - ident, err := source.Identifier(r.ctx, r.view, f.(source.GoFile), pos) + ident, err := source.Identifier(r.ctx, f.(source.GoFile), pos) if err != nil { t.Error(err) continue diff --git a/vendor/golang.org/x/tools/internal/lsp/source/suggested_fix.go b/vendor/golang.org/x/tools/internal/lsp/source/suggested_fix.go index 6d1f733c95..a433723055 100644 --- a/vendor/golang.org/x/tools/internal/lsp/source/suggested_fix.go +++ b/vendor/golang.org/x/tools/internal/lsp/source/suggested_fix.go @@ -1,10 +1,24 @@ -// +build !experimental - package source -import "go/token" -import "golang.org/x/tools/go/analysis" +import ( + "go/token" + "golang.org/x/tools/go/analysis" + "golang.org/x/tools/internal/span" +) func getCodeActions(fset *token.FileSet, diag analysis.Diagnostic) ([]SuggestedFixes, error) { - return nil, nil + var cas []SuggestedFixes + for _, fix := range 
diag.SuggestedFixes { + var ca SuggestedFixes + ca.Title = fix.Message + for _, te := range fix.TextEdits { + span, err := span.NewRange(fset, te.Pos, te.End).Span() + if err != nil { + return nil, err + } + ca.Edits = append(ca.Edits, TextEdit{span, string(te.NewText)}) + } + cas = append(cas, ca) + } + return cas, nil } diff --git a/vendor/golang.org/x/tools/internal/lsp/source/suggested_fix_experimental.go b/vendor/golang.org/x/tools/internal/lsp/source/suggested_fix_experimental.go deleted file mode 100644 index b34f8d77b2..0000000000 --- a/vendor/golang.org/x/tools/internal/lsp/source/suggested_fix_experimental.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build experimental - -package source - -import ( - "go/token" - "golang.org/x/tools/go/analysis" - "golang.org/x/tools/internal/span" -) - -func getCodeActions(fset *token.FileSet, diag analysis.Diagnostic) ([]CodeAction, error) { - var cas []CodeAction - for _, fix := range diag.SuggestedFixes { - var ca CodeAction - ca.Title = fix.Message - for _, te := range fix.TextEdits { - span, err := span.NewRange(fset, te.Pos, te.End).Span() - if err != nil { - return nil, err - } - ca.Edits = append(ca.Edits, TextEdit{span, string(te.NewText)}) - } - cas = append(cas, ca) - } - return cas, nil -} diff --git a/vendor/golang.org/x/tools/internal/lsp/source/symbols.go b/vendor/golang.org/x/tools/internal/lsp/source/symbols.go index 84358f1f66..218064d69e 100644 --- a/vendor/golang.org/x/tools/internal/lsp/source/symbols.go +++ b/vendor/golang.org/x/tools/internal/lsp/source/symbols.go @@ -6,7 +6,6 @@ package source import ( "context" - "errors" "fmt" "go/ast" "go/token" @@ -14,6 +13,7 @@ import ( "golang.org/x/tools/internal/lsp/telemetry/trace" "golang.org/x/tools/internal/span" + errors "golang.org/x/xerrors" ) type SymbolKind int @@ -52,7 +52,7 @@ func DocumentSymbols(ctx context.Context, f GoFile) ([]Symbol, error) { } pkg := f.GetPackage(ctx) if pkg == nil || pkg.IsIllTyped() { - return nil, fmt.Errorf("no package for 
%s", f.URI()) + return nil, errors.Errorf("no package for %s", f.URI()) } info := pkg.GetTypesInfo() q := qualifier(file, pkg.GetTypes(), info) diff --git a/vendor/golang.org/x/tools/internal/lsp/source/util.go b/vendor/golang.org/x/tools/internal/lsp/source/util.go index 38ca24e7b8..ad25225e21 100644 --- a/vendor/golang.org/x/tools/internal/lsp/source/util.go +++ b/vendor/golang.org/x/tools/internal/lsp/source/util.go @@ -5,14 +5,49 @@ package source import ( + "context" "fmt" "go/ast" "go/token" "go/types" "path/filepath" + "regexp" "strings" + + "golang.org/x/tools/internal/span" ) +func IsGenerated(ctx context.Context, view View, uri span.URI) bool { + f, err := view.GetFile(ctx, uri) + if err != nil { + return false + } + ph := view.Session().Cache().ParseGoHandle(f.Handle(ctx), ParseHeader) + parsed, err := ph.Parse(ctx) + if parsed == nil { + return false + } + tok := view.Session().Cache().FileSet().File(parsed.Pos()) + if tok == nil { + return false + } + for _, commentGroup := range parsed.Comments { + for _, comment := range commentGroup.List { + if matched := generatedRx.MatchString(comment.Text); matched { + // Check if comment is at the beginning of the line in source. + if pos := tok.Position(comment.Slash); pos.Column == 1 { + return true + } + } + } + } + return false +} + +// Matches cgo generated comment as well as the proposed standard: +// https://golang.org/s/generatedcode +var generatedRx = regexp.MustCompile(`// .*DO NOT EDIT\.?`) + func DetectLanguage(langID, filename string) FileKind { switch langID { case "go": diff --git a/vendor/golang.org/x/tools/internal/lsp/source/view.go b/vendor/golang.org/x/tools/internal/lsp/source/view.go index 622794d7ec..826921e8fd 100644 --- a/vendor/golang.org/x/tools/internal/lsp/source/view.go +++ b/vendor/golang.org/x/tools/internal/lsp/source/view.go @@ -210,14 +210,9 @@ type View interface { Config(ctx context.Context) *packages.Config - // Process returns the process for this view. 
- // Note: this contains cached module and filesystem state, which must - // be invalidated after a 'go.mod' change. - // - // TODO(suzmue): the state cached in the process env is specific to each view, - // however, there is state that can be shared between views that is not currently - // cached, like the module cache. - ProcessEnv(ctx context.Context) *imports.ProcessEnv + // RunProcessEnvFunc runs fn with the process env for this view inserted into opts. + // Note: the process env contains cached module and filesystem state. + RunProcessEnvFunc(ctx context.Context, fn func(*imports.Options) error, opts *imports.Options) error } // File represents a source file of any type. diff --git a/vendor/golang.org/x/tools/internal/lsp/telemetry/ocagent/ocagent.go b/vendor/golang.org/x/tools/internal/lsp/telemetry/ocagent/ocagent.go index faef9541e5..6bb8b593ca 100644 --- a/vendor/golang.org/x/tools/internal/lsp/telemetry/ocagent/ocagent.go +++ b/vendor/golang.org/x/tools/internal/lsp/telemetry/ocagent/ocagent.go @@ -136,6 +136,9 @@ func convertTimestamp(t time.Time) wire.Timestamp { } func toTruncatableString(s string) *wire.TruncatableString { + if s == "" { + return nil + } return &wire.TruncatableString{Value: s} } @@ -185,6 +188,8 @@ func convertAttribute(v interface{}) wire.Attribute { return wire.IntAttribute{IntValue: int64(v)} case int64: return wire.IntAttribute{IntValue: v} + case int: + return wire.IntAttribute{IntValue: int64(v)} case uint8: return wire.IntAttribute{IntValue: int64(v)} case uint16: @@ -225,6 +230,9 @@ func convertEvent(event trace.Event) wire.TimeEvent { } func convertAnnotation(tags tag.List) *wire.Annotation { + if len(tags) == 0 { + return nil + } entry := log.ToEntry(nil, time.Time{}, tags) description := entry.Message if description == "" && entry.Error != nil { @@ -236,7 +244,7 @@ func convertAnnotation(tags tag.List) *wire.Annotation { tags = append(tags, tag.Of("Error", entry.Error)) } return &wire.Annotation{ - Description: 
toTruncatableString(entry.Message), + Description: toTruncatableString(description), Attributes: convertAttributes(tags), } } diff --git a/vendor/golang.org/x/tools/internal/lsp/telemetry/ocagent/ocagent_test.go b/vendor/golang.org/x/tools/internal/lsp/telemetry/ocagent/ocagent_test.go new file mode 100644 index 0000000000..26a5e761f5 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/lsp/telemetry/ocagent/ocagent_test.go @@ -0,0 +1,145 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package ocagent + +import ( + "encoding/json" + "errors" + "reflect" + "testing" + + "golang.org/x/tools/internal/lsp/telemetry/log" + "golang.org/x/tools/internal/lsp/telemetry/ocagent/wire" + "golang.org/x/tools/internal/lsp/telemetry/tag" +) + +func TestConvert_annotation(t *testing.T) { + tests := []struct { + name string + tagList tag.List + want *wire.Annotation + }{ + { + name: "no tags", + tagList: nil, + want: nil, + }, + { + name: "description no error", + tagList: tag.List{ + tag.Of(log.MessageTag, "cache miss"), + tag.Of("db", "godb"), + }, + want: &wire.Annotation{ + Description: &wire.TruncatableString{Value: "cache miss"}, + Attributes: &wire.Attributes{ + AttributeMap: map[string]wire.Attribute{ + "db": wire.StringAttribute{StringValue: &wire.TruncatableString{Value: "godb"}}, + }, + }, + }, + }, + + { + name: "description and error", + tagList: tag.List{ + tag.Of(log.MessageTag, "cache miss"), + tag.Of("db", "godb"), + tag.Of(log.ErrorTag, errors.New("no network connectivity")), + }, + want: &wire.Annotation{ + Description: &wire.TruncatableString{Value: "cache miss"}, + Attributes: &wire.Attributes{ + AttributeMap: map[string]wire.Attribute{ + "Error": wire.StringAttribute{StringValue: &wire.TruncatableString{Value: "no network connectivity"}}, + "db": wire.StringAttribute{StringValue: &wire.TruncatableString{Value: "godb"}}, + }, + }, + }, + }, 
+ { + name: "no description, but error", + tagList: tag.List{ + tag.Of("db", "godb"), + tag.Of(log.ErrorTag, errors.New("no network connectivity")), + }, + want: &wire.Annotation{ + Description: &wire.TruncatableString{Value: "no network connectivity"}, + Attributes: &wire.Attributes{ + AttributeMap: map[string]wire.Attribute{ + "db": wire.StringAttribute{StringValue: &wire.TruncatableString{Value: "godb"}}, + }, + }, + }, + }, + { + name: "enumerate all attribute types", + tagList: tag.List{ + tag.Of(log.MessageTag, "cache miss"), + tag.Of("db", "godb"), + + tag.Of("age", 0.456), // Constant converted into "float64" + tag.Of("ttl", float32(5000)), + tag.Of("expiry_ms", float64(1e3)), + + tag.Of("retry", false), + tag.Of("stale", true), + + tag.Of("max", 0x7fff), // Constant converted into "int" + tag.Of("opcode", int8(0x7e)), + tag.Of("base", int16(1<<9)), + tag.Of("checksum", int32(0x11f7e294)), + tag.Of("mode", int64(0644)), + + tag.Of("min", uint(1)), + tag.Of("mix", uint8(44)), + tag.Of("port", uint16(55678)), + tag.Of("min_hops", uint32(1<<9)), + tag.Of("max_hops", uint64(0xffffff)), + }, + want: &wire.Annotation{ + Description: &wire.TruncatableString{Value: "cache miss"}, + Attributes: &wire.Attributes{ + AttributeMap: map[string]wire.Attribute{ + "db": wire.StringAttribute{StringValue: &wire.TruncatableString{Value: "godb"}}, + + "age": wire.DoubleAttribute{DoubleValue: 0.456}, + "ttl": wire.DoubleAttribute{DoubleValue: 5000.0}, + "expiry_ms": wire.DoubleAttribute{DoubleValue: 1e3}, + + "retry": wire.BoolAttribute{BoolValue: false}, + "stale": wire.BoolAttribute{BoolValue: true}, + + "max": wire.IntAttribute{IntValue: 0x7fff}, + "opcode": wire.IntAttribute{IntValue: 0x7e}, + "base": wire.IntAttribute{IntValue: 1 << 9}, + "checksum": wire.IntAttribute{IntValue: 0x11f7e294}, + "mode": wire.IntAttribute{IntValue: 0644}, + + "min": wire.IntAttribute{IntValue: 1}, + "mix": wire.IntAttribute{IntValue: 44}, + "port": wire.IntAttribute{IntValue: 55678}, + 
"min_hops": wire.IntAttribute{IntValue: 1 << 9}, + "max_hops": wire.IntAttribute{IntValue: 0xffffff}, + }, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got := convertAnnotation(tt.tagList) + if !reflect.DeepEqual(got, tt.want) { + t.Fatalf("Got:\n%s\nWant:\n%s", marshaled(got), marshaled(tt.want)) + } + }) + } +} + +func marshaled(v interface{}) string { + blob, _ := json.MarshalIndent(v, "", " ") + return string(blob) +} diff --git a/vendor/golang.org/x/tools/internal/lsp/testdata/generated/generated.go b/vendor/golang.org/x/tools/internal/lsp/testdata/generated/generated.go new file mode 100644 index 0000000000..abd2beef06 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/lsp/testdata/generated/generated.go @@ -0,0 +1,7 @@ +package generated + +// Code generated by generator.go. DO NOT EDIT. + +func _() { + var y int //@diag("y", "LSP", "y declared but not used") +} diff --git a/vendor/golang.org/x/tools/internal/lsp/testdata/generated/generator.go b/vendor/golang.org/x/tools/internal/lsp/testdata/generated/generator.go new file mode 100644 index 0000000000..036998787c --- /dev/null +++ b/vendor/golang.org/x/tools/internal/lsp/testdata/generated/generator.go @@ -0,0 +1,5 @@ +package generated + +func _() { + var x int //@diag("x", "LSP", "x declared but not used") +} diff --git a/vendor/golang.org/x/tools/internal/lsp/testdata/good/good0.go b/vendor/golang.org/x/tools/internal/lsp/testdata/good/good0.go index ceb26c5df6..98f03b090c 100644 --- a/vendor/golang.org/x/tools/internal/lsp/testdata/good/good0.go +++ b/vendor/golang.org/x/tools/internal/lsp/testdata/good/good0.go @@ -1,4 +1,4 @@ -package good //@diag("package", "", "") +package good //@diag("package", "no_diagnostics", "") func stuff() { //@item(good_stuff, "stuff", "func()", "func") x := 5 diff --git a/vendor/golang.org/x/tools/internal/lsp/testdata/good/good1.go b/vendor/golang.org/x/tools/internal/lsp/testdata/good/good1.go index 95a9250fe8..184fbd26dd 
100644 --- a/vendor/golang.org/x/tools/internal/lsp/testdata/good/good1.go +++ b/vendor/golang.org/x/tools/internal/lsp/testdata/good/good1.go @@ -1,4 +1,4 @@ -package good //@diag("package", "", "") +package good //@diag("package", "no_diagnostics", "") import ( "golang.org/x/tools/internal/lsp/types" //@item(types_import, "types", "\"golang.org/x/tools/internal/lsp/types\"", "package") diff --git a/vendor/golang.org/x/tools/internal/lsp/testdata/nodisk/newdisk_exists.go b/vendor/golang.org/x/tools/internal/lsp/testdata/nodisk/newdisk_exists.go deleted file mode 100644 index cf2bdf3dc3..0000000000 --- a/vendor/golang.org/x/tools/internal/lsp/testdata/nodisk/newdisk_exists.go +++ /dev/null @@ -1 +0,0 @@ -TODO: Remove this file when golang.org/issue/33157 is resolved. \ No newline at end of file diff --git a/vendor/golang.org/x/tools/internal/lsp/testdata/noparse_format/parse_format.go.golden b/vendor/golang.org/x/tools/internal/lsp/testdata/noparse_format/parse_format.go.golden new file mode 100644 index 0000000000..474ad90bb2 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/lsp/testdata/noparse_format/parse_format.go.golden @@ -0,0 +1,9 @@ +-- gofmt -- +package noparse_format //@format("package") + +func _() { + f() +} + +-- gofmt-d -- + diff --git a/vendor/golang.org/x/tools/internal/lsp/testdata/noparse_format/parse_format.go.in b/vendor/golang.org/x/tools/internal/lsp/testdata/noparse_format/parse_format.go.in new file mode 100644 index 0000000000..4b98cf8d01 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/lsp/testdata/noparse_format/parse_format.go.in @@ -0,0 +1,5 @@ +package noparse_format //@format("package") + +func _() { +f() +} \ No newline at end of file diff --git a/vendor/golang.org/x/tools/internal/lsp/tests/tests.go b/vendor/golang.org/x/tools/internal/lsp/tests/tests.go index 767d817546..73063c833f 100644 --- a/vendor/golang.org/x/tools/internal/lsp/tests/tests.go +++ b/vendor/golang.org/x/tools/internal/lsp/tests/tests.go @@ -28,8 
+28,8 @@ import ( const ( ExpectedCompletionsCount = 144 ExpectedCompletionSnippetCount = 15 - ExpectedDiagnosticsCount = 17 - ExpectedFormatCount = 5 + ExpectedDiagnosticsCount = 21 + ExpectedFormatCount = 6 ExpectedImportCount = 2 ExpectedDefinitionsCount = 38 ExpectedTypeDefinitionsCount = 2 @@ -420,11 +420,6 @@ func (data *Data) collectDiagnostics(spn span.Span, msgSource, msg string) { if _, ok := data.Diagnostics[spn.URI()]; !ok { data.Diagnostics[spn.URI()] = []source.Diagnostic{} } - // If a file has an empty diagnostic message, return. This allows us to - // avoid testing diagnostics in files that may have a lot of them. - if msg == "" { - return - } severity := source.SeverityError if strings.Contains(string(spn.URI()), "analyzer") { severity = source.SeverityWarning diff --git a/vendor/golang.org/x/tools/internal/lsp/text_synchronization.go b/vendor/golang.org/x/tools/internal/lsp/text_synchronization.go index e870154a53..68abec0eb9 100644 --- a/vendor/golang.org/x/tools/internal/lsp/text_synchronization.go +++ b/vendor/golang.org/x/tools/internal/lsp/text_synchronization.go @@ -16,6 +16,7 @@ import ( "golang.org/x/tools/internal/lsp/telemetry/log" "golang.org/x/tools/internal/lsp/telemetry/trace" "golang.org/x/tools/internal/span" + errors "golang.org/x/xerrors" ) func (s *Server) didOpen(ctx context.Context, params *protocol.DidOpenTextDocumentParams) error { @@ -28,8 +29,17 @@ func (s *Server) didOpen(ctx context.Context, params *protocol.DidOpenTextDocume // Open the file. s.session.DidOpen(ctx, uri, fileKind, text) - // Run diagnostics on the newly-changed file. view := s.session.ViewOf(uri) + + // TODO: Ideally, we should be able to specify that a generated file should be opened as read-only. + if source.IsGenerated(ctx, view, uri) { + s.client.ShowMessage(ctx, &protocol.ShowMessageParams{ + Message: fmt.Sprintf("Do not edit this file! 
%s is a generated file.", uri.Filename()), + Type: protocol.Warning, + }) + } + + // Run diagnostics on the newly-changed file. go func() { ctx := view.BackgroundContext() ctx, done := trace.StartSpan(ctx, "lsp:background-worker") @@ -54,7 +64,7 @@ func (s *Server) didChange(ctx context.Context, params *protocol.DidChangeTextDo if !isFullChange { switch s.textDocumentSyncKind { case protocol.Full: - return fmt.Errorf("expected a full content change, received incremental changes for %s", uri) + return errors.Errorf("expected a full content change, received incremental changes for %s", uri) case protocol.Incremental: // Determine the new file content. var err error diff --git a/vendor/golang.org/x/tools/internal/lsp/util.go b/vendor/golang.org/x/tools/internal/lsp/util.go index d3b375b4b0..cf5cf4599d 100644 --- a/vendor/golang.org/x/tools/internal/lsp/util.go +++ b/vendor/golang.org/x/tools/internal/lsp/util.go @@ -6,11 +6,11 @@ package lsp import ( "context" - "fmt" "golang.org/x/tools/internal/lsp/protocol" "golang.org/x/tools/internal/lsp/source" "golang.org/x/tools/internal/span" + errors "golang.org/x/xerrors" ) func getSourceFile(ctx context.Context, v source.View, uri span.URI) (source.File, *protocol.ColumnMapper, error) { @@ -38,7 +38,7 @@ func getGoFile(ctx context.Context, v source.View, uri span.URI) (source.GoFile, } gof, ok := f.(source.GoFile) if !ok { - return nil, nil, fmt.Errorf("not a Go file %v", f.URI()) + return nil, nil, errors.Errorf("not a Go file %v", f.URI()) } return gof, m, nil } diff --git a/vendor/golang.org/x/tools/internal/lsp/workspace.go b/vendor/golang.org/x/tools/internal/lsp/workspace.go index ddb597ec39..5812756900 100644 --- a/vendor/golang.org/x/tools/internal/lsp/workspace.go +++ b/vendor/golang.org/x/tools/internal/lsp/workspace.go @@ -6,10 +6,10 @@ package lsp import ( "context" - "fmt" "golang.org/x/tools/internal/lsp/protocol" "golang.org/x/tools/internal/span" + errors "golang.org/x/xerrors" ) func (s *Server) 
changeFolders(ctx context.Context, event protocol.WorkspaceFoldersChangeEvent) error { @@ -18,7 +18,7 @@ func (s *Server) changeFolders(ctx context.Context, event protocol.WorkspaceFold if view != nil { view.Shutdown(ctx) } else { - return fmt.Errorf("view %s for %v not found", folder.Name, folder.URI) + return errors.Errorf("view %s for %v not found", folder.Name, folder.URI) } } @@ -31,6 +31,12 @@ func (s *Server) changeFolders(ctx context.Context, event protocol.WorkspaceFold } func (s *Server) addView(ctx context.Context, name string, uri span.URI) error { - s.session.NewView(ctx, name, uri) + view := s.session.NewView(ctx, name, uri) + s.stateMu.Lock() + state := s.state + s.stateMu.Unlock() + if state >= serverInitialized { + s.fetchConfig(ctx, view) + } return nil } diff --git a/vendor/k8s.io/klog/.travis.yml b/vendor/k8s.io/klog/.travis.yml index 0f508dae66..5677664c21 100644 --- a/vendor/k8s.io/klog/.travis.yml +++ b/vendor/k8s.io/klog/.travis.yml @@ -5,11 +5,12 @@ go: - 1.9.x - 1.10.x - 1.11.x + - 1.12.x script: - go get -t -v ./... - diff -u <(echo -n) <(gofmt -d .) - diff -u <(echo -n) <(golint $(go list -e ./...)) - - go tool vet . + - go tool vet . || go vet . - go test -v -race ./... install: - go get golang.org/x/lint/golint diff --git a/vendor/k8s.io/klog/README.md b/vendor/k8s.io/klog/README.md index bee306f398..841468b4b6 100644 --- a/vendor/k8s.io/klog/README.md +++ b/vendor/k8s.io/klog/README.md @@ -31,7 +31,7 @@ How to use klog - Use `klog.InitFlags(nil)` explicitly for initializing global flags as we no longer use `init()` method to register the flags - You can now use `log-file` instead of `log-dir` for logging to a single file (See `examples/log_file/usage_log_file.go`) - If you want to redirect everything logged using klog somewhere else (say syslog!), you can use `klog.SetOutput()` method and supply a `io.Writer`. 
(See `examples/set_output/usage_set_output.go`) -- For more logging conventions (See [Logging Conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md)) +- For more logging conventions (See [Logging Conventions](https://github.com/kubernetes/community/blob/master/contributors/devel/sig-instrumentation/logging.md)) ### Coexisting with glog This package can be used side by side with glog. [This example](examples/coexist_glog/coexist_glog.go) shows how to initialize and syncronize flags from the global `flag.CommandLine` FlagSet. In addition, the example makes use of stderr as combined output by setting `alsologtostderr` (or `logtostderr`) to `true`. diff --git a/vendor/k8s.io/klog/go.mod b/vendor/k8s.io/klog/go.mod new file mode 100644 index 0000000000..3877d8546a --- /dev/null +++ b/vendor/k8s.io/klog/go.mod @@ -0,0 +1,5 @@ +module k8s.io/klog + +go 1.12 + +require github.com/go-logr/logr v0.1.0 diff --git a/vendor/k8s.io/klog/go.sum b/vendor/k8s.io/klog/go.sum new file mode 100644 index 0000000000..fb64d277a7 --- /dev/null +++ b/vendor/k8s.io/klog/go.sum @@ -0,0 +1,2 @@ +github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg= +github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= diff --git a/vendor/k8s.io/klog/integration_tests/internal/main.go b/vendor/k8s.io/klog/integration_tests/internal/main.go new file mode 100644 index 0000000000..2801c17e32 --- /dev/null +++ b/vendor/k8s.io/klog/integration_tests/internal/main.go @@ -0,0 +1,46 @@ +/* + +This file is intended to be used as a standin for a klog'ed executable. + +It is called by the integration test via `go run` and with different klog +flags to assert on klog behaviour, especially where klog logs its output +when different combinations of the klog flags are at play. + +This file is not intended to be used outside of the intergration tests and +is not supposed to be a (good) example on how to use klog. 
+ +*/ + +package main + +import ( + "flag" + "fmt" + "os" + + "k8s.io/klog" +) + +func main() { + infoLogLine := getEnvOrDie("KLOG_INFO_LOG") + warningLogLine := getEnvOrDie("KLOG_WARNING_LOG") + errorLogLine := getEnvOrDie("KLOG_ERROR_LOG") + fatalLogLine := getEnvOrDie("KLOG_FATAL_LOG") + + klog.InitFlags(nil) + flag.Parse() + klog.Info(infoLogLine) + klog.Warning(warningLogLine) + klog.Error(errorLogLine) + klog.Flush() + klog.Fatal(fatalLogLine) +} + +func getEnvOrDie(name string) string { + val, ok := os.LookupEnv(name) + if !ok { + fmt.Fprintf(os.Stderr, name+" could not be found in environment") + os.Exit(1) + } + return val +} diff --git a/vendor/k8s.io/klog/integration_tests/klog_test.go b/vendor/k8s.io/klog/integration_tests/klog_test.go new file mode 100644 index 0000000000..de4ba4b096 --- /dev/null +++ b/vendor/k8s.io/klog/integration_tests/klog_test.go @@ -0,0 +1,311 @@ +package integration_tests_test + +import ( + "bytes" + "io" + "io/ioutil" + "os" + "os/exec" + "path/filepath" + "regexp" + "testing" +) + +const ( + infoLog = "this is a info log line" + warningLog = "this is a warning log line" + errorLog = "this is a error log line" + fatalLog = "this is a fatal log line" +) + +// res is a type alias to a slice of pointers to regular expressions. 
+type res = []*regexp.Regexp + +var ( + infoLogRE = regexp.MustCompile(regexp.QuoteMeta(infoLog)) + warningLogRE = regexp.MustCompile(regexp.QuoteMeta(warningLog)) + errorLogRE = regexp.MustCompile(regexp.QuoteMeta(errorLog)) + fatalLogRE = regexp.MustCompile(regexp.QuoteMeta(fatalLog)) + + stackTraceRE = regexp.MustCompile(`\ngoroutine \d+ \[[^]]+\]:\n`) + + allLogREs = res{infoLogRE, warningLogRE, errorLogRE, fatalLogRE, stackTraceRE} + + defaultExpectedInDirREs = map[int]res{ + 0: {stackTraceRE, fatalLogRE, errorLogRE, warningLogRE, infoLogRE}, + 1: {stackTraceRE, fatalLogRE, errorLogRE, warningLogRE}, + 2: {stackTraceRE, fatalLogRE, errorLogRE}, + 3: {stackTraceRE, fatalLogRE}, + } + + defaultNotExpectedInDirREs = map[int]res{ + 0: {}, + 1: {infoLogRE}, + 2: {infoLogRE, warningLogRE}, + 3: {infoLogRE, warningLogRE, errorLogRE}, + } +) + +func TestDestinationsWithDifferentFlags(t *testing.T) { + tests := map[string]struct { + // logfile states if the flag -log_file should be set + logfile bool + // logdir states if the flag -log_dir should be set + logdir bool + // flags is for additional flags to pass to the klog'ed executable + flags []string + + // expectedLogFile states if we generally expect the log file to exist. + // If this is not set, we expect the file not to exist and will error if it + // does. + expectedLogFile bool + // expectedLogDir states if we generally expect the log files in the log + // dir to exist. + // If this is not set, we expect the log files in the log dir not to exist and + // will error if they do. 
+ expectedLogDir bool + + // expectedOnStderr is a list of REs we expect to find on stderr + expectedOnStderr res + // notExpectedOnStderr is a list of REs that we must not find on stderr + notExpectedOnStderr res + // expectedInFile is a list of REs we expect to find in the log file + expectedInFile res + // notExpectedInFile is a list of REs we must not find in the log file + notExpectedInFile res + + // expectedInDir is a list of REs we expect to find in the log files in the + // log dir, specified by log severity (0 = warning, 1 = info, ...) + expectedInDir map[int]res + // notExpectedInDir is a list of REs we must not find in the log files in + // the log dir, specified by log severity (0 = warning, 1 = info, ...) + notExpectedInDir map[int]res + }{ + "default flags": { + // Everything, EXCEPT the trace on fatal, goes to stderr + + expectedOnStderr: res{infoLogRE, warningLogRE, errorLogRE, fatalLogRE}, + notExpectedOnStderr: res{stackTraceRE}, + }, + "everything disabled": { + // Nothing, including the trace on fatal, is showing anywhere + + flags: []string{"-logtostderr=false", "-alsologtostderr=false", "-stderrthreshold=1000"}, + + notExpectedOnStderr: allLogREs, + }, + "everything disabled but low stderrthreshold": { + // Everything above -stderrthreshold, including the trace on fatal, will + // be logged to stderr, even if we set -logtostderr to false. 
+ + flags: []string{"-logtostderr=false", "-alsologtostderr=false", "-stderrthreshold=1"}, + + expectedOnStderr: res{warningLogRE, errorLogRE, stackTraceRE}, + notExpectedOnStderr: res{infoLogRE}, + }, + "with logtostderr only": { + // Everything, EXCEPT the trace on fatal, goes to stderr + + flags: []string{"-logtostderr=true", "-alsologtostderr=false", "-stderrthreshold=1000"}, + + expectedOnStderr: res{infoLogRE, warningLogRE, errorLogRE, fatalLogRE}, + notExpectedOnStderr: res{stackTraceRE}, + }, + "with log file only": { + // Everything, including the trace on fatal, goes to the single log file + + logfile: true, + flags: []string{"-logtostderr=false", "-alsologtostderr=false", "-stderrthreshold=1000"}, + + expectedLogFile: true, + + notExpectedOnStderr: allLogREs, + expectedInFile: allLogREs, + }, + "with log dir only": { + // Everything, including the trace on fatal, goes to the log files in the log dir + + logdir: true, + flags: []string{"-logtostderr=false", "-alsologtostderr=false", "-stderrthreshold=1000"}, + + expectedLogDir: true, + + notExpectedOnStderr: allLogREs, + expectedInDir: defaultExpectedInDirREs, + notExpectedInDir: defaultNotExpectedInDirREs, + }, + "with log dir and logtostderr": { + // Everything, EXCEPT the trace on fatal, goes to stderr. The -log_dir is + // ignored, nothing goes to the log files in the log dir. + + logdir: true, + flags: []string{"-logtostderr=true", "-alsologtostderr=false", "-stderrthreshold=1000"}, + + expectedOnStderr: res{infoLogRE, warningLogRE, errorLogRE, fatalLogRE}, + notExpectedOnStderr: res{stackTraceRE}, + }, + "with log file and log dir": { + // Everything, including the trace on fatal, goes to the single log file. + // The -log_dir is ignored, nothing goes to the log file in the log dir. 
+ + logdir: true, + logfile: true, + flags: []string{"-logtostderr=false", "-alsologtostderr=false", "-stderrthreshold=1000"}, + + expectedLogFile: true, + + notExpectedOnStderr: allLogREs, + expectedInFile: allLogREs, + }, + "with log file and alsologtostderr": { + // Everything, including the trace on fatal, goes to the single log file + // AND to stderr. + + flags: []string{"-alsologtostderr=true", "-logtostderr=false", "-stderrthreshold=1000"}, + logfile: true, + + expectedLogFile: true, + + expectedOnStderr: allLogREs, + expectedInFile: allLogREs, + }, + "with log dir and alsologtostderr": { + // Everything, including the trace on fatal, goes to the log file in the + // log dir AND to stderr. + + logdir: true, + flags: []string{"-alsologtostderr=true", "-logtostderr=false", "-stderrthreshold=1000"}, + + expectedLogDir: true, + + expectedOnStderr: allLogREs, + expectedInDir: defaultExpectedInDirREs, + notExpectedInDir: defaultNotExpectedInDirREs, + }, + } + + for tcName, tc := range tests { + tc := tc + t.Run(tcName, func(t *testing.T) { + t.Parallel() + withTmpDir(t, func(logdir string) { + // :: Setup + flags := tc.flags + stderr := &bytes.Buffer{} + logfile := filepath.Join(logdir, "the_single_log_file") // /some/tmp/dir/the_single_log_file + + if tc.logfile { + flags = append(flags, "-log_file="+logfile) + } + if tc.logdir { + flags = append(flags, "-log_dir="+logdir) + } + + // :: Execute + klogRun(t, flags, stderr) + + // :: Assert + // check stderr + checkForLogs(t, tc.expectedOnStderr, tc.notExpectedOnStderr, stderr.String(), "stderr") + + // check log_file + if tc.expectedLogFile { + content := getFileContent(t, logfile) + checkForLogs(t, tc.expectedInFile, tc.notExpectedInFile, content, "logfile") + } else { + assertFileIsAbsent(t, logfile) + } + + // check files in log_dir + for level, file := range logFileName { + logfile := filepath.Join(logdir, file) // /some/tmp/dir/main.WARNING + if tc.expectedLogDir { + content := getFileContent(t, logfile) + 
checkForLogs(t, tc.expectedInDir[level], tc.notExpectedInDir[level], content, "logfile["+file+"]") + } else { + assertFileIsAbsent(t, logfile) + } + } + }) + }) + } +} + +const klogExampleGoFile = "./internal/main.go" + +// klogRun spawns a simple executable that uses klog, to later inspect its +// stderr and potentially created log files +func klogRun(t *testing.T, flags []string, stderr io.Writer) { + callFlags := []string{"run", klogExampleGoFile} + callFlags = append(callFlags, flags...) + + cmd := exec.Command("go", callFlags...) + cmd.Stderr = stderr + cmd.Env = append(os.Environ(), + "KLOG_INFO_LOG="+infoLog, + "KLOG_WARNING_LOG="+warningLog, + "KLOG_ERROR_LOG="+errorLog, + "KLOG_FATAL_LOG="+fatalLog, + ) + + err := cmd.Run() + + if _, ok := err.(*exec.ExitError); !ok { + t.Fatalf("Run failed: %v", err) + } +} + +var logFileName = map[int]string{ + 0: "main.INFO", + 1: "main.WARNING", + 2: "main.ERROR", + 3: "main.FATAL", +} + +func getFileContent(t *testing.T, filePath string) string { + content, err := ioutil.ReadFile(filePath) + if err != nil { + t.Errorf("Could not read file '%s': %v", filePath, err) + } + return string(content) +} + +func assertFileIsAbsent(t *testing.T, filePath string) { + if _, err := os.Stat(filePath); !os.IsNotExist(err) { + t.Errorf("Expected file '%s' not to exist", filePath) + } +} + +func checkForLogs(t *testing.T, expected, disallowed res, content, name string) { + for _, re := range expected { + checkExpected(t, true, name, content, re) + } + for _, re := range disallowed { + checkExpected(t, false, name, content, re) + } +} + +func checkExpected(t *testing.T, expected bool, where string, haystack string, needle *regexp.Regexp) { + found := needle.MatchString(haystack) + + if expected && !found { + t.Errorf("Expected to find '%s' in %s", needle, where) + } + if !expected && found { + t.Errorf("Expected not to find '%s' in %s", needle, where) + } +} + +func withTmpDir(t *testing.T, f func(string)) { + tmpDir, err := 
ioutil.TempDir("", "klog_e2e_") + if err != nil { + t.Fatalf("Could not create temp directory: %v", err) + } + defer func() { + if err := os.RemoveAll(tmpDir); err != nil { + t.Fatalf("Could not remove temp directory '%s': %v", tmpDir, err) + } + }() + + f(tmpDir) +} diff --git a/vendor/k8s.io/klog/klog.go b/vendor/k8s.io/klog/klog.go index 10330d7ef7..2520ebdaa7 100644 --- a/vendor/k8s.io/klog/klog.go +++ b/vendor/k8s.io/klog/klog.go @@ -20,17 +20,17 @@ // // Basic examples: // -// glog.Info("Prepare to repel boarders") +// klog.Info("Prepare to repel boarders") // -// glog.Fatalf("Initialization failed: %s", err) +// klog.Fatalf("Initialization failed: %s", err) // // See the documentation for the V function for an explanation of these examples: // -// if glog.V(2) { -// glog.Info("Starting transaction...") +// if klog.V(2) { +// klog.Info("Starting transaction...") // } // -// glog.V(2).Infoln("Processed", nItems, "elements") +// klog.V(2).Infoln("Processed", nItems, "elements") // // Log output is buffered and written periodically using Flush. Programs // should call Flush before exiting to guarantee all log output is written. 
@@ -417,6 +417,7 @@ func InitFlags(flagset *flag.FlagSet) {
 	logging.toStderr = true
 	logging.alsoToStderr = false
 	logging.skipHeaders = false
+	logging.addDirHeader = false
 	logging.skipLogHeaders = false
 	})
@@ -432,6 +433,7 @@ func InitFlags(flagset *flag.FlagSet) {
 	flagset.BoolVar(&logging.toStderr, "logtostderr", logging.toStderr, "log to standard error instead of files")
 	flagset.BoolVar(&logging.alsoToStderr, "alsologtostderr", logging.alsoToStderr, "log to standard error as well as files")
 	flagset.Var(&logging.verbosity, "v", "number for the log level verbosity")
+	flagset.BoolVar(&logging.addDirHeader, "add_dir_header", logging.addDirHeader, "If true, adds the file directory to the header")
 	flagset.BoolVar(&logging.skipHeaders, "skip_headers", logging.skipHeaders, "If true, avoid header prefixes in the log messages")
 	flagset.BoolVar(&logging.skipLogHeaders, "skip_log_headers", logging.skipLogHeaders, "If true, avoid headers when opening log files")
 	flagset.Var(&logging.stderrThreshold, "stderrthreshold", "logs at or above this threshold go to stderr")
@@ -500,6 +502,9 @@ type loggingT struct {
 	// If true, do not add the headers to log files
 	skipLogHeaders bool
+
+	// If true, add the file directory to the header
+	addDirHeader bool
 }
 
 // buffer holds a byte Buffer for reuse. The zero value is ready for use.
@@ -585,9 +590,14 @@ func (l *loggingT) header(s severity, depth int) (*buffer, string, int) {
 		file = "???"
line = 1 } else { - slash := strings.LastIndex(file, "/") - if slash >= 0 { - file = file[slash+1:] + if slash := strings.LastIndex(file, "/"); slash >= 0 { + path := file + file = path[slash+1:] + if l.addDirHeader { + if dirsep := strings.LastIndex(path[:slash], "/"); dirsep >= 0 { + file = path[dirsep+1:] + } + } } } return l.formatHeader(s, file, line), file, line @@ -736,6 +746,8 @@ func (rb *redirectBuffer) Write(bytes []byte) (n int, err error) { // SetOutput sets the output destination for all severities func SetOutput(w io.Writer) { + logging.mu.Lock() + defer logging.mu.Unlock() for s := fatalLog; s >= infoLog; s-- { rb := &redirectBuffer{ w: w, @@ -746,6 +758,8 @@ func SetOutput(w io.Writer) { // SetOutputBySeverity sets the output destination for specific severity func SetOutputBySeverity(name string, w io.Writer) { + logging.mu.Lock() + defer logging.mu.Unlock() sev, ok := severityByName(name) if !ok { panic(fmt.Sprintf("SetOutputBySeverity(%q): unrecognized severity name", name)) @@ -771,24 +785,38 @@ func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoTo if alsoToStderr || l.alsoToStderr || s >= l.stderrThreshold.get() { os.Stderr.Write(data) } - if l.file[s] == nil { - if err := l.createFiles(s); err != nil { - os.Stderr.Write(data) // Make sure the message appears somewhere. - l.exit(err) + + if logging.logFile != "" { + // Since we are using a single log file, all of the items in l.file array + // will point to the same file, so just use one of them to write data. + if l.file[infoLog] == nil { + if err := l.createFiles(infoLog); err != nil { + os.Stderr.Write(data) // Make sure the message appears somewhere. 
+ l.exit(err) + } } - } - switch s { - case fatalLog: - l.file[fatalLog].Write(data) - fallthrough - case errorLog: - l.file[errorLog].Write(data) - fallthrough - case warningLog: - l.file[warningLog].Write(data) - fallthrough - case infoLog: l.file[infoLog].Write(data) + } else { + if l.file[s] == nil { + if err := l.createFiles(s); err != nil { + os.Stderr.Write(data) // Make sure the message appears somewhere. + l.exit(err) + } + } + + switch s { + case fatalLog: + l.file[fatalLog].Write(data) + fallthrough + case errorLog: + l.file[errorLog].Write(data) + fallthrough + case warningLog: + l.file[warningLog].Write(data) + fallthrough + case infoLog: + l.file[infoLog].Write(data) + } } } if s == fatalLog { @@ -827,7 +855,7 @@ func (l *loggingT) output(s severity, buf *buffer, file string, line int, alsoTo // timeoutFlush calls Flush and returns when it completes or after timeout // elapses, whichever happens first. This is needed because the hooks invoked -// by Flush may deadlock when glog.Fatal is called from a hook that holds +// by Flush may deadlock when klog.Fatal is called from a hook that holds // a lock. func timeoutFlush(timeout time.Duration) { done := make(chan bool, 1) @@ -838,7 +866,7 @@ func timeoutFlush(timeout time.Duration) { select { case <-done: case <-time.After(timeout): - fmt.Fprintln(os.Stderr, "glog: Flush took longer than", timeout) + fmt.Fprintln(os.Stderr, "klog: Flush took longer than", timeout) } } @@ -1094,9 +1122,9 @@ type Verbose bool // The returned value is a boolean of type Verbose, which implements Info, Infoln // and Infof. These methods will write to the Info log if called. // Thus, one may write either -// if glog.V(2) { glog.Info("log this") } +// if klog.V(2) { klog.Info("log this") } // or -// glog.V(2).Info("log this") +// klog.V(2).Info("log this") // The second form is shorter but the first is cheaper if logging is off because it does // not evaluate its arguments. 
// @@ -1170,7 +1198,7 @@ func InfoDepth(depth int, args ...interface{}) { } // Infoln logs to the INFO log. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +// Arguments are handled in the manner of fmt.Println; a newline is always appended. func Infoln(args ...interface{}) { logging.println(infoLog, args...) } @@ -1194,7 +1222,7 @@ func WarningDepth(depth int, args ...interface{}) { } // Warningln logs to the WARNING and INFO logs. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +// Arguments are handled in the manner of fmt.Println; a newline is always appended. func Warningln(args ...interface{}) { logging.println(warningLog, args...) } @@ -1218,7 +1246,7 @@ func ErrorDepth(depth int, args ...interface{}) { } // Errorln logs to the ERROR, WARNING, and INFO logs. -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +// Arguments are handled in the manner of fmt.Println; a newline is always appended. func Errorln(args ...interface{}) { logging.println(errorLog, args...) } @@ -1244,7 +1272,7 @@ func FatalDepth(depth int, args ...interface{}) { // Fatalln logs to the FATAL, ERROR, WARNING, and INFO logs, // including a stack trace of all running goroutines, then calls os.Exit(255). -// Arguments are handled in the manner of fmt.Println; a newline is appended if missing. +// Arguments are handled in the manner of fmt.Println; a newline is always appended. func Fatalln(args ...interface{}) { logging.println(fatalLog, args...) 
} diff --git a/vendor/k8s.io/klog/klog_test.go b/vendor/k8s.io/klog/klog_test.go index c59a7bf345..76a780c6f7 100644 --- a/vendor/k8s.io/klog/klog_test.go +++ b/vendor/k8s.io/klog/klog_test.go @@ -22,6 +22,7 @@ import ( "fmt" "io/ioutil" stdLog "log" + "os" "path/filepath" "runtime" "strconv" @@ -88,6 +89,7 @@ func contains(s severity, str string, t *testing.T) bool { // setFlags configures the logging flags how the test expects them. func setFlags() { logging.toStderr = false + logging.addDirHeader = false } // Test that Info works as advertised. @@ -198,6 +200,30 @@ func TestHeader(t *testing.T) { } } +func TestHeaderWithDir(t *testing.T) { + setFlags() + logging.addDirHeader = true + defer logging.swap(logging.newBuffers()) + defer func(previous func() time.Time) { timeNow = previous }(timeNow) + timeNow = func() time.Time { + return time.Date(2006, 1, 2, 15, 4, 5, .067890e9, time.Local) + } + pid = 1234 + Info("test") + var line int + format := "I0102 15:04:05.067890 1234 klog/klog_test.go:%d] test\n" + n, err := fmt.Sscanf(contents(infoLog), format, &line) + if n != 1 || err != nil { + t.Errorf("log format error: %d elements, error %s:\n%s", n, err, contents(infoLog)) + } + // Scanf treats multiple spaces as equivalent to a single space, + // so check for correct space-padding also. + want := fmt.Sprintf(format, line) + if contents(infoLog) != want { + t.Errorf("log format error: got:\n\t%q\nwant:\t%q", contents(infoLog), want) + } +} + // Test that an Error log goes to Warning and Info. // Even in the Info log, the source character will be E, so the data should // all be identical. 
@@ -295,6 +321,36 @@ func TestVmoduleOff(t *testing.T) { } } +func TestSetOutputDataRace(t *testing.T) { + setFlags() + defer logging.swap(logging.newBuffers()) + for i := 1; i <= 50; i++ { + go func() { + logging.flushDaemon() + }() + } + for i := 1; i <= 50; i++ { + go func() { + SetOutput(ioutil.Discard) + }() + } + for i := 1; i <= 50; i++ { + go func() { + logging.flushDaemon() + }() + } + for i := 1; i <= 50; i++ { + go func() { + SetOutputBySeverity("INFO", ioutil.Discard) + }() + } + for i := 1; i <= 50; i++ { + go func() { + logging.flushDaemon() + }() + } +} + // vGlobs are patterns that match/don't match this file at V=2. var vGlobs = map[string]bool{ // Easy to test the numeric match here. @@ -473,7 +529,7 @@ func TestLogBacktraceAt(t *testing.T) { // Need 2 appearances, one in the log header and one in the trace: // log_test.go:281: I0511 16:36:06.952398 02238 log_test.go:280] we want a stack trace here // ... - // github.com/glog/glog_test.go:280 (0x41ba91) + // k8s.io/klog/klog_test.go:280 (0x41ba91) // ... // We could be more precise but that would require knowing the details // of the traceback format, which may not be dependable. 
@@ -488,6 +544,39 @@ func BenchmarkHeader(b *testing.B) { } } +func BenchmarkHeaderWithDir(b *testing.B) { + logging.addDirHeader = true + for i := 0; i < b.N; i++ { + buf, _, _ := logging.header(infoLog, 0) + logging.putBuffer(buf) + } +} + +func BenchmarkLogs(b *testing.B) { + setFlags() + defer logging.swap(logging.newBuffers()) + + testFile, err := ioutil.TempFile("", "test.log") + if err != nil { + b.Error("unable to create temporary file") + } + defer os.Remove(testFile.Name()) + + logging.verbosity.Set("0") + logging.toStderr = false + logging.alsoToStderr = false + logging.stderrThreshold = fatalLog + logging.logFile = testFile.Name() + logging.swap([numSeverity]flushSyncWriter{nil, nil, nil, nil}) + + for i := 0; i < b.N; i++ { + Error("error") + Warning("warning") + Info("info") + } + logging.flushAll() +} + // Test the logic on checking log size limitation. func TestFileSizeCheck(t *testing.T) { setFlags() diff --git a/vendor/k8s.io/sample-controller/Godeps/Godeps.json b/vendor/k8s.io/sample-controller/Godeps/Godeps.json index 4de2890f17..6bd9d8b0fb 100644 --- a/vendor/k8s.io/sample-controller/Godeps/Godeps.json +++ b/vendor/k8s.io/sample-controller/Godeps/Godeps.json @@ -80,7 +80,7 @@ }, { "ImportPath": "github.com/gogo/protobuf", - "Rev": "v1.0.0" + "Rev": "65acae22fc9d" }, { "ImportPath": "github.com/golang/groupcache", @@ -132,7 +132,15 @@ }, { "ImportPath": "github.com/json-iterator/go", - "Rev": "v1.1.6" + "Rev": "v1.1.7" + }, + { + "ImportPath": "github.com/kisielk/errcheck", + "Rev": "v1.2.0" + }, + { + "ImportPath": "github.com/kisielk/gotool", + "Rev": "v1.0.0" }, { "ImportPath": "github.com/kr/pretty", @@ -280,19 +288,19 @@ }, { "ImportPath": "k8s.io/api", - "Rev": "c8a0b81cb10e" + "Rev": "077ce48e77da" }, { "ImportPath": "k8s.io/apimachinery", - "Rev": "0bb8574e0887" + "Rev": "ac5d3b819fc6" }, { "ImportPath": "k8s.io/client-go", - "Rev": "0c47f9da0001" + "Rev": "396a06da3bd7" }, { "ImportPath": "k8s.io/code-generator", - "Rev": "77f3a1fe56bb" 
+ "Rev": "d0071a119380" }, { "ImportPath": "k8s.io/gengo", @@ -308,7 +316,7 @@ }, { "ImportPath": "k8s.io/utils", - "Rev": "c55fbcfc754a" + "Rev": "581e00157fb1" }, { "ImportPath": "modernc.org/cc", diff --git a/vendor/k8s.io/sample-controller/go.mod b/vendor/k8s.io/sample-controller/go.mod index 63ac3ec858..aa6ff608d8 100644 --- a/vendor/k8s.io/sample-controller/go.mod +++ b/vendor/k8s.io/sample-controller/go.mod @@ -5,10 +5,10 @@ module k8s.io/sample-controller go 1.12 require ( - k8s.io/api v0.0.0-20190718062839-c8a0b81cb10e - k8s.io/apimachinery v0.0.0-20190717022731-0bb8574e0887 - k8s.io/client-go v0.0.0-20190717023132-0c47f9da0001 - k8s.io/code-generator v0.0.0-20190717022600-77f3a1fe56bb + k8s.io/api v0.0.0-20190808180749-077ce48e77da + k8s.io/apimachinery v0.0.0-20190808180622-ac5d3b819fc6 + k8s.io/client-go v0.0.0-20190808180953-396a06da3bd7 + k8s.io/code-generator v0.0.0-20190808180452-d0071a119380 k8s.io/klog v0.3.1 ) @@ -19,8 +19,8 @@ replace ( golang.org/x/sys => golang.org/x/sys v0.0.0-20190209173611-3b5209105503 golang.org/x/text => golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db golang.org/x/tools => golang.org/x/tools v0.0.0-20190313210603-aa82965741a9 - k8s.io/api => k8s.io/api v0.0.0-20190718062839-c8a0b81cb10e - k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20190717022731-0bb8574e0887 - k8s.io/client-go => k8s.io/client-go v0.0.0-20190717023132-0c47f9da0001 - k8s.io/code-generator => k8s.io/code-generator v0.0.0-20190717022600-77f3a1fe56bb + k8s.io/api => k8s.io/api v0.0.0-20190808180749-077ce48e77da + k8s.io/apimachinery => k8s.io/apimachinery v0.0.0-20190808180622-ac5d3b819fc6 + k8s.io/client-go => k8s.io/client-go v0.0.0-20190808180953-396a06da3bd7 + k8s.io/code-generator => k8s.io/code-generator v0.0.0-20190808180452-d0071a119380 ) diff --git a/vendor/k8s.io/sample-controller/go.sum b/vendor/k8s.io/sample-controller/go.sum index c73aa23343..e84357d24c 100644 --- a/vendor/k8s.io/sample-controller/go.sum +++ 
b/vendor/k8s.io/sample-controller/go.sum @@ -28,8 +28,8 @@ github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nA github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/gogo/protobuf v1.0.0 h1:2jyBKDKU/8v3v2xVR2PtiWQviFUyiaGk2rpfyFT8rTM= -github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d h1:3PaI8p3seN09VjbTYC/QWlUZdZ1qS1zGjy7LH2Wt07I= +github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903 h1:LbsanbbD6LieFkXbj9YNNBupiGHJgFeLpO0j0Fza1h8= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -54,8 +54,10 @@ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpO github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.6 h1:MrUvLMLTMxbqFJ9kzlvat/rYZqZnW3u4wkLzWTaFwKs= -github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo= +github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/kisielk/errcheck v1.2.0/go.mod 
h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= @@ -64,9 +66,11 @@ github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= @@ -136,10 +140,10 @@ gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -k8s.io/api v0.0.0-20190718062839-c8a0b81cb10e/go.mod h1:5UP0nKwb/iEVBSMrDGsFuoIlrOOjKvatkMrhuY0czQk= -k8s.io/apimachinery v0.0.0-20190717022731-0bb8574e0887/go.mod h1:sBJWIJZfxLhp7mRsRyuAE/NfKTr3kXGR1iaqg8O0gJo= -k8s.io/client-go v0.0.0-20190717023132-0c47f9da0001/go.mod h1:JvcLDbEoGrrziiUkPAV/sdE4llq5kUUrDdGtJ/RpAJQ= -k8s.io/code-generator v0.0.0-20190717022600-77f3a1fe56bb/go.mod h1:cDx5jQmWH25Ff74daM7NVYty9JWw9dvIS9zT9eIubCY= +k8s.io/api v0.0.0-20190808180749-077ce48e77da/go.mod h1:irWZZ8fkUYB2+fwyvjN9QMt0m5/1PYsJc1eJElzGHeM= +k8s.io/apimachinery v0.0.0-20190808180622-ac5d3b819fc6/go.mod h1:Waf/xTS2FGRrgXCkO5FP3XxTOWh0qLf2QhL1qFZZ/R8= +k8s.io/client-go v0.0.0-20190808180953-396a06da3bd7/go.mod h1:H23TU1TQHQmIajoknimIaarT7sd/34hNrtXKX9HE9so= +k8s.io/code-generator v0.0.0-20190808180452-d0071a119380/go.mod h1:yWQ6Ygojs0rLB0sAgl4OcQSi2sM7k20oNWn+7H9w+eA= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6 h1:4s3/R4+OYYYUKptXPhZKjQ04WJ6EhQQVFdjOFvCazDk= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= @@ -148,8 +152,8 @@ k8s.io/klog v0.3.1 h1:RVgyDHY/kFKtLqh67NvEWIgkMneNoIrdkN0CxDSQc68= k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/kube-openapi v0.0.0-20190709113604-33be087ad058 h1:di3XCwddOR9cWBNpfgXaskhh6cgJuwcK54rvtwUaC10= k8s.io/kube-openapi v0.0.0-20190709113604-33be087ad058/go.mod h1:nfDlWeOsu3pUf4yWGL+ERqohP4YsZcBJXWMK+gkzOA4= -k8s.io/utils v0.0.0-20190607212802-c55fbcfc754a h1:2jUDc9gJja832Ftp+QbDV0tVhQHMISFn01els+2ZAcw= -k8s.io/utils v0.0.0-20190607212802-c55fbcfc754a/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/utils v0.0.0-20190801114015-581e00157fb1 h1:+ySTxfHnfzZb9ys375PXNlLhkJPLKgHajBU0N62BDvE= +k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= 
modernc.org/cc v1.0.0/go.mod h1:1Sk4//wdnYJiUIxnW8ddKpaOJCF37yAdqYnkxUpaYxw= modernc.org/golex v1.0.0/go.mod h1:b/QX9oBD/LhixY6NDh+IdGv17hgB+51fET1i2kPSmvk= modernc.org/mathutil v1.0.0/go.mod h1:wU0vUrJsVWBZ4P6e7xtFJEhFSNsfRLJ8H458uRjg03k= diff --git a/vendor/k8s.io/utils/trace/trace.go b/vendor/k8s.io/utils/trace/trace.go index f672d88f21..3b424104a9 100644 --- a/vendor/k8s.io/utils/trace/trace.go +++ b/vendor/k8s.io/utils/trace/trace.go @@ -25,32 +25,55 @@ import ( "k8s.io/klog" ) +// Field is a key value pair that provides additional details about the trace. +type Field struct { + Key string + Value interface{} +} + +func (f Field) format() string { + return fmt.Sprintf("%s:%v", f.Key, f.Value) +} + +func writeFields(b *bytes.Buffer, l []Field) { + for i, f := range l { + b.WriteString(f.format()) + if i < len(l)-1 { + b.WriteString(",") + } + } +} + type traceStep struct { stepTime time.Time msg string + fields []Field } // Trace keeps track of a set of "steps" and allows us to log a specific // step if it took longer than its share of the total allowed time type Trace struct { name string + fields []Field startTime time.Time steps []traceStep } -// New creates a Trace with the specified name -func New(name string) *Trace { - return &Trace{name, time.Now(), nil} +// New creates a Trace with the specified name. The name identifies the operation to be traced. The +// Fields add key value pairs to provide additional details about the trace, such as operation inputs. +func New(name string, fields ...Field) *Trace { + return &Trace{name: name, startTime: time.Now(), fields: fields} } -// Step adds a new step with a specific message. Call this at the end of an -// execution step to record how long it took. -func (t *Trace) Step(msg string) { +// Step adds a new step with a specific message. Call this at the end of an execution step to record +// how long it took. The Fields add key value pairs to provide additional details about the trace +// step. 
+func (t *Trace) Step(msg string, fields ...Field) { if t.steps == nil { // traces almost always have less than 6 steps, do this to avoid more than a single allocation t.steps = make([]traceStep, 0, 6) } - t.steps = append(t.steps, traceStep{time.Now(), msg}) + t.steps = append(t.steps, traceStep{stepTime: time.Now(), msg: msg, fields: fields}) } // Log is used to dump all the steps in the Trace @@ -65,12 +88,23 @@ func (t *Trace) logWithStepThreshold(stepThreshold time.Duration) { endTime := time.Now() totalTime := endTime.Sub(t.startTime) - buffer.WriteString(fmt.Sprintf("Trace[%d]: %q (started: %v) (total time: %v):\n", tracenum, t.name, t.startTime, totalTime)) + buffer.WriteString(fmt.Sprintf("Trace[%d]: %q ", tracenum, t.name)) + if len(t.fields) > 0 { + writeFields(&buffer, t.fields) + buffer.WriteString(" ") + } + buffer.WriteString(fmt.Sprintf("(started: %v) (total time: %v):\n", t.startTime, totalTime)) lastStepTime := t.startTime for _, step := range t.steps { stepDuration := step.stepTime.Sub(lastStepTime) if stepThreshold == 0 || stepDuration > stepThreshold || klog.V(4) { - buffer.WriteString(fmt.Sprintf("Trace[%d]: [%v] [%v] %v\n", tracenum, step.stepTime.Sub(t.startTime), stepDuration, step.msg)) + buffer.WriteString(fmt.Sprintf("Trace[%d]: [%v] [%v] ", tracenum, step.stepTime.Sub(t.startTime), stepDuration)) + buffer.WriteString(step.msg) + if len(step.fields) > 0 { + buffer.WriteString(" ") + writeFields(&buffer, step.fields) + } + buffer.WriteString("\n") } lastStepTime = step.stepTime } diff --git a/vendor/k8s.io/utils/trace/trace_test.go b/vendor/k8s.io/utils/trace/trace_test.go index 2521071fc2..5b68cc9ea3 100644 --- a/vendor/k8s.io/utils/trace/trace_test.go +++ b/vendor/k8s.io/utils/trace/trace_test.go @@ -37,7 +37,7 @@ func TestStep(t *testing.T) { inputString: "", expectedTrace: &Trace{ steps: []traceStep{ - {time.Now(), ""}, + {stepTime: time.Now(), msg: ""}, }, }, }, @@ -46,7 +46,7 @@ func TestStep(t *testing.T) { inputString: "test2", 
expectedTrace: &Trace{ steps: []traceStep{ - {time.Now(), "test2"}, + {stepTime: time.Now(), msg: "test2"}, }, }, }, @@ -83,35 +83,69 @@ func TestTotalTime(t *testing.T) { } func TestLog(t *testing.T) { - test := struct { + tests := []struct { name string + msg string + fields []Field expectedMessages []string sampleTrace *Trace }{ - name: "Check the log dump with 3 msg", - expectedMessages: []string{ - "msg1", "msg2", "msg3", - }, - sampleTrace: &Trace{ - name: "Sample Trace", - steps: []traceStep{ - {time.Now(), "msg1"}, - {time.Now(), "msg2"}, - {time.Now(), "msg3"}, + { + name: "Check the log dump with 3 msg", + expectedMessages: []string{ + "msg1", "msg2", "msg3", }, + sampleTrace: &Trace{ + name: "Sample Trace", + steps: []traceStep{ + {stepTime: time.Now(), msg: "msg1"}, + {stepTime: time.Now(), msg: "msg2"}, + {stepTime: time.Now(), msg: "msg3"}, + }, + }, + }, + { + name: "Check formatting", + expectedMessages: []string{ + "URL:/api,count:3", "msg1 str:text,int:2,bool:false", "msg2 x:1", + }, + sampleTrace: &Trace{ + name: "Sample Trace", + fields: []Field{{"URL", "/api"}, {"count", 3}}, + steps: []traceStep{ + {stepTime: time.Now(), msg: "msg1", fields: []Field{{"str", "text"}, {"int", 2}, {"bool", false}}}, + {stepTime: time.Now(), msg: "msg2", fields: []Field{{"x", "1"}}}, + }, + }, + }, + { + name: "Check fixture formatted", + expectedMessages: []string{ + "URL:/api,count:3", "msg1 str:text,int:2,bool:false", "msg2 x:1", + }, + sampleTrace: fieldsTraceFixture(), }, } - t.Run(test.name, func(t *testing.T) { - var buf bytes.Buffer - klog.SetOutput(&buf) - test.sampleTrace.Log() - for _, msg := range test.expectedMessages { - if !strings.Contains(buf.String(), msg) { - t.Errorf("\nMsg %q not found in log: \n%v\n", msg, buf.String()) + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + var buf bytes.Buffer + klog.SetOutput(&buf) + test.sampleTrace.Log() + for _, msg := range test.expectedMessages { + if !strings.Contains(buf.String(), 
msg) { + t.Errorf("\nMsg %q not found in log: \n%v\n", msg, buf.String()) + } } - } - }) + }) + } +} + +func fieldsTraceFixture() *Trace { + trace := New("Sample Trace", Field{"URL", "/api"}, Field{"count", 3}) + trace.Step("msg1", Field{"str", "text"}, Field{"int", 2}, Field{"bool", false}) + trace.Step("msg2", Field{"x", "1"}) + return trace } func TestLogIfLong(t *testing.T) {