Mirror of https://github.com/kyverno/kyverno.git

update vendor due to build failure

Author: shuting, 2019-06-03 15:07:38 -07:00
Commit: 0b85f1da0b (parent 1adfb3a339)
705 changed files with 66451 additions and 2925 deletions
Changed paths:
pkg/apis/policy/v1alpha1
vendor/github.com/evanphx/json-patch/cmd/json-patch
vendor/github.com/golang/glog
vendor/github.com/minio/minio


@@ -26,46 +26,21 @@ import (
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CopyFrom) DeepCopyInto(out *CopyFrom) {
func (in *CloneFrom) DeepCopyInto(out *CloneFrom) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CopyFrom.
func (in *CopyFrom) DeepCopy() *CopyFrom {
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloneFrom.
func (in *CloneFrom) DeepCopy() *CloneFrom {
if in == nil {
return nil
}
out := new(CopyFrom)
out := new(CloneFrom)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Generation) DeepCopyInto(out *Generation) {
*out = *in
if in.CopyFrom != nil {
in, out := &in.CopyFrom, &out.CopyFrom
*out = new(CopyFrom)
**out = **in
}
if in.Data != nil {
in, out := &in.Data, &out.Data
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Labels != nil {
in, out := &in.Labels, &out.Labels
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Generation.
func (in *Generation) DeepCopy() *Generation {
if in == nil {
@@ -150,6 +125,11 @@ func (in *PolicyList) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceDescription) DeepCopyInto(out *ResourceDescription) {
*out = *in
if in.Kinds != nil {
in, out := &in.Kinds, &out.Kinds
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Name != nil {
in, out := &in.Name, &out.Name
*out = new(string)
@@ -187,8 +167,7 @@ func (in *Rule) DeepCopyInto(out *Rule) {
}
if in.Generation != nil {
in, out := &in.Generation, &out.Generation
*out = new(Generation)
(*in).DeepCopyInto(*out)
*out = (*in).DeepCopy()
}
return
}


@@ -0,0 +1,39 @@
package main
// Borrowed from Concourse: https://github.com/concourse/atc/blob/master/atccmd/file_flag.go
import (
"fmt"
"os"
"path/filepath"
)
// FileFlag is a flag for passing a path to a file on disk. The file is
// expected to be a file, not a directory, that actually exists.
type FileFlag string
// UnmarshalFlag implements go-flag's Unmarshaler interface
func (f *FileFlag) UnmarshalFlag(value string) error {
stat, err := os.Stat(value)
if err != nil {
return err
}
if stat.IsDir() {
return fmt.Errorf("path '%s' is a directory, not a file", value)
}
abs, err := filepath.Abs(value)
if err != nil {
return err
}
*f = FileFlag(abs)
return nil
}
// Path is the path to the file
func (f FileFlag) Path() string {
return string(f)
}
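
For context, go-flags calls UnmarshalFlag on any flag type that implements its Unmarshaler interface, which is why the os.Stat validation above runs during argument parsing. A minimal sketch of that contract (shape as documented by go-flags; FileFlag satisfies it):

type Unmarshaler interface {
	// UnmarshalFlag receives the raw command-line value for the flag.
	UnmarshalFlag(value string) error
}

The main.go that follows relies on this to validate each --patch-file path before use.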


@@ -0,0 +1,56 @@
package main
import (
"fmt"
"io/ioutil"
"log"
"os"
jsonpatch "github.com/evanphx/json-patch"
flags "github.com/jessevdk/go-flags"
)
type opts struct {
PatchFilePaths []FileFlag `long:"patch-file" short:"p" value-name:"PATH" description:"Path to file with one or more operations"`
}
func main() {
var o opts
_, err := flags.Parse(&o)
if err != nil {
log.Fatalf("error: %s\n", err)
}
patches := make([]jsonpatch.Patch, len(o.PatchFilePaths))
for i, patchFilePath := range o.PatchFilePaths {
var bs []byte
bs, err = ioutil.ReadFile(patchFilePath.Path())
if err != nil {
log.Fatalf("error reading patch file: %s", err)
}
var patch jsonpatch.Patch
patch, err = jsonpatch.DecodePatch(bs)
if err != nil {
log.Fatalf("error decoding patch file: %s", err)
}
patches[i] = patch
}
doc, err := ioutil.ReadAll(os.Stdin)
if err != nil {
log.Fatalf("error reading from stdin: %s", err)
}
mdoc := doc
for _, patch := range patches {
mdoc, err = patch.Apply(mdoc)
if err != nil {
log.Fatalf("error applying patch: %s", err)
}
}
fmt.Printf("%s", mdoc)
}
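
Together, the two files above form a small CLI that applies RFC 6902 JSON patches (supplied via --patch-file) to a document read from stdin. For reference, a minimal sketch of calling the vendored library directly, using the same DecodePatch and Apply calls the CLI wraps (document and patch contents are hypothetical):

package main

import (
	"fmt"
	"log"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	// One RFC 6902 operation, inlined instead of read from a file.
	patch, err := jsonpatch.DecodePatch([]byte(`[{"op": "replace", "path": "/name", "value": "kyverno"}]`))
	if err != nil {
		log.Fatalf("error decoding patch: %s", err)
	}
	// Apply it to an inlined document instead of stdin.
	doc, err := patch.Apply([]byte(`{"name": "old"}`))
	if err != nil {
		log.Fatalf("error applying patch: %s", err)
	}
	fmt.Printf("%s\n", doc) // prints {"name":"kyverno"}
}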

vendor/github.com/golang/glog/LICENSE (generated, vendored; new file, 191 lines)

@@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction, and
distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by the copyright
owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all other entities
that control, are controlled by, or are under common control with that entity.
For the purposes of this definition, "control" means (i) the power, direct or
indirect, to cause the direction or management of such entity, whether by
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity exercising
permissions granted by this License.
"Source" form shall mean the preferred form for making modifications, including
but not limited to software source code, documentation source, and configuration
files.
"Object" form shall mean any form resulting from mechanical transformation or
translation of a Source form, including but not limited to compiled object code,
generated documentation, and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or Object form, made
available under the License, as indicated by a copyright notice that is included
in or attached to the work (an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object form, that
is based on (or derived from) the Work and for which the editorial revisions,
annotations, elaborations, or other modifications represent, as a whole, an
original work of authorship. For the purposes of this License, Derivative Works
shall not include works that remain separable from, or merely link (or bind by
name) to the interfaces of, the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including the original version
of the Work and any modifications or additions to that Work or Derivative Works
thereof, that is intentionally submitted to Licensor for inclusion in the Work
by the copyright owner or by an individual or Legal Entity authorized to submit
on behalf of the copyright owner. For the purposes of this definition,
"submitted" means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems, and
issue tracking systems that are managed by, or on behalf of, the Licensor for
the purpose of discussing and improving the Work, but excluding communication
that is conspicuously marked or otherwise designated in writing by the copyright
owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
of whom a Contribution has been received by Licensor and subsequently
incorporated within the Work.
2. Grant of Copyright License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the Work and such
Derivative Works in Source or Object form.
3. Grant of Patent License.
Subject to the terms and conditions of this License, each Contributor hereby
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
irrevocable (except as stated in this section) patent license to make, have
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
such license applies only to those patent claims licensable by such Contributor
that are necessarily infringed by their Contribution(s) alone or by combination
of their Contribution(s) with the Work to which such Contribution(s) was
submitted. If You institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
Contribution incorporated within the Work constitutes direct or contributory
patent infringement, then any patent licenses granted to You under this License
for that Work shall terminate as of the date such litigation is filed.
4. Redistribution.
You may reproduce and distribute copies of the Work or Derivative Works thereof
in any medium, with or without modifications, and in Source or Object form,
provided that You meet the following conditions:
You must give any other recipients of the Work or Derivative Works a copy of
this License; and
You must cause any modified files to carry prominent notices stating that You
changed the files; and
You must retain, in the Source form of any Derivative Works that You distribute,
all copyright, patent, trademark, and attribution notices from the Source form
of the Work, excluding those notices that do not pertain to any part of the
Derivative Works; and
If the Work includes a "NOTICE" text file as part of its distribution, then any
Derivative Works that You distribute must include a readable copy of the
attribution notices contained within such NOTICE file, excluding those notices
that do not pertain to any part of the Derivative Works, in at least one of the
following places: within a NOTICE text file distributed as part of the
Derivative Works; within the Source form or documentation, if provided along
with the Derivative Works; or, within a display generated by the Derivative
Works, if and wherever such third-party notices normally appear. The contents of
the NOTICE file are for informational purposes only and do not modify the
License. You may add Your own attribution notices within Derivative Works that
You distribute, alongside or as an addendum to the NOTICE text from the Work,
provided that such additional attribution notices cannot be construed as
modifying the License.
You may add Your own copyright statement to Your modifications and may provide
additional or different license terms and conditions for use, reproduction, or
distribution of Your modifications, or for any such Derivative Works as a whole,
provided Your use, reproduction, and distribution of the Work otherwise complies
with the conditions stated in this License.
5. Submission of Contributions.
Unless You explicitly state otherwise, any Contribution intentionally submitted
for inclusion in the Work by You to the Licensor shall be under the terms and
conditions of this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify the terms of
any separate license agreement you may have executed with Licensor regarding
such Contributions.
6. Trademarks.
This License does not grant permission to use the trade names, trademarks,
service marks, or product names of the Licensor, except as required for
reasonable and customary use in describing the origin of the Work and
reproducing the content of the NOTICE file.
7. Disclaimer of Warranty.
Unless required by applicable law or agreed to in writing, Licensor provides the
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
including, without limitation, any warranties or conditions of TITLE,
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
solely responsible for determining the appropriateness of using or
redistributing the Work and assume any risks associated with Your exercise of
permissions under this License.
8. Limitation of Liability.
In no event and under no legal theory, whether in tort (including negligence),
contract, or otherwise, unless required by applicable law (such as deliberate
and grossly negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special, incidental,
or consequential damages of any character arising as a result of this License or
out of the use or inability to use the Work (including but not limited to
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
any and all other commercial damages or losses), even if such Contributor has
been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability.
While redistributing the Work or Derivative Works thereof, You may choose to
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
other liability obligations and/or rights consistent with this License. However,
in accepting such obligations, You may act only on Your own behalf and on Your
sole responsibility, not on behalf of any other Contributor, and only if You
agree to indemnify, defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason of your
accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work
To apply the Apache License to your work, attach the following boilerplate
notice, with the fields enclosed by brackets "[]" replaced with your own
identifying information. (Don't include the brackets!) The text should be
enclosed in the appropriate comment syntax for the file format. We also
recommend that a file or class name and description of purpose be included on
the same "printed page" as the copyright notice for easier identification within
third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/golang/glog/README (generated, vendored; new file, 44 lines)

@@ -0,0 +1,44 @@
glog
====
Leveled execution logs for Go.
This is an efficient pure Go implementation of leveled logs in the
manner of the open source C++ package
https://github.com/google/glog
By binding methods to booleans it is possible to use the log package
without paying the expense of evaluating the arguments to the log.
Through the -vmodule flag, the package also provides fine-grained
control over logging at the file level.
The comment from glog.go introduces the ideas:
Package glog implements logging analogous to the Google-internal
C++ INFO/ERROR/V setup. It provides functions Info, Warning,
Error, Fatal, plus formatting variants such as Infof. It
also provides V-style logging controlled by the -v and
-vmodule=file=2 flags.
Basic examples:
glog.Info("Prepare to repel boarders")
glog.Fatalf("Initialization failed: %s", err)
See the documentation for the V function for an explanation
of these examples:
if glog.V(2) {
glog.Info("Starting transaction...")
}
glog.V(2).Infoln("Processed", nItems, "elements")
The repository contains an open source version of the log package
used inside Google. The master copy of the source lives inside
Google, not here. The code in this repo is for export only and is not itself
under development. Feature requests will be ignored.
Send bug reports to golang-nuts@googlegroups.com.
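
As a self-contained sketch of the basic API the README describes: glog registers its flags (-v, -vmodule, -logtostderr, -log_dir, ...) on the standard flag package, so flag.Parse must run before logging, and glog.Flush should run before exit to drain buffered output:

package main

import (
	"flag"

	"github.com/golang/glog"
)

func main() {
	flag.Parse()       // picks up -v, -vmodule, -logtostderr, etc.
	defer glog.Flush() // flush buffered log I/O before exit

	glog.Info("Prepare to repel boarders")

	// The guarded form avoids evaluating arguments unless -v=2
	// (or a matching -vmodule pattern) is in effect.
	if glog.V(2) {
		glog.Info("Starting transaction...")
	}
	glog.V(2).Infoln("Processed", 42, "elements")
}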

vendor/github.com/golang/glog/glog.go (generated, vendored; new file, 1180 lines)
Diff suppressed because it is too large.

vendor/github.com/golang/glog/glog_file.go (generated, vendored; new file, 124 lines)

@@ -0,0 +1,124 @@
// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
//
// Copyright 2013 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// File I/O for logs.
package glog
import (
"errors"
"flag"
"fmt"
"os"
"os/user"
"path/filepath"
"strings"
"sync"
"time"
)
// MaxSize is the maximum size of a log file in bytes.
var MaxSize uint64 = 1024 * 1024 * 1800
// logDirs lists the candidate directories for new log files.
var logDirs []string
// If non-empty, overrides the choice of directory in which to write logs.
// See createLogDirs for the full list of possible destinations.
var logDir = flag.String("log_dir", "", "If non-empty, write log files in this directory")
func createLogDirs() {
if *logDir != "" {
logDirs = append(logDirs, *logDir)
}
logDirs = append(logDirs, os.TempDir())
}
var (
pid = os.Getpid()
program = filepath.Base(os.Args[0])
host = "unknownhost"
userName = "unknownuser"
)
func init() {
h, err := os.Hostname()
if err == nil {
host = shortHostname(h)
}
current, err := user.Current()
if err == nil {
userName = current.Username
}
// Sanitize userName since it may contain filepath separators on Windows.
userName = strings.Replace(userName, `\`, "_", -1)
}
// shortHostname returns its argument, truncating at the first period.
// For instance, given "www.google.com" it returns "www".
func shortHostname(hostname string) string {
if i := strings.Index(hostname, "."); i >= 0 {
return hostname[:i]
}
return hostname
}
// logName returns a new log file name containing tag, with start time t, and
// the name for the symlink for tag.
func logName(tag string, t time.Time) (name, link string) {
name = fmt.Sprintf("%s.%s.%s.log.%s.%04d%02d%02d-%02d%02d%02d.%d",
program,
host,
userName,
tag,
t.Year(),
t.Month(),
t.Day(),
t.Hour(),
t.Minute(),
t.Second(),
pid)
return name, program + "." + tag
}
var onceLogDirs sync.Once
// create creates a new log file and returns the file and its filename, which
// contains tag ("INFO", "FATAL", etc.) and t. If the file is created
// successfully, create also attempts to update the symlink for that tag, ignoring
// errors.
func create(tag string, t time.Time) (f *os.File, filename string, err error) {
onceLogDirs.Do(createLogDirs)
if len(logDirs) == 0 {
return nil, "", errors.New("log: no log dirs")
}
name, link := logName(tag, t)
var lastErr error
for _, dir := range logDirs {
fname := filepath.Join(dir, name)
f, err := os.Create(fname)
if err == nil {
symlink := filepath.Join(dir, link)
os.Remove(symlink) // ignore err
os.Symlink(name, symlink) // ignore err
return f, fname, nil
}
lastErr = err
}
return nil, "", fmt.Errorf("log: cannot create log: %v", lastErr)
}

vendor/github.com/golang/glog/glog_test.go (generated, vendored; new file, 415 lines)

@@ -0,0 +1,415 @@
// Go support for leveled logs, analogous to https://code.google.com/p/google-glog/
//
// Copyright 2013 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package glog
import (
"bytes"
"fmt"
stdLog "log"
"path/filepath"
"runtime"
"strconv"
"strings"
"testing"
"time"
)
// Test that shortHostname works as advertised.
func TestShortHostname(t *testing.T) {
for hostname, expect := range map[string]string{
"": "",
"host": "host",
"host.google.com": "host",
} {
if got := shortHostname(hostname); expect != got {
t.Errorf("shortHostname(%q): expected %q, got %q", hostname, expect, got)
}
}
}
// flushBuffer wraps a bytes.Buffer to satisfy flushSyncWriter.
type flushBuffer struct {
bytes.Buffer
}
func (f *flushBuffer) Flush() error {
return nil
}
func (f *flushBuffer) Sync() error {
return nil
}
// swap sets the log writers and returns the old array.
func (l *loggingT) swap(writers [numSeverity]flushSyncWriter) (old [numSeverity]flushSyncWriter) {
l.mu.Lock()
defer l.mu.Unlock()
old = l.file
for i, w := range writers {
logging.file[i] = w
}
return
}
// newBuffers sets the log writers to all new byte buffers and returns the old array.
func (l *loggingT) newBuffers() [numSeverity]flushSyncWriter {
return l.swap([numSeverity]flushSyncWriter{new(flushBuffer), new(flushBuffer), new(flushBuffer), new(flushBuffer)})
}
// contents returns the specified log value as a string.
func contents(s severity) string {
return logging.file[s].(*flushBuffer).String()
}
// contains reports whether the string is contained in the log.
func contains(s severity, str string, t *testing.T) bool {
return strings.Contains(contents(s), str)
}
// setFlags configures the logging flags how the test expects them.
func setFlags() {
logging.toStderr = false
}
// Test that Info works as advertised.
func TestInfo(t *testing.T) {
setFlags()
defer logging.swap(logging.newBuffers())
Info("test")
if !contains(infoLog, "I", t) {
t.Errorf("Info has wrong character: %q", contents(infoLog))
}
if !contains(infoLog, "test", t) {
t.Error("Info failed")
}
}
func TestInfoDepth(t *testing.T) {
setFlags()
defer logging.swap(logging.newBuffers())
f := func() { InfoDepth(1, "depth-test1") }
// The next three lines must stay together
_, _, wantLine, _ := runtime.Caller(0)
InfoDepth(0, "depth-test0")
f()
msgs := strings.Split(strings.TrimSuffix(contents(infoLog), "\n"), "\n")
if len(msgs) != 2 {
t.Fatalf("Got %d lines, expected 2", len(msgs))
}
for i, m := range msgs {
if !strings.HasPrefix(m, "I") {
t.Errorf("InfoDepth[%d] has wrong character: %q", i, m)
}
w := fmt.Sprintf("depth-test%d", i)
if !strings.Contains(m, w) {
t.Errorf("InfoDepth[%d] missing %q: %q", i, w, m)
}
// pull out the line number (between : and ])
msg := m[strings.LastIndex(m, ":")+1:]
x := strings.Index(msg, "]")
if x < 0 {
t.Errorf("InfoDepth[%d]: missing ']': %q", i, m)
continue
}
line, err := strconv.Atoi(msg[:x])
if err != nil {
t.Errorf("InfoDepth[%d]: bad line number: %q", i, m)
continue
}
wantLine++
if wantLine != line {
t.Errorf("InfoDepth[%d]: got line %d, want %d", i, line, wantLine)
}
}
}
func init() {
CopyStandardLogTo("INFO")
}
// Test that CopyStandardLogTo panics on bad input.
func TestCopyStandardLogToPanic(t *testing.T) {
defer func() {
if s, ok := recover().(string); !ok || !strings.Contains(s, "LOG") {
t.Errorf(`CopyStandardLogTo("LOG") should have panicked: %v`, s)
}
}()
CopyStandardLogTo("LOG")
}
// Test that using the standard log package logs to INFO.
func TestStandardLog(t *testing.T) {
setFlags()
defer logging.swap(logging.newBuffers())
stdLog.Print("test")
if !contains(infoLog, "I", t) {
t.Errorf("Info has wrong character: %q", contents(infoLog))
}
if !contains(infoLog, "test", t) {
t.Error("Info failed")
}
}
// Test that the header has the correct format.
func TestHeader(t *testing.T) {
setFlags()
defer logging.swap(logging.newBuffers())
defer func(previous func() time.Time) { timeNow = previous }(timeNow)
timeNow = func() time.Time {
return time.Date(2006, 1, 2, 15, 4, 5, .067890e9, time.Local)
}
pid = 1234
Info("test")
var line int
format := "I0102 15:04:05.067890 1234 glog_test.go:%d] test\n"
n, err := fmt.Sscanf(contents(infoLog), format, &line)
if n != 1 || err != nil {
t.Errorf("log format error: %d elements, error %s:\n%s", n, err, contents(infoLog))
}
// Scanf treats multiple spaces as equivalent to a single space,
// so check for correct space-padding also.
want := fmt.Sprintf(format, line)
if contents(infoLog) != want {
t.Errorf("log format error: got:\n\t%q\nwant:\t%q", contents(infoLog), want)
}
}
// Test that an Error log goes to Warning and Info.
// Even in the Info log, the source character will be E, so the data should
// all be identical.
func TestError(t *testing.T) {
setFlags()
defer logging.swap(logging.newBuffers())
Error("test")
if !contains(errorLog, "E", t) {
t.Errorf("Error has wrong character: %q", contents(errorLog))
}
if !contains(errorLog, "test", t) {
t.Error("Error failed")
}
str := contents(errorLog)
if !contains(warningLog, str, t) {
t.Error("Warning failed")
}
if !contains(infoLog, str, t) {
t.Error("Info failed")
}
}
// Test that a Warning log goes to Info.
// Even in the Info log, the source character will be W, so the data should
// all be identical.
func TestWarning(t *testing.T) {
setFlags()
defer logging.swap(logging.newBuffers())
Warning("test")
if !contains(warningLog, "W", t) {
t.Errorf("Warning has wrong character: %q", contents(warningLog))
}
if !contains(warningLog, "test", t) {
t.Error("Warning failed")
}
str := contents(warningLog)
if !contains(infoLog, str, t) {
t.Error("Info failed")
}
}
// Test that a V log goes to Info.
func TestV(t *testing.T) {
setFlags()
defer logging.swap(logging.newBuffers())
logging.verbosity.Set("2")
defer logging.verbosity.Set("0")
V(2).Info("test")
if !contains(infoLog, "I", t) {
t.Errorf("Info has wrong character: %q", contents(infoLog))
}
if !contains(infoLog, "test", t) {
t.Error("Info failed")
}
}
// Test that a vmodule enables a log in this file.
func TestVmoduleOn(t *testing.T) {
setFlags()
defer logging.swap(logging.newBuffers())
logging.vmodule.Set("glog_test=2")
defer logging.vmodule.Set("")
if !V(1) {
t.Error("V not enabled for 1")
}
if !V(2) {
t.Error("V not enabled for 2")
}
if V(3) {
t.Error("V enabled for 3")
}
V(2).Info("test")
if !contains(infoLog, "I", t) {
t.Errorf("Info has wrong character: %q", contents(infoLog))
}
if !contains(infoLog, "test", t) {
t.Error("Info failed")
}
}
// Test that a vmodule of another file does not enable a log in this file.
func TestVmoduleOff(t *testing.T) {
setFlags()
defer logging.swap(logging.newBuffers())
logging.vmodule.Set("notthisfile=2")
defer logging.vmodule.Set("")
for i := 1; i <= 3; i++ {
if V(Level(i)) {
t.Errorf("V enabled for %d", i)
}
}
V(2).Info("test")
if contents(infoLog) != "" {
t.Error("V logged incorrectly")
}
}
// vGlobs are patterns that match/don't match this file at V=2.
var vGlobs = map[string]bool{
// Easy to test the numeric match here.
"glog_test=1": false, // If -vmodule sets V to 1, V(2) will fail.
"glog_test=2": true,
"glog_test=3": true, // If -vmodule sets V to 1, V(3) will succeed.
// These all use 2 and check the patterns. All are true.
"*=2": true,
"?l*=2": true,
"????_*=2": true,
"??[mno]?_*t=2": true,
// These all use 2 and check the patterns. All are false.
"*x=2": false,
"m*=2": false,
"??_*=2": false,
"?[abc]?_*t=2": false,
}
// Test that vmodule globbing works as advertised.
func testVmoduleGlob(pat string, match bool, t *testing.T) {
setFlags()
defer logging.swap(logging.newBuffers())
defer logging.vmodule.Set("")
logging.vmodule.Set(pat)
if V(2) != Verbose(match) {
t.Errorf("incorrect match for %q: got %t expected %t", pat, V(2), match)
}
}
// Test that a vmodule globbing works as advertised.
func TestVmoduleGlob(t *testing.T) {
for glob, match := range vGlobs {
testVmoduleGlob(glob, match, t)
}
}
func TestRollover(t *testing.T) {
setFlags()
var err error
defer func(previous func(error)) { logExitFunc = previous }(logExitFunc)
logExitFunc = func(e error) {
err = e
}
defer func(previous uint64) { MaxSize = previous }(MaxSize)
MaxSize = 512
Info("x") // Be sure we have a file.
info, ok := logging.file[infoLog].(*syncBuffer)
if !ok {
t.Fatal("info wasn't created")
}
if err != nil {
t.Fatalf("info has initial error: %v", err)
}
fname0 := info.file.Name()
Info(strings.Repeat("x", int(MaxSize))) // force a rollover
if err != nil {
t.Fatalf("info has error after big write: %v", err)
}
// Make sure the next log file gets a file name with a different
// time stamp.
//
// TODO: determine whether we need to support subsecond log
// rotation. C++ does not appear to handle this case (nor does it
// handle Daylight Savings Time properly).
time.Sleep(1 * time.Second)
Info("x") // create a new file
if err != nil {
t.Fatalf("error after rotation: %v", err)
}
fname1 := info.file.Name()
if fname0 == fname1 {
t.Errorf("info.f.Name did not change: %v", fname0)
}
if info.nbytes >= MaxSize {
t.Errorf("file size was not reset: %d", info.nbytes)
}
}
func TestLogBacktraceAt(t *testing.T) {
setFlags()
defer logging.swap(logging.newBuffers())
// The peculiar style of this code simplifies line counting and maintenance of the
// tracing block below.
var infoLine string
setTraceLocation := func(file string, line int, ok bool, delta int) {
if !ok {
t.Fatal("could not get file:line")
}
_, file = filepath.Split(file)
infoLine = fmt.Sprintf("%s:%d", file, line+delta)
err := logging.traceLocation.Set(infoLine)
if err != nil {
t.Fatal("error setting log_backtrace_at: ", err)
}
}
{
// Start of tracing block. These lines know about each other's relative position.
_, file, line, ok := runtime.Caller(0)
setTraceLocation(file, line, ok, +2) // Two lines between Caller and Info calls.
Info("we want a stack trace here")
}
numAppearances := strings.Count(contents(infoLog), infoLine)
if numAppearances < 2 {
// Need 2 appearances, one in the log header and one in the trace:
// log_test.go:281: I0511 16:36:06.952398 02238 log_test.go:280] we want a stack trace here
// ...
// github.com/glog/glog_test.go:280 (0x41ba91)
// ...
// We could be more precise but that would require knowing the details
// of the traceback format, which may not be dependable.
t.Fatal("got no trace back; log is ", contents(infoLog))
}
}
func BenchmarkHeader(b *testing.B) {
for i := 0; i < b.N; i++ {
buf, _, _ := logging.header(infoLog, 0)
logging.putBuffer(buf)
}
}

vendor/github.com/minio/minio/Dockerfile (generated, vendored; new file, 37 lines)

@@ -0,0 +1,37 @@
FROM golang:1.12-alpine
LABEL maintainer="MinIO Inc <dev@min.io>"
ENV GOPATH /go
ENV CGO_ENABLED 0
ENV GO111MODULE on
RUN \
apk add --no-cache git && \
git clone https://github.com/minio/minio && cd minio && \
go install -v -ldflags "$(go run buildscripts/gen-ldflags.go)" && \
cd dockerscripts; go build -ldflags "-s -w" -o /usr/bin/healthcheck healthcheck.go
FROM alpine:3.9
ENV MINIO_UPDATE off
ENV MINIO_ACCESS_KEY_FILE=access_key \
MINIO_SECRET_KEY_FILE=secret_key
EXPOSE 9000
COPY --from=0 /go/bin/minio /usr/bin/minio
COPY --from=0 /usr/bin/healthcheck /usr/bin/healthcheck
COPY dockerscripts/docker-entrypoint.sh /usr/bin/
RUN \
apk add --no-cache ca-certificates 'curl>7.61.0' && \
echo 'hosts: files mdns4_minimal [NOTFOUND=return] dns mdns4' >> /etc/nsswitch.conf
ENTRYPOINT ["/usr/bin/docker-entrypoint.sh"]
VOLUME ["/data"]
HEALTHCHECK --interval=1m CMD healthcheck
CMD ["minio"]


@@ -18,157 +18,237 @@ import React from "react"
import { connect } from "react-redux"
import web from "../web"
import * as alertActions from "../alert/actions"
import { getRandomAccessKey, getRandomSecretKey } from "../utils"
import jwtDecode from "jwt-decode"
import classNames from "classnames"
import {
Tooltip,
Modal,
ModalBody,
ModalHeader,
OverlayTrigger
} from "react-bootstrap"
import { Modal, ModalBody, ModalHeader } from "react-bootstrap"
import InputGroup from "./InputGroup"
import { ACCESS_KEY_MIN_LENGTH, SECRET_KEY_MIN_LENGTH } from "../constants"
export class ChangePasswordModal extends React.Component {
constructor(props) {
super(props)
this.state = {
accessKey: "",
secretKey: "",
keysReadOnly: false
currentAccessKey: "",
currentSecretKey: "",
currentSecretKeyVisible: false,
newAccessKey: "",
newSecretKey: "",
newSecretKeyVisible: false
}
}
// When it's shown, it loads the access key and secret key.
// When it's shown, it loads the access key from the JWT token
componentWillMount() {
const { serverInfo } = this.props
// Check environment variables first.
if (serverInfo.info.isEnvCreds || serverInfo.info.isWorm) {
this.setState({
accessKey: "xxxxxxxxx",
secretKey: "xxxxxxxxx",
keysReadOnly: true
})
} else {
web.GetAuth().then(data => {
this.setState({
accessKey: data.accessKey,
secretKey: data.secretKey
})
})
}
}
// Handle field changes from inside the modal.
accessKeyChange(e) {
const token = jwtDecode(web.GetToken())
this.setState({
accessKey: e.target.value
})
}
secretKeyChange(e) {
this.setState({
secretKey: e.target.value
})
}
secretKeyVisible(secretKeyVisible) {
this.setState({
secretKeyVisible
currentAccessKey: token.sub,
newAccessKey: token.sub
})
}
// Save the auth params and set them.
setAuth(e) {
const { showAlert } = this.props
const accessKey = this.state.accessKey
const secretKey = this.state.secretKey
web
.SetAuth({
accessKey,
secretKey
})
.then(data => {
showAlert({
type: "success",
message: "Changed credentials"
if (this.canUpdateCredentials()) {
const currentAccessKey = this.state.currentAccessKey
const currentSecretKey = this.state.currentSecretKey
const newAccessKey = this.state.newAccessKey
const newSecretKey = this.state.newSecretKey
web
.SetAuth({
currentAccessKey,
currentSecretKey,
newAccessKey,
newSecretKey
})
})
.catch(err => {
showAlert({
type: "danger",
message: err.message
.then(data => {
showAlert({
type: "success",
message: "Credentials updated successfully."
})
})
})
.catch(err => {
showAlert({
type: "danger",
message: err.message
})
})
}
}
generateAuth(e) {
web.GenerateAuth().then(data => {
const { serverInfo } = this.props
// Generate random access key only for root user
if (!serverInfo.userInfo.isIAMUser) {
this.setState({
accessKey: data.accessKey,
secretKey: data.secretKey,
secretKeyVisible: true
newAccessKey: getRandomAccessKey()
})
}
this.setState({
newSecretKey: getRandomSecretKey(),
newSecretKeyVisible: true
})
}
canChangePassword() {
const { serverInfo } = this.props
// Password change is not allowed in WORM mode
if (serverInfo.info.isWorm) {
return false
}
// When credentials are set on ENV, password change not allowed for owner
if (serverInfo.info.isEnvCreds && !serverInfo.userInfo.isIAMUser) {
return false
}
return true
}
canUpdateCredentials() {
return (
this.state.currentAccessKey.length > 0 &&
this.state.currentSecretKey.length > 0 &&
this.state.newAccessKey.length >= ACCESS_KEY_MIN_LENGTH &&
this.state.newSecretKey.length >= SECRET_KEY_MIN_LENGTH
)
}
render() {
const { hideChangePassword } = this.props
const { hideChangePassword, serverInfo } = this.props
const allowChangePassword = this.canChangePassword()
if (!allowChangePassword) {
return (
<Modal bsSize="sm" animation={false} show={true}>
<ModalHeader>Change Password</ModalHeader>
<ModalBody>
Credentials of this user cannot be updated through MinIO Browser.
</ModalBody>
<div className="modal-footer">
<button
id="cancel-change-password"
className="btn btn-link"
onClick={hideChangePassword}
>
Close
</button>
</div>
</Modal>
)
}
return (
<Modal bsSize="sm" animation={false} show={true}>
<ModalHeader>Change Password</ModalHeader>
<ModalBody className="m-t-20">
<InputGroup
value={this.state.accessKey}
onChange={this.accessKeyChange.bind(this)}
id="accessKey"
label="Access Key"
name="accesskey"
type="text"
spellCheck="false"
required="required"
autoComplete="false"
align="ig-left"
readonly={this.state.keysReadOnly}
/>
<i
onClick={this.secretKeyVisible.bind(
this,
!this.state.secretKeyVisible
<div className="has-toggle-password">
<InputGroup
value={this.state.currentAccessKey}
id="currentAccessKey"
label="Current Access Key"
name="currentAccesskey"
type="text"
spellCheck="false"
required="required"
autoComplete="false"
align="ig-left"
readonly={true}
/>
<i
onClick={() => {
this.setState({
currentSecretKeyVisible: !this.state.currentSecretKeyVisible
})
}}
className={
"toggle-password fa fa-eye " +
(this.state.currentSecretKeyVisible ? "toggled" : "")
}
/>
<InputGroup
value={this.state.currentSecretKey}
onChange={e => {
this.setState({ currentSecretKey: e.target.value })
}}
id="currentSecretKey"
label="Current Secret Key"
name="currentSecretKey"
type={this.state.currentSecretKeyVisible ? "text" : "password"}
spellCheck="false"
required="required"
autoComplete="false"
align="ig-left"
/>
</div>
<div className="has-toggle-password m-t-30">
{!serverInfo.userInfo.isIAMUser && (
<InputGroup
value={this.state.newAccessKey}
id="newAccessKey"
label={"New Access Key"}
name="newAccesskey"
type="text"
spellCheck="false"
required="required"
autoComplete="false"
align="ig-left"
onChange={e => {
this.setState({ newAccessKey: e.target.value })
}}
readonly={serverInfo.userInfo.isIAMUser}
/>
)}
className={
"toggle-password fa fa-eye " +
(this.state.secretKeyVisible ? "toggled" : "")
}
/>
<InputGroup
value={this.state.secretKey}
onChange={this.secretKeyChange.bind(this)}
id="secretKey"
label="Secret Key"
name="accesskey"
type={this.state.secretKeyVisible ? "text" : "password"}
spellCheck="false"
required="required"
autoComplete="false"
align="ig-left"
readonly={this.state.keysReadOnly}
/>
<i
onClick={() => {
this.setState({
newSecretKeyVisible: !this.state.newSecretKeyVisible
})
}}
className={
"toggle-password fa fa-eye " +
(this.state.newSecretKeyVisible ? "toggled" : "")
}
/>
<InputGroup
value={this.state.newSecretKey}
onChange={e => {
this.setState({ newSecretKey: e.target.value })
}}
id="newSecretKey"
label="New Secret Key"
name="newSecretKey"
type={this.state.newSecretKeyVisible ? "text" : "password"}
spellCheck="false"
required="required"
autoComplete="false"
align="ig-left"
onChange={e => {
this.setState({ newSecretKey: e.target.value })
}}
/>
</div>
</ModalBody>
<div className="modal-footer">
<button
id="generate-keys"
className={
"btn btn-primary " + (this.state.keysReadOnly ? "hidden" : "")
}
className={"btn btn-primary"}
onClick={this.generateAuth.bind(this)}
>
Generate
</button>
<button
id="update-keys"
className={
"btn btn-success " + (this.state.keysReadOnly ? "hidden" : "")
}
className={classNames({
btn: true,
"btn-success": this.canUpdateCredentials()
})}
disabled={!this.canUpdateCredentials()}
onClick={this.setAuth.bind(this)}
>
Update
@@ -198,4 +278,7 @@ const mapDispatchToProps = dispatch => {
}
}
export default connect(mapStateToProps, mapDispatchToProps)(ChangePasswordModal)
export default connect(
mapStateToProps,
mapDispatchToProps
)(ChangePasswordModal)


@@ -17,21 +17,38 @@
import React from "react"
import { shallow, mount } from "enzyme"
import { ChangePasswordModal } from "../ChangePasswordModal"
import jwtDecode from "jwt-decode"
jest.mock("jwt-decode")
jwtDecode.mockImplementation(() => ({ sub: "minio" }))
jest.mock("../../web", () => ({
GetAuth: jest.fn(() => {
return Promise.resolve({ accessKey: "test1", secretKey: "test2" })
}),
GenerateAuth: jest.fn(() => {
return Promise.resolve({ accessKey: "gen1", secretKey: "gen2" })
}),
SetAuth: jest.fn(({ accessKey, secretKey }) => {
if (accessKey == "test3" && secretKey == "test4") {
return Promise.resolve({})
} else {
return Promise.reject({ message: "Error" })
SetAuth: jest.fn(
({ currentAccessKey, currentSecretKey, newAccessKey, newSecretKey }) => {
if (
currentAccessKey == "minio" &&
currentSecretKey == "minio123" &&
newAccessKey == "test" &&
newSecretKey == "test1234"
) {
return Promise.resolve({})
} else {
return Promise.reject({
message: "Error"
})
}
}
})
),
GetToken: jest.fn(() => "")
}))
jest.mock("../../utils", () => ({
getRandomAccessKey: () => "raccesskey",
getRandomSecretKey: () => "rsecretkey"
}))
describe("ChangePasswordModal", () => {
@@ -40,57 +57,112 @@ describe("ChangePasswordModal", () => {
memory: "test",
platform: "test",
runtime: "test",
info: { isEnvCreds: false }
info: { isEnvCreds: false },
userInfo: { isIAMUser: false }
}
it("should render without crashing", () => {
shallow(<ChangePasswordModal serverInfo={serverInfo} />)
})
it("should get the keys when its rendered", () => {
const wrapper = shallow(<ChangePasswordModal serverInfo={serverInfo} />)
setImmediate(() => {
expect(wrapper.state("accessKey")).toBe("test1")
expect(wrapper.state("secretKey")).toBe("test2")
})
it("should not allow changing password when isWorm is true", () => {
const newServerInfo = { ...serverInfo, info: { isWorm: true } }
const wrapper = shallow(<ChangePasswordModal serverInfo={newServerInfo} />)
expect(
wrapper
.find("ModalBody")
.childAt(0)
.text()
).toBe("Credentials of this user cannot be updated through MinIO Browser.")
})
it("should show readonly keys when isEnvCreds is true", () => {
const newServerInfo = { ...serverInfo, info: { isEnvCreds: true } }
it("should not allow changing password when isEnvCreds is true and not IAM user", () => {
const newServerInfo = {
...serverInfo,
info: { isEnvCreds: true },
userInfo: { isIAMUser: false }
}
const wrapper = shallow(<ChangePasswordModal serverInfo={newServerInfo} />)
expect(wrapper.state("accessKey")).toBe("xxxxxxxxx")
expect(wrapper.state("secretKey")).toBe("xxxxxxxxx")
expect(wrapper.find("#accessKey").prop("readonly")).toBeTruthy()
expect(wrapper.find("#secretKey").prop("readonly")).toBeTruthy()
expect(wrapper.find("#generate-keys").hasClass("hidden")).toBeTruthy()
expect(wrapper.find("#update-keys").hasClass("hidden")).toBeTruthy()
expect(
wrapper
.find("ModalBody")
.childAt(0)
.text()
).toBe("Credentials of this user cannot be updated through MinIO Browser.")
})
it("should generate accessKey and secretKey when Generate buttons is clicked", () => {
const wrapper = shallow(<ChangePasswordModal serverInfo={serverInfo} />)
wrapper.find("#generate-keys").simulate("click")
setImmediate(() => {
expect(wrapper.state("accessKey")).toBe("gen1")
expect(wrapper.state("secretKey")).toBe("gen2")
expect(wrapper.state("newAccessKey")).toBe("raccesskey")
expect(wrapper.state("newSecretKey")).toBe("rsecretkey")
})
})
it("should not generate accessKey for IAM User", () => {
const newServerInfo = {
...serverInfo,
userInfo: { isIAMUser: true }
}
const wrapper = shallow(<ChangePasswordModal serverInfo={newServerInfo} />)
wrapper.find("#generate-keys").simulate("click")
setImmediate(() => {
expect(wrapper.state("newAccessKey")).toBe("minio")
expect(wrapper.state("newSecretKey")).toBe("rsecretkey")
})
})
it("should not show new accessKey field for IAM User", () => {
const newServerInfo = {
...serverInfo,
userInfo: { isIAMUser: true }
}
const wrapper = shallow(<ChangePasswordModal serverInfo={newServerInfo} />)
expect(wrapper.find("#newAccesskey").exists()).toBeFalsy()
})
it("should disble Update button for invalid accessKey or secretKey", () => {
const showAlert = jest.fn()
const wrapper = shallow(
<ChangePasswordModal serverInfo={serverInfo} showAlert={showAlert} />
)
wrapper
.find("#currentAccessKey")
.simulate("change", { target: { value: "minio" } })
wrapper
.find("#currentSecretKey")
.simulate("change", { target: { value: "minio123" } })
wrapper.find("#newAccessKey").simulate("change", { target: { value: "t" } })
wrapper
.find("#newSecretKey")
.simulate("change", { target: { value: "t1" } })
expect(wrapper.find("#update-keys").prop("disabled")).toBeTruthy()
})
it("should update accessKey and secretKey when Update button is clicked", () => {
const showAlert = jest.fn()
const wrapper = shallow(
<ChangePasswordModal serverInfo={serverInfo} showAlert={showAlert} />
)
wrapper
.find("#accessKey")
.simulate("change", { target: { value: "test3" } })
.find("#currentAccessKey")
.simulate("change", { target: { value: "minio" } })
wrapper
.find("#secretKey")
.simulate("change", { target: { value: "test4" } })
.find("#currentSecretKey")
.simulate("change", { target: { value: "minio123" } })
wrapper
.find("#newAccessKey")
.simulate("change", { target: { value: "test" } })
wrapper
.find("#newSecretKey")
.simulate("change", { target: { value: "test1234" } })
expect(wrapper.find("#update-keys").prop("disabled")).toBeFalsy()
wrapper.find("#update-keys").simulate("click")
setImmediate(() => {
expect(showAlert).toHaveBeenCalledWith({
type: "success",
message: "Changed credentials"
message: "Credentials updated successfully."
})
})
})


@@ -34,7 +34,7 @@ export const fetchStorageInfo = () => {
return web.StorageInfo().then(res => {
const storageInfo = {
total: res.storageInfo.Total,
used: res.storageInfo.Used
used: res.storageInfo.Used
}
dispatch(setStorageInfo(storageInfo))
})
@@ -54,7 +54,8 @@ export const fetchServerInfo = () => {
memory: res.MinioMemory,
platform: res.MinioPlatform,
runtime: res.MinioRuntime,
info: res.MinioGlobalInfo
info: res.MinioGlobalInfo,
userInfo: res.MinioUserInfo
}
dispatch(setServerInfo(serverInfo))
})


@@ -28,3 +28,6 @@ export const NONE = "none"
export const SHARE_OBJECT_EXPIRY_DAYS = 5
export const SHARE_OBJECT_EXPIRY_HOURS = 0
export const SHARE_OBJECT_EXPIRY_MINUTES = 0
export const ACCESS_KEY_MIN_LENGTH = 3
export const SECRET_KEY_MIN_LENGTH = 8


@@ -14,11 +14,11 @@
* limitations under the License.
*/
import { minioBrowserPrefix } from './constants.js'
import { minioBrowserPrefix } from "./constants.js"
export const sortObjectsByName = (objects, order) => {
let folders = objects.filter(object => object.name.endsWith('/'))
let files = objects.filter(object => !object.name.endsWith('/'))
let folders = objects.filter(object => object.name.endsWith("/"))
let files = objects.filter(object => !object.name.endsWith("/"))
folders = folders.sort((a, b) => {
if (a.name.toLowerCase() < b.name.toLowerCase()) return -1
if (a.name.toLowerCase() > b.name.toLowerCase()) return 1
@@ -37,32 +37,34 @@ export const sortObjectsByName = (objects, order) => {
}
export const sortObjectsBySize = (objects, order) => {
let folders = objects.filter(object => object.name.endsWith('/'))
let files = objects.filter(object => !object.name.endsWith('/'))
let folders = objects.filter(object => object.name.endsWith("/"))
let files = objects.filter(object => !object.name.endsWith("/"))
files = files.sort((a, b) => a.size - b.size)
if (order)
files = files.reverse()
if (order) files = files.reverse()
return [...folders, ...files]
}
export const sortObjectsByDate = (objects, order) => {
let folders = objects.filter(object => object.name.endsWith('/'))
let files = objects.filter(object => !object.name.endsWith('/'))
files = files.sort((a, b) => new Date(a.lastModified).getTime() - new Date(b.lastModified).getTime())
if (order)
files = files.reverse()
let folders = objects.filter(object => object.name.endsWith("/"))
let files = objects.filter(object => !object.name.endsWith("/"))
files = files.sort(
(a, b) =>
new Date(a.lastModified).getTime() - new Date(b.lastModified).getTime()
)
if (order) files = files.reverse()
return [...folders, ...files]
}
export const pathSlice = (path) => {
path = path.replace(minioBrowserPrefix, '')
let prefix = ''
let bucket = ''
if (!path) return {
export const pathSlice = path => {
path = path.replace(minioBrowserPrefix, "")
let prefix = ""
let bucket = ""
if (!path)
return {
bucket,
prefix
}
let objectIndex = path.indexOf('/', 1)
}
let objectIndex = path.indexOf("/", 1)
if (objectIndex == -1) {
bucket = path.slice(1)
return {
@@ -79,7 +81,29 @@ export const pathSlice = (path) => {
}
export const pathJoin = (bucket, prefix) => {
if (!prefix)
prefix = ''
return minioBrowserPrefix + '/' + bucket + '/' + prefix
if (!prefix) prefix = ""
return minioBrowserPrefix + "/" + bucket + "/" + prefix
}
export const getRandomAccessKey = () => {
const alphaNumericTable = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
let arr = new Uint8Array(20)
window.crypto.getRandomValues(arr)
const random = Array.prototype.map.call(arr, v => {
const i = v % alphaNumericTable.length
return alphaNumericTable.charAt(i)
})
return random.join("")
}
export const getRandomSecretKey = () => {
let arr = new Uint8Array(40)
window.crypto.getRandomValues(arr)
const binStr = Array.prototype.map
.call(arr, v => {
return String.fromCharCode(v)
})
.join("")
const base64Str = btoa(binStr)
return base64Str.replace(/\//g, "+").substr(0, 40)
}


@@ -72,6 +72,9 @@ class Web {
Logout() {
storage.removeItem('token')
}
GetToken() {
return storage.getItem('token')
}
ServerInfo() {
return this.makeCall('ServerInfo')
}
@@ -99,12 +102,6 @@
RemoveObject(args) {
return this.makeCall('RemoveObject', args)
}
GetAuth() {
return this.makeCall('GetAuth')
}
GenerateAuth() {
return this.makeCall('GenerateAuth')
}
SetAuth(args) {
return this.makeCall('SetAuth', args)
.then(res => {


@@ -190,8 +190,8 @@
----------------------------*/
.toggle-password {
position: absolute;
bottom: 30px;
right: 35px;
bottom: 0;
right: 0;
width: 30px;
height: 30px;
border: 1px solid #eee;
@@ -206,6 +206,10 @@
background: #eee;
}
}
.has-toggle-password {
position: relative;
}
//--------------------------

View file

@@ -68,6 +68,7 @@
"humanize": "0.0.9",
"identity-obj-proxy": "^3.0.0",
"json-loader": "^0.5.4",
"jwt-decode": "^2.2.0",
"local-storage-fallback": "^4.0.2",
"material-design-iconic-font": "^2.2.0",
"mime-db": "^1.25.0",

File diff suppressed because one or more lines are too long


@@ -3364,9 +3364,9 @@ fstream-ignore@^1.0.5:
minimatch "^3.0.0"
fstream@^1.0.0, fstream@^1.0.10, fstream@^1.0.2:
version "1.0.11"
resolved "https://registry.yarnpkg.com/fstream/-/fstream-1.0.11.tgz#5c1fb1f117477114f0632a0eb4b71b3cb0fd3171"
integrity sha1-XB+x8RdHcRTwYyoOtLcbPLD9MXE=
version "1.0.12"
resolved "https://registry.yarnpkg.com/fstream/-/fstream-1.0.12.tgz#4e8ba8ee2d48be4f7d0de505455548eae5932045"
integrity sha512-WvJ193OHa0GHPEL+AycEJgxvBEwyfRkN1vhjca23OaPVMCaLCXTd5qAu82AjTcgP1UJmytkOKb63Ypde7raDIg==
dependencies:
graceful-fs "^4.1.2"
inherits "~2.0.0"
@@ -3482,7 +3482,7 @@ glob@^7.0.0, glob@^7.1.1, glob@^7.1.2:
once "^1.3.0"
path-is-absolute "^1.0.0"
glob@^7.0.3, glob@^7.0.5, glob@^7.1.3:
glob@^7.0.3, glob@^7.0.5:
version "7.1.3"
resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.3.tgz#3960832d3f1574108342dafd3a67b332c0969df1"
integrity sha512-vcfuiIxogLV4DlGBHIUOwI0IbrJ8HWPc4MU7HzviGeNho/UJDfi6B5p3sHeWIQ0KGIU0Jpxi5ZHxemQfLkkAwQ==
@@ -3494,6 +3494,18 @@ glob@^7.0.3, glob@^7.0.5, glob@^7.1.3:
once "^1.3.0"
path-is-absolute "^1.0.0"
glob@^7.1.3:
version "7.1.4"
resolved "https://registry.yarnpkg.com/glob/-/glob-7.1.4.tgz#aa608a2f6c577ad357e1ae5a5c26d9a8d1969255"
integrity sha512-hkLPepehmnKk41pUGm3sYxoFs/umurYfYJCerbXEyFIWcAzvpipAgVkBqqT9RBKMGjnq6kMuyYwha6csxbiM1A==
dependencies:
fs.realpath "^1.0.0"
inflight "^1.0.4"
inherits "2"
minimatch "^3.0.4"
once "^1.3.0"
path-is-absolute "^1.0.0"
global-modules-path@^2.3.0:
version "2.3.1"
resolved "https://registry.yarnpkg.com/global-modules-path/-/global-modules-path-2.3.1.tgz#e541f4c800a1a8514a990477b267ac67525b9931"
@@ -3546,11 +3558,7 @@ globby@^7.1.1:
pify "^3.0.0"
slash "^1.0.0"
graceful-fs@^4.1.11, graceful-fs@^4.1.4:
version "4.1.11"
resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.1.11.tgz#0e8bdfe4d1ddb8854d64e04ea7c00e2a026e5658"
graceful-fs@^4.1.15, graceful-fs@^4.1.2:
graceful-fs@^4.1.11, graceful-fs@^4.1.15, graceful-fs@^4.1.2, graceful-fs@^4.1.4:
version "4.1.15"
resolved "https://registry.yarnpkg.com/graceful-fs/-/graceful-fs-4.1.15.tgz#ffb703e1066e8a0eeaa4c8b80ba9253eeefbfb00"
integrity sha512-6uHUhOPEBgQ24HM+r6b/QwWfZq+yiFcipKFrOFiBEnWdy5sdzYoi+pJeQaPI5qOLRFqWmAXUPQNsielzdLoecA==
@@ -4907,6 +4915,11 @@ jsprim@^1.2.2:
json-schema "0.2.3"
verror "1.10.0"
jwt-decode@^2.2.0:
version "2.2.0"
resolved "https://registry.yarnpkg.com/jwt-decode/-/jwt-decode-2.2.0.tgz#7d86bd56679f58ce6a84704a657dd392bba81a79"
integrity sha1-fYa9VmefWM5qhHBKZX3TkruoGnk=
keycode@^2.1.2:
version "2.1.9"
resolved "https://registry.yarnpkg.com/keycode/-/keycode-2.1.9.tgz#964a23c54e4889405b4861a5c9f0480d45141dfa"
@@ -5965,6 +5978,7 @@ on-headers@~1.0.1:
once@^1.3.0, once@^1.3.1, once@^1.3.3, once@^1.4.0:
version "1.4.0"
resolved "https://registry.yarnpkg.com/once/-/once-1.4.0.tgz#583b1aa775961d4b113ac17d9c50baef9dd76bd1"
integrity sha1-WDsap3WWHUsROsF9nFC6753Xa9E=
dependencies:
wrappy "1"
@@ -7392,19 +7406,13 @@ right-align@^0.1.1:
dependencies:
align-text "^0.1.1"
rimraf@2, rimraf@^2.2.8, rimraf@^2.5.1, rimraf@^2.6.1, rimraf@^2.6.2:
rimraf@2, rimraf@^2.2.8, rimraf@^2.5.1, rimraf@^2.5.4, rimraf@^2.6.1, rimraf@^2.6.2:
version "2.6.3"
resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.6.3.tgz#b2d104fe0d8fb27cf9e0a1cda8262dd3833c6cab"
integrity sha512-mwqeW5XsA2qAejG46gYdENaxXjx9onRNCfn7L0duuP4hCuTIi/QO7PDK07KJfp1d+izWPrzEJDcSqBa0OZQriA==
dependencies:
glob "^7.1.3"
rimraf@^2.5.4:
version "2.6.2"
resolved "https://registry.yarnpkg.com/rimraf/-/rimraf-2.6.2.tgz#2ed8150d24a16ea8651e6d6ef0f47c4158ce7a36"
dependencies:
glob "^7.0.5"
ripemd160@^2.0.0, ripemd160@^2.0.1:
version "2.0.1"
resolved "https://registry.yarnpkg.com/ripemd160/-/ripemd160-2.0.1.tgz#0f4584295c53a3628af7e6d79aca21ce57d1c6e7"


@@ -99,6 +99,8 @@ var (
"enable": false,
"brokers": null,
"topic": "",
"queueDir": "",
"queueLimit": 0,
"tls": {
"enable": false,
"skipVerify": false,


@@ -27,7 +27,7 @@ import (
"github.com/aliyun/aliyun-oss-go-sdk/oss"
"google.golang.org/api/googleapi"
minio "github.com/minio/minio-go"
minio "github.com/minio/minio-go/v6"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
@@ -879,16 +879,6 @@ var errorCodes = errorCodeMap{
Description: "Object name already exists as a directory.",
HTTPStatusCode: http.StatusConflict,
},
ErrReadQuorum: {
Code: "XMinioReadQuorum",
Description: "Multiple disk failures, unable to reconstruct data.",
HTTPStatusCode: http.StatusServiceUnavailable,
},
ErrWriteQuorum: {
Code: "XMinioWriteQuorum",
Description: "Multiple disks failures, unable to write data.",
HTTPStatusCode: http.StatusServiceUnavailable,
},
ErrInvalidObjectName: {
Code: "XMinioInvalidObjectName",
Description: "Object name contains unsupported characters.",
@@ -1534,7 +1524,7 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
apiErr = ErrKMSAuthFailure
case errOperationTimedOut, context.Canceled, context.DeadlineExceeded:
apiErr = ErrOperationTimedOut
case errNetworkConnReset:
case errDiskNotFound:
apiErr = ErrSlowDown
}
@@ -1593,9 +1583,9 @@ func toAPIErrorCode(ctx context.Context, err error) (apiErr APIErrorCode) {
case InvalidPart:
apiErr = ErrInvalidPart
case InsufficientWriteQuorum:
apiErr = ErrWriteQuorum
apiErr = ErrSlowDown
case InsufficientReadQuorum:
apiErr = ErrReadQuorum
apiErr = ErrSlowDown
case UnsupportedDelimiter:
apiErr = ErrNotImplemented
case InvalidMarkerPrefixCombination:


@@ -39,8 +39,8 @@ var toAPIErrorTests = []struct {
{err: ObjectNameInvalid{}, errCode: ErrInvalidObjectName},
{err: InvalidUploadID{}, errCode: ErrNoSuchUpload},
{err: InvalidPart{}, errCode: ErrInvalidPart},
{err: InsufficientReadQuorum{}, errCode: ErrReadQuorum},
{err: InsufficientWriteQuorum{}, errCode: ErrWriteQuorum},
{err: InsufficientReadQuorum{}, errCode: ErrSlowDown},
{err: InsufficientWriteQuorum{}, errCode: ErrSlowDown},
{err: UnsupportedDelimiter{}, errCode: ErrNotImplemented},
{err: InvalidMarkerPrefixCombination{}, errCode: ErrNotImplemented},
{err: InvalidUploadIDKeyCombination{}, errCode: ErrNotImplemented},


@@ -30,7 +30,7 @@ import (
"github.com/gorilla/mux"
"github.com/minio/minio-go/pkg/set"
"github.com/minio/minio-go/v6/pkg/set"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/dns"


@@ -29,7 +29,7 @@ import (
etcd "github.com/coreos/etcd/clientv3"
dns2 "github.com/miekg/dns"
"github.com/minio/cli"
"github.com/minio/minio-go/pkg/set"
"github.com/minio/minio-go/v6/pkg/set"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/cmd/logger/target/console"
"github.com/minio/minio/cmd/logger/target/http"


@@ -107,7 +107,7 @@ func readConfigEtcd(ctx context.Context, client *etcd.Client, configFile string)
// watchConfigEtcd - watches for changes on `configFile` on etcd and loads them.
func watchConfigEtcd(objAPI ObjectLayer, configFile string, loadCfgFn func(ObjectLayer) error) {
for {
watchCh := globalEtcdClient.Watch(context.Background(), iamConfigPrefix)
watchCh := globalEtcdClient.Watch(context.Background(), configFile)
select {
case <-GlobalServiceDoneCh:
return


@@ -325,7 +325,7 @@ func (s *serverConfig) TestNotificationTargets() error {
if !v.Enable {
continue
}
t, err := target.NewKafkaTarget(k, v)
t, err := target.NewKafkaTarget(k, v, GlobalServiceDoneCh)
if err != nil {
return fmt.Errorf("kafka(%s): %s", k, err.Error())
}
@ -667,7 +667,7 @@ func getNotificationTargets(config *serverConfig) *event.TargetList {
for id, args := range config.Notify.Kafka {
if args.Enable {
newTarget, err := target.NewKafkaTarget(id, args)
newTarget, err := target.NewKafkaTarget(id, args, GlobalServiceDoneCh)
if err != nil {
logger.LogIf(context.Background(), err)
continue

View file

@ -200,7 +200,7 @@ func TestValidateConfig(t *testing.T) {
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "postgresql": { "1": { "enable": true, "connectionString": "", "table": "", "host": "", "port": "", "user": "", "password": "", "database": "" }}}}`, false},
// Test 16 - Test Kafka
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "kafka": { "1": { "enable": true, "brokers": null, "topic": "" } }}}`, false},
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "kafka": { "1": { "enable": true, "brokers": null, "topic": "", "queueDir": "", "queueLimit": 0 } }}}`, false},
// Test 17 - Test Webhook
{`{"version": "` + v + `", "credential": { "accessKey": "minio", "secretKey": "minio123" }, "region": "us-east-1", "browser": "on", "notify": { "webhook": { "1": { "enable": true, "endpoint": "" } }}}`, false},

View file

@ -884,7 +884,7 @@ type serverConfigV32 struct {
} `json:"policy"`
}
// serverConfigV33 is just like version '32', removes clientID from NATS and MQTT, and adds queueDir, queueLimit with MQTT.
// serverConfigV33 is just like version '32', removes clientID from NATS and MQTT, and adds queueDir, queueLimit in MQTT and kafka.
type serverConfigV33 struct {
quick.Config `json:"-"` // ignore interfaces

View file

@ -29,7 +29,7 @@ import (
"path"
"strconv"
"github.com/minio/minio-go/pkg/encrypt"
"github.com/minio/minio-go/v6/pkg/encrypt"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/ioutil"

View file

@ -23,7 +23,7 @@ import (
"testing"
humanize "github.com/dustin/go-humanize"
"github.com/minio/minio-go/pkg/encrypt"
"github.com/minio/minio-go/v6/pkg/encrypt"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/sio"
)

View file

@ -22,7 +22,7 @@ import (
"strconv"
"strings"
"github.com/minio/minio-go/pkg/set"
"github.com/minio/minio-go/v6/pkg/set"
"github.com/minio/minio/pkg/ellipses"
)

View file

@ -31,7 +31,7 @@ import (
humanize "github.com/dustin/go-humanize"
"github.com/minio/cli"
"github.com/minio/minio-go/pkg/set"
"github.com/minio/minio-go/v6/pkg/set"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/cpu"
"github.com/minio/minio/pkg/disk"

View file

@ -30,7 +30,7 @@ import (
"sync/atomic"
"time"
"github.com/minio/minio-go/pkg/s3utils"
"github.com/minio/minio-go/v6/pkg/s3utils"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/lock"
"github.com/minio/minio/pkg/madmin"

View file

@ -24,7 +24,7 @@ import (
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/hash"
minio "github.com/minio/minio-go"
minio "github.com/minio/minio-go/v6"
)
var (

View file

@ -221,6 +221,11 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
logger.FatalIf(err, "Unable to initialize gateway backend")
}
// Populate existing buckets to the etcd backend
if globalDNSConfig != nil {
initFederatorBackend(newObject)
}
if enableConfigOps {
// Create a new config system.
globalConfigSys = NewConfigSys()
@ -299,6 +304,9 @@ func StartGateway(ctx *cli.Context, gw Gateway) {
printGatewayStartupMessage(getAPIEndpoints(), gatewayName)
}
// Set when gateway is enabled
globalIsGateway = true
// Set uptime time after object layer has initialized.
globalBootTime = UTCNow()

View file

@ -36,7 +36,7 @@ import (
"github.com/Azure/go-autorest/autorest/azure"
humanize "github.com/dustin/go-humanize"
"github.com/minio/cli"
miniogopolicy "github.com/minio/minio-go/pkg/policy"
miniogopolicy "github.com/minio/minio-go/v6/pkg/policy"
"github.com/minio/minio/cmd"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"

View file

@ -31,7 +31,7 @@ import (
b2 "github.com/minio/blazer/base"
"github.com/minio/cli"
miniogopolicy "github.com/minio/minio-go/pkg/policy"
miniogopolicy "github.com/minio/minio-go/v6/pkg/policy"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
h2 "github.com/minio/minio/pkg/hash"

View file

@ -38,7 +38,7 @@ import (
"cloud.google.com/go/storage"
humanize "github.com/dustin/go-humanize"
"github.com/minio/cli"
miniogopolicy "github.com/minio/minio-go/pkg/policy"
miniogopolicy "github.com/minio/minio-go/v6/pkg/policy"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/policy"

View file

@ -28,7 +28,7 @@ import (
"cloud.google.com/go/storage"
"google.golang.org/api/googleapi"
miniogo "github.com/minio/minio-go"
miniogo "github.com/minio/minio-go/v6"
minio "github.com/minio/minio/cmd"
)

View file

@ -19,7 +19,7 @@ package hdfs
import (
"strings"
"github.com/minio/minio-go/pkg/s3utils"
"github.com/minio/minio-go/v6/pkg/s3utils"
minio "github.com/minio/minio/cmd"
)

View file

@ -31,7 +31,7 @@ import (
"github.com/minio/cli"
"github.com/minio/hdfs/v3"
"github.com/minio/minio-go/pkg/s3utils"
"github.com/minio/minio-go/v6/pkg/s3utils"
minio "github.com/minio/minio/cmd"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"

View file

@ -29,8 +29,8 @@ import (
humanize "github.com/dustin/go-humanize"
"github.com/minio/cli"
miniogopolicy "github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio-go/pkg/s3utils"
miniogopolicy "github.com/minio/minio-go/v6/pkg/policy"
"github.com/minio/minio-go/v6/pkg/s3utils"
minio "github.com/minio/minio/cmd"
xhttp "github.com/minio/minio/cmd/http"
"github.com/minio/minio/cmd/logger"

View file

@ -26,7 +26,7 @@ import (
"strings"
"time"
"github.com/minio/minio-go/pkg/encrypt"
"github.com/minio/minio-go/v6/pkg/encrypt"
minio "github.com/minio/minio/cmd"
"github.com/minio/minio/cmd/logger"

View file

@ -27,12 +27,12 @@ import (
"time"
"github.com/minio/cli"
miniogo "github.com/minio/minio-go"
"github.com/minio/minio-go/pkg/credentials"
miniogo "github.com/minio/minio-go/v6"
"github.com/minio/minio-go/v6/pkg/credentials"
minio "github.com/minio/minio/cmd"
"github.com/minio/minio-go/pkg/encrypt"
"github.com/minio/minio-go/pkg/s3utils"
"github.com/minio/minio-go/v6/pkg/encrypt"
"github.com/minio/minio-go/v6/pkg/s3utils"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/policy"

View file

@ -20,7 +20,7 @@ import (
"fmt"
"testing"
miniogo "github.com/minio/minio-go"
miniogo "github.com/minio/minio-go/v6"
"github.com/minio/minio/pkg/hash"
minio "github.com/minio/minio/cmd"

View file

@ -24,7 +24,7 @@ import (
"strings"
"time"
"github.com/minio/minio-go/pkg/set"
"github.com/minio/minio-go/v6/pkg/set"
humanize "github.com/dustin/go-humanize"
"github.com/minio/minio/cmd/crypto"
@ -379,6 +379,20 @@ type resourceHandler struct {
// setCorsHandler handler for CORS (Cross Origin Resource Sharing)
func setCorsHandler(h http.Handler) http.Handler {
commonS3Headers := []string{
"Date",
"ETag",
"Server",
"Connection",
"Accept-Ranges",
"Content-Range",
"Content-Encoding",
"Content-Length",
"Content-Type",
"X-Amz*",
"x-amz*",
"*",
}
c := cors.New(cors.Options{
AllowedOrigins: []string{"*"},
@ -391,8 +405,8 @@ func setCorsHandler(h http.Handler) http.Handler {
http.MethodOptions,
http.MethodPatch,
},
AllowedHeaders: []string{"*"},
ExposedHeaders: []string{"*"},
AllowedHeaders: commonS3Headers,
ExposedHeaders: commonS3Headers,
AllowCredentials: true,
})

View file

@ -23,7 +23,7 @@ import (
"time"
isatty "github.com/mattn/go-isatty"
"github.com/minio/minio-go/pkg/set"
"github.com/minio/minio-go/v6/pkg/set"
etcd "github.com/coreos/etcd/clientv3"
humanize "github.com/dustin/go-humanize"
@ -82,8 +82,6 @@ const (
// GlobalMultipartCleanupInterval - Cleanup interval when the stale multipart cleanup is initiated.
GlobalMultipartCleanupInterval = time.Hour * 24 // 24 hrs.
// Refresh interval to update in-memory bucket policy cache.
globalRefreshBucketPolicyInterval = 5 * time.Minute
// Refresh interval to update in-memory iam config cache.
globalRefreshIAMInterval = 5 * time.Minute
@ -111,6 +109,9 @@ var (
// Indicates if the running minio server is an erasure-code backend.
globalIsXL = false
// Indicates if the running minio is in gateway mode.
globalIsGateway = false
// This flag is set to 'true' by default
globalIsBrowserEnabled = true

View file

@ -28,7 +28,7 @@ import (
"testing"
"time"
"github.com/minio/minio-go/pkg/set"
"github.com/minio/minio-go/v6/pkg/set"
)
var serverPort uint32 = 60000

View file

@ -26,9 +26,8 @@ import (
"time"
humanize "github.com/dustin/go-humanize"
"golang.org/x/net/http2"
"github.com/minio/minio-go/pkg/set"
"github.com/minio/minio-go/v6/pkg/set"
"github.com/minio/minio/pkg/certs"
)
@ -56,9 +55,6 @@ const (
// DefaultMaxHeaderBytes - default maximum HTTP header size in bytes.
DefaultMaxHeaderBytes = 1 * humanize.MiByte
// DefaultHTTP2MaxConcurrentStreams - default value for HTTP 2.0 maximum concurrent streams allowed.
DefaultHTTP2MaxConcurrentStreams = 1024
)
// Server - extended http.Server supports multiple addresses to serve and enhanced connection handling.
@ -133,11 +129,6 @@ func (srv *Server) Start() (err error) {
// Start servicing with listener.
if tlsConfig != nil {
if err = http2.ConfigureServer(&srv.Server, &http2.Server{
MaxConcurrentStreams: DefaultHTTP2MaxConcurrentStreams,
}); err != nil {
return err
}
return srv.Server.Serve(tls.NewListener(listener, tlsConfig))
}
return srv.Server.Serve(listener)
@ -211,7 +202,14 @@ func NewServer(addrs []string, handler http.Handler, getCert certs.GetCertificat
CipherSuites: defaultCipherSuites,
CurvePreferences: secureCurves,
MinVersion: tls.VersionTLS12,
NextProtos: []string{"h2", "http/1.1"},
// Do not edit the next line: the protocol priority is
// kept in this order on purpose. HTTP 2.0 clients may
// still negotiate HTTP 2.0 with the server if needed,
// but by default HTTP 1.1 is expected. Revisit this
// when we wish to make HTTP 2.0 the default priority
// for HTTP protocol negotiation again.
NextProtos: []string{"http/1.1", "h2"},
}
tlsConfig.GetCertificate = getCert
}

View file

@ -25,7 +25,7 @@ import (
"time"
etcd "github.com/coreos/etcd/clientv3"
"github.com/minio/minio-go/pkg/set"
"github.com/minio/minio-go/v6/pkg/set"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
iampolicy "github.com/minio/minio/pkg/iam/policy"
@ -79,7 +79,8 @@ func (sys *IAMSys) Init(objAPI ObjectLayer) error {
go func() {
// Refresh IAMSys with etcd watch.
for {
watchCh := globalEtcdClient.Watch(context.Background(), iamConfigPrefix)
watchCh := globalEtcdClient.Watch(context.Background(),
iamConfigPrefix, etcd.WithPrefix())
select {
case <-GlobalServiceDoneCh:
return
@ -490,6 +491,51 @@ func (sys *IAMSys) SetUser(accessKey string, uinfo madmin.UserInfo) error {
return nil
}
// SetUserSecretKey - sets user secret key
func (sys *IAMSys) SetUserSecretKey(accessKey string, secretKey string) error {
objectAPI := newObjectLayerFn()
if objectAPI == nil {
return errServerNotInitialized
}
sys.Lock()
defer sys.Unlock()
cred, ok := sys.iamUsersMap[accessKey]
if !ok {
return errNoSuchUser
}
uinfo := madmin.UserInfo{
SecretKey: secretKey,
Status: madmin.AccountStatus(cred.Status),
}
configFile := pathJoin(iamConfigUsersPrefix, accessKey, iamIdentityFile)
data, err := json.Marshal(uinfo)
if err != nil {
return err
}
if globalEtcdClient != nil {
err = saveConfigEtcd(context.Background(), globalEtcdClient, configFile, data)
} else {
err = saveConfig(context.Background(), objectAPI, configFile, data)
}
if err != nil {
return err
}
sys.iamUsersMap[accessKey] = auth.Credentials{
AccessKey: accessKey,
SecretKey: secretKey,
Status: string(uinfo.Status),
}
return nil
}
// GetUser - get user credentials
func (sys *IAMSys) GetUser(accessKey string) (cred auth.Credentials, ok bool) {
sys.RLock()

View file

@ -47,6 +47,7 @@ var (
errChangeCredNotAllowed = errors.New("Changing access key and secret key not allowed")
errAuthentication = errors.New("Authentication failed, check your access credentials")
errNoAuthToken = errors.New("JWT token missing")
errIncorrectCreds = errors.New("Current access key or secret key is incorrect")
)
func authenticateJWTUsers(accessKey, secretKey string, expiry time.Duration) (string, error) {

View file

@ -17,12 +17,8 @@
package cmd
import (
"context"
"errors"
"path"
"time"
"github.com/minio/minio/cmd/logger"
)
const lockRESTVersion = "v1"
@ -53,28 +49,22 @@ type lockResponse struct {
func (l *localLocker) removeEntryIfExists(nlrip nameLockRequesterInfoPair) {
// Check if entry is still in map (could have been removed altogether by 'concurrent' (R)Unlock of last entry)
if lri, ok := l.lockMap[nlrip.name]; ok {
if !l.removeEntry(nlrip.name, nlrip.lri.UID, &lri) {
// Remove failed, in case it is a:
if nlrip.lri.Writer {
// Writer: this should never happen as the whole (mapped) entry should have been deleted
reqInfo := (&logger.ReqInfo{}).AppendTags("name", nlrip.name)
reqInfo.AppendTags("uid", nlrip.lri.UID)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, errors.New("Lock maintenance failed to remove entry for write lock (should never happen)"))
} // Reader: this can happen if multiple read locks were active and
// the one we are looking for has been released concurrently (so it is fine).
} // Removal went okay, all is fine.
// Even if the entry exists, it may not be the same entry that was
// considered expired, so we simply attempt to remove it; if removal
// is not possible, there is nothing more we need to do.
l.removeEntry(nlrip.name, nlrip.lri.UID, &lri)
}
}
// removeEntry either, based on the uid of the lock message, removes a single entry from the
// lockRequesterInfo array or the whole array from the map (in case of a write lock or last read lock)
// removeEntry, based on the uid of the lock message, removes a single entry from the
// lockRequesterInfo array or the whole array from the map (in case of a write lock
// or last read lock)
func (l *localLocker) removeEntry(name, uid string, lri *[]lockRequesterInfo) bool {
// Find correct entry to remove based on uid.
for index, entry := range *lri {
if entry.UID == uid {
if len(*lri) == 1 {
// Remove the (last) lock.
// Remove the write lock.
delete(l.lockMap, name)
} else {
// Remove the appropriate read lock.
@ -84,6 +74,7 @@ func (l *localLocker) removeEntry(name, uid string, lri *[]lockRequesterInfo) bo
return true
}
}
// None found, return false; perhaps the entry was removed in a previous run.
return false
}

View file

@ -286,17 +286,17 @@ func (l *lockRESTServer) lockMaintenance(interval time.Duration) {
Resource: nlrip.name,
})
// Close the connection regardless of the call response.
c.Close()
// For successful response, verify if lock is indeed active or stale.
// For successful response, verify if lock was indeed active or stale.
if expired {
// The lock is no longer active at server that originated the lock
// So remove the lock from the map.
// The lock is no longer active at server that originated
// the lock, attempt to remove the lock.
l.ll.mutex.Lock()
l.ll.removeEntryIfExists(nlrip) // Purge the stale entry if it exists.
l.ll.mutex.Unlock()
}
// Close the connection regardless of the call response.
c.Close()
}
}

View file

@ -29,7 +29,7 @@ import (
"time"
"github.com/minio/highwayhash"
"github.com/minio/minio-go/pkg/set"
"github.com/minio/minio-go/v6/pkg/set"
"github.com/minio/minio/cmd/logger/message/log"
)

View file

@ -29,7 +29,7 @@ import (
"github.com/minio/dsync"
"github.com/minio/lsync"
"github.com/minio/minio-go/pkg/set"
"github.com/minio/minio-go/v6/pkg/set"
"github.com/minio/minio/cmd/logger"
xnet "github.com/minio/minio/pkg/net"
)

View file

@ -182,11 +182,11 @@ func (d *naughtyDisk) DeleteFileBulk(volume string, paths []string) ([]error, er
return errs, nil
}
func (d *naughtyDisk) WriteAll(volume string, path string, buf []byte) (err error) {
func (d *naughtyDisk) WriteAll(volume string, path string, reader io.Reader) (err error) {
if err := d.calcError(); err != nil {
return err
}
return d.disk.WriteAll(volume, path, buf)
return d.disk.WriteAll(volume, path, reader)
}
func (d *naughtyDisk) ReadAll(volume string, path string) (buf []byte, err error) {

View file

@ -27,7 +27,7 @@ import (
"strings"
"syscall"
"github.com/minio/minio-go/pkg/set"
"github.com/minio/minio-go/v6/pkg/set"
"github.com/minio/minio/cmd/logger"
)

View file

@ -24,7 +24,7 @@ import (
"runtime"
"testing"
"github.com/minio/minio-go/pkg/set"
"github.com/minio/minio-go/v6/pkg/set"
)
func TestMustSplitHostPort(t *testing.T) {

View file

@ -31,6 +31,7 @@ import (
"sync"
"time"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/event"
xnet "github.com/minio/minio/pkg/net"
@ -910,6 +911,11 @@ func (args eventArgs) ToEvent() event.Event {
}
func sendEvent(args eventArgs) {
// remove sensitive encryption entries in metadata.
crypto.RemoveSensitiveEntries(args.Object.UserDefined)
crypto.RemoveInternalEntries(args.Object.UserDefined)
// globalNotificationSys is not initialized in gateway mode.
if globalNotificationSys == nil {
return

View file

@ -27,22 +27,6 @@ import (
// underlying storage layer.
func toObjectErr(err error, params ...string) error {
switch err {
case errDiskNotFound:
switch len(params) {
case 1:
err = BucketNotFound{Bucket: params[0]}
case 2:
err = ObjectNotFound{
Bucket: params[0],
Object: params[1],
}
case 3:
err = InvalidUploadID{
Bucket: params[0],
Object: params[1],
UploadID: params[2],
}
}
case errVolumeNotFound:
if len(params) >= 1 {
err = BucketNotFound{Bucket: params[0]}

View file

@ -21,7 +21,7 @@ import (
"io"
"net/http"
"github.com/minio/minio-go/pkg/encrypt"
"github.com/minio/minio-go/v6/pkg/encrypt"
"github.com/minio/minio/pkg/madmin"
"github.com/minio/minio/pkg/policy"
)

View file

@ -255,8 +255,8 @@ func testPutObjectPartDiskNotFound(obj ObjectLayer, instanceType string, disks [
}
// As all disks are not available, bucket not found.
expectedErr2 := BucketNotFound{Bucket: testCase.bucketName}
if err.Error() != expectedErr2.Error() {
expectedErr2 := errDiskNotFound
if err != errDiskNotFound {
t.Fatalf("Test %s: expected error %s, got %s instead.", instanceType, expectedErr2, err)
}
}

View file

@ -34,7 +34,7 @@ import (
"unicode/utf8"
snappy "github.com/golang/snappy"
"github.com/minio/minio-go/pkg/s3utils"
"github.com/minio/minio-go/v6/pkg/s3utils"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/dns"

View file

@ -33,8 +33,8 @@ import (
"time"
"github.com/gorilla/mux"
miniogo "github.com/minio/minio-go"
"github.com/minio/minio-go/pkg/encrypt"
miniogo "github.com/minio/minio-go/v6"
"github.com/minio/minio-go/v6/pkg/encrypt"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/dns"

View file

@ -25,10 +25,9 @@ import (
"path"
"strings"
"sync"
"time"
miniogopolicy "github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio-go/pkg/set"
miniogopolicy "github.com/minio/minio-go/v6/pkg/policy"
"github.com/minio/minio-go/v6/pkg/set"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/handlers"
@ -61,6 +60,11 @@ func (sys *PolicySys) removeDeletedBuckets(bucketInfos []BucketInfo) {
// Set - sets policy to given bucket name. If policy is empty, existing policy is removed.
func (sys *PolicySys) Set(bucketName string, policy policy.Policy) {
if globalIsGateway {
// Set policy is a no-op in gateway mode.
return
}
sys.Lock()
defer sys.Unlock()
@ -84,9 +88,21 @@ func (sys *PolicySys) IsAllowed(args policy.Args) bool {
sys.RLock()
defer sys.RUnlock()
// If policy is available for given bucket, check the policy.
if p, found := sys.bucketPolicyMap[args.BucketName]; found {
return p.IsAllowed(args)
if globalIsGateway {
// When gateway is enabled, no cached value
// is used to validate bucket policies.
objAPI := newObjectLayerFn()
if objAPI != nil {
config, err := objAPI.GetBucketPolicy(context.Background(), args.BucketName)
if err == nil {
return config.IsAllowed(args)
}
}
} else {
// If policy is available for given bucket, check the policy.
if p, found := sys.bucketPolicyMap[args.BucketName]; found {
return p.IsAllowed(args)
}
}
// As policy is not available for given bucket name, returns IsOwner i.e.
@ -135,21 +151,11 @@ func (sys *PolicySys) Init(objAPI ObjectLayer) error {
return errInvalidArgument
}
defer func() {
// Refresh PolicySys in background.
go func() {
ticker := time.NewTicker(globalRefreshBucketPolicyInterval)
defer ticker.Stop()
for {
select {
case <-GlobalServiceDoneCh:
return
case <-ticker.C:
sys.refresh(objAPI)
}
}
}()
}()
// In gateway mode, we don't need to load the policies
// from the backend.
if globalIsGateway {
return nil
}
doneCh := make(chan struct{})
defer close(doneCh)

View file

@ -20,8 +20,8 @@ import (
"reflect"
"testing"
miniogopolicy "github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio-go/pkg/set"
miniogopolicy "github.com/minio/minio-go/v6/pkg/policy"
"github.com/minio/minio-go/v6/pkg/set"
"github.com/minio/minio/pkg/policy"
"github.com/minio/minio/pkg/policy/condition"
)

View file

@ -35,20 +35,19 @@ import (
"bytes"
humanize "github.com/dustin/go-humanize"
"github.com/klauspost/readahead"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/disk"
xioutil "github.com/minio/minio/pkg/ioutil"
"github.com/minio/minio/pkg/mountinfo"
"github.com/ncw/directio"
)
const (
diskMinFreeSpace = 900 * humanize.MiByte // Min 900MiB free space.
diskMinTotalSpace = diskMinFreeSpace // Min 900MiB total space.
maxAllowedIOError = 5
posixWriteBlockSize = 4 * humanize.MiByte
// DirectIO alignment needs to be 4K. Defined here as
// directio.AlignSize is defined as 0 in MacOS causing divide by 0 error.
directioAlignSize = 4096
diskMinFreeSpace = 900 * humanize.MiByte // Min 900MiB free space.
diskMinTotalSpace = diskMinFreeSpace // Min 900MiB total space.
maxAllowedIOError = 5
readBlockSize = humanize.KiByte * 32 // Default read block size 32KiB.
)
// isValidVolname verifies a volname name in accordance with object
@ -190,10 +189,9 @@ func newPosix(path string) (*posix, error) {
p := &posix{
connected: true,
diskPath: path,
// 4MiB buffer pool for posix internal operations.
pool: sync.Pool{
New: func() interface{} {
b := directio.AlignedBlock(posixWriteBlockSize)
b := directio.AlignedBlock(readBlockSize)
return &b
},
},
@ -1110,10 +1108,13 @@ func (s *posix) ReadFileStream(volume, path string, offset, length int64) (io.Re
if _, err = file.Seek(offset, io.SeekStart); err != nil {
return nil, err
}
return struct {
r := struct {
io.Reader
io.Closer
}{Reader: io.LimitReader(file, length), Closer: file}, nil
}{Reader: io.LimitReader(file, length), Closer: file}
return readahead.NewReadCloser(r), nil
}
// CreateFile - creates the file.
@ -1182,10 +1183,6 @@ func (s *posix) CreateFile(volume, path string, fileSize int64, r io.Reader) (er
return err
}
}
defer func() {
w.Sync() // Sync before close.
w.Close()
}()
var e error
if fileSize > 0 {
@ -1208,76 +1205,26 @@ func (s *posix) CreateFile(volume, path string, fileSize int64, r io.Reader) (er
return err
}
defer w.Close()
bufp := s.pool.Get().(*[]byte)
defer s.pool.Put(bufp)
buf := *bufp
// Writes remaining bytes in the buffer.
writeRemaining := func(w *os.File, buf []byte) (remainingWritten int, err error) {
var n int
remaining := len(buf)
// The following logic writes the remainging data such that it writes whatever best is possible (aligned buffer)
// in O_DIRECT mode and remaining (unaligned buffer) in non-O_DIRECT mode.
remainingAligned := (remaining / directioAlignSize) * directioAlignSize
remainingAlignedBuf := buf[:remainingAligned]
remainingUnalignedBuf := buf[remainingAligned:]
if len(remainingAlignedBuf) > 0 {
n, err = w.Write(remainingAlignedBuf)
if err != nil {
return 0, err
}
remainingWritten += n
}
if len(remainingUnalignedBuf) > 0 {
// Write on O_DIRECT fds fail if buffer is not 4K aligned, hence disable O_DIRECT.
if err = disk.DisableDirectIO(w); err != nil {
return 0, err
}
n, err = w.Write(remainingUnalignedBuf)
if err != nil {
return 0, err
}
remainingWritten += n
}
return remainingWritten, nil
written, err := xioutil.CopyAligned(w, r, *bufp)
if err != nil {
return err
}
var written int
for {
var n int
n, err = io.ReadFull(r, buf)
switch err {
case nil:
n, err = w.Write(buf)
if err != nil {
return err
}
written += n
case io.ErrUnexpectedEOF:
n, err = writeRemaining(w, buf[:n])
if err != nil {
return err
}
written += n
fallthrough
case io.EOF:
if fileSize != -1 {
if written < int(fileSize) {
return errLessData
}
if written > int(fileSize) {
return errMoreData
}
}
return nil
default:
return err
}
if written < fileSize {
return errLessData
} else if written > fileSize {
return errMoreData
}
return nil
}
func (s *posix) WriteAll(volume, path string, buf []byte) (err error) {
func (s *posix) WriteAll(volume, path string, reader io.Reader) (err error) {
defer func() {
if err == errFaultyDisk {
atomic.AddInt32(&s.ioErrCount, 1)
@ -1295,11 +1242,13 @@ func (s *posix) WriteAll(volume, path string, buf []byte) (err error) {
return err
}
if _, err = w.Write(buf); err != nil {
return err
}
defer w.Close()
return w.Close()
bufp := s.pool.Get().(*[]byte)
defer s.pool.Put(bufp)
_, err = io.CopyBuffer(w, reader, *bufp)
return err
}
// AppendFile - append a byte array at path, if file doesn't exist at

View file

@ -22,7 +22,7 @@ import (
"net/http"
"testing"
minio "github.com/minio/minio-go"
minio "github.com/minio/minio-go/v6"
)
// Test Post Policy parsing and checking conditions

View file

@ -28,12 +28,22 @@ import (
"time"
xhttp "github.com/minio/minio/cmd/http"
"golang.org/x/net/http2"
)
// DefaultRESTTimeout - default RPC timeout is one minute.
const DefaultRESTTimeout = 1 * time.Minute
// NetworkError - error type in case of errors related to http/transport
// e.g. connection refused, connection reset, DNS resolution failure, etc.
// Errors returned by the storage-rest-server (e.g. errFileNotFound, errDiskNotFound) are not considered network errors.
type NetworkError struct {
Err error
}
func (n *NetworkError) Error() string {
return n.Err.Error()
}
// Client - http based RPC client.
type Client struct {
httpClient *http.Client
@ -46,7 +56,7 @@ type Client struct {
func (c *Client) Call(method string, values url.Values, body io.Reader, length int64) (reply io.ReadCloser, err error) {
req, err := http.NewRequest(http.MethodPost, c.url.String()+"/"+method+"?"+values.Encode(), body)
if err != nil {
return nil, err
return nil, &NetworkError{err}
}
req.Header.Set("Authorization", "Bearer "+c.newAuthToken())
@ -56,7 +66,7 @@ func (c *Client) Call(method string, values url.Values, body io.Reader, length i
}
resp, err := c.httpClient.Do(req)
if err != nil {
return nil, err
return nil, &NetworkError{err}
}
if resp.StatusCode != http.StatusOK {
@ -107,12 +117,6 @@ func NewClient(url *url.URL, tlsConfig *tls.Config, timeout time.Duration, newAu
TLSClientConfig: tlsConfig,
DisableCompression: true,
}
if tlsConfig != nil {
// If TLS is enabled configure http2
if err := http2.ConfigureTransport(tr); err != nil {
return nil, err
}
}
return &Client{
httpClient: &http.Client{Transport: tr},
httpIdleConnsCloser: tr.CloseIdleConnections,

View file

@ -146,6 +146,7 @@ func sumHMAC(key []byte, data []byte) []byte {
// extractSignedHeaders extract signed headers from Authorization header
func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header, APIErrorCode) {
reqHeaders := r.Header
reqQueries := r.URL.Query()
// find whether "host" is part of list of signed headers.
// if not return ErrUnsignedHeaders. "host" is mandatory.
if !contains(signedHeaders, "host") {
@ -156,6 +157,10 @@ func extractSignedHeaders(signedHeaders []string, r *http.Request) (http.Header,
// `host` will not be found in the headers, can be found in r.Host.
// but it is always necessary that the list of signed headers contains host.
val, ok := reqHeaders[http.CanonicalHeaderKey(header)]
if !ok {
// try to set headers from Query String
val, ok = reqQueries[header]
}
if ok {
for _, enc := range val {
extractedSignedHeaders.Add(header, enc)

View file

@ -17,6 +17,7 @@
package cmd
import (
"github.com/minio/minio/cmd/crypto"
"net/http"
"testing"
)
@ -149,6 +150,22 @@ func TestExtractSignedHeaders(t *testing.T) {
t.Fatalf("Expected the APIErrorCode to be %d, but got %d", ErrNone, errCode)
}
inputQuery := r.URL.Query()
// case where some headers need to be read from the request query
signedHeaders = append(signedHeaders, "x-amz-server-side-encryption")
// expect to fail with `ErrUnsignedHeaders` because some headers cannot be found
_, errCode = extractSignedHeaders(signedHeaders, r)
if errCode != ErrUnsignedHeaders {
t.Fatalf("Expected the APIErrorCode to %d, but got %d", ErrUnsignedHeaders, errCode)
}
// set the header value through a query parameter
inputQuery.Add("x-amz-server-side-encryption", crypto.SSEAlgorithmAES256)
r.URL.RawQuery = inputQuery.Encode()
_, errCode = extractSignedHeaders(signedHeaders, r)
if errCode != ErrNone {
t.Fatalf("Expected the APIErrorCode to be %d, but got %d", ErrNone, errCode)
}
// "x-amz-content-sha256" header value from the extracted result.
extractedContentSha256 := extractedSignedHeaders.Get("x-amz-content-sha256")
// "host" header value from the extracted result.

View file

@ -35,7 +35,7 @@ import (
"strings"
"time"
"github.com/minio/minio-go/pkg/s3utils"
"github.com/minio/minio-go/v6/pkg/s3utils"
sha256 "github.com/minio/sha256-simd"
)
@ -251,13 +251,19 @@ func doesPresignedSignatureMatch(hashedPayload string, r *http.Request, region s
// Save other headers available in the request parameters.
for k, v := range req.URL.Query() {
key := strings.ToLower(k)
// Handle the metadata in presigned put query string
if strings.Contains(strings.ToLower(k), "x-amz-meta-") {
if strings.Contains(key, "x-amz-meta-") {
query.Set(k, v[0])
continue
}
if strings.Contains(key, "x-amz-server-side-") {
query.Set(k, v[0])
}
if strings.HasPrefix(strings.ToLower(k), "x-amz") {
if strings.HasPrefix(key, "x-amz") {
continue
}
query[k] = v

View file

@ -54,7 +54,7 @@ type StorageAPI interface {
DeleteFileBulk(volume string, paths []string) (errs []error, err error)
// Write all data, syncs the data to disk.
WriteAll(volume string, path string, buf []byte) (err error)
WriteAll(volume string, path string, reader io.Reader) (err error)
// Read all.
ReadAll(volume string, path string) (buf []byte, err error)

View file

@ -21,7 +21,6 @@ import (
"crypto/tls"
"io"
"io/ioutil"
"net"
"net/url"
"path"
"strconv"
@ -44,32 +43,10 @@ func isNetworkError(err error) bool {
if err.Error() == errConnectionStale.Error() {
return true
}
if strings.Contains(err.Error(), "connection reset by peer") {
if _, ok := err.(*rest.NetworkError); ok {
return true
}
if uerr, isURLError := err.(*url.Error); isURLError {
if uerr.Timeout() {
return true
}
err = uerr.Err
}
_, isNetOpError := err.(*net.OpError)
return isNetOpError
}
// Attempt to approximate network error with a
// typed network error, otherwise default to
// errDiskNotFound
func toNetworkError(err error) error {
if err == nil {
return err
}
if strings.Contains(err.Error(), "connection reset by peer") {
return errNetworkConnReset
}
return errDiskNotFound
return false
}
// Converts rpc.ServerError to underlying error. This function is
@ -81,7 +58,7 @@ func toStorageErr(err error) error {
}
if isNetworkError(err) {
return toNetworkError(err)
return errDiskNotFound
}
switch err.Error() {
@ -256,11 +233,10 @@ func (client *storageRESTClient) CreateFile(volume, path string, length int64, r
}
// WriteAll - write all data to a file.
func (client *storageRESTClient) WriteAll(volume, path string, buffer []byte) error {
func (client *storageRESTClient) WriteAll(volume, path string, reader io.Reader) error {
values := make(url.Values)
values.Set(storageRESTVolume, volume)
values.Set(storageRESTFilePath, path)
reader := bytes.NewBuffer(buffer)
respBody, err := client.call(storageRESTMethodWriteAll, values, reader, -1)
defer http.DrainBody(respBody)
return err

View file

@ -245,14 +245,7 @@ func (s *storageRESTServer) WriteAllHandler(w http.ResponseWriter, r *http.Reque
return
}
buf := make([]byte, r.ContentLength)
_, err := io.ReadFull(r.Body, buf)
if err != nil {
s.writeErrorResponse(w, err)
return
}
err = s.storage.WriteAll(volume, filePath, buf)
err := s.storage.WriteAll(volume, filePath, io.LimitReader(r.Body, r.ContentLength))
if err != nil {
s.writeErrorResponse(w, err)
}

View file

@ -54,8 +54,8 @@ import (
"github.com/fatih/color"
"github.com/gorilla/mux"
"github.com/minio/minio-go/pkg/s3signer"
"github.com/minio/minio-go/pkg/s3utils"
"github.com/minio/minio-go/v6/pkg/s3signer"
"github.com/minio/minio-go/v6/pkg/s3utils"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/bpool"
@ -174,7 +174,7 @@ func prepareFS() (ObjectLayer, string, error) {
return obj, fsDirs[0], nil
}
func prepareXL32() (ObjectLayer, []string, error) {
func prepareXLSets32() (ObjectLayer, []string, error) {
fsDirs1, err := getRandomDisks(16)
if err != nil {
return nil, nil, err
@ -1704,7 +1704,7 @@ func prepareTestBackend(instanceType string) (ObjectLayer, []string, error) {
switch instanceType {
// Total number of disks for XL sets backend is set to 32.
case XLSetsTestStr:
return prepareXL32()
return prepareXLSets32()
// Total number of disks for XL backend is set to 16.
case XLTestStr:
return prepareXL16()
@ -1986,7 +1986,7 @@ func ExecObjectLayerTest(t TestErrHandler, objTest objTestType) {
// Executing the object layer tests for single node setup.
objTest(objLayer, FSTestStr, t)
objLayer, fsDirs, err := prepareXL16()
objLayer, fsDirs, err := prepareXLSets32()
if err != nil {
t.Fatalf("Initialization of object layer failed for XL setup: %s", err)
}

View file

@ -85,6 +85,3 @@ var errNoSuchPolicy = errors.New("Specified canned policy does not exist")
// error returned when access is denied.
var errAccessDenied = errors.New("Do not have enough permissions to access this resource")
// errNetworkConnReset - connection reset by peer
var errNetworkConnReset = errors.New("connection reset by peer")

View file

@ -36,9 +36,9 @@ import (
snappy "github.com/golang/snappy"
"github.com/gorilla/mux"
"github.com/gorilla/rpc/v2/json2"
miniogopolicy "github.com/minio/minio-go/pkg/policy"
"github.com/minio/minio-go/pkg/s3utils"
"github.com/minio/minio-go/pkg/set"
miniogopolicy "github.com/minio/minio-go/v6/pkg/policy"
"github.com/minio/minio-go/v6/pkg/s3utils"
"github.com/minio/minio-go/v6/pkg/set"
"github.com/minio/minio/browser"
"github.com/minio/minio/cmd/crypto"
"github.com/minio/minio/cmd/logger"
@ -69,6 +69,7 @@ type ServerInfoRep struct {
MinioPlatform string
MinioRuntime string
MinioGlobalInfo map[string]interface{}
MinioUserInfo map[string]interface{}
UIVersion string `json:"uiVersion"`
}
@ -97,17 +98,17 @@ func (web *webAPIHandlers) ServerInfo(r *http.Request, args *WebGenericArgs, rep
reply.MinioVersion = Version
reply.MinioGlobalInfo = getGlobalInfo()
// If ENV creds are not set and incoming user is not owner
// disable changing credentials.
v, ok := reply.MinioGlobalInfo["isEnvCreds"].(bool)
if ok && !v {
reply.MinioGlobalInfo["isEnvCreds"] = !owner
}
// if etcd is set disallow changing credentials through UI
// if etcd is set, disallow changing credentials through UI for owner
if globalEtcdClient != nil {
reply.MinioGlobalInfo["isEnvCreds"] = true
}
// Check if the user is IAM user
reply.MinioUserInfo = map[string]interface{}{
"isIAMUser": !owner,
}
reply.MinioMemory = mem
reply.MinioPlatform = platform
reply.MinioRuntime = goruntime
@ -768,8 +769,10 @@ func (web webAPIHandlers) GenerateAuth(r *http.Request, args *WebGenericArgs, re
// SetAuthArgs - argument for SetAuth
type SetAuthArgs struct {
AccessKey string `json:"accessKey"`
SecretKey string `json:"secretKey"`
CurrentAccessKey string `json:"currentAccessKey"`
CurrentSecretKey string `json:"currentSecretKey"`
NewAccessKey string `json:"newAccessKey"`
NewSecretKey string `json:"newSecretKey"`
}
// SetAuthReply - reply for SetAuth
@ -781,68 +784,85 @@ type SetAuthReply struct {
// SetAuth - Set accessKey and secretKey credentials.
func (web *webAPIHandlers) SetAuth(r *http.Request, args *SetAuthArgs, reply *SetAuthReply) error {
_, owner, authErr := webRequestAuthenticate(r)
claims, owner, authErr := webRequestAuthenticate(r)
if authErr != nil {
return toJSONError(authErr)
}
// If creds are set through ENV disallow changing credentials.
if globalIsEnvCreds || globalWORMEnabled || !owner || globalEtcdClient != nil {
// When WORM is enabled, disallow changing credentials for owner and user
if globalWORMEnabled {
return toJSONError(errChangeCredNotAllowed)
}
creds, err := auth.CreateCredentials(args.AccessKey, args.SecretKey)
if err != nil {
return toJSONError(err)
if owner {
if globalIsEnvCreds || globalEtcdClient != nil {
return toJSONError(errChangeCredNotAllowed)
}
// get Current creds and verify
prevCred := globalServerConfig.GetCredential()
if prevCred.AccessKey != args.CurrentAccessKey || prevCred.SecretKey != args.CurrentSecretKey {
return errIncorrectCreds
}
creds, err := auth.CreateCredentials(args.NewAccessKey, args.NewSecretKey)
if err != nil {
return toJSONError(err)
}
// Acquire lock before updating global configuration.
globalServerConfigMu.Lock()
defer globalServerConfigMu.Unlock()
// Update credentials in memory
prevCred = globalServerConfig.SetCredential(creds)
// Persist updated credentials.
if err = saveServerConfig(context.Background(), newObjectLayerFn(), globalServerConfig); err != nil {
// Save the current creds when failed to update.
globalServerConfig.SetCredential(prevCred)
logger.LogIf(context.Background(), err)
return toJSONError(err)
}
reply.Token, err = authenticateWeb(args.NewAccessKey, args.NewSecretKey)
if err != nil {
return toJSONError(err)
}
} else {
// for IAM users, access key cannot be updated
// claims.Subject is used instead of accesskey from args
prevCred, ok := globalIAMSys.GetUser(claims.Subject)
if !ok {
return errInvalidAccessKeyID
}
// Throw error when wrong secret key is provided
if prevCred.SecretKey != args.CurrentSecretKey {
return errIncorrectCreds
}
creds, err := auth.CreateCredentials(claims.Subject, args.NewSecretKey)
if err != nil {
return toJSONError(err)
}
err = globalIAMSys.SetUserSecretKey(creds.AccessKey, creds.SecretKey)
if err != nil {
return toJSONError(err)
}
reply.Token, err = authenticateWeb(creds.AccessKey, creds.SecretKey)
if err != nil {
return toJSONError(err)
}
}
// Acquire lock before updating global configuration.
globalServerConfigMu.Lock()
defer globalServerConfigMu.Unlock()
// Update credentials in memory
prevCred := globalServerConfig.SetCredential(creds)
// Persist updated credentials.
if err = saveServerConfig(context.Background(), newObjectLayerFn(), globalServerConfig); err != nil {
// Save the current creds when failed to update.
globalServerConfig.SetCredential(prevCred)
logger.LogIf(context.Background(), err)
return toJSONError(err)
}
reply.Token, err = authenticateWeb(creds.AccessKey, creds.SecretKey)
if err != nil {
return toJSONError(err)
}
reply.UIVersion = browser.UIVersion
return nil
}
// GetAuthReply - Reply current credentials.
type GetAuthReply struct {
AccessKey string `json:"accessKey"`
SecretKey string `json:"secretKey"`
UIVersion string `json:"uiVersion"`
}
// GetAuth - return accessKey and secretKey credentials.
func (web *webAPIHandlers) GetAuth(r *http.Request, args *WebGenericArgs, reply *GetAuthReply) error {
_, owner, authErr := webRequestAuthenticate(r)
if authErr != nil {
return toJSONError(authErr)
}
if !owner {
return toJSONError(errAccessDenied)
}
creds := globalServerConfig.GetCredential()
reply.AccessKey = creds.AccessKey
reply.SecretKey = creds.SecretKey
reply.UIVersion = browser.UIVersion
return nil
}
// URLTokenReply contains the reply for CreateURLToken.
type URLTokenReply struct {
Token string `json:"token"`

View file

@ -35,7 +35,7 @@ import (
jwtgo "github.com/dgrijalva/jwt-go"
humanize "github.com/dustin/go-humanize"
miniogopolicy "github.com/minio/minio-go/pkg/policy"
miniogopolicy "github.com/minio/minio-go/v6/pkg/policy"
"github.com/minio/minio/pkg/hash"
"github.com/minio/minio/pkg/policy"
"github.com/minio/minio/pkg/policy/condition"
@ -701,18 +701,21 @@ func testSetAuthWebHandler(obj ObjectLayer, instanceType string, t TestErrHandle
}
testCases := []struct {
username string
password string
success bool
currentAccessKey string
currentSecretKey string
newAccessKey string
newSecretKey string
success bool
}{
{"", "", false},
{"1", "1", false},
{"azerty", "foooooooooooooo", true},
{"", "", "", "", false},
{"1", "1", "1", "1", false},
{credentials.AccessKey, credentials.SecretKey, "azerty", "bar", false},
{credentials.AccessKey, credentials.SecretKey, "azerty", "foooooooooooooo", true},
}
// Iterating over the test cases, calling the function under test and asserting the response.
for i, testCase := range testCases {
setAuthRequest := SetAuthArgs{AccessKey: testCase.username, SecretKey: testCase.password}
setAuthRequest := SetAuthArgs{CurrentAccessKey: testCase.currentAccessKey, CurrentSecretKey: testCase.currentSecretKey, NewAccessKey: testCase.newAccessKey, NewSecretKey: testCase.newSecretKey}
setAuthReply := &SetAuthReply{}
req, err := newTestWebRPCRequest("Web.SetAuth", authorization, setAuthRequest)
if err != nil {
@ -735,42 +738,6 @@ func testSetAuthWebHandler(obj ObjectLayer, instanceType string, t TestErrHandle
}
}
// Wrapper for calling Get Auth Handler
func TestWebHandlerGetAuth(t *testing.T) {
ExecObjectLayerTest(t, testGetAuthWebHandler)
}
// testGetAuthWebHandler - Test GetAuth web handler
func testGetAuthWebHandler(obj ObjectLayer, instanceType string, t TestErrHandler) {
// Register the API end points with XL/FS object layer.
apiRouter := initTestWebRPCEndPoint(obj)
credentials := globalServerConfig.GetCredential()
rec := httptest.NewRecorder()
authorization, err := getWebRPCToken(apiRouter, credentials.AccessKey, credentials.SecretKey)
if err != nil {
t.Fatal("Cannot authenticate")
}
getAuthRequest := WebGenericArgs{}
getAuthReply := &GetAuthReply{}
req, err := newTestWebRPCRequest("Web.GetAuth", authorization, getAuthRequest)
if err != nil {
t.Fatalf("Failed to create HTTP request: <ERROR> %v", err)
}
apiRouter.ServeHTTP(rec, req)
if rec.Code != http.StatusOK {
t.Fatalf("Expected the response status to be 200, but instead found `%d`", rec.Code)
}
err = getTestWebRPCResponse(rec, &getAuthReply)
if err != nil {
t.Fatalf("Failed, %v", err)
}
if getAuthReply.AccessKey != credentials.AccessKey || getAuthReply.SecretKey != credentials.SecretKey {
t.Fatalf("Failed to get correct auth keys")
}
}
func TestWebCreateURLToken(t *testing.T) {
ExecObjectLayerTest(t, testCreateURLToken)
}
@ -1518,7 +1485,7 @@ func TestWebCheckAuthorization(t *testing.T) {
webRPCs := []string{
"ServerInfo", "StorageInfo", "MakeBucket",
"ListBuckets", "ListObjects", "RemoveObject",
"GenerateAuth", "SetAuth", "GetAuth",
"GenerateAuth", "SetAuth",
"GetBucketPolicy", "SetBucketPolicy", "ListAllBucketPolicies",
"PresignedGet",
}

View file

@ -858,10 +858,21 @@ func leastEntry(entriesCh []FileInfoCh, readQuorum int) (FileInfo, bool) {
// mergeEntriesCh - merges FileInfo channel to entries up to maxKeys.
func mergeEntriesCh(entriesCh []FileInfoCh, maxKeys int, readQuorum int) (entries FilesInfo) {
for i := 0; i < maxKeys; {
var fi FileInfo
fi, entries.IsTruncated = leastEntry(entriesCh, readQuorum)
if !entries.IsTruncated {
var i = 0
for {
fi, valid := leastEntry(entriesCh, readQuorum)
if !valid {
break
}
if i == maxKeys {
entries.IsTruncated = true
// Re-insert the last entry so it can be
// listed in the next listing iteration.
for j := range entriesCh {
if !entriesCh[j].Valid {
entriesCh[j].Push(fi)
}
}
break
}
entries.Files = append(entries.Files, fi)

View file

@ -21,7 +21,7 @@ import (
"sort"
"sync"
"github.com/minio/minio-go/pkg/s3utils"
"github.com/minio/minio-go/v6/pkg/s3utils"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/policy"
)

View file

@ -17,6 +17,7 @@
package cmd
import (
"bytes"
"context"
"crypto/sha256"
"encoding/hex"
@ -437,7 +438,7 @@ func writeXLMetadata(ctx context.Context, disk StorageAPI, bucket, prefix string
}
// Persist marshaled data.
err = disk.WriteAll(bucket, jsonFile, metadataBytes)
err = disk.WriteAll(bucket, jsonFile, bytes.NewReader(metadataBytes))
logger.LogIf(ctx, err)
return err
}

233
vendor/github.com/minio/minio/docs/bigdata/README.md generated vendored Normal file
View file

@ -0,0 +1,233 @@
# **Disaggregated HDP Spark and Hive with MinIO**
## **1. Cloud-native Architecture**
![cloud-native](https://github.com/minio/minio/blob/master/docs/bigdata/images/image1.png?raw=true "cloud native architecture")
Kubernetes manages stateless Spark and Hive containers elastically on the compute nodes. Spark has native scheduler integration with Kubernetes. Hive, for legacy reasons, uses YARN scheduler on top of Kubernetes.
All access to MinIO object storage is via S3/SQL SELECT API. In addition to the compute nodes, MinIO containers are also managed by Kubernetes as stateful containers with local storage (JBOD/JBOF) mapped as persistent local volumes. This architecture enables multi-tenant MinIO, allowing isolation of data between customers.
MinIO also supports multi-cluster, multi-site federation similar to AWS regions and tiers. Using MinIO Information Lifecycle Management (ILM), you can configure data to be tiered between NVMe-based hot storage and HDD-based warm storage. All data is encrypted with a per-object key. Access Control and Identity Management between the tenants are managed by MinIO using OpenID Connect or Kerberos/LDAP/AD.
## **2. Prerequisites**
* Install Hortonworks Distribution using this [guide.](https://docs.hortonworks.com/HDPDocuments/Ambari-2.7.1.0/bk_ambari-installation/content/ch_Installing_Ambari.html)
* [Setup Ambari](https://docs.hortonworks.com/HDPDocuments/Ambari-2.7.1.0/bk_ambari-installation/content/set_up_the_ambari_server.html) which automatically sets up YARN
* [Installing Spark](https://docs.hortonworks.com/HDPDocuments/HDP3/HDP-3.0.1/installing-spark/content/installing_spark.html)
* Install MinIO Distributed Server using one of the guides below.
* [Deployment based on Kubernetes](https://docs.min.io/docs/deploy-minio-on-kubernetes.html#minio-distributed-server-deployment)
* [Deployment based on MinIO Helm Chart](https://github.com/helm/charts/tree/master/stable/minio)
## **3. Configure Hadoop, Spark, Hive to use MinIO**
After successful installation navigate to the Ambari UI `http://<ambari-server>:8080/` and login using the default credentials: [**_username: admin, password: admin_**]
![ambari-login](https://github.com/minio/minio/blob/master/docs/bigdata/images/image3.png?raw=true "ambari login")
### **3.1 Configure Hadoop**
Navigate to **Services** -> **HDFS** -> **CONFIGS** -> **ADVANCED** as shown below
![hdfs-configs](https://github.com/minio/minio/blob/master/docs/bigdata/images/image2.png?raw=true "hdfs advanced configs")
Navigate to **Custom core-site** to configure MinIO parameters for `_s3a_` connector
![s3a-config](https://github.com/minio/minio/blob/master/docs/bigdata/images/image5.png?raw=true "custom core-site")
Add the following optimal entries for _core-site.xml_ to configure _s3a_ with **MinIO** (an XML sketch follows the list). The most important options here are:
* _fs.s3a.access.key=minio_ (Access Key to access MinIO instance, this is obtained after the deployment on k8s)
* _fs.s3a.secret.key=minio123_ (Secret Key to access MinIO instance, this is obtained after the deployment on k8s)
* _fs.s3a.endpoint=`http://minio-address/`_
* _fs.s3a.path.style.access=true_
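If you maintain _core-site.xml_ by hand instead of entering values through the Ambari panel, these entries take the standard Hadoop XML property form — a minimal sketch, with the endpoint as a placeholder for your MinIO address:
```
<configuration>
  <property>
    <name>fs.s3a.access.key</name>
    <value>minio</value>
  </property>
  <property>
    <name>fs.s3a.secret.key</name>
    <value>minio123</value>
  </property>
  <property>
    <name>fs.s3a.endpoint</name>
    <value>http://minio-address/</value>
  </property>
  <property>
    <name>fs.s3a.path.style.access</name>
    <value>true</value>
  </property>
</configuration>
```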
![s3a-config](https://github.com/minio/minio/blob/master/docs/bigdata/images/image4.png?raw=true "custom core-site s3a")
The rest of the optimization options are discussed in the links below:
* [https://hadoop.apache.org/docs/current/hadoop-aws/tools/hadoop-aws/index.html](https://hadoop.apache.org/docs/current/hadoop-aws/tools/hadoop-aws/index.html)
* [https://hadoop.apache.org/docs/r3.1.1/hadoop-aws/tools/hadoop-aws/committers.html](https://hadoop.apache.org/docs/r3.1.1/hadoop-aws/tools/hadoop-aws/committers.html)
Once the config changes are applied, proceed to restart **Hadoop** services.
![hdfs-services](https://github.com/minio/minio/blob/master/docs/bigdata/images/image7.png?raw=true "hdfs restart services")
### **3.2 Configure Spark2**
Navigate to **Services** -> **Spark2** -> **CONFIGS** as shown below
![spark-config](https://github.com/minio/minio/blob/master/docs/bigdata/images/image6.png?raw=true "spark config")
Navigate to “**Custom spark-defaults**” to configure MinIO parameters for `_s3a_` connector
![spark-config](https://github.com/minio/minio/blob/master/docs/bigdata/images/image9.png?raw=true "spark defaults")
Add the following optimal entries for _spark-defaults.conf_ to configure Spark with **MinIO** (a plain-text sketch follows the list).
* _spark.hadoop.fs.s3a.committer.magic.enabled=true_
* _spark.hadoop.fs.s3a.committer.name=magic_
* _spark.hadoop.fs.s3a.impl=org.apache.hadoop.fs.s3a.S3AFileSystem_
* _spark.hadoop.fs.s3a.path.style.access=true_
* _spark.hadoop.mapreduce.outputcommitter.factory.scheme.s3a=org.apache.hadoop.fs.s3a.commit.S3ACommitterFactory_
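Entered directly into _spark-defaults.conf_ rather than through the Ambari panel, these are plain `key=value` lines — the same entries, sketched without the surrounding emphasis markers:
```
spark.hadoop.fs.s3a.committer.magic.enabled=true
spark.hadoop.fs.s3a.committer.name=magic
spark.hadoop.fs.s3a.impl=org.apache.hadoop.fs.s3a.S3AFileSystem
spark.hadoop.fs.s3a.path.style.access=true
spark.hadoop.mapreduce.outputcommitter.factory.scheme.s3a=org.apache.hadoop.fs.s3a.commit.S3ACommitterFactory
```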
![spark-config](https://github.com/minio/minio/blob/master/docs/bigdata/images/image8.png?raw=true "spark custom configuration")
Once the config changes are applied, proceed to restart **Spark** services.
![spark-config](https://github.com/minio/minio/blob/master/docs/bigdata/images/image12.png?raw=true "spark restart services")
### **3.3 Configure Hive**
Navigate to **Services** -> **Hive** -> **CONFIGS**-> **ADVANCED** as shown below
![hive-config](https://github.com/minio/minio/blob/master/docs/bigdata/images/image10.png?raw=true "hive advanced config")
Navigate to “**Custom hive-site**” to configure MinIO parameters for `_s3a_` connector
![hive-config](https://github.com/minio/minio/blob/master/docs/bigdata/images/image11.png?raw=true "hive advanced config")
Add the following optimal entries for `hive-site.xml` to configure Hive with **MinIO** (an XML sketch follows below).
* _hive.blobstore.use.blobstore.as.scratchdir=true_
* _hive.exec.input.listing.max.threads=50_
* _hive.load.dynamic.partitions.thread=25_
* _hive.metastore.fshandler.threads=50_
* _hive.mv.files.threads=40_
* _mapreduce.input.fileinputformat.list-status.num-threads=50_
For more information about these options please visit [https://www.cloudera.com/documentation/enterprise/5-11-x/topics/admin_hive_on_s3_tuning.html](https://www.cloudera.com/documentation/enterprise/5-11-x/topics/admin_hive_on_s3_tuning.html)
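As with _core-site.xml_ above, when editing `hive-site.xml` directly these entries take the Hadoop XML property form — a short sketch showing the first two; the remaining options follow the same pattern:
```
<property>
  <name>hive.blobstore.use.blobstore.as.scratchdir</name>
  <value>true</value>
</property>
<property>
  <name>hive.exec.input.listing.max.threads</name>
  <value>50</value>
</property>
```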
![hive-config](https://github.com/minio/minio/blob/master/docs/bigdata/images/image13.png?raw=true "hive advanced custom config")
Once the config changes are applied, proceed to restart all Hive services.
![hive-config](https://github.com/minio/minio/blob/master/docs/bigdata/images/image14.png?raw=true "restart hive services")
## **4. Run Sample Applications**
After installing Hive, Hadoop and Spark successfully, we can now proceed to run some sample applications to see if they are configured appropriately. We can use Spark Pi and Spark WordCount programs to validate our Spark installation. We can also explore how to run Spark jobs from the command line and Spark shell.
### **4.1 Spark Pi**
Test the Spark installation by running the following compute-intensive example, which calculates pi by “throwing darts” at a circle. The program generates points in the unit square ((0,0) to (1,1)) and counts how many points fall within the quarter of the unit circle inside that square; since that region has area π/4, the program approximates pi as 4 × (points inside the circle / total points).
Follow these steps to run the Spark Pi example:
* Login as user **spark**.
* When the job runs, the library can now use **MinIO** during intermediate processing.
* Navigate to a node with the Spark client and access the spark2-client directory:
```
cd /usr/hdp/current/spark2-client
su spark
```
* Run the Apache Spark Pi job in yarn-client mode, using code from **org.apache.spark**:
```
./bin/spark-submit --class org.apache.spark.examples.SparkPi \
--master yarn-client \
--num-executors 1 \
--driver-memory 512m \
--executor-memory 512m \
--executor-cores 1 \
examples/jars/spark-examples*.jar 10
```
The job should produce an output as shown below. Note the value of pi in the output.
```
17/03/22 23:21:10 INFO DAGScheduler: Job 0 finished: reduce at SparkPi.scala:38, took 1.302805 s
Pi is roughly 3.1445191445191445
```
Job status can also be viewed in a browser by navigating to the YARN ResourceManager Web UI and clicking on job history server information.
### **4.2 WordCount**
WordCount is a simple program that counts how often a word occurs in a text file. The code builds a dataset of (String, Int) pairs called counts, and saves the dataset to a file.
The following example submits WordCount code to the Scala shell. Select an input file for the Spark WordCount example. We can use any text file as input.
* Login as user **spark**.
* When the job runs, the library can now use **MinIO** during intermediate processing.
* Navigate to a node with Spark client and access the spark2-client directory:
```
cd /usr/hdp/current/spark2-client
su spark
```
The following example uses _log4j.properties_ as the input file:
#### **4.2.1 Upload the input file to the object store:**
```
hadoop fs -copyFromLocal /etc/hadoop/conf/log4j.properties s3a://testbucket/testdata
```
#### **4.2.2 Run the Spark shell:**
```
./bin/spark-shell --master yarn-client --driver-memory 512m --executor-memory 512m
```
The command should produce output similar to the following (with additional status messages):
```
Spark context Web UI available at http://172.26.236.247:4041
Spark context available as 'sc' (master = yarn, app id = application_1490217230866_0002).
Spark session available as 'spark'.
Welcome to
____ __
/ __/__ ___ _____/ /__
_\ \/ _ \/ _ `/ __/ '_/
/___/ .__/\_,_/_/ /_/\_\ version 2.1.0.2.6.0.0-598
/_/
Using Scala version 2.11.8 (Java HotSpot(TM) 64-Bit Server VM, Java 1.8.0_112)
Type in expressions to have them evaluated.
Type :help for more information.
scala>
```
* At the _scala>_ prompt, submit the job by typing the following commands, replacing node names, file names, and file locations with your values:
```
scala> val file = sc.textFile("s3a://testbucket/testdata")
file: org.apache.spark.rdd.RDD[String] = s3a://testbucket/testdata MapPartitionsRDD[1] at textFile at <console>:24
scala> val counts = file.flatMap(line => line.split(" ")).map(word => (word, 1)).reduceByKey(_ + _)
counts: org.apache.spark.rdd.RDD[(String, Int)] = ShuffledRDD[4] at reduceByKey at <console>:25
scala> counts.saveAsTextFile("s3a://testbucket/wordcount")
```
Use one of the following approaches to view job output:
View output in the Scala shell:
```
scala> counts.count()
364
```
To view the output from MinIO, exit the Scala shell and list the WordCount output:
```
hadoop fs -ls s3a://testbucket/wordcount
```
The output should be similar to the following:
```
Found 3 items
-rw-rw-rw- 1 spark spark 0 2019-05-04 01:36 s3a://testbucket/wordcount/_SUCCESS
-rw-rw-rw- 1 spark spark 4956 2019-05-04 01:36 s3a://testbucket/wordcount/part-00000
-rw-rw-rw- 1 spark spark 5616 2019-05-04 01:36 s3a://testbucket/wordcount/part-00001
```

docs/bigdata/images: 14 binary image files added (architecture and Ambari configuration screenshots, 11 KiB–322 KiB; not shown).

View file

@ -931,10 +931,23 @@ The MinIO server configuration file is stored on the backend in json format. Upd
"1": {
"enable": true,
"brokers": ["localhost:9092"],
"topic": "bucketevents"
"topic": "bucketevents",
"queueDir": "",
"queueLimit": 0,
"tls": {
"enable": false,
"skipVerify": false,
"clientAuth": 0
},
"sasl": {
"enable": false,
"username": "",
"password": ""
}
}
}
```
MinIO supports a persistent event store. The persistent store backs up events when the Kafka broker goes offline and replays them once the broker comes back online. The event store is configured by setting a directory path in the `queueDir` field and the maximum number of events held in that directory in the `queueLimit` field. For example, `queueDir` can be `/home/events` and `queueLimit` can be `1000`. By default, `queueLimit` is set to 10000.
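A sketch of the kafka target fragment from the example above with the persistent store enabled (the path and limit are the illustrative values just mentioned):
```
"kafka": {
    "1": {
        "enable": true,
        "brokers": ["localhost:9092"],
        "topic": "bucketevents",
        "queueDir": "/home/events",
        "queueLimit": 1000
    }
}
```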
To update the configuration, use the `mc admin config get` command to fetch the current configuration file for the MinIO deployment in JSON format and save it locally, as in the sketch below.
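Assuming the deployment has been aliased as `myminio` with `mc config host add`, the round trip might look like this sketch:
```
mc admin config get myminio > /tmp/minio-config.json
# edit /tmp/minio-config.json, e.g. fill in the kafka target shown above
mc admin config set myminio < /tmp/minio-config.json
```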

View file

@ -64,6 +64,8 @@
"enable": false,
"brokers": null,
"topic": "",
"queueDir": "",
"queueLimit": 0,
"tls": {
"enable": false,
"skipVerify": false,
@ -73,7 +75,7 @@
"enable": false,
"username": "",
"password": ""
}
}
}
},
"mqtt": {

View file

@ -43,6 +43,7 @@ mc ls myb2
### Known limitations
Gateway inherits the following B2 limitations:
- PutObject() does not return an md5sum of the uploaded file as the ETag response. Apps that check validity will fail.
- No support for CopyObject S3 API (There are no equivalent APIs available on Backblaze B2).
- No support for CopyObjectPart S3 API (There are no equivalent APIs available on Backblaze B2).
- Only a read-only bucket policy is supported at the bucket level; all other variations will return an API NotImplemented error.

Some files were not shown because too many files have changed in this diff.