Remove panic() and handle it appropriately (minio#5807)
This is an effort to remove panic() from the source.
Add a new call, CriticalIf, that calls LogIf and exits.
Replace panics with one of CriticalIf, FatalIf, or an error return.
ebozduman authored and kannappanr committed Apr 20, 2018
1 parent 846f3e8 commit f16bfda
Showing 21 changed files with 129 additions and 128 deletions.
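In short, the commit converts each panic into one of three forms. The sketch below is illustrative only: logger.CriticalIf, logger.FatalIf and xnet.ParseHost come from this diff, while the wrapper functions (resolvePeer, initSecret, readSecret) are hypothetical and not part of the change.

package example

import (
	"context"
	"crypto/rand"
	"fmt"
	"io"

	"github.com/minio/minio/cmd/logger"
	xnet "github.com/minio/minio/pkg/net"
)

// 1. Run-time invariant violation: log with a request context and exit.
func resolvePeer(ctx context.Context, addr string) *xnet.Host {
	host, err := xnet.ParseHost(addr)
	logger.CriticalIf(ctx, err) // logs via LogIf and calls os.Exit(1) if err != nil
	return host
}

// 2. Startup/input validation: plain fatal message, no stack trace.
func initSecret() []byte {
	secret, err := readSecret(32)
	logger.FatalIf(err, "Unable to generate a random secret")
	return secret
}

// 3. Recoverable condition: propagate the error to the caller.
func readSecret(size int) ([]byte, error) {
	data := make([]byte, size)
	if _, err := io.ReadFull(rand.Reader, data); err != nil {
		return nil, fmt.Errorf("insufficient random data: %v", err)
	}
	return data, nil
}

The choice between the three mirrors the comments added in cmd/logger/logger.go: CriticalIf for fatal run-time conditions, FatalIf for input validation failures at startup, and a plain error return wherever the caller can recover.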
12 changes: 9 additions & 3 deletions cmd/bucket-notification-handlers.go
@@ -220,8 +220,12 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
return
}

host := xnet.MustParseHost(r.RemoteAddr)
target := target.NewHTTPClientTarget(*host, w)
host, e := xnet.ParseHost(r.RemoteAddr)
logger.CriticalIf(ctx, e)

target, e := target.NewHTTPClientTarget(*host, w)
logger.CriticalIf(ctx, e)

rulesMap := event.NewRulesMap(eventNames, pattern, target.ID())

if err := globalNotificationSys.AddRemoteTarget(bucketName, target, rulesMap); err != nil {
@@ -233,7 +237,9 @@ func (api objectAPIHandlers) ListenBucketNotificationHandler(w http.ResponseWrit
defer globalNotificationSys.RemoveRemoteTarget(bucketName, target.ID())
defer globalNotificationSys.RemoveRulesMap(bucketName, rulesMap)

thisAddr := xnet.MustParseHost(GetLocalPeer(globalEndpoints))
thisAddr, e := xnet.ParseHost(GetLocalPeer(globalEndpoints))
logger.CriticalIf(ctx, e)

if err := SaveListener(objAPI, bucketName, eventNames, pattern, target.ID(), *thisAddr); err != nil {
logger.GetReqInfo(ctx).AppendTags("target", target.ID().Name)
logger.LogIf(ctx, err)
7 changes: 6 additions & 1 deletion cmd/config-current.go
@@ -22,6 +22,8 @@ import (
"reflect"
"sync"

"github.com/minio/minio/cmd/logger"

"github.com/minio/minio/pkg/auth"
"github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/event/target"
@@ -167,9 +169,12 @@ func (s *serverConfig) ConfigDiff(t *serverConfig) string {
}

func newServerConfig() *serverConfig {
cred, err := auth.GetNewCredentials()
logger.FatalIf(err, "")

srvCfg := &serverConfig{
Version: serverConfigVersion,
Credential: auth.MustGetNewCredentials(),
Credential: cred,
Region: globalMinioDefaultRegion,
Browser: true,
StorageClass: storageClassConfig{
12 changes: 9 additions & 3 deletions cmd/encryption-v1.go
@@ -18,6 +18,7 @@ package cmd

import (
"bytes"
"context"
"crypto/hmac"
"crypto/md5"
"crypto/rand"
@@ -27,7 +28,9 @@ import (
"errors"
"io"
"net/http"
"strconv"

"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/ioutil"
sha256 "github.com/minio/sha256-simd"
"github.com/minio/sio"
@@ -739,10 +742,9 @@ func (li *ListPartsInfo) IsEncrypted() bool {
// DecryptedSize returns the size of the object after decryption in bytes.
// It returns an error if the object is not encrypted or marked as encrypted
// but has an invalid size.
// DecryptedSize panics if the referred object is not encrypted.
func (o *ObjectInfo) DecryptedSize() (int64, error) {
if !o.IsEncrypted() {
panic("cannot compute decrypted size of an object which is not encrypted")
return 0, errors.New("Cannot compute decrypted size of an unencrypted object")
}
size, err := sio.DecryptedSize(uint64(o.Size))
if err != nil {
@@ -757,7 +759,11 @@ func (o *ObjectInfo) DecryptedSize() (int64, error) {
func (o *ObjectInfo) EncryptedSize() int64 {
size, err := sio.EncryptedSize(uint64(o.Size))
if err != nil {
panic(err) // Since AWS S3 allows parts to be 5GB at most this cannot happen - sio max. size is 256 TB
// This cannot happen since AWS S3 allows parts to be 5GB at most
// sio max. size is 256 TB
reqInfo := (&logger.ReqInfo{}).AppendTags("size", strconv.FormatUint(size, 10))
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.CriticalIf(ctx, err)
}
return int64(size)
}
16 changes: 10 additions & 6 deletions cmd/gateway/azure/gateway-azure.go
@@ -344,21 +344,21 @@ func azureToObjectError(err error, params ...string) error {
return err
}

// mustGetAzureUploadID - returns new upload ID which is hex encoded 8 bytes random value.
// getAzureUploadID - returns new upload ID which is hex encoded 8 bytes random value.
// this 8 byte restriction is needed because Azure block id has a restriction of length
// upto 8 bytes.
func mustGetAzureUploadID() string {
func getAzureUploadID() (string, error) {
var id [8]byte

n, err := io.ReadFull(rand.Reader, id[:])
if err != nil {
panic(fmt.Errorf("unable to generate upload ID for azure. %s", err))
return "", err
}
if n != len(id) {
panic(fmt.Errorf("insufficient random data (expected: %d, read: %d)", len(id), n))
return "", fmt.Errorf("Unexpected random data size. Expected: %d, read: %d)", len(id), n)
}

return hex.EncodeToString(id[:])
return hex.EncodeToString(id[:]), nil
}

// checkAzureUploadID - returns error in case of given string is upload ID.
@@ -724,7 +724,11 @@ func (a *azureObjects) checkUploadIDExists(ctx context.Context, bucketName, obje

// NewMultipartUpload - Use Azure equivalent CreateBlockBlob.
func (a *azureObjects) NewMultipartUpload(ctx context.Context, bucket, object string, metadata map[string]string) (uploadID string, err error) {
uploadID = mustGetAzureUploadID()
uploadID, err = getAzureUploadID()
if err != nil {
logger.LogIf(ctx, err)
return "", err
}
metadataObject := getAzureMetadataObjectName(object, uploadID)

var jsonData []byte
5 changes: 2 additions & 3 deletions cmd/generic-handlers.go
@@ -25,6 +25,7 @@ import (
"time"

humanize "github.com/dustin/go-humanize"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/sys"
"github.com/rs/cors"
"golang.org/x/time/rate"
@@ -602,9 +603,7 @@ func (h pathValidityHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
// cancelled immediately.
func setRateLimitHandler(h http.Handler) http.Handler {
_, maxLimit, err := sys.GetMaxOpenFileLimit()
if err != nil {
panic(err)
}
logger.CriticalIf(context.Background(), err)
// Burst value is set to 1 to allow only maxOpenFileLimit
// requests to happen at once.
l := rate.NewLimiter(rate.Limit(maxLimit), 1)
5 changes: 4 additions & 1 deletion cmd/jwt_test.go
@@ -30,7 +30,10 @@ func testAuthenticate(authType string, t *testing.T) {
t.Fatalf("unable initialize config file, %s", err)
}
defer os.RemoveAll(testPath)
cred := auth.MustGetNewCredentials()
cred, err := auth.GetNewCredentials()
if err != nil {
t.Fatalf("Error getting new credentials: %s", err)
}
globalServerConfig.SetCredential(cred)

// Define test cases.
12 changes: 12 additions & 0 deletions cmd/logger/logger.go
@@ -314,7 +314,19 @@ func LogIf(ctx context.Context, err error) {
fmt.Println(output)
}

// CriticalIf :
// Like LogIf with exit
// It'll be called for fatal error conditions during run-time
func CriticalIf(ctx context.Context, err error) {
if err != nil {
LogIf(ctx, err)
os.Exit(1)
}
}

// FatalIf :
// Just fatal error message, no stack trace
// It'll be called for input validation failures
func FatalIf(err error, msg string, data ...interface{}) {
if err != nil {
if msg != "" {
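As a usage note, the new CriticalIf helper is typically called with a context carrying request-scoped tags, as the cmd/encryption-v1.go hunk above does. A hedged sketch follows; the tag key/value and the checkInvariant wrapper are hypothetical, only the logger calls mirror this diff.

package example

import (
	"context"
	"errors"

	"github.com/minio/minio/cmd/logger"
)

// checkInvariant logs a tagged critical error and exits when ok is false.
func checkInvariant(ok bool) {
	if ok {
		return
	}
	// Attach request-scoped tags so the log line carries context.
	reqInfo := (&logger.ReqInfo{}).AppendTags("check", "invariant")
	ctx := logger.SetReqInfo(context.Background(), reqInfo)
	// CriticalIf logs through LogIf and then calls os.Exit(1).
	logger.CriticalIf(ctx, errors.New("invariant violated"))
}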
19 changes: 10 additions & 9 deletions cmd/notification.go
@@ -181,14 +181,15 @@ func (sys *NotificationSys) initListeners(ctx context.Context, objAPI ObjectLaye
}
defer objLock.Unlock()

reader, err := readConfig(ctx, objAPI, configFile)
if err != nil && !IsErrIgnored(err, errDiskNotFound, errNoSuchNotifications) {
return err
reader, e := readConfig(ctx, objAPI, configFile)
if e != nil && !IsErrIgnored(e, errDiskNotFound, errNoSuchNotifications) {
return e
}

listenerList := []ListenBucketNotificationArgs{}
if reader != nil {
if err = json.NewDecoder(reader).Decode(&listenerList); err != nil {
err := json.NewDecoder(reader).Decode(&listenerList)
if err != nil {
logger.LogIf(ctx, err)
return err
}
@@ -201,8 +202,8 @@ func (sys *NotificationSys) initListeners(ctx context.Context, objAPI ObjectLaye

activeListenerList := []ListenBucketNotificationArgs{}
for _, args := range listenerList {
var found bool
if found, err = isLocalHost(args.Addr.Name); err != nil {
found, err := isLocalHost(args.Addr.Name)
if err != nil {
logger.GetReqInfo(ctx).AppendTags("host", args.Addr.Name)
logger.LogIf(ctx, err)
return err
@@ -217,8 +218,8 @@ func (sys *NotificationSys) initListeners(ctx context.Context, objAPI ObjectLaye
return fmt.Errorf("unable to find PeerRPCClient by address %v in listener.json for bucket %v", args.Addr, bucketName)
}

var exist bool
if exist, err = rpcClient.RemoteTargetExist(bucketName, args.TargetID); err != nil {
exist, err := rpcClient.RemoteTargetExist(bucketName, args.TargetID)
if err != nil {
logger.GetReqInfo(ctx).AppendTags("targetID", args.TargetID.Name)
logger.LogIf(ctx, err)
return err
@@ -231,7 +232,7 @@ func (sys *NotificationSys) initListeners(ctx context.Context, objAPI ObjectLaye
target := NewPeerRPCClientTarget(bucketName, args.TargetID, rpcClient)
rulesMap := event.NewRulesMap(args.EventNames, args.Pattern, target.ID())
if err = sys.AddRemoteTarget(bucketName, target, rulesMap); err != nil {
logger.GetReqInfo(ctx).AppendTags("targetID", target.id.Name)
logger.GetReqInfo(ctx).AppendTags("targetName", target.id.Name)
logger.LogIf(ctx, err)
return err
}
2 changes: 1 addition & 1 deletion cmd/object-api-utils.go
@@ -168,7 +168,7 @@ func pathJoin(elem ...string) string {
func mustGetUUID() string {
uuid, err := uuid.New()
if err != nil {
panic(fmt.Sprintf("Random UUID generation failed. Error: %s", err))
logger.CriticalIf(context.Background(), err)
}

return uuid.String()
10 changes: 5 additions & 5 deletions cmd/peer-rpc.go
@@ -104,7 +104,7 @@ func (receiver *PeerRPCReceiver) ListenBucketNotification(args *ListenBucketNoti
rulesMap := event.NewRulesMap(args.EventNames, args.Pattern, target.ID())
if err := globalNotificationSys.AddRemoteTarget(args.BucketName, target, rulesMap); err != nil {
reqInfo := &logger.ReqInfo{BucketName: target.bucketName}
reqInfo.AppendTags("target", target.id.Name)
reqInfo.AppendTags("targetName", target.id.Name)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, err)
return err
@@ -155,14 +155,13 @@ func (receiver *PeerRPCReceiver) SendEvent(args *SendEventArgs, reply *SendEvent
if errMap := globalNotificationSys.send(args.BucketName, args.Event, args.TargetID); len(errMap) != 0 {
var found bool
if err, found = errMap[args.TargetID]; !found {
// errMap must be zero or one element map because we sent to only one target ID.
panic(fmt.Errorf("error for target %v not found in error map %+v", args.TargetID, errMap))
return fmt.Errorf("error for target %v not found in error map %+v", args.TargetID, errMap)
}
}

if err != nil {
reqInfo := (&logger.ReqInfo{}).AppendTags("Event", args.Event.EventName.String())
reqInfo.AppendTags("target", args.TargetID.Name)
reqInfo.AppendTags("targetName", args.TargetID.Name)
ctx := logger.SetReqInfo(context.Background(), reqInfo)
logger.LogIf(ctx, err)
}
@@ -275,7 +274,8 @@ func makeRemoteRPCClients(endpoints EndpointList) map[xnet.Host]*PeerRPCClient {
cred := globalServerConfig.GetCredential()
serviceEndpoint := path.Join(minioReservedBucketPath, s3Path)
for _, hostStr := range GetRemotePeers(endpoints) {
host := xnet.MustParseHost(hostStr)
host, err := xnet.ParseHost(hostStr)
logger.CriticalIf(context.Background(), err)
peerRPCClientMap[*host] = &PeerRPCClient{newAuthRPCClient(authConfig{
accessKey: cred.AccessKey,
secretKey: cred.SecretKey,
3 changes: 2 additions & 1 deletion cmd/web-handlers.go
@@ -406,7 +406,8 @@ func (web webAPIHandlers) GenerateAuth(r *http.Request, args *WebGenericArgs, re
if !isHTTPRequestValid(r) {
return toJSONError(errAuthentication)
}
cred := auth.MustGetNewCredentials()
cred, err := auth.GetNewCredentials()
logger.CriticalIf(context.Background(), err)
reply.AccessKey = cred.AccessKey
reply.SecretKey = cred.SecretKey
reply.UIVersion = browser.UIVersion
17 changes: 10 additions & 7 deletions cmd/xl-v1-metadata.go
@@ -21,6 +21,7 @@ import (
"crypto"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"hash"
"path"
@@ -42,7 +43,7 @@ var DefaultBitrotAlgorithm = HighwayHash256
func init() {
hh256Key, err := hex.DecodeString(magicHighwayHash256Key)
if err != nil || len(hh256Key) != highwayhash.Size {
panic("Failed to decode fixed magic HighwayHash256 key: Please report this bug at https://github.com/minio/minio/issues")
logger.CriticalIf(context.Background(), errors.New("Failed to decode fixed magic HighwayHash256 key. Please report this bug at https://github.com/minio/minio/issues"))
}

newBLAKE2b := func() hash.Hash {
@@ -80,11 +81,12 @@ var bitrotAlgorithms = map[BitrotAlgorithm]string{
HighwayHash256: "highwayhash256",
}

// New returns a new hash.Hash calculating the given bitrot algorithm. New panics
// if the algorithm is not supported or not linked into the binary.
// New returns a new hash.Hash calculating the given bitrot algorithm.
// New logs error and exits if the algorithm is not supported or not
// linked into the binary.
func (a BitrotAlgorithm) New() hash.Hash {
if _, ok := bitrotAlgorithms[a]; !ok {
panic(fmt.Sprintf("bitrot algorithm #%d is not supported", a))
logger.CriticalIf(context.Background(), errors.New("Unsupported bitrot algorithm"))
}
return crypto.Hash(a).New()
}
@@ -98,10 +100,11 @@ func (a BitrotAlgorithm) Available() bool {
// String returns the string identifier for a given bitrot algorithm.
// If the algorithm is not supported String panics.
func (a BitrotAlgorithm) String() string {
if name, ok := bitrotAlgorithms[a]; ok {
return name
name, ok := bitrotAlgorithms[a]
if !ok {
logger.CriticalIf(context.Background(), errors.New("Unsupported bitrot algorithm"))
}
panic(fmt.Sprintf("bitrot algorithm #%d is not supported", a))
return name
}

// BitrotAlgorithmFromString returns a bitrot algorithm from the given string representation.
27 changes: 17 additions & 10 deletions pkg/auth/credentials.go
@@ -81,30 +81,37 @@ func (cred Credentials) Equal(ccred Credentials) bool {
return cred.AccessKey == ccred.AccessKey && subtle.ConstantTimeCompare([]byte(cred.SecretKey), []byte(ccred.SecretKey)) == 1
}

// MustGetNewCredentials generates and returns new credential.
func MustGetNewCredentials() (cred Credentials) {
readBytes := func(size int) (data []byte) {
// GetNewCredentials generates and returns new credential.
func GetNewCredentials() (cred Credentials, err error) {
readBytes := func(size int) (data []byte, err error) {
data = make([]byte, size)
if n, err := rand.Read(data); err != nil {
panic(err)
var n int
if n, err = rand.Read(data); err != nil {
return nil, err
} else if n != size {
panic(fmt.Errorf("not enough data read. expected: %v, got: %v", size, n))
return nil, fmt.Errorf("Not enough data. Expected to read: %v bytes, got: %v bytes", size, n)
}
return
return data, nil
}

// Generate access key.
keyBytes := readBytes(accessKeyMaxLen)
keyBytes, err := readBytes(accessKeyMaxLen)
if err != nil {
return cred, err
}
for i := 0; i < accessKeyMaxLen; i++ {
keyBytes[i] = alphaNumericTable[keyBytes[i]%alphaNumericTableLen]
}
cred.AccessKey = string(keyBytes)

// Generate secret key.
keyBytes = readBytes(secretKeyMaxLen)
keyBytes, err = readBytes(secretKeyMaxLen)
if err != nil {
return cred, err
}
cred.SecretKey = string([]byte(base64.StdEncoding.EncodeToString(keyBytes))[:secretKeyMaxLen])

return cred
return cred, nil
}

// CreateCredentials returns new credential with the given access key and secret key.