tolerate listing with only readQuorum disks (minio#10357)
We can reduce this further in the future, but this is a good
value to keep around. With the advent of continuous healing,
we can be assured that the namespace will eventually be
consistent, so we are okay to avoid the necessity of
listing across all drives on all sets.

Bonus: Pop()s in parallel seem to have the potential to
wait too long on large drive setups and cause more slowness
instead of gaining any performance; remove it for now.

Also, implement a load-balanced reply for local disks,
ensuring that local disks have an affinity for:

- cleanupStaleMultipartUploads()
harshavardhana authored Aug 27, 2020
1 parent 0a2e6d5 commit a359e36
Showing 14 changed files with 162 additions and 204 deletions.
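
To make the commit message concrete: with the default storage class, parity equals half the drives in a set, so read quorum also works out to setDriveCount/2, and a listing pass only needs that many disks instead of every drive on every set. The helper below is a hypothetical, standalone sketch of that arithmetic (disksForListing and the string slice standing in for StorageAPI are illustrative, not code from this commit):

```go
// Hypothetical sketch, not part of this commit: choose a read-quorum-sized,
// randomized subset of a set's drives for listing instead of all of them.
package main

import (
	"fmt"
	"math/rand"
)

// disksForListing assumes parity == setDriveCount/2 (the storage-class
// default), so read quorum is half the drives in the erasure set.
func disksForListing(disks []string) []string {
	readQuorum := len(disks) / 2
	shuffled := append([]string(nil), disks...)
	rand.Shuffle(len(shuffled), func(i, j int) {
		shuffled[i], shuffled[j] = shuffled[j], shuffled[i]
	})
	return shuffled[:readQuorum]
}

func main() {
	set := []string{"d1", "d2", "d3", "d4", "d5", "d6", "d7", "d8"}
	// Prints 4 of the 8 drives in a random order, e.g. [d6 d2 d8 d3].
	fmt.Println(disksForListing(set))
}
```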
2 changes: 1 addition & 1 deletion cmd/background-newdisks-heal-ops.go
@@ -183,7 +183,7 @@ func monitorLocalDisksAndHeal(ctx context.Context, z *erasureZones, drivesToHeal
// Heal all erasure sets that need
for i, erasureSetToHeal := range erasureSetInZoneToHeal {
for _, setIndex := range erasureSetToHeal {
- err := healErasureSet(ctx, setIndex, z.zones[i].sets[setIndex], z.zones[i].drivesPerSet)
+ err := healErasureSet(ctx, setIndex, z.zones[i].sets[setIndex], z.zones[i].setDriveCount)
if err != nil {
logger.LogIf(ctx, err)
}
18 changes: 9 additions & 9 deletions cmd/config/storageclass/storage-class.go
@@ -156,7 +156,7 @@ func parseStorageClass(storageClassEnv string) (sc StorageClass, err error) {
}

// Validates the parity disks.
- func validateParity(ssParity, rrsParity, drivesPerSet int) (err error) {
+ func validateParity(ssParity, rrsParity, setDriveCount int) (err error) {
if ssParity == 0 && rrsParity == 0 {
return nil
}
@@ -174,12 +174,12 @@ func validateParity(ssParity, rrsParity, drivesPerSet int) (err error) {
return fmt.Errorf("Reduced redundancy storage class parity %d should be greater than or equal to %d", rrsParity, minParityDisks)
}

- if ssParity > drivesPerSet/2 {
- return fmt.Errorf("Standard storage class parity %d should be less than or equal to %d", ssParity, drivesPerSet/2)
+ if ssParity > setDriveCount/2 {
+ return fmt.Errorf("Standard storage class parity %d should be less than or equal to %d", ssParity, setDriveCount/2)
}

- if rrsParity > drivesPerSet/2 {
- return fmt.Errorf("Reduced redundancy storage class parity %d should be less than or equal to %d", rrsParity, drivesPerSet/2)
+ if rrsParity > setDriveCount/2 {
+ return fmt.Errorf("Reduced redundancy storage class parity %d should be less than or equal to %d", rrsParity, setDriveCount/2)
}

if ssParity > 0 && rrsParity > 0 {
@@ -220,9 +220,9 @@ func Enabled(kvs config.KVS) bool {
}

// LookupConfig - lookup storage class config and override with valid environment settings if any.
- func LookupConfig(kvs config.KVS, drivesPerSet int) (cfg Config, err error) {
+ func LookupConfig(kvs config.KVS, setDriveCount int) (cfg Config, err error) {
cfg = Config{}
- cfg.Standard.Parity = drivesPerSet / 2
+ cfg.Standard.Parity = setDriveCount / 2
cfg.RRS.Parity = defaultRRSParity

if err = config.CheckValidKeys(config.StorageClassSubSys, kvs, DefaultKVS); err != nil {
@@ -239,7 +239,7 @@ func LookupConfig(kvs config.KVS, drivesPerSet int) (cfg Config, err error) {
}
}
if cfg.Standard.Parity == 0 {
- cfg.Standard.Parity = drivesPerSet / 2
+ cfg.Standard.Parity = setDriveCount / 2
}

if rrsc != "" {
@@ -254,7 +254,7 @@ func LookupConfig(kvs config.KVS, drivesPerSet int) (cfg Config, err error) {

// Validation is done after parsing both the storage classes. This is needed because we need one
// storage class value to deduce the correct value of the other storage class.
- if err = validateParity(cfg.Standard.Parity, cfg.RRS.Parity, drivesPerSet); err != nil {
+ if err = validateParity(cfg.Standard.Parity, cfg.RRS.Parity, setDriveCount); err != nil {
return Config{}, err
}

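The hunks above replace drivesPerSet with setDriveCount but keep the same bound: parity for either storage class may not exceed half the drives in a set, and standard parity defaults to setDriveCount/2. Below is a standalone sketch of that rule; checkParity is a stand-in for the unexported validateParity, written so it compiles outside the package:

```go
// Standalone sketch of the parity bound enforced by validateParity above.
package main

import "fmt"

func checkParity(ssParity, rrsParity, setDriveCount int) error {
	if ssParity > setDriveCount/2 {
		return fmt.Errorf("standard parity %d should be <= %d", ssParity, setDriveCount/2)
	}
	if rrsParity > setDriveCount/2 {
		return fmt.Errorf("reduced redundancy parity %d should be <= %d", rrsParity, setDriveCount/2)
	}
	return nil
}

func main() {
	// A 16-drive set: default standard parity is 16/2 = 8.
	fmt.Println(checkParity(8, 2, 16)) // <nil>
	fmt.Println(checkParity(9, 2, 16)) // error, 9 exceeds 8
}
```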
10 changes: 5 additions & 5 deletions cmd/config/storageclass/storage-class_test.go
@@ -69,10 +69,10 @@ func TestParseStorageClass(t *testing.T) {

func TestValidateParity(t *testing.T) {
tests := []struct {
- rrsParity int
- ssParity int
- success bool
- drivesPerSet int
+ rrsParity int
+ ssParity int
+ success bool
+ setDriveCount int
}{
{2, 4, true, 16},
{3, 3, true, 16},
@@ -85,7 +85,7 @@ func TestValidateParity(t *testing.T) {
{9, 2, false, 16},
}
for i, tt := range tests {
- err := validateParity(tt.ssParity, tt.rrsParity, tt.drivesPerSet)
+ err := validateParity(tt.ssParity, tt.rrsParity, tt.setDriveCount)
if err != nil && tt.success {
t.Errorf("Test %d, Expected success, got %s", i+1, err)
}
2 changes: 1 addition & 1 deletion cmd/endpoint.go
@@ -193,7 +193,7 @@ func NewEndpoint(arg string) (ep Endpoint, e error) {
}

// ZoneEndpoints represent endpoints in a given zone
- // along with its setCount and drivesPerSet.
+ // along with its setCount and setDriveCount.
type ZoneEndpoints struct {
SetCount int
DrivesPerSet int
3 changes: 1 addition & 2 deletions cmd/erasure-bucket.go
@@ -133,8 +133,7 @@ func (er erasureObjects) listBuckets(ctx context.Context) (bucketsInfo []BucketI
if err == nil {
// NOTE: The assumption here is that volumes across all disks in
// readQuorum have consistent view i.e they all have same number
- // of buckets. This is essentially not verified since healing
- // should take care of this.
+ // of buckets.
var bucketsInfo []BucketInfo
for _, volInfo := range volsInfo {
if isReservedOrInvalidBucket(volInfo.Name, true) {
11 changes: 11 additions & 0 deletions cmd/erasure-common.go
@@ -23,6 +23,17 @@ import (
"github.com/minio/minio/pkg/sync/errgroup"
)

+ func (er erasureObjects) getLoadBalancedLocalDisks() (newDisks []StorageAPI) {
+ disks := er.getDisks()
+ // Based on the random shuffling return back randomized disks.
+ for _, i := range hashOrder(UTCNow().String(), len(disks)) {
+ if disks[i-1] != nil && disks[i-1].IsLocal() {
+ newDisks = append(newDisks, disks[i-1])
+ }
+ }
+ return newDisks
+ }

// getLoadBalancedDisks - fetches load balanced (sufficiently randomized) disk slice.
func (er erasureObjects) getLoadBalancedDisks() (newDisks []StorageAPI) {
disks := er.getDisks()
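The i-1 indexing above suggests hashOrder returns 1-based positions; the new helper simply keeps only the local disks from that shuffled order. Callers walk the returned slice and use the first usable entry, which is what the cleanupStaleMultipartUploads change in the next file does. A minimal sketch of that calling pattern, assumed to live in the same package since it relies on the unexported types above (pickLocalDisk itself is hypothetical):

```go
// pickLocalDisk is a hypothetical helper showing the caller-side pattern:
// take the first non-nil disk from the load-balanced local list.
func pickLocalDisk(er erasureObjects) StorageAPI {
	for _, d := range er.getLoadBalancedLocalDisks() {
		if d != nil {
			return d
		}
	}
	return nil // no local disk currently available
}
```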
3 changes: 2 additions & 1 deletion cmd/erasure-multipart.go
@@ -81,7 +81,8 @@ func (er erasureObjects) cleanupStaleMultipartUploads(ctx context.Context, clean
return
case <-ticker.C:
var disk StorageAPI
- for _, d := range er.getLoadBalancedDisks() {
+ // run multiple cleanup's local to this server.
+ for _, d := range er.getLoadBalancedLocalDisks() {
if d != nil {
disk = d
break