remove/deprecate crawler disable environment (minio#11214)
With changes present to automatically throttle the crawler at
runtime, there is no need for an environment variable to disable
crawling. Crawling is a fundamental piece of healing, lifecycle,
and many other features; there is no good reason anyone would
need to disable it on a production system.

* Apply suggestions from code review
harshavardhana authored Jan 4, 2021
1 parent e7ae49f commit a438305
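
The runtime throttling the commit message refers to shows up in cmd/data-crawler.go below as crawlerSleeper = newDynamicSleeper(10, 10*time.Second). As a rough, hypothetical illustration of that idea — not MinIO's actual implementation — a factor-based sleeper paces each crawl step in proportion to how long the previous step took, so load itself provides the backpressure that the removed environment variable used to approximate:

package crawler

import (
	"context"
	"time"
)

// dynamicSleeper pauses in proportion to how long the previous unit of work
// took, so a loaded server automatically slows the crawler down.
type dynamicSleeper struct {
	factor   float64       // sleep roughly factor * observed work duration
	maxSleep time.Duration // upper bound on any single pause
}

func newDynamicSleeper(factor float64, maxSleep time.Duration) *dynamicSleeper {
	return &dynamicSleeper{factor: factor, maxSleep: maxSleep}
}

// Sleep blocks for min(factor*work, maxSleep), or until ctx is cancelled.
func (d *dynamicSleeper) Sleep(ctx context.Context, work time.Duration) {
	pause := time.Duration(float64(work) * d.factor)
	if pause > d.maxSleep {
		pause = d.maxSleep
	}
	t := time.NewTimer(pause)
	defer t.Stop()
	select {
	case <-ctx.Done():
	case <-t.C:
	}
}

// crawlLoop scans one unit at a time and throttles between units; there is no
// on/off environment switch, backpressure comes from the sleeper alone.
func crawlLoop(ctx context.Context, s *dynamicSleeper, scanOne func(context.Context) error) {
	for ctx.Err() == nil {
		start := time.Now()
		if err := scanOne(ctx); err != nil {
			return
		}
		s.Sleep(ctx, time.Since(start))
	}
}

Under these assumed semantics, a scan step that took 50ms would be followed by roughly a 500ms pause, capped at 10 seconds.
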
Showing 7 changed files with 20 additions and 63 deletions.
8 changes: 0 additions & 8 deletions cmd/admin-bucket-handlers.go
@@ -23,9 +23,7 @@ import (
"net/http"

"github.com/gorilla/mux"
"github.com/minio/minio/cmd/config"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/env"
iampolicy "github.com/minio/minio/pkg/iam/policy"
"github.com/minio/minio/pkg/madmin"
)
@@ -54,12 +52,6 @@ func (a adminAPIHandlers) PutBucketQuotaConfigHandler(w http.ResponseWriter, r *
vars := mux.Vars(r)
bucket := vars["bucket"]

// Turn off quota commands if data usage info is unavailable.
if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOff {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrAdminBucketQuotaDisabled), r.URL)
return
}

if _, err := objectAPI.GetBucketInfo(ctx, bucket); err != nil {
writeErrorResponseJSON(ctx, w, toAPIError(ctx, err), r.URL)
return
40 changes: 18 additions & 22 deletions cmd/admin-router.go
@@ -20,8 +20,6 @@ import (
"net/http"

"github.com/gorilla/mux"
"github.com/minio/minio/cmd/config"
"github.com/minio/minio/pkg/env"
"github.com/minio/minio/pkg/madmin"
)

@@ -172,27 +170,25 @@ func registerAdminRouter(router *mux.Router, enableConfigOps, enableIAMOps bool)
}

if globalIsDistErasure || globalIsErasure {
// Quota operations
if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOn {
// GetBucketQuotaConfig
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/get-bucket-quota").HandlerFunc(
httpTraceHdrs(adminAPI.GetBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
// PutBucketQuotaConfig
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-bucket-quota").HandlerFunc(
httpTraceHdrs(adminAPI.PutBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")

// Bucket replication operations
// GetBucketTargetHandler
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-remote-targets").HandlerFunc(
httpTraceHdrs(adminAPI.ListRemoteTargetsHandler)).Queries("bucket", "{bucket:.*}", "type", "{type:.*}")
// SetRemoteTargetHandler
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-remote-target").HandlerFunc(
httpTraceHdrs(adminAPI.SetRemoteTargetHandler)).Queries("bucket", "{bucket:.*}")
// RemoveRemoteTargetHandler
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-remote-target").HandlerFunc(
httpTraceHdrs(adminAPI.RemoveRemoteTargetHandler)).Queries("bucket", "{bucket:.*}", "arn", "{arn:.*}")
}
// GetBucketQuotaConfig
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/get-bucket-quota").HandlerFunc(
httpTraceHdrs(adminAPI.GetBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")
// PutBucketQuotaConfig
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-bucket-quota").HandlerFunc(
httpTraceHdrs(adminAPI.PutBucketQuotaConfigHandler)).Queries("bucket", "{bucket:.*}")

// Bucket replication operations
// GetBucketTargetHandler
adminRouter.Methods(http.MethodGet).Path(adminVersion+"/list-remote-targets").HandlerFunc(
httpTraceHdrs(adminAPI.ListRemoteTargetsHandler)).Queries("bucket", "{bucket:.*}", "type", "{type:.*}")
// SetRemoteTargetHandler
adminRouter.Methods(http.MethodPut).Path(adminVersion+"/set-remote-target").HandlerFunc(
httpTraceHdrs(adminAPI.SetRemoteTargetHandler)).Queries("bucket", "{bucket:.*}")
// RemoveRemoteTargetHandler
adminRouter.Methods(http.MethodDelete).Path(adminVersion+"/remove-remote-target").HandlerFunc(
httpTraceHdrs(adminAPI.RemoveRemoteTargetHandler)).Queries("bucket", "{bucket:.*}", "arn", "{arn:.*}")
}

// -- Top APIs --
// Top locks
if globalIsDistErasure {
12 changes: 0 additions & 12 deletions cmd/api-errors.go
@@ -120,7 +120,6 @@ const (
ErrReplicationSourceNotVersionedError
ErrReplicationNeedsVersioningError
ErrReplicationBucketNeedsVersioningError
ErrBucketReplicationDisabledError
ErrObjectRestoreAlreadyInProgress
ErrNoSuchKey
ErrNoSuchUpload
@@ -263,7 +262,6 @@ const (
// Bucket Quota error codes
ErrAdminBucketQuotaExceeded
ErrAdminNoSuchQuotaConfiguration
ErrAdminBucketQuotaDisabled

ErrHealNotImplemented
ErrHealNoSuchProcess
@@ -889,11 +887,6 @@ var errorCodes = errorCodeMap{
Description: "Versioning must be 'Enabled' on the bucket to add a replication target",
HTTPStatusCode: http.StatusBadRequest,
},
ErrBucketReplicationDisabledError: {
Code: "XMinioAdminBucketReplicationDisabled",
Description: "Replication specified but disk usage crawl is disabled on MinIO server",
HTTPStatusCode: http.StatusBadRequest,
},
ErrNoSuchObjectLockConfiguration: {
Code: "NoSuchObjectLockConfiguration",
Description: "The specified object does not have a ObjectLock configuration",
@@ -1215,11 +1208,6 @@ var errorCodes = errorCodeMap{
Description: "The quota configuration does not exist",
HTTPStatusCode: http.StatusNotFound,
},
ErrAdminBucketQuotaDisabled: {
Code: "XMinioAdminBucketQuotaDisabled",
Description: "Quota specified but disk usage crawl is disabled on MinIO server",
HTTPStatusCode: http.StatusBadRequest,
},
ErrInsecureClientRequest: {
Code: "XMinioInsecureClientRequest",
Description: "Cannot respond to plain-text request from TLS-encrypted server",
7 changes: 0 additions & 7 deletions cmd/bucket-handlers.go
@@ -33,7 +33,6 @@ import (

"github.com/minio/minio-go/v7/pkg/set"
"github.com/minio/minio-go/v7/pkg/tags"
"github.com/minio/minio/cmd/config"
"github.com/minio/minio/cmd/config/dns"
"github.com/minio/minio/cmd/crypto"
xhttp "github.com/minio/minio/cmd/http"
@@ -42,7 +41,6 @@ import (
objectlock "github.com/minio/minio/pkg/bucket/object/lock"
"github.com/minio/minio/pkg/bucket/policy"
"github.com/minio/minio/pkg/bucket/replication"
"github.com/minio/minio/pkg/env"
"github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/handlers"
"github.com/minio/minio/pkg/hash"
@@ -1336,11 +1334,6 @@ func (api objectAPIHandlers) PutBucketReplicationConfigHandler(w http.ResponseWr
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrNotImplemented), r.URL)
return
}
// Turn off replication if disk crawl is unavailable.
if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOff {
writeErrorResponseJSON(ctx, w, errorCodes.ToAPIErr(ErrBucketReplicationDisabledError), r.URL)
return
}
if s3Error := checkRequestAuthType(ctx, r, policy.PutReplicationConfigurationAction, bucket, ""); s3Error != ErrNone {
writeErrorResponse(ctx, w, errorCodes.ToAPIErr(s3Error), r.URL, guessIsBrowserReq(r))
return
8 changes: 2 additions & 6 deletions cmd/data-crawler.go
@@ -29,14 +29,12 @@ import (
"sync"
"time"

"github.com/minio/minio/cmd/config"
"github.com/minio/minio/cmd/config/heal"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/bucket/lifecycle"
"github.com/minio/minio/pkg/bucket/replication"
"github.com/minio/minio/pkg/color"
"github.com/minio/minio/pkg/console"
"github.com/minio/minio/pkg/env"
"github.com/minio/minio/pkg/event"
"github.com/minio/minio/pkg/hash"
"github.com/minio/minio/pkg/madmin"
@@ -62,11 +60,9 @@ var (
crawlerSleeper = newDynamicSleeper(10, 10*time.Second)
)

// initDataCrawler will start the crawler unless disabled.
// initDataCrawler will start the crawler in the background.
func initDataCrawler(ctx context.Context, objAPI ObjectLayer) {
if env.Get(envDataUsageCrawlConf, config.EnableOn) == config.EnableOn {
go runDataCrawler(ctx, objAPI)
}
go runDataCrawler(ctx, objAPI)
}

// runDataCrawler will start a data crawler.
1 change: 0 additions & 1 deletion cmd/data-usage.go
@@ -27,7 +27,6 @@
)

const (
envDataUsageCrawlConf = "MINIO_DISK_USAGE_CRAWL_ENABLE"
envDataUsageCrawlDebug = "MINIO_DISK_USAGE_CRAWL_DEBUG"

dataUsageRoot = SlashSeparator
7 changes: 0 additions & 7 deletions cmd/metrics.go
@@ -22,9 +22,7 @@ import (
"sync/atomic"
"time"

"github.com/minio/minio/cmd/config"
"github.com/minio/minio/cmd/logger"
"github.com/minio/minio/pkg/env"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
@@ -402,11 +400,6 @@ func bucketUsageMetricsPrometheus(ch chan<- prometheus.Metric) {
return
}

// Crawler disabled, nothing to do.
if env.Get(envDataUsageCrawlConf, config.EnableOn) != config.EnableOn {
return
}

dataUsageInfo, err := loadDataUsageFromBackend(GlobalContext, objLayer)
if err != nil {
return
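
For reference, a minimal sketch of what remains in bucketUsageMetricsPrometheus after this change: with the crawler always running, the only guard left is whether a data-usage snapshot has been produced yet. The snapshot type and loader signature below are simplified stand-ins, not the actual MinIO types:

package metricsdemo

import "github.com/prometheus/client_golang/prometheus"

// dataUsageInfo is a simplified stand-in for the crawler's usage snapshot.
type dataUsageInfo struct {
	BucketsUsage map[string]uint64 // bucket name -> total size in bytes
}

// bucketUsageMetrics emits per-bucket size metrics; when no snapshot exists
// yet (the crawler has not finished a cycle), it simply emits nothing.
func bucketUsageMetrics(ch chan<- prometheus.Metric, load func() (dataUsageInfo, error)) {
	usage, err := load()
	if err != nil {
		return
	}
	desc := prometheus.NewDesc(
		"minio_bucket_usage_size_bytes",
		"Total size of the bucket as observed by the data crawler",
		[]string{"bucket"}, nil,
	)
	for bucket, size := range usage.BucketsUsage {
		ch <- prometheus.MustNewConstMetric(desc, prometheus.GaugeValue, float64(size), bucket)
	}
}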
