oracleobjectstorage: speed up operations by using S3 pacer and setting minSleep to 10ms

Uploading 100 files of 1 MB each previously took 20 seconds; with this fix it takes around 2 seconds.

This 10x improvement is in line with the pacer's minimum sleep reduction from 100ms to 10ms.
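
As context for the change below, here is a minimal sketch of how an S3-style pacer with a 10ms minimum sleep is built and used. It assumes rclone's github.com/rclone/rclone/fs and github.com/rclone/rclone/lib/pacer packages; the wrapped operation and its retry decision are placeholders, not the backend's actual OCI calls or shouldRetry logic.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/pacer"
)

func main() {
	ctx := context.Background()

	// S3-style pacer calculator with a 10ms minimum sleep, mirroring the new minSleep.
	pc := fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(10*time.Millisecond)))

	// Wrap an operation in the pacer; returning (true, err) asks it to retry.
	err := pc.Call(func() (bool, error) {
		// placeholder standing in for an OCI Object Storage SDK call
		return false, nil
	})
	fmt.Println("pacer call finished, err =", err)
}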
msays2000 authored Jan 25, 2023
1 parent e2afd00 commit 8c8ee99
Showing 2 changed files with 7 additions and 5 deletions.
4 changes: 1 addition & 3 deletions backend/oracleobjectstorage/options.go
@@ -17,9 +17,7 @@ const (
 	defaultUploadCutoff = fs.SizeSuffix(200 * 1024 * 1024)
 	defaultUploadConcurrency = 10
 	maxUploadCutoff = fs.SizeSuffix(5 * 1024 * 1024 * 1024)
-	minSleep = 100 * time.Millisecond
-	maxSleep = 5 * time.Minute
-	decayConstant = 1 // bigger for slower decay, exponential
+	minSleep = 10 * time.Millisecond
 	defaultCopyTimeoutDuration = fs.Duration(time.Minute)
 )

8 changes: 6 additions & 2 deletions backend/oracleobjectstorage/oracleobjectstorage.go
@@ -64,14 +64,18 @@ func NewFs(ctx context.Context, name, root string, m configmap.Mapper) (fs.Fs, error) {
 	if err != nil {
 		return nil, err
 	}
-	p := pacer.NewDefault(pacer.MinSleep(minSleep), pacer.MaxSleep(maxSleep), pacer.DecayConstant(decayConstant))
+	pc := fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(minSleep)))
+	// Set pacer retries to 2 (1 try and 1 retry) because we are
+	// relying on SDK retry mechanism, but we allow 2 attempts to
+	// retry directory listings after XMLSyntaxError
+	pc.SetRetries(2)
 	f := &Fs{
 		name: name,
 		opt: *opt,
 		ci: ci,
 		srv: objectStorageClient,
 		cache: bucket.NewCache(),
-		pacer: fs.NewPacer(ctx, p),
+		pacer: pc,
 	}
 	f.setRoot(root)
 	f.features = (&fs.Features{
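
The comment in the hunk above caps the pacer's own retries at 2. As an illustration, the hedged sketch below (same assumed rclone packages as above, with a simulated error standing in for the real XMLSyntaxError handling) shows how SetRetries(2) allows exactly one try plus one retry when the paced function keeps asking for a retry.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/rclone/rclone/fs"
	"github.com/rclone/rclone/lib/pacer"
)

func main() {
	ctx := context.Background()
	pc := fs.NewPacer(ctx, pacer.NewS3(pacer.MinSleep(10*time.Millisecond)))
	pc.SetRetries(2) // 1 try + 1 retry, as in the backend change

	attempts := 0
	err := pc.Call(func() (bool, error) {
		attempts++
		// Returning true requests a retry; with retries set to 2 the
		// pacer gives up after the second attempt and returns the error.
		return true, errors.New("simulated listing error")
	})
	fmt.Printf("attempts=%d err=%v\n", attempts, err) // attempts == 2
}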
