
Commit

Make IOPerDrive Configurable (#7)
Signed-off-by: Zhou Ting <[email protected]>
moting9 authored Sep 18, 2023
1 parent c746362 commit 31b8560
Showing 3 changed files with 32 additions and 24 deletions.
README.md: 1 change (1 addition, 0 deletions)
@@ -50,6 +50,7 @@ Examples:
 Flags:
   -b, --blocksize string   read/write block size (default "4MiB")
   -f, --filesize string    amount of data to read/write per drive (default "1GiB")
+  -i, --ioperdrive int     number of concurrent I/O per drive (default 4)
   -h, --help               help for dperf
       --serial             run tests one by one, instead of all at once.
       --version            version for dperf
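
Note: for a quick sanity check of the new flag, an invocation along the lines of "dperf --ioperdrive 8 /mnt/drive{1..6}" (the mount paths are illustrative, and 8 is just an arbitrary override of the default 4) should issue 8 concurrent I/O operations per drive instead of 4.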
cmd/cmd.go: 26 changes (17 additions, 9 deletions)
@@ -42,11 +42,12 @@ const alignSize = 4096
 
 // flags
 var (
-    serial    = false
-    verbose   = false
-    blockSize = "4MiB"
-    fileSize  = "1GiB"
-    cpuNode   = 0
+    serial     = false
+    verbose    = false
+    blockSize  = "4MiB"
+    fileSize   = "1GiB"
+    cpuNode    = 0
+    ioPerDrive = 4
 )
 
 var dperfCmd = &cobra.Command{
@@ -123,11 +124,16 @@ $ dperf --serial /mnt/drive{1..6}
             return fmt.Errorf("Invalid filesize must multiples of 4k: %d", fs)
         }
 
+        if ioPerDrive <= 0 {
+            return fmt.Errorf("Invalid ioperdrive must greater than 0: %d", ioPerDrive)
+        }
+
         perf := &dperf.DrivePerf{
-            Serial:    serial,
-            BlockSize: bs,
-            FileSize:  fs,
-            Verbose:   verbose,
+            Serial:     serial,
+            BlockSize:  bs,
+            FileSize:   fs,
+            Verbose:    verbose,
+            IOPerDrive: ioPerDrive,
         }
         paths := make([]string, 0, len(args))
         for _, arg := range args {
@@ -175,6 +181,8 @@ func init() {
         "filesize", "f", fileSize, "amount of data to read/write per drive")
     dperfCmd.PersistentFlags().IntVarP(&cpuNode,
         "cpunode", "c", -1, "execute on a specific CPU node, defaults to all CPU nodes")
+    dperfCmd.PersistentFlags().IntVarP(&ioPerDrive,
+        "ioperdrive", "i", ioPerDrive, "number of concurrent I/O per drive, default is 4")
 
     dperfCmd.PersistentFlags().MarkHidden("alsologtostderr")
     dperfCmd.PersistentFlags().MarkHidden("add_dir_header")
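
Note: the init() change above follows the usual cobra pattern of binding an int flag to a package-level variable whose current value supplies the default, then rejecting non-positive values before the DrivePerf struct is built. A minimal, self-contained sketch of that pattern is below; the "demo" command name and the printed message are invented for illustration, and this is not dperf's actual code.

// Minimal sketch of the flag pattern above (hypothetical "demo" command, not dperf itself).
package main

import (
	"fmt"
	"os"

	"github.com/spf13/cobra"
)

// Package-level variable doubles as the flag's default, mirroring cmd/cmd.go.
var ioPerDrive = 4

func main() {
	cmd := &cobra.Command{
		Use: "demo",
		RunE: func(_ *cobra.Command, _ []string) error {
			// Validate before use, as the added check in cmd/cmd.go does.
			if ioPerDrive <= 0 {
				return fmt.Errorf("invalid ioperdrive, must be greater than 0: %d", ioPerDrive)
			}
			fmt.Println("concurrent I/O per drive:", ioPerDrive)
			return nil
		},
	}
	// IntVarP binds -i/--ioperdrive to the variable; its current value (4) becomes the default.
	cmd.PersistentFlags().IntVarP(&ioPerDrive, "ioperdrive", "i", ioPerDrive,
		"number of concurrent I/O per drive")

	if err := cmd.Execute(); err != nil {
		os.Exit(1)
	}
}

Because cobra parses flags before invoking the run function, the flag value is already set by the time the validation runs.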
pkg/dperf/perf.go: 29 changes (14 additions, 15 deletions)
@@ -30,10 +30,11 @@ import (
 
 // DrivePerf options
 type DrivePerf struct {
-    Serial    bool
-    Verbose   bool
-    BlockSize uint64
-    FileSize  uint64
+    Serial     bool
+    Verbose    bool
+    BlockSize  uint64
+    FileSize   uint64
+    IOPerDrive int
 }
 
 // mustGetUUID - get a random UUID.
@@ -46,15 +47,13 @@ func mustGetUUID() string {
     return u.String()
 }
 
-const ioPerDrive = 4 // number of concurrent I/O per drive
-
 func (d *DrivePerf) runTests(ctx context.Context, path string, testUUID string) (dr *DrivePerfResult) {
-    writeThroughputs := make([]uint64, ioPerDrive)
-    readThroughputs := make([]uint64, ioPerDrive)
-    errs := make([]error, ioPerDrive)
+    writeThroughputs := make([]uint64, d.IOPerDrive)
+    readThroughputs := make([]uint64, d.IOPerDrive)
+    errs := make([]error, d.IOPerDrive)
 
-    dataBuffers := make([][]byte, ioPerDrive)
-    for i := 0; i < ioPerDrive; i++ {
+    dataBuffers := make([][]byte, d.IOPerDrive)
+    for i := 0; i < d.IOPerDrive; i++ {
         // Read Aligned block upto a multiple of BlockSize
         dataBuffers[i] = directio.AlignedBlock(int(d.BlockSize))
     }
@@ -64,8 +63,8 @@ func (d *DrivePerf) runTests(ctx context.Context, path string, testUUID string)
     defer os.RemoveAll(testUUIDPath)
 
     var wg sync.WaitGroup
-    wg.Add(ioPerDrive)
-    for i := 0; i < ioPerDrive; i++ {
+    wg.Add(int(d.IOPerDrive))
+    for i := 0; i < int(d.IOPerDrive); i++ {
         go func(idx int) {
             defer wg.Done()
             iopath := testPath + "-" + strconv.Itoa(idx)
@@ -79,8 +78,8 @@ func (d *DrivePerf) runTests(ctx context.Context, path string, testUUID string)
     }
     wg.Wait()
 
-    wg.Add(ioPerDrive)
-    for i := 0; i < ioPerDrive; i++ {
+    wg.Add(d.IOPerDrive)
+    for i := 0; i < d.IOPerDrive; i++ {
         go func(idx int) {
             defer wg.Done()
             iopath := testPath + "-" + strconv.Itoa(idx)
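
Note: the runTests changes replace the old ioPerDrive constant with the DrivePerf.IOPerDrive field but keep the same fan-out shape: size the per-worker result slices, start one goroutine per slot, and join on a sync.WaitGroup. A stripped-down sketch of that shape is below; doWork is a made-up stand-in for dperf's per-path read/write passes, and none of this is dperf's actual code.

// Stand-alone sketch of the IOPerDrive fan-out used in runTests (doWork is hypothetical).
package main

import (
	"fmt"
	"sync"
)

// doWork stands in for one read or write pass against a per-index path.
func doWork(idx int) (uint64, error) {
	return uint64(100 * (idx + 1)), nil // pretend throughput value
}

func main() {
	ioPerDrive := 4 // in dperf this now comes from DrivePerf.IOPerDrive

	throughputs := make([]uint64, ioPerDrive) // one slot per goroutine
	errs := make([]error, ioPerDrive)

	var wg sync.WaitGroup
	wg.Add(ioPerDrive)
	for i := 0; i < ioPerDrive; i++ {
		go func(idx int) {
			defer wg.Done()
			// Each goroutine writes only to its own index, so no mutex is needed.
			throughputs[idx], errs[idx] = doWork(idx)
		}(i)
	}
	wg.Wait()

	fmt.Println("throughputs:", throughputs, "errors:", errs)
}

Pre-sizing the slices and having each goroutine write only to its own index keeps the hot path free of locks, which is what lets IOPerDrive scale up without extra synchronization.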
