Skip to content

Commit

Permalink
update: enable master show client use metrics while qos off
Browse files Browse the repository at this point in the history
Signed-off-by: leonrayang <[email protected]>
  • Loading branch information
leonrayang committed Aug 10, 2022
1 parent 0ab77b2 commit 3d9d76b
Show file tree
Hide file tree
Showing 2 changed files with 25 additions and 22 deletions.
2 changes: 1 addition & 1 deletion master/api_service.go
Original file line number Diff line number Diff line change
Expand Up @@ -1344,7 +1344,7 @@ func (m *Server) qosUpload(w http.ResponseWriter, r *http.Request) {

if qosEnable, _ := strconv.ParseBool(qosEnableStr); qosEnable {
if clientInfo, err := parseQosInfo(r); err == nil {
// log.LogInfof("action[qosUpload] cliInfoMgrMap [%v],clientInfo id[%v] clientInfo.Host %v, remote addr", clientInfo.ID, clientInfo.Host, r.RemoteAddr)
log.LogDebugf("action[qosUpload] cliInfoMgrMap [%v],clientInfo id[%v] clientInfo.Host %v, enable %v", clientInfo.ID, clientInfo.Host, r.RemoteAddr, qosEnable)
if clientInfo.ID == 0 {
if limit, err = vol.qosManager.init(m.cluster, clientInfo.Host); err != nil {
sendErrReply(w, r, newErrHTTPReply(err))
Expand Down
45 changes: 24 additions & 21 deletions master/limit.go
Original file line number Diff line number Diff line change
@@ -1,13 +1,13 @@
package master

import (
"fmt"
"sync"
"time"
"fmt"
"sync"
"time"

"github.com/cubefs/cubefs/proto"
"github.com/cubefs/cubefs/util"
"github.com/cubefs/cubefs/util/log"
"github.com/cubefs/cubefs/proto"
"github.com/cubefs/cubefs/util"
"github.com/cubefs/cubefs/util/log"
)


Expand Down Expand Up @@ -169,6 +169,7 @@ func (qosManager *QosCtrlManager) initClientQosInfo(clientID uint64, host string
}

limitRsp2Client.Magnify[factorType] = serverLimit.Magnify
limitRsp2Client.FactorMap[factorType] = clientInitInfo.FactorMap[factorType]

log.LogDebugf("action[initClientQosInfo] vol [%v] clientID [%v] factorType [%v] init client info and set limitRsp2Client [%v]"+
"server total[%v] used [%v] buffer [%v]",
Expand All @@ -184,7 +185,7 @@ func (qosManager *QosCtrlManager) initClientQosInfo(clientID uint64, host string
ID: clientID,
host: host,
}
log.LogDebugf("action[initClientQosInfo] vol [%v] clientID [%v]", qosManager.vol.Name, clientID)
log.LogDebugf("action[initClientQosInfo] vol [%v] clientID [%v] Assign [%v]", qosManager.vol.Name, clientID, limitRsp2Client)
return
}

Expand Down Expand Up @@ -310,7 +311,7 @@ func (serverLimit *ServerFactorLimit) updateLimitFactor(req interface{}) {
}

func (qosManager *QosCtrlManager) init(cluster *Cluster, host string) (limit *proto.LimitRsp2Client, err error) {
log.LogInfof("action[qosManage.init] vol [%v] host %v", qosManager.vol.Name, host)
log.LogDebugf("action[qosManage.init] vol [%v] host %v", qosManager.vol.Name, host)
var id uint64
if id, err = cluster.idAlloc.allocateCommonID(); err == nil {
return qosManager.initClientQosInfo(id, host)
Expand Down Expand Up @@ -343,6 +344,9 @@ func (qosManager *QosCtrlManager) HandleClientQosReq(reqClientInfo *proto.Client
clientInfo.Assign = limitRsp
clientInfo.Time = time.Now()
for i := proto.IopsReadType; i <= proto.FlowWriteType; i++ {
reqClientInfo.FactorMap[i].UsedLimit = reqClientInfo.FactorMap[i].Used
reqClientInfo.FactorMap[i].UsedBuffer = reqClientInfo.FactorMap[i].Need

log.LogDebugf("action[HandleClientQosReq] vol [%v] [%v,%v,%v,%v]", qosManager.vol.Name,
reqClientInfo.FactorMap[i].Used,
reqClientInfo.FactorMap[i].Need,
Expand All @@ -351,7 +355,6 @@ func (qosManager *QosCtrlManager) HandleClientQosReq(reqClientInfo *proto.Client
}
return
}

index := 0
wg := &sync.WaitGroup{}
wg.Add(len(reqClientInfo.FactorMap))
Expand Down Expand Up @@ -408,6 +411,10 @@ func (qosManager *QosCtrlManager) updateServerLimitByClientsInfo(factorType uint
serverLimit.CliNeed = cliSum.Need
qosManager.RUnlock()

if !qosManager.qosEnable {
return
}

serverLimit.Buffer = 0
nextStageUse = cliSum.Used
nextStageNeed = cliSum.Need
Expand Down Expand Up @@ -481,8 +488,9 @@ func (qosManager *QosCtrlManager) assignClientsNewQos(factorType uint32) {
qosManager.RLock()
serverLimit := qosManager.serverFactorLimitMap[factorType]
var bufferAllocated uint64

// recalculate client Assign limit and buffer
for host, cliInfoMgr := range qosManager.cliInfoMgrMap {
for _, cliInfoMgr := range qosManager.cliInfoMgrMap {
cliInfo := cliInfoMgr.Cli.FactorMap[factorType]
assignInfo := cliInfoMgr.Assign.FactorMap[factorType]

Expand All @@ -495,18 +503,13 @@ func (qosManager *QosCtrlManager) assignClientsNewQos(factorType uint32) {
assignInfo.UsedBuffer = uint64(float64(serverLimit.Buffer) * (float64(assignInfo.UsedLimit) / float64(serverLimit.Allocated)) * 0.5)
}

log.LogDebugf("action[assignClientsNewQos] Assign host [%v] limit [%v] buffer [%v]",
host, assignInfo.UsedLimit, assignInfo.UsedBuffer)

// the remaining buffer may be quite large and we should not use it all up; it doesn't matter if the buffer is larger than the used limit line
if assignInfo.UsedBuffer > assignInfo.UsedLimit {
assignInfo.UsedBuffer = assignInfo.UsedLimit
}
}

bufferAllocated += assignInfo.UsedBuffer
log.LogDebugf("action[assignClientsNewQos] vol [%v] host [%v] type [%v] assignInfo used limit [%v], used buffer [%v]",
qosManager.vol.Name, host, proto.QosTypeString(factorType), assignInfo.UsedLimit, assignInfo.UsedBuffer)
}

qosManager.RUnlock()
Expand All @@ -529,19 +532,13 @@ func (vol *Vol) checkQos() {
// check expire client and delete from map
tTime := time.Now()
for id, cli := range vol.qosManager.cliInfoMgrMap {
log.LogWarnf("action[checkQos] vol [%v] Id [%v] addr [%v] check. time now[%v],client last alive time[%v]",
vol.Name, id, cli.host, tTime.Unix(), cli.Time.Unix())
if cli.Time.Add(20 * time.Second).Before(tTime) {
log.LogWarnf("action[checkQos] vol [%v] Id [%v] addr [%v] be delete in case of long time no request",
vol.Name, id, cli.host)
delete(vol.qosManager.cliInfoMgrMap, id)
}
}

if !vol.qosManager.qosEnable {
vol.qosManager.Unlock()
return
}
vol.qosManager.Unlock()

// periodically updateServerLimitByClientsInfo and get assigned limit info for all clients
Expand All @@ -550,6 +547,10 @@ func (vol *Vol) checkQos() {
// calc all clients and get real used and need value , used value should less then total
vol.qosManager.updateServerLimitByClientsInfo(factorType)
// update client assign info by result above
if !vol.qosManager.qosEnable {
continue
}

vol.qosManager.assignClientsNewQos(factorType)

serverLimit := vol.qosManager.serverFactorLimitMap[factorType]
Expand All @@ -572,6 +573,8 @@ func (vol *Vol) getQosStatus(cluster *Cluster) interface{} {
ClusterMaxUploadCnt uint32
ClientALiveCnt int
}
vol.qosManager.RLock()
defer vol.qosManager.RUnlock()

return &qosStatus{
ServerFactorLimitMap: map[uint32]*ServerFactorLimit{
Expand Down

0 comments on commit 3d9d76b

Please sign in to comment.