Skip to content

Commit

Permalink
Merge branch 'master' into console
Browse files Browse the repository at this point in the history
  • Loading branch information
ansjsun authored Aug 13, 2020
2 parents 9403c5a + 0433b71 commit 1276745
Show file tree
Hide file tree
Showing 3 changed files with 55 additions and 42 deletions.
28 changes: 14 additions & 14 deletions docs/source/csv/env.csv
Original file line number Diff line number Diff line change
@@ -1,19 +1,19 @@
Role,Spec,Test,Product
Master,CPU,>=4C,>=8C
,内存,>=4G,>=16G
,内核,CentOS 7,CentOS 7
,数量,3,3
,Memory,>=4G,>=16G
,Kernel,>=3.10,>=3.10
,Nodes,3,3
DataNode,CPU,>=4C,>=4C
,内存,>=4G,>=8G
,内核,CentOS 7,CentOS 7
,硬盘容量,>=1TB,>=2TB
,硬盘类型,sata | ssd,sata | ssd
,文件系统,xfs | ext4,xfs | ext4
,数量,>=3,100~1000
,Memory,>=4G,>=8G
,Kernel,>=3.10,>=3.10
,Disk Capacity,>=1TB,>=2TB
,Disk Type,sata | ssd,sata | ssd
,File System,xfs | ext4,xfs | ext4
,Nodes,>=3,100~1000
MetaNode,CPU,>=4C,>=8C
,内存,>=8G,>=16G
,内核,CentOS 7,CentOS 7
,数量,>=4,100~1000
,Memory,>=8G,>=16G
,Kernel,>=3.10,>=3.10
,Nodes,>=4,100~1000
Client,CPU,>=2C,>=2C
,内存,>=4G,>=1G
,内核,CentOS 7,CentOS 7
,Memory,>=4G,>=1G
,Kernel,>=3.10,>=3.10
4 changes: 2 additions & 2 deletions docs/source/env.rst
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ Environment Requirements
--------------------------

The following table lists the system and hardware requirements of the performance test environment and production environment. You can also refer to the capacity planning chapter to accurately customize the deployment plan based on your cluster's actual capacity planning.
Note that since the DataNode used some features of CentOS7, the kernel version of the DataNode must be not lower than CentOS7.
Note that since the DataNode relies on certain Linux kernel features, the kernel version of the servers used to deploy DataNodes must be 3.10 or later.

In order to speed up read and write of meta data, the meta data is stored in memory, while the DataNode mainly occupies disk resources. To maximize the use of node resources, you can mix-deploy DataNode and MetaNode on the same node.

Expand All @@ -23,7 +23,7 @@ If you have been clear about those statistics, you can use the empirical referen
.. csv-table::
:header: "Total File Count", "Total File Size", "Total memory", "Total Disk Space"

"10亿", "10PB", "2048 GB", "10PB"
"1,000,000,000", "10PB", "2048 GB", "10PB"

The higher the proportion of large files, the greater the MetaNode pressure.

Expand Down
65 changes: 39 additions & 26 deletions objectnode/fs_store_user.go
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,8 @@
package objectnode

import (
"fmt"
"github.com/chubaofs/chubaofs/util/exporter"
"hash/crc32"
"sync"
"time"
Expand All @@ -41,7 +43,13 @@ type StrictUserInfoStore struct {
}

// LoadUser fetches the user info bound to the given access key directly from
// the master (no caching). If the lookup fails for any reason other than the
// user or access key not existing, it emits an error log and a UMP warning so
// that master communication problems become visible to operators.
func (s *StrictUserInfoStore) LoadUser(accessKey string) (*proto.UserInfo, error) {
	userInfo, err := s.mc.UserAPI().GetAKInfo(accessKey)
	// Guard on err != nil first: without it, a successful lookup (err == nil)
	// would also satisfy the two inequality checks and spuriously log/warn.
	if err != nil && err != proto.ErrUserNotExists && err != proto.ErrAccessKeyNotExists {
		log.LogErrorf("LoadUser: fetch user info fail: err(%v)", err)
		exporter.Warning(fmt.Sprintf("StrictUserInfoStore load user fail: accessKey(%v) err(%v)", accessKey, err))
	}
	return userInfo, err
}

type CacheUserInfoStore struct {
Expand Down Expand Up @@ -134,35 +142,38 @@ func (us *CacheUserInfoLoader) scheduleUpdate() {
for {
select {
case <-t.C:
aks = aks[:0]
us.akInfoMutex.RLock()
for ak := range us.akInfoStore {
aks = append(aks, ak)
}
us.akInfoMutex.RUnlock()
for _, ak := range aks {
akPolicy, err := us.mc.UserAPI().GetAKInfo(ak)
if err == proto.ErrUserNotExists || err == proto.ErrAccessKeyNotExists {
us.akInfoMutex.Lock()
delete(us.akInfoStore, ak)
us.akInfoMutex.Unlock()
us.blacklist.Store(ak, time.Now())
log.LogDebugf("scheduleUpdate: release user info: accessKey(%v)", ak)
continue
}
if err != nil {
log.LogErrorf("scheduleUpdate: fetch user info fail: accessKey(%v), err(%v)", ak, err)
continue
}
us.akInfoMutex.Lock()
us.akInfoStore[ak] = akPolicy
us.akInfoMutex.Unlock()
}
t.Reset(updateUserStoreInterval)
case <-us.closeCh:
t.Stop()
return
}

aks = aks[:0]
us.akInfoMutex.RLock()
for ak := range us.akInfoStore {
aks = append(aks, ak)
}
us.akInfoMutex.RUnlock()
for _, ak := range aks {
akPolicy, err := us.mc.UserAPI().GetAKInfo(ak)
if err == proto.ErrUserNotExists || err == proto.ErrAccessKeyNotExists {
us.akInfoMutex.Lock()
delete(us.akInfoStore, ak)
us.akInfoMutex.Unlock()
us.blacklist.Store(ak, time.Now())
log.LogDebugf("scheduleUpdate: release user info: accessKey(%v)", ak)
continue
}
// if error info is not empty, it means error occurred communication with master, output an ump log
if err != nil {
log.LogErrorf("scheduleUpdate: fetch user info fail: accessKey(%v), err(%v)", ak, err)
exporter.Warning(fmt.Sprintf("CacheUserInfoLoader get user info fail when scheduling update: err(%v)", err))
break
}
us.akInfoMutex.Lock()
us.akInfoStore[ak] = akPolicy
us.akInfoMutex.Unlock()
}
t.Reset(updateUserStoreInterval)
}
}

Expand Down Expand Up @@ -208,6 +219,8 @@ func (us *CacheUserInfoLoader) LoadUser(accessKey string) (*proto.UserInfo, erro
if err != nil {
if err != proto.ErrUserNotExists && err != proto.ErrAccessKeyNotExists {
log.LogErrorf("LoadUser: fetch user info fail: err(%v)", err)
// if error occurred when loading user, and error is not NotExist, output an ump log
exporter.Warning(fmt.Sprintf("CacheUserInfoLoader load user info fail: accessKey(%v) err(%v)", accessKey, err))
}
release()
us.blacklist.Store(accessKey, time.Now())
Expand Down

0 comments on commit 1276745

Please sign in to comment.