Merge pull request kubernetes#47316 from k82cn/k8s_47315
Automatic merge from submit-queue (batch tested with PRs 48981, 47316, 49180)

Added a golint check for pkg/kubelet.

**What this PR does / why we need it**:
Added a golint check for pkg/kubelet, and made golint happy.

**Which issue this PR fixes** *(optional, in `fixes #<issue number>(, fixes #<issue_number>, ...)` format, will close that issue when PR gets merged)*: fixes kubernetes#47315 

**Release note**:
```release-note-none
```
Kubernetes Submit Queue authored Jul 19, 2017
2 parents 9378dab + 63b78a3 commit c0287ce
Showing 26 changed files with 170 additions and 162 deletions.
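
Most of the diff below renames exported identifiers and adds doc comments so that golint passes. As a quick orientation, here is a minimal, purely illustrative Go sketch of the naming conventions involved (none of these identifiers come from the PR except the `Dependencies` name it introduces): type names should not stutter with the package name, every exported identifier gets a doc comment that starts with its name, and initialisms such as `UID` and `API` are fully capitalized.

```go
// Illustrative sketch (not code from this PR) of the golint naming rules the
// commit applies across pkg/kubelet.
package main

import "fmt"

// Dependencies mirrors the KubeletDeps -> Dependencies rename: inside a
// package named kubelet, the old name stutters as kubelet.KubeletDeps.
type Dependencies struct {
	// PodUID uses the fully capitalized initialism (UID, not Uid).
	PodUID string
}

// Describe returns a short human-readable summary of the dependencies.
// The doc comment starts with the exported identifier's name.
func (d *Dependencies) Describe() string {
	return fmt.Sprintf("dependencies for pod %s", d.PodUID)
}

func main() {
	d := &Dependencies{PodUID: "uuid-1234"}
	fmt.Println(d.Describe())
}
```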
26 changes: 13 additions & 13 deletions cmd/kubelet/app/server.go
@@ -126,9 +126,9 @@ HTTP server: The kubelet can also listen for HTTP and respond to a simple API
return cmd
}

// UnsecuredKubeletDeps returns a KubeletDeps suitable for being run, or an error if the server setup
// UnsecuredDependencies returns a Dependencies suitable for being run, or an error if the server setup
// is not valid. It will not start any background processes, and does not include authentication/authorization
func UnsecuredKubeletDeps(s *options.KubeletServer) (*kubelet.KubeletDeps, error) {
func UnsecuredDependencies(s *options.KubeletServer) (*kubelet.Dependencies, error) {
// Initialize the TLS Options
tlsOptions, err := InitializeTLS(&s.KubeletFlags, &s.KubeletConfiguration)
if err != nil {
@@ -151,7 +151,7 @@ func UnsecuredKubeletDeps(s *options.KubeletServer) (*kubelet.KubeletDeps, error
dockerClient = nil
}

return &kubelet.KubeletDeps{
return &kubelet.Dependencies{
Auth: nil, // default does not enforce auth[nz]
CAdvisorInterface: nil, // cadvisor.New launches background processes (bg http.ListenAndServe, and some bg cleaners), not set here
Cloud: nil, // cloud provider might start background processes
@@ -182,7 +182,7 @@ func getKubeClient(s *options.KubeletServer) (*clientset.Clientset, error) {
}

// Tries to download the kubelet-<node-name> configmap from "kube-system" namespace via the API server and returns a JSON string or error
func getRemoteKubeletConfig(s *options.KubeletServer, kubeDeps *kubelet.KubeletDeps) (string, error) {
func getRemoteKubeletConfig(s *options.KubeletServer, kubeDeps *kubelet.Dependencies) (string, error) {
// TODO(mtaufen): should probably cache clientset and pass into this function rather than regenerate on every request
kubeClient, err := getKubeClient(s)
if err != nil {
@@ -281,11 +281,11 @@ func initKubeletConfigSync(s *options.KubeletServer) (*componentconfig.KubeletCo
}
}

// Run runs the specified KubeletServer with the given KubeletDeps. This should never exit.
// Run runs the specified KubeletServer with the given Dependencies. This should never exit.
// The kubeDeps argument may be nil - if so, it is initialized from the settings on KubeletServer.
// Otherwise, the caller is assumed to have set up the KubeletDeps object and a default one will
// Otherwise, the caller is assumed to have set up the Dependencies object and a default one will
// not be generated.
func Run(s *options.KubeletServer, kubeDeps *kubelet.KubeletDeps) error {
func Run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies) error {
if err := run(s, kubeDeps); err != nil {
return fmt.Errorf("failed to run Kubelet: %v", err)
}
@@ -339,7 +339,7 @@ func validateConfig(s *options.KubeletServer) error {
}

// makeEventRecorder sets up kubeDeps.Recorder if its nil. Its a no-op otherwise.
func makeEventRecorder(s *componentconfig.KubeletConfiguration, kubeDeps *kubelet.KubeletDeps, nodeName types.NodeName) {
func makeEventRecorder(s *componentconfig.KubeletConfiguration, kubeDeps *kubelet.Dependencies, nodeName types.NodeName) {
if kubeDeps.Recorder != nil {
return
}
@@ -354,7 +354,7 @@ func makeEventRecorder(s *componentconfig.KubeletConfiguration, kubeDeps *kubele
}
}

func run(s *options.KubeletServer, kubeDeps *kubelet.KubeletDeps) (err error) {
func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies) (err error) {
// TODO: this should be replaced by a --standalone flag
standaloneMode := (len(s.APIServerList) == 0 && !s.RequireKubeConfig)

@@ -417,7 +417,7 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.KubeletDeps) (err error) {
}

if kubeDeps == nil {
kubeDeps, err = UnsecuredKubeletDeps(s)
kubeDeps, err = UnsecuredDependencies(s)
if err != nil {
return err
}
@@ -817,7 +817,7 @@ func addChaosToClientConfig(s *options.KubeletServer, config *restclient.Config)
// 2 Kubelet binary
// 3 Standalone 'kubernetes' binary
// Eventually, #2 will be replaced with instances of #3
func RunKubelet(kubeFlags *options.KubeletFlags, kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *kubelet.KubeletDeps, runOnce bool, standaloneMode bool) error {
func RunKubelet(kubeFlags *options.KubeletFlags, kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *kubelet.Dependencies, runOnce bool, standaloneMode bool) error {
hostname := nodeutil.GetHostname(kubeFlags.HostnameOverride)
// Query the cloud provider for our node name, default to hostname if kcfg.Cloud == nil
nodeName, err := getNodeName(kubeDeps.Cloud, hostname)
@@ -891,7 +891,7 @@ func RunKubelet(kubeFlags *options.KubeletFlags, kubeCfg *componentconfig.Kubele
return nil
}

func startKubelet(k kubelet.KubeletBootstrap, podCfg *config.PodConfig, kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *kubelet.KubeletDeps) {
func startKubelet(k kubelet.Bootstrap, podCfg *config.PodConfig, kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *kubelet.Dependencies) {
// start the kubelet
go wait.Until(func() { k.Run(podCfg.Updates()) }, 0, wait.NeverStop)

@@ -908,7 +908,7 @@ func startKubelet(k kubelet.KubeletBootstrap, podCfg *config.PodConfig, kubeCfg
}
}

func CreateAndInitKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *kubelet.KubeletDeps, crOptions *options.ContainerRuntimeOptions, standaloneMode bool, hostnameOverride, nodeIP, providerID string) (k kubelet.KubeletBootstrap, err error) {
func CreateAndInitKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *kubelet.Dependencies, crOptions *options.ContainerRuntimeOptions, standaloneMode bool, hostnameOverride, nodeIP, providerID string) (k kubelet.Bootstrap, err error) {
// TODO: block until all sources have delivered at least one update to the channel, or break the sync loop
// up into "per source" synchronizations

5 changes: 3 additions & 2 deletions pkg/kubelet/disk_manager.go
@@ -38,6 +38,7 @@ type diskSpaceManager interface {
IsRuntimeDiskSpaceAvailable() (bool, error)
}

// DiskSpacePolicy defines the free disk for Docker and Root.
type DiskSpacePolicy struct {
// free disk space threshold for filesystem holding docker images.
DockerFreeDiskMB int
@@ -112,10 +113,10 @@ func (dm *realDiskSpaceManager) isSpaceAvailable(fsType string, threshold int, f

func validatePolicy(policy DiskSpacePolicy) error {
if policy.DockerFreeDiskMB < 0 {
return fmt.Errorf("free disk space should be non-negative. Invalid value %d for docker disk space threshold.", policy.DockerFreeDiskMB)
return fmt.Errorf("free disk space should be non-negative; invalid value %d for docker disk space threshold", policy.DockerFreeDiskMB)
}
if policy.RootFreeDiskMB < 0 {
return fmt.Errorf("free disk space should be non-negative. Invalid value %d for root disk space threshold.", policy.RootFreeDiskMB)
return fmt.Errorf("free disk space should be non-negative; invalid value %d for root disk space threshold", policy.RootFreeDiskMB)
}
return nil
}
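
The two `fmt.Errorf` changes above follow golint's error-string convention: messages start lowercase and carry no trailing period, because callers usually wrap them with extra context. A minimal sketch of the rule, using a hypothetical `validateFreeDiskMB` helper rather than code from this commit:

```go
// Illustrative sketch of golint's error-string rule; validateFreeDiskMB is a
// hypothetical helper, not a function from this PR.
package main

import "fmt"

func validateFreeDiskMB(mb int) error {
	if mb < 0 {
		// Lowercase, no trailing punctuation: the caller adds its own context.
		return fmt.Errorf("free disk space should be non-negative; invalid value %d", mb)
	}
	return nil
}

func main() {
	if err := validateFreeDiskMB(-1); err != nil {
		// Wrapping shows why capitals and trailing periods read badly mid-sentence.
		fmt.Printf("validate policy: %v\n", err)
	}
}
```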
51 changes: 26 additions & 25 deletions pkg/kubelet/kubelet.go
@@ -116,10 +116,10 @@ const (
// nodeStatusUpdateRetry specifies how many times kubelet retries when posting node status failed.
nodeStatusUpdateRetry = 5

// Location of container logs.
// ContainerLogsDir is the location of container logs.
ContainerLogsDir = "/var/log/containers"

// max backoff period, exported for the e2e test
// MaxContainerBackOff is the max backoff period, exported for the e2e test
MaxContainerBackOff = 300 * time.Second

// Capacity of the channel for storing pods to kill. A small number should
@@ -156,9 +156,9 @@ const (
// container restarts and image pulls.
backOffPeriod = time.Second * 10

// Period for performing container garbage collection.
// ContainerGCPeriod is the period for performing container garbage collection.
ContainerGCPeriod = time.Minute
// Period for performing image garbage collection.
// ImageGCPeriod is the period for performing image garbage collection.
ImageGCPeriod = 5 * time.Minute

// Minimum number of dead containers to keep in a pod
@@ -178,8 +178,8 @@ type SyncHandler interface {
// Option is a functional option type for Kubelet
type Option func(*Kubelet)

// bootstrapping interface for kubelet, targets the initialization protocol
type KubeletBootstrap interface {
// Bootstrap is a bootstrapping interface for kubelet, targets the initialization protocol
type Bootstrap interface {
GetConfiguration() componentconfig.KubeletConfiguration
BirthCry()
StartGarbageCollection()
@@ -189,21 +189,21 @@ type KubeletBootstrap interface {
RunOnce(<-chan kubetypes.PodUpdate) ([]RunPodResult, error)
}

// create and initialize a Kubelet instance
type KubeletBuilder func(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *KubeletDeps, crOptions *options.ContainerRuntimeOptions, standaloneMode bool, hostnameOverride, nodeIP, providerID string) (KubeletBootstrap, error)
// Builder creates and initializes a Kubelet instance
type Builder func(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Dependencies, crOptions *options.ContainerRuntimeOptions, standaloneMode bool, hostnameOverride, nodeIP, providerID string) (Bootstrap, error)

// KubeletDeps is a bin for things we might consider "injected dependencies" -- objects constructed
// Dependencies is a bin for things we might consider "injected dependencies" -- objects constructed
// at runtime that are necessary for running the Kubelet. This is a temporary solution for grouping
// these objects while we figure out a more comprehensive dependency injection story for the Kubelet.
type KubeletDeps struct {
type Dependencies struct {
// TODO(mtaufen): KubeletBuilder:
// Mesos currently uses this as a hook to let them make their own call to
// let them wrap the KubeletBootstrap that CreateAndInitKubelet returns with
// their own KubeletBootstrap. It's a useful hook. I need to think about what
// a nice home for it would be. There seems to be a trend, between this and
// the Options fields below, of providing hooks where you can add extra functionality
// to the Kubelet for your solution. Maybe we should centralize these sorts of things?
Builder KubeletBuilder
Builder Builder

// TODO(mtaufen): ContainerRuntimeOptions and Options:
// Arrays of functions that can do arbitrary things to the Kubelet and the Runtime
@@ -240,7 +240,7 @@ type KubeletDeps struct {

// makePodSourceConfig creates a config.PodConfig from the given
// KubeletConfiguration or returns an error.
func makePodSourceConfig(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *KubeletDeps, nodeName types.NodeName) (*config.PodConfig, error) {
func makePodSourceConfig(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Dependencies, nodeName types.NodeName) (*config.PodConfig, error) {
manifestURLHeader := make(http.Header)
if kubeCfg.ManifestURLHeader != "" {
pieces := strings.Split(kubeCfg.ManifestURLHeader, ":")
@@ -285,7 +285,7 @@ func getRuntimeAndImageServices(config *componentconfig.KubeletConfiguration) (i

// NewMainKubelet instantiates a new Kubelet object along with all the required internal modules.
// No initialization of Kubelet and its modules should happen here.
func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *KubeletDeps, crOptions *options.ContainerRuntimeOptions, standaloneMode bool, hostnameOverride, nodeIP, providerID string) (*Kubelet, error) {
func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Dependencies, crOptions *options.ContainerRuntimeOptions, standaloneMode bool, hostnameOverride, nodeIP, providerID string) (*Kubelet, error) {
if kubeCfg.RootDirectory == "" {
return nil, fmt.Errorf("invalid root directory %q", kubeCfg.RootDirectory)
}
@@ -514,11 +514,11 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
glog.Warningf("Failed to close iptables lock file: %v", err)
}

if plug, err := network.InitNetworkPlugin(kubeDeps.NetworkPlugins, crOptions.NetworkPluginName, &criNetworkHost{&networkHost{klet}, &network.NoopPortMappingGetter{}}, hairpinMode, kubeCfg.NonMasqueradeCIDR, int(crOptions.NetworkPluginMTU)); err != nil {
plug, err := network.InitNetworkPlugin(kubeDeps.NetworkPlugins, crOptions.NetworkPluginName, &criNetworkHost{&networkHost{klet}, &network.NoopPortMappingGetter{}}, hairpinMode, kubeCfg.NonMasqueradeCIDR, int(crOptions.NetworkPluginMTU))
if err != nil {
return nil, err
} else {
klet.networkPlugin = plug
}
klet.networkPlugin = plug

machineInfo, err := klet.GetCachedMachineInfo()
if err != nil {
@@ -698,14 +698,15 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub
var ips []net.IP
cfgAddress := net.ParseIP(kubeCfg.Address)
if cfgAddress == nil || cfgAddress.IsUnspecified() {
if localIPs, err := allLocalIPsWithoutLoopback(); err != nil {
localIPs, err := allLocalIPsWithoutLoopback()
if err != nil {
return nil, err
} else {
ips = localIPs
}
ips = localIPs
} else {
ips = []net.IP{cfgAddress}
}

ips = append(ips, cloudIPs...)
names := append([]string{klet.GetHostname(), hostnameOverride}, cloudNames...)
klet.serverCertificateManager, err = certificate.NewKubeletServerCertificateManager(klet.kubeClient, kubeCfg, klet.nodeName, ips, names)
@@ -1118,7 +1119,7 @@ func allLocalIPsWithoutLoopback() ([]net.IP, error) {
for _, i := range interfaces {
addresses, err := i.Addrs()
if err != nil {
return nil, fmt.Errorf("could not list the addresses for network interface %v: %v\n", i, err)
return nil, fmt.Errorf("could not list the addresses for network interface %v: %v", i, err)
}
for _, address := range addresses {
switch v := address.(type) {
@@ -1150,7 +1151,7 @@ func (kl *Kubelet) setupDataDirs() error {
return nil
}

// Starts garbage collection threads.
// StartGarbageCollection starts garbage collection threads.
func (kl *Kubelet) StartGarbageCollection() {
loggedContainerGCFailure := false
go wait.Until(func() {
@@ -2128,10 +2129,10 @@ func (kl *Kubelet) ListenAndServeReadOnly(address net.IP, port uint) {
}

// Delete the eligible dead container instances in a pod. Depending on the configuration, the latest dead containers may be kept around.
func (kl *Kubelet) cleanUpContainersInPod(podId types.UID, exitedContainerID string) {
if podStatus, err := kl.podCache.Get(podId); err == nil {
func (kl *Kubelet) cleanUpContainersInPod(podID types.UID, exitedContainerID string) {
if podStatus, err := kl.podCache.Get(podID); err == nil {
removeAll := false
if syncedPod, ok := kl.podManager.GetPodByUID(podId); ok {
if syncedPod, ok := kl.podManager.GetPodByUID(podID); ok {
// When an evicted pod has already synced, all containers can be removed.
removeAll = eviction.PodIsEvicted(syncedPod.Status)
}
@@ -2146,7 +2147,7 @@ func isSyncPodWorthy(event *pleg.PodLifecycleEvent) bool {
}

// Gets the streaming server configuration to use with in-process CRI shims.
func getStreamingConfig(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *KubeletDeps) *streaming.Config {
func getStreamingConfig(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Dependencies) *streaming.Config {
config := &streaming.Config{
// Use a relative redirect (no scheme or host).
BaseURL: &url.URL{
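
Several hunks in kubelet.go above (and in kubelet_cadvisor.go below) drop an `else` branch that followed a `return`, so the error path exits early and the happy path stays unindented. A sketch of that refactor, under the assumption of a hypothetical `initPlugin` helper that is not part of this commit:

```go
// Illustrative sketch of removing an else branch after a return; initPlugin
// and Plugin are hypothetical stand-ins, not identifiers from this PR.
package main

import (
	"errors"
	"fmt"
)

type Plugin struct{ Name string }

func initPlugin(name string) (*Plugin, error) {
	if name == "" {
		return nil, errors.New("plugin name must not be empty")
	}
	return &Plugin{Name: name}, nil
}

func setup(name string) (*Plugin, error) {
	// Before: if plug, err := initPlugin(name); err != nil { return nil, err } else { use plug }
	// After: handle the error and return early, then continue on the happy path.
	plug, err := initPlugin(name)
	if err != nil {
		return nil, err
	}
	return plug, nil
}

func main() {
	plug, err := setup("kubenet")
	if err != nil {
		fmt.Println("setup failed:", err)
		return
	}
	fmt.Println("initialized plugin:", plug.Name)
}
```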
18 changes: 9 additions & 9 deletions pkg/kubelet/kubelet_cadvisor.go
@@ -61,19 +61,19 @@ func (kl *Kubelet) RootFsInfo() (cadvisorapiv2.FsInfo, error) {
return kl.cadvisor.RootFsInfo()
}

// Returns stats (from Cadvisor) for a non-Kubernetes container.
// GetRawContainerInfo returns stats (from Cadvisor) for a non-Kubernetes container.
func (kl *Kubelet) GetRawContainerInfo(containerName string, req *cadvisorapi.ContainerInfoRequest, subcontainers bool) (map[string]*cadvisorapi.ContainerInfo, error) {
if subcontainers {
return kl.cadvisor.SubcontainerInfo(containerName, req)
} else {
containerInfo, err := kl.cadvisor.ContainerInfo(containerName, req)
if err != nil {
return nil, err
}
return map[string]*cadvisorapi.ContainerInfo{
containerInfo.Name: containerInfo,
}, nil
}

containerInfo, err := kl.cadvisor.ContainerInfo(containerName, req)
if err != nil {
return nil, err
}
return map[string]*cadvisorapi.ContainerInfo{
containerInfo.Name: containerInfo,
}, nil
}

// GetVersionInfo returns information about the version of cAdvisor in use.
22 changes: 11 additions & 11 deletions pkg/kubelet/kubelet_cadvisor_test.go
@@ -30,7 +30,7 @@ import (
)

func TestGetContainerInfo(t *testing.T) {
cadvisorApiFailure := fmt.Errorf("cAdvisor failure")
cadvisorAPIFailure := fmt.Errorf("cAdvisor failure")
runtimeError := fmt.Errorf("List containers error")
tests := []struct {
name string
@@ -40,7 +40,7 @@ func TestGetContainerInfo(t *testing.T) {
runtimeError error
podList []*kubecontainertest.FakePod
requestedPodFullName string
requestedPodUid types.UID
requestedPodUID types.UID
requestedContainerName string
expectDockerContainerCall bool
mockError error
@@ -73,7 +73,7 @@ func TestGetContainerInfo(t *testing.T) {
},
},
requestedPodFullName: "qux_ns",
requestedPodUid: "",
requestedPodUID: "",
requestedContainerName: "foo",
expectDockerContainerCall: true,
mockError: nil,
@@ -102,11 +102,11 @@ func TestGetContainerInfo(t *testing.T) {
},
},
requestedPodFullName: "qux_ns",
requestedPodUid: "uuid",
requestedPodUID: "uuid",
requestedContainerName: "foo",
expectDockerContainerCall: true,
mockError: cadvisorApiFailure,
expectedError: cadvisorApiFailure,
mockError: cadvisorAPIFailure,
expectedError: cadvisorAPIFailure,
expectStats: false,
},
{
@@ -117,7 +117,7 @@ func TestGetContainerInfo(t *testing.T) {
runtimeError: nil,
podList: []*kubecontainertest.FakePod{},
requestedPodFullName: "qux",
requestedPodUid: "",
requestedPodUID: "",
requestedContainerName: "foo",
expectDockerContainerCall: false,
mockError: nil,
@@ -132,7 +132,7 @@ func TestGetContainerInfo(t *testing.T) {
runtimeError: runtimeError,
podList: []*kubecontainertest.FakePod{},
requestedPodFullName: "qux",
requestedPodUid: "",
requestedPodUID: "",
requestedContainerName: "foo",
mockError: nil,
expectedError: runtimeError,
@@ -146,7 +146,7 @@ func TestGetContainerInfo(t *testing.T) {
runtimeError: nil,
podList: []*kubecontainertest.FakePod{},
requestedPodFullName: "qux_ns",
requestedPodUid: "",
requestedPodUID: "",
requestedContainerName: "foo",
mockError: nil,
expectedError: kubecontainer.ErrContainerNotFound,
@@ -174,7 +174,7 @@ func TestGetContainerInfo(t *testing.T) {
},
},
requestedPodFullName: "qux_ns",
requestedPodUid: "",
requestedPodUID: "",
requestedContainerName: "foo",
mockError: nil,
expectedError: kubecontainer.ErrContainerNotFound,
@@ -195,7 +195,7 @@ func TestGetContainerInfo(t *testing.T) {
fakeRuntime.Err = tc.runtimeError
fakeRuntime.PodList = tc.podList

stats, err := kubelet.GetContainerInfo(tc.requestedPodFullName, tc.requestedPodUid, tc.requestedContainerName, cadvisorReq)
stats, err := kubelet.GetContainerInfo(tc.requestedPodFullName, tc.requestedPodUID, tc.requestedContainerName, cadvisorReq)
assert.Equal(t, tc.expectedError, err)

if tc.expectStats {
(Diffs for the remaining 21 changed files are not shown.)
