Commit

Fix the rest of the code
smarterclayton committed Apr 29, 2016
1 parent 8d0187a commit fdb110c
Showing 129 changed files with 625 additions and 663 deletions.
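The hunks below all follow one pattern: integer fields in the API types (ports, sync counts, thresholds, cache sizes) are now int32, so call sites that still take a plain int convert explicitly. A minimal, self-contained Go sketch of that pattern — the type and helper names here are hypothetical stand-ins, not code from this commit:

package main

import "fmt"

// servicePort mimics an API type whose Port field is now int32.
type servicePort struct {
	Name string
	Port int32
}

// generateSRVRecord stands in for a helper that still takes a plain int port.
func generateSRVRecord(name string, port int) {
	fmt.Printf("SRV %s -> %d\n", name, port)
}

func main() {
	p := servicePort{Name: "dns", Port: 53}
	// int32 -> int is a widening (or same-size) conversion on supported
	// platforms, so the explicit cast never loses information here.
	generateSRVRecord(p.Name, int(p.Port))
}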
4 changes: 2 additions & 2 deletions cluster/addons/dns/kube2sky/kube2sky.go
@@ -192,7 +192,7 @@ func (ks *kube2sky) generateRecordsForHeadlessService(subdomain string, e *kapi.
endpointPort := &e.Subsets[idx].Ports[portIdx]
portSegment := buildPortSegmentString(endpointPort.Name, endpointPort.Protocol)
if portSegment != "" {
- err := ks.generateSRVRecord(subdomain, portSegment, recordLabel, recordKey, endpointPort.Port)
+ err := ks.generateSRVRecord(subdomain, portSegment, recordLabel, recordKey, int(endpointPort.Port))
if err != nil {
return err
}
@@ -343,7 +343,7 @@ func (ks *kube2sky) generateRecordsForPortalService(subdomain string, service *k
port := &service.Spec.Ports[i]
portSegment := buildPortSegmentString(port.Name, port.Protocol)
if portSegment != "" {
- err = ks.generateSRVRecord(subdomain, portSegment, recordLabel, subdomain, port.Port)
+ err = ks.generateSRVRecord(subdomain, portSegment, recordLabel, subdomain, int(port.Port))
if err != nil {
return err
}
8 changes: 4 additions & 4 deletions cluster/addons/dns/kube2sky/kube2sky_test.go
@@ -111,7 +111,7 @@ type hostPort struct {
func getHostPort(service *kapi.Service) *hostPort {
return &hostPort{
Host: service.Spec.ClusterIP,
- Port: service.Spec.Ports[0].Port,
+ Port: int(service.Spec.Ports[0].Port),
}
}

@@ -181,7 +181,7 @@ func newService(namespace, serviceName, clusterIP, portName string, portNumber i
Spec: kapi.ServiceSpec{
ClusterIP: clusterIP,
Ports: []kapi.ServicePort{
- {Port: portNumber, Name: portName, Protocol: "TCP"},
+ {Port: int32(portNumber), Name: portName, Protocol: "TCP"},
},
},
}
@@ -212,7 +212,7 @@ func newSubset() kapi.EndpointSubset {

func newSubsetWithOnePort(portName string, port int, ips ...string) kapi.EndpointSubset {
subset := newSubset()
- subset.Ports = append(subset.Ports, kapi.EndpointPort{Port: port, Name: portName, Protocol: "TCP"})
+ subset.Ports = append(subset.Ports, kapi.EndpointPort{Port: int32(port), Name: portName, Protocol: "TCP"})
for _, ip := range ips {
subset.Addresses = append(subset.Addresses, kapi.EndpointAddress{IP: ip})
}
@@ -221,7 +221,7 @@ func newSubsetWithOnePort(portName string, port int, ips ...string) kapi.Endpoin

func newSubsetWithTwoPorts(portName1 string, portNumber1 int, portName2 string, portNumber2 int, ips ...string) kapi.EndpointSubset {
subset := newSubsetWithOnePort(portName1, portNumber1, ips...)
- subset.Ports = append(subset.Ports, kapi.EndpointPort{Port: portNumber2, Name: portName2, Protocol: "TCP"})
+ subset.Ports = append(subset.Ports, kapi.EndpointPort{Port: int32(portNumber2), Name: portName2, Protocol: "TCP"})
return subset
}
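Going the other way, the test helpers above keep int parameters for convenience and convert to int32 only when building the API objects. A rough sketch of that shape, using hypothetical types rather than the actual kapi package:

package main

import "fmt"

// endpointPort mimics kapi.EndpointPort, whose Port field is now int32.
type endpointPort struct {
	Name string
	Port int32
}

// newPort stands in for a test helper that still accepts a plain int.
func newPort(name string, port int) endpointPort {
	// int -> int32 narrows on 64-bit platforms, but valid port numbers
	// (0-65535) always fit, so the conversion is lossless in practice.
	return endpointPort{Name: name, Port: int32(port)}
}

func main() {
	fmt.Println(newPort("dns", 53))
}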

34 changes: 17 additions & 17 deletions cmd/kube-controller-manager/app/controllermanager.go
@@ -126,7 +126,7 @@ func Run(s *options.CMServer) error {
kubeconfig.ContentConfig.ContentType = s.ContentType
// Override kubeconfig qps/burst settings from flags
kubeconfig.QPS = s.KubeAPIQPS
- kubeconfig.Burst = s.KubeAPIBurst
+ kubeconfig.Burst = int(s.KubeAPIBurst)

kubeClient, err := client.New(kubeconfig)
if err != nil {
@@ -144,7 +144,7 @@ func Run(s *options.CMServer) error {
mux.Handle("/metrics", prometheus.Handler())

server := &http.Server{
- Addr: net.JoinHostPort(s.Address, strconv.Itoa(s.Port)),
+ Addr: net.JoinHostPort(s.Address, strconv.Itoa(int(s.Port))),
Handler: mux,
}
glog.Fatal(server.ListenAndServe())
@@ -198,20 +198,20 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
informers[reflect.TypeOf(&api.Pod{})] = podInformer

go endpointcontroller.NewEndpointController(podInformer, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "endpoint-controller"))).
- Run(s.ConcurrentEndpointSyncs, wait.NeverStop)
+ Run(int(s.ConcurrentEndpointSyncs), wait.NeverStop)
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

go replicationcontroller.NewReplicationManager(
podInformer,
clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "replication-controller")),
ResyncPeriod(s),
replicationcontroller.BurstReplicas,
- s.LookupCacheSizeForRC,
- ).Run(s.ConcurrentRCSyncs, wait.NeverStop)
+ int(s.LookupCacheSizeForRC),
+ ).Run(int(s.ConcurrentRCSyncs), wait.NeverStop)
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

if s.TerminatedPodGCThreshold > 0 {
- go gc.New(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "garbage-collector")), ResyncPeriod(s), s.TerminatedPodGCThreshold).
+ go gc.New(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "garbage-collector")), ResyncPeriod(s), int(s.TerminatedPodGCThreshold)).
Run(wait.NeverStop)
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
}
@@ -224,8 +224,8 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
// this cidr has been validated already
_, clusterCIDR, _ := net.ParseCIDR(s.ClusterCIDR)
nodeController := nodecontroller.NewNodeController(cloud, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "node-controller")),
- s.PodEvictionTimeout.Duration, flowcontrol.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
- flowcontrol.NewTokenBucketRateLimiter(s.DeletingPodsQps, s.DeletingPodsBurst),
+ s.PodEvictionTimeout.Duration, flowcontrol.NewTokenBucketRateLimiter(s.DeletingPodsQps, int(s.DeletingPodsBurst)),
+ flowcontrol.NewTokenBucketRateLimiter(s.DeletingPodsQps, int(s.DeletingPodsBurst)),
s.NodeMonitorGracePeriod.Duration, s.NodeStartupGracePeriod.Duration, s.NodeMonitorPeriod.Duration, clusterCIDR, s.AllocateNodeCIDRs)
nodeController.Run(s.NodeSyncPeriod.Duration)
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
@@ -268,7 +268,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
ReplenishmentResyncPeriod: ResyncPeriod(s),
GroupKindsToReplenish: groupKindsToReplenish,
}
- go resourcequotacontroller.NewResourceQuotaController(resourceQuotaControllerOptions).Run(s.ConcurrentResourceQuotaSyncs, wait.NeverStop)
+ go resourcequotacontroller.NewResourceQuotaController(resourceQuotaControllerOptions).Run(int(s.ConcurrentResourceQuotaSyncs), wait.NeverStop)
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

// If apiserver is not running we should wait for some time and fail only then. This is particularly
@@ -299,7 +299,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
glog.Fatalf("Failed to get supported resources from server: %v", err)
}
namespaceController := namespacecontroller.NewNamespaceController(namespaceKubeClient, namespaceClientPool, groupVersionResources, s.NamespaceSyncPeriod.Duration, api.FinalizerKubernetes)
- go namespaceController.Run(s.ConcurrentNamespaceSyncs, wait.NeverStop)
+ go namespaceController.Run(int(s.ConcurrentNamespaceSyncs), wait.NeverStop)
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

groupVersion := "extensions/v1beta1"
@@ -324,29 +324,29 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig

if containsResource(resources, "daemonsets") {
glog.Infof("Starting daemon set controller")
- go daemon.NewDaemonSetsController(podInformer, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "daemon-set-controller")), ResyncPeriod(s), s.LookupCacheSizeForDaemonSet).
- Run(s.ConcurrentDaemonSetSyncs, wait.NeverStop)
+ go daemon.NewDaemonSetsController(podInformer, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "daemon-set-controller")), ResyncPeriod(s), int(s.LookupCacheSizeForDaemonSet)).
+ Run(int(s.ConcurrentDaemonSetSyncs), wait.NeverStop)
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
}

if containsResource(resources, "jobs") {
glog.Infof("Starting job controller")
go job.NewJobController(podInformer, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "job-controller"))).
- Run(s.ConcurrentJobSyncs, wait.NeverStop)
+ Run(int(s.ConcurrentJobSyncs), wait.NeverStop)
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
}

if containsResource(resources, "deployments") {
glog.Infof("Starting deployment controller")
go deployment.NewDeploymentController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "deployment-controller")), ResyncPeriod(s)).
- Run(s.ConcurrentDeploymentSyncs, wait.NeverStop)
+ Run(int(s.ConcurrentDeploymentSyncs), wait.NeverStop)
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
}

if containsResource(resources, "replicasets") {
glog.Infof("Starting ReplicaSet controller")
- go replicaset.NewReplicaSetController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "replicaset-controller")), ResyncPeriod(s), replicaset.BurstReplicas, s.LookupCacheSizeForRS).
- Run(s.ConcurrentRSSyncs, wait.NeverStop)
+ go replicaset.NewReplicaSetController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "replicaset-controller")), ResyncPeriod(s), replicaset.BurstReplicas, int(s.LookupCacheSizeForRS)).
+ Run(int(s.ConcurrentRSSyncs), wait.NeverStop)
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
}
}
@@ -364,7 +364,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
pvRecycler, err := persistentvolumecontroller.NewPersistentVolumeRecycler(
clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-recycler")),
s.PVClaimBinderSyncPeriod.Duration,
- s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MaximumRetry,
+ int(s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MaximumRetry),
ProbeRecyclableVolumePlugins(s.VolumeConfiguration),
cloud,
)
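In controllermanager.go the same conversions also feed formatting and rate-limiter helpers that take int, for example when building the listen address from an int32 port. A small illustrative sketch; the port value is just an example:

package main

import (
	"fmt"
	"net"
	"strconv"
)

func main() {
	var port int32 = 10252 // e.g. the controller manager's status port
	// strconv.Itoa takes an int, so the int32 field is converted first,
	// mirroring net.JoinHostPort(s.Address, strconv.Itoa(int(s.Port))) above.
	addr := net.JoinHostPort("0.0.0.0", strconv.Itoa(int(port)))
	fmt.Println(addr) // prints 0.0.0.0:10252
}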
