Re-GET nodes during CIDR allocation (to avoid cascading bad resource version).
cjcullen committed Feb 12, 2016
1 parent 32d844e commit 52b1612
Showing 1 changed file with 10 additions and 4 deletions.
14 changes: 10 additions & 4 deletions pkg/controller/node/nodecontroller.go
@@ -514,14 +514,20 @@ func (nc *NodeController) reconcileNodeCIDRs(nodes *api.NodeList) {
 	}
 	for _, node := range nodes.Items {
 		if node.Spec.PodCIDR == "" {
+			// Re-GET node (because ours might be stale by now).
+			n, err := nc.kubeClient.Core().Nodes().Get(node.Name)
+			if err != nil {
+				glog.Errorf("Failed to get node %q: %v", node.Name, err)
+				continue
+			}
 			podCIDR, found := availableCIDRs.PopAny()
 			if !found {
-				nc.recordNodeStatusChange(&node, "CIDRNotAvailable")
+				nc.recordNodeStatusChange(n, "CIDRNotAvailable")
 				continue
 			}
-			glog.V(4).Infof("Assigning node %s CIDR %s", node.Name, podCIDR)
-			node.Spec.PodCIDR = podCIDR
-			if _, err := nc.kubeClient.Core().Nodes().Update(&node); err != nil {
+			glog.V(4).Infof("Assigning node %s CIDR %s", n.Name, podCIDR)
+			n.Spec.PodCIDR = podCIDR
+			if _, err := nc.kubeClient.Core().Nodes().Update(n); err != nil {
 				nc.recordNodeStatusChange(&node, "CIDRAssignmentFailed")
 			}
 		}
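For context on the failure mode the commit message refers to: the reconcile loop walks a node list that may be stale by the time a given node is written, so an Update built from the listed object carries an old resourceVersion and is rejected, and rejected again on every pass. The sketch below is a minimal, self-contained Go illustration of the re-GET-before-update pattern the diff introduces. The Node struct, nodeClient interface, fakeClient, and assignCIDR function are hypothetical stand-ins, not the real Kubernetes client API; only the ordering (fresh Get, mutate the fresh copy, Update) mirrors the change above.

// Sketch only: stand-in types, not the Kubernetes client API.
package main

import (
	"errors"
	"fmt"
)

// Node models just the fields the sketch needs.
type Node struct {
	Name            string
	ResourceVersion string // optimistic-concurrency token checked on Update
	PodCIDR         string
}

// nodeClient is a stand-in for the node client used in the diff
// (nc.kubeClient.Core().Nodes()).
type nodeClient interface {
	Get(name string) (*Node, error)
	Update(n *Node) (*Node, error) // rejects writes carrying a stale ResourceVersion
}

// assignCIDR mirrors the ordering in the patched loop: re-GET the node,
// mutate the fresh copy, then Update the fresh copy.
func assignCIDR(c nodeClient, name, podCIDR string) error {
	fresh, err := c.Get(name) // re-GET node (ours might be stale by now)
	if err != nil {
		return fmt.Errorf("failed to get node %q: %v", name, err)
	}
	fresh.PodCIDR = podCIDR
	if _, err := c.Update(fresh); err != nil {
		return fmt.Errorf("CIDR assignment failed for node %q: %v", name, err)
	}
	return nil
}

// fakeClient is an in-memory client that enforces the resourceVersion check,
// just to show why updating from a stale copy fails.
type fakeClient struct{ stored Node }

func (f *fakeClient) Get(name string) (*Node, error) {
	if name != f.stored.Name {
		return nil, errors.New("not found")
	}
	n := f.stored // return a copy, as an API read would
	return &n, nil
}

func (f *fakeClient) Update(n *Node) (*Node, error) {
	if n.ResourceVersion != f.stored.ResourceVersion {
		return nil, errors.New("conflict: stale resourceVersion")
	}
	f.stored = *n
	f.stored.ResourceVersion += "'" // bump the version on every successful write
	return &f.stored, nil
}

func main() {
	c := &fakeClient{stored: Node{Name: "node-1", ResourceVersion: "42"}}

	stale, _ := c.Get("node-1")
	c.stored.ResourceVersion = "43" // someone else updates the node meanwhile

	stale.PodCIDR = "10.244.1.0/24"
	_, err := c.Update(stale)
	fmt.Println("update with stale copy:", err) // conflict

	fmt.Println("update after re-GET:", assignCIDR(c, "node-1", "10.244.1.0/24")) // nil
}

Running the sketch prints a conflict for the stale-copy update and success for the re-GET path, which is the behavior the patched loop relies on to stop repeated failures from one stale list.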
