Merge pull request openshift#14447 from dcbw/sdn-normal-cni-plugin
Merged by openshift-bot
OpenShift Bot authored Jun 10, 2017
2 parents a659cf7 + f111845 commit 1aded21
Showing 12 changed files with 21 additions and 261 deletions.
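In short, this PR converts openshift-sdn from a kubelet-embedded network plugin into a normal standalone CNI plugin. The 80-openshift-sdn.conf CNI config file is no longer shipped by the install script, RPM spec, or dind images; instead, the node process removes any stale copy at startup and writes the file only once SDN setup completes, which is how kubelet now learns that the pod network is ready. The in-process readiness plumbing (the podNetworkReady and kubeletInitReady channels) and the host-local IPAM garbage-collection workaround, both of which depended on running inside kubelet, are deleted.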
contrib/node/install-sdn.sh (1 change: 0 additions & 1 deletion)

@@ -16,7 +16,6 @@ os::provision::install-sdn() {

  local osdn_plugin_path="${deployed_root}/pkg/sdn/plugin"
  install -m u+rwx,g+rwx,o+rx "${osdn_plugin_path}/bin/openshift-sdn-ovs" "${target_bindir}"
- install -m u+rw,g+rw,o+r "${osdn_plugin_path}/sdn-cni-plugin/80-openshift-sdn.conf" "${target_confdir}"

  install -m u+rwx,g+rwx,o+rx "${binaries_path}/sdn-cni-plugin" "${target_cnidir}/openshift-sdn"
  install -m u+rwx,g+rwx,o+rx "${binaries_path}/host-local" "${target_cnidir}"
contrib/systemd/containerized/origin-node.service (2 changes: 1 addition & 1 deletion)

@@ -8,7 +8,7 @@ PartOf=docker.service
  [Service]
  EnvironmentFile=/etc/sysconfig/origin-node
  ExecStartPre=-/usr/bin/docker rm -f origin-node
- ExecStart=/usr/bin/docker run --name origin-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/origin-node -v /:/rootfs:ro,rslave -v /etc/systemd/system:/host-etc/systemd/system -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /lib/modules:/lib/modules -v /run:/run -v /sys:/sys:rw -v /var/lib/docker:/var/lib/docker -v /etc/origin/node:/etc/origin/node -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /var/lib/origin:/var/lib/origin:rslave -v /var/log:/var/log -v /dev:/dev -e HOST=/rootfs -e HOST_ETC=/host-etc openshift/node
+ ExecStart=/usr/bin/docker run --name origin-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/origin-node -v /:/rootfs:ro,rslave -v /etc/systemd/system:/host-etc/systemd/system -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /lib/modules:/lib/modules -v /run:/run -v /sys:/sys:rw -v /var/lib/docker:/var/lib/docker -v /etc/origin/node:/etc/origin/node -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /var/lib/cni:/var/lib/cni -v /etc/cni/net.d:/etc/cni/net.d -v /var/lib/origin:/var/lib/origin:rslave -v /var/log:/var/log -v /dev:/dev -e HOST=/rootfs -e HOST_ETC=/host-etc openshift/node
  ExecStartPost=/usr/bin/sleep 10
  ExecStop=/usr/bin/docker stop origin-node
  Restart=always
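(The only change above is the added -v /etc/cni/net.d:/etc/cni/net.d bind mount: the node process inside the container must be able to remove and re-create the CNI config file in the host's /etc/cni/net.d, the directory kubelet scans for CNI configurations.)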
hack/dind-cluster.sh (1 change: 0 additions & 1 deletion)

@@ -460,7 +460,6 @@ function copy-runtime() {
  cp "$(os::util::find::built_binary sdn-cni-plugin)" "${target}/openshift-sdn"
  local osdn_plugin_path="${origin_root}/pkg/sdn/plugin"
  cp "${osdn_plugin_path}/bin/openshift-sdn-ovs" "${target}"
- cp "${osdn_plugin_path}/sdn-cni-plugin/80-openshift-sdn.conf" "${target}"
  }

  function wait-for-cluster() {
images/dind/node/Dockerfile (3 changes: 1 addition & 2 deletions)

@@ -65,7 +65,6 @@ RUN ln -sf /data/openshift-sdn-ovs /usr/local/bin/ && \
  ln -sf /data/openshift /usr/local/bin/openshift-f5-router && \
  ln -sf /data/openshift-sdn /opt/cni/bin/ && \
  ln -sf /data/host-local /opt/cni/bin/ && \
- ln -sf /data/loopback /opt/cni/bin/ && \
- ln -sf /data/80-openshift-sdn.conf /etc/cni/net.d/
+ ln -sf /data/loopback /opt/cni/bin/

  ENV KUBECONFIG /data/openshift.local.config/master/admin.kubeconfig
origin.spec (4 changes: 0 additions & 4 deletions)

@@ -372,9 +372,6 @@ mkdir -p %{buildroot}%{_sharedstatedir}/origin

  # Install sdn scripts
  install -d -m 0755 %{buildroot}%{_sysconfdir}/cni/net.d
- pushd pkg/sdn/plugin/sdn-cni-plugin
- install -p -m 0644 80-openshift-sdn.conf %{buildroot}%{_sysconfdir}/cni/net.d
- popd
  pushd pkg/sdn/plugin/bin
  install -p -m 0755 openshift-sdn-ovs %{buildroot}%{_bindir}/openshift-sdn-ovs
  popd

@@ -551,7 +548,6 @@ fi
  %dir /opt/cni/bin
  %{_bindir}/openshift-sdn-ovs
  %{_unitdir}/%{name}-node.service.d/openshift-sdn-ovs.conf
- %{_sysconfdir}/cni/net.d/80-openshift-sdn.conf
  /opt/cni/bin/*

  %posttrans sdn-ovs
pkg/cmd/server/kubernetes/node/node_config.go (8 changes: 0 additions & 8 deletions)

@@ -31,7 +31,6 @@ import (
  "k8s.io/kubernetes/pkg/cloudprovider"
  "k8s.io/kubernetes/pkg/kubelet"
  "k8s.io/kubernetes/pkg/kubelet/dockertools"
- kubeletnetwork "k8s.io/kubernetes/pkg/kubelet/network"
  kubeletcni "k8s.io/kubernetes/pkg/kubelet/network/cni"
  kubeletserver "k8s.io/kubernetes/pkg/kubelet/server"
  kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"

@@ -268,13 +267,6 @@ func BuildKubernetesNodeConfig(options configapi.NodeConfig, enableProxy, enable
  }
  deps.Cloud = cloud

- // Replace the kubelet-created CNI plugin with the SDN plugin
- // Kubelet must be initialized with NetworkPluginName="cni" but
- // the SDN plugin (if available) needs to be the only one used
- if sdnPlugin != nil {
- deps.NetworkPlugins = []kubeletnetwork.NetworkPlugin{sdnPlugin}
- }
-
  // provide any config overrides
  //deps.NodeName = options.NodeName
  deps.KubeClient = externalKubeClient
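With the override gone, kubelet no longer receives the SDN plugin object directly; it discovers it through the standard CNI machinery instead. For illustration (the exact flags are an assumption about the kubelet configuration of this era, not part of this diff), a kubelet started as

    kubelet --network-plugin=cni \
        --cni-conf-dir=/etc/cni/net.d \
        --cni-bin-dir=/opt/cni/bin

would pick up the openshift-sdn config file written at node startup and execute the openshift-sdn binary that the packaging changes above install into /opt/cni/bin.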
pkg/sdn/plugin/node.go (50 changes: 16 additions & 34 deletions)

@@ -2,7 +2,9 @@ package plugin

  import (
  "fmt"
+ "io/ioutil"
  "net"
+ "os"
  osexec "os/exec"
  "strings"
  "sync"

@@ -59,8 +61,6 @@ type OsdnNode struct {
  localSubnetCIDR string
  localIP string
  hostName string
- podNetworkReady chan struct{}
- kubeletInitReady chan struct{}
  iptablesSyncPeriod time.Duration
  mtu uint32

@@ -98,6 +98,10 @@ func NewNodePlugin(pluginName string, osClient *osclient.Client, kClient kclient
  return nil, nil
  }

+ // If our CNI config file exists, remove it so that kubelet doesn't think
+ // we're ready yet
+ os.Remove("/etc/cni/net.d/80-openshift-sdn.conf")
+
  log.Infof("Initializing SDN node of type %q with configured hostname %q (IP %q), iptables sync period %q", pluginName, hostname, selfIP, iptablesSyncPeriod.String())
  if hostname == "" {
  output, err := kexec.New().Command("uname", "-n").CombinedOutput()

@@ -136,8 +140,6 @@ func NewNodePlugin(pluginName string, osClient *osclient.Client, kClient kclient
  podManager: newPodManager(kClient, policy, mtu, oc),
  localIP: selfIP,
  hostName: hostname,
- podNetworkReady: make(chan struct{}),
- kubeletInitReady: make(chan struct{}),
  iptablesSyncPeriod: iptablesSyncPeriod,
  mtu: mtu,
  egressPolicies: make(map[uint32][]osapi.EgressNetworkPolicy),

@@ -232,20 +234,6 @@ func (node *OsdnNode) Start() error {
  return err
  }

- // Wait for kubelet to init the plugin so we get a knetwork.Host
- log.V(5).Infof("Waiting for kubelet network plugin initialization")
- <-node.kubeletInitReady
- // Wait for kubelet itself to finish initializing
- kwait.PollInfinite(100*time.Millisecond,
- func() (bool, error) {
- if node.host.GetRuntime() == nil {
- return false, nil
- }
- return true, nil
- })
-
- //**** After this point, all OsdnNode fields have been initialized
-
  nodeIPTables := newNodeIPTables(node.networkInfo.ClusterNetwork.String(), node.iptablesSyncPeriod)
  if err = nodeIPTables.Setup(); err != nil {
  return fmt.Errorf("failed to set up iptables: %v", err)

@@ -267,7 +255,7 @@ func (node *OsdnNode) Start() error {
  node.watchServices()

  log.V(5).Infof("Starting openshift-sdn pod manager")
- if err := node.podManager.Start(cniserver.CNIServerSocketPath, node.host, node.localSubnetCIDR, node.networkInfo.ClusterNetwork); err != nil {
+ if err := node.podManager.Start(cniserver.CNIServerSocketPath, node.localSubnetCIDR, node.networkInfo.ClusterNetwork); err != nil {
  return err
  }

@@ -290,9 +278,16 @@ func (node *OsdnNode) Start() error {
  }

  log.V(5).Infof("openshift-sdn network plugin ready")
- node.markPodNetworkReady()
-
- return nil
+ // Write our CNI config file out to disk to signal to kubelet that
+ // our network plugin is ready
+ return ioutil.WriteFile("/etc/cni/net.d/80-openshift-sdn.conf", []byte(`
+ {
+   "cniVersion": "0.1.0",
+   "name": "openshift-sdn",
+   "type": "openshift-sdn"
+ }
+ `), 0644)
  }

  // FIXME: this should eventually go into kubelet via a CNI UPDATE/CHANGE action

@@ -333,19 +328,6 @@ func (node *OsdnNode) GetLocalPods(namespace string) ([]kapi.Pod, error) {
  return pods, nil
  }

- func (node *OsdnNode) markPodNetworkReady() {
- close(node.podNetworkReady)
- }
-
- func (node *OsdnNode) IsPodNetworkReady() error {
- select {
- case <-node.podNetworkReady:
- return nil
- default:
- return fmt.Errorf("SDN pod network is not ready")
- }
- }
-
  func isServiceChanged(oldsvc, newsvc *kapi.Service) bool {
  if len(oldsvc.Spec.Ports) == len(newsvc.Spec.Ports) {
  for i := range oldsvc.Spec.Ports {
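The readiness handshake is now file-based: NewNodePlugin() deletes the config so kubelet treats the network as not ready, and Start() re-creates it once OVS, iptables, and the pod manager are up. One way to observe this from a node shell (an illustrative session; only the file path and contents come from the code above):

    # while the SDN is still initializing, kubelet finds no CNI config
    $ ls /etc/cni/net.d/
    # once Start() completes, the config appears and pods can be scheduled
    $ cat /etc/cni/net.d/80-openshift-sdn.conf
    {
      "cniVersion": "0.1.0",
      "name": "openshift-sdn",
      "type": "openshift-sdn"
    }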
pkg/sdn/plugin/plugin.go (65 changes: 0 additions & 65 deletions)
This file was deleted.
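(plugin.go contained the kubelet-side glue; by all appearances it implemented kubelet's network-plugin interface for the sdnPlugin object that node_config.go used to inject into deps.NetworkPlugins. With the config-file handshake above, none of that interface is needed, so the file goes away wholesale.)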

pkg/sdn/plugin/pod.go (5 changes: 1 addition & 4 deletions)

@@ -12,7 +12,6 @@ import (
  "github.com/golang/glog"

  kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
- knetwork "k8s.io/kubernetes/pkg/kubelet/network"
  kubehostport "k8s.io/kubernetes/pkg/kubelet/network/hostport"

  cnitypes "github.com/containernetworking/cni/pkg/types"

@@ -50,7 +49,6 @@ type podManager struct {
  // and thus can be set from Start()
  ipamConfig []byte
  hostportSyncer kubehostport.HostportSyncer
- host knetwork.Host
  }

  // Creates a new live podManager; used by node code

@@ -126,8 +124,7 @@ func getIPAMConfig(clusterNetwork *net.IPNet, localSubnet string) ([]byte, error
  }

  // Start the CNI server and start processing requests from it
- func (m *podManager) Start(socketPath string, host knetwork.Host, localSubnetCIDR string, clusterNetwork *net.IPNet) error {
- m.host = host
+ func (m *podManager) Start(socketPath string, localSubnetCIDR string, clusterNetwork *net.IPNet) error {
  m.hostportSyncer = kubehostport.NewHostportSyncer()

  var err error
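(The host field existed only so the pod manager could reach the container runtime via host.GetRuntime() for the IPAM garbage collector removed in pod_linux.go below; with that gone, both the field and the Start() parameter are dropped, and the CNI server no longer depends on kubelet internals.)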
pkg/sdn/plugin/pod_linux.go (91 changes: 0 additions & 91 deletions)

@@ -4,11 +4,7 @@ package plugin

  import (
  "fmt"
- "io/ioutil"
- "net"
- "path/filepath"
  "strconv"
- "strings"
  "syscall"

@@ -19,7 +15,6 @@ import (

  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
  kerrors "k8s.io/apimachinery/pkg/util/errors"
- ksets "k8s.io/apimachinery/pkg/util/sets"
  kapi "k8s.io/kubernetes/pkg/api"
  kapiv1 "k8s.io/kubernetes/pkg/api/v1"
  kcontainer "k8s.io/kubernetes/pkg/kubelet/container"

@@ -233,83 +228,6 @@ func podIsExited(p *kcontainer.Pod) bool {
  return true
  }

- // getNonExitedPods returns a list of pods that have at least one running container.
- func (m *podManager) getNonExitedPods() ([]*kcontainer.Pod, error) {
- ret := []*kcontainer.Pod{}
- pods, err := m.host.GetRuntime().GetPods(true)
- if err != nil {
- return nil, fmt.Errorf("failed to retrieve pods from runtime: %v", err)
- }
- for _, p := range pods {
- if podIsExited(p) {
- continue
- }
- ret = append(ret, p)
- }
- return ret, nil
- }
-
- // ipamGarbageCollection will release unused IPs from dead containers that
- // the CNI plugin was never notified had died. openshift-sdn uses the CNI
- // host-local IPAM plugin, which stores allocated IPs in a file in
- // /var/lib/cni/network. Each file in this directory has as its name the
- // allocated IP address of the container, and as its contents the container ID.
- // This routine looks for container IDs that are not reported as running by the
- // container runtime, and releases each one's IPAM allocation.
- func (m *podManager) ipamGarbageCollection() {
- glog.V(2).Infof("Starting IP garbage collection")
-
- const ipamDir string = "/var/lib/cni/networks/openshift-sdn"
- files, err := ioutil.ReadDir(ipamDir)
- if err != nil {
- glog.Errorf("Failed to list files in CNI host-local IPAM store %v: %v", ipamDir, err)
- return
- }
-
- // gather containerIDs for allocated ips
- ipContainerIdMap := make(map[string]string)
- for _, file := range files {
- // skip non checkpoint file
- if ip := net.ParseIP(file.Name()); ip == nil {
- continue
- }
-
- content, err := ioutil.ReadFile(filepath.Join(ipamDir, file.Name()))
- if err != nil {
- glog.Errorf("Failed to read file %v: %v", file, err)
- }
- ipContainerIdMap[file.Name()] = strings.TrimSpace(string(content))
- }
-
- // gather infra container IDs of current running Pods
- runningContainerIDs := ksets.String{}
- pods, err := m.getNonExitedPods()
- if err != nil {
- glog.Errorf("Failed to get pods: %v", err)
- return
- }
- for _, pod := range pods {
- containerID, err := m.host.GetRuntime().GetPodContainerID(pod)
- if err != nil {
- glog.Warningf("Failed to get infra containerID of %q/%q: %v", pod.Namespace, pod.Name, err)
- continue
- }
-
- runningContainerIDs.Insert(strings.TrimSpace(containerID.ID))
- }
-
- // release leaked ips
- for ip, containerID := range ipContainerIdMap {
- // if the container is not running, release IP
- if runningContainerIDs.Has(containerID) {
- continue
- }
-
- glog.V(2).Infof("Releasing IP %q allocated to %q.", ip, containerID)
- m.ipamDel(containerID)
- }
- }
-
  // Set up all networking (host/container veth, OVS flows, IPAM, loopback, etc)
  func (m *podManager) setup(req *cniserver.PodRequest) (*cnitypes.Result, *runningPod, error) {
  pod, err := m.kClient.Core().Pods(req.PodNamespace).Get(req.PodName, metav1.GetOptions{})

@@ -319,15 +237,6 @@ func (m *podManager) setup(req *cniserver.PodRequest) (*cnitypes.Result, *runningPod, error) {

  ipamResult, err := m.ipamAdd(req.Netns, req.ContainerId)
  if err != nil {
- // TODO: Remove this hack once we've figured out how to retrieve the netns
- // of an exited container. Currently, restarting docker will leak a bunch of
- // ips. This will exhaust available ip space unless we cleanup old ips. At the
- // same time we don't want to try GC'ing them periodically as that could lead
- // to a performance regression in starting pods. So on each setup failure, try
- // GC on the assumption that the kubelet is going to retry pod creation, and
- // when it does, there will be ips.
- m.ipamGarbageCollection()
-
  return nil, nil, fmt.Errorf("failed to run IPAM for %v: %v", req.ContainerId, err)
  }
  podIP := ipamResult.IP4.IP.IP
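For context on the deleted garbage collector: the host-local IPAM plugin checkpoints every allocation as a file under /var/lib/cni/networks/openshift-sdn whose name is the allocated IP and whose content is the owning container ID. The removed routine listed those files, asked the runtime which containers were still alive, and released the rest. A sketch of what that store looks like on disk (the specific IPs and container ID are made up for illustration):

    $ ls /var/lib/cni/networks/openshift-sdn/
    10.128.0.5   10.128.0.9
    # each file holds the ID of the container that owns the IP
    $ cat /var/lib/cni/networks/openshift-sdn/10.128.0.5
    8f2c9d41e6b3

Dropping the GC hack is plausible here because, with the SDN running as a normal CNI plugin, kubelet itself owns the pod lifecycle and issues CNI DEL for pods it created.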
(Diffs for the remaining 2 of the 12 changed files were not loaded.)
