kubevirt, refactor: Reuse ippool re-fill logic
This change groups together the IP pool re-fill logic at "syncPods" and
"addUpdateLocalNodeEvent" for live-migratable pods.

Signed-off-by: Enrique Llorente <[email protected]>
qinqon committed Jul 18, 2023
1 parent f508156 commit ced79da
Showing 2 changed files with 66 additions and 37 deletions.
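Before the file-by-file diff, a toy illustration of the shape of the change may help: both call sites ("syncPods" zone-wide, "addUpdateLocalNodeEvent" node-scoped) now funnel into one refill helper, with an empty nodeName meaning "consider the whole zone". A minimal, self-contained sketch of that dispatch — the names here are illustrative, not from the commit:

```go
package main

import "fmt"

// refill stands in for allocateSyncMigratablePodIPs: one code path, two
// scopes. An empty nodeName means "consider the whole zone".
func refill(nodeName, switchName string) (portName string) {
	// Node-scoped callers only refill pods whose switch matches their node.
	if nodeName != "" && switchName != nodeName {
		return ""
	}
	return "pod-port-on-" + switchName
}

func main() {
	// syncPods-style caller: zone-wide, nodeName empty.
	fmt.Println(refill("", "node-a")) // pod-port-on-node-a
	// addUpdateLocalNodeEvent-style caller: scoped to node-b.
	fmt.Println(refill("node-b", "node-a")) // "" (skipped)
}
```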
32 changes: 21 additions & 11 deletions go-controller/pkg/kubevirt/pod.go
@@ -244,44 +244,54 @@ func FindLiveMigratablePods(watchFactory *factory.WatchFactory) ([]*corev1.Pod,
 	return liveMigratablePods, nil
 }
 
-// AllowPodBridgeNetworkLiveMigrationAnnotation will refill ip pool in
+// allocateSyncMigratablePodIPs will refill the IP pool in
 // case the node has taken over the VM subnet for live-migrated VMs
-func allocateSyncMigratablePodIPs(watchFactory *factory.WatchFactory, lsManager *logicalswitchmanager.LogicalSwitchManager, nodeName, nadName string, pod *corev1.Pod, allocatePodIPsOnSwitch func(*corev1.Pod, *util.PodAnnotation, string, string) (string, error)) error {
+func allocateSyncMigratablePodIPs(watchFactory *factory.WatchFactory, lsManager *logicalswitchmanager.LogicalSwitchManager, nodeName, nadName string, pod *corev1.Pod, allocatePodIPsOnSwitch func(*corev1.Pod, *util.PodAnnotation, string, string) (string, error)) (*ktypes.NamespacedName, string, *util.PodAnnotation, error) {
 	isStale, err := IsMigratedSourcePodStale(watchFactory, pod)
 	if err != nil {
-		return err
+		return nil, "", nil, err
 	}
 
 	// We care only for Running virt-launcher pods
 	if isStale {
-		return nil
+		return nil, "", nil, nil
 	}
 
+	vmKey := ExtractVMNameFromPod(pod)
+
 	annotation, err := util.UnmarshalPodAnnotation(pod.Annotations, nadName)
 	if err != nil {
-		return nil
+		return nil, "", nil, nil
 	}
 	switchName, zoneContainsPodSubnet := ZoneContainsPodSubnet(lsManager, annotation)
 	// If this zone does not own the subnet, or the node that was passed
 	// does not match the switch, the IPs should not be allocated
 	if !zoneContainsPodSubnet || (nodeName != "" && switchName != nodeName) {
-		return nil
+		return vmKey, "", nil, nil
 	}
-	if _, err := allocatePodIPsOnSwitch(pod, annotation, nadName, switchName); err != nil {
-		return err
+	expectedLogicalPortName, err := allocatePodIPsOnSwitch(pod, annotation, nadName, switchName)
+	if err != nil {
+		return vmKey, "", nil, err
 	}
-	return nil
+	return vmKey, expectedLogicalPortName, annotation, nil
 }
 
+// AllocateSyncMigratablePodIPsOnZone will refill the IP pool
+// with the pod's IPs if those IPs belong to the zone
+func AllocateSyncMigratablePodIPsOnZone(watchFactory *factory.WatchFactory, lsManager *logicalswitchmanager.LogicalSwitchManager, nadName string, pod *corev1.Pod, allocatePodIPsOnSwitch func(*corev1.Pod, *util.PodAnnotation, string, string) (string, error)) (*ktypes.NamespacedName, string, *util.PodAnnotation, error) {
+	// We care about the whole zone so we pass the nodeName empty
+	return allocateSyncMigratablePodIPs(watchFactory, lsManager, "", nadName, pod, allocatePodIPsOnSwitch)
+}
+
-// AllowPodBridgeNetworkLiveMigrationAnnotation will refill ip pool in
+// AllocateSyncMigratablePodsIPsOnNode will refill the IP pool in
 // case the node has taken over the VM subnet for live-migrated VMs
 func AllocateSyncMigratablePodsIPsOnNode(watchFactory *factory.WatchFactory, lsManager *logicalswitchmanager.LogicalSwitchManager, nodeName, nadName string, allocatePodIPsOnSwitch func(*corev1.Pod, *util.PodAnnotation, string, string) (string, error)) error {
 	liveMigratablePods, err := FindLiveMigratablePods(watchFactory)
 	if err != nil {
 		return err
 	}
 	for _, liveMigratablePod := range liveMigratablePods {
-		if err := allocateSyncMigratablePodIPs(watchFactory, lsManager, nodeName, nadName, liveMigratablePod, allocatePodIPsOnSwitch); err != nil {
+		if _, _, _, err := allocateSyncMigratablePodIPs(watchFactory, lsManager, nodeName, nadName, liveMigratablePod, allocatePodIPsOnSwitch); err != nil {
 			return err
 		}
 	}
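The refactored helper now reports four values instead of a bare error. A self-contained sketch of how a caller can distinguish the three non-error outcomes, following the returns in the diff above — the function and variable names are illustrative:

```go
package main

import "fmt"

// interpret mirrors the return contract of allocateSyncMigratablePodIPs as
// refactored above: (vmKey, logicalPortName, annotation, err).
func interpret(vmKey *string, portName string, err error) string {
	switch {
	case err != nil:
		return "allocation on the switch failed: " + err.Error()
	case vmKey == nil:
		return "stale migration source (or no annotation): skip"
	case portName == "":
		return "VM tracked, but this zone/node does not own the subnet"
	default:
		return "IP pool refilled for port " + portName
	}
}

func main() {
	vm := "default/vm-a"
	fmt.Println(interpret(nil, "", nil))
	fmt.Println(interpret(&vm, "", nil))
	fmt.Println(interpret(&vm, "vm-a-port", nil))
}
```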
71 changes: 45 additions & 26 deletions go-controller/pkg/ovn/pods.go
@@ -28,49 +28,33 @@ func (oc *DefaultNetworkController) syncPods(pods []interface{}) error {
 	// TBD: Before this succeeds, add Pod handler should not continue to allocate IPs for the new Pods.
 	expectedLogicalPorts := make(map[string]bool)
 	vms := make(map[ktypes.NamespacedName]bool)
+	var err error
 	for _, podInterface := range pods {
 		pod, ok := podInterface.(*kapi.Pod)
 		if !ok {
 			return fmt.Errorf("spurious object in syncPods: %v", podInterface)
 		}
 
-		switchName := pod.Spec.NodeName
+		expectedLogicalPortName := ""
+		var annotations *util.PodAnnotation
 		if kubevirt.IsPodLiveMigratable(pod) {
-			isStale, err := kubevirt.IsMigratedSourcePodStale(oc.watchFactory, pod)
+			vms, expectedLogicalPortName, annotations, err = oc.allocateSyncMigratablePodIPsOnZone(vms, pod)
 			if err != nil {
 				return err
 			}
-			// The stale pods are "old" information from a live migration
-			// so it's not needed to be in sync
-			if isStale {
-				continue
-			}
-			vm := kubevirt.ExtractVMNameFromPod(pod)
-			if vm != nil {
-				vms[*vm] = oc.isPodScheduledinLocalZone(pod)
-			}
-			annotation, err := util.UnmarshalPodAnnotation(pod.Annotations, ovntypes.DefaultNetworkName)
+		} else if oc.isPodScheduledinLocalZone(pod) {
+			expectedLogicalPortName, annotations, err = oc.allocateSyncPodsIPs(pod)
 			if err != nil {
-				continue
-			}
-			zoneContainsPodSubnet := false
-			switchName, zoneContainsPodSubnet = kubevirt.ZoneContainsPodSubnet(oc.lsManager, annotation)
-			// Don't allocate ip if this zone does not own the ip
-			if !zoneContainsPodSubnet {
-				continue
+				return err
 			}
-		} else if !oc.isPodScheduledinLocalZone(pod) {
+		} else {
 			continue
 		}
 
-		annotations, err := util.UnmarshalPodAnnotation(pod.Annotations, ovntypes.DefaultNetworkName)
-		if err != nil {
+		if annotations == nil {
 			continue
 		}
-		expectedLogicalPortName, err := oc.allocatePodIPsOnSwitch(pod, annotations, ovntypes.DefaultNetworkName, switchName)
-		if err != nil {
-			return err
-		}
 
 		if expectedLogicalPortName != "" {
 			expectedLogicalPorts[expectedLogicalPortName] = true
 		}
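The loop above reduces to a three-way dispatch (migratable pod, plain local pod, or skip) followed by shared bookkeeping. A compilable distillation of that control flow, with stub types standing in for the controller — all names here are hypothetical:

```go
package main

import "fmt"

type pod struct {
	name          string
	migratable    bool
	local         bool
	hasAnnotation bool
}

func main() {
	pods := []pod{
		{"virt-launcher-a", true, true, true},
		{"plain-local", false, true, true},
		{"remote", false, false, true},
	}
	expectedPorts := map[string]bool{}
	for _, p := range pods {
		var portName string
		switch {
		case p.migratable:
			portName = p.name + "-port" // zone-scoped migratable refill path
		case p.local:
			portName = p.name + "-port" // plain local-pod refill path
		default:
			continue // remote, non-migratable: nothing to sync here
		}
		if !p.hasAnnotation {
			continue // no usable annotation: skipped, not fatal
		}
		if portName != "" {
			expectedPorts[portName] = true
		}
	}
	fmt.Println(expectedPorts)
}
```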
@@ -299,3 +283,38 @@ func (oc *DefaultNetworkController) addLogicalPort(pod *kapi.Pod) (err error) {
 	}
 	return nil
 }
+
+func (oc *DefaultNetworkController) allocateSyncPodsIPs(pod *kapi.Pod) (string, *util.PodAnnotation, error) {
+	annotations, err := util.UnmarshalPodAnnotation(pod.Annotations, ovntypes.DefaultNetworkName)
+	if err != nil {
+		// A missing or malformed annotation is not fatal here; the caller skips such pods
+		return "", nil, nil
+	}
+	expectedLogicalPortName, err := oc.allocatePodIPsOnSwitch(pod, annotations, ovntypes.DefaultNetworkName, pod.Spec.NodeName)
+	if err != nil {
+		return "", nil, err
+	}
+	return expectedLogicalPortName, annotations, nil
+}
+
+func (oc *DefaultNetworkController) allocateSyncMigratablePodIPsOnZone(vms map[ktypes.NamespacedName]bool, pod *kapi.Pod) (map[ktypes.NamespacedName]bool, string, *util.PodAnnotation, error) {
+	allocatePodIPsOnSwitchWrapFn := func(liveMigratablePod *kapi.Pod, liveMigratablePodAnnotation *util.PodAnnotation, nadName, switchName string) (string, error) {
+		return oc.allocatePodIPsOnSwitch(liveMigratablePod, liveMigratablePodAnnotation, nadName, switchName)
+	}
+	vmKey, expectedLogicalPortName, podAnnotation, err := kubevirt.AllocateSyncMigratablePodIPsOnZone(oc.watchFactory, oc.lsManager, ovntypes.DefaultNetworkName, pod, allocatePodIPsOnSwitchWrapFn)
+	if err != nil {
+		return nil, "", nil, err
+	}
+
+	// If there is a vmKey, this VM is not stale, so it should be in sync
+	if vmKey != nil {
+		vms[*vmKey] = oc.isPodScheduledinLocalZone(pod)
+	}
+
+	// For remote pods the logical switch port is not present, so an
+	// empty expectedLogicalPortName is returned
+	if _, ok := oc.localZoneNodes.Load(pod.Spec.NodeName); !ok {
+		expectedLogicalPortName = ""
+	}
+
+	return vms, expectedLogicalPortName, podAnnotation, nil
+}
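One detail worth calling out in the wrapper above: the allocatePodIPsOnSwitch callback is always invoked positionally as (pod, annotation, nadName, switchName), so any adapter must forward its arguments in exactly that order. A named function type would make that contract self-documenting; a hypothetical, self-contained sketch (not part of the commit):

```go
package main

import "fmt"

// Stub types standing in for *kapi.Pod and *util.PodAnnotation.
type Pod struct{ Name string }
type PodAnnotation struct{}

// AllocateFn documents the positional contract used throughout the diff:
// (pod, annotation, nadName, switchName) -> (logicalPortName, error).
type AllocateFn func(pod *Pod, ann *PodAnnotation, nadName, switchName string) (string, error)

func main() {
	// The wrapper pattern from allocateSyncMigratablePodIPsOnZone: close over
	// state and forward the arguments unchanged, in the same order.
	var wrapped AllocateFn = func(pod *Pod, ann *PodAnnotation, nadName, switchName string) (string, error) {
		return fmt.Sprintf("%s.%s@%s", nadName, pod.Name, switchName), nil
	}
	port, _ := wrapped(&Pod{Name: "virt-launcher-a"}, &PodAnnotation{}, "default", "node-a")
	fmt.Println(port) // default.virt-launcher-a@node-a
}
```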
