diff --git a/go.mod b/go.mod
index 43643e0b46..5974ab5b5f 100644
--- a/go.mod
+++ b/go.mod
@@ -37,7 +37,7 @@ require (
 	k8s.io/client-go v0.33.4
 	k8s.io/component-base v0.33.4
 	k8s.io/klog/v2 v2.130.1
-	k8s.io/kubernetes v1.33.4
+	k8s.io/kubernetes v1.33.7
 	k8s.io/mount-utils v0.34.0
 	k8s.io/pod-security-admission v0.31.1
 	k8s.io/utils v0.0.0-20250604170112-4c0f3b243397
@@ -215,7 +215,6 @@ replace (
 	k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.33.4
 	k8s.io/kubectl => k8s.io/kubectl v0.33.4
 	k8s.io/kubelet => k8s.io/kubelet v0.33.4
-	k8s.io/kubernetes => k8s.io/kubernetes v1.33.4
 	k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.33.4
 	k8s.io/metrics => k8s.io/metrics v0.33.4
 	k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.33.4
diff --git a/go.sum b/go.sum
index 4661cf0495..b8c315a7ec 100644
--- a/go.sum
+++ b/go.sum
@@ -473,8 +473,8 @@ k8s.io/kubectl v0.33.4 h1:nXEI6Vi+oB9hXxoAHyHisXolm/l1qutK3oZQMak4N98=
 k8s.io/kubectl v0.33.4/go.mod h1:Xe7P9X4DfILvKmlBsVqUtzktkI56lEj22SJW7cFy6nE=
 k8s.io/kubelet v0.33.4 h1:+sbpLmSq+Y8DF/OQeyw75OpuiF60tvlYcmc/yjN+nl4=
 k8s.io/kubelet v0.33.4/go.mod h1:wboarviFRQld5rzZUjTliv7x00YVx+YhRd/p1OahX7Y=
-k8s.io/kubernetes v1.33.4 h1:T1d5FLUYm3/KyUeV7YJhKTR980zHCHb7K2xhCSo3lE8=
-k8s.io/kubernetes v1.33.4/go.mod h1:nrt8sldmckKz2fCZhgRX3SKfS2e+CzXATPv6ITNkU00=
+k8s.io/kubernetes v1.33.7 h1:Qhp1gwCPSOqt3du6A0uTGrrTcZDtShdSCIR5IZag16Y=
+k8s.io/kubernetes v1.33.7/go.mod h1:eJiHC143tnNSvmDkCRwGNKA80yXqBvYC3U8L/i67nAY=
 k8s.io/mount-utils v0.34.0 h1:f2QzKU8ZLz5cJ/TmRRZnfAVJ8EPsF+FT1I6pP/HA4gk=
 k8s.io/mount-utils v0.34.0/go.mod h1:MIjjYlqJ0ziYQg0MO09kc9S96GIcMkhF/ay9MncF0GA=
 k8s.io/pod-security-admission v0.33.4 h1:adSwY7a/Q4Eoj+uCUfav90xRe6mB8waF0HAZ4gZeWD0=
diff --git a/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go b/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go
index 2dc46a46ae..ff73903d0a 100644
--- a/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go
+++ b/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go
@@ -36,6 +36,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/rand"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/strategicpatch"
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -87,12 +88,9 @@ const (
 	// PodNodeNameKeyIndex is the name of the index used by PodInformer to index pods by their node name.
 	PodNodeNameKeyIndex = "spec.nodeName"
 
-	// OrphanPodIndexKey is used to index all Orphan pods to this key
-	OrphanPodIndexKey = "_ORPHAN_POD"
-
-	// podControllerUIDIndex is the name for the Pod store's index function,
-	// which is to index by pods's controllerUID.
-	PodControllerUIDIndex = "podControllerUID"
+	// PodControllerIndex is the name for the Pod store's index function,
+	// which indexes by the key returned from PodControllerIndexKey.
+	PodControllerIndex = "podController"
 )
 
 var UpdateTaintBackoff = wait.Backoff{
@@ -1083,30 +1081,74 @@ func AddPodNodeNameIndexer(podInformer cache.SharedIndexInformer) error {
 	})
 }
 
-// AddPodControllerUIDIndexer adds an indexer for Pod's controllerRef.UID to the given PodInformer.
+// PodControllerIndexKey returns the index key to locate pods with the specified controller ownerReference.
+// If ownerReference is nil, the returned key locates pods in the namespace without a controller ownerReference.
+func PodControllerIndexKey(namespace string, ownerReference *metav1.OwnerReference) string {
+	if ownerReference == nil {
+		return namespace
+	}
+	return namespace + "/" + ownerReference.Kind + "/" + ownerReference.Name + "/" + string(ownerReference.UID)
+}
+
+// AddPodControllerIndexer adds an indexer for Pod's controllerRef.UID to the given PodInformer.
 // This indexer is used to efficiently look up pods by their ControllerRef.UID
-func AddPodControllerUIDIndexer(podInformer cache.SharedIndexInformer) error {
-	if _, exists := podInformer.GetIndexer().GetIndexers()[PodControllerUIDIndex]; exists {
+func AddPodControllerIndexer(podInformer cache.SharedIndexInformer) error {
+	if _, exists := podInformer.GetIndexer().GetIndexers()[PodControllerIndex]; exists {
 		// indexer already exists, do nothing
 		return nil
 	}
 	return podInformer.AddIndexers(cache.Indexers{
-		PodControllerUIDIndex: func(obj interface{}) ([]string, error) {
+		PodControllerIndex: func(obj interface{}) ([]string, error) {
 			pod, ok := obj.(*v1.Pod)
 			if !ok {
 				return nil, nil
 			}
-			// Get the ControllerRef of the Pod to check if it's managed by a controller
-			if ref := metav1.GetControllerOf(pod); ref != nil {
-				return []string{string(ref.UID)}, nil
-			}
-			// If the Pod has no controller (i.e., it's orphaned), index it with the OrphanPodIndexKey
-			// This helps identify orphan pods for reconciliation and adoption by controllers
-			return []string{OrphanPodIndexKey}, nil
+			// Get the ControllerRef of the Pod to check if it's managed by a controller.
+			// Index with a non-nil controller (indicating an owned pod) or a nil controller (indicating an orphan pod).
+			return []string{PodControllerIndexKey(pod.Namespace, metav1.GetControllerOf(pod))}, nil
 		},
 	})
 }
 
+// FilterPodsByOwner gets the Pods managed by an owner or orphan Pods in the owner's namespace
+func FilterPodsByOwner(podIndexer cache.Indexer, owner *metav1.ObjectMeta, ownerKind string, includeOrphanedPods bool) ([]*v1.Pod, error) {
+	result := []*v1.Pod{}
+
+	if len(owner.Namespace) == 0 {
+		return nil, fmt.Errorf("no owner namespace provided")
+	}
+	if len(owner.Name) == 0 {
+		return nil, fmt.Errorf("no owner name provided")
+	}
+	if len(owner.UID) == 0 {
+		return nil, fmt.Errorf("no owner uid provided")
+	}
+	if len(ownerKind) == 0 {
+		return nil, fmt.Errorf("no owner kind provided")
+	}
+	// Always include the owner key, which identifies Pods that are controlled by the owner
+	keys := []string{PodControllerIndexKey(owner.Namespace, &metav1.OwnerReference{Name: owner.Name, Kind: ownerKind, UID: owner.UID})}
+	if includeOrphanedPods {
+		// Optionally include the unowned key, which identifies orphaned Pods in the owner's namespace and might be adopted by the owner later
+		keys = append(keys, PodControllerIndexKey(owner.Namespace, nil))
+	}
+	for _, key := range keys {
+		pods, err := podIndexer.ByIndex(PodControllerIndex, key)
+		if err != nil {
+			return nil, err
+		}
+		for _, obj := range pods {
+			pod, ok := obj.(*v1.Pod)
+			if !ok {
+				utilruntime.HandleError(fmt.Errorf("unexpected object type in pod indexer: %v", obj))
+				continue
+			}
+			result = append(result, pod)
+		}
+	}
+	return result, nil
+}
+
 // PodKey returns a key unique to the given pod within a cluster.
 // It's used so we consistently use the same key scheme in this module.
 // It does exactly what cache.MetaNamespaceKeyFunc would have done
diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/plugin/v1beta1/server.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/plugin/v1beta1/server.go
index fd37315810..e19e3efd2e 100644
--- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/plugin/v1beta1/server.go
+++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/devicemanager/plugin/v1beta1/server.go
@@ -57,8 +57,8 @@ type server struct {
 	chandler ClientHandler
 	clients  map[string]Client
 
-	// isStarted indicates whether the service has started successfully.
-	isStarted bool
+	// lastError records the last runtime error. A server is considered healthy until an actual error occurs.
+	lastError error
 }
 
 // NewServer returns an initialized device plugin registration server.
@@ -117,7 +117,7 @@ func (s *server) Start() error {
 		defer s.wg.Done()
 		s.setHealthy()
 		if err = s.grpc.Serve(ln); err != nil {
-			s.setUnhealthy()
+			s.setUnhealthy(err)
 			klog.ErrorS(err, "Error while serving device plugin registration grpc server")
 		}
 	}()
@@ -208,18 +208,19 @@ func (s *server) Name() string {
 }
 
 func (s *server) Check(_ *http.Request) error {
-	if s.isStarted {
-		return nil
-	}
-	return fmt.Errorf("device plugin registration gRPC server failed and no device plugins can register")
+	return s.lastError
 }
 
 // setHealthy sets the health status of the gRPC server.
 func (s *server) setHealthy() {
-	s.isStarted = true
+	s.lastError = nil
 }
 
 // setUnhealthy sets the health status of the gRPC server to unhealthy.
-func (s *server) setUnhealthy() {
-	s.isStarted = false
+func (s *server) setUnhealthy(err error) {
+	if err == nil {
+		s.lastError = fmt.Errorf("device registration error: device plugin registration gRPC server failed and no device plugins can register")
+		return
+	}
+	s.lastError = fmt.Errorf("device registration error: device plugin registration gRPC server failed and no device plugins can register: %w", err)
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports/node_ports.go b/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports/node_ports.go
index 21835f6c12..37b274a3f1 100644
--- a/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports/node_ports.go
+++ b/vendor/k8s.io/kubernetes/pkg/scheduler/framework/plugins/nodeports/node_ports.go
@@ -50,9 +50,9 @@ const (
 	ErrReason = "node(s) didn't have free ports for the requested pod ports"
 )
 
-type preFilterState []*v1.ContainerPort
+type preFilterState []v1.ContainerPort
 
 // Clone the prefilter state.
 func (s preFilterState) Clone() framework.StateData {
 	// The state is not impacted by adding/removing existing pods, hence we don't need to make a deep copy.
 	return s
@@ -63,28 +63,9 @@ func (pl *NodePorts) Name() string {
 	return Name
 }
 
-// getContainerPorts returns the used host ports of Pods: if 'port' was used, a 'port:true' pair
-// will be in the result; but it does not resolve port conflict.
-func getContainerPorts(pods ...*v1.Pod) []*v1.ContainerPort {
-	ports := []*v1.ContainerPort{}
-	for _, pod := range pods {
-		for j := range pod.Spec.Containers {
-			container := &pod.Spec.Containers[j]
-			for k := range container.Ports {
-				// Only return ports with a host port specified.
-				if container.Ports[k].HostPort <= 0 {
-					continue
-				}
-				ports = append(ports, &container.Ports[k])
-			}
-		}
-	}
-	return ports
-}
 
 // PreFilter invoked at the prefilter extension point.
 func (pl *NodePorts) PreFilter(ctx context.Context, cycleState *framework.CycleState, pod *v1.Pod) (*framework.PreFilterResult, *framework.Status) {
-	s := getContainerPorts(pod)
+	s := util.GetHostPorts(pod)
 	// Skip if a pod has no ports.
 	if len(s) == 0 {
 		return nil, framework.NewStatus(framework.Skip)
 	}
@@ -148,24 +129,19 @@ func (pl *NodePorts) isSchedulableAfterPodDeleted(logger klog.Logger, pod *v1.Po
 		return framework.QueueSkip, nil
 	}
 
-	// Get the used host ports of the deleted pod.
-	usedPorts := make(framework.HostPortInfo)
-	for _, container := range deletedPod.Spec.Containers {
-		for _, podPort := range container.Ports {
-			if podPort.HostPort > 0 {
-				usedPorts.Add(podPort.HostIP, string(podPort.Protocol), podPort.HostPort)
-			}
-		}
-	}
-
 	// If the deleted pod doesn't use any host ports, it doesn't make the target pod schedulable.
-	if len(usedPorts) == 0 {
+	ports := util.GetHostPorts(deletedPod)
+	if len(ports) == 0 {
 		return framework.QueueSkip, nil
 	}
 
 	// Construct a fake NodeInfo that only has the deleted Pod.
 	// If we can schedule `pod` to this fake node, it means that `pod` and the deleted pod don't have any common port(s).
 	// So, deleting that pod couldn't make `pod` schedulable.
+	usedPorts := make(framework.HostPortInfo, len(ports))
+	for _, p := range ports {
+		usedPorts.Add(p.HostIP, string(p.Protocol), p.HostPort)
+	}
 	nodeInfo := framework.NodeInfo{UsedPorts: usedPorts}
 	if Fits(pod, &nodeInfo) {
 		logger.V(4).Info("the deleted pod and the target pod don't have any common port(s), returning QueueSkip as deleting this Pod won't make the Pod schedulable", "pod", klog.KObj(pod), "deletedPod", klog.KObj(deletedPod))
@@ -193,10 +169,10 @@ func (pl *NodePorts) Filter(ctx context.Context, cycleState *framework.CycleStat
 
 // Fits checks if the pod fits the node.
 func Fits(pod *v1.Pod, nodeInfo *framework.NodeInfo) bool {
-	return fitsPorts(getContainerPorts(pod), nodeInfo)
+	return fitsPorts(util.GetHostPorts(pod), nodeInfo)
 }
 
-func fitsPorts(wantPorts []*v1.ContainerPort, nodeInfo *framework.NodeInfo) bool {
+func fitsPorts(wantPorts []v1.ContainerPort, nodeInfo *framework.NodeInfo) bool {
 	// try to see whether existingPorts and wantPorts will conflict or not
 	existingPorts := nodeInfo.UsedPorts
 	for _, cp := range wantPorts {
diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/framework/types.go b/vendor/k8s.io/kubernetes/pkg/scheduler/framework/types.go
index fbc02a223a..8c3a7bde73 100644
--- a/vendor/k8s.io/kubernetes/pkg/scheduler/framework/types.go
+++ b/vendor/k8s.io/kubernetes/pkg/scheduler/framework/types.go
@@ -1164,13 +1164,11 @@ func (pi *PodInfo) calculateResource() podResource {
 
 // updateUsedPorts updates the UsedPorts of NodeInfo.
 func (n *NodeInfo) updateUsedPorts(pod *v1.Pod, add bool) {
-	for _, container := range pod.Spec.Containers {
-		for _, podPort := range container.Ports {
-			if add {
-				n.UsedPorts.Add(podPort.HostIP, string(podPort.Protocol), podPort.HostPort)
-			} else {
-				n.UsedPorts.Remove(podPort.HostIP, string(podPort.Protocol), podPort.HostPort)
-			}
+	for _, port := range schedutil.GetHostPorts(pod) {
+		if add {
+			n.UsedPorts.Add(port.HostIP, string(port.Protocol), port.HostPort)
+		} else {
+			n.UsedPorts.Remove(port.HostIP, string(port.Protocol), port.HostPort)
 		}
 	}
 }
diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/util/utils.go b/vendor/k8s.io/kubernetes/pkg/scheduler/util/utils.go
index e14b4e7745..751c727579 100644
--- a/vendor/k8s.io/kubernetes/pkg/scheduler/util/utils.go
+++ b/vendor/k8s.io/kubernetes/pkg/scheduler/util/utils.go
@@ -188,3 +188,39 @@ func As[T any](oldObj, newobj interface{}) (T, T, error) {
 	}
 	return oldTyped, newTyped, nil
 }
+
+// GetHostPorts returns the used host ports of pod containers and
+// initContainers with restartPolicy: Always.
+func GetHostPorts(pod *v1.Pod) []v1.ContainerPort {
+	var ports []v1.ContainerPort
+	if pod == nil {
+		return ports
+	}
+
+	hostPort := func(p v1.ContainerPort) bool {
+		return p.HostPort > 0
+	}
+
+	for _, c := range pod.Spec.InitContainers {
+		// Only consider initContainers that will be running the entire
+		// duration of the Pod.
+		if c.RestartPolicy == nil || *c.RestartPolicy != v1.ContainerRestartPolicyAlways {
+			continue
+		}
+		for _, p := range c.Ports {
+			if !hostPort(p) {
+				continue
+			}
+			ports = append(ports, p)
+		}
+	}
+	for _, c := range pod.Spec.Containers {
+		for _, p := range c.Ports {
+			if !hostPort(p) {
+				continue
+			}
+			ports = append(ports, p)
+		}
+	}
+	return ports
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/securitycontext/util.go b/vendor/k8s.io/kubernetes/pkg/securitycontext/util.go
index 28771b6df2..5e000f9333 100644
--- a/vendor/k8s.io/kubernetes/pkg/securitycontext/util.go
+++ b/vendor/k8s.io/kubernetes/pkg/securitycontext/util.go
@@ -17,6 +17,10 @@ limitations under the License.
 package securitycontext
 
 import (
+	"fmt"
+	"os"
+	"sync"
+
 	v1 "k8s.io/api/core/v1"
 )
 
@@ -188,21 +192,32 @@ func AddNoNewPrivileges(sc *v1.SecurityContext) bool {
 
 var (
 	// These *must* be kept in sync with moby/moby.
-	// https://github.com/moby/moby/blob/master/oci/defaults.go#L105-L124
-	// @jessfraz will watch changes to those files upstream.
-	defaultMaskedPaths = []string{
-		"/proc/asound",
-		"/proc/acpi",
-		"/proc/kcore",
-		"/proc/keys",
-		"/proc/latency_stats",
-		"/proc/timer_list",
-		"/proc/timer_stats",
-		"/proc/sched_debug",
-		"/proc/scsi",
-		"/sys/firmware",
-		"/sys/devices/virtual/powercap",
-	}
+	// https://github.com/moby/moby/blob/ecb03c4cdae6f323150fc11b303dcc5dc4d82416/oci/defaults.go#L190-L218
+	defaultMaskedPaths = sync.OnceValue(func() []string {
+		maskedPaths := []string{
+			"/proc/asound",
+			"/proc/acpi",
+			"/proc/interrupts",
+			"/proc/kcore",
+			"/proc/keys",
+			"/proc/latency_stats",
+			"/proc/timer_list",
+			"/proc/timer_stats",
+			"/proc/sched_debug",
+			"/proc/scsi",
+			"/sys/firmware",
+			"/sys/devices/virtual/powercap",
+		}
+
+		for _, cpu := range possibleCPUs() {
+			path := fmt.Sprintf("/sys/devices/system/cpu/cpu%d/thermal_throttle", cpu)
+			if _, err := os.Stat(path); err == nil {
+				maskedPaths = append(maskedPaths, path)
+			}
+		}
+
+		return maskedPaths
+	})
 	defaultReadonlyPaths = []string{
 		"/proc/bus",
 		"/proc/fs",
@@ -221,7 +236,7 @@ func ConvertToRuntimeMaskedPaths(opt *v1.ProcMountType) []string {
 	}
 
 	// Otherwise, add the default masked paths to the runtime security context.
-	return defaultMaskedPaths
+	return defaultMaskedPaths()
 }
 
 // ConvertToRuntimeReadonlyPaths converts the ProcMountType to the specified or default
diff --git a/vendor/k8s.io/kubernetes/pkg/securitycontext/util_darwin.go b/vendor/k8s.io/kubernetes/pkg/securitycontext/util_darwin.go
new file mode 100644
index 0000000000..9d14502acb
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/securitycontext/util_darwin.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package securitycontext
+
+func possibleCPUs() []int {
+	return nil
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/securitycontext/util_linux.go b/vendor/k8s.io/kubernetes/pkg/securitycontext/util_linux.go
new file mode 100644
index 0000000000..bcaab4eb3e
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/securitycontext/util_linux.go
@@ -0,0 +1,74 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package securitycontext
+
+import (
+	"os"
+	"runtime"
+	"strconv"
+	"strings"
+	"sync"
+)
+
+// possibleCPUs returns the IDs of the possible CPUs on this host.
+func possibleCPUs() (cpus []int) {
+	if ncpu := possibleCPUsParsed(); ncpu != nil {
+		return ncpu
+	}
+
+	for i := range runtime.NumCPU() {
+		cpus = append(cpus, i)
+	}
+
+	return cpus
+}
+
+// possibleCPUsParsed parses the IDs of the possible CPUs on this host from
+// /sys/devices/system/cpu/possible.
+var possibleCPUsParsed = sync.OnceValue(func() (cpus []int) {
+	data, err := os.ReadFile("/sys/devices/system/cpu/possible")
+	if err != nil {
+		return nil
+	}
+
+	ranges := strings.Split(strings.TrimSpace(string(data)), ",")
+
+	for _, r := range ranges {
+		if rStart, rEnd, ok := strings.Cut(r, "-"); !ok {
+			cpu, err := strconv.Atoi(rStart)
+			if err != nil {
+				return nil
+			}
+			cpus = append(cpus, cpu)
+		} else {
+			var start, end int
+			start, err := strconv.Atoi(rStart)
+			if err != nil {
+				return nil
+			}
+			end, err = strconv.Atoi(rEnd)
+			if err != nil {
+				return nil
+			}
+			for i := start; i <= end; i++ {
+				cpus = append(cpus, i)
+			}
+		}
+	}
+
+	return cpus
+})
diff --git a/vendor/k8s.io/kubernetes/pkg/securitycontext/util_windows.go b/vendor/k8s.io/kubernetes/pkg/securitycontext/util_windows.go
new file mode 100644
index 0000000000..9d14502acb
--- /dev/null
+++ b/vendor/k8s.io/kubernetes/pkg/securitycontext/util_windows.go
@@ -0,0 +1,21 @@
+/*
+Copyright 2025 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package securitycontext
+
+func possibleCPUs() []int {
+	return nil
+}
diff --git a/vendor/k8s.io/kubernetes/pkg/volume/plugins.go b/vendor/k8s.io/kubernetes/pkg/volume/plugins.go
index c4e5fee285..acfff76d3b 100644
--- a/vendor/k8s.io/kubernetes/pkg/volume/plugins.go
+++ b/vendor/k8s.io/kubernetes/pkg/volume/plugins.go
@@ -1003,7 +1003,7 @@ func NewPersistentVolumeRecyclerPodTemplate() *v1.Pod {
 			Containers: []v1.Container{
 				{
 					Name:    "pv-recycler",
-					Image:   "registry.k8s.io/build-image/debian-base:bookworm-v1.0.4",
+					Image:   "registry.k8s.io/build-image/debian-base:bookworm-v1.0.6",
 					Command: []string{"/bin/sh"},
 					Args:    []string{"-c", "test -e /scrub && find /scrub -mindepth 1 -delete && test -z \"$(ls -A /scrub)\" || exit 1"},
 					VolumeMounts: []v1.VolumeMount{
diff --git a/vendor/k8s.io/kubernetes/test/utils/image/manifest.go b/vendor/k8s.io/kubernetes/test/utils/image/manifest.go
index dae697066b..6068593068 100644
--- a/vendor/k8s.io/kubernetes/test/utils/image/manifest.go
+++ b/vendor/k8s.io/kubernetes/test/utils/image/manifest.go
@@ -228,7 +228,7 @@ func initImageConfigs(list RegistryList) (map[ImageID]Config, map[ImageID]Config
 	configs[APIServer] = Config{list.PromoterE2eRegistry, "sample-apiserver", "1.29.2"}
 	configs[AppArmorLoader] = Config{list.PromoterE2eRegistry, "apparmor-loader", "1.4"}
 	configs[BusyBox] = Config{list.PromoterE2eRegistry, "busybox", "1.36.1-1"}
-	configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.7.7"}
+	configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.7.13"}
 	configs[Etcd] = Config{list.GcEtcdRegistry, "etcd", "3.5.21-0"}
 	configs[Httpd] = Config{list.PromoterE2eRegistry, "httpd", "2.4.38-4"}
 	configs[HttpdNew] = Config{list.PromoterE2eRegistry, "httpd", "2.4.39-4"}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index fc53239ffc..901c997c24 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1532,7 +1532,7 @@ k8s.io/kubelet/pkg/apis/pluginregistration/v1
 k8s.io/kubelet/pkg/apis/podresources/v1
 k8s.io/kubelet/pkg/apis/podresources/v1alpha1
 k8s.io/kubelet/pkg/apis/stats/v1alpha1
-# k8s.io/kubernetes v1.33.4 => k8s.io/kubernetes v1.33.4
+# k8s.io/kubernetes v1.33.7
 ## explicit; go 1.24.0
 k8s.io/kubernetes/pkg/api/legacyscheme
 k8s.io/kubernetes/pkg/api/service
@@ -1895,7 +1895,6 @@ sigs.k8s.io/yaml/goyaml.v2
 # k8s.io/kube-scheduler => k8s.io/kube-scheduler v0.33.4
 # k8s.io/kubectl => k8s.io/kubectl v0.33.4
 # k8s.io/kubelet => k8s.io/kubelet v0.33.4
-# k8s.io/kubernetes => k8s.io/kubernetes v1.33.4
 # k8s.io/legacy-cloud-providers => k8s.io/legacy-cloud-providers v0.33.4
 # k8s.io/metrics => k8s.io/metrics v0.33.4
 # k8s.io/pod-security-admission => k8s.io/pod-security-admission v0.33.4
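
Illustrative usage (not part of the patch): the controller_utils.go changes above replace the UID-only pod index with a composite namespace/kind/name/UID key, plus a namespace-only key for orphans, so a controller can fetch the pods it owns and any adoptable orphans with two indexed lookups instead of listing and filtering a whole namespace. A minimal sketch against the vendored helpers; the package name, podsForOwner function, informer-factory wiring, and 30-second resync period are assumptions for illustration, while AddPodControllerIndexer, FilterPodsByOwner, and their signatures come from the diff above:

	package example

	import (
		"fmt"
		"time"

		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/client-go/informers"
		"k8s.io/client-go/kubernetes"
		"k8s.io/kubernetes/pkg/controller"
	)

	// podsForOwner prints the pods a ReplicaSet controls, plus orphans it could
	// adopt. FilterPodsByOwner requires owner.Namespace, Name, and UID to be set.
	func podsForOwner(client kubernetes.Interface, owner *metav1.ObjectMeta) error {
		factory := informers.NewSharedInformerFactory(client, 30*time.Second)
		podInformer := factory.Core().V1().Pods().Informer()

		// Register the composite pod-controller index before starting the informer.
		if err := controller.AddPodControllerIndexer(podInformer); err != nil {
			return err
		}

		stop := make(chan struct{})
		defer close(stop)
		factory.Start(stop)
		factory.WaitForCacheSync(stop)

		// One lookup per index key: the owner key and, because the last argument
		// is true, the namespace-only orphan key as well.
		pods, err := controller.FilterPodsByOwner(podInformer.GetIndexer(), owner, "ReplicaSet", true)
		if err != nil {
			return err
		}
		for _, pod := range pods {
			fmt.Println(pod.Name)
		}
		return nil
	}

The scheduler-side changes follow the same consolidation idea: util.GetHostPorts is now the single place that decides which ports count as host ports, and it includes init containers with restartPolicy: Always (sidecars), so NodePorts filtering, requeue checks, and NodeInfo port accounting can no longer drift apart.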