The main question is whether there is a way to finish a pod from the client-go SDK. I'm not trying to delete a pod; I just want to finish it with a phase of Completed.
In the code below, I'm trying to update the pod phase, but it doesn't work: it does not return an error or panic, yet the pod does not finish. My code:
package main

import (
	"context"
	"fmt"
	"reflect"
	"strings"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	// creates the in-cluster config
	config, err := rest.InClusterConfig()
	if err != nil {
		panic(err.Error())
	}
	// creates the clientset
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err.Error())
	}
	for {
		pods, err := clientset.CoreV1().Pods("ns").List(context.TODO(), metav1.ListOptions{})
		if err != nil {
			panic(err.Error())
		}
		for _, pod := range pods.Items {
			podName := pod.Name
			if strings.Contains(strings.ToLower(podName), "single-condition") {
				fmt.Println("get pods metadatada")
				fmt.Println(pod.Name)
				fmt.Printf("pod.Name %s \n", pod.Name)
				fmt.Printf("Status.Phase %s \n", pod.Status.Phase)
				fmt.Printf("PodIP %s \n", pod.Status.PodIP)
				containers := pod.Status.ContainerStatuses
				if len(containers) > 0 {
					for _, c := range containers {
						fmt.Printf("c.Name %s \n", c.Name)
						fmt.Printf("c.State %s \n", c.State)
						fmt.Printf("c.State.Terminated %s \n", c.State.Terminated)
						stateTerminated := c.State.Terminated
						stateRunning := c.State.Running
						if stateTerminated == nil && stateRunning != nil {
							fmt.Printf("c.State.Terminated %s \n", c.State.Terminated)
							fmt.Printf("stateRunning Reason: %s\n", reflect.TypeOf(c.State.Running))
							getPod, getErr := clientset.CoreV1().Pods("ns").Get(context.TODO(), "single-condition-pipeline-9rqrs-1224102659", metav1.GetOptions{})
							if getErr != nil {
								fmt.Println("error1")
								panic(fmt.Errorf("Failed to get: %v", getErr))
							}
							fmt.Println("update values")
							fmt.Printf(" getPodName %d \n", getPod.Name)
							getPod.Status.Phase = "Succeeded"
							fmt.Println("updated status phase")
							getContainers := getPod.Status.ContainerStatuses
							fmt.Printf("len get container %d \n", len(getContainers))
							_, updateErr := clientset.CoreV1().Pods("argo-workflows").Update(context.TODO(), getPod, metav1.UpdateOptions{})
							fmt.Println("commit update")
							if updateErr != nil {
								fmt.Println("error updated")
								panic(fmt.Errorf("Failed to update: %v", updateErr))
							}
						} else {
							fmt.Printf("c.State.Terminated %s \n", c.State.Terminated.Reason)
							//fmt.Println("Not finished ready!!!")
							//fmt.Printf("c.State.Running %s \n", c.State.Running)
							//fmt.Printf("c.State.Waiting %s \n", c.State.Waiting)
						}
					}
				}
			}
		}
		time.Sleep(10 * time.Second)
	}
}
and some logs:
single-condition-pipeline-9rqrs-1224102659
pod.Name single-condition-pipeline-9rqrs-1224102659
Status.Phase Running
PodIP XXXXXXXXXXXX
c.Name main
---------------------------------------------------------------------------------------------
c.State {nil &ContainerStateRunning{StartedAt:2021-10-29 04:41:51 0000 UTC,} nil}
c.State.Terminated nil
c.State.Terminated nil
stateRunning Reason: *v1.ContainerStateRunning
update values
getPodName %!d(string=single-condition-pipeline-9rqrs-1224102659)
updated status phase
len get container 2
commit update
c.Name wait
c.State {nil &ContainerStateRunning{StartedAt:2021-10-29 04:41:51 0000 UTC,} nil}
c.State.Terminated nil
c.State.Terminated nil
stateRunning Reason: *v1.ContainerStateRunning
update values
getPodName %!d(string=single-condition-pipeline-9rqrs-1224102659)
updated status phase
len get container 2
---------------------------------------------------------------------------------------------
commit update
---------------------------------------------------------------------------------------------
get pods metadatada
single-condition-pipeline-9rqrs-1224102659
pod.Name single-condition-pipeline-9rqrs-1224102659
Status.Phase Running
PodIP XXXXXXXXXX
c.Name main
c.State {nil &ContainerStateRunning{StartedAt:2021-10-29 04:41:51 0000 UTC,} nil}
c.State.Terminated nil
c.State.Terminated nil
stateRunning Reason: *v1.ContainerStateRunning
update values
getPodName %!d(string=single-condition-pipeline-9rqrs-1224102659)
updated status phase
len get container 2
commit update
c.Name wait
c.State {nil &ContainerStateRunning{StartedAt:2021-10-29 04:41:51 0000 UTC,} nil}
c.State.Terminated nil
c.State.Terminated nil
stateRunning Reason: *v1.ContainerStateRunning
update values
getPodName %!d(string=single-condition-pipeline-9rqrs-1224102659)
updated status phase
len get container 2
commit update
So here: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-readiness-status, it mentions a Patch, but I don't know how to use it. Could somebody help me, or is there another way to finish the pod?
CodePudding user response:
You cannot set the phase, or anything else in the Pod status field; it is read-only. According to the Pod Lifecycle documentation, your pod will have a phase of Succeeded after "All containers in the Pod have terminated in success, and will not be restarted." So this will only happen if you can cause all of your pod's containers to exit with status code 0, and only if the pod's restartPolicy is set to OnFailure or Never. If it is set to Always (the default), the containers will eventually restart and your pod will eventually return to the Running phase.
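As an illustration (not part of the original question), here is a minimal sketch of a pod spec that satisfies that requirement; the name, image, and namespace are placeholders, and clientset, context, and metav1 are the same as in the question's code, with corev1 being "k8s.io/api/core/v1". Once its single container exits with status code 0, the kubelet reports the phase as Succeeded:

// Sketch only: placeholder pod with a restartPolicy that allows Succeeded.
pod := &corev1.Pod{
	ObjectMeta: metav1.ObjectMeta{Name: "single-run"}, // placeholder name
	Spec: corev1.PodSpec{
		// Never (or OnFailure) is required for the pod to be able to reach Succeeded.
		RestartPolicy: corev1.RestartPolicyNever,
		Containers: []corev1.Container{
			{
				Name:    "main",
				Image:   "busybox", // placeholder image
				Command: []string{"sh", "-c", "echo done"},
			},
		},
	},
}
if _, err := clientset.CoreV1().Pods("ns").Create(context.TODO(), pod, metav1.CreateOptions{}); err != nil {
	panic(err.Error())
}

In the question's case the pods appear to be created by a workflow controller rather than by this code, so the relevant point is only that their restartPolicy must already be Never or OnFailure for a Succeeded phase to be possible.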
In summary, you cannot do what you are attempting to do via the Kube API directly. You must:
- Ensure your pod has a restartPolicy that can support the Succeeded phase, i.e. OnFailure or Never.
- Cause your application to terminate, possibly by sending it SIGINT or SIGTERM, or possibly by commanding it via its own API; see the exec sketch below for one way to do this from client-go.
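For the second point, one way to ask a container to stop from client-go is the pod exec subresource. The sketch below is an assumption-laden illustration, not something from the original answer: the helper name terminateContainer is made up, it assumes the target image has a shell and a kill binary, that the application runs as PID 1 and exits with code 0 on SIGTERM, and it reuses the config and clientset from the question's main:

// terminate_container.go: add alongside the main from the question (same package).
package main

import (
	"os"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/remotecommand"
)

// terminateContainer (hypothetical helper) execs `kill -TERM 1` inside the given
// container so the process can shut itself down; if every container then exits 0
// and the restartPolicy allows it, the kubelet moves the pod to Succeeded.
func terminateContainer(config *rest.Config, clientset *kubernetes.Clientset, namespace, podName, containerName string) error {
	req := clientset.CoreV1().RESTClient().
		Post().
		Resource("pods").
		Namespace(namespace).
		Name(podName).
		SubResource("exec").
		VersionedParams(&corev1.PodExecOptions{
			Container: containerName,
			Command:   []string{"sh", "-c", "kill -TERM 1"}, // assumes a shell and kill in the image
			Stdout:    true,
			Stderr:    true,
		}, scheme.ParameterCodec)

	exec, err := remotecommand.NewSPDYExecutor(config, "POST", req.URL())
	if err != nil {
		return err
	}
	return exec.Stream(remotecommand.StreamOptions{
		Stdout: os.Stdout,
		Stderr: os.Stderr,
	})
}

Whether this actually finishes the pod still depends entirely on the conditions above: the exec call only asks the process to exit, it does not set the phase.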