package main
import (
	"context"
	"flag"
	"log"
	"os"
	"os/signal"
	"path/filepath"
	"syscall"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
	"k8s.io/client-go/util/homedir"
	"k8s.io/klog/v2"
)
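
// main campaigns for a coordination.k8s.io Lease and logs leadership
// changes; only one replica holds the lease at a time. Example invocation
// (flag values are illustrative):
//
//	go run main.go -name=node1 -key=example -ns=default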
func main() {
	klog.InitFlags(nil)
	var (
		name    string // identity of this candidate in the election
		lockKey string // name of the Lease lock object
		lockNS  string // namespace in which the Lease lock lives
		// POD_NAME is typically injected via the Kubernetes downward API.
		podName    = os.Getenv("POD_NAME")
		kubeconfig *string
	)
	flag.StringVar(&name, "name", podName, "identity used when acquiring the lease lock")
	flag.StringVar(&lockKey, "key", "example", "lease lock name")
	flag.StringVar(&lockNS, "ns", "default", "namespace in which to create the lease lock")
	// https://github.com/kubernetes/client-go/blob/release-1.30/examples/out-of-cluster-client-configuration/main.go
	if home := homedir.HomeDir(); home != "" {
		kubeconfig = flag.String("kubeconfig", filepath.Join(home, ".kube", "config"), "(optional) absolute path to the kubeconfig file")
	} else {
		kubeconfig = flag.String("kubeconfig", "", "absolute path to the kubeconfig file")
	}
	flag.Parse()
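
	// Defensive check not in the original example: with an empty identity
	// (POD_NAME unset and -name not given), every replica would share the
	// same identity and each would believe it holds the lease.
	if name == "" {
		klog.Fatal("an identity is required: set POD_NAME or pass -name")
	}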
	// Build the client config from the kubeconfig file. (Inside a cluster,
	// rest.InClusterConfig from k8s.io/client-go/rest would work as well.)
	config, err := clientcmd.BuildConfigFromFlags("", *kubeconfig)
	if err != nil {
		panic(err.Error())
	}
	// create the clientset
	clientset, err := kubernetes.NewForConfig(config)
	if err != nil {
		panic(err.Error())
	}
	// Canceling this context releases resources associated with it, so code
	// should call cancel as soon as the operations running in this context
	// complete.
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// Trap SIGINT/SIGTERM so the lease can be released on shutdown.
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, os.Interrupt, syscall.SIGTERM)
	go func() {
		<-ch
		log.Printf("received termination signal, shutting down ...")
		cancel()
	}()
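
	// RunOrDie blocks until ctx is canceled (or, without ReleaseOnCancel,
	// until leadership is lost), invoking the callbacks below as leadership
	// changes hands.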
	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
		Lock: &resourcelock.LeaseLock{
			LeaseMeta: metav1.ObjectMeta{
				Name:      lockKey,
				Namespace: lockNS,
			},
			Client: clientset.CoordinationV1(),
			LockConfig: resourcelock.ResourceLockConfig{
				Identity: name,
			},
		},
		// Release the lease on context cancel so a follower can take over
		// immediately instead of waiting for the lease to expire.
		ReleaseOnCancel: true,
		LeaseDuration:   60 * time.Second, // lease validity before it can be taken over
		RenewDeadline:   15 * time.Second, // leader gives up renewing after this long
		RetryPeriod:     5 * time.Second,  // wait between acquire/renew attempts
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(ctx context.Context) {
				// do the leader-only work here
				klog.Infof("%s: started leading", name)
			},
			OnStoppedLeading: func() {
				klog.Infof("%s: stopped leading", name)
			},
			OnNewLeader: func(identity string) {
				if identity == name {
					// this process just acquired the lease itself
					return
				}
				klog.Infof("new leader elected: %v", identity)
			},
		},
	})
	log.Printf("%s: exit", name)
}