type Interface interface {
    // Get returns the LeaderElectionRecord
    Get(ctx context.Context) (*LeaderElectionRecord, []byte, error)

    // Create attempts to create a LeaderElectionRecord
    Create(ctx context.Context, ler LeaderElectionRecord) error

    // Update will update an existing LeaderElectionRecord
    Update(ctx context.Context, ler LeaderElectionRecord) error

    // RecordEvent is used to record events
    RecordEvent(string)

    // Identity will return the lock's Identity
    Identity() string

    // Describe is used to convert details on current resource lock
    // into a string
    Describe() string
}
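To make the contract concrete, here is a minimal in-memory implementation of this Interface, the kind you might write for a unit test. This is purely an illustrative sketch: the fakeLock type and its fields are my own invention and are not part of client-go.

import (
    "context"
    "encoding/json"
    "fmt"

    rl "k8s.io/client-go/tools/leaderelection/resourcelock"
)

// fakeLock keeps the LeaderElectionRecord in memory instead of in the API server.
type fakeLock struct {
    identity string
    record   *rl.LeaderElectionRecord
}

func (f *fakeLock) Get(ctx context.Context) (*rl.LeaderElectionRecord, []byte, error) {
    if f.record == nil {
        return nil, nil, fmt.Errorf("record not found")
    }
    raw, err := json.Marshal(*f.record)
    return f.record, raw, err
}

func (f *fakeLock) Create(ctx context.Context, ler rl.LeaderElectionRecord) error {
    f.record = &ler
    return nil
}

func (f *fakeLock) Update(ctx context.Context, ler rl.LeaderElectionRecord) error {
    if f.record == nil {
        return fmt.Errorf("record not created yet")
    }
    f.record = &ler
    return nil
}

func (f *fakeLock) RecordEvent(string) {}                          // no-op: nothing to record
func (f *fakeLock) Identity() string   { return f.identity }       // who this candidate claims to be
func (f *fakeLock) Describe() string   { return "fake/in-memory" } // shown in log messages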
type ConfigMapLock struct {
    // ConfigMapMeta should contain a Name and a Namespace of a
    // ConfigMap object that the LeaderElector will attempt to lead.
    ConfigMapMeta metav1.ObjectMeta
    Client        corev1client.ConfigMapsGetter
    LockConfig    ResourceLockConfig
    cm            *v1.ConfigMap
}

...

type LeaseLock struct {
    // LeaseMeta should contain a Name and a Namespace of a
    // Lease object that the LeaderElector will attempt to lead.
    LeaseMeta  metav1.ObjectMeta
    Client     coordinationv1client.LeasesGetter
    LockConfig ResourceLockConfig
    lease      *coordinationv1.Lease
}

...
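Each concrete lock simply maps the Interface methods onto API calls against its backing object. As a rough idea, here is a condensed sketch (paraphrased, not the verbatim client-go source) of how LeaseLock can satisfy Get: fetch the Lease and translate its spec into a LeaderElectionRecord.

// Condensed sketch: read the Lease object from the API server and convert
// its spec into an election record.
func (ll *LeaseLock) Get(ctx context.Context) (*LeaderElectionRecord, []byte, error) {
    lease, err := ll.Client.Leases(ll.LeaseMeta.Namespace).Get(ctx, ll.LeaseMeta.Name, metav1.GetOptions{})
    if err != nil {
        return nil, nil, err
    }
    ll.lease = lease
    record := LeaseSpecToLeaderElectionRecord(&ll.lease.Spec) // helper in the resourcelock package
    recordBytes, err := json.Marshal(*record)
    if err != nil {
        return nil, nil, err
    }
    return record, recordBytes, nil
}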
// Manufacture will create a lock of a given type according to the input parameters
func New(lockType string, ns string, name string, coreClient corev1.CoreV1Interface, coordinationClient coordinationv1.CoordinationV1Interface, rlc ResourceLockConfig) (Interface, error) {
    endpointsLock := &EndpointsLock{
        EndpointsMeta: metav1.ObjectMeta{
            Namespace: ns,
            Name:      name,
        },
        Client:     coreClient,
        LockConfig: rlc,
    }
    configmapLock := &ConfigMapLock{
        ConfigMapMeta: metav1.ObjectMeta{
            Namespace: ns,
            Name:      name,
        },
        Client:     coreClient,
        LockConfig: rlc,
    }
    leaseLock := &LeaseLock{
        LeaseMeta: metav1.ObjectMeta{
            Namespace: ns,
            Name:      name,
        },
        Client:     coordinationClient,
        LockConfig: rlc,
    }
    switch lockType {
    case EndpointsResourceLock:
        return endpointsLock, nil
    case ConfigMapsResourceLock:
        return configmapLock, nil
    case LeasesResourceLock:
        return leaseLock, nil
    case EndpointsLeasesResourceLock:
        return &MultiLock{
            Primary:   endpointsLock,
            Secondary: leaseLock,
        }, nil
    case ConfigMapsLeasesResourceLock:
        return &MultiLock{
            Primary:   configmapLock,
            Secondary: leaseLock,
        }, nil
    default:
        return nil, fmt.Errorf("Invalid lock-type %s", lockType)
    }
}
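A quick usage sketch, assuming you already have a *kubernetes.Clientset in hand; the namespace, name, and Identity values below are placeholders:

lock, err := resourcelock.New(
    resourcelock.LeasesResourceLock, // back the election with a coordination.k8s.io/v1 Lease
    "kube-system",                   // namespace of the lock object
    "my-controller",                 // name of the lock object
    clientset.CoreV1(),
    clientset.CoordinationV1(),
    resourcelock.ResourceLockConfig{
        Identity: "my-pod-name", // must be unique per candidate, e.g. the pod name
    },
)
if err != nil {
    panic(err)
}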
// RunOrDie starts a client with the provided config or panics if the config
// fails to validate. RunOrDie blocks until leader election loop is
// stopped by ctx or it has stopped holding the leader lease
func RunOrDie(ctx context.Context, lec LeaderElectionConfig) {
    // This simply news up a LeaderElector object; it mainly checks whether
    // the user-supplied config is valid.
    le, err := NewLeaderElector(lec)
    if err != nil {
        panic(err)
    }
    // TODO: the role of the watch dog is not yet clear.
    if lec.WatchDog != nil {
        lec.WatchDog.SetLeaderElection(le)
    }
    // The main work happens in this Run function; we will decode what it does below.
    le.Run(ctx)
}
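Putting it together, a typical invocation looks roughly like this, building on the lock created above (the durations and callback bodies are illustrative placeholders):

leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
    Lock:            lock,
    ReleaseOnCancel: true,             // give up the lease when ctx is cancelled
    LeaseDuration:   15 * time.Second, // how long a lease is valid
    RenewDeadline:   10 * time.Second, // leader must renew before this elapses
    RetryPeriod:     2 * time.Second,  // wait between acquire/renew attempts
    Callbacks: leaderelection.LeaderCallbacks{
        OnStartedLeading: func(ctx context.Context) {
            // we won the election: start the controller's real work here
        },
        OnStoppedLeading: func() {
            // we lost the lease or ctx was cancelled: shut down cleanly
        },
        OnNewLeader: func(identity string) {
            // observed a leader change; identity may be our own
        },
    },
})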
Demystifying LeaderElector's Run

As mentioned above, the machinery behind RunOrDie mostly lives in the Run method of the LeaderElector object, so let's walk through what LeaderElector.Run actually does!
// Run starts the leader election loop. Run will not return
// before leader election loop is stopped by ctx or it has
// stopped holding the leader lease
func (le *LeaderElector) Run(ctx context.Context) {
    // Kubernetes' built-in crash handling; not covered in this post.
    defer runtime.HandleCrash()
    // When the user cancels the context, the OnStoppedLeading callback is invoked.
    defer func() {
        le.config.Callbacks.OnStoppedLeading()
    }()
    // Here it tries to acquire or create the resource lock; if the lock is
    // not obtained, execution stays blocked inside this function!
    if !le.acquire(ctx) {
        return // ctx signalled done
    }
    ctx, cancel := context.WithCancel(ctx)
    defer cancel()
    // Start a goroutine to execute the controller manager's entry-point run function.
    go le.config.Callbacks.OnStartedLeading(ctx)
    // Keep renewing the resource lock lease.
    le.renew(ctx)
}
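To see why Run can block inside acquire, here is a condensed sketch of its retry loop (paraphrased from client-go, not the verbatim source): it keeps calling tryAcquireOrRenew on a jittered RetryPeriod until it wins the lock or the context is done.

func (le *LeaderElector) acquire(ctx context.Context) bool {
    ctx, cancel := context.WithCancel(ctx)
    defer cancel()
    succeeded := false
    // JitterUntil re-invokes the closure every RetryPeriod (with jitter)
    // until ctx.Done() fires, which is why callers block in here.
    wait.JitterUntil(func() {
        succeeded = le.tryAcquireOrRenew(ctx)
        if !succeeded {
            return // lost the race; try again next period
        }
        cancel() // we hold the lock now, stop retrying
    }, le.config.RetryPeriod, JitterFactor, true, ctx.Done())
    return succeeded
}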