是一款代替 host-local 的 IPAM,可以本地或用 kubernetes 记录 ippool 和 已分配 IP;并且支持所有节点的 pod ip 可以使用同一个 cidr。
它是以 NAD 为地址段设定的模式:一个 NAD 中描述该地址段的 IP 范围、网关等信息。
通常和cni bridge,ipvlan,macvlan 等相结合使用,负责分配和管理 IP 的功能。
git clone https://github.com/k8snetworkplumbingwg/whereabouts.git
cd whereabouts
kubectl apply -f doc/crds/daemonset-install.yaml -f doc/crds/whereabouts.cni.cncf.io_ippools.yaml -f doc/crds/whereabouts.cni.cncf.io_overlappingrangeipreservations.yaml
部署后有一个 daemonset 和 cni binary
控制器中有三个 informer,分别 watch pod、NAD、IPPool 三种资源
podInformerFactory.Start(stopChannel)
netAttachDefInformerFactory.Start(stopChannel)
ipPoolInformerFactory.Start(stopChannel)
创建一个 nad
apiVersion: "k8s.cni.cncf.io/v1"
kind: NetworkAttachmentDefinition
metadata:
name: whereabouts-conf
spec:
config: '{
"cniVersion": "0.3.0",
"name": "whereaboutsexample",
"type": "ipvlan",
"master": "ens6",
"ipam": {
"type": "whereabouts",
"datastore": "kubernetes",
"range": "1.1.1.0/24",
"range_start": "1.1.1.66",
"range_end": "1.1.1.73",
"gateway": "1.1.1.1",
"log_file": "/var/log/whereabouts.log",
"log_level": "debug"
}
}'
创建 pod 时
apiVersion: v1
kind: Pod
metadata:
name: pod2
namespace: default
labels:
app: nginx
annotations:
k8s.v1.cni.cncf.io/networks: default/whereabouts-conf
spec:
containers:
- name: pod2
image: nginx
ports:
- name: nginx-port
containerPort: 80
protocol: TCP
// CNI ADD entry: parse the IPAM section of the NAD's CNI config (stdin JSON)
// plus CNI_ARGS into an IPAMConfig; confVersion is the cniVersion string.
ipamConf, confVersion, err := config.LoadIPAMConfig(args.StdinData, args.Args)
读取 ipRange
// Build the range configuration from the parsed IPAM config fields.
oldRange := types.RangeConfiguration{
OmitRanges: n.IPAM.OmitRanges, // "exclude": sub-ranges to skip during assignment
Range: n.IPAM.Range, // range: either a CIDR or a "start-end" pair separated by "-"
RangeStart: n.IPAM.RangeStart, // first assignable IP (optional narrowing of Range)
RangeEnd: n.IPAM.RangeEnd, // last assignable IP (optional narrowing of Range)
}
// getPool returns the IPPool custom resource named name from the configured
// namespace. When the pool does not exist yet it is created with the given
// iprange and an empty allocation map; in that case (and on a concurrent
// create race) a temporaryError is returned so the caller's allocation loop
// retries and re-reads the now fully populated object (resourceVersion etc.).
func (i *KubernetesIPAM) getPool(ctx context.Context, name string, iprange string) (*whereaboutsv1alpha1.IPPool, error) {
	timedCtx, cancel := context.WithTimeout(ctx, storage.RequestTimeout)
	defer cancel()

	pool, getErr := i.client.WhereaboutsV1alpha1().IPPools(i.namespace).Get(timedCtx, name, metav1.GetOptions{})
	switch {
	case getErr == nil:
		return pool, nil
	case errors.IsNotFound(getErr):
		// Pool is missing: create it with an empty allocation table.
		freshPool := &whereaboutsv1alpha1.IPPool{}
		freshPool.ObjectMeta.Name = name
		freshPool.Spec.Range = iprange
		freshPool.Spec.Allocations = make(map[string]whereaboutsv1alpha1.IPAllocation)
		if _, createErr := i.client.WhereaboutsV1alpha1().IPPools(i.namespace).Create(timedCtx, freshPool, metav1.CreateOptions{}); createErr != nil {
			if errors.IsAlreadyExists(createErr) {
				// Another writer created the pool first -- allow retry.
				return nil, &temporaryError{createErr}
			}
			return nil, fmt.Errorf("k8s create error: %s", createErr)
		}
		// Trigger another retry of the allocation loop so all of the
		// metadata / resourceVersions are populated as necessary by the
		// subsequent `client.Get` call.
		return nil, &temporaryError{fmt.Errorf("k8s pool initialized")}
	default:
		return nil, fmt.Errorf("k8s get error: %s", getErr)
	}
}
// Walk the configured range (honoring range_start/range_end and the exclude
// list) to find the first free IP; returns the chosen IP and the reservation
// list updated with the new (containerID, podRef) entry.
newip, updatedreservelist, err := IterateForAssignment(*ipnet, ipamConf.RangeStart, ipamConf.RangeEnd, reservelist, ipamConf.OmitRanges, containerID, podRef)
apiVersion: v1
items:
- apiVersion: whereabouts.cni.cncf.io/v1alpha1
kind: IPPool
metadata:
creationTimestamp: "2023-04-01T09:18:51Z"
generation: 3
name: 1.1.1.0-24
namespace: kube-system
resourceVersion: "1339436"
uid: c09f1faf-7c69-4c0d-8b08-9505eb5a6718
spec:
allocations:
"66":
id: be903950515693036730fc4522f62e5d626846f9f5e3dba5424bc2f3e97b94e5
podref: default/pod2
"67":
id: a2e9650287946248710a9b600f9c410e3f120b948f7d0dec1c5c0feafc2b8e56
podref: default/pod3
range: 1.1.1.0/24
- apiVersion: whereabouts.cni.cncf.io/v1alpha1
kind: OverlappingRangeIPReservation
metadata:
creationTimestamp: "2023-04-01T10:06:06Z"
generation: 1
name: 1.1.1.67
namespace: kube-system
resourceVersion: "1339437"
uid: d0d6ece7-7aea-42a9-a03d-04fc25a8ec1d
spec:
containerid: a2e9650287946248710a9b600f9c410e3f120b948f7d0dec1c5c0feafc2b8e56
podref: default/pod3
"reconciler_cron_expression": "30 4 * * *"