flexvolume: the rookflex mount operation, executed via an RPC call to the agent
/usr/local/bin/rookflex --help
Rook Flex volume plugin

Usage:
  rookflex [command]

Available Commands:
  help        Help about any command
  init        Initialize the volume plugin
  mount       Mounts the volume to the pod volume
  unmount     Unmounts the pod volume

Flags:
  -h, --help   help for rookflex

Use "rookflex [command] --help" for more information about a command.
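kubelet invokes the flexvolume driver binary with positional arguments: the sub-command first, then the pod volume mount dir, then a JSON blob of options. A mount call therefore looks roughly like this (the JSON keys shown are illustrative; the real set comes from the StorageClass parameters and kubelet):

/usr/local/bin/rookflex mount \
    /var/lib/kubelet/pods/<podID>/volumes/ceph.rook.io~rook-ceph/<pvName> \
    '{"clusterNamespace":"rook-ceph","pool":"replicapool","image":"<pvName>","kubernetes.io/fsType":"xfs","kubernetes.io/readwrite":"rw"}'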
The attach logic itself lives in the agent pod, which watches the Volume attachment objects and performs the corresponding operations. rookflex first establishes an RPC connection to the agent (Go net/rpc, not gRPC):
client, err := getRPCClient()
if err != nil {
    return fmt.Errorf("Rook: Error getting RPC client: %v", err)
}

// args[1] is the JSON options blob passed by kubelet
var opts = &flexvolume.AttachOptions{}
if err = json.Unmarshal([]byte(args[1]), opts); err != nil {
    return fmt.Errorf("Rook: Could not parse options for mounting %s. Got %v", args[1], err)
}
// args[0] is the pod volume mount dir
opts.MountDir = args[0]

if opts.FsType == cephFS {
    return mountCephFS(client, opts)
}
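getRPCClient is not shown in the excerpt. A minimal sketch of what it has to do, assuming the agent serves Go net/rpc on a unix socket placed next to the driver binary (the socket name here is an assumption):

package main

import (
    "fmt"
    "net"
    "net/rpc"
    "os"
    "path"
)

func getRPCClient() (*rpc.Client, error) {
    // the flexvolume binary lives in the kubelet plugin dir; the agent drops
    // its RPC unix socket into the same directory
    ex, err := os.Executable()
    if err != nil {
        return nil, fmt.Errorf("error finding path of the rookflex binary: %v", err)
    }
    sock := path.Join(path.Dir(ex), ".rook.sock") // socket name is an assumption
    conn, err := net.Dial("unix", sock)
    if err != nil {
        return nil, fmt.Errorf("error connecting to agent socket %s: %v", sock, err)
    }
    return rpc.NewClient(conn), nil
}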
The GetAttachInfoFromMountDir RPC mainly fills in the AttachOptions struct from the mount dir:
err = client.Call("Controller.GetAttachInfoFromMountDir", opts.MountDir, &opts)
if err != nil {
    log(client, fmt.Sprintf("Attach volume %s/%s failed: %v", opts.BlockPool, opts.Image, err), true)
    return fmt.Errorf("Rook: Mount volume failed: %v", err)
}
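The mount dir already encodes the pod and volume identity: /var/lib/kubelet/pods/<podID>/volumes/ceph.rook.io~rook-ceph/<volumeName>. A hypothetical helper showing how those two parts could be recovered (the real RPC additionally consults the Volume CRDs to fill in pool, image, and credentials):

package main

import (
    "fmt"
    "strings"
)

// parseMountDir splits a kubelet pod volume path of the form
// /var/lib/kubelet/pods/<podID>/volumes/<driver>/<volumeName>
// into the pod UID and PV name; the helper is illustrative only
func parseMountDir(mountDir string) (podID, volumeName string, err error) {
    parts := strings.Split(strings.Trim(mountDir, "/"), "/")
    for i, p := range parts {
        if p == "pods" && i+4 < len(parts) && parts[i+2] == "volumes" {
            return parts[i+1], parts[i+4], nil
        }
    }
    return "", "", fmt.Errorf("unexpected mount dir format: %s", mountDir)
}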
The agent-side handling of these calls is covered in the second section.
func attach(client *rpc.Client, opts *flexvolume.AttachOptions) (string, error) {
    log(client, fmt.Sprintf("calling agent to attach volume %s/%s", opts.BlockPool, opts.Image), false)
    var devicePath string
    err := client.Call("Controller.Attach", opts, &devicePath)
    if err != nil {
        log(client, fmt.Sprintf("Attach volume %s/%s failed: %v", opts.BlockPool, opts.Image, err), true)
        return "", fmt.Errorf("Rook: Mount volume failed: %v", err)
    }
    return devicePath, nil
}
The attached volume is then mounted at a global mount path on the node, for example:
/var/lib/kubelet/plugins/ceph.rook.io/rook-ceph/mounts/pvc-abd9b2a7-7c82-11e9-84db-0800271c9f15
// Get global mount path
var globalVolumeMountPath string
err = client.Call("Controller.GetGlobalMountPath", globalMountPathInput, &globalVolumeMountPath)
if err != nil {
    log(client, fmt.Sprintf("Attach volume %s/%s failed. Cannot get global volume mount path: %v", opts.BlockPool, opts.Image, err), true)
    return fmt.Errorf("Rook: Mount volume failed. Cannot get global volume mount path: %v", err)
}
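The example above shows the global mount path layout: <kubelet root>/plugins/<driver vendor dir>/<cluster namespace>/mounts/<volume name>. A hypothetical helper that mirrors this layout (the real path is computed by the agent behind Controller.GetGlobalMountPath):

package main

import "path"

// globalMountPath mirrors the layout seen in the example path above;
// helper and parameter names are illustrative
func globalMountPath(kubeletRoot, vendorDir, clusterNamespace, volumeName string) string {
    // e.g. /var/lib/kubelet/plugins/ceph.rook.io/rook-ceph/mounts/pvc-abd9...
    return path.Join(kubeletRoot, "plugins", vendorDir, clusterNamespace, "mounts", volumeName)
}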
# mount | grep rbd
/dev/rbd0 on /var/lib/kubelet/plugins/ceph.rook.io/rook-ceph/mounts/pvc-d4329cba-8020-11e9-8f03-0800271c9f15 type xfs (rw,relatime,attr2,inode64,sunit=8192,swidth=8192,noquota)
IsLikelyNotMountPoint checks whether the global mount path is already mounted. If it is not, FormatAndMount formats the device (if needed) and mounts it; this is the step that mounts the device onto the node.
The mount itself is executed through systemd-run; a failed attempt produces output like this:

Mounting command: systemd-run
Mounting arguments: --description=Kubernetes transient mount for /var/lib/kubelet/plugins/ceph.rook.io/rook-ceph/mounts/pvc-d4329cba-8020-11e9-8f03-0800271c9f15 --scope -- mount -t xfs -o rw,defaults /dev/rbd0 /var/lib/kubelet/plugins/ceph.rook.io/rook-ceph/mounts/pvc-d4329cba-8020-11e9-8f03-0800271c9f15
Output: Running scope as unit run-14540.scope.
mount: wrong fs type, bad option, bad superblock on /dev/rbd0,
       missing codepage or helper program, or other error

       In some cases useful info is found in syslog - try
       dmesg | tail or so.
func mountDevice(client *rpc.Client, mounter *k8smount.SafeFormatAndMount, devicePath, globalVolumeMountPath string, opts *flexvolume.AttachOptions) error {
    notMnt, err := mounter.Interface.IsLikelyNotMountPoint(globalVolumeMountPath)
    if err != nil {
        if os.IsNotExist(err) {
            // the global mount path does not exist yet; create it
            if err = os.MkdirAll(globalVolumeMountPath, 0750); err != nil {
                return fmt.Errorf("Rook: Mount volume failed. Cannot create global volume mount path dir: %v", err)
            }
            notMnt = true
        } else {
            return fmt.Errorf("Rook: Mount volume failed. Error checking if %s is a mount point: %v", globalVolumeMountPath, err)
        }
    }
    options := []string{opts.RW}
    if notMnt {
        err = redirectStdout(
            client,
            func() error {
                // FormatAndMount formats the device on first use, then mounts it
                if err = mounter.FormatAndMount(devicePath, globalVolumeMountPath, opts.FsType, options); err != nil {
                    return fmt.Errorf("failed to mount volume %s [%s] to %s, error %v", devicePath, opts.FsType, globalVolumeMountPath, err)
                }
                return nil
            },
        )
        // do not swallow the mount error
        return err
    }
    return nil
}
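For reference, the SafeFormatAndMount used here comes from the Kubernetes mount utilities. Constructing one looks roughly like the following; the import paths moved between Kubernetes releases, so treat them as assumptions:

package main

import (
    utilexec "k8s.io/utils/exec"
    k8smount "k8s.io/utils/mount"
)

func newMounter() *k8smount.SafeFormatAndMount {
    return &k8smount.SafeFormatAndMount{
        Interface: k8smount.New(""), // "" selects the default mount binary
        Exec:      utilexec.New(),   // runs mkfs when the device has no filesystem
    }
}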
The mount operation then bind-mounts the node-level directory into the pod's volume directory:

/dev/rbd0 on /var/lib/kubelet/pods/bb7fa81a-8020-11e9-8f03-0800271c9f15/volumes/ceph.rook.io~rook-ceph/pvc-d4329cba-8020-11e9-8f03-0800271c9f15 type xfs (rw,relatime,attr2,inode64,sunit=8192,swidth=8192,noquota)
func mount(client *rpc.Client, mounter *k8smount.SafeFormatAndMount, globalVolumeMountPath string, opts *flexvolume.AttachOptions) error {
    log(client, fmt.Sprintf("mounting global mount path %s on %s", globalVolumeMountPath, opts.MountDir), false)
    // Perform a bind mount to the full path to allow duplicate mounts of the same volume.
    // This is only supported for RO attachments.
    options := []string{opts.RW, "bind"}
    err := redirectStdout(
        client,
        func() error {
            // bind-mount the node-level global mount path into the pod volume dir
            return mounter.Interface.Mount(globalVolumeMountPath, opts.MountDir, "", options)
        },
    )
    if err != nil {
        log(client, fmt.Sprintf("mount volume %s/%s failed: %v", opts.BlockPool, opts.Image, err), true)
    }
    return err
}
On the agent side, when a pod needs the volume mounted, the attach handler first records the attachment:
// Check if this volume has been attached
volumeattachObj, err := c.volumeAttachment.Get(namespace, crdName)
if err != nil {
    if !errors.IsNotFound(err) {
        return fmt.Errorf("failed to get volume CRD %s. %+v", crdName, err)
    }
    // No volumeattach CRD for this volume found. Create one
    volumeattachObj = rookalpha.NewVolume(
        crdName,
        namespace,
        node,
        attachOpts.PodNamespace,
        attachOpts.Pod,
        attachOpts.ClusterNamespace,
        attachOpts.MountDir,
        strings.ToLower(attachOpts.RW) == ReadOnly,
    )
    logger.Infof("creating Volume attach Resource %s/%s: %+v", volumeattachObj.Namespace, volumeattachObj.Name, attachOpts)
    err = c.volumeAttachment.Create(volumeattachObj)
    if err != nil {
        if !errors.IsAlreadyExists(err) {
            return fmt.Errorf("failed to create volume CRD %s. %+v", crdName, err)
        }
        // Some other attacher beat us in this race. Kubernetes will retry again.
        return fmt.Errorf("failed to attach volume %s for pod %s/%s. Volume is already attached by a different pod",
            crdName, attachOpts.PodNamespace, attachOpts.Pod)
    }
}
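rookalpha.NewVolume builds the Volume attachment CRD object. Inferring from the arguments passed above, its shape is roughly the following (a sketch, not the authoritative rook.io type definition):

package v1alpha2

import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

// Volume records which pods have a given volume attached; sketched from the
// fields passed to rookalpha.NewVolume above
type Volume struct {
    metav1.TypeMeta   `json:",inline"`
    metav1.ObjectMeta `json:"metadata"`
    Attachments       []Attachment `json:"attachments"`
}

// Attachment describes a single consumer of the volume
type Attachment struct {
    Node         string `json:"node"`
    PodNamespace string `json:"podNamespace"`
    PodName      string `json:"podName"`
    ClusterName  string `json:"clusterName"`
    MountDir     string `json:"mountDir"`
    ReadOnly     bool   `json:"readOnly"`
}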
An example of the AttachOptions received by the agent (empty fields left blank):

Image:            pvc-db2685f5-710c-11e9-8a6c-0800271c9f15
BlockPool:        replicapool
Pool:             replicapool
ClusterNamespace: rook-ceph
ClusterName:
StorageClass:     rook-ceph-block
MountDir:         /var/lib/kubelet/pods/22dc6766-710d-11e9-8a6c-0800271c9f15/volumes/ceph.rook.io~rook-ceph/pvc-db2685f5-710c-11e9-8a6c-0800271c9f15
FsName:
Path:
MountUser:        admin
MountSecret:
RW:               rw
FsType:           xfs
VolumeName:       pvc-db2685f5-710c-11e9-8a6c-0800271c9f15
Pod:              wordpress-mysql-6858799448-7fnwv
PodID:            22dc6766-710d-11e9-8a6c-0800271c9f15
PodNamespace:     default
The handler then delegates to the volume manager to map the image on the node:

*devicePath, err = c.volumeManager.Attach(attachOpts.Image, attachOpts.BlockPool, attachOpts.MountUser, attachOpts.MountSecret, attachOpts.ClusterNamespace)
if err != nil {
    return fmt.Errorf("failed to attach volume %s/%s: %+v", attachOpts.BlockPool, attachOpts.Image, err)
}
Source: pkg/daemon/ceph/agent/flexvolume/manager/ceph/manager.go

The volume manager attaches the RBD image to the node. FindDevicePath first checks whether the image is already mapped: it reads the name and pool files under /sys/bus/rbd/devices and matches them against the devices under /dev/rbd*; if a matching device exists, the image has already been attached.
// Check if the volume is attached
func (vm *VolumeManager) isAttached(image, pool, clusterNamespace string) (string, error) {
    devicePath, err := vm.devicePathFinder.FindDevicePath(image, pool, clusterNamespace)
    if err != nil {
        return "", err
    }
    return devicePath, nil
}
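A simplified sketch of the sysfs lookup described above, assuming the kernel rbd driver exposes one numbered directory per mapped image under /sys/bus/rbd/devices, each containing name and pool files (the helper is illustrative; the real logic sits behind the devicePathFinder interface):

package main

import (
    "io/ioutil"
    "path"
    "strings"
)

// findDevicePath scans /sys/bus/rbd/devices/<N>/{name,pool}; a match means the
// image is already mapped and its device node is /dev/rbd<N>. An empty result
// means the image is not mapped yet.
func findDevicePath(image, pool string) (string, error) {
    const sysPath = "/sys/bus/rbd/devices"
    entries, err := ioutil.ReadDir(sysPath)
    if err != nil {
        return "", err
    }
    for _, e := range entries {
        name, err := ioutil.ReadFile(path.Join(sysPath, e.Name(), "name"))
        if err != nil {
            return "", err
        }
        p, err := ioutil.ReadFile(path.Join(sysPath, e.Name(), "pool"))
        if err != nil {
            return "", err
        }
        if strings.TrimSpace(string(name)) == image && strings.TrimSpace(string(p)) == pool {
            return "/dev/rbd" + e.Name(), nil
        }
    }
    return "", nil
}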
If the image is not yet mapped, the manager invokes: rbd map replicapool/pvc-db2685f5-710c-11e9-8a6c-0800271c9f15 --id=admin --cluster=rook-ceph --keyring=/tmp/rook-ceph.keyring324929687 -m 10.200.91.189:6789 --conf=/dev/null
// MapImage maps an RBD image using admin cephx
func MapImage(context *clusterd.Context, imageName, poolName, id, keyring, clusterName, monitors string) error {
    imageSpec := getImageSpec(imageName, poolName)
    args := []string{
        "map",
        imageSpec,
        fmt.Sprintf("--id=%s", id),
        fmt.Sprintf("--cluster=%s", clusterName),
        fmt.Sprintf("--keyring=%s", keyring),
        "-m", monitors,
        "--conf=/dev/null", // no config file needed because we are passing all required config as arguments
    }
    output, err := ExecuteRBDCommandWithTimeout(context, clusterName, args)
    if err != nil {
        return fmt.Errorf("failed to map image %s: %+v. output: %s", imageSpec, err, output)
    }
    return nil
}
// rbd map can return before the /dev/rbdX node exists, so poll for the device path
retryCount := 0
for {
    devicePath, err := vm.devicePathFinder.FindDevicePath(image, pool, clusterNamespace)
    if err != nil {
        return "", fmt.Errorf("failed to poll for mapped image %s/%s cluster %s. %+v", pool, image, clusterNamespace, err)
    }
    if devicePath != "" {
        return devicePath, nil
    }
    retryCount++
    if retryCount >= findDevicePathMaxRetries {
        return "", fmt.Errorf("exceeded retry count while finding device path")
    }
    logger.Info("failed to find device path, sleeping 1 second")
    <-time.After(time.Second)
}
In summary: the watch handler invokes the volume manager, which runs the rbd map command;
the /dev/rbdX device is mounted on the node at the global mount path under plugins/;
and that node-level mount is then bind-mounted into the pod's volume directory.