diff --git a/pkg/controller/plan/adapter/base/doc.go b/pkg/controller/plan/adapter/base/doc.go
index 453402b40..5a564c38f 100644
--- a/pkg/controller/plan/adapter/base/doc.go
+++ b/pkg/controller/plan/adapter/base/doc.go
@@ -53,11 +53,14 @@ type Builder interface {
 	ResolvePersistentVolumeClaimIdentifier(pvc *core.PersistentVolumeClaim) string
 	// Conversion Pod environment
 	PodEnvironment(vmRef ref.Ref, sourceSecret *core.Secret) (env []core.EnvVar, err error)
-	// Create PersistentVolumeClaim with a DataSourceRef
 	PersistentVolumeClaimWithSourceRef(da interface{}, storageName *string, populatorName string, accessModes []core.PersistentVolumeAccessMode, volumeMode *core.PersistentVolumeMode) *core.PersistentVolumeClaim
 	// Add custom steps before creating PVC/DataVolume
 	PreTransferActions(c Client, vmRef ref.Ref) (ready bool, err error)
+	// Build LUN PVs.
+	LunPersistentVolumes(vmRef ref.Ref) (pvs []core.PersistentVolume, err error)
+	// Build LUN PVCs.
+	LunPersistentVolumeClaims(vmRef ref.Ref) (pvcs []core.PersistentVolumeClaim, err error)
 }
 
 // Client API.
@@ -83,6 +86,8 @@ type Client interface {
 	Close()
 	// Finalize migrations
 	Finalize(vms []*planapi.VMStatus, planName string)
+	// Detach from the source VM the disks that are attached to the target VM without being cloned (e.g., LUNs).
+	DetachDisks(vmRef ref.Ref) error
 }
 
 // Validator API.
diff --git a/pkg/controller/plan/adapter/openstack/builder.go b/pkg/controller/plan/adapter/openstack/builder.go
index 3ce1f4f38..758125813 100644
--- a/pkg/controller/plan/adapter/openstack/builder.go
+++ b/pkg/controller/plan/adapter/openstack/builder.go
@@ -1153,3 +1153,15 @@ func (r *Builder) cleanup(c planbase.Client, imageName string) {
 		}
 	}
 }
+
+// Build LUN PVs.
+func (r *Builder) LunPersistentVolumes(vmRef ref.Ref) (pvs []core.PersistentVolume, err error) {
+	// do nothing
+	return
+}
+
+// Build LUN PVCs.
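+// OpenStack disks are always transferred as images in this flow, so there are
+// no LUN-backed claims to build; the empty result makes the LUN path a no-op.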
+func (r *Builder) LunPersistentVolumeClaims(vmRef ref.Ref) (pvcs []core.PersistentVolumeClaim, err error) {
+	// do nothing
+	return
+}
diff --git a/pkg/controller/plan/adapter/openstack/client.go b/pkg/controller/plan/adapter/openstack/client.go
index 1e817d627..d2900308d 100644
--- a/pkg/controller/plan/adapter/openstack/client.go
+++ b/pkg/controller/plan/adapter/openstack/client.go
@@ -395,3 +395,8 @@ func (r *Client) Finalize(vms []*planapi.VMStatus, migrationName string) {
 		}
 	}
 }
+
+func (r *Client) DetachDisks(vmRef ref.Ref) (err error) {
+	// no-op
+	return
+}
diff --git a/pkg/controller/plan/adapter/ovirt/builder.go b/pkg/controller/plan/adapter/ovirt/builder.go
index 9f5710ee0..ae5de6500 100644
--- a/pkg/controller/plan/adapter/ovirt/builder.go
+++ b/pkg/controller/plan/adapter/ovirt/builder.go
@@ -198,7 +198,7 @@ func (r *Builder) DataVolumes(vmRef ref.Ref, secret *core.Secret, configMap *cor
 		return
 	}
 	for _, da := range vm.DiskAttachments {
-		if da.Disk.StorageDomain == sd.ID {
+		if da.Disk.StorageType == "image" && da.Disk.StorageDomain == sd.ID {
 			storageClass := mapped.Destination.StorageClass
 			size := da.Disk.ProvisionedSize
 			if da.Disk.ActualSize > size {
@@ -427,16 +427,6 @@ func (r *Builder) mapDisks(vm *model.Workload, persistentVolumeClaims []core.Per
 	for _, da := range vm.DiskAttachments {
 		claimName := pvcMap[da.Disk.ID].Name
 		volumeName := da.Disk.ID
-		volume := cnv.Volume{
-			Name: volumeName,
-			VolumeSource: cnv.VolumeSource{
-				PersistentVolumeClaim: &cnv.PersistentVolumeClaimVolumeSource{
-					PersistentVolumeClaimVolumeSource: core.PersistentVolumeClaimVolumeSource{
-						ClaimName: claimName,
-					},
-				},
-			},
-		}
 		var bus string
 		switch da.Interface {
 		case VirtioScsi:
@@ -446,11 +436,34 @@ func (r *Builder) mapDisks(vm *model.Workload, persistentVolumeClaims []core.Per
 		default:
 			bus = Virtio
 		}
-		disk := cnv.Disk{
+		var disk cnv.Disk
+		if da.Disk.StorageType == "lun" {
+			claimName = volumeName
+			disk = cnv.Disk{
+				Name: volumeName,
+				DiskDevice: cnv.DiskDevice{
+					LUN: &cnv.LunTarget{
+						Bus: cnv.DiskBus(bus),
+					},
+				},
+			}
+		} else {
+			disk = cnv.Disk{
+				Name: volumeName,
+				DiskDevice: cnv.DiskDevice{
+					Disk: &cnv.DiskTarget{
+						Bus: cnv.DiskBus(bus),
+					},
+				},
+			}
+		}
+		volume := cnv.Volume{
 			Name: volumeName,
-			DiskDevice: cnv.DiskDevice{
-				Disk: &cnv.DiskTarget{
-					Bus: cnv.DiskBus(bus),
+			VolumeSource: cnv.VolumeSource{
+				PersistentVolumeClaim: &cnv.PersistentVolumeClaimVolumeSource{
+					PersistentVolumeClaimVolumeSource: core.PersistentVolumeClaimVolumeSource{
+						ClaimName: claimName,
+					},
 				},
 			},
 		}
@@ -478,18 +491,22 @@ func (r *Builder) Tasks(vmRef ref.Ref) (list []*plan.Task, err error) {
 			vmRef.String())
 	}
 	for _, da := range vm.DiskAttachments {
+		// We don't add a task for LUNs because we don't copy their content; we
+		// assume the LUNs used in the source environment are also reachable from
+		// the target environment.
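+		// For example, a VM with one 10 GiB image disk and one LUN yields a
+		// single task with Total 10240, i.e. ProvisionedSize / 0x100000 MB.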
+		if da.Disk.StorageType != "lun" {
+			mB := da.Disk.ProvisionedSize / 0x100000
+			list = append(
+				list,
+				&plan.Task{
+					Name: da.Disk.ID,
+					Progress: libitr.Progress{
+						Total: mB,
+					},
+					Annotations: map[string]string{
+						"unit": "MB",
+					},
+				})
+		}
 	}
 
 	return
@@ -578,3 +595,110 @@ func (r *Builder) PersistentVolumeClaimWithSourceRef(da interface{}, storageName
 func (r *Builder) PreTransferActions(c planbase.Client, vmRef ref.Ref) (ready bool, err error) {
 	return true, nil
 }
+
+// Create PV specs for the VM LUNs.
+func (r *Builder) LunPersistentVolumes(vmRef ref.Ref) (pvs []core.PersistentVolume, err error) {
+	vm := &model.Workload{}
+	err = r.Source.Inventory.Find(vm, vmRef)
+	if err != nil {
+		err = liberr.Wrap(
+			err,
+			"VM lookup failed.",
+			"vm",
+			vmRef.String())
+		return
+	}
+	for _, da := range vm.DiskAttachments {
+		if da.Disk.StorageType == "lun" {
+			volMode := core.PersistentVolumeBlock
+			logicalUnit := da.Disk.Lun.LogicalUnits.LogicalUnit[0]
+
+			pvSpec := core.PersistentVolume{
+				ObjectMeta: meta.ObjectMeta{
+					Name:      da.Disk.ID,
+					Namespace: r.Plan.Spec.TargetNamespace,
+					Annotations: map[string]string{
+						AnnImportDiskId: da.Disk.ID,
+						"vmID":          vm.ID,
+						"plan":          string(r.Plan.UID),
+						"lun":           "true",
+					},
+					Labels: map[string]string{
+						"volume": fmt.Sprintf("%v-%v", vm.Name, da.ID),
+					},
+				},
+				Spec: core.PersistentVolumeSpec{
+					PersistentVolumeSource: core.PersistentVolumeSource{
+						ISCSI: &core.ISCSIPersistentVolumeSource{
+							TargetPortal: logicalUnit.Address + ":" + logicalUnit.Port,
+							IQN:          logicalUnit.Target,
+							Lun:          logicalUnit.LunMapping,
+							ReadOnly:     false,
+						},
+					},
+					Capacity: core.ResourceList{
+						core.ResourceStorage: *resource.NewQuantity(logicalUnit.Size, resource.BinarySI),
+					},
+					AccessModes: []core.PersistentVolumeAccessMode{
+						core.ReadWriteMany,
+					},
+					VolumeMode: &volMode,
+				},
+			}
+			pvs = append(pvs, pvSpec)
+		}
+	}
+	return
+}
+
+// Create PVC specs for the VM LUNs.
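+// A claim binds to its LUN-backed PV through the label selector rather than
+// by name: both sides carry volume: <vm.Name>-<attachment ID>. The empty
+// StorageClassName below disables dynamic provisioning, so only the matching
+// static PV can satisfy the claim.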
+func (r *Builder) LunPersistentVolumeClaims(vmRef ref.Ref) (pvcs []core.PersistentVolumeClaim, err error) {
+	vm := &model.Workload{}
+	err = r.Source.Inventory.Find(vm, vmRef)
+	if err != nil {
+		err = liberr.Wrap(
+			err,
+			"VM lookup failed.",
+			"vm",
+			vmRef.String())
+		return
+	}
+	for _, da := range vm.DiskAttachments {
+		if da.Disk.StorageType == "lun" {
+			sc := ""
+			volMode := core.PersistentVolumeBlock
+			pvcSpec := core.PersistentVolumeClaim{
+				ObjectMeta: meta.ObjectMeta{
+					Name:      da.Disk.ID,
+					Namespace: r.Plan.Spec.TargetNamespace,
+					Annotations: map[string]string{
+						AnnImportDiskId: da.Disk.ID,
+						"vmID":          vm.ID,
+						"plan":          string(r.Plan.UID),
+						"lun":           "true",
+					},
+					Labels: map[string]string{"migration": r.Migration.Name},
+				},
+				Spec: core.PersistentVolumeClaimSpec{
+					AccessModes: []core.PersistentVolumeAccessMode{
+						core.ReadWriteMany,
+					},
+					Selector: &meta.LabelSelector{
+						MatchLabels: map[string]string{
+							"volume": fmt.Sprintf("%v-%v", vm.Name, da.ID),
+						},
+					},
+					StorageClassName: &sc,
+					VolumeMode:       &volMode,
+					Resources: core.ResourceRequirements{
+						Requests: core.ResourceList{
+							core.ResourceStorage: *resource.NewQuantity(da.Disk.Lun.LogicalUnits.LogicalUnit[0].Size, resource.BinarySI),
+						},
+					},
+				},
+			}
+			pvcs = append(pvcs, pvcSpec)
+		}
+	}
+	return
+}
diff --git a/pkg/controller/plan/adapter/ovirt/client.go b/pkg/controller/plan/adapter/ovirt/client.go
index fc50eb1a4..1667221de 100644
--- a/pkg/controller/plan/adapter/ovirt/client.go
+++ b/pkg/controller/plan/adapter/ovirt/client.go
@@ -544,3 +544,37 @@ func allJobsFinished(jobs []*ovirtsdk.Job) bool {
 
 	return true
 }
+
+func (r *Client) DetachDisks(vmRef ref.Ref) (err error) {
+	_, vmService, err := r.getVM(vmRef)
+	if err != nil {
+		return
+	}
+	vm := &model.Workload{}
+	err = r.Source.Inventory.Find(vm, vmRef)
+	if err != nil {
+		err = liberr.Wrap(
+			err,
+			"VM lookup failed.",
+			"vm",
+			vmRef.String())
+		return
+	}
+	diskAttachments := vm.DiskAttachments
+	for _, da := range diskAttachments {
+		if da.Disk.StorageType == "lun" {
+			_, err = vmService.DiskAttachmentsService().AttachmentService(da.ID).Remove().Send()
+			if err != nil {
+				err = liberr.Wrap(
+					err,
+					"failed to detach LUN disk.",
+					"vm",
+					vmRef.String(),
+					"disk",
+					da)
+				return
+			}
+		}
+	}
+	return
+}
diff --git a/pkg/controller/plan/adapter/ovirt/validator.go b/pkg/controller/plan/adapter/ovirt/validator.go
index a118ca821..e90da8eeb 100644
--- a/pkg/controller/plan/adapter/ovirt/validator.go
+++ b/pkg/controller/plan/adapter/ovirt/validator.go
@@ -106,7 +106,12 @@ func (r *Validator) StorageMapped(vmRef ref.Ref) (ok bool, err error) {
 		return
 	}
 	for _, da := range vm.DiskAttachments {
-		if !r.plan.Referenced.Map.Storage.Status.Refs.Find(ref.Ref{ID: da.Disk.StorageDomain}) {
+		if da.Disk.StorageType != "lun" {
+			if !r.plan.Referenced.Map.Storage.Status.Refs.Find(ref.Ref{ID: da.Disk.StorageDomain}) {
+				return
+			}
+		} else if len(da.Disk.Lun.LogicalUnits.LogicalUnit) > 0 && da.Disk.Lun.LogicalUnits.LogicalUnit[0].Address == "" {
+			// The disk is a LUN but lacks its connection details; this can happen with older oVirt versions.
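+			// A usable logical unit carries an address (e.g. "10.0.0.1"), the
+			// standard iSCSI port "3260" and a target IQN; without an address
+			// the PV built from it later could not reach the LUN.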
 			return
 		}
 	}
diff --git a/pkg/controller/plan/adapter/vsphere/builder.go b/pkg/controller/plan/adapter/vsphere/builder.go
index 877ca4eef..ebb142848 100644
--- a/pkg/controller/plan/adapter/vsphere/builder.go
+++ b/pkg/controller/plan/adapter/vsphere/builder.go
@@ -823,3 +823,15 @@ func (r *Builder) PersistentVolumeClaimWithSourceRef(da interface{}, storageName
 func (r *Builder) PreTransferActions(c planbase.Client, vmRef ref.Ref) (ready bool, err error) {
 	return true, nil
 }
+
+// Build LUN PVs.
+func (r *Builder) LunPersistentVolumes(vmRef ref.Ref) (pvs []core.PersistentVolume, err error) {
+	// do nothing
+	return
+}
+
+// Build LUN PVCs.
+func (r *Builder) LunPersistentVolumeClaims(vmRef ref.Ref) (pvcs []core.PersistentVolumeClaim, err error) {
+	// do nothing
+	return
+}
diff --git a/pkg/controller/plan/adapter/vsphere/client.go b/pkg/controller/plan/adapter/vsphere/client.go
index 571efc660..bcb81c184 100644
--- a/pkg/controller/plan/adapter/vsphere/client.go
+++ b/pkg/controller/plan/adapter/vsphere/client.go
@@ -370,3 +370,8 @@ func (r *Client) thumbprint() string {
 	}
 	return ""
 }
+
+func (r *Client) DetachDisks(vmRef ref.Ref) (err error) {
+	// no-op
+	return
+}
diff --git a/pkg/controller/plan/kubevirt.go b/pkg/controller/plan/kubevirt.go
index 99de95664..e829dbd92 100644
--- a/pkg/controller/plan/kubevirt.go
+++ b/pkg/controller/plan/kubevirt.go
@@ -586,13 +586,13 @@ func (r *KubeVirt) getPVCs(vm *plan.VMStatus) (pvcs []core.PersistentVolumeClaim
 	return
 }
 
-func (r *KubeVirt) createVolumesForOvirt(vm ref.Ref) (pvcNames []string, err error) {
-	secret, err := r.ensureSecret(vm, r.copyDataFromProviderSecret)
+func (r *KubeVirt) createVolumesForOvirt(vm *plan.VMStatus) (pvcNames []string, err error) {
+	secret, err := r.ensureSecret(vm.Ref, r.copyDataFromProviderSecret)
 	if err != nil {
 		return
 	}
 	ovirtVm := &ovirt.Workload{}
-	err = r.Source.Inventory.Find(ovirtVm, vm)
+	err = r.Source.Inventory.Find(ovirtVm, vm.Ref)
 	if err != nil {
 		return
 	}
@@ -601,8 +601,12 @@ func (r *KubeVirt) createVolumesForOvirt(vm ref.Ref) (pvcNames []string, err err
 		return
 	}
 
-	storageName := &r.Context.Map.Storage.Spec.Map[0].Destination.StorageClass
 	for _, da := range ovirtVm.DiskAttachments {
+		if da.Disk.StorageType == "lun" {
+			continue
+		}
+		// The VM has a disk image, so the storage map is necessarily non-empty and we can read the storage class from it.
+		storageName := &r.Context.Map.Storage.Spec.Map[0].Destination.StorageClass
 		populatorCr := util.OvirtVolumePopulator(da, sourceUrl, r.Plan.Spec.TransferNetwork, r.Plan.Spec.TargetNamespace, secret.Name, vm.ID, string(r.Migration.UID))
 		failure := r.Client.Create(context.Background(), populatorCr, &client.CreateOptions{})
 		if failure != nil && !k8serr.IsAlreadyExists(failure) {
@@ -626,6 +630,29 @@ func (r *KubeVirt) createVolumesForOvirt(vm ref.Ref) (pvcNames []string, err err
 		pvcNames = append(pvcNames, pvc.Name)
 	}
 
+	err = r.createLunDisks(vm)
+
 	return
 }
+
+// Creates the PVs and PVCs for LUN disks.
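+// Nothing is copied for LUNs: the PVCs are created first and the PVs after,
+// with binding left to the "volume" label selector; both Ensure helpers skip
+// objects that already exist, so re-running a migration adds no duplicates.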
+func (r *KubeVirt) createLunDisks(vm *plan.VMStatus) (err error) {
+	lunPvcs, err := r.Builder.LunPersistentVolumeClaims(vm.Ref)
+	if err != nil {
+		return
+	}
+	err = r.EnsurePersistentVolumeClaim(vm, lunPvcs)
+	if err != nil {
+		return
+	}
+	lunPvs, err := r.Builder.LunPersistentVolumes(vm.Ref)
+	if err != nil {
+		return
+	}
+	err = r.EnsurePersistentVolume(vm, lunPvs)
+	if err != nil {
+		return
+	}
+	return
+}
 
@@ -713,6 +740,9 @@ func (r *KubeVirt) areOvirtPVCsReady(vm ref.Ref, step *plan.Step) (ready bool, e
 
 	ready = true
 	for _, da := range ovirtVm.DiskAttachments {
+		if da.Disk.StorageType == "lun" {
+			continue
+		}
 		obj := client.ObjectKey{Namespace: r.Plan.Spec.TargetNamespace, Name: da.Disk.ID}
 		pvc := core.PersistentVolumeClaim{}
 		err = r.Client.Get(context.Background(), obj, &pvc)
@@ -1038,6 +1068,8 @@ func (r *KubeVirt) dataVolumes(vm *plan.VMStatus, secret *core.Secret, configMap
 		return
 	}
 
+	err = r.createLunDisks(vm)
+
 	return
 }
 
@@ -1957,3 +1989,82 @@ func (r *KubeVirt) setPopulatorPodLabels(pod core.Pod, migrationId string) (err
 	err = r.Destination.Client.Patch(context.TODO(), &pod, patch)
 	return
 }
+
+// Ensure the PVs exist on the destination.
+func (r *KubeVirt) EnsurePersistentVolume(vm *plan.VMStatus, persistentVolumes []core.PersistentVolume) (err error) {
+	list := &core.PersistentVolumeList{}
+	err = r.Destination.Client.List(
+		context.TODO(),
+		list,
+		&client.ListOptions{
+			LabelSelector: labels.SelectorFromSet(r.vmLabels(vm.Ref)),
+			Namespace:     r.Plan.Spec.TargetNamespace,
+		})
+	if err != nil {
+		err = liberr.Wrap(err)
+		return
+	}
+
+	for _, pv := range persistentVolumes {
+		pvVolume := pv.Labels["volume"]
+		exists := false
+		for _, item := range list.Items {
+			if val, ok := item.Labels["volume"]; ok && val == pvVolume {
+				exists = true
+				break
+			}
+		}
+
+		if !exists {
+			err = r.Destination.Client.Create(context.TODO(), &pv)
+			if err != nil {
+				err = liberr.Wrap(err)
+				return
+			}
+			r.Log.Info("Created PersistentVolume.",
+				"pv",
+				path.Join(
+					pv.Namespace,
+					pv.Name),
+				"vm",
+				vm.String())
+		}
+	}
+	return
+}
+
+// Ensure the PVCs exist on the destination.
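+// Existence is keyed on the "volume" label rather than the object name, so a
+// claim created for the same LUN by an earlier run is detected and reused.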
+func (r *KubeVirt) EnsurePersistentVolumeClaim(vm *plan.VMStatus, persistentVolumeClaims []core.PersistentVolumeClaim) (err error) {
+	list, err := r.getPVCs(vm)
+	if err != nil {
+		err = liberr.Wrap(err)
+		return
+	}
+
+	for _, pvc := range persistentVolumeClaims {
+		pvcVolume := pvc.Labels["volume"]
+		exists := false
+		for _, item := range list {
+			if val, ok := item.Labels["volume"]; ok && val == pvcVolume {
+				exists = true
+				break
+			}
+		}
+
+		if !exists {
+			err = r.Destination.Client.Create(context.TODO(), &pvc)
+			if err != nil {
+				err = liberr.Wrap(err)
+				return
+			}
+			r.Log.Info("Created PersistentVolumeClaim.",
+				"pvc",
+				path.Join(
+					pvc.Namespace,
+					pvc.Name),
+				"vm",
+				vm.String())
+		}
+	}
+	return
+}
diff --git a/pkg/controller/plan/migration.go b/pkg/controller/plan/migration.go
index b2e7f4c6b..4f8eb2861 100644
--- a/pkg/controller/plan/migration.go
+++ b/pkg/controller/plan/migration.go
@@ -673,7 +673,7 @@ func (r *Migration) execute(vm *plan.VMStatus) (err error) {
 		}
 
 		if r.kubevirt.useOvirtPopulator(vm) {
-			pvcNames, err = r.kubevirt.createVolumesForOvirt(vm.Ref)
+			pvcNames, err = r.kubevirt.createVolumesForOvirt(vm)
 			if err != nil {
 				step.AddError(err.Error())
 				err = nil
@@ -1025,6 +1025,21 @@ func (r *Migration) execute(vm *plan.VMStatus) (err error) {
 	}
 	vm.ReflectPipeline()
 	if vm.Phase == Completed && vm.Error == nil {
+		err = r.provider.DetachDisks(vm.Ref)
+		if err != nil {
+			step, found := vm.FindStep(r.step(vm))
+			if !found {
+				vm.AddError(fmt.Sprintf("Step '%s' not found", r.step(vm)))
+			} else {
+				step.AddError(err.Error())
+			}
+			r.Log.Error(err,
+				"Could not detach LUN disk(s) from the source VM.",
+				"vm",
+				vm.String())
+			err = nil
+			return
+		}
 		vm.SetCondition(
 			libcnd.Condition{
 				Type:     Succeeded,
@@ -1033,6 +1047,7 @@
 				Status:   True,
 				Reason:   Completed,
 				Message:  "The VM migration has SUCCEEDED.",
 				Durable:  true,
 			})
+	// Power on the destination VM if the source VM was originally powered on.
 	err = r.setRunning(vm, vm.RestorePowerState == On)
 	if err != nil {
@@ -1336,6 +1351,10 @@ func (r *Migration) updateCopyProgressForOvirt(vm *plan.VMStatus, step *plan.Ste
 		return
 	}
 	for _, pvc := range pvcs {
+		if _, ok := pvc.Annotations["lun"]; ok {
+			// skip LUNs
+			continue
+		}
 		claim := pvc.Spec.DataSource.Name
 		task, found := step.FindTask(claim)
 		if !found {
diff --git a/pkg/controller/provider/container/ovirt/model.go b/pkg/controller/provider/container/ovirt/model.go
index 67733d157..1ea9f4129 100644
--- a/pkg/controller/provider/container/ovirt/model.go
+++ b/pkg/controller/provider/container/ovirt/model.go
@@ -1011,6 +1011,12 @@ func (r *DiskAdapter) List(ctx *Context) (itr fb.Iterator, err error) {
 	}
 	list := fb.NewList()
 	for _, object := range diskList.Items {
+		if object.StorageType == "lun" {
+			err = ctx.client.list(fmt.Sprintf("disks/%s", object.ID), &object)
+			if err != nil {
+				return
+			}
+		}
 		m := &model.Disk{
 			Base: model.Base{ID: object.ID},
 		}
diff --git a/pkg/controller/provider/container/ovirt/resource.go b/pkg/controller/provider/container/ovirt/resource.go
index ac14346cd..875f74017 100644
--- a/pkg/controller/provider/container/ovirt/resource.go
+++ b/pkg/controller/provider/container/ovirt/resource.go
@@ -649,6 +649,23 @@ type Disk struct {
 	ActualSize  string `json:"actual_size"`
 	Backup      string `json:"backup"`
 	StorageType string `json:"storage_type"`
+	Lun         Lun    `json:"lun_storage"`
 }
+
+// LUN Resource.
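+// Mirrors the lun_storage element of the oVirt REST API. An illustrative,
+// abridged payload (values are examples only):
+//
+//	"lun_storage": {"logical_units": {"logical_unit": [{
+//	    "address": "10.0.0.1", "port": "3260",
+//	    "target": "iqn.2016-01.com.example:storage",
+//	    "lun_mapping": "0", "size": "107374182400"}]}}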
+type Lun struct {
+	LogicalUnits struct {
+		LogicalUnit []LogicalUnit `json:"logical_unit"`
+	} `json:"logical_units"`
+}
+
+type LogicalUnit struct {
+	Base
+	Address    string `json:"address"`
+	Port       string `json:"port"`
+	Target     string `json:"target"`
+	LunMapping string `json:"lun_mapping"`
+	Size       string `json:"size"`
+}
 
 // Apply to (update) the model.
@@ -663,6 +680,7 @@ func (r *Disk) ApplyTo(m *model.Disk) {
 	m.StorageType = r.StorageType
 	m.ProvisionedSize = r.int64(r.ProvisionedSize)
 	r.setStorageDomain(m)
+	r.setLun(m)
 }
 
 func (r *Disk) setStorageDomain(m *model.Disk) {
@@ -672,6 +690,24 @@
 	}
 }
 
+func (r *Disk) setLun(m *model.Disk) {
+	m.Lun = model.Lun{}
+	m.Lun.LogicalUnits.LogicalUnit = []model.LogicalUnit{}
+	for _, rlu := range r.Lun.LogicalUnits.LogicalUnit {
+		mlu := &model.LogicalUnit{}
+		rlu.ApplyTo(mlu)
+		m.Lun.LogicalUnits.LogicalUnit = append(m.Lun.LogicalUnits.LogicalUnit, *mlu)
+	}
+}
+
+func (r *LogicalUnit) ApplyTo(m *model.LogicalUnit) {
+	m.Address = r.Address
+	m.Port = r.Port
+	m.Target = r.Target
+	m.LunMapping = r.int32(r.LunMapping)
+	m.Size = r.int64(r.Size)
+}
+
 // Disk (list).
 type DiskList struct {
 	Items []Disk `json:"disk"`
diff --git a/pkg/controller/provider/model/ovirt/model.go b/pkg/controller/provider/model/ovirt/model.go
index 5475579cf..e6e070f75 100644
--- a/pkg/controller/provider/model/ovirt/model.go
+++ b/pkg/controller/provider/model/ovirt/model.go
@@ -234,4 +234,19 @@ type Disk struct {
 	Backup          string `sql:""`
 	StorageType     string `sql:""`
 	ProvisionedSize int64  `sql:""`
+	Lun             Lun    `sql:""`
 }
+
+type Lun struct {
+	LogicalUnits struct {
+		LogicalUnit []LogicalUnit `json:"logicalUnit"`
+	}
+}
+
+type LogicalUnit struct {
+	Address    string `json:"address"`
+	Port       string `json:"port"`
+	Target     string `json:"target"`
+	LunMapping int32  `json:"lunMapping"`
+	Size       int64  `json:"size"`
+}
diff --git a/pkg/controller/provider/web/ovirt/disk.go b/pkg/controller/provider/web/ovirt/disk.go
index 00cb137f5..742078105 100644
--- a/pkg/controller/provider/web/ovirt/disk.go
+++ b/pkg/controller/provider/web/ovirt/disk.go
@@ -138,8 +138,11 @@ type Disk struct {
 	ActualSize  int64  `json:"actualSize"`
 	StorageType string `json:"storageType"`
 	Status      string `json:"status"`
+	Lun         Lun    `json:"lunStorage"`
 }
 
+type Lun = model.Lun
+
 // Build the resource using the model.
 func (r *Disk) With(m *model.Disk) {
 	r.Resource.With(&m.Base)
@@ -150,6 +153,7 @@
 	r.ActualSize = m.ActualSize
 	r.Shared = m.Shared
 	r.StorageDomain = m.StorageDomain
+	r.Lun = m.Lun
 }
 
 // Build self link (URI).
diff --git a/validation/policies/io/konveyor/forklift/ovirt/disk_storage_type.rego b/validation/policies/io/konveyor/forklift/ovirt/disk_storage_type.rego
index 47eddb7f5..61839fabd 100644
--- a/validation/policies/io/konveyor/forklift/ovirt/disk_storage_type.rego
+++ b/validation/policies/io/konveyor/forklift/ovirt/disk_storage_type.rego
@@ -5,11 +5,16 @@ valid_disk_storage_type [i] {
     input.diskAttachments[i].disk.storageType == "image"
 }
 
+valid_disk_storage_type_lun [i] {
+    some i
+    input.diskAttachments[i].disk.storageType == "lun"
+}
+
 concerns[flag] {
-    count(valid_disk_storage_type) != count(number_of_disks)
+    count(valid_disk_storage_type) + count(valid_disk_storage_type_lun) != count(number_of_disks)
     flag := {
         "category": "Critical",
         "label": "Unsupported disk storage type detected",
-        "assessment": "The VM has a disk with a storage type other than 'image', which is not currently supported by OpenShift Virtualization. The VM disk transfer is likely to fail."
+        "assessment": "The VM has a disk with a storage type other than 'image' or 'lun', which is not currently supported by OpenShift Virtualization. The VM disk transfer is likely to fail."
     }
 }
diff --git a/validation/policies/io/konveyor/forklift/ovirt/disk_storage_type_test.rego b/validation/policies/io/konveyor/forklift/ovirt/disk_storage_type_test.rego
index 7952211aa..5c8ac5926 100644
--- a/validation/policies/io/konveyor/forklift/ovirt/disk_storage_type_test.rego
+++ b/validation/policies/io/konveyor/forklift/ovirt/disk_storage_type_test.rego
@@ -45,4 +45,22 @@ test_with_invalid_storage_type {
     }
     results := concerns with input as mock_vm
     count(results) == 1
+}
+
+test_with_valid_lun_storage_type {
+    mock_vm := {
+        "name": "test",
+        "diskAttachments": [
+            {
+                "id": "b749c132-bb97-4145-b86e-a1751cf75e21",
+                "interface": "virtio_scsi",
+                "disk": {
+                    "storageType": "lun",
+                    "status": "ok"
+                }
+            }
+        ]
+    }
+    results := concerns with input as mock_vm
+    count(results) == 0
 }
\ No newline at end of file