From a4d666186791ab80e8a090ed9c74a7d551ff87bf Mon Sep 17 00:00:00 2001 From: Maxim Patlasov Date: Tue, 8 Oct 2024 14:51:42 +0200 Subject: [PATCH 1/6] Simplify controller_test.go Use `buildDriver()` instead of `&Driver{...}` --- pkg/driver/controller_test.go | 455 ++++++---------------------------- 1 file changed, 81 insertions(+), 374 deletions(-) diff --git a/pkg/driver/controller_test.go b/pkg/driver/controller_test.go index 70b9c5c5a..3c15e8f88 100644 --- a/pkg/driver/controller_test.go +++ b/pkg/driver/controller_test.go @@ -46,11 +46,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -111,11 +107,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -178,11 +170,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -260,11 +248,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -389,11 +373,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -479,11 +459,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -538,12 +514,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - tags: parseTagsFromStr(""), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -598,11 +569,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -654,12 +621,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - 
gidAllocator: NewGidAllocator(), - tags: parseTagsFromStr("cluster:efs"), - } + driver := buildDriver(endpoint, mockCloud, "cluster:efs", nil, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -713,12 +675,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - tags: parseTagsFromStr("cluster-efs"), - } + driver := buildDriver(endpoint, mockCloud, "cluster:efs", nil, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -772,12 +729,8 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - tags: parseTagsFromStr(""), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) + pvcNameVal := "test-pvc" req := &csi.CreateVolumeRequest{ @@ -835,12 +788,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - tags: parseTagsFromStr(""), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) pvName := "foo" pvcName := "bar" @@ -905,12 +853,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - tags: parseTagsFromStr(""), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) pvcName := "foo" directoryCreated := fmt.Sprintf("/%s", pvcName) @@ -973,12 +916,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - tags: parseTagsFromStr(""), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) pvcName := "foo" basePath := "bash" @@ -1044,12 +982,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - tags: parseTagsFromStr(""), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) pvcName := "foo" basePath := "bash" @@ -1116,12 +1049,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - tags: parseTagsFromStr(""), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) pvcName := "foo" directoryCreated := fmt.Sprintf("/%s", pvcName) @@ -1185,12 +1113,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - tags: parseTagsFromStr(""), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -1250,12 +1173,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - tags: parseTagsFromStr(""), - } + driver := buildDriver(endpoint, 
mockCloud, "", nil, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -1316,12 +1234,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - tags: parseTagsFromStr(""), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) pvcName := "foo" directoryCreated := fmt.Sprintf("/%s/%s", pvcName, pvcName) @@ -1384,11 +1297,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.CreateVolumeRequest{ Parameters: map[string]string{ @@ -1412,11 +1321,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -1441,11 +1346,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -1473,11 +1374,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -1515,12 +1412,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - tags: parseTagsFromStr(""), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -1563,11 +1455,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -1605,11 +1493,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -1640,11 +1524,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -1674,11 +1554,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: 
NewGidAllocator(), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -1708,11 +1584,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -1743,11 +1615,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -1779,11 +1647,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -1815,11 +1679,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -1851,11 +1711,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -1887,11 +1743,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -1923,11 +1775,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -1959,11 +1807,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -1996,11 +1840,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -2033,11 +1873,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - } + driver := 
buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -2069,11 +1905,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -2105,11 +1937,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -2145,11 +1973,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -2183,11 +2007,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -2223,11 +2043,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -2261,11 +2077,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -2301,11 +2113,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -2339,11 +2147,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -2378,11 +2182,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -2417,11 +2217,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - } + driver := buildDriver(endpoint, mockCloud, "", nil, 
false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -2476,12 +2272,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - tags: parseTagsFromStr(""), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) secrets := map[string]string{} secrets["awsRoleArn"] = "arn:aws:iam::1234567890:role/EFSCrossAccountRole" @@ -2525,11 +2316,7 @@ func TestCreateVolume(t *testing.T) { subPathPattern := "${.PVC.name}/${foo}" - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -2569,11 +2356,7 @@ func TestCreateVolume(t *testing.T) { subPathPattern := "this-directory-name-is-far-too-long-for-any-practical-purposes-and-only-serves-to-prove-a-point" - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -2613,11 +2396,7 @@ func TestCreateVolume(t *testing.T) { subPathPattern := "a/b/c/d/e/f" - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -2674,11 +2453,7 @@ func TestDeleteVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.DeleteVolumeRequest{ VolumeId: volumeId, @@ -2700,13 +2475,7 @@ func TestDeleteVolume(t *testing.T) { mockCloud := mocks.NewMockCloud(mockCtl) mockMounter := mocks.NewMockMounter(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - mounter: mockMounter, - gidAllocator: NewGidAllocator(), - deleteAccessPointRootDir: true, - } + driver := buildDriver(endpoint, mockCloud, "", mockMounter, true) req := &csi.DeleteVolumeRequest{ VolumeId: volumeId, @@ -2739,13 +2508,7 @@ func TestDeleteVolume(t *testing.T) { mockCloud := mocks.NewMockCloud(mockCtl) mockMounter := mocks.NewMockMounter(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - mounter: mockMounter, - gidAllocator: NewGidAllocator(), - deleteAccessPointRootDir: true, - } + driver := buildDriver(endpoint, mockCloud, "", mockMounter, true) req := &csi.DeleteVolumeRequest{ VolumeId: volumeId, @@ -2767,13 +2530,7 @@ func TestDeleteVolume(t *testing.T) { mockCloud := mocks.NewMockCloud(mockCtl) mockMounter := mocks.NewMockMounter(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - mounter: mockMounter, - gidAllocator: NewGidAllocator(), - deleteAccessPointRootDir: true, - } + driver := buildDriver(endpoint, mockCloud, "", mockMounter, true) req := &csi.DeleteVolumeRequest{ VolumeId: volumeId, @@ -2795,13 +2552,7 @@ func TestDeleteVolume(t *testing.T) { mockCloud := mocks.NewMockCloud(mockCtl) mockMounter := mocks.NewMockMounter(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - mounter: mockMounter, - gidAllocator: NewGidAllocator(), - deleteAccessPointRootDir: true, - } + driver := buildDriver(endpoint, mockCloud, "", mockMounter, true) req := &csi.DeleteVolumeRequest{ 
VolumeId: volumeId, @@ -2823,13 +2574,7 @@ func TestDeleteVolume(t *testing.T) { mockCloud := mocks.NewMockCloud(mockCtl) mockMounter := mocks.NewMockMounter(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - mounter: mockMounter, - gidAllocator: NewGidAllocator(), - deleteAccessPointRootDir: true, - } + driver := buildDriver(endpoint, mockCloud, "", mockMounter, true) req := &csi.DeleteVolumeRequest{ VolumeId: volumeId, @@ -2859,13 +2604,7 @@ func TestDeleteVolume(t *testing.T) { mockCloud := mocks.NewMockCloud(mockCtl) mockMounter := mocks.NewMockMounter(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - mounter: mockMounter, - gidAllocator: NewGidAllocator(), - deleteAccessPointRootDir: true, - } + driver := buildDriver(endpoint, mockCloud, "", mockMounter, true) req := &csi.DeleteVolumeRequest{ VolumeId: volumeId, @@ -2896,13 +2635,7 @@ func TestDeleteVolume(t *testing.T) { mockCloud := mocks.NewMockCloud(mockCtl) mockMounter := mocks.NewMockMounter(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - mounter: mockMounter, - gidAllocator: NewGidAllocator(), - deleteAccessPointRootDir: true, - } + driver := buildDriver(endpoint, mockCloud, "", mockMounter, true) req := &csi.DeleteVolumeRequest{ VolumeId: volumeId, @@ -2933,11 +2666,7 @@ func TestDeleteVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.DeleteVolumeRequest{ VolumeId: volumeId, @@ -2958,11 +2687,7 @@ func TestDeleteVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.DeleteVolumeRequest{ VolumeId: volumeId, @@ -2983,11 +2708,7 @@ func TestDeleteVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.DeleteVolumeRequest{ VolumeId: volumeId, @@ -3008,11 +2729,7 @@ func TestDeleteVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.DeleteVolumeRequest{ VolumeId: "fs-abcd1234", @@ -3032,12 +2749,7 @@ func TestDeleteVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - gidAllocator: NewGidAllocator(), - tags: parseTagsFromStr(""), - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) secrets := map[string]string{} secrets["awsRoleArn"] = "arn:aws:iam::1234567890:role/EFSCrossAccountRole" @@ -3096,10 +2808,7 @@ func TestValidateVolumeCapabilities(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.ValidateVolumeCapabilitiesRequest{ VolumeId: volumeId, @@ -3126,10 +2835,7 @@ func 
TestValidateVolumeCapabilities(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.ValidateVolumeCapabilitiesRequest{ VolumeId: volumeId, @@ -3156,10 +2862,7 @@ func TestValidateVolumeCapabilities(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.ValidateVolumeCapabilitiesRequest{ VolumeCapabilities: []*csi.VolumeCapability{ @@ -3181,10 +2884,7 @@ func TestValidateVolumeCapabilities(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) req := &csi.ValidateVolumeCapabilitiesRequest{ VolumeId: volumeId, @@ -3210,10 +2910,7 @@ func TestControllerGetCapabilities(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := &Driver{ - endpoint: endpoint, - cloud: mockCloud, - } + driver := buildDriver(endpoint, mockCloud, "", nil, false) ctx := context.Background() _, err := driver.ControllerGetCapabilities(ctx, &csi.ControllerGetCapabilitiesRequest{}) @@ -3229,3 +2926,13 @@ func verifyPathWhenUUIDIncluded(pathToVerify string, expectedPathWithoutUUID str _, err := uuid.Parse(matches[2]) return err == nil && doesPathMatchWithUuid } + +func buildDriver(endpoint string, cloud cloud.Cloud, tags string, mounter Mounter, deleteAccessPointRootDir bool) *Driver { + return &Driver{ + endpoint: endpoint, + cloud: cloud, + tags: parseTagsFromStr(tags), + mounter: mounter, + deleteAccessPointRootDir: deleteAccessPointRootDir, + } +} From e9522805071f3713e261431daca17b8a0179dedf Mon Sep 17 00:00:00 2001 From: Maxim Patlasov Date: Tue, 7 Jan 2025 14:45:36 -0800 Subject: [PATCH 2/6] Introduce Provisioner interface Controller's CreateVolume and DeleteVolume now call methods of the Provisioner interface. All of the heavy-lifting create/delete logic specific to access-point provisioning is now hidden in the AccessPointProvisioner struct.
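Reviewer note: below is the dispatch pattern this patch introduces, condensed into a self-contained sketch. The request and volume types here are simplified stand-ins for the csi ones, and the "efs-ap" key mirrors the AccessPointMode constant; the real signatures live in pkg/driver/provisioner.go in the diff that follows.

    package main

    import (
    	"context"
    	"fmt"
    )

    // Simplified stand-ins for *csi.CreateVolumeRequest and *csi.Volume.
    type CreateVolumeRequest struct {
    	Name       string
    	Parameters map[string]string
    }

    type Volume struct {
    	VolumeId string
    }

    // Provisioner hides the mode-specific create/delete heavy lifting.
    type Provisioner interface {
    	Provision(ctx context.Context, req *CreateVolumeRequest) (*Volume, error)
    	Delete(ctx context.Context, volumeId string) error
    }

    type accessPointProvisioner struct{}

    func (accessPointProvisioner) Provision(ctx context.Context, req *CreateVolumeRequest) (*Volume, error) {
    	// The real implementation creates an EFS access point here.
    	return &Volume{VolumeId: "fs-12345678::fsap-0123456789abcdef0"}, nil
    }

    func (accessPointProvisioner) Delete(ctx context.Context, volumeId string) error {
    	// The real implementation deletes the access point (and, optionally,
    	// its root directory) here.
    	return nil
    }

    func main() {
    	// CreateVolume no longer branches on the mode inline; it looks the
    	// provisioner up in a map keyed by the provisioningMode parameter.
    	provisioners := map[string]Provisioner{
    		"efs-ap": accessPointProvisioner{},
    	}

    	req := &CreateVolumeRequest{
    		Name:       "pv-example",
    		Parameters: map[string]string{"provisioningMode": "efs-ap"},
    	}
    	p, ok := provisioners[req.Parameters["provisioningMode"]]
    	if !ok {
    		fmt.Println("provisioning mode not supported")
    		return
    	}
    	vol, err := p.Provision(context.Background(), req)
    	if err != nil {
    		fmt.Println("provision failed:", err)
    		return
    	}
    	fmt.Println("provisioned", vol.VolumeId)
    }

Adding another mode later (as patch 3 does with directory provisioning) then only means registering one more entry in the map.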
--- pkg/driver/controller.go | 400 ++-------------------------------- pkg/driver/controller_test.go | 9 +- pkg/driver/driver.go | 63 +++--- pkg/driver/gid_allocator.go | 4 +- pkg/driver/provisioner.go | 70 ++++++ pkg/driver/provisioner_ap.go | 382 ++++++++++++++++++++++++++++++++ pkg/driver/sanity_test.go | 5 +- 7 files changed, 508 insertions(+), 425 deletions(-) create mode 100644 pkg/driver/provisioner.go create mode 100644 pkg/driver/provisioner_ap.go diff --git a/pkg/driver/controller.go b/pkg/driver/controller.go index 33027fcbd..78a5900c1 100644 --- a/pkg/driver/controller.go +++ b/pkg/driver/controller.go @@ -20,14 +20,9 @@ import ( "context" "crypto/sha256" "fmt" - "os" - "path" "sort" - "strconv" "strings" - "github.com/google/uuid" - "github.com/container-storage-interface/spec/lib/go/csi" "github.com/kubernetes-sigs/aws-efs-csi-driver/pkg/cloud" "github.com/kubernetes-sigs/aws-efs-csi-driver/pkg/util" @@ -81,32 +76,6 @@ var ( func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) { klog.V(4).Infof("CreateVolume: called with args %+v", util.SanitizeRequest(*req)) - var reuseAccessPoint bool - var err error - volumeParams := req.GetParameters() - volName := req.GetName() - clientToken := volName - - // if true, then use sha256 hash of pvcName as clientToken instead of PVC Id - // This allows users to reconnect to the same AP from different k8s cluster - if reuseAccessPointStr, ok := volumeParams[ReuseAccessPointKey]; ok { - reuseAccessPoint, err = strconv.ParseBool(reuseAccessPointStr) - if err != nil { - return nil, status.Error(codes.InvalidArgument, "Invalid value for reuseAccessPoint parameter") - } - if reuseAccessPoint { - clientToken = get64LenHash(volumeParams[PvcNameKey]) - klog.V(5).Infof("Client token : %s", clientToken) - } - } - if volName == "" { - return nil, status.Error(codes.InvalidArgument, "Volume name not provided") - } - - // Volume size is required to match PV to PVC by k8s. - // Volume size is not consumed by EFS for any purposes. - volSize := req.GetCapacityRange().GetRequiredBytes() - volCaps := req.GetVolumeCapabilities() if len(volCaps) == 0 { return nil, status.Error(codes.InvalidArgument, "Volume capabilities not provided") @@ -119,287 +88,39 @@ func (d *Driver) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("Volume fstype not supported: %s", err)) } - var ( - azName string - basePath string - gid int64 - gidMin int64 - gidMax int64 - localCloud cloud.Cloud - provisioningMode string - roleArn string - uid int64 - crossAccountDNSEnabled bool - ) - - //Parse parameters + volumeParams := req.GetParameters() if value, ok := volumeParams[ProvisioningMode]; ok { - provisioningMode = value - //TODO: Add FS provisioning mode check when implemented - if provisioningMode != AccessPointMode { - errStr := "Provisioning mode " + provisioningMode + " is not supported. 
Only Access point provisioning: 'efs-ap' is supported" - return nil, status.Error(codes.InvalidArgument, errStr) + if _, ok = d.provisioners[value]; !ok { + return nil, status.Errorf(codes.InvalidArgument, "Provisioning mode %s is not supported.", value) } } else { return nil, status.Errorf(codes.InvalidArgument, "Missing %v parameter", ProvisioningMode) } - accessPointsOptions := &cloud.AccessPointOptions{ - CapacityGiB: volSize, - } + mode := volumeParams[ProvisioningMode] + provisioner := d.provisioners[mode] - if value, ok := volumeParams[FsId]; ok { - if strings.TrimSpace(value) == "" { - return nil, status.Errorf(codes.InvalidArgument, "Parameter %v cannot be empty", FsId) - } - accessPointsOptions.FileSystemId = value - } else { - return nil, status.Errorf(codes.InvalidArgument, "Missing %v parameter", FsId) - } + klog.V(5).Infof("CreateVolume: provisioning mode %s selected. Supported modes are %s", mode, + strings.Join(d.getProvisioningModes(), ",")) - localCloud, roleArn, crossAccountDNSEnabled, err = getCloud(req.GetSecrets(), d) + volume, err := provisioner.Provision(ctx, req) if err != nil { return nil, err } - var accessPoint *cloud.AccessPoint - //if reuseAccessPoint is true, check for AP with same Root Directory exists in efs - // if found reuse that AP - if reuseAccessPoint { - existingAP, err := localCloud.FindAccessPointByClientToken(ctx, clientToken, accessPointsOptions.FileSystemId) - if err != nil { - return nil, fmt.Errorf("failed to find access point: %v", err) - } - if existingAP != nil { - //AP path already exists - klog.V(2).Infof("Existing AccessPoint found : %+v", existingAP) - accessPoint = &cloud.AccessPoint{ - AccessPointId: existingAP.AccessPointId, - FileSystemId: existingAP.FileSystemId, - CapacityGiB: accessPointsOptions.CapacityGiB, - } - } - } - - if accessPoint == nil { - // Create tags - tags := map[string]string{ - DefaultTagKey: DefaultTagValue, - } - - // Append input tags to default tag - if len(d.tags) != 0 { - for k, v := range d.tags { - tags[k] = v - } - } - - accessPointsOptions.Tags = tags - - uid = -1 - if value, ok := volumeParams[Uid]; ok { - uid, err = strconv.ParseInt(value, 10, 64) - if err != nil { - return nil, status.Errorf(codes.InvalidArgument, "Failed to parse invalid %v: %v", Uid, err) - } - if uid < 0 { - return nil, status.Errorf(codes.InvalidArgument, "%v must be greater or equal than 0", Uid) - } - } - - gid = -1 - if value, ok := volumeParams[Gid]; ok { - gid, err = strconv.ParseInt(value, 10, 64) - if err != nil { - return nil, status.Errorf(codes.InvalidArgument, "Failed to parse invalid %v: %v", Gid, err) - } - if uid < 0 { - return nil, status.Errorf(codes.InvalidArgument, "%v must be greater or equal than 0", Gid) - } - } - - if value, ok := volumeParams[GidMin]; ok { - gidMin, err = strconv.ParseInt(value, 10, 64) - if err != nil { - return nil, status.Errorf(codes.InvalidArgument, "Failed to parse invalid %v: %v", GidMin, err) - } - if gidMin <= 0 { - return nil, status.Errorf(codes.InvalidArgument, "%v must be greater than 0", GidMin) - } - } - - if value, ok := volumeParams[GidMax]; ok { - // Ensure GID min is provided with GID max - if gidMin == 0 { - return nil, status.Errorf(codes.InvalidArgument, "Missing %v parameter", GidMin) - } - gidMax, err = strconv.ParseInt(value, 10, 64) - if err != nil { - return nil, status.Errorf(codes.InvalidArgument, "Failed to parse invalid %v: %v", GidMax, err) - } - if gidMax <= gidMin { - return nil, status.Errorf(codes.InvalidArgument, "%v must be greater than %v", GidMax, 
GidMin) - } - } else { - // Ensure GID max is provided with GID min - if gidMin != 0 { - return nil, status.Errorf(codes.InvalidArgument, "Missing %v parameter", GidMax) - } - } - - // Assign default GID ranges if not provided - if gidMin == 0 && gidMax == 0 { - gidMin = DefaultGidMin - gidMax = DefaultGidMax - } - - if value, ok := volumeParams[DirectoryPerms]; ok { - accessPointsOptions.DirectoryPerms = value - } - - // Storage class parameter `az` will be used to fetch preferred mount target for cross account mount. - // If the `az` storage class parameter is not provided, a random mount target will be picked for mounting. - // This storage class parameter different from `az` mount option provided by efs-utils https://github.com/aws/efs-utils/blob/v1.31.1/src/mount_efs/__init__.py#L195 - // The `az` mount option provided by efs-utils is used for cross az mount or to provide az of efs one zone file system mount within the same aws-account. - // To make use of the `az` mount option, add it under storage class's `mountOptions` section. https://kubernetes.io/docs/concepts/storage/storage-classes/#mount-options - if value, ok := volumeParams[AzName]; ok { - azName = value - } - - // Check if file system exists. Describe FS or List APs handle appropriate error codes - // With dynamic uid/gid provisioning we can save a call to describe FS, as list APs fails if FS ID does not exist - var accessPoints []*cloud.AccessPoint - if uid == -1 || gid == -1 { - accessPoints, err = localCloud.ListAccessPoints(ctx, accessPointsOptions.FileSystemId) - } else { - _, err = localCloud.DescribeFileSystem(ctx, accessPointsOptions.FileSystemId) - } - if err != nil { - if err == cloud.ErrAccessDenied { - return nil, status.Errorf(codes.Unauthenticated, "Access Denied. Please ensure you have the right AWS permissions: %v", err) - } - if err == cloud.ErrNotFound { - return nil, status.Errorf(codes.InvalidArgument, "File System does not exist: %v", err) - } - return nil, status.Errorf(codes.Internal, "Failed to fetch Access Points or Describe File System: %v", err) - } - - var allocatedGid int64 - if uid == -1 || gid == -1 { - allocatedGid, err = d.gidAllocator.getNextGid(accessPointsOptions.FileSystemId, accessPoints, gidMin, gidMax) - if err != nil { - return nil, err - } - } - if uid == -1 { - uid = allocatedGid - } - if gid == -1 { - gid = allocatedGid - } - - if value, ok := volumeParams[BasePath]; ok { - basePath = value - } - - rootDirName := volName - // Check if a custom structure should be imposed on the access point directory - if value, ok := volumeParams[SubPathPattern]; ok { - // Try and construct the root directory and check it only contains supported components - val, err := interpolateRootDirectoryName(value, volumeParams) - if err == nil { - klog.Infof("Using user-specified structure for access point directory.") - rootDirName = val - if value, ok := volumeParams[EnsureUniqueDirectory]; ok { - if ensureUniqueDirectory, err := strconv.ParseBool(value); !ensureUniqueDirectory && err == nil { - klog.Infof("Not appending PVC UID to path.") - } else { - klog.Infof("Appending PVC UID to path.") - rootDirName = fmt.Sprintf("%s-%s", val, uuid.New().String()) - } - } else { - klog.Infof("Appending PVC UID to path.") - rootDirName = fmt.Sprintf("%s-%s", val, uuid.New().String()) - } - } else { - return nil, err - } - } else { - klog.Infof("Using PV name for access point directory.") - } - - rootDir := path.Join("/", basePath, rootDirName) - if ok, err := validateEfsPathRequirements(rootDir); !ok { - return 
nil, err - } - klog.Infof("Using %v as the access point directory.", rootDir) - - accessPointsOptions.Uid = uid - accessPointsOptions.Gid = gid - accessPointsOptions.DirectoryPath = rootDir - - accessPoint, err = localCloud.CreateAccessPoint(ctx, clientToken, accessPointsOptions) - if err != nil { - if err == cloud.ErrAccessDenied { - return nil, status.Errorf(codes.Unauthenticated, "Access Denied. Please ensure you have the right AWS permissions: %v", err) - } - if err == cloud.ErrAlreadyExists { - return nil, status.Errorf(codes.AlreadyExists, "Access Point already exists") - } - return nil, status.Errorf(codes.Internal, "Failed to create Access point in File System %v : %v", accessPointsOptions.FileSystemId, err) - } - } - - volContext := map[string]string{} - - // Enable cross-account dns resolution or fetch mount target Ip for cross-account mount - if roleArn != "" { - if crossAccountDNSEnabled { - // This option indicates the customer would like to use DNS to resolve - // the cross-account mount target ip address (in order to mount to - // the same AZ-ID as the client instance); mounttargetip should - // not be used as a mount option in this case. - volContext[CrossAccount] = strconv.FormatBool(true) - } else { - mountTarget, err := localCloud.DescribeMountTargets(ctx, accessPointsOptions.FileSystemId, azName) - if err != nil { - klog.Warningf("Failed to describe mount targets for file system %v. Skip using `mounttargetip` mount option: %v", accessPointsOptions.FileSystemId, err) - } else { - volContext[MountTargetIp] = mountTarget.IPAddress - } - - } - } - return &csi.CreateVolumeResponse{ - Volume: &csi.Volume{ - CapacityBytes: volSize, - VolumeId: accessPointsOptions.FileSystemId + "::" + accessPoint.AccessPointId, - VolumeContext: volContext, - }, + Volume: volume, }, nil } func (d *Driver) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) { - var ( - localCloud cloud.Cloud - roleArn string - crossAccountDNSEnabled bool - err error - ) - - localCloud, roleArn, crossAccountDNSEnabled, err = getCloud(req.GetSecrets(), d) - if err != nil { - return nil, err - } - - klog.V(4).Infof("DeleteVolume: called with args %+v", util.SanitizeRequest(*req)) + klog.V(4).Infof("DeleteVolume: called with args %+v", util.SanitizeRequest(*req)) volId := req.GetVolumeId() if volId == "" { return nil, status.Error(codes.InvalidArgument, "Volume ID not provided") } - fileSystemId, _, accessPointId, err := parseVolumeId(volId) + _, _, accessPointId, err := parseVolumeId(volId) if err != nil { //Returning success for an invalid volume ID. See here - https://github.com/kubernetes-csi/csi-test/blame/5deb83d58fea909b2895731d43e32400380aae3c/pkg/sanity/controller.go#L733 klog.V(5).Infof("DeleteVolume: Failed to parse volumeID: %v, err: %v, returning success", volId, err) @@ -411,70 +132,8 @@ func (d *Driver) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) return &csi.DeleteVolumeResponse{}, nil } - //TODO: Add Delete File System when FS provisioning is implemented - // Delete access point root directory if delete-access-point-root-dir is set. - if d.deleteAccessPointRootDir { - // Check if Access point exists. - // If access point exists, retrieve its root directory and delete it/ - accessPoint, err := localCloud.DescribeAccessPoint(ctx, accessPointId) - if err != nil { - if err == cloud.ErrAccessDenied { - return nil, status.Errorf(codes.Unauthenticated, "Access Denied. 
Please ensure you have the right AWS permissions: %v", err) - } - if err == cloud.ErrNotFound { - klog.V(5).Infof("DeleteVolume: Access Point %v not found, returning success", accessPointId) - return &csi.DeleteVolumeResponse{}, nil - } - return nil, status.Errorf(codes.Internal, "Could not get describe Access Point: %v , error: %v", accessPointId, err) - } - - //Mount File System at it root and delete access point root directory - mountOptions := []string{"tls", "iam"} - if roleArn != "" { - if crossAccountDNSEnabled { - // Connect via dns rather than mounttargetip - mountOptions = append(mountOptions, CrossAccount) - } else { - mountTarget, err := localCloud.DescribeMountTargets(ctx, fileSystemId, "") - if err == nil { - mountOptions = append(mountOptions, MountTargetIp+"="+mountTarget.IPAddress) - } else { - klog.Warningf("Failed to describe mount targets for file system %v. Skip using `mounttargetip` mount option: %v", fileSystemId, err) - } - } - } - - target := TempMountPathPrefix + "/" + accessPointId - if err := d.mounter.MakeDir(target); err != nil { - return nil, status.Errorf(codes.Internal, "Could not create dir %q: %v", target, err) - } - if err := d.mounter.Mount(fileSystemId, target, "efs", mountOptions); err != nil { - os.Remove(target) - return nil, status.Errorf(codes.Internal, "Could not mount %q at %q: %v", fileSystemId, target, err) - } - err = os.RemoveAll(target + accessPoint.AccessPointRootDir) - if err != nil { - return nil, status.Errorf(codes.Internal, "Could not delete access point root directory %q: %v", accessPoint.AccessPointRootDir, err) - } - err = d.mounter.Unmount(target) - if err != nil { - return nil, status.Errorf(codes.Internal, "Could not unmount %q: %v", target, err) - } - err = os.Remove(target) - if err != nil && !os.IsNotExist(err) { - return nil, status.Errorf(codes.Internal, "Could not delete %q: %v", target, err) - } - } - - // Delete access point - if err = localCloud.DeleteAccessPoint(ctx, accessPointId); err != nil { - if err == cloud.ErrAccessDenied { - return nil, status.Errorf(codes.Unauthenticated, "Access Denied. Please ensure you have the right AWS permissions: %v", err) - } - if err == cloud.ErrNotFound { - klog.V(5).Infof("DeleteVolume: Access Point not found, returning success") - return &csi.DeleteVolumeResponse{}, nil - } + err = d.provisioners[AccessPointMode].Delete(ctx, req) + if err != nil { return nil, status.Errorf(codes.Internal, "Failed to Delete volume %v: %v", volId, err) } @@ -560,39 +219,6 @@ func (d *Driver) ControllerGetVolume(ctx context.Context, req *csi.ControllerGet return nil, status.Error(codes.Unimplemented, "") } -func getCloud(secrets map[string]string, driver *Driver) (cloud.Cloud, string, bool, error) { - - var localCloud cloud.Cloud - var roleArn string - var crossAccountDNSEnabled bool - var err error - - // Fetch aws role ARN for cross account mount from CSI secrets. 
Link to CSI secrets below - // https://kubernetes-csi.github.io/docs/secrets-and-credentials.html#csi-operation-secrets - if value, ok := secrets[RoleArn]; ok { - roleArn = value - } - if value, ok := secrets[CrossAccount]; ok { - crossAccountDNSEnabled, err = strconv.ParseBool(value) - if err != nil { - return nil, "", false, status.Error(codes.InvalidArgument, "crossaccount parameter must have boolean value.") - } - } else { - crossAccountDNSEnabled = false - } - - if roleArn != "" { - localCloud, err = cloud.NewCloudWithRole(roleArn, driver.adaptiveRetryMode) - if err != nil { - return nil, "", false, status.Errorf(codes.Unauthenticated, "Unable to initialize aws cloud: %v. Please verify role has the correct AWS permissions for cross account mount", err) - } - } else { - localCloud = driver.cloud - } - - return localCloud, roleArn, crossAccountDNSEnabled, nil -} - func interpolateRootDirectoryName(rootDirectoryPath string, volumeParams map[string]string) (string, error) { r := strings.NewReplacer(createListOfVariableSubstitutions(volumeParams)...) result := r.Replace(rootDirectoryPath) diff --git a/pkg/driver/controller_test.go b/pkg/driver/controller_test.go index 3c15e8f88..b9a5c2a95 100644 --- a/pkg/driver/controller_test.go +++ b/pkg/driver/controller_test.go @@ -2929,10 +2929,9 @@ func verifyPathWhenUUIDIncluded(pathToVerify string, expectedPathWithoutUUID str func buildDriver(endpoint string, cloud cloud.Cloud, tags string, mounter Mounter, deleteAccessPointRootDir bool) *Driver { return &Driver{ - endpoint: endpoint, - cloud: cloud, - tags: parseTagsFromStr(tags), - mounter: mounter, - deleteAccessPointRootDir: deleteAccessPointRootDir, + endpoint: endpoint, + cloud: cloud, + mounter: mounter, + provisioners: getProvisioners(cloud, mounter, parseTagsFromStr(tags), deleteAccessPointRootDir, false), } } diff --git a/pkg/driver/driver.go b/pkg/driver/driver.go index 8b76370aa..9df8d8de5 100644 --- a/pkg/driver/driver.go +++ b/pkg/driver/driver.go @@ -38,21 +38,18 @@ const ( ) type Driver struct { - endpoint string - nodeID string - srv *grpc.Server - mounter Mounter - efsWatchdog Watchdog - cloud cloud.Cloud - nodeCaps []csi.NodeServiceCapability_RPC_Type - volMetricsOptIn bool - volMetricsRefreshPeriod float64 - volMetricsFsRateLimit int - volStatter VolStatter - gidAllocator GidAllocator - deleteAccessPointRootDir bool - adaptiveRetryMode bool - tags map[string]string + endpoint string + nodeID string + srv *grpc.Server + mounter Mounter + efsWatchdog Watchdog + cloud cloud.Cloud + nodeCaps []csi.NodeServiceCapability_RPC_Type + volMetricsOptIn bool + volMetricsRefreshPeriod float64 + volMetricsFsRateLimit int + volStatter VolStatter + provisioners map[string]Provisioner } func NewDriver(endpoint, efsUtilsCfgPath, efsUtilsStaticFilesPath, tags string, volMetricsOptIn bool, volMetricsRefreshPeriod float64, volMetricsFsRateLimit int, deleteAccessPointRootDir bool, adaptiveRetryMode bool) *Driver { @@ -63,21 +60,21 @@ func NewDriver(endpoint, efsUtilsCfgPath, efsUtilsStaticFilesPath, tags string, nodeCaps := SetNodeCapOptInFeatures(volMetricsOptIn) watchdog := newExecWatchdog(efsUtilsCfgPath, efsUtilsStaticFilesPath, "amazon-efs-mount-watchdog") + mounter := newNodeMounter() + parsedTags := parseTagsFromStr(strings.TrimSpace(tags)) + provisioners := getProvisioners(cloud, mounter, parsedTags, deleteAccessPointRootDir, adaptiveRetryMode) return &Driver{ - endpoint: endpoint, - nodeID: cloud.GetMetadata().GetInstanceID(), - mounter: newNodeMounter(), - efsWatchdog: watchdog, - cloud: 
cloud, - nodeCaps: nodeCaps, - volStatter: NewVolStatter(), - volMetricsOptIn: volMetricsOptIn, - volMetricsRefreshPeriod: volMetricsRefreshPeriod, - volMetricsFsRateLimit: volMetricsFsRateLimit, - gidAllocator: NewGidAllocator(), - deleteAccessPointRootDir: deleteAccessPointRootDir, - adaptiveRetryMode: adaptiveRetryMode, - tags: parseTagsFromStr(strings.TrimSpace(tags)), + endpoint: endpoint, + nodeID: cloud.GetMetadata().GetInstanceID(), + mounter: mounter, + efsWatchdog: watchdog, + cloud: cloud, + nodeCaps: nodeCaps, + volStatter: NewVolStatter(), + volMetricsOptIn: volMetricsOptIn, + volMetricsRefreshPeriod: volMetricsRefreshPeriod, + volMetricsFsRateLimit: volMetricsFsRateLimit, + provisioners: provisioners, } } @@ -140,6 +137,14 @@ func (d *Driver) Run() error { return d.srv.Serve(listener) } +func (d *Driver) getProvisioningModes() []string { + var keys []string + for k := range d.provisioners { + keys = append(keys, k) + } + return keys +} + func parseTagsFromStr(tagStr string) map[string]string { defer func() { if r := recover(); r != nil { diff --git a/pkg/driver/gid_allocator.go b/pkg/driver/gid_allocator.go index 97348a2c4..961239f82 100644 --- a/pkg/driver/gid_allocator.go +++ b/pkg/driver/gid_allocator.go @@ -20,8 +20,8 @@ type GidAllocator struct { mu sync.Mutex } -func NewGidAllocator() GidAllocator { - return GidAllocator{} +func NewGidAllocator() *GidAllocator { + return &GidAllocator{} } // Retrieves the next available GID diff --git a/pkg/driver/provisioner.go b/pkg/driver/provisioner.go new file mode 100644 index 000000000..ad5f20b28 --- /dev/null +++ b/pkg/driver/provisioner.go @@ -0,0 +1,70 @@ +package driver + +import ( + "context" + "strconv" + + "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/kubernetes-sigs/aws-efs-csi-driver/pkg/cloud" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +type Provisioner interface { + Provision(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.Volume, error) + Delete(ctx context.Context, req *csi.DeleteVolumeRequest) error +} + +type BaseProvisioner struct { + cloud cloud.Cloud + mounter Mounter + adaptiveRetryMode bool +} + +func getProvisioners(cloud cloud.Cloud, mounter Mounter, tags map[string]string, deleteAccessPointRootDir bool, adaptiveRetryMode bool) map[string]Provisioner { + return map[string]Provisioner{ + AccessPointMode: AccessPointProvisioner{ + BaseProvisioner: BaseProvisioner{ + cloud: cloud, + mounter: mounter, + adaptiveRetryMode: adaptiveRetryMode, + }, + tags: tags, + gidAllocator: NewGidAllocator(), + deleteAccessPointRootDir: deleteAccessPointRootDir, + }, + } +} + +func getCloud(secrets map[string]string, driverCloud cloud.Cloud, adaptiveRetryMode bool) (cloud.Cloud, string, bool, error) { + + var localCloud cloud.Cloud + var roleArn string + var crossAccountDNSEnabled bool + var err error + + // Fetch aws role ARN for cross account mount from CSI secrets. 
Link to CSI secrets below + // https://kubernetes-csi.github.io/docs/secrets-and-credentials.html#csi-operation-secrets + if value, ok := secrets[RoleArn]; ok { + roleArn = value + } + if value, ok := secrets[CrossAccount]; ok { + crossAccountDNSEnabled, err = strconv.ParseBool(value) + if err != nil { + return nil, "", false, status.Error(codes.InvalidArgument, "crossaccount parameter must have boolean value.") + } + } else { + crossAccountDNSEnabled = false + } + + if roleArn != "" { + localCloud, err = cloud.NewCloudWithRole(roleArn, adaptiveRetryMode) + if err != nil { + return nil, "", false, status.Errorf(codes.Unauthenticated, "Unable to initialize aws cloud: %v. Please verify role has the correct AWS permissions for cross account mount", err) + } + } else { + localCloud = driverCloud + } + + return localCloud, roleArn, crossAccountDNSEnabled, nil +} diff --git a/pkg/driver/provisioner_ap.go b/pkg/driver/provisioner_ap.go new file mode 100644 index 000000000..c3d4db745 --- /dev/null +++ b/pkg/driver/provisioner_ap.go @@ -0,0 +1,382 @@ +package driver + +import ( + "context" + "fmt" + "os" + "path" + "strconv" + "strings" + + "github.com/google/uuid" + + "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/kubernetes-sigs/aws-efs-csi-driver/pkg/cloud" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "k8s.io/klog/v2" +) + +type AccessPointProvisioner struct { + BaseProvisioner + tags map[string]string + gidAllocator *GidAllocator + deleteAccessPointRootDir bool +} + +func (a AccessPointProvisioner) Provision(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.Volume, error) { + var reuseAccessPoint bool + var err error + volumeParams := req.GetParameters() + volName := req.GetName() + clientToken := volName + + // if true, then use sha256 hash of pvcName as clientToken instead of PVC Id + // This allows users to reconnect to the same AP from different k8s cluster + if reuseAccessPointStr, ok := volumeParams[ReuseAccessPointKey]; ok { + reuseAccessPoint, err = strconv.ParseBool(reuseAccessPointStr) + if err != nil { + return nil, status.Error(codes.InvalidArgument, "Invalid value for reuseAccessPoint parameter") + } + if reuseAccessPoint { + clientToken = get64LenHash(volumeParams[PvcNameKey]) + klog.V(5).Infof("Client token : %s", clientToken) + } + } + if volName == "" { + return nil, status.Error(codes.InvalidArgument, "Volume name not provided") + } + + // Volume size is required to match PV to PVC by k8s. + // Volume size is not consumed by EFS for any purposes. 
+ volSize := req.GetCapacityRange().GetRequiredBytes() + + var ( + azName string + basePath string + gid int64 + gidMin int64 + gidMax int64 + localCloud cloud.Cloud + roleArn string + uid int64 + crossAccountDNSEnabled bool + ) + + accessPointsOptions := &cloud.AccessPointOptions{ + CapacityGiB: volSize, + } + + if value, ok := volumeParams[FsId]; ok { + if strings.TrimSpace(value) == "" { + return nil, status.Errorf(codes.InvalidArgument, "Parameter %v cannot be empty", FsId) + } + accessPointsOptions.FileSystemId = value + } else { + return nil, status.Errorf(codes.InvalidArgument, "Missing %v parameter", FsId) + } + + localCloud, roleArn, crossAccountDNSEnabled, err = getCloud(req.GetSecrets(), a.cloud, a.adaptiveRetryMode) + if err != nil { + return nil, err + } + + var accessPoint *cloud.AccessPoint + //if reuseAccessPoint is true, check for AP with same Root Directory exists in efs + // if found reuse that AP + if reuseAccessPoint { + existingAP, err := localCloud.FindAccessPointByClientToken(ctx, clientToken, accessPointsOptions.FileSystemId) + if err != nil { + return nil, fmt.Errorf("failed to find access point: %v", err) + } + if existingAP != nil { + //AP path already exists + klog.V(2).Infof("Existing AccessPoint found : %+v", existingAP) + accessPoint = &cloud.AccessPoint{ + AccessPointId: existingAP.AccessPointId, + FileSystemId: existingAP.FileSystemId, + CapacityGiB: accessPointsOptions.CapacityGiB, + } + } + } + + if accessPoint == nil { + // Create tags + tags := map[string]string{ + DefaultTagKey: DefaultTagValue, + } + + // Append input tags to default tag + if len(a.tags) != 0 { + for k, v := range a.tags { + tags[k] = v + } + } + + accessPointsOptions.Tags = tags + + uid = -1 + if value, ok := volumeParams[Uid]; ok { + uid, err = strconv.ParseInt(value, 10, 64) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "Failed to parse invalid %v: %v", Uid, err) + } + if uid < 0 { + return nil, status.Errorf(codes.InvalidArgument, "%v must be greater or equal than 0", Uid) + } + } + + gid = -1 + if value, ok := volumeParams[Gid]; ok { + gid, err = strconv.ParseInt(value, 10, 64) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "Failed to parse invalid %v: %v", Gid, err) + } + if uid < 0 { + return nil, status.Errorf(codes.InvalidArgument, "%v must be greater or equal than 0", Gid) + } + } + + if value, ok := volumeParams[GidMin]; ok { + gidMin, err = strconv.ParseInt(value, 10, 64) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "Failed to parse invalid %v: %v", GidMin, err) + } + if gidMin <= 0 { + return nil, status.Errorf(codes.InvalidArgument, "%v must be greater than 0", GidMin) + } + } + + if value, ok := volumeParams[GidMax]; ok { + // Ensure GID min is provided with GID max + if gidMin == 0 { + return nil, status.Errorf(codes.InvalidArgument, "Missing %v parameter", GidMin) + } + gidMax, err = strconv.ParseInt(value, 10, 64) + if err != nil { + return nil, status.Errorf(codes.InvalidArgument, "Failed to parse invalid %v: %v", GidMax, err) + } + if gidMax <= gidMin { + return nil, status.Errorf(codes.InvalidArgument, "%v must be greater than %v", GidMax, GidMin) + } + } else { + // Ensure GID max is provided with GID min + if gidMin != 0 { + return nil, status.Errorf(codes.InvalidArgument, "Missing %v parameter", GidMax) + } + } + + // Assign default GID ranges if not provided + if gidMin == 0 && gidMax == 0 { + gidMin = DefaultGidMin + gidMax = DefaultGidMax + } + + if value, ok := 
volumeParams[DirectoryPerms]; ok { + accessPointsOptions.DirectoryPerms = value + } + + // Storage class parameter `az` will be used to fetch preferred mount target for cross account mount. + // If the `az` storage class parameter is not provided, a random mount target will be picked for mounting. + // This storage class parameter different from `az` mount option provided by efs-utils https://github.com/aws/efs-utils/blob/v1.31.1/src/mount_efs/__init__.py#L195 + // The `az` mount option provided by efs-utils is used for cross az mount or to provide az of efs one zone file system mount within the same aws-account. + // To make use of the `az` mount option, add it under storage class's `mountOptions` section. https://kubernetes.io/docs/concepts/storage/storage-classes/#mount-options + if value, ok := volumeParams[AzName]; ok { + azName = value + } + + // Check if file system exists. Describe FS or List APs handle appropriate error codes + // With dynamic uid/gid provisioning we can save a call to describe FS, as list APs fails if FS ID does not exist + var accessPoints []*cloud.AccessPoint + if uid == -1 || gid == -1 { + accessPoints, err = localCloud.ListAccessPoints(ctx, accessPointsOptions.FileSystemId) + } else { + _, err = localCloud.DescribeFileSystem(ctx, accessPointsOptions.FileSystemId) + } + if err != nil { + if err == cloud.ErrAccessDenied { + return nil, status.Errorf(codes.Unauthenticated, "Access Denied. Please ensure you have the right AWS permissions: %v", err) + } + if err == cloud.ErrNotFound { + return nil, status.Errorf(codes.InvalidArgument, "File System does not exist: %v", err) + } + return nil, status.Errorf(codes.Internal, "Failed to fetch Access Points or Describe File System: %v", err) + } + + var allocatedGid int64 + if uid == -1 || gid == -1 { + allocatedGid, err = a.gidAllocator.getNextGid(accessPointsOptions.FileSystemId, accessPoints, gidMin, gidMax) + if err != nil { + return nil, err + } + } + if uid == -1 { + uid = allocatedGid + } + if gid == -1 { + gid = allocatedGid + } + + if value, ok := volumeParams[BasePath]; ok { + basePath = value + } + + rootDirName := volName + // Check if a custom structure should be imposed on the access point directory + if value, ok := volumeParams[SubPathPattern]; ok { + // Try and construct the root directory and check it only contains supported components + val, err := interpolateRootDirectoryName(value, volumeParams) + if err == nil { + klog.Infof("Using user-specified structure for access point directory.") + rootDirName = val + if value, ok := volumeParams[EnsureUniqueDirectory]; ok { + if ensureUniqueDirectory, err := strconv.ParseBool(value); !ensureUniqueDirectory && err == nil { + klog.Infof("Not appending PVC UID to path.") + } else { + klog.Infof("Appending PVC UID to path.") + rootDirName = fmt.Sprintf("%s-%s", val, uuid.New().String()) + } + } else { + klog.Infof("Appending PVC UID to path.") + rootDirName = fmt.Sprintf("%s-%s", val, uuid.New().String()) + } + } else { + return nil, err + } + } else { + klog.Infof("Using PV name for access point directory.") + } + + rootDir := path.Join("/", basePath, rootDirName) + if ok, err := validateEfsPathRequirements(rootDir); !ok { + return nil, err + } + klog.Infof("Using %v as the access point directory.", rootDir) + + accessPointsOptions.Uid = uid + accessPointsOptions.Gid = gid + accessPointsOptions.DirectoryPath = rootDir + + accessPoint, err = localCloud.CreateAccessPoint(ctx, clientToken, accessPointsOptions) + if err != nil { + if err == cloud.ErrAccessDenied { + 
+ return nil, status.Errorf(codes.Unauthenticated, "Access Denied. Please ensure you have the right AWS permissions: %v", err)
+ }
+ if err == cloud.ErrAlreadyExists {
+ return nil, status.Errorf(codes.AlreadyExists, "Access Point already exists")
+ }
+ return nil, status.Errorf(codes.Internal, "Failed to create Access point in File System %v: %v", accessPointsOptions.FileSystemId, err)
+ }
+ }
+
+ volContext := map[string]string{}
+
+ // Enable cross-account dns resolution or fetch mount target Ip for cross-account mount
+ if roleArn != "" {
+ if crossAccountDNSEnabled {
+ // This option indicates the customer would like to use DNS to resolve
+ // the cross-account mount target ip address (in order to mount to
+ // the same AZ-ID as the client instance); mounttargetip should
+ // not be used as a mount option in this case.
+ volContext[CrossAccount] = strconv.FormatBool(true)
+ } else {
+ mountTarget, err := localCloud.DescribeMountTargets(ctx, accessPointsOptions.FileSystemId, azName)
+ if err != nil {
+ klog.Warningf("Failed to describe mount targets for file system %v. Skip using `mounttargetip` mount option: %v", accessPointsOptions.FileSystemId, err)
+ } else {
+ volContext[MountTargetIp] = mountTarget.IPAddress
+ }
+ }
+ }
+
+ return &csi.Volume{
+ CapacityBytes: volSize,
+ VolumeId: accessPointsOptions.FileSystemId + "::" + accessPoint.AccessPointId,
+ VolumeContext: volContext,
+ }, nil
+}
+
+func (a AccessPointProvisioner) Delete(ctx context.Context, req *csi.DeleteVolumeRequest) error {
+ var (
+ localCloud cloud.Cloud
+ roleArn string
+ crossAccountDNSEnabled bool
+ err error
+ )
+
+ localCloud, roleArn, crossAccountDNSEnabled, err = getCloud(req.GetSecrets(), a.cloud, a.adaptiveRetryMode)
+ if err != nil {
+ return err
+ }
+
+ volId := req.GetVolumeId()
+ fileSystemId, _, accessPointId, _ := parseVolumeId(volId)
+ // Delete access point root directory if delete-access-point-root-dir is set.
+ if a.deleteAccessPointRootDir {
+ // Check if Access point exists.
+ // If access point exists, retrieve its root directory and delete it.
+ accessPoint, err := localCloud.DescribeAccessPoint(ctx, accessPointId)
+ if err != nil {
+ if err == cloud.ErrAccessDenied {
+ return status.Errorf(codes.Unauthenticated, "Access Denied. Please ensure you have the right AWS permissions: %v", err)
+ }
+ if err == cloud.ErrNotFound {
+ klog.V(5).Infof("DeleteVolume: Access Point %v not found, returning success", accessPointId)
+ return nil
+ }
+ return status.Errorf(codes.Internal, "Could not describe Access Point: %v, error: %v", accessPointId, err)
+ }
+
+ // Mount File System at its root and delete access point root directory
+ mountOptions := []string{"tls", "iam"}
+ if roleArn != "" {
+ if crossAccountDNSEnabled {
+ // Connect via dns rather than mounttargetip
+ mountOptions = append(mountOptions, CrossAccount)
+ } else {
+ mountTarget, err := localCloud.DescribeMountTargets(ctx, fileSystemId, "")
+ if err == nil {
+ mountOptions = append(mountOptions, MountTargetIp+"="+mountTarget.IPAddress)
+ } else {
+ klog.Warningf("Failed to describe mount targets for file system %v. Skip using `mounttargetip` mount option: %v", fileSystemId, err)
+ }
+ }
+ }
+
+ target := TempMountPathPrefix + "/" + accessPointId
+ if err := a.mounter.MakeDir(target); err != nil {
+ return status.Errorf(codes.Internal, "Could not create dir %q: %v", target, err)
+ }
+ if err := a.mounter.Mount(fileSystemId, target, "efs", mountOptions); err != nil {
+ os.Remove(target)
+ return status.Errorf(codes.Internal, "Could not mount %q at %q: %v", fileSystemId, target, err)
+ }
+ err = os.RemoveAll(target + accessPoint.AccessPointRootDir)
+ if err != nil {
+ return status.Errorf(codes.Internal, "Could not delete access point root directory %q: %v", accessPoint.AccessPointRootDir, err)
+ }
+ err = a.mounter.Unmount(target)
+ if err != nil {
+ return status.Errorf(codes.Internal, "Could not unmount %q: %v", target, err)
+ }
+ err = os.Remove(target)
+ if err != nil && !os.IsNotExist(err) {
+ return status.Errorf(codes.Internal, "Could not delete %q: %v", target, err)
+ }
+ }
+
+ // Delete access point
+ if err = localCloud.DeleteAccessPoint(ctx, accessPointId); err != nil {
+ if err == cloud.ErrAccessDenied {
+ return status.Errorf(codes.Unauthenticated, "Access Denied. Please ensure you have the right AWS permissions: %v", err)
+ }
+ if err == cloud.ErrNotFound {
+ klog.V(5).Infof("DeleteVolume: Access Point not found, returning success")
+ return nil
+ }
+ return status.Errorf(codes.Internal, "Failed to Delete volume %v: %v", volId, err)
+ }
+
+ return nil
+}
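
Two behaviors in this file are easy to miss in the diff. First, the ownership fallback in `Provision`: when either `uid` or `gid` is left at -1, a single GID is allocated from the `[gidMin, gidMax]` range and fills whichever fields are missing. Second, access-point volume IDs use a double-colon separator (`<file-system-id>::<access-point-id>`), which is how DeleteVolume later recognizes the mode. A minimal standalone sketch of the fallback rule (the `resolveOwnership` helper is illustrative only, not part of the patch):

    // resolveOwnership mirrors the fallback in Provision: any field still at -1
    // is filled with the single GID handed out by the gidAllocator.
    func resolveOwnership(uid, gid, allocatedGid int64) (int64, int64) {
        if uid == -1 {
            uid = allocatedGid
        }
        if gid == -1 {
            gid = allocatedGid
        }
        return uid, gid
    }

    // resolveOwnership(-1, -1, 50000) == (50000, 50000): both fields share one allocated GID.
    // resolveOwnership(1000, -1, 50000) == (1000, 50000): only the missing field is filled.
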
diff --git a/pkg/driver/sanity_test.go b/pkg/driver/sanity_test.go
index d7d6af317..dbd53771e 100644
--- a/pkg/driver/sanity_test.go
+++ b/pkg/driver/sanity_test.go
@@ -70,16 +70,17 @@ func TestSanityEFSCSI(t *testing.T) {
 mockCtrl := gomock.NewController(t)
 mockCloud := cloud.NewFakeCloudProvider()
+ mounter := NewFakeMounter()
 drv := Driver{
 endpoint: endpoint,
 nodeID: "sanity",
- mounter: NewFakeMounter(),
+ mounter: mounter,
 efsWatchdog: &mockWatchdog{},
 cloud: mockCloud,
 nodeCaps: nodeCaps,
 volMetricsOptIn: true,
 volStatter: NewVolStatter(),
- gidAllocator: NewGidAllocator(),
+ provisioners: getProvisioners(mockCloud, mounter, nil, false, false),
 }
 defer func() {
 if r := recover(); r != nil {

From 6bc821a653173125983f02c23f0144db9fecb56e Mon Sep 17 00:00:00 2001
From: Maxim Patlasov
Date: Tue, 7 Jan 2025 15:15:12 -0800
Subject: [PATCH 3/6] Add directory provisioning mode

Implement `DirectoryProvisioner` struct and its `Provision/Delete` methods.
Add `delete-provisioned-dir` command-line option to control DeleteVolume
behavior in efs-dir mode.
---
 cmd/main.go | 4 +-
 pkg/driver/controller.go | 15 ++-
 pkg/driver/controller_test.go | 2 +-
 pkg/driver/driver.go | 4 +-
 pkg/driver/os_client.go | 89 +++++++++++++++
 pkg/driver/provisioner.go | 10 +-
 pkg/driver/provisioner_dir.go | 205 ++++++++++++++++++++++++++++++++++
 pkg/driver/sanity_test.go | 2 +-
 8 files changed, 321 insertions(+), 10 deletions(-)
 create mode 100644 pkg/driver/os_client.go
 create mode 100644 pkg/driver/provisioner_dir.go
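
After this patch the volume ID itself encodes the provisioning mode: access-point volumes keep the `<file-system-id>::<access-point-id>` form, while directory-provisioned volumes carry a plain subpath after a single colon. A short illustrative sketch, using made-up IDs (`parseVolumeId` is the driver's existing helper, returning file system ID, subpath, access point ID, and a parse error):

    // Directory-provisioned volume: subpath is set, access point ID is empty.
    fsId, subpath, accessPointId, _ := parseVolumeId("fs-abcd1234:/dynamic/pv-1")
    // expected: fsId == "fs-abcd1234", subpath == "/dynamic/pv-1", accessPointId == ""
    //
    // DeleteVolume then routes on the parsed pieces:
    //   accessPointId != "" -> d.provisioners[AccessPointMode] ("efs-ap")
    //   subpath != ""       -> d.provisioners[DirectoryMode] ("efs-dir")
    //   neither             -> nothing to delete, return success
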
diff --git a/cmd/main.go b/cmd/main.go
index 98e195a62..be97b0e35 100644
--- a/cmd/main.go
+++ b/cmd/main.go
@@ -41,6 +41,8 @@ func main() {
 volMetricsFsRateLimit = flag.Int("vol-metrics-fs-rate-limit", 5, "Volume metrics routines rate limiter per file system")
 deleteAccessPointRootDir = flag.Bool("delete-access-point-root-dir", false, "Opt in to delete access point root directory by DeleteVolume. By default, DeleteVolume will delete the access point behind Persistent Volume and deleting access point will not delete the access point root directory or its contents.")
+ deleteProvisionedDir = flag.Bool("delete-provisioned-dir", false,
+ "Opt in to delete any provisioned directories and their contents. By default, DeleteVolume will not delete the directory behind the Persistent Volume. Note: if root squash is enabled on your EFS file system, this option may not work.")
 adaptiveRetryMode = flag.Bool("adaptive-retry-mode", true, "Opt out to use standard sdk retry configuration. By default, adaptive retry mode will be used to more heavily client side rate limit EFS API requests.")
 tags = flag.String("tags", "", "Space separated key:value pairs which will be added as tags for EFS resources. For example, 'environment:prod region:us-east-1'")
 )
@@ -61,7 +63,7 @@ func main() {
 if err != nil {
 klog.Fatalln(err)
 }
- drv := driver.NewDriver(*endpoint, etcAmazonEfs, *efsUtilsStaticFilesPath, *tags, *volMetricsOptIn, *volMetricsRefreshPeriod, *volMetricsFsRateLimit, *deleteAccessPointRootDir, *adaptiveRetryMode)
+ drv := driver.NewDriver(*endpoint, etcAmazonEfs, *efsUtilsStaticFilesPath, *tags, *volMetricsOptIn, *volMetricsRefreshPeriod, *volMetricsFsRateLimit, *deleteAccessPointRootDir, *adaptiveRetryMode, *deleteProvisionedDir)
 if err := drv.Run(); err != nil {
 klog.Fatalln(err)
 }
diff --git a/pkg/driver/controller.go b/pkg/driver/controller.go
index 78a5900c1..61c74241b 100644
--- a/pkg/driver/controller.go
+++ b/pkg/driver/controller.go
@@ -40,6 +40,7 @@ const (
 DefaultTagKey = "efs.csi.aws.com/cluster"
 DefaultTagValue = "true"
 DirectoryPerms = "directoryPerms"
+ DirectoryMode = "efs-dir"
 EnsureUniqueDirectory = "ensureUniqueDirectory"
 FsId = "fileSystemId"
 Gid = "gid"
@@ -120,19 +121,25 @@ func (d *Driver) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest)
 return nil, status.Error(codes.InvalidArgument, "Volume ID not provided")
 }
- _, _, accessPointId, err := parseVolumeId(volId)
+ _, subpath, accessPointId, err := parseVolumeId(volId)
 if err != nil {
 //Returning success for an invalid volume ID. 
See here - https://github.com/kubernetes-csi/csi-test/blame/5deb83d58fea909b2895731d43e32400380aae3c/pkg/sanity/controller.go#L733 klog.V(5).Infof("DeleteVolume: Failed to parse volumeID: %v, err: %v, returning success", volId, err) return &csi.DeleteVolumeResponse{}, nil } - if accessPointId == "" { - klog.V(5).Infof("DeleteVolume: No Access Point for volume %v, returning success", volId) + var provisioningMode string + if accessPointId != "" { + provisioningMode = AccessPointMode + } else if subpath != "" { + provisioningMode = DirectoryMode + } else { + klog.V(5).Infof("DeleteVolume: No Access Point or subpath for volume %v, returning success", volId) return &csi.DeleteVolumeResponse{}, nil } - err = d.provisioners[AccessPointMode].Delete(ctx, req) + klog.V(5).Infof("DeleteVolume: provisioningMode %v", provisioningMode) + err = d.provisioners[provisioningMode].Delete(ctx, req) if err != nil { return nil, status.Errorf(codes.Internal, "Failed to Delete volume %v: %v", volId, err) } diff --git a/pkg/driver/controller_test.go b/pkg/driver/controller_test.go index b9a5c2a95..e9c3033e3 100644 --- a/pkg/driver/controller_test.go +++ b/pkg/driver/controller_test.go @@ -2932,6 +2932,6 @@ func buildDriver(endpoint string, cloud cloud.Cloud, tags string, mounter Mounte endpoint: endpoint, cloud: cloud, mounter: mounter, - provisioners: getProvisioners(cloud, mounter, parseTagsFromStr(tags), deleteAccessPointRootDir, false), + provisioners: getProvisioners(cloud, mounter, parseTagsFromStr(tags), deleteAccessPointRootDir, false, &FakeOsClient{}, false), } } diff --git a/pkg/driver/driver.go b/pkg/driver/driver.go index 9df8d8de5..c0ca5c1ec 100644 --- a/pkg/driver/driver.go +++ b/pkg/driver/driver.go @@ -52,7 +52,7 @@ type Driver struct { provisioners map[string]Provisioner } -func NewDriver(endpoint, efsUtilsCfgPath, efsUtilsStaticFilesPath, tags string, volMetricsOptIn bool, volMetricsRefreshPeriod float64, volMetricsFsRateLimit int, deleteAccessPointRootDir bool, adaptiveRetryMode bool) *Driver { +func NewDriver(endpoint, efsUtilsCfgPath, efsUtilsStaticFilesPath, tags string, volMetricsOptIn bool, volMetricsRefreshPeriod float64, volMetricsFsRateLimit int, deleteAccessPointRootDir bool, adaptiveRetryMode bool, deleteProvisionedDir bool) *Driver { cloud, err := cloud.NewCloud(adaptiveRetryMode) if err != nil { klog.Fatalln(err) @@ -62,7 +62,7 @@ func NewDriver(endpoint, efsUtilsCfgPath, efsUtilsStaticFilesPath, tags string, watchdog := newExecWatchdog(efsUtilsCfgPath, efsUtilsStaticFilesPath, "amazon-efs-mount-watchdog") mounter := newNodeMounter() parsedTags := parseTagsFromStr(strings.TrimSpace(tags)) - provisioners := getProvisioners(cloud, mounter, parsedTags, deleteAccessPointRootDir, adaptiveRetryMode) + provisioners := getProvisioners(cloud, mounter, parsedTags, deleteAccessPointRootDir, adaptiveRetryMode, &RealOsClient{}, deleteProvisionedDir) return &Driver{ endpoint: endpoint, nodeID: cloud.GetMetadata().GetInstanceID(), diff --git a/pkg/driver/os_client.go b/pkg/driver/os_client.go new file mode 100644 index 000000000..6fa5ace80 --- /dev/null +++ b/pkg/driver/os_client.go @@ -0,0 +1,89 @@ +package driver + +import "os" + +type OsClient interface { + MkDirAllWithPerms(path string, perms os.FileMode, uid, gid int) error + MkDirAllWithPermsNoOwnership(path string, perms os.FileMode) error + GetPerms(path string) (os.FileMode, error) + Remove(path string) error + RemoveAll(path string) error +} + +// Real OsClient + +type RealOsClient struct{} + +func NewOsClient() *RealOsClient { + return 
&RealOsClient{}
+}
+
+func (o *RealOsClient) MkDirAllWithPerms(path string, perms os.FileMode, uid, gid int) error {
+ err := os.MkdirAll(path, perms)
+ if err != nil {
+ return err
+ }
+ // The extra chmod guarantees we get the permissions we desire, in spite of the umask
+ err = os.Chmod(path, perms)
+ if err != nil {
+ return err
+ }
+ err = os.Chown(path, uid, gid)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (o *RealOsClient) MkDirAllWithPermsNoOwnership(path string, perms os.FileMode) error {
+ err := os.MkdirAll(path, perms)
+ if err != nil {
+ return err
+ }
+ // The extra chmod guarantees we get the permissions we desire, in spite of the umask
+ err = os.Chmod(path, perms)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func (o *RealOsClient) Remove(path string) error {
+ return os.Remove(path)
+}
+
+func (o *RealOsClient) RemoveAll(path string) error {
+ return os.RemoveAll(path)
+}
+
+func (o *RealOsClient) GetPerms(path string) (os.FileMode, error) {
+ fInfo, err := os.Stat(path)
+ if err != nil {
+ return 0, err
+ }
+ return fInfo.Mode(), nil
+}
+
+// Fake OsClient
+
+type FakeOsClient struct{}
+
+func (o *FakeOsClient) MkDirAllWithPerms(_ string, _ os.FileMode, _, _ int) error {
+ return nil
+}
+
+func (o *FakeOsClient) MkDirAllWithPermsNoOwnership(_ string, _ os.FileMode) error {
+ return nil
+}
+
+func (o *FakeOsClient) Remove(_ string) error {
+ return nil
+}
+
+func (o *FakeOsClient) RemoveAll(_ string) error {
+ return nil
+}
+
+func (o *FakeOsClient) GetPerms(_ string) (os.FileMode, error) {
+ return 0, nil
+}
diff --git a/pkg/driver/provisioner.go b/pkg/driver/provisioner.go
index ad5f20b28..ab9176413 100644
--- a/pkg/driver/provisioner.go
+++ b/pkg/driver/provisioner.go
@@ -21,7 +21,7 @@ type BaseProvisioner struct {
 adaptiveRetryMode bool
 }
-func getProvisioners(cloud cloud.Cloud, mounter Mounter, tags map[string]string, deleteAccessPointRootDir bool, adaptiveRetryMode bool) map[string]Provisioner {
+func getProvisioners(cloud cloud.Cloud, mounter Mounter, tags map[string]string, deleteAccessPointRootDir bool, adaptiveRetryMode bool, osClient OsClient, deleteProvisionedDir bool) map[string]Provisioner {
 return map[string]Provisioner{
 AccessPointMode: AccessPointProvisioner{
 BaseProvisioner: BaseProvisioner{
@@ -33,6 +33,14 @@ func getProvisioners(cloud cloud.Cloud, mounter Mounter, tags map[string]string,
 gidAllocator: NewGidAllocator(),
 deleteAccessPointRootDir: deleteAccessPointRootDir,
 },
+ DirectoryMode: DirectoryProvisioner{
+ BaseProvisioner: BaseProvisioner{
+ cloud: cloud,
+ mounter: mounter,
+ },
+ osClient: osClient,
+ deleteProvisionedDir: deleteProvisionedDir,
+ },
 }
 }
diff --git a/pkg/driver/provisioner_dir.go b/pkg/driver/provisioner_dir.go
new file mode 100644
index 000000000..1e4ae0762
--- /dev/null
+++ b/pkg/driver/provisioner_dir.go
@@ -0,0 +1,205 @@
+package driver
+
+import (
+ "context"
+ "os"
+ "path"
+ "strconv"
+ "strings"
+
+ "github.com/google/uuid"
+
+ "github.com/container-storage-interface/spec/lib/go/csi"
+ "github.com/kubernetes-sigs/aws-efs-csi-driver/pkg/cloud"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
+ "k8s.io/klog/v2"
+)
+
+type DirectoryProvisioner struct {
+ BaseProvisioner
+ osClient OsClient
+ deleteProvisionedDir bool
+}
+
+func (d DirectoryProvisioner) Provision(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.Volume, error) {
+ var provisionedPath string
+
+ var fileSystemId string
+ volumeParams := req.GetParameters()
+ if value, ok := volumeParams[FsId]; ok {
+ if 
strings.TrimSpace(value) == "" { + return nil, status.Errorf(codes.InvalidArgument, "Parameter %v cannot be empty", FsId) + } + fileSystemId = value + } else { + return nil, status.Errorf(codes.InvalidArgument, "Missing %v parameter", FsId) + } + klog.V(5).Infof("Provisioning directory on FileSystem %s...", fileSystemId) + + localCloud, roleArn, crossAccountDNSEnabled, err := getCloud(req.GetSecrets(), d.cloud, d.adaptiveRetryMode) + if err != nil { + return nil, err + } + + mountOptions, err := getMountOptions(ctx, localCloud, fileSystemId, roleArn, crossAccountDNSEnabled) + if err != nil { + return nil, err + } + target := TempMountPathPrefix + "/" + uuid.New().String() + if err := d.mounter.MakeDir(target); err != nil { + return nil, status.Errorf(codes.Internal, "Could not create dir %q: %v", target, err) + } + if err := d.mounter.Mount(fileSystemId, target, "efs", mountOptions); err != nil { + return nil, status.Errorf(codes.Internal, "Could not mount %q at %q: %v", fileSystemId, target, err) + } + // Extract the basePath + var basePath string + if value, ok := volumeParams[BasePath]; ok { + basePath = value + } + + rootDirName := req.Name + provisionedPath = path.Join(basePath, rootDirName) + + klog.V(5).Infof("Provisioning directory at path %s", provisionedPath) + + // Grab the required permissions + perms := os.FileMode(0777) + if value, ok := volumeParams[DirectoryPerms]; ok { + parsedPerms, err := strconv.ParseUint(value, 8, 32) + if err == nil { + perms = os.FileMode(parsedPerms) + } + } + + klog.V(5).Infof("Provisioning directory with permissions %s", perms) + + provisionedDirectory := path.Join(target, provisionedPath) + err = d.osClient.MkDirAllWithPermsNoOwnership(provisionedDirectory, perms) + if err != nil { + return nil, status.Errorf(codes.Internal, "Could not provision directory: %v", err) + } + + // Check the permissions that actually got created + actualPerms, err := d.osClient.GetPerms(provisionedDirectory) + if err != nil { + klog.V(5).Infof("Could not load file info for '%s'", provisionedDirectory) + } + klog.V(5).Infof("Permissions of folder '%s' are '%s'", provisionedDirectory, actualPerms) + + err = d.mounter.Unmount(target) + if err != nil { + return nil, status.Errorf(codes.Internal, "Could not unmount %q: %v", target, err) + } + err = d.osClient.RemoveAll(target) + if err != nil { + return nil, status.Errorf(codes.Internal, "Could not delete %q: %v", target, err) + } + + volContext := map[string]string{} + + // Enable cross-account dns resolution or fetch mount target Ip for cross-account mount + if roleArn != "" { + if crossAccountDNSEnabled { + // This option indicates the customer would like to use DNS to resolve + // the cross-account mount target ip address (in order to mount to + // the same AZ-ID as the client instance); mounttargetip should + // not be used as a mount option in this case. + volContext[CrossAccount] = strconv.FormatBool(true) + } else { + var azName string + if value, ok := volumeParams[AzName]; ok { + azName = value + } + mountTarget, err := localCloud.DescribeMountTargets(ctx, fileSystemId, azName) + if err != nil { + klog.Warningf("Failed to describe mount targets for file system %v. 
Skip using `mounttargetip` mount option: %v", fileSystemId, err)
+ } else {
+ volContext[MountTargetIp] = mountTarget.IPAddress
+ }
+ }
+ }
+
+ return &csi.Volume{
+ CapacityBytes: req.GetCapacityRange().GetRequiredBytes(),
+ VolumeId: fileSystemId + ":" + provisionedPath,
+ VolumeContext: volContext,
+ }, nil
+}
+
+func (d DirectoryProvisioner) Delete(ctx context.Context, req *csi.DeleteVolumeRequest) (e error) {
+ if !d.deleteProvisionedDir {
+ return nil
+ }
+ fileSystemId, subpath, _, _ := parseVolumeId(req.GetVolumeId())
+ klog.V(5).Infof("Running delete for EFS %s at subpath %s", fileSystemId, subpath)
+
+ localCloud, roleArn, crossAccountDNSEnabled, err := getCloud(req.GetSecrets(), d.cloud, d.adaptiveRetryMode)
+ if err != nil {
+ return err
+ }
+
+ mountOptions, err := getMountOptions(ctx, localCloud, fileSystemId, roleArn, crossAccountDNSEnabled)
+ if err != nil {
+ return err
+ }
+
+ target := TempMountPathPrefix + "/" + uuid.New().String()
+ klog.V(5).Infof("Making temporary directory at '%s' to temporarily mount EFS folder in", target)
+ if err := d.mounter.MakeDir(target); err != nil {
+ return status.Errorf(codes.Internal, "Could not create dir %q: %v", target, err)
+ }
+
+ defer func() {
+ // Try to unmount the directory
+ klog.V(5).Infof("Unmounting directory mounted at '%s'", target)
+ unmountErr := d.mounter.Unmount(target)
+ // If that fails then track the error but don't do anything else
+ if unmountErr != nil {
+ klog.V(5).Infof("Unmount failed at '%s'", target)
+ e = status.Errorf(codes.Internal, "Could not unmount %q: %v", target, unmountErr)
+ } else {
+ // If it is nil then it's safe to try and delete the directory as it should now be empty
+ klog.V(5).Infof("Deleting temporary directory at '%s'", target)
+ if err := d.osClient.RemoveAll(target); err != nil {
+ e = status.Errorf(codes.Internal, "Could not delete %q: %v", target, err)
+ }
+ }
+ }()
+
+ klog.V(5).Infof("Mounting EFS '%s' into temporary directory at '%s'", fileSystemId, target)
+ if err := d.mounter.Mount(fileSystemId, target, "efs", mountOptions); err != nil {
+ // If this call throws an error we're about to return anyway and the mount has failed, so it's more
+ // important we return with that information than worry about the folder not being deleted
+ _ = d.osClient.Remove(target)
+ return status.Errorf(codes.Internal, "Could not mount %q at %q: %v", fileSystemId, target, err)
+ }
+
+ pathToRemove := path.Join(target, subpath)
+ klog.V(5).Infof("Deleting all files at %s, stored on EFS %s", pathToRemove, fileSystemId)
+ if err := d.osClient.RemoveAll(pathToRemove); err != nil {
+ return status.Errorf(codes.Internal, "Could not delete directory %q: %v", subpath, err)
+ }
+
+ return nil
+}
+
+func getMountOptions(ctx context.Context, cloud cloud.Cloud, fileSystemId string, roleArn string, crossAccountDNSEnabled bool) ([]string, error) {
+ mountOptions := []string{"tls", "iam"}
+ if roleArn != "" {
+ if crossAccountDNSEnabled {
+ // Connect via dns rather than mounttargetip
+ mountOptions = append(mountOptions, CrossAccount)
+ } else {
+ mountTarget, err := cloud.DescribeMountTargets(ctx, fileSystemId, "")
+ if err == nil {
+ mountOptions = append(mountOptions, MountTargetIp+"="+mountTarget.IPAddress)
+ } else {
+ klog.Warningf("Failed to describe mount targets for file system %v. Skip using `mounttargetip` mount option: %v", fileSystemId, err)
+ }
+ }
+ }
+ return mountOptions, nil
+}
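
Read together, `Provision` and `Delete` communicate only through the volume ID: `Provision` publishes `<file-system-id>:<basePath>/<name>`, and `Delete` recovers the subpath from it, joins it onto a fresh temporary mount, and removes it. The single `:` separator is what distinguishes these IDs from the `::` access-point form. A minimal sketch of the round trip, with example values (illustrative only, not part of the patch):

    fsId, basePath, name := "fs-abcd1234", "/dynamic", "pv-1"
    volumeId := fsId + ":" + path.Join(basePath, name)
    // volumeId == "fs-abcd1234:/dynamic/pv-1"

    // Later, in Delete:
    //   _, subpath, _, _ := parseVolumeId(volumeId) // subpath == "/dynamic/pv-1"
    //   pathToRemove := path.Join(target, subpath)  // under the temporary EFS mount
    //   d.osClient.RemoveAll(pathToRemove)
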
diff --git a/pkg/driver/sanity_test.go b/pkg/driver/sanity_test.go
index dbd53771e..11a41af69 100644
--- a/pkg/driver/sanity_test.go
+++ b/pkg/driver/sanity_test.go
@@ -80,7 +80,7 @@ func TestSanityEFSCSI(t *testing.T) {
 nodeCaps: nodeCaps,
 volMetricsOptIn: true,
 volStatter: NewVolStatter(),
- provisioners: getProvisioners(mockCloud, mounter, nil, false, false),
+ provisioners: getProvisioners(mockCloud, mounter, nil, false, false, &FakeOsClient{}, false),
 }
 defer func() {
 if r := recover(); r != nil {

From 03a7f1ab671ca91258358b71e8a762128eafaa2a Mon Sep 17 00:00:00 2001
From: Maxim Patlasov
Date: Tue, 7 Jan 2025 16:36:00 -0800
Subject: [PATCH 4/6] Add unit-tests for directory and access-point
 provisioning modes

The patch adds new unit-tests for `provisioner_ap.go` and
`provisioner_dir.go`, and also adds a few unit-tests for `controller.go`.
---
 pkg/driver/controller_test.go | 291 +++++++++----
 pkg/driver/os_client.go | 24 ++
 pkg/driver/provisioner_ap_test.go | 640 +++++++++++++++++++++++++++++
 pkg/driver/provisioner_dir_test.go | 615 +++++++++++++++++++++++++++
 pkg/driver/provisioner_test.go | 41 ++
 5 files changed, 1526 insertions(+), 85 deletions(-)
 create mode 100644 pkg/driver/provisioner_ap_test.go
 create mode 100644 pkg/driver/provisioner_dir_test.go
 create mode 100644 pkg/driver/provisioner_test.go
diff --git a/pkg/driver/controller_test.go b/pkg/driver/controller_test.go
index e9c3033e3..b8d61a257 100644
--- a/pkg/driver/controller_test.go
+++ b/pkg/driver/controller_test.go
@@ -21,13 +21,14 @@ import (
 func TestCreateVolume(t *testing.T) {
 var (
- endpoint = "endpoint"
- volumeName = "volumeName"
- fsId = "fs-abcd1234"
- apId = "fsap-abcd1234xyz987"
- volumeId = "fs-abcd1234::fsap-abcd1234xyz987"
- capacityRange int64 = 5368709120
- stdVolCap = &csi.VolumeCapability{
+ endpoint = "endpoint"
+ volumeName = "volumeName"
+ fsId = "fs-abcd1234"
+ apId = "fsap-abcd1234xyz987"
+ volumeId = "fs-abcd1234::fsap-abcd1234xyz987"
+ dirProvisioningVolumeId = "fs-abcd1234:/dynamic/" + volumeName
+ capacityRange int64 = 5368709120
+ stdVolCap = &csi.VolumeCapability{
 AccessType: &csi.VolumeCapability_Mount{
 Mount: &csi.VolumeCapability_MountVolume{},
 },
@@ -46,7 +47,7 @@ func TestCreateVolume(t *testing.T) {
 mockCtl := gomock.NewController(t)
 mockCloud := mocks.NewMockCloud(mockCtl)
- driver := buildDriver(endpoint, mockCloud, "", nil, false)
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
 req := &csi.CreateVolumeRequest{
 Name: volumeName,
@@ -107,7 +108,7 @@ func TestCreateVolume(t *testing.T) {
 mockCtl := gomock.NewController(t)
 mockCloud := mocks.NewMockCloud(mockCtl)
- driver := buildDriver(endpoint, mockCloud, "", nil, false)
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
 req := &csi.CreateVolumeRequest{
 Name: volumeName,
@@ -170,7 +171,7 @@ func TestCreateVolume(t *testing.T) {
 mockCtl := gomock.NewController(t)
 mockCloud := mocks.NewMockCloud(mockCtl)
- driver := buildDriver(endpoint, mockCloud, "", nil, false)
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
 req := &csi.CreateVolumeRequest{
 Name: volumeName,
@@ -248,7 +249,7 @@ func TestCreateVolume(t *testing.T) {
 mockCtl := gomock.NewController(t)
 mockCloud := mocks.NewMockCloud(mockCtl)
- driver := buildDriver(endpoint, mockCloud, "", nil, false)
+ driver := buildDriver(endpoint, mockCloud, "", nil, false, false)
 req := &csi.CreateVolumeRequest{
 Name: volumeName,
@@ 
-373,7 +374,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -459,7 +460,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -509,12 +510,12 @@ func TestCreateVolume(t *testing.T) { }, }, { - name: "Success: Normal flow", + name: "Success: Normal flow, Access Point Provisioning", testFunc: func(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -563,13 +564,58 @@ func TestCreateVolume(t *testing.T) { mockCtl.Finish() }, }, + { + name: "Success: Normal flow, Directory Provisioning", + testFunc: func(t *testing.T) { + mockCtl := gomock.NewController(t) + mockMounter := mocks.NewMockMounter(mockCtl) + mockMounter.EXPECT().MakeDir(gomock.Any()).Return(nil) + mockMounter.EXPECT().Mount(fsId, gomock.Any(), "efs", gomock.Any()).Return(nil) + mockMounter.EXPECT().Unmount(gomock.Any()).Return(nil) + + driver := buildDriver(endpoint, nil, "", mockMounter, false, false) + + req := &csi.CreateVolumeRequest{ + Name: volumeName, + VolumeCapabilities: []*csi.VolumeCapability{ + stdVolCap, + }, + CapacityRange: &csi.CapacityRange{ + RequiredBytes: capacityRange, + }, + Parameters: map[string]string{ + ProvisioningMode: "efs-dir", + FsId: fsId, + DirectoryPerms: "777", + BasePath: "/dynamic", + }, + } + + ctx := context.Background() + + res, err := driver.CreateVolume(ctx, req) + + if err != nil { + t.Fatalf("CreateVolume failed: %v", err) + } + + if res.Volume == nil { + t.Fatal("Volume is nil") + } + + if res.Volume.VolumeId != dirProvisioningVolumeId { + t.Fatalf("Volume Id mismatched. 
Expected: %v, Actual: %v", dirProvisioningVolumeId, res.Volume.VolumeId) + } + mockCtl.Finish() + }, + }, { name: "Success: Using Default GID ranges", testFunc: func(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -621,7 +667,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "cluster:efs", nil, false) + driver := buildDriver(endpoint, mockCloud, "cluster:efs", nil, false, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -675,7 +721,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "cluster:efs", nil, false) + driver := buildDriver(endpoint, mockCloud, "cluster:efs", nil, false, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -729,7 +775,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) pvcNameVal := "test-pvc" @@ -788,7 +834,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) pvName := "foo" pvcName := "bar" @@ -853,7 +899,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) pvcName := "foo" directoryCreated := fmt.Sprintf("/%s", pvcName) @@ -916,7 +962,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) pvcName := "foo" basePath := "bash" @@ -982,7 +1028,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) pvcName := "foo" basePath := "bash" @@ -1049,7 +1095,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) pvcName := "foo" directoryCreated := fmt.Sprintf("/%s", pvcName) @@ -1113,7 +1159,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -1173,7 +1219,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := 
&csi.CreateVolumeRequest{ Name: volumeName, @@ -1234,7 +1280,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) pvcName := "foo" directoryCreated := fmt.Sprintf("/%s/%s", pvcName, pvcName) @@ -1297,7 +1343,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.CreateVolumeRequest{ Parameters: map[string]string{ @@ -1321,7 +1367,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -1346,7 +1392,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -1374,7 +1420,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -1412,7 +1458,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -1455,7 +1501,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -1493,7 +1539,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -1524,7 +1570,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -1554,7 +1600,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -1584,7 +1630,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.CreateVolumeRequest{ Name: 
volumeName, @@ -1615,7 +1661,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -1647,7 +1693,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -1679,7 +1725,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -1711,7 +1757,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -1743,7 +1789,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -1775,7 +1821,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -1807,7 +1853,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -1840,7 +1886,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -1873,7 +1919,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -1905,7 +1951,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -1937,7 +1983,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -1973,7 +2019,7 @@ func TestCreateVolume(t *testing.T) { 
mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -2007,7 +2053,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -2043,7 +2089,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -2077,7 +2123,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -2113,7 +2159,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -2147,7 +2193,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -2182,7 +2228,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -2217,7 +2263,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -2272,7 +2318,7 @@ func TestCreateVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) secrets := map[string]string{} secrets["awsRoleArn"] = "arn:aws:iam::1234567890:role/EFSCrossAccountRole" @@ -2316,7 +2362,7 @@ func TestCreateVolume(t *testing.T) { subPathPattern := "${.PVC.name}/${foo}" - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -2356,7 +2402,7 @@ func TestCreateVolume(t *testing.T) { subPathPattern := "this-directory-name-is-far-too-long-for-any-practical-purposes-and-only-serves-to-prove-a-point" - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -2396,7 +2442,7 @@ func TestCreateVolume(t *testing.T) { subPathPattern := 
"a/b/c/d/e/f" - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.CreateVolumeRequest{ Name: volumeName, @@ -2437,10 +2483,11 @@ func TestCreateVolume(t *testing.T) { func TestDeleteVolume(t *testing.T) { var ( - apId = "fsap-abcd1234xyz987" - fsId = "fs-abcd1234" - endpoint = "endpoint" - volumeId = "fs-abcd1234::fsap-abcd1234xyz987" + apId = "fsap-abcd1234xyz987" + fsId = "fs-abcd1234" + endpoint = "endpoint" + volumeId = "fs-abcd1234::fsap-abcd1234xyz987" + dirProvisionedVolumeId = "fs-abcd1234:/dynamic/newVolume" ) testCases := []struct { @@ -2448,12 +2495,12 @@ func TestDeleteVolume(t *testing.T) { testFunc func(t *testing.T) }{ { - name: "Success: Normal flow", + name: "Success: Normal flow, Access Point Provisioning", testFunc: func(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.DeleteVolumeRequest{ VolumeId: volumeId, @@ -2468,6 +2515,26 @@ func TestDeleteVolume(t *testing.T) { mockCtl.Finish() }, }, + { + name: "Success: Normal flow, Directory Provisioning", + testFunc: func(t *testing.T) { + mockCtl := gomock.NewController(t) + mockCloud := mocks.NewMockCloud(mockCtl) + + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) + + req := &csi.DeleteVolumeRequest{ + VolumeId: dirProvisionedVolumeId, + } + + ctx := context.Background() + _, err := driver.DeleteVolume(ctx, req) + if err != nil { + t.Fatalf("Delete Volume failed: %v", err) + } + mockCtl.Finish() + }, + }, { name: "Success: Normal flow with deleteAccessPointRootDir", testFunc: func(t *testing.T) { @@ -2475,7 +2542,7 @@ func TestDeleteVolume(t *testing.T) { mockCloud := mocks.NewMockCloud(mockCtl) mockMounter := mocks.NewMockMounter(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", mockMounter, true) + driver := buildDriver(endpoint, mockCloud, "", mockMounter, true, false) req := &csi.DeleteVolumeRequest{ VolumeId: volumeId, @@ -2501,6 +2568,30 @@ func TestDeleteVolume(t *testing.T) { mockCtl.Finish() }, }, + { + name: "Success: Normal flow, Directory Provisioning with deleteProvisionedDir", + testFunc: func(t *testing.T) { + mockCtl := gomock.NewController(t) + mockCloud := mocks.NewMockCloud(mockCtl) + mockMounter := mocks.NewMockMounter(mockCtl) + mockMounter.EXPECT().MakeDir(gomock.Any()).Return(nil) + mockMounter.EXPECT().Mount(fsId, gomock.Any(), "efs", gomock.Any()).Return(nil) + mockMounter.EXPECT().Unmount(gomock.Any()).Return(nil) + + driver := buildDriver(endpoint, mockCloud, "", mockMounter, false, true) + + req := &csi.DeleteVolumeRequest{ + VolumeId: dirProvisionedVolumeId, + } + + ctx := context.Background() + _, err := driver.DeleteVolume(ctx, req) + if err != nil { + t.Fatalf("Delete Volume failed: %v", err) + } + mockCtl.Finish() + }, + }, { name: "Success: DescribeAccessPoint Access Point Does not exist", testFunc: func(t *testing.T) { @@ -2508,7 +2599,7 @@ func TestDeleteVolume(t *testing.T) { mockCloud := mocks.NewMockCloud(mockCtl) mockMounter := mocks.NewMockMounter(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", mockMounter, true) + driver := buildDriver(endpoint, mockCloud, "", mockMounter, true, false) req := &csi.DeleteVolumeRequest{ VolumeId: volumeId, @@ -2530,7 +2621,7 @@ func TestDeleteVolume(t *testing.T) { mockCloud := mocks.NewMockCloud(mockCtl) mockMounter := 
mocks.NewMockMounter(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", mockMounter, true) + driver := buildDriver(endpoint, mockCloud, "", mockMounter, true, false) req := &csi.DeleteVolumeRequest{ VolumeId: volumeId, @@ -2552,7 +2643,7 @@ func TestDeleteVolume(t *testing.T) { mockCloud := mocks.NewMockCloud(mockCtl) mockMounter := mocks.NewMockMounter(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", mockMounter, true) + driver := buildDriver(endpoint, mockCloud, "", mockMounter, true, false) req := &csi.DeleteVolumeRequest{ VolumeId: volumeId, @@ -2574,7 +2665,7 @@ func TestDeleteVolume(t *testing.T) { mockCloud := mocks.NewMockCloud(mockCtl) mockMounter := mocks.NewMockMounter(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", mockMounter, true) + driver := buildDriver(endpoint, mockCloud, "", mockMounter, true, false) req := &csi.DeleteVolumeRequest{ VolumeId: volumeId, @@ -2604,7 +2695,7 @@ func TestDeleteVolume(t *testing.T) { mockCloud := mocks.NewMockCloud(mockCtl) mockMounter := mocks.NewMockMounter(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", mockMounter, true) + driver := buildDriver(endpoint, mockCloud, "", mockMounter, true, false) req := &csi.DeleteVolumeRequest{ VolumeId: volumeId, @@ -2635,7 +2726,7 @@ func TestDeleteVolume(t *testing.T) { mockCloud := mocks.NewMockCloud(mockCtl) mockMounter := mocks.NewMockMounter(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", mockMounter, true) + driver := buildDriver(endpoint, mockCloud, "", mockMounter, true, false) req := &csi.DeleteVolumeRequest{ VolumeId: volumeId, @@ -2666,7 +2757,7 @@ func TestDeleteVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.DeleteVolumeRequest{ VolumeId: volumeId, @@ -2681,13 +2772,29 @@ func TestDeleteVolume(t *testing.T) { mockCtl.Finish() }, }, + { + name: "Success: Cannot parse details from volumeId", + testFunc: func(t *testing.T) { + driver := buildDriver(endpoint, nil, "", nil, false, true) + + req := &csi.DeleteVolumeRequest{ + VolumeId: "foobarbash", + } + + ctx := context.Background() + _, err := driver.DeleteVolume(ctx, req) + if err != nil { + t.Fatalf("Expected success but found: %v", err) + } + }, + }, { name: "Fail: DeleteAccessPoint access denied", testFunc: func(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.DeleteVolumeRequest{ VolumeId: volumeId, @@ -2708,7 +2815,7 @@ func TestDeleteVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.DeleteVolumeRequest{ VolumeId: volumeId, @@ -2729,7 +2836,7 @@ func TestDeleteVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.DeleteVolumeRequest{ VolumeId: "fs-abcd1234", @@ -2749,7 +2856,7 @@ func TestDeleteVolume(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + 
driver := buildDriver(endpoint, mockCloud, "", nil, false, false) secrets := map[string]string{} secrets["awsRoleArn"] = "arn:aws:iam::1234567890:role/EFSCrossAccountRole" @@ -2808,7 +2915,7 @@ func TestValidateVolumeCapabilities(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.ValidateVolumeCapabilitiesRequest{ VolumeId: volumeId, @@ -2835,7 +2942,7 @@ func TestValidateVolumeCapabilities(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.ValidateVolumeCapabilitiesRequest{ VolumeId: volumeId, @@ -2862,7 +2969,7 @@ func TestValidateVolumeCapabilities(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.ValidateVolumeCapabilitiesRequest{ VolumeCapabilities: []*csi.VolumeCapability{ @@ -2884,7 +2991,7 @@ func TestValidateVolumeCapabilities(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) req := &csi.ValidateVolumeCapabilitiesRequest{ VolumeId: volumeId, @@ -2898,6 +3005,20 @@ func TestValidateVolumeCapabilities(t *testing.T) { mockCtl.Finish() }, }, + { + name: "Fail: volumeId not provided", + testFunc: func(t *testing.T) { + driver := buildDriver(endpoint, nil, "", nil, false, true) + + req := &csi.DeleteVolumeRequest{} + + ctx := context.Background() + _, err := driver.DeleteVolume(ctx, req) + if err == nil { + t.Fatal("Expected failure but operation succeeded") + } + }, + }, } for _, tc := range testCases { @@ -2910,7 +3031,7 @@ func TestControllerGetCapabilities(t *testing.T) { mockCtl := gomock.NewController(t) mockCloud := mocks.NewMockCloud(mockCtl) - driver := buildDriver(endpoint, mockCloud, "", nil, false) + driver := buildDriver(endpoint, mockCloud, "", nil, false, false) ctx := context.Background() _, err := driver.ControllerGetCapabilities(ctx, &csi.ControllerGetCapabilitiesRequest{}) @@ -2927,11 +3048,11 @@ func verifyPathWhenUUIDIncluded(pathToVerify string, expectedPathWithoutUUID str return err == nil && doesPathMatchWithUuid } -func buildDriver(endpoint string, cloud cloud.Cloud, tags string, mounter Mounter, deleteAccessPointRootDir bool) *Driver { +func buildDriver(endpoint string, cloud cloud.Cloud, tags string, mounter Mounter, deleteAccessPointRootDir bool, deleteProvisionedDir bool) *Driver { return &Driver{ endpoint: endpoint, cloud: cloud, mounter: mounter, - provisioners: getProvisioners(cloud, mounter, parseTagsFromStr(tags), deleteAccessPointRootDir, false, &FakeOsClient{}, false), + provisioners: getProvisioners(cloud, mounter, parseTagsFromStr(tags), deleteAccessPointRootDir, false, &FakeOsClient{}, deleteProvisionedDir), } } diff --git a/pkg/driver/os_client.go b/pkg/driver/os_client.go index 6fa5ace80..435abd28c 100644 --- a/pkg/driver/os_client.go +++ b/pkg/driver/os_client.go @@ -87,3 +87,27 @@ func (o *FakeOsClient) RemoveAll(_ string) error { func (o *FakeOsClient) GetPerms(_ string) (os.FileMode, error) { return 0, nil } + +// Broken OsClient + +type 
BrokenOsClient struct{} + +func (o *BrokenOsClient) MkDirAllWithPerms(_ string, _ os.FileMode, _, _ int) error { + return &os.PathError{} +} + +func (o *BrokenOsClient) MkDirAllWithPermsNoOwnership(_ string, _ os.FileMode) error { + return &os.PathError{} +} + +func (o *BrokenOsClient) Remove(_ string) error { + return &os.PathError{} +} + +func (o *BrokenOsClient) RemoveAll(_ string) error { + return &os.PathError{} +} + +func (o *BrokenOsClient) GetPerms(path string) (os.FileMode, error) { + return 0, &os.PathError{} +} diff --git a/pkg/driver/provisioner_ap_test.go b/pkg/driver/provisioner_ap_test.go new file mode 100644 index 000000000..1ae1141a6 --- /dev/null +++ b/pkg/driver/provisioner_ap_test.go @@ -0,0 +1,640 @@ +package driver + +import ( + "context" + "errors" + "fmt" + //"reflect" + "testing" + + "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/golang/mock/gomock" + + "github.com/kubernetes-sigs/aws-efs-csi-driver/pkg/cloud" + "github.com/kubernetes-sigs/aws-efs-csi-driver/pkg/driver/mocks" +) + +func TestAccessPointProvisioner_Provision(t *testing.T) { + var ( + fsId = "fs-abcd1234" + volumeName = "volumeName" + capacityRange int64 = 5368709120 + stdVolCap = &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER, + }, + } + ) + tests := []struct { + name string + testFunc func(t *testing.T) + }{ + { + name: "Fail: File system does not exist", + testFunc: func(t *testing.T) { + mockCtl := gomock.NewController(t) + mockCloud := mocks.NewMockCloud(mockCtl) + ctx := context.Background() + mockCloud.EXPECT().DescribeFileSystem(gomock.Eq(ctx), gomock.Any()).Return(nil, cloud.ErrNotFound) + + req := &csi.CreateVolumeRequest{ + Name: volumeName, + VolumeCapabilities: []*csi.VolumeCapability{ + stdVolCap, + }, + CapacityRange: &csi.CapacityRange{ + RequiredBytes: capacityRange, + }, + Parameters: map[string]string{ + ProvisioningMode: "efs-ap", + FsId: fsId, + GidMin: "1000", + GidMax: "2000", + DirectoryPerms: "777", + Uid: "1000", + Gid: "1000", + }, + } + + apProv := AccessPointProvisioner{ + BaseProvisioner: BaseProvisioner{ + cloud: mockCloud, + }, + tags: map[string]string{}, + } + + _, err := apProv.Provision(ctx, req) + + if err == nil { + t.Fatal("Expected Provision to fail but it didn't") + } + mockCtl.Finish() + }, + }, + { + name: "Fail: DescribeFileSystem Access Denied", + testFunc: func(t *testing.T) { + mockCtl := gomock.NewController(t) + mockCloud := mocks.NewMockCloud(mockCtl) + ctx := context.Background() + mockCloud.EXPECT().DescribeFileSystem(gomock.Eq(ctx), gomock.Any()).Return(nil, cloud.ErrAccessDenied) + + req := &csi.CreateVolumeRequest{ + Name: volumeName, + VolumeCapabilities: []*csi.VolumeCapability{ + stdVolCap, + }, + CapacityRange: &csi.CapacityRange{ + RequiredBytes: capacityRange, + }, + Parameters: map[string]string{ + ProvisioningMode: "efs-ap", + FsId: fsId, + GidMin: "1000", + GidMax: "2000", + DirectoryPerms: "777", + Uid: "1000", + Gid: "1000", + }, + } + + apProv := AccessPointProvisioner{ + BaseProvisioner: BaseProvisioner{ + cloud: mockCloud, + }, + tags: map[string]string{}, + } + + _, err := apProv.Provision(ctx, req) + if err == nil { + t.Fatal("Expected Provision to fail but it didn't") + } + + mockCtl.Finish() + }, + }, + { + name: "Fail: Describe File system call fails", + testFunc: func(t *testing.T) { + mockCtl := gomock.NewController(t) + 
mockCloud := mocks.NewMockCloud(mockCtl) + ctx := context.Background() + mockCloud.EXPECT().DescribeFileSystem(gomock.Eq(ctx), gomock.Any()).Return(nil, errors.New("DescribeFileSystem failed")) + + req := &csi.CreateVolumeRequest{ + Name: volumeName, + VolumeCapabilities: []*csi.VolumeCapability{ + stdVolCap, + }, + CapacityRange: &csi.CapacityRange{ + RequiredBytes: capacityRange, + }, + Parameters: map[string]string{ + ProvisioningMode: "efs-ap", + FsId: fsId, + GidMin: "1000", + GidMax: "2000", + DirectoryPerms: "777", + Uid: "1000", + Gid: "1000", + }, + } + + apProv := AccessPointProvisioner{ + BaseProvisioner: BaseProvisioner{ + cloud: mockCloud, + }, + tags: map[string]string{}, + } + + _, err := apProv.Provision(ctx, req) + + if err == nil { + t.Fatal("Expected Provision to fail but it didn't") + } + mockCtl.Finish() + }, + }, + { + name: "Fail: Create Access Point call fails", + testFunc: func(t *testing.T) { + mockCtl := gomock.NewController(t) + mockCloud := mocks.NewMockCloud(mockCtl) + ctx := context.Background() + fileSystem := &cloud.FileSystem{ + FileSystemId: fsId, + } + + mockCloud.EXPECT().DescribeFileSystem(gomock.Eq(ctx), gomock.Any()).Return(fileSystem, nil) + mockCloud.EXPECT().CreateAccessPoint(gomock.Eq(ctx), gomock.Any(), gomock.Any()).Return(nil, errors.New("CreateAccessPoint call failed")) + + req := &csi.CreateVolumeRequest{ + Name: volumeName, + VolumeCapabilities: []*csi.VolumeCapability{ + stdVolCap, + }, + CapacityRange: &csi.CapacityRange{ + RequiredBytes: capacityRange, + }, + Parameters: map[string]string{ + ProvisioningMode: "efs-ap", + FsId: fsId, + GidMin: "1000", + GidMax: "2000", + DirectoryPerms: "777", + Uid: "1000", + Gid: "1000", + }, + } + + apProv := AccessPointProvisioner{ + BaseProvisioner: BaseProvisioner{ + cloud: mockCloud, + }, + tags: map[string]string{}, + } + + _, err := apProv.Provision(ctx, req) + + if err == nil { + t.Fatal("Expected Provision to fail but it didn't") + } + mockCtl.Finish() + }, + }, + { + name: "Fail: CreateAccessPoint Access Denied", + testFunc: func(t *testing.T) { + mockCtl := gomock.NewController(t) + mockCloud := mocks.NewMockCloud(mockCtl) + + ctx := context.Background() + fileSystem := &cloud.FileSystem{ + FileSystemId: fsId, + } + mockCloud.EXPECT().DescribeFileSystem(gomock.Eq(ctx), gomock.Any()).Return(fileSystem, nil) + mockCloud.EXPECT().CreateAccessPoint(gomock.Eq(ctx), gomock.Any(), gomock.Any()).Return(nil, cloud.ErrAccessDenied) + + req := &csi.CreateVolumeRequest{ + Name: volumeName, + VolumeCapabilities: []*csi.VolumeCapability{ + stdVolCap, + }, + CapacityRange: &csi.CapacityRange{ + RequiredBytes: capacityRange, + }, + Parameters: map[string]string{ + ProvisioningMode: "efs-ap", + FsId: fsId, + GidMin: "1000", + GidMax: "2000", + DirectoryPerms: "777", + Uid: "1000", + Gid: "1000", + }, + } + + apProv := AccessPointProvisioner{ + BaseProvisioner: BaseProvisioner{ + cloud: mockCloud, + }, + tags: map[string]string{}, + } + + _, err := apProv.Provision(ctx, req) + + if err == nil { + t.Fatal("Expected Provision to fail but it didn't") + } + mockCtl.Finish() + }, + }, + { + name: "Fail: Cannot assume role for x-account", + testFunc: func(t *testing.T) { + mockCtl := gomock.NewController(t) + mockCloud := mocks.NewMockCloud(mockCtl) + + secrets := map[string]string{} + secrets["awsRoleArn"] = "arn:aws:iam::1234567890:role/EFSCrossAccountRole" + + req := &csi.CreateVolumeRequest{ + Name: volumeName, + VolumeCapabilities: []*csi.VolumeCapability{ + stdVolCap, + }, + CapacityRange: &csi.CapacityRange{ 
+ RequiredBytes: capacityRange, + }, + Parameters: map[string]string{ + ProvisioningMode: "efs-ap", + FsId: fsId, + GidMin: "1000", + GidMax: "2000", + DirectoryPerms: "777", + AzName: "us-east-1a", + Uid: "1000", + Gid: "1000", + }, + Secrets: secrets, + } + + ctx := context.Background() + + apProv := AccessPointProvisioner{ + BaseProvisioner: BaseProvisioner{ + cloud: mockCloud, + }, + tags: map[string]string{}, + } + + _, err := apProv.Provision(ctx, req) + + if err == nil { + t.Fatal("Expected Provision to fail but it didn't") + } + + mockCtl.Finish() + }, + }, + } + for _, test := range tests { + t.Run(test.name, test.testFunc) + } +} + +func TestAccessPointProvisioner_Delete(t *testing.T) { + var ( + fsId = "fs-abcd1234" + apId = "fsap-abcd1234xyz987" + volumeId = fmt.Sprintf("%s::%s", fsId, apId) + ) + + tests := []struct { + name string + testFunc func(t *testing.T) + }{ + { + name: "Success: Setting deleteAccessPointRootDir causes rootDir to be deleted", + testFunc: func(t *testing.T) { + mockCtl := gomock.NewController(t) + mockCloud := mocks.NewMockCloud(mockCtl) + mockMounter := mocks.NewMockMounter(mockCtl) + + req := &csi.DeleteVolumeRequest{ + VolumeId: volumeId, + } + + accessPoint := &cloud.AccessPoint{ + AccessPointId: apId, + FileSystemId: fsId, + AccessPointRootDir: "", + CapacityGiB: 0, + } + + ctx := context.Background() + mockMounter.EXPECT().MakeDir(gomock.Any()).Return(nil) + mockMounter.EXPECT().Mount(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nil) + mockMounter.EXPECT().Unmount(gomock.Any()).Return(nil) + mockCloud.EXPECT().DescribeAccessPoint(gomock.Eq(ctx), gomock.Eq(apId)).Return(accessPoint, nil) + mockCloud.EXPECT().DeleteAccessPoint(gomock.Eq(ctx), gomock.Eq(apId)).Return(nil) + + apProv := AccessPointProvisioner{ + BaseProvisioner: BaseProvisioner{ + cloud: mockCloud, + mounter: mockMounter, + }, + tags: map[string]string{}, + deleteAccessPointRootDir: true, + } + + err := apProv.Delete(ctx, req) + + if err != nil { + t.Fatalf("Expected Delete to succeed but it failed: %v", err) + } + mockCtl.Finish() + }, + }, + { + name: "Success: If AccessPoint does not exist success is returned as no work needs to be done", + testFunc: func(t *testing.T) { + mockCtl := gomock.NewController(t) + mockCloud := mocks.NewMockCloud(mockCtl) + mockMounter := mocks.NewMockMounter(mockCtl) + + req := &csi.DeleteVolumeRequest{ + VolumeId: volumeId, + } + + ctx := context.Background() + mockCloud.EXPECT().DescribeAccessPoint(gomock.Eq(ctx), gomock.Eq(apId)).Return(nil, cloud.ErrNotFound) + + apProv := AccessPointProvisioner{ + BaseProvisioner: BaseProvisioner{ + cloud: mockCloud, + mounter: mockMounter, + }, + tags: map[string]string{}, + deleteAccessPointRootDir: true, + } + + err := apProv.Delete(ctx, req) + + if err != nil { + t.Fatalf("Expected Delete to succeed but it failed: %v", err) + } + mockCtl.Finish() + }, + }, + { + name: "Fail: Return error if AccessDenied error from AWS", + testFunc: func(t *testing.T) { + mockCtl := gomock.NewController(t) + mockCloud := mocks.NewMockCloud(mockCtl) + mockMounter := mocks.NewMockMounter(mockCtl) + + req := &csi.DeleteVolumeRequest{ + VolumeId: volumeId, + } + + ctx := context.Background() + mockCloud.EXPECT().DescribeAccessPoint(gomock.Eq(ctx), gomock.Eq(apId)).Return(nil, cloud.ErrAccessDenied) + + apProv := AccessPointProvisioner{ + BaseProvisioner: BaseProvisioner{ + cloud: mockCloud, + mounter: mockMounter, + }, + tags: map[string]string{}, + deleteAccessPointRootDir: true, + } + + err := apProv.Delete(ctx, 
req) + + if err == nil { + t.Fatal("Expected Delete to fail but it succeeded") + } + mockCtl.Finish() + }, + }, + { + name: "Fail: Return error if DescribeAccessPoints failed", + testFunc: func(t *testing.T) { + mockCtl := gomock.NewController(t) + mockCloud := mocks.NewMockCloud(mockCtl) + mockMounter := mocks.NewMockMounter(mockCtl) + + req := &csi.DeleteVolumeRequest{ + VolumeId: volumeId, + } + + ctx := context.Background() + mockCloud.EXPECT().DescribeAccessPoint(gomock.Eq(ctx), gomock.Eq(apId)).Return(nil, errors.New("Describe Access Point failed")) + + apProv := AccessPointProvisioner{ + BaseProvisioner: BaseProvisioner{ + cloud: mockCloud, + mounter: mockMounter, + }, + tags: map[string]string{}, + deleteAccessPointRootDir: true, + } + + err := apProv.Delete(ctx, req) + + if err == nil { + t.Fatal("Expected Delete to fail but it succeeded") + } + mockCtl.Finish() + }, + }, + { + name: "Fail: Fail to make directory for access point mount", + testFunc: func(t *testing.T) { + mockCtl := gomock.NewController(t) + mockCloud := mocks.NewMockCloud(mockCtl) + mockMounter := mocks.NewMockMounter(mockCtl) + + req := &csi.DeleteVolumeRequest{ + VolumeId: volumeId, + } + + accessPoint := &cloud.AccessPoint{ + AccessPointId: apId, + FileSystemId: fsId, + AccessPointRootDir: "", + CapacityGiB: 0, + } + + ctx := context.Background() + mockMounter.EXPECT().MakeDir(gomock.Any()).Return(errors.New("Failed to makeDir")) + mockCloud.EXPECT().DescribeAccessPoint(gomock.Eq(ctx), gomock.Eq(apId)).Return(accessPoint, nil) + + apProv := AccessPointProvisioner{ + BaseProvisioner: BaseProvisioner{ + cloud: mockCloud, + mounter: mockMounter, + }, + tags: map[string]string{}, + deleteAccessPointRootDir: true, + } + + err := apProv.Delete(ctx, req) + + if err == nil { + t.Fatal("Expected Delete to fail but it succeeded") + } + mockCtl.Finish() + }, + }, + { + name: "Fail: Fail to mount file system on directory for access point root directory removal", + testFunc: func(t *testing.T) { + mockCtl := gomock.NewController(t) + mockCloud := mocks.NewMockCloud(mockCtl) + mockMounter := mocks.NewMockMounter(mockCtl) + + req := &csi.DeleteVolumeRequest{ + VolumeId: volumeId, + } + + accessPoint := &cloud.AccessPoint{ + AccessPointId: apId, + FileSystemId: fsId, + AccessPointRootDir: "", + CapacityGiB: 0, + } + + ctx := context.Background() + mockMounter.EXPECT().MakeDir(gomock.Any()).Return(nil) + mockMounter.EXPECT().Mount(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(errors.New("Failed to mount")) + mockCloud.EXPECT().DescribeAccessPoint(gomock.Eq(ctx), gomock.Eq(apId)).Return(accessPoint, nil) + + apProv := AccessPointProvisioner{ + BaseProvisioner: BaseProvisioner{ + cloud: mockCloud, + mounter: mockMounter, + }, + tags: map[string]string{}, + deleteAccessPointRootDir: true, + } + + err := apProv.Delete(ctx, req) + + if err == nil { + t.Fatal("Expected Delete to fail but it succeeded") + } + mockCtl.Finish() + }, + }, + { + name: "Fail: Fail to unmount file system after access point root directory removal", + testFunc: func(t *testing.T) { + mockCtl := gomock.NewController(t) + mockCloud := mocks.NewMockCloud(mockCtl) + mockMounter := mocks.NewMockMounter(mockCtl) + + req := &csi.DeleteVolumeRequest{ + VolumeId: volumeId, + } + + accessPoint := &cloud.AccessPoint{ + AccessPointId: apId, + FileSystemId: fsId, + AccessPointRootDir: "", + CapacityGiB: 0, + } + + ctx := context.Background() + mockMounter.EXPECT().MakeDir(gomock.Any()).Return(nil) + mockMounter.EXPECT().Mount(gomock.Any(), gomock.Any(), 
gomock.Any(), gomock.Any()).Return(nil) + mockMounter.EXPECT().Unmount(gomock.Any()).Return(errors.New("Failed to unmount")) + mockCloud.EXPECT().DescribeAccessPoint(gomock.Eq(ctx), gomock.Eq(apId)).Return(accessPoint, nil) + + apProv := AccessPointProvisioner{ + BaseProvisioner: BaseProvisioner{ + cloud: mockCloud, + mounter: mockMounter, + }, + tags: map[string]string{}, + deleteAccessPointRootDir: true, + } + + err := apProv.Delete(ctx, req) + + if err == nil { + t.Fatal("Expected Delete to fail but it succeeded") + } + mockCtl.Finish() + }, + }, + { + name: "Fail: DeleteAccessPoint access denied", + testFunc: func(t *testing.T) { + mockCtl := gomock.NewController(t) + mockCloud := mocks.NewMockCloud(mockCtl) + + req := &csi.DeleteVolumeRequest{ + VolumeId: volumeId, + } + + ctx := context.Background() + mockCloud.EXPECT().DeleteAccessPoint(gomock.Eq(ctx), gomock.Eq(apId)).Return(cloud.ErrAccessDenied) + + apProv := AccessPointProvisioner{ + BaseProvisioner: BaseProvisioner{ + cloud: mockCloud, + }, + tags: map[string]string{}, + } + + err := apProv.Delete(ctx, req) + + if err == nil { + t.Fatal("Expected Delete to fail but it succeeded") + } + mockCtl.Finish() + }, + }, + { + name: "Fail: Cannot assume role for x-account", + testFunc: func(t *testing.T) { + mockCtl := gomock.NewController(t) + mockCloud := mocks.NewMockCloud(mockCtl) + + secrets := map[string]string{} + secrets["awsRoleArn"] = "arn:aws:iam::1234567890:role/EFSCrossAccountRole" + + req := &csi.DeleteVolumeRequest{ + VolumeId: volumeId, + Secrets: secrets, + } + + ctx := context.Background() + + apProv := AccessPointProvisioner{ + BaseProvisioner: BaseProvisioner{ + cloud: mockCloud, + }, + tags: map[string]string{}, + deleteAccessPointRootDir: true, + } + + err := apProv.Delete(ctx, req) + + if err == nil { + t.Fatal("Expected Delete to fail but it succeeded") + } + mockCtl.Finish() + }, + }, + } + + for _, test := range tests { + t.Run(test.name, test.testFunc) + } +} diff --git a/pkg/driver/provisioner_dir_test.go b/pkg/driver/provisioner_dir_test.go new file mode 100644 index 000000000..db065bf2e --- /dev/null +++ b/pkg/driver/provisioner_dir_test.go @@ -0,0 +1,615 @@ +package driver + +import ( + "context" + "errors" + "fmt" + "io" + "os" + "reflect" + "testing" + + "github.com/container-storage-interface/spec/lib/go/csi" + "github.com/golang/mock/gomock" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" + "k8s.io/mount-utils" + + "github.com/kubernetes-sigs/aws-efs-csi-driver/pkg/cloud" + "github.com/kubernetes-sigs/aws-efs-csi-driver/pkg/driver/mocks" +) + +const fileSystemId = "fs-123456789" + +func TestDirectoryProvisioner_Provision(t *testing.T) { + var ( + fsId = "fs-abcd1234" + volumeName = "volumeName" + capacityRange int64 = 5368709120 + stdVolCap = &csi.VolumeCapability{ + AccessType: &csi.VolumeCapability_Mount{ + Mount: &csi.VolumeCapability_MountVolume{}, + }, + AccessMode: &csi.VolumeCapability_AccessMode{ + Mode: csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER, + }, + } + ) + tests := []struct { + name string + testFunc func(t *testing.T) + }{ + { + name: "Success: Check path created is sensible", + testFunc: func(t *testing.T) { + mockCtl := gomock.NewController(t) + mockMounter := mocks.NewMockMounter(mockCtl) + mockMounter.EXPECT().MakeDir(gomock.Any()).Return(nil) + mockMounter.EXPECT().Mount(fsId, gomock.Any(), "efs", gomock.Any()).Return(nil) + mockMounter.EXPECT().Unmount(gomock.Any()).Return(nil) + + ctx := context.Background() + + req := &csi.CreateVolumeRequest{ + 
Name: volumeName,
+					VolumeCapabilities: []*csi.VolumeCapability{
+						stdVolCap,
+					},
+					CapacityRange: &csi.CapacityRange{
+						RequiredBytes: capacityRange,
+					},
+					Parameters: map[string]string{
+						ProvisioningMode: DirectoryMode,
+						FsId:             fsId,
+						GidMin:           "1000",
+						GidMax:           "2000",
+						DirectoryPerms:   "777",
+						BasePath:         "/dynamic",
+						Uid:              "1000",
+						Gid:              "1000",
+					},
+				}
+
+				dProv := DirectoryProvisioner{
+					BaseProvisioner: BaseProvisioner{
+						mounter: mockMounter,
+					},
+					osClient: &FakeOsClient{},
+				}
+
+				volume, err := dProv.Provision(ctx, req)
+
+				if err != nil {
+					t.Fatalf("Expected provision call to succeed but failed: %v", err)
+				}
+
+				expectedVolumeId := fmt.Sprintf("%s:/dynamic/%s", fsId, req.Name)
+				if volume.VolumeId != expectedVolumeId {
+					t.Fatalf("Expected volumeId to be %s but was %s", expectedVolumeId, volume.VolumeId)
+				}
+			},
+		},
+		{
+			name: "Fail: Return error for empty fsId",
+			testFunc: func(t *testing.T) {
+				mockCtl := gomock.NewController(t)
+				mockMounter := mocks.NewMockMounter(mockCtl)
+
+				ctx := context.Background()
+
+				req := &csi.CreateVolumeRequest{
+					Name: volumeName,
+					VolumeCapabilities: []*csi.VolumeCapability{
+						stdVolCap,
+					},
+					CapacityRange: &csi.CapacityRange{
+						RequiredBytes: capacityRange,
+					},
+					Parameters: map[string]string{
+						ProvisioningMode: DirectoryMode,
+						GidMin:           "1000",
+						GidMax:           "2000",
+						DirectoryPerms:   "777",
+						Uid:              "1000",
+						Gid:              "1000",
+					},
+				}
+
+				dProv := DirectoryProvisioner{
+					BaseProvisioner: BaseProvisioner{
+						mounter: mockMounter,
+					},
+				}
+
+				_, err := dProv.Provision(ctx, req)
+
+				if err == nil {
+					t.Fatal("Expected error but found none")
+				}
+				if status.Code(err) != codes.InvalidArgument {
+					t.Fatalf("Expected InvalidArgument error but instead got %v", err)
+				}
+			},
+		},
+		{
+			name: "Fail: Mounter cannot create target directory on node",
+			testFunc: func(t *testing.T) {
+				mockCtl := gomock.NewController(t)
+				mockMounter := mocks.NewMockMounter(mockCtl)
+				mockMounter.EXPECT().MakeDir(gomock.Any()).Return(
+					io.ErrUnexpectedEOF)
+
+				ctx := context.Background()
+
+				req := &csi.CreateVolumeRequest{
+					Name: volumeName,
+					VolumeCapabilities: []*csi.VolumeCapability{
+						stdVolCap,
+					},
+					CapacityRange: &csi.CapacityRange{
+						RequiredBytes: capacityRange,
+					},
+					Parameters: map[string]string{
+						ProvisioningMode: DirectoryMode,
+						FsId:             fsId,
+						GidMin:           "1000",
+						GidMax:           "2000",
+						DirectoryPerms:   "777",
+						Uid:              "1000",
+						Gid:              "1000",
+					},
+				}
+
+				dProv := DirectoryProvisioner{
+					BaseProvisioner: BaseProvisioner{
+						mounter: mockMounter,
+					},
+				}
+
+				_, err := dProv.Provision(ctx, req)
+
+				if err == nil {
+					t.Fatal("Expected error but found none")
+				}
+				// Pass if the error is an Internal status error or wraps the injected error.
+				if status.Code(err) != codes.Internal && !errors.Is(errors.Unwrap(err), io.ErrUnexpectedEOF) {
+					t.Fatalf("Expected mount error but instead got %v", err)
+				}
+			},
+		},
+		{
+			name: "Fail: Mounter cannot mount into target directory",
+			testFunc: func(t *testing.T) {
+				mockCtl := gomock.NewController(t)
+				mockMounter := mocks.NewMockMounter(mockCtl)
+				mockMounter.EXPECT().MakeDir(gomock.Any()).Return(nil)
+				mockMounter.EXPECT().Mount(fsId, gomock.Any(), "efs", gomock.Any()).Return(
+					mount.NewMountError(mount.HasFilesystemErrors, "Errors"))
+
+				ctx := context.Background()
+
+				req := &csi.CreateVolumeRequest{
+					Name: volumeName,
+					VolumeCapabilities: []*csi.VolumeCapability{
+						stdVolCap,
+					},
+					CapacityRange: &csi.CapacityRange{
+						RequiredBytes: capacityRange,
+					},
+					Parameters: map[string]string{
+						ProvisioningMode: DirectoryMode,
+						FsId:             fsId,
+						GidMin:           "1000",
+						GidMax:           "2000",
+						DirectoryPerms:   "777",
+						Uid:              "1000",
+						Gid:              "1000",
+					},
+				}
+
+				dProv := DirectoryProvisioner{
+					BaseProvisioner: BaseProvisioner{
+						mounter: mockMounter,
+					},
+				}
+
+				_, err := dProv.Provision(ctx, req)
+
+				if err == nil {
+					t.Fatal("Expected error but found none")
+				}
+				if status.Code(err) != codes.Internal && !errors.Is(errors.Unwrap(err), mount.MountError{}) {
+					t.Fatalf("Expected mount error but instead got %v", err)
+				}
+			},
+		},
+		{
+			name: "Fail: Could not create directory after mounting root",
+			testFunc: func(t *testing.T) {
+				mockCtl := gomock.NewController(t)
+				mockMounter := mocks.NewMockMounter(mockCtl)
+				mockMounter.EXPECT().MakeDir(gomock.Any()).Return(nil)
+				mockMounter.EXPECT().Mount(fsId, gomock.Any(), "efs", gomock.Any()).Return(nil)
+
+				ctx := context.Background()
+
+				req := &csi.CreateVolumeRequest{
+					Name: volumeName,
+					VolumeCapabilities: []*csi.VolumeCapability{
+						stdVolCap,
+					},
+					CapacityRange: &csi.CapacityRange{
+						RequiredBytes: capacityRange,
+					},
+					Parameters: map[string]string{
+						ProvisioningMode: DirectoryMode,
+						FsId:             fsId,
+						GidMin:           "1000",
+						GidMax:           "2000",
+						DirectoryPerms:   "777",
+						BasePath:         "/dynamic",
+						Uid:              "1000",
+						Gid:              "1000",
+					},
+				}
+
+				dProv := DirectoryProvisioner{
+					BaseProvisioner: BaseProvisioner{
+						mounter: mockMounter,
+					},
+					osClient: &BrokenOsClient{},
+				}
+
+				_, err := dProv.Provision(ctx, req)
+
+				if err == nil {
+					t.Fatal("Expected error but found none")
+				}
+				if status.Code(err) != codes.Internal && !errors.Is(errors.Unwrap(err), &os.PathError{}) {
+					t.Fatalf("Expected path error but instead got %v", err)
+				}
+			},
+		},
+		{
+			name: "Fail: Could not unmount root directory post creation",
+			testFunc: func(t *testing.T) {
+				mockCtl := gomock.NewController(t)
+				mockMounter := mocks.NewMockMounter(mockCtl)
+				mockMounter.EXPECT().MakeDir(gomock.Any()).Return(nil)
+				mockMounter.EXPECT().Mount(fsId, gomock.Any(), "efs", gomock.Any()).Return(nil)
+				mockMounter.EXPECT().Unmount(gomock.Any()).Return(mount.NewMountError(mount.FilesystemMismatch, "Error"))
+
+				ctx := context.Background()
+
+				req := &csi.CreateVolumeRequest{
+					Name: volumeName,
+					VolumeCapabilities: []*csi.VolumeCapability{
+						stdVolCap,
+					},
+					CapacityRange: &csi.CapacityRange{
+						RequiredBytes: capacityRange,
+					},
+					Parameters: map[string]string{
+						ProvisioningMode: DirectoryMode,
+						FsId:             fsId,
+						GidMin:           "1000",
+						GidMax:           "2000",
+						DirectoryPerms:   "777",
+						BasePath:         "/dynamic",
+						Uid:              "1000",
+						Gid:              "1000",
+					},
+				}
+
+				dProv := DirectoryProvisioner{
+					BaseProvisioner: BaseProvisioner{
+						mounter: mockMounter,
+					},
+					osClient: &FakeOsClient{},
+				}
+
+				_, err := dProv.Provision(ctx, req)
+
+				if err == nil {
+					t.Fatal("Expected error but found none")
+				}
+				if status.Code(err) != codes.Internal && !errors.Is(errors.Unwrap(err), mount.MountError{}) {
+					t.Fatalf("Expected mount error but instead got %v", err)
+				}
+			},
+		},
+		{
+			name: "Fail: Could not delete target directory once unmounted",
+			testFunc: func(t *testing.T) {
+				mockCtl := gomock.NewController(t)
+				mockMounter := mocks.NewMockMounter(mockCtl)
+				mockMounter.EXPECT().MakeDir(gomock.Any()).Return(nil)
+				mockMounter.EXPECT().Mount(fsId, gomock.Any(), "efs", gomock.Any()).Return(nil)
+
+				ctx := context.Background()
+
+				req := &csi.CreateVolumeRequest{
+					Name: volumeName,
+					VolumeCapabilities: []*csi.VolumeCapability{
+						stdVolCap,
+					},
+					CapacityRange: &csi.CapacityRange{
+						RequiredBytes: capacityRange,
+					},
+					Parameters: map[string]string{
+						ProvisioningMode: DirectoryMode,
+						FsId:             fsId,
+						GidMin:           "1000",
+						GidMax:           "2000",
+						DirectoryPerms:   "777",
+						BasePath:         "/dynamic",
+						Uid:              "1000",
+						Gid:              "1000",
+					},
+				}
+
+				dProv := DirectoryProvisioner{
+					BaseProvisioner: BaseProvisioner{
+						mounter: mockMounter,
+					},
+					osClient: &BrokenOsClient{},
+				}
+
+				_, err := dProv.Provision(ctx, req)
+
+				if err == nil {
+					t.Fatal("Expected error but found none")
+				}
+				if status.Code(err) != codes.Internal && !errors.Is(errors.Unwrap(err), &os.PathError{}) {
+					t.Fatalf("Expected path error but instead got %v", err)
+				}
+			},
+		},
+	}
+	for _, test := range tests {
+		t.Run(test.name, test.testFunc)
+	}
+}
+
+func TestDirectoryProvisioner_Delete(t *testing.T) {
+	var (
+		fsId     = "fs-abcd1234"
+		volumeId = fmt.Sprintf("%s:%s", fsId, "/dynamic/newDir")
+	)
+
+	tests := []struct {
+		name     string
+		testFunc func(t *testing.T)
+	}{
+		{
+			name: "Success: If retain directory is set nothing happens",
+			testFunc: func(t *testing.T) {
+				ctx := context.Background()
+
+				req := &csi.DeleteVolumeRequest{
+					VolumeId: volumeId,
+				}
+
+				dProv := DirectoryProvisioner{
+					deleteProvisionedDir: false,
+				}
+
+				err := dProv.Delete(ctx, req)
+
+				if err != nil {
+					t.Fatalf("Expected success but found %v", err)
+				}
+			},
+		},
+		{
+			name: "Success: If not retaining directory folder and contents are deleted",
+			testFunc: func(t *testing.T) {
+				ctx := context.Background()
+				mockCtl := gomock.NewController(t)
+				mockMounter := mocks.NewMockMounter(mockCtl)
+				mockMounter.EXPECT().MakeDir(gomock.Any()).Return(nil)
+				mockMounter.EXPECT().Mount(fsId, gomock.Any(), "efs", gomock.Any()).Return(nil)
+				mockMounter.EXPECT().Unmount(gomock.Any()).Return(nil)
+
+				req := &csi.DeleteVolumeRequest{
+					VolumeId: volumeId,
+				}
+
+				dProv := DirectoryProvisioner{
+					BaseProvisioner: BaseProvisioner{
+						mounter: mockMounter,
+					},
+					deleteProvisionedDir: true,
+					osClient:             &FakeOsClient{},
+				}
+
+				err := dProv.Delete(ctx, req)
+
+				if err != nil {
+					t.Fatalf("Expected success but found %v", err)
+				}
+			},
+		},
+		{
+			name: "Fail: Mounter cannot create target directory on node",
+			testFunc: func(t *testing.T) {
+				mockCtl := gomock.NewController(t)
+				mockMounter := mocks.NewMockMounter(mockCtl)
+				mockMounter.EXPECT().MakeDir(gomock.Any()).Return(
+					io.ErrUnexpectedEOF)
+
+				ctx := context.Background()
+
+				req := &csi.DeleteVolumeRequest{
+					VolumeId: volumeId,
+				}
+
+				dProv := DirectoryProvisioner{
+					BaseProvisioner: BaseProvisioner{
+						mounter: mockMounter,
+					},
+					deleteProvisionedDir: true,
+				}
+
+				err := dProv.Delete(ctx, req)
+
+				if err == nil {
+					t.Fatal("Expected error but found none")
+				}
+				if status.Code(err) != codes.Internal && !errors.Is(errors.Unwrap(err), io.ErrUnexpectedEOF) {
+					t.Fatalf("Expected mount error but instead got %v", err)
+				}
+			},
+		},
+		{
+			name: "Fail: Cannot delete contents of provisioned directory",
+			testFunc: func(t *testing.T) {
+				mockCtl := gomock.NewController(t)
+				mockMounter := mocks.NewMockMounter(mockCtl)
+				mockMounter.EXPECT().MakeDir(gomock.Any()).Return(nil)
+				mockMounter.EXPECT().Mount(fsId, gomock.Any(), "efs", gomock.Any()).Return(nil)
+				mockMounter.EXPECT().Unmount(gomock.Any()).Return(nil)
+
+				ctx := context.Background()
+
+				req := &csi.DeleteVolumeRequest{
+					VolumeId: volumeId,
+				}
+
+				dProv := DirectoryProvisioner{
+					BaseProvisioner: BaseProvisioner{
+						mounter: mockMounter,
+					},
+					deleteProvisionedDir: true,
+					osClient:             &BrokenOsClient{},
+				}
+
+				err := dProv.Delete(ctx, req)
+
+				if err == nil {
+					t.Fatal("Expected error but found none")
+				}
+				if status.Code(err) != codes.Internal && !errors.Is(errors.Unwrap(err), &os.PathError{}) {
+					t.Fatalf("Expected path error but instead got %v", err)
got %v", err) + } + }, + }, + { + name: "Fail: Cannot unmount directory after contents have been deleted", + testFunc: func(t *testing.T) { + mockCtl := gomock.NewController(t) + mockMounter := mocks.NewMockMounter(mockCtl) + mockMounter.EXPECT().MakeDir(gomock.Any()).Return(nil) + mockMounter.EXPECT().Mount(fsId, gomock.Any(), "efs", gomock.Any()).Return(nil) + mockMounter.EXPECT().Unmount(gomock.Any()).Return(mount.NewMountError(mount.HasFilesystemErrors, "Errors")) + + ctx := context.Background() + + req := &csi.DeleteVolumeRequest{ + VolumeId: volumeId, + } + + dProv := DirectoryProvisioner{ + BaseProvisioner: BaseProvisioner{ + mounter: mockMounter, + }, + deleteProvisionedDir: true, + osClient: &FakeOsClient{}, + } + + err := dProv.Delete(ctx, req) + + if err == nil { + t.Fatal("Expected error but found none") + } + if status.Code(err) != codes.Internal && errors.Is(errors.Unwrap(err), mount.MountError{}) { + t.Fatalf("Expected mount error but instead got %v", err) + } + }, + }, + { + name: "Fail: Cannot delete temporary directory after unmount", + testFunc: func(t *testing.T) { + mockCtl := gomock.NewController(t) + mockMounter := mocks.NewMockMounter(mockCtl) + mockMounter.EXPECT().MakeDir(gomock.Any()).Return(nil) + mockMounter.EXPECT().Mount(fsId, gomock.Any(), "efs", gomock.Any()).Return(nil) + mockMounter.EXPECT().Unmount(gomock.Any()).Return(nil) + + ctx := context.Background() + + req := &csi.DeleteVolumeRequest{ + VolumeId: volumeId, + } + + dProv := DirectoryProvisioner{ + BaseProvisioner: BaseProvisioner{ + mounter: mockMounter, + }, + deleteProvisionedDir: true, + osClient: &BrokenOsClient{}, + } + + err := dProv.Delete(ctx, req) + + if err == nil { + t.Fatal("Expected error but found none") + } + if status.Code(err) != codes.Internal && errors.Is(errors.Unwrap(err), &os.PathError{}) { + t.Fatalf("Expected path error but instead got %v", err) + } + }, + }, + } + + for _, test := range tests { + t.Run(test.name, test.testFunc) + } +} + +func TestDirectoryProvisioner_GetMountOptions_NoRoleArnGivesStandardOptions(t *testing.T) { + mockCtl := gomock.NewController(t) + mockCloud := mocks.NewMockCloud(mockCtl) + ctx := context.Background() + expectedOptions := []string{"tls", "iam"} + + options, _ := getMountOptions(ctx, mockCloud, fileSystemId, "", false) + + if !reflect.DeepEqual(options, expectedOptions) { + t.Fatalf("Expected returned options to be %v but was %v", expectedOptions, options) + } +} + +func TestDirectoryProvisioner_GetMountOptions_RoleArnAddsMountTargetIp(t *testing.T) { + mockCtl := gomock.NewController(t) + mockCloud := mocks.NewMockCloud(mockCtl) + fakeMountTarget := cloud.MountTarget{ + AZName: "foo", + AZId: "", + MountTargetId: "", + IPAddress: "8.8.8.8", + } + ctx := context.Background() + mockCloud.EXPECT().DescribeMountTargets(ctx, fileSystemId, "").Return(&fakeMountTarget, nil) + + expectedOptions := []string{"tls", "iam", MountTargetIp + "=" + fakeMountTarget.IPAddress} + + options, _ := getMountOptions(ctx, mockCloud, fileSystemId, "roleArn", false) + + if !reflect.DeepEqual(options, expectedOptions) { + t.Fatalf("Expected returned options to be %v but was %v", expectedOptions, options) + } +} + +func TestDirectoryProvisioner_GetMountOptions_RoleArnCross(t *testing.T) { + mockCtl := gomock.NewController(t) + mockCloud := mocks.NewMockCloud(mockCtl) + ctx := context.Background() + + expectedOptions := []string{"tls", "iam", CrossAccount} + + options, _ := getMountOptions(ctx, mockCloud, fileSystemId, "roleArn", true) + + if 
!reflect.DeepEqual(options, expectedOptions) { + t.Fatalf("Expected returned options to be %v but was %v", expectedOptions, options) + } +} diff --git a/pkg/driver/provisioner_test.go b/pkg/driver/provisioner_test.go new file mode 100644 index 000000000..21f513078 --- /dev/null +++ b/pkg/driver/provisioner_test.go @@ -0,0 +1,41 @@ +package driver + +import ( + "testing" + + "github.com/golang/mock/gomock" + + "github.com/kubernetes-sigs/aws-efs-csi-driver/pkg/driver/mocks" +) + +func TestProvisioner_GetCloud_NoRoleArnGivesOriginalObjectBack(t *testing.T) { + mockCtl := gomock.NewController(t) + mockCloud := mocks.NewMockCloud(mockCtl) + + actualCloud, _, _, err := getCloud(map[string]string{}, mockCloud, false) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if actualCloud != mockCloud { + t.Fatalf("Expected cloud object to be %v but was %v", mockCloud, actualCloud) + } + + mockCtl.Finish() +} + +func TestProvisioner_GetCloud_WithRoleArnGivesNewObject(t *testing.T) { + mockCtl := gomock.NewController(t) + mockCloud := mocks.NewMockCloud(mockCtl) + + actualCloud, _, _, err := getCloud(map[string]string{ + RoleArn: "arn:aws:iam::1234567890:role/EFSCrossAccountRole", + }, mockCloud, false) + if err != nil { + t.Fatalf("Unexpected error: %v", err) + } + if actualCloud == mockCloud { + t.Fatalf("Unexpected cloud object: %v", actualCloud) + } + + mockCtl.Finish() +} From 041ea4911776269249d9621704180281ba593182 Mon Sep 17 00:00:00 2001 From: Maxim Patlasov Date: Tue, 7 Jan 2025 16:54:01 -0800 Subject: [PATCH 5/6] Update charts, examples and documentation --- .../templates/controller-deployment.yaml | 1 + charts/aws-efs-csi-driver/values.yaml | 4 +- docs/README.md | 49 +++--- .../{ => access_points}/README.md | 5 +- .../{ => access_points}/specs/pod.yaml | 0 .../specs/storageclass.yaml | 0 .../directories/README.md | 150 ++++++++++++++++++ .../directories/specs/pod.yaml | 30 ++++ .../directories/specs/storageclass.yaml | 10 ++ hack/values.yaml | 6 +- hack/values_eksctl.yaml | 1 + 11 files changed, 228 insertions(+), 28 deletions(-) rename examples/kubernetes/dynamic_provisioning/{ => access_points}/README.md (97%) rename examples/kubernetes/dynamic_provisioning/{ => access_points}/specs/pod.yaml (100%) rename examples/kubernetes/dynamic_provisioning/{ => access_points}/specs/storageclass.yaml (100%) create mode 100644 examples/kubernetes/dynamic_provisioning/directories/README.md create mode 100644 examples/kubernetes/dynamic_provisioning/directories/specs/pod.yaml create mode 100644 examples/kubernetes/dynamic_provisioning/directories/specs/storageclass.yaml diff --git a/charts/aws-efs-csi-driver/templates/controller-deployment.yaml b/charts/aws-efs-csi-driver/templates/controller-deployment.yaml index 4753b16fa..d699da78d 100644 --- a/charts/aws-efs-csi-driver/templates/controller-deployment.yaml +++ b/charts/aws-efs-csi-driver/templates/controller-deployment.yaml @@ -79,6 +79,7 @@ spec: {{- end }} - --v={{ .Values.controller.logLevel }} - --delete-access-point-root-dir={{ hasKey .Values.controller "deleteAccessPointRootDir" | ternary .Values.controller.deleteAccessPointRootDir false }} + - --delete-provisioned-dir={{ hasKey .Values.controller "deleteProvisionedDir" | ternary .Values.controller.deleteProvisionedDir false }} env: - name: CSI_ENDPOINT value: unix:///var/lib/csi/sockets/pluginproxy/csi.sock diff --git a/charts/aws-efs-csi-driver/values.yaml b/charts/aws-efs-csi-driver/values.yaml index 640ec09eb..cc55ab48b 100644 --- a/charts/aws-efs-csi-driver/values.yaml +++ 
b/charts/aws-efs-csi-driver/values.yaml @@ -65,8 +65,10 @@ controller: # environment: prod # region: us-east-1 # Enable if you want the controller to also delete the - # path on efs when deleteing an access point + # path on efs when deleting an EFS access point deleteAccessPointRootDir: false + # Enable if you want the controller to delete any directories it also provisions + deleteProvisionedDir: false podAnnotations: {} podLabels: {} hostNetwork: false diff --git a/docs/README.md b/docs/README.md index aca51c2db..4902e8443 100644 --- a/docs/README.md +++ b/docs/README.md @@ -25,22 +25,27 @@ The following CSI interfaces are implemented: * Identity Service: GetPluginInfo, GetPluginCapabilities, Probe ### Storage Class Parameters for Dynamic Provisioning -| Parameters | Values | Default | Optional | Description | -|-----------------------|--------|-----------------|----------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| provisioningMode | efs-ap | | false | Type of volume provisioned by efs. Currently, Access Points are supported. | -| fileSystemId | | | false | File System under which access points are created. | -| directoryPerms | | | false | Directory permissions for [Access Point root directory](https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html#enforce-root-directory-access-point) creation. | -| uid | | | true | POSIX user Id to be applied for [Access Point root directory](https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html#enforce-root-directory-access-point) creation. | -| gid | | | true | POSIX group Id to be applied for [Access Point root directory](https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html#enforce-root-directory-access-point) creation. | -| gidRangeStart | | 50000 | true | Start range of the POSIX group Id to be applied for [Access Point root directory](https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html#enforce-root-directory-access-point) creation. Not used if uid/gid is set. | -| gidRangeEnd | | 7000000 | true | End range of the POSIX group Id. Not used if uid/gid is set. | -| basePath | | | true | Path under which access points for dynamic provisioning is created. If this parameter is not specified, access points are created under the root directory of the file system | -| subPathPattern | | `/${.PV.name}` | true | The template used to construct the subPath under which each of the access points created under Dynamic Provisioning. Can be made up of fixed strings and limited variables, is akin to the 'subPathPattern' variable on the [nfs-subdir-external-provisioner](https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner) chart. Supports `.PVC.name`,`.PVC.namespace` and `.PV.name` | -| ensureUniqueDirectory | | true | true | **NOTE: Only set this to false if you're sure this is the behaviour you want**.
Used when dynamic provisioning is enabled, if set to true, appends the a UID to the pattern specified in `subPathPattern` to ensure that access points will not accidentally point at the same directory. | -| az | | "" | true | Used for cross-account mount. `az` under storage class parameter is optional. If specified, mount target associated with the az will be used for cross-account mount. If not specified, a random mount target will be picked for cross account mount | -| reuseAccessPoint | | false | true | When set to true, it creates the Access Point client-token from the provided PVC name. So that the AccessPoint can be replicated from a different cluster if same PVC name and storageclass configuration are used. | +| Parameters | Values | Default | Optional | Description | +|-----------------------|----------------|-----------------|----------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| provisioningMode | efs-ap/efs-dir | | false | Type of volume provisioned by efs. `efs-ap` will provision EFS Access Points, while `efs-dir` will provision directories on the EFS instead. | +| fileSystemId | | | false | File System under which access points are created. | +| directoryPerms | | | false | Directory permissions for [Access Point root directory](https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html#enforce-root-directory-access-point) creation. | +| uid | | | true | POSIX user Id to be applied for [Access Point root directory](https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html#enforce-root-directory-access-point) creation. | +| gid | | | true | POSIX group Id to be applied for [Access Point root directory](https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html#enforce-root-directory-access-point) creation. | +| gidRangeStart | | 50000 | true | Start range of the POSIX group Id to be applied for [Access Point root directory](https://docs.aws.amazon.com/efs/latest/ug/efs-access-points.html#enforce-root-directory-access-point) creation. Not used if uid/gid is set. | +| gidRangeEnd | | 7000000 | true | End range of the POSIX group Id. Not used if uid/gid is set. | +| basePath | | | true | Path under which access points for dynamic provisioning is created. If this parameter is not specified, access points are created under the root directory of the file system | +| subPathPattern | | `/${.PV.name}` | true | The template used to construct the subPath under which each of the access points created under Dynamic Provisioning. Can be made up of fixed strings and limited variables, is akin to the 'subPathPattern' variable on the [nfs-subdir-external-provisioner](https://github.com/kubernetes-sigs/nfs-subdir-external-provisioner) chart. Supports `.PVC.name`,`.PVC.namespace` and `.PV.name` | +| ensureUniqueDirectory | | true | true | **NOTE: Only set this to false if you're sure this is the behaviour you want**.
Used when dynamic provisioning is enabled, if set to true, appends a UID to the pattern specified in `subPathPattern` to ensure that access points will not accidentally point at the same directory. |
+| az                    |                | ""              | true     | Used for cross-account mount. `az` under storage class parameter is optional. If specified, mount target associated with the az will be used for cross-account mount. If not specified, a random mount target will be picked for cross account mount                                                                                                                                            |
+| reuseAccessPoint      |                | false           | true     | When set to true, it creates the Access Point client-token from the provided PVC name, so that the AccessPoint can be replicated from a different cluster if the same PVC name and storageclass configuration are used.                                                                                                                                                                         |
 
 **Note**
+* We suggest the following settings for each provisioning mode (see the example `StorageClass` sketch below):
+  * `efs-ap` -> `directoryPerms: 770`, with `gidRangeStart` and `gidRangeEnd` set to sensible values
+  * `efs-dir` -> `directoryPerms: 777` and omit `gidRangeStart` and `gidRangeEnd`
+  * This ensures that important system utilities will still be able to access the directories created
+  * All parameters are available in all modes, but think carefully about the values you choose; otherwise directories could be created that no one can read or write to.
 * Custom Posix group Id range for Access Point root directory must include both `gidRangeStart` and `gidRangeEnd` parameters. These parameters are optional only if both are omitted. If you specify one, the other becomes mandatory.
 * When using a custom Posix group ID range, there is a possibility for the driver to run out of available POSIX group Ids. We suggest ensuring custom group ID range is large enough or create a new storage class with a new file system to provision additional volumes.
 * `az` under storage class parameter is not be confused with efs-utils mount option `az`. The `az` mount option is used for cross-az mount or efs one zone file system mount within the same aws account as the cluster.
@@ -160,7 +165,7 @@ You can find previous efs-csi-driver versions' images from [here](https://galler
 
 ### Features
 
 * Static provisioning - Amazon EFS file system needs to be created manually first, then it could be mounted inside container as a persistent volume (PV) using the driver.
-* Dynamic provisioning - Uses a persistent volume claim (PVC) to dynamically provision a persistent volume (PV). On Creating a PVC, kuberenetes requests Amazon EFS to create an Access Point in a file system which will be used to mount the PV.
+* Dynamic provisioning - Uses a persistent volume claim (PVC) to dynamically provision a persistent volume (PV). On Creating a PVC, kubernetes requests Amazon EFS to create an Access Point or a Directory in the file system which will be used to mount the PV.
 * Mount Options - Mount options can be specified in the persistent volume (PV) or storage class for dynamic provisioning to define how the volume should be mounted.
 * Encryption of data in transit - Amazon EFS file systems are mounted with encryption in transit enabled by default in the master branch version of the driver.
 * Cross account mount - Amazon EFS file systems from different aws accounts can be mounted from an Amazon EKS cluster.
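To make the suggested `efs-ap` settings concrete, here is a minimal `StorageClass` sketch; the file system ID is a placeholder and the GID range values are only illustrative, so substitute values that fit your environment:

```yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: efs-sc
provisioner: efs.csi.aws.com
parameters:
  provisioningMode: efs-ap
  fileSystemId: fs-582a03f3   # placeholder: use your own file system ID
  directoryPerms: "770"       # suggested permissions for efs-ap
  gidRangeStart: "50000"      # illustrative; this is also the default if omitted
  gidRangeEnd: "7000000"      # illustrative; this is also the default if omitted
```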
@@ -353,11 +358,12 @@ Enabling the vol-metrics-opt-in parameter activates the gathering of inode and d ### Container Arguments for deployment(controller) -| Parameters | Values | Default | Optional | Description | -|-----------------------------|--------|---------|----------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| delete-access-point-root-dir| | false | true | Opt in to delete access point root directory by DeleteVolume. By default, DeleteVolume will delete the access point behind Persistent Volume and deleting access point will not delete the access point root directory or its contents. | -| adaptive-retry-mode | | true | true | Opt out to use standard sdk retry mode for EFS API calls. By default, Driver will use adaptive mode for the sdk retry configuration which heavily rate limits EFS API requests to reduce throttling if throttling is observed. | -| tags | | | true | Space separated key:value pairs which will be added as tags for Amazon EFS resources. For example, '--tags=name:efs-tag-test date:Jan24' | +| Parameters | Values | Default | Optional | Description | +|------------------------------|--------|---------|----------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| delete-access-point-root-dir | | false | true | Opt in to delete access point root directory by DeleteVolume. By default, DeleteVolume will delete the access point behind Persistent Volume and deleting access point will not delete the access point root directory or its contents. | +| delete-provisioned-dir | | false | true | Opt in to delete any provisioned directories and their contents. By default, DeleteVolume will not delete the directory behind Persistent Volume. | +| adaptive-retry-mode | | true | true | Opt out to use standard sdk retry mode for EFS API calls. By default, Driver will use adaptive mode for the sdk retry configuration which heavily rate limits EFS API requests to reduce throttling if throttling is observed. | +| tags | | | true | Space separated key:value pairs which will be added as tags for Amazon EFS resources. 
For example, '--tags=name:efs-tag-test date:Jan24' | ### Upgrading the Amazon EFS CSI Driver @@ -391,7 +397,8 @@ Before following the examples, you need to: #### Example links * [Static provisioning](../examples/kubernetes/static_provisioning/README.md) -* [Dynamic provisioning](../examples/kubernetes/dynamic_provisioning/README.md) +* [Dynamic provisioning with Access Points](../examples/kubernetes/dynamic_provisioning/access_points/README.md) +* [Dynamic provisioning with Directories](../examples/kubernetes/dynamic_provisioning/directories/README.md) * [Encryption in transit](../examples/kubernetes/encryption_in_transit/README.md) * [Accessing the file system from multiple pods](../examples/kubernetes/multiple_pods/README.md) * [Consume Amazon EFS in StatefulSets](../examples/kubernetes/statefulset/README.md) diff --git a/examples/kubernetes/dynamic_provisioning/README.md b/examples/kubernetes/dynamic_provisioning/access_points/README.md similarity index 97% rename from examples/kubernetes/dynamic_provisioning/README.md rename to examples/kubernetes/dynamic_provisioning/access_points/README.md index 888a36191..808835d97 100644 --- a/examples/kubernetes/dynamic_provisioning/README.md +++ b/examples/kubernetes/dynamic_provisioning/access_points/README.md @@ -24,7 +24,7 @@ This example requires Kubernetes 1.17 or later and a driver version of 1.2.0 or 2. Download a `StorageClass` manifest for Amazon EFS. ```sh - curl -O https://raw.githubusercontent.com/kubernetes-sigs/aws-efs-csi-driver/master/examples/kubernetes/dynamic_provisioning/specs/storageclass.yaml + curl -O https://raw.githubusercontent.com/kubernetes-sigs/aws-efs-csi-driver/master/examples/kubernetes/dynamic_provisioning/access_points/specs/storageclass.yaml ``` 3. Edit [the file](./specs/storageclass.yaml). Find the following line, and replace the value for `fileSystemId` with your file system ID. @@ -33,7 +33,6 @@ This example requires Kubernetes 1.17 or later and a driver version of 1.2.0 or fileSystemId: fs-582a03f3 ``` Modify the other values as needed: - * `provisioningMode` - The type of volume to be provisioned by Amazon EFS. Currently, only access point based provisioning is supported (`efs-ap`). * `fileSystemId` - The file system under which the access point is created. * `directoryPerms` - The directory permissions of the root directory created by the access point. * `gidRangeStart` (Optional) - The starting range of the Posix group ID to be applied onto the root directory of the access point. The default value is `50000`. @@ -57,7 +56,7 @@ This example requires Kubernetes 1.17 or later and a driver version of 1.2.0 or 1. Download a manifest that deploys a Pod and a PVC. ```sh - curl -O https://raw.githubusercontent.com/kubernetes-sigs/aws-efs-csi-driver/master/examples/kubernetes/dynamic_provisioning/specs/pod.yaml + curl -O https://raw.githubusercontent.com/kubernetes-sigs/aws-efs-csi-driver/master/examples/kubernetes/dynamic_provisioning/access_points/specs/pod.yaml ``` 2. Deploy the Pod with a sample app and the PVC used by the Pod. 
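For reference, the controller arguments from the table earlier map onto the Helm chart's values. A hedged sketch of a values file that opts in to directory cleanup, using only keys that appear in this patch's `values.yaml` (the tag keys mirror the commented examples in that file):

```yaml
controller:
  # Existing flag: also delete the access point root directory on DeleteVolume
  deleteAccessPointRootDir: false
  # Added by this patch: delete provisioned directories on DeleteVolume
  deleteProvisionedDir: true
  tags:
    environment: prod
    region: us-east-1
```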
diff --git a/examples/kubernetes/dynamic_provisioning/specs/pod.yaml b/examples/kubernetes/dynamic_provisioning/access_points/specs/pod.yaml
similarity index 100%
rename from examples/kubernetes/dynamic_provisioning/specs/pod.yaml
rename to examples/kubernetes/dynamic_provisioning/access_points/specs/pod.yaml
diff --git a/examples/kubernetes/dynamic_provisioning/specs/storageclass.yaml b/examples/kubernetes/dynamic_provisioning/access_points/specs/storageclass.yaml
similarity index 100%
rename from examples/kubernetes/dynamic_provisioning/specs/storageclass.yaml
rename to examples/kubernetes/dynamic_provisioning/access_points/specs/storageclass.yaml
diff --git a/examples/kubernetes/dynamic_provisioning/directories/README.md b/examples/kubernetes/dynamic_provisioning/directories/README.md
new file mode 100644
index 000000000..e6be82186
--- /dev/null
+++ b/examples/kubernetes/dynamic_provisioning/directories/README.md
@@ -0,0 +1,150 @@
+## Dynamic Provisioning
+**Important**
+You can't use dynamic provisioning with Fargate nodes.
+
+This example shows how to create a dynamically provisioned volume backed by a directory on the file system, create a Persistent Volume Claim (PVC) for it, and consume it from a pod.
+
+**Prerequisite**
+This example requires Kubernetes 1.17 or later and a driver version of 1.5.x or later.
+
+1. Create a storage class for Amazon EFS.
+
+    1. Retrieve your Amazon EFS file system ID. You can find this in the Amazon EFS console, or use the following AWS CLI command.
+
+        ```sh
+        aws efs describe-file-systems --query "FileSystems[*].FileSystemId" --output text
+        ```
+
+        The example output is as follows.
+
+        ```
+        fs-582a03f3
+        ```
+
+    2. Download a `StorageClass` manifest for Amazon EFS.
+
+        ```sh
+        curl -O https://raw.githubusercontent.com/kubernetes-sigs/aws-efs-csi-driver/master/examples/kubernetes/dynamic_provisioning/directories/specs/storageclass.yaml
+        ```
+
+    3. Edit [the file](./specs/storageclass.yaml). Find the following line, and replace the value for `fileSystemId` with your file system ID.
+
+        ```
+        fileSystemId: fs-582a03f3
+        ```
+
+        Modify the other values as needed:
+        * `fileSystemId` - The file system under which the directory is created.
+        * `directoryPerms` - The permissions of the directory created on the file system.
+        * `basePath` (Optional) - The path on the file system under which the provisioned directories are created. If the path isn't provided, directories are created under the root of the file system.
+
+    4. Deploy the storage class.
+
+        ```sh
+        kubectl apply -f storageclass.yaml
+        ```
+
+2. Test automatic provisioning by deploying a Pod that makes use of the PVC:
+
+    1. Download a manifest that deploys a Pod and a PVC.
+
+        ```sh
+        curl -O https://raw.githubusercontent.com/kubernetes-sigs/aws-efs-csi-driver/master/examples/kubernetes/dynamic_provisioning/directories/specs/pod.yaml
+        ```
+
+    2. Deploy the Pod with a sample app and the PVC used by the Pod.
+
+        ```sh
+        kubectl apply -f pod.yaml
+        ```
+
+3. Determine the names of the Pods running the controller.
+
+    ```sh
+    kubectl get pods -n kube-system | grep efs-csi-controller
+    ```
+
+    The example output is as follows.
+
+    ```
+    efs-csi-controller-74ccf9f566-q5989   3/3     Running   0          40m
+    efs-csi-controller-74ccf9f566-wswg9   3/3     Running   0          40m
+    ```
+
+4. After a few seconds, you can observe the controller picking up the change \(edited for readability\). Replace `74ccf9f566-q5989` with a value from one of the Pods in your output from the previous command.
+
+    ```sh
+    kubectl logs efs-csi-controller-74ccf9f566-q5989 \
+        -n kube-system \
+        -c csi-provisioner \
+        --tail 10
+    ```
+
+    The example output is as follows.
+
+    ```
+    [...]
+    1 controller.go:737] successfully created PV pvc-5983ffec-96cf-40c1-9cd6-e5686ca84eca for PVC efs-claim...
+    ```
+
+    If you don't see the previous output, run the previous command using one of the other controller Pods.
+
+5. Confirm that a persistent volume was created with a status of `Bound` to a `PersistentVolumeClaim`:
+
+    ```sh
+    kubectl get pv
+    ```
+
+    The example output is as follows.
+
+    ```
+    NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM               STORAGECLASS   REASON   AGE
+    pvc-5983ffec-96cf-40c1-9cd6-e5686ca84eca   20Gi       RWX            Delete           Bound    default/efs-claim   efs-sc                  7m57s
+    ```
+
+6. View details about the `PersistentVolumeClaim` that was created.
+
+    ```sh
+    kubectl get pvc
+    ```
+
+    The example output is as follows.
+
+    ```
+    NAME        STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
+    efs-claim   Bound    pvc-5983ffec-96cf-40c1-9cd6-e5686ca84eca   20Gi       RWX            efs-sc         9m7s
+    ```
+
+7. View the sample app Pod's status until the `STATUS` becomes `Running`.
+
+    ```sh
+    kubectl get pods -o wide
+    ```
+
+    The example output is as follows.
+
+    ```
+    NAME      READY   STATUS    RESTARTS   AGE   IP               NODE                                           NOMINATED NODE   READINESS GATES
+    efs-app   1/1     Running   0          10m   192.168.78.156   ip-192-168-73-191.region-code.compute.internal
+    ```
+**Note**
+If a Pod doesn't have an IP address listed, make sure that you added a mount target for the subnet that your node is in \(as described at the end of [Create an Amazon EFS file system](#efs-create-filesystem)\). Otherwise the Pod won't leave `ContainerCreating` status. When an IP address is listed, it may take a few minutes for a Pod to reach the `Running` status.
+
+8. Confirm that the data is written to the volume.
+
+    ```sh
+    kubectl exec efs-app -- bash -c "cat data/out"
+    ```
+
+    The example output is as follows.
+
+    ```
+    [...]
+    Tue Mar 23 14:29:16 UTC 2021
+    Tue Mar 23 14:29:21 UTC 2021
+    Tue Mar 23 14:29:26 UTC 2021
+    Tue Mar 23 14:29:31 UTC 2021
+    [...]
+    ```
+
+9. \(Optional\) Terminate the Amazon EKS node that your Pod is running on and wait for the Pod to be re\-scheduled. Alternately, you can delete the Pod and redeploy it. Complete the previous step again, confirming that the output includes the previous output.
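Under the hood, a volume provisioned in `efs-dir` mode carries a volume handle of the form `<file-system-id>:<path>`, where the path is the directory created under `basePath`. An abbreviated, illustrative PV for the example above (your PV name and IDs will differ):

```yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pvc-5983ffec-96cf-40c1-9cd6-e5686ca84eca
spec:
  storageClassName: efs-sc
  csi:
    driver: efs.csi.aws.com
    # <file-system-id>:<basePath>/<pv-name>
    volumeHandle: fs-582a03f3:/dynamic_provisioning/pvc-5983ffec-96cf-40c1-9cd6-e5686ca84eca
```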
diff --git a/examples/kubernetes/dynamic_provisioning/directories/specs/pod.yaml b/examples/kubernetes/dynamic_provisioning/directories/specs/pod.yaml new file mode 100644 index 000000000..522a690e1 --- /dev/null +++ b/examples/kubernetes/dynamic_provisioning/directories/specs/pod.yaml @@ -0,0 +1,30 @@ +--- +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: efs-claim +spec: + accessModes: + - ReadWriteMany + storageClassName: efs-sc + resources: + requests: + storage: 5Gi +--- +apiVersion: v1 +kind: Pod +metadata: + name: efs-app +spec: + containers: + - name: app + image: centos + command: ["/bin/sh"] + args: ["-c", "while true; do echo $(date -u) >> /data/out; sleep 5; done"] + volumeMounts: + - name: persistent-storage + mountPath: /data + volumes: + - name: persistent-storage + persistentVolumeClaim: + claimName: efs-claim \ No newline at end of file diff --git a/examples/kubernetes/dynamic_provisioning/directories/specs/storageclass.yaml b/examples/kubernetes/dynamic_provisioning/directories/specs/storageclass.yaml new file mode 100644 index 000000000..d9c048c84 --- /dev/null +++ b/examples/kubernetes/dynamic_provisioning/directories/specs/storageclass.yaml @@ -0,0 +1,10 @@ +kind: StorageClass +apiVersion: storage.k8s.io/v1 +metadata: + name: efs-sc +provisioner: efs.csi.aws.com +parameters: + provisioningMode: efs-dir + fileSystemId: fs-92107410 + directoryPerms: "700" + basePath: "/dynamic_provisioning" # optional diff --git a/hack/values.yaml b/hack/values.yaml index f863f750e..246788080 100644 --- a/hack/values.yaml +++ b/hack/values.yaml @@ -1,8 +1,8 @@ controller: create: true logLevel: 5 + serviceAccount: + create: true + deleteProvisionedDir: true node: logLevel: 5 -serviceAccount: - controller: - create: true diff --git a/hack/values_eksctl.yaml b/hack/values_eksctl.yaml index f99eec38d..d18e1b1df 100644 --- a/hack/values_eksctl.yaml +++ b/hack/values_eksctl.yaml @@ -2,6 +2,7 @@ controller: logLevel: 5 serviceAccount: create: false # let eksctl create it + deleteProvisionedDir: true node: logLevel: 5 serviceAccount: From ad2a4069be0f69de51e9b48604d55106bac36125 Mon Sep 17 00:00:00 2001 From: Maxim Patlasov Date: Tue, 7 Jan 2025 17:11:34 -0800 Subject: [PATCH 6/6] Add e2e tests for "efs-dir" provisioning mode. 
---
 test/e2e/e2e.go | 94 +++++++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 91 insertions(+), 3 deletions(-)

diff --git a/test/e2e/e2e.go b/test/e2e/e2e.go
index 38f29b025..97903845c 100644
--- a/test/e2e/e2e.go
+++ b/test/e2e/e2e.go
@@ -3,6 +3,8 @@ package e2e
 import (
 	"context"
 	"fmt"
+	e2epv "k8s.io/kubernetes/test/e2e/framework/pv"
+	e2evolume "k8s.io/kubernetes/test/e2e/framework/volume"
 	"os"
 	"strconv"
 	"strings"
@@ -416,6 +418,93 @@ var _ = ginkgo.Describe("[efs-csi] EFS CSI", func() {
 		}
 	})
 
+	createProvisionedDirectory := func(f *framework.Framework, basePath string, pvcName string) (*v1.PersistentVolumeClaim, *storagev1.StorageClass) {
+		immediateBinding := storagev1.VolumeBindingImmediate
+		sc := storageframework.GetStorageClass("efs.csi.aws.com", map[string]string{
+			"provisioningMode": "efs-dir",
+			"fileSystemId":     FileSystemId,
+			"basePath":         basePath,
+		}, &immediateBinding, f.Namespace.Name)
+		sc, err := f.ClientSet.StorageV1().StorageClasses().Create(context.TODO(), sc, metav1.CreateOptions{})
+		framework.ExpectNoError(err, "creating dynamic provisioning storage class")
+		framework.Logf("Created StorageClass %s", sc.Name)
+		pvc := makeEFSPVC(f.Namespace.Name, pvcName, sc.Name)
+		pvc, err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Create(context.TODO(), pvc, metav1.CreateOptions{})
+		err = e2epv.WaitForPersistentVolumeClaimPhase(context.TODO(), v1.ClaimBound, f.ClientSet, f.Namespace.Name, pvc.Name,
+			time.Second*5, time.Minute)
+		framework.ExpectNoError(err, "waiting for pv to be provisioned and bound")
+		pvc, err = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Get(context.TODO(), pvc.Name, metav1.GetOptions{})
+
+		framework.Logf("Created PVC %s, bound to PV %s by dynamic provisioning", pvc.Name, pvc.Spec.VolumeName)
+		return pvc, sc
+	}
+
+	ginkgo.It("should create a directory with the correct permissions when in directory provisioning mode", func() {
+		basePath := "dynamic_provisioning"
+		dynamicPvc, sc := createProvisionedDirectory(f, basePath, "directory-pvc-1")
+		defer func() {
+			err := f.ClientSet.StorageV1().StorageClasses().Delete(context.TODO(), sc.Name, metav1.DeleteOptions{})
+			framework.ExpectNoError(err, "removing provisioned StorageClass")
+			framework.Logf("Deleted StorageClass %s", sc.Name)
+		}()
+
+		pvc, pv, err := createEFSPVCPV(f.ClientSet, f.Namespace.Name, "root-dir-pvc-create", "/", map[string]string{})
+		framework.ExpectNoError(err, "creating root mounted pv, pvc to check")
+		defer func() {
+			_ = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Delete(context.TODO(), pvc.Name, metav1.DeleteOptions{})
+			_ = f.ClientSet.CoreV1().PersistentVolumes().Delete(context.TODO(), pv.Name, metav1.DeleteOptions{})
+		}()
+
+		podSpec := e2epod.MakePod(f.Namespace.Name, nil, []*v1.PersistentVolumeClaim{pvc}, admissionapi.LevelBaseline, "")
+		podSpec.Spec.RestartPolicy = v1.RestartPolicyNever
+		pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), podSpec, metav1.CreateOptions{})
+		framework.ExpectNoError(err, "creating pod")
+		err = e2epod.WaitForPodRunningInNamespace(context.TODO(), f.ClientSet, pod)
+		framework.ExpectNoError(err, "waiting for pod to start running")
+
+		provisionedPath := fmt.Sprintf("/mnt/volume1/%s/%s", basePath, dynamicPvc.Spec.VolumeName)
+
+		perms, _, err := e2evolume.PodExec(f, pod, "stat -c \"%a\" "+provisionedPath)
+		framework.ExpectNoError(err, "running stat in /mnt/volume1 to get folder permissions")
+		framework.Logf("Perms Output: %s", perms)
+		framework.ExpectEqual(perms, "777", "Checking File Permissions of mounted folder")
+
+		_ = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{})
+	})
+
+	ginkgo.It("should delete a directory provisioned in directory provisioning mode", func() {
+		basePath := "dynamic_provisioning_delete"
+		pvc, sc := createProvisionedDirectory(f, basePath, "directory-pvc-2")
+		defer func() {
+			err := f.ClientSet.StorageV1().StorageClasses().Delete(context.TODO(), sc.Name, metav1.DeleteOptions{})
+			framework.ExpectNoError(err, "removing provisioned StorageClass")
+			framework.Logf("Deleted StorageClass %s", sc.Name)
+		}()
+		volumeName := pvc.Spec.VolumeName
+
+		err := f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Delete(context.TODO(), pvc.Name,
+			metav1.DeleteOptions{})
+		framework.ExpectNoError(err, "deleting pvc")
+
+		pvc, pv, err := createEFSPVCPV(f.ClientSet, f.Namespace.Name, "root-dir-pvc-delete", "/", map[string]string{})
+		framework.ExpectNoError(err, "creating root mounted pv, pvc to check")
+		defer func() {
+			_ = f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Delete(context.TODO(), pvc.Name, metav1.DeleteOptions{})
+			_ = f.ClientSet.CoreV1().PersistentVolumes().Delete(context.TODO(), pv.Name, metav1.DeleteOptions{})
+		}()
+
+		podSpec := e2epod.MakePod(f.Namespace.Name, nil, []*v1.PersistentVolumeClaim{pvc}, admissionapi.LevelBaseline, "")
+		podSpec.Spec.RestartPolicy = v1.RestartPolicyNever
+		pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(context.TODO(), podSpec, metav1.CreateOptions{})
+		framework.ExpectNoError(err, "creating pod")
+		err = e2epod.WaitForPodRunningInNamespace(context.TODO(), f.ClientSet, pod)
+		framework.ExpectNoError(err, "waiting for pod to start running")
+
+		e2evolume.VerifyExecInPodFail(f, pod, "test -d /mnt/volume1/"+basePath+"/"+volumeName, 1)
+
+		_ = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(context.TODO(), pod.Name, metav1.DeleteOptions{})
+	})
+
 	})
 })
 
@@ -476,7 +565,7 @@ func createEFSPVCPV(c clientset.Interface, namespace, name, path string, volumeA
 }
 
 func makeEFSPVCPV(namespace, name, path string, volumeAttributes map[string]string) (*v1.PersistentVolumeClaim, *v1.PersistentVolume) {
-	pvc := makeEFSPVC(namespace, name)
+	pvc := makeEFSPVC(namespace, name, "")
 	pv := makeEFSPV(name, path, volumeAttributes)
 	pvc.Spec.VolumeName = pv.Name
 	pv.Spec.ClaimRef = &v1.ObjectReference{
@@ -486,8 +575,7 @@ func makeEFSPVCPV(namespace, name, path string, volumeAttri
 	return pvc, pv
 }
 
-func makeEFSPVC(namespace, name string) *v1.PersistentVolumeClaim {
-	storageClassName := ""
+func makeEFSPVC(namespace, name string, storageClassName string) *v1.PersistentVolumeClaim {
 	return &v1.PersistentVolumeClaim{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: name,