diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index eede911d..5f303e0a 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -11,7 +11,6 @@ # Bharadwaj Sathavalli (bharadwajrs-dell) # Boya Murthy (boyamurthy) # Utkarsh Dubey (delldubey) -# Emily Kinuthia (EmilyKatdell) # Matt Schmaelzle (mjsdell) # Nida Taranum (nidtara) # Prasanna Muthukumaraswamy (prablr79) @@ -22,4 +21,4 @@ # Sushma T S (tssushma) # for all files -* @atye @bharadwajrs-dell @boyamurthy @delldubey @EmilyKatdell @mjsdell @nidtara @prablr79 @santhoshhs10 @SinuChacko @shanmydell @shaynafinocchiaro @tssushma +* @atye @bharadwajrs-dell @boyamurthy @delldubey @mjsdell @nidtara @prablr79 @santhoshhs10 @SinuChacko @shanmydell @shaynafinocchiaro @tssushma diff --git a/csireverseproxy/go.mod b/csireverseproxy/go.mod index 26a95f53..61fe1cae 100644 --- a/csireverseproxy/go.mod +++ b/csireverseproxy/go.mod @@ -3,7 +3,7 @@ module revproxy/v2 go 1.22 require ( - github.com/dell/gopowermax/v2 v2.6.0 + github.com/dell/gopowermax/v2 v2.6.1-0.20240603105557-59f78ebc1075 github.com/fsnotify/fsnotify v1.4.9 github.com/gorilla/mux v1.7.3 github.com/kubernetes-csi/csi-lib-utils v0.9.1 diff --git a/csireverseproxy/go.sum b/csireverseproxy/go.sum index 4cb9a2ca..43c5b5fc 100644 --- a/csireverseproxy/go.sum +++ b/csireverseproxy/go.sum @@ -84,8 +84,8 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfc github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/dell/gopowermax/v2 v2.6.0 h1:MyenUGEt1EkGoBRwC1nYAC43yBd9LsowIIqiHOMqq5o= -github.com/dell/gopowermax/v2 v2.6.0/go.mod h1:Z/DqRsmKztpvgkWnMzm/aHBvdbnoTfpzYhpsSQnLX7k= +github.com/dell/gopowermax/v2 v2.6.1-0.20240603105557-59f78ebc1075 h1:7GpPo1UrO2hJV2+H2EXPL6MJJ8IS37GkiJuuXAaqwa0= +github.com/dell/gopowermax/v2 v2.6.1-0.20240603105557-59f78ebc1075/go.mod h1:Z/DqRsmKztpvgkWnMzm/aHBvdbnoTfpzYhpsSQnLX7k= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= diff --git a/csireverseproxy/main_test.go b/csireverseproxy/main_test.go index 169ee7ae..9c5d9a0b 100644 --- a/csireverseproxy/main_test.go +++ b/csireverseproxy/main_test.go @@ -441,4 +441,13 @@ func TestSAHTTPRequest(t *testing.T) { return } fmt.Printf("RESPONSE_BODY: %s\n", resp) + + // make a request for performance + path = utils.Prefix + "/performance/Array/keys" + resp, err = doHTTPRequest(standAloneServer.Port, path) + if err != nil { + t.Error(err.Error()) + return + } + fmt.Printf("RESPONSE_BODY: %s\n", resp) } diff --git a/csireverseproxy/pkg/standaloneproxy/standaloneproxy.go b/csireverseproxy/pkg/standaloneproxy/standaloneproxy.go index 76813908..d1615f4a 100644 --- a/csireverseproxy/pkg/standaloneproxy/standaloneproxy.go +++ b/csireverseproxy/pkg/standaloneproxy/standaloneproxy.go @@ -392,6 +392,10 @@ func (revProxy *StandAloneProxy) GetRouter() http.Handler { router.HandleFunc(utils.Prefix+"/{version}/system/symmetrix", revProxy.ifNoSymIDInvoke(revProxy.ServeSymmetrix)) router.HandleFunc(utils.Prefix+"/{version}/system/version", revProxy.ifNoSymIDInvoke(revProxy.ServeVersions)) 
router.HandleFunc(utils.Prefix+"/version", revProxy.ifNoSymIDInvoke(revProxy.ServeVersions)) + // performance + router.HandleFunc(utils.Prefix+"/performance/Array/keys", revProxy.ifNoSymIDInvoke(revProxy.ServePerformance)) + router.HandleFunc(utils.Prefix+"/performance/Volume/metrics", revProxy.ifNoSymIDInvoke(revProxy.ServeVolumePerformance)) + router.HandleFunc(utils.Prefix+"/performance/file/filesystem/metrics", revProxy.ifNoSymIDInvoke(revProxy.ServeFSPerformance)) // Snapshot router.HandleFunc(utils.Prefix+"/{version}/replication/capabilities/symmetrix", revProxy.ifNoSymIDInvoke(revProxy.ServeReplicationCapabilities)) @@ -516,6 +520,70 @@ func (revProxy *StandAloneProxy) ServeVersions(res http.ResponseWriter, req *htt } } +// ServePerformance - handler function for the performance endpoint +func (revProxy *StandAloneProxy) ServePerformance(res http.ResponseWriter, req *http.Request) { + symIDs, err := revProxy.getAuthorisedArrays(res, req) + if err != nil { + return + } + for _, symID := range symIDs { + _, err := revProxy.getResponseIfAuthorised(res, req, symID) + if err != nil { + log.Errorf("Authorisation step fails for: (%s) symID with error (%s)", symID, err.Error()) + } + } +} + +// ServeVolumePerformance - handler function for the performance endpoint +func (revProxy *StandAloneProxy) ServeVolumePerformance(res http.ResponseWriter, req *http.Request) { + reqParam := new(types.VolumeMetricsParam) + decoder := json.NewDecoder(req.Body) + if err := decoder.Decode(reqParam); err != nil { + log.Errorf("Decoding fails for mertics req for volume: %s", err.Error()) + } + resp, err := revProxy.getResponseIfAuthorised(res, req, reqParam.SystemID) + if err != nil { + log.Errorf("Authorisation step fails for: (%s) symID with error (%s)", reqParam.SystemID, err.Error()) + } + defer resp.Body.Close() + err = utils.IsValidResponse(resp) + if err != nil { + log.Errorf("Get performace metrics step fails for: (%s) symID with error (%s)", reqParam.SystemID, err.Error()) + } else { + metricsIterator := new(types.VolumeMetricsIterator) + if err := json.NewDecoder(resp.Body).Decode(metricsIterator); err != nil { + utils.WriteHTTPError(res, "decoding error: "+err.Error(), 400) + log.Errorf("decoding error: %s", err.Error()) + } + utils.WriteHTTPResponse(res, metricsIterator) + } +} + +// ServeFSPerformance - handler function for the performance endpoint +func (revProxy *StandAloneProxy) ServeFSPerformance(res http.ResponseWriter, req *http.Request) { + reqParam := new(types.FileSystemMetricsParam) + decoder := json.NewDecoder(req.Body) + if err := decoder.Decode(reqParam); err != nil { + log.Errorf("Decoding fails for mertics req for volume: %s", err.Error()) + } + resp, err := revProxy.getResponseIfAuthorised(res, req, reqParam.SystemID) + if err != nil { + log.Errorf("Authorisation step fails for: (%s) symID with error (%s)", reqParam.SystemID, err.Error()) + } + defer resp.Body.Close() + err = utils.IsValidResponse(resp) + if err != nil { + log.Errorf("Get performace metrics step fails for: (%s) symID with error (%s)", reqParam.SystemID, err.Error()) + } else { + metricsIterator := new(types.FileSystemMetricsIterator) + if err := json.NewDecoder(resp.Body).Decode(metricsIterator); err != nil { + utils.WriteHTTPError(res, "decoding error: "+err.Error(), 400) + log.Errorf("decoding error: %s", err.Error()) + } + utils.WriteHTTPResponse(res, metricsIterator) + } +} + // ServeIterator - handler function for volume iterator endpoint func (revProxy *StandAloneProxy) ServeIterator(res 
http.ResponseWriter, req *http.Request) { vars := mux.Vars(req) diff --git a/go.mod b/go.mod index bd75e91d..17074286 100644 --- a/go.mod +++ b/go.mod @@ -2,6 +2,8 @@ module github.com/dell/csi-powermax/v2 go 1.22 +toolchain go1.22.0 + require ( github.com/akutz/goof v0.1.2 github.com/container-storage-interface/spec v1.6.0 @@ -10,21 +12,24 @@ require ( github.com/cucumber/messages-go/v10 v10.0.3 github.com/dell/dell-csi-extensions/common v1.4.0 github.com/dell/dell-csi-extensions/migration v1.4.0 + github.com/dell/dell-csi-extensions/podmon v1.4.0 github.com/dell/dell-csi-extensions/replication v1.7.0 github.com/dell/gobrick v1.10.1 github.com/dell/gocsi v1.10.0 github.com/dell/gofsutil v1.15.0 github.com/dell/goiscsi v1.9.0 - github.com/dell/gopowermax/v2 v2.6.0 + github.com/dell/gopowermax/v2 v2.6.1-0.20240603105557-59f78ebc1075 github.com/fsnotify/fsnotify v1.4.9 + github.com/gorilla/mux v1.7.3 github.com/kubernetes-csi/csi-lib-utils v0.7.0 github.com/sirupsen/logrus v1.9.3 github.com/spf13/viper v1.7.1 github.com/stretchr/testify v1.7.1 github.com/vmware/govmomi v0.29.0 - golang.org/x/net v0.23.0 + golang.org/x/net v0.17.0 google.golang.org/grpc v1.57.1 - google.golang.org/protobuf v1.33.0 + google.golang.org/protobuf v1.32.0 + k8s.io/api v0.20.0 k8s.io/apimachinery v0.20.0 k8s.io/client-go v0.20.0 ) @@ -36,16 +41,16 @@ require ( github.com/cucumber/messages-go/v16 v16.0.1 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/dell/gonvme v1.7.0 // indirect + github.com/evanphx/json-patch v4.9.0+incompatible // indirect github.com/go-logr/logr v0.2.0 // indirect github.com/godbus/dbus/v5 v5.0.4 // indirect github.com/gofrs/uuid v4.4.0+incompatible // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect github.com/golang/mock v1.6.0 // indirect - github.com/golang/protobuf v1.5.4 // indirect + github.com/golang/protobuf v1.5.3 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/googleapis/gnostic v0.4.1 // indirect - github.com/gorilla/mux v1.7.3 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect github.com/hashicorp/go-memdb v1.3.4 // indirect github.com/hashicorp/golang-lru v0.5.4 // indirect @@ -58,6 +63,7 @@ require ( github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.1 // indirect github.com/pelletier/go-toml v1.2.0 // indirect + github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.11.0 // indirect github.com/spf13/afero v1.2.2 // indirect @@ -71,11 +77,11 @@ require ( go.uber.org/atomic v1.7.0 // indirect go.uber.org/multierr v1.6.0 // indirect go.uber.org/zap v1.17.0 // indirect - golang.org/x/crypto v0.21.0 // indirect + golang.org/x/crypto v0.17.0 // indirect golang.org/x/oauth2 v0.7.0 // indirect golang.org/x/sync v0.1.0 // indirect - golang.org/x/sys v0.18.0 // indirect - golang.org/x/term v0.18.0 // indirect + golang.org/x/sys v0.15.0 // indirect + golang.org/x/term v0.15.0 // indirect golang.org/x/text v0.14.0 // indirect golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e // indirect google.golang.org/appengine v1.6.7 // indirect @@ -86,7 +92,6 @@ require ( gopkg.in/ini.v1 v1.51.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/api v0.20.0 // indirect k8s.io/klog v1.0.0 // indirect k8s.io/klog/v2 v2.4.0 // indirect k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd // 
indirect diff --git a/go.sum b/go.sum index 72617557..10e23865 100644 --- a/go.sum +++ b/go.sum @@ -107,6 +107,8 @@ github.com/dell/dell-csi-extensions/common v1.4.0 h1:vZcKvr5EEZa4Gh2JRHSMR2xAES4 github.com/dell/dell-csi-extensions/common v1.4.0/go.mod h1:TEVaeIg7Yk1d1HorzFGiHyJhtaIPuSZc/gn1wZM0HhM= github.com/dell/dell-csi-extensions/migration v1.4.0 h1:3KINn0kRW58YsyFdeEqVz02BtXYvDL7TXK5SngKUKcA= github.com/dell/dell-csi-extensions/migration v1.4.0/go.mod h1:HeC8UO4P7SMSv6yvAM9u6RJSaPn5wcKGyO+/6fN2OuQ= +github.com/dell/dell-csi-extensions/podmon v1.4.0 h1:BejEC8JbPRZK1Rov1xyce9WbIq+vh+cLCdlCzuMkM10= +github.com/dell/dell-csi-extensions/podmon v1.4.0/go.mod h1:mn5T3eHo+1uGCfykZE5yrlgFQkD3UMndT24RIQ2xb/o= github.com/dell/dell-csi-extensions/replication v1.7.0 h1:kBIN91grXev54CyRFB17nVwFUSagG5y1RZB/4WWbATc= github.com/dell/dell-csi-extensions/replication v1.7.0/go.mod h1:gYaWz/MpCxaXxzXYdXaJqYA7QXIyg4OaFT3vff6d46Y= github.com/dell/gobrick v1.10.1 h1:YabNLVQstz1iKYUyhXEIr/y/xTF1T5W5Wmtjn0dxQdU= @@ -119,8 +121,8 @@ github.com/dell/goiscsi v1.9.0 h1:VvMHbAO4vk80oc/TAbQPYlxysscCqVBW78GyPoUxgik= github.com/dell/goiscsi v1.9.0/go.mod h1:NI/W/0O1UrMW2zVdMxy4z395Jn0r7utH6RQDFSZiFyQ= github.com/dell/gonvme v1.7.0 h1:ztJFhKQehZjfaoNv+hTbGbdhLWCAhPE44k1v7x5o2c0= github.com/dell/gonvme v1.7.0/go.mod h1:ajbuF+fswq+ty2tRTG5FN4ecIMJsG7aDu/bkMynTKAs= -github.com/dell/gopowermax/v2 v2.6.0 h1:MyenUGEt1EkGoBRwC1nYAC43yBd9LsowIIqiHOMqq5o= -github.com/dell/gopowermax/v2 v2.6.0/go.mod h1:Z/DqRsmKztpvgkWnMzm/aHBvdbnoTfpzYhpsSQnLX7k= +github.com/dell/gopowermax/v2 v2.6.1-0.20240603105557-59f78ebc1075 h1:7GpPo1UrO2hJV2+H2EXPL6MJJ8IS37GkiJuuXAaqwa0= +github.com/dell/gopowermax/v2 v2.6.1-0.20240603105557-59f78ebc1075/go.mod h1:Z/DqRsmKztpvgkWnMzm/aHBvdbnoTfpzYhpsSQnLX7k= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= @@ -214,8 +216,6 @@ github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaS github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= -github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= @@ -491,8 +491,8 @@ golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= -golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= +golang.org/x/crypto 
v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= +golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -557,8 +557,8 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs= -golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg= +golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -621,11 +621,11 @@ golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= -golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.15.0 h1:h48lPFYpsTvQJZF4EKyI4aLHaev3CxivZmv7yZig9pc= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= -golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= +golang.org/x/term v0.15.0 h1:y/Oo/a/q3IXu26lQgl04j/gjuBDOBlx7X6Om1j2CPW4= +golang.org/x/term v0.15.0/go.mod h1:BDl952bC7+uMoWR75FIrCDx79TPU9oHkTZ9yRbYOrX0= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -753,8 +753,8 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf 
v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I=
+google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
diff --git a/k8smock/k8sutils_mock.go b/k8smock/k8sutils_mock.go
new file mode 100644
index 00000000..562fdb5e
--- /dev/null
+++ b/k8smock/k8sutils_mock.go
@@ -0,0 +1,55 @@
+/*
+ Copyright © 2021 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+     http://www.apache.org/licenses/LICENSE-2.0
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package k8smock
+
+import (
+	"strings"
+
+	kubernetes "k8s.io/client-go/kubernetes/fake"
+)
+
+var mockUtils *MockUtils
+
+// MockUtils - mock kubernetes utils
+type MockUtils struct {
+	KubernetesClient *kubernetes.Clientset
+}
+
+// Init - initializes the mock k8s utils
+func Init() *MockUtils {
+	if mockUtils != nil {
+		return mockUtils
+	}
+	kubernetesClient := kubernetes.NewSimpleClientset()
+	mockUtils = &MockUtils{
+		KubernetesClient: kubernetesClient,
+	}
+	return mockUtils
+}
+
+// GetNodeLabels is a mock implementation of GetNodeLabels
+func (m *MockUtils) GetNodeLabels(_ string) (map[string]string, error) {
+	// the real implementation fetches the node object from the API; the mock returns nothing
+	return nil, nil
+}
+
+// GetNodeIPs is a mock implementation of GetNodeIPs
+func (m *MockUtils) GetNodeIPs(nodeID string) string {
+	nodeElem := strings.Split(nodeID, "-")
+	if len(nodeElem) < 2 {
+		return ""
+	}
+	return nodeElem[1]
+}
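A minimal sketch of how this mock might be exercised in a test, assuming the package resolves under the module path from go.mod; the node-ID convention (name-IP) mirrors the GetNodeIPs split logic above:

    package k8smock_test

    import (
        "testing"

        "github.com/dell/csi-powermax/v2/k8smock"
    )

    func TestGetNodeIPs(t *testing.T) {
        utils := k8smock.Init()
        // "node1-127.0.0.2" splits on "-", so the mock returns the IP portion
        if ip := utils.GetNodeIPs("node1-127.0.0.2"); ip != "127.0.0.2" {
            t.Errorf("expected 127.0.0.2, got %q", ip)
        }
    }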
diff --git a/k8sutils/k8sutils.go b/k8sutils/k8sutils.go
index 947db632..4157900a 100644
--- a/k8sutils/k8sutils.go
+++ b/k8sutils/k8sutils.go
@@ -20,6 +20,12 @@ import (
 	"context"
 	"fmt"
 	"os"
+	"strings"
+
+	v12 "k8s.io/api/core/v1"
+	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	log "github.com/sirupsen/logrus"

 	"github.com/kubernetes-csi/csi-lib-utils/leaderelection"
 	"k8s.io/client-go/kubernetes"
@@ -32,17 +38,54 @@ type leaderElection interface {
 	WithNamespace(namespace string)
 }

-// CreateKubeClientSet - Returns kubeclient set
-func CreateKubeClientSet(kubeconfig string) (*kubernetes.Clientset, error) {
-	var clientset *kubernetes.Clientset
-	if kubeconfig != "" {
-		// use the current context in kubeconfig
-		config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
+// UtilsInterface - interface which provides helper methods related to k8s
+type UtilsInterface interface {
+	GetNodeLabels(string) (map[string]string, error)
+	GetNodeIPs(string) string
+}
+
+// K8sUtils stores the k8s client and its configuration
+type K8sUtils struct {
+	KubernetesClient *KubernetesClient
+}
+
+var k8sUtils *K8sUtils
+
+// KubernetesClient - client connection
+type KubernetesClient struct {
+	ClientSet *kubernetes.Clientset
+}
+
+// Init - initializes the k8s client, reusing the cached instance on repeat calls
+func Init(kubeConfig string) (*K8sUtils, error) {
+	if k8sUtils != nil {
+		return k8sUtils, nil
+	}
+	kubeClient, err := CreateKubeClientSet(kubeConfig)
+	if err != nil {
+		log.Errorf("failed to create kube client. error: %s", err.Error())
+		return nil, err
+	}
+	// assign (not shadow) the package-level cache so subsequent calls reuse it
+	k8sUtils = &K8sUtils{
+		KubernetesClient: &KubernetesClient{
+			ClientSet: kubeClient,
+		},
+	}
+	return k8sUtils, nil
+}
+
+// CreateKubeClientSet - Returns kubeClient set
+func CreateKubeClientSet(kubeConfig string) (*kubernetes.Clientset, error) {
+	var clientSet *kubernetes.Clientset
+	if kubeConfig != "" {
+		// use the current context in kubeConfig
+		config, err := clientcmd.BuildConfigFromFlags("", kubeConfig)
 		if err != nil {
 			return nil, err
 		}
-		// create the clientset
-		clientset, err = kubernetes.NewForConfig(config)
+		// create the clientSet
+		clientSet, err = kubernetes.NewForConfig(config)
 		if err != nil {
 			return nil, err
 		}
@@ -51,21 +94,52 @@ func CreateKubeClientSet(kubeconfig string) (*kubernetes.Clientset, error) {
 		if err != nil {
 			return nil, err
 		}
-		// creates the clientset
-		clientset, err = kubernetes.NewForConfig(config)
+		// creates the clientSet
+		clientSet, err = kubernetes.NewForConfig(config)
 		if err != nil {
 			return nil, err
 		}
 	}
-	return clientset, nil
+	return clientSet, nil
 }

 // LeaderElection ...
-func LeaderElection(clientset *kubernetes.Clientset, lockName string, namespace string, runFunc func(ctx context.Context)) {
-	le := leaderelection.NewLeaderElection(clientset, lockName, runFunc)
+func LeaderElection(clientSet *kubernetes.Clientset, lockName string, namespace string, runFunc func(ctx context.Context)) {
+	le := leaderelection.NewLeaderElection(clientSet, lockName, runFunc)
 	le.WithNamespace(namespace)
 	if err := le.Run(); err != nil {
 		_, _ = fmt.Fprintf(os.Stderr, "failed to initialize leader election: %v", err)
 		os.Exit(1)
 	}
 }
+
+// GetNodeLabels returns the labels of the given node
+func (c *K8sUtils) GetNodeLabels(nodeFullName string) (map[string]string, error) {
+	// access the API to fetch the node object
+	node, err := c.KubernetesClient.ClientSet.CoreV1().Nodes().Get(context.TODO(), nodeFullName, v1.GetOptions{})
+	if err != nil {
+		return nil, err
+	}
+	log.Debugf("Node %s details\n", node)
+
+	return node.Labels, nil
+}
+
+// GetNodeIPs returns the cluster-internal IP of the node object
+func (c *K8sUtils) GetNodeIPs(nodeID string) string {
+	// access the API to list node objects
+	nodeList, err := c.KubernetesClient.ClientSet.CoreV1().Nodes().List(context.TODO(), v1.ListOptions{})
+	if err != nil {
+		return ""
+	}
+	for _, node := range nodeList.Items {
+		if strings.Contains(node.Name, nodeID) {
+			for _, addr := range node.Status.Addresses {
+				if addr.Type == v12.NodeInternalIP {
+					return addr.Address
+				}
+			}
+		}
+	}
+	return ""
+}
diff --git a/main.go b/main.go
index 8e88fac0..8a2e8fcc 100644
--- a/main.go
+++ b/main.go
@@ -48,13 +48,13 @@ func main() {
 	} else {
 		driverName := strings.Replace(service.Name, ".", "-", -1)
 		lockName := fmt.Sprintf("driver-%s", driverName)
-		k8sclientset, err := k8sutils.CreateKubeClientSet(*kubeconfig)
+		k8sClientSet, err := k8sutils.CreateKubeClientSet(*kubeconfig)
 		if err != nil {
 			_, _ = fmt.Fprintf(os.Stderr, "failed to initialize leader election: %v", err)
 			os.Exit(1)
 		}
 		// Attempt to become leader and start the driver
-		k8sutils.LeaderElection(k8sclientset, lockName, *leaderElectionNamespace, run)
+		k8sutils.LeaderElection(k8sClientSet, lockName, *leaderElectionNamespace, run)
 	}
 }
diff --git a/service/controller.go b/service/controller.go
index 4acf0812..ed11018d 100644
--- a/service/controller.go
+++ b/service/controller.go
@@ -1501,9 +1501,9 @@ func (s *service) validateVolSize(ctx context.Context, cr *csi.CapacityRange, sy
 			"bad capacity: requested minimum size (%d bytes) is greater than the maximum available capacity (%d bytes)", minSizeBytes, maxAvailBytes)
 	}
 	if minSizeBytes < MinVolumeSizeBytes {
-		return 0, status.Errorf(
-			codes.OutOfRange,
-			"bad capacity: requested minimum size (%d bytes) is less than the minimum volume size (%d bytes)", minSizeBytes, MinVolumeSizeBytes)
+		log.Warningf("bad capacity: requested size (%d bytes) is less than the minimum volume size (%d bytes) supported by PowerMax", minSizeBytes, MinVolumeSizeBytes)
+		log.Warning("Proceeding with the minimum volume size supported by the PowerMax array")
+		minSizeBytes = MinVolumeSizeBytes
 	}
 	if maxSizeBytes < minSizeBytes {
 		return 0, status.Errorf(
@@ -2250,7 +2250,9 @@ func (s *service) IsNodeISCSI(ctx context.Context, symID, nodeID string, pmaxCli
 // GetVolumeByID - Takes a CSI volume ID and checks for its existence on array
 // along with matching with the volume identifier. Returns back the volume name
 // on array, device ID, volume structure
-func (s *service) GetVolumeByID(ctx context.Context, volID string, pmaxClient pmax.Pmax) (string, string, *types.Volume, error) {
+func (s *service) GetVolumeByID(ctx context.Context,
+	volID string, pmaxClient pmax.Pmax,
+) (string, string, *types.Volume, error) {
 	// parse the volume and get the array serial and volume ID
 	volName, symID, devID, _, _, err := s.parseCsiID(volID)
 	if err != nil {
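The validateVolSize hunk above changes behavior: an undersized request is no longer rejected with codes.OutOfRange but clamped up to the array minimum. A minimal sketch of the new logic, with MinVolumeSizeBytes standing in for the driver's real constant:

    // Illustrative only: the clamp now applied by validateVolSize.
    // MinVolumeSizeBytes here is a stand-in for the driver's real constant.
    const MinVolumeSizeBytes int64 = 50 * 1024 * 1024

    func clampMinSize(minSizeBytes int64) int64 {
        if minSizeBytes < MinVolumeSizeBytes {
            // previously: return a codes.OutOfRange error; now: warn and proceed
            return MinVolumeSizeBytes
        }
        return minSizeBytes
    }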
diff --git a/service/csi_ctrl_to_node_connectivity.go b/service/csi_ctrl_to_node_connectivity.go
new file mode 100644
index 00000000..7e1a374c
--- /dev/null
+++ b/service/csi_ctrl_to_node_connectivity.go
@@ -0,0 +1,90 @@
+/*
+ Copyright © 2023-2024 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+     http://www.apache.org/licenses/LICENSE-2.0
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package service
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"io"
+	"net/http"
+	"time"
+
+	log "github.com/sirupsen/logrus"
+)
+
+// ArrayConnectivityStatus - status of the array probe
+type ArrayConnectivityStatus struct {
+	LastSuccess int64 `json:"lastSuccess"` // timestamp of the last successful connectivity check
+	LastAttempt int64 `json:"lastAttempt"` // timestamp of the last attempted connectivity check
+}
+
+const (
+	// Timeout for making http requests
+	Timeout = time.Second * 5
+)
+
+// QueryArrayStatus makes an API call to the specified url to retrieve the connection status
+func (s *service) QueryArrayStatus(ctx context.Context, url string) (bool, error) {
+	defer func() {
+		if err := recover(); err != nil {
+			log.Println("panic occurred in QueryArrayStatus:", err)
+		}
+	}()
+	client := http.Client{
+		Timeout: Timeout,
+	}
+	resp, err := client.Get(url)
+
+	log.Debugf("Received response %+v for url %s", resp, url)
+	if err != nil {
+		log.Errorf("failed to call API %s due to %s ", url, err.Error())
+		return false, err
+	}
+	defer resp.Body.Close() // #nosec G307
+	bodyBytes, err := io.ReadAll(resp.Body)
+	if err != nil {
+		log.Errorf("failed to read API response due to %s ", err.Error())
+		return false, err
+	}
+	if resp.StatusCode != 200 {
+		log.Errorf("Found unexpected response from the server while fetching array status %d ", resp.StatusCode)
+		return false, fmt.Errorf("unexpected response from the server")
+	}
+	var statusResponse ArrayConnectivityStatus
+	err = json.Unmarshal(bodyBytes, &statusResponse)
+	if err != nil {
+		log.Errorf("unable to unmarshal and determine connectivity due to %s ", err)
+		return false, err
+	}
+	log.Infof("API Response received is %+v\n", statusResponse)
+	// the response object carries the last success and last attempt timestamps in Unix format
+	timeDiff := statusResponse.LastAttempt - statusResponse.LastSuccess
+	tolerance := s.SetPollingFrequency(ctx)
+	currTime := time.Now().Unix()
+	// check whether the status response is stale, i.e. whether the connectivity test is still running;
+	// since nodeProbe runs at frequency tolerance/2, ideally the check below should never be true
+	if (currTime - statusResponse.LastAttempt) > tolerance*2 {
+		log.Errorf("seems like the connectivity test is not being run, current time is %d and last run was at %d", currTime, statusResponse.LastAttempt)
+		// considering connectivity is broken
+		return false, nil
+	}
+	log.Debugf("last connectivity was %d sec back, tolerance is %d sec", timeDiff, tolerance)
+	// give 2s leeway for the tolerance check
+	if timeDiff <= tolerance+2 {
+		return true, nil
+	}
+	return false, nil
+}
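QueryArrayStatus above consumes a two-field JSON document from the node's status endpoint. A small self-contained sketch of that payload and the tolerance arithmetic; the tolerance value here is illustrative, the real one comes from SetPollingFrequency:

    package main

    import (
        "encoding/json"
        "fmt"
        "time"
    )

    // mirrors ArrayConnectivityStatus from the file above
    type arrayConnectivityStatus struct {
        LastSuccess int64 `json:"lastSuccess"`
        LastAttempt int64 `json:"lastAttempt"`
    }

    func main() {
        now := time.Now().Unix()
        s := arrayConnectivityStatus{LastSuccess: now, LastAttempt: now}
        b, _ := json.Marshal(s)
        fmt.Println(string(b)) // e.g. {"lastSuccess":1717400000,"lastAttempt":1717400000}

        // the probe is considered healthy when the gap is within tolerance (+2s leeway)
        tolerance := int64(60) // illustrative polling frequency in seconds
        fmt.Println(s.LastAttempt-s.LastSuccess <= tolerance+2)
    }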
diff --git a/service/csi_extension_server.go b/service/csi_extension_server.go
new file mode 100644
index 00000000..af37cf16
--- /dev/null
+++ b/service/csi_extension_server.go
@@ -0,0 +1,200 @@
+/*
+ Copyright © 2023-2024 Dell Inc. or its subsidiaries. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+     http://www.apache.org/licenses/LICENSE-2.0
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package service
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"time"
+
+	"github.com/dell/dell-csi-extensions/podmon"
+	log "github.com/sirupsen/logrus"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/status"
+)
+
+// OneHour is the time window, in milliseconds, used for fetching metrics
+const OneHour int64 = 3600000
+
+var metricsQuery = []string{"HostMBs", "MBRead", "MBWritten", "IoRate", "Reads", "Writes", "ResponseTime"}
+
+func (s *service) ValidateVolumeHostConnectivity(ctx context.Context, req *podmon.ValidateVolumeHostConnectivityRequest) (*podmon.ValidateVolumeHostConnectivityResponse, error) {
+	log.Infof("ValidateVolumeHostConnectivity called %+v", req)
+	rep := &podmon.ValidateVolumeHostConnectivityResponse{
+		Messages: make([]string, 0),
+	}
+
+	if (len(req.GetVolumeIds()) == 0 || len(req.GetArrayId()) == 0) && len(req.GetNodeId()) == 0 {
+		// this is a no-op call, just testing that the interface is present
+		rep.Messages = append(rep.Messages, "ValidateVolumeHostConnectivity is implemented")
+		return rep, nil
+	}
+
+	if req.GetNodeId() == "" {
+		return nil, fmt.Errorf("the NodeID is a required field")
+	}
+	// create the map of all the arrays, keyed on the array's symID
+	symIDs := make(map[string]bool)
+	symID := req.GetArrayId()
+	if symID == "" {
+		if len(req.GetVolumeIds()) == 0 {
+			log.Info("neither symID nor volumeID is present in request")
+			// fall back to the default managed arrays
+			for _, arr := range s.opts.ManagedArrays {
+				symIDs[arr] = true
+			}
+		}
+		// derive the symID from each volumeID in the request
+		for _, volID := range req.GetVolumeIds() {
+			_, symID, _, _, _, err := s.parseCsiID(volID)
+			if err != nil || symID == "" {
+				log.Errorf("unable to retrieve array's symID after parsing volumeID")
+				for _, arr := range s.opts.ManagedArrays {
+					symIDs[arr] = true
+				}
+			} else {
+				symIDs[symID] = true
+			}
+		}
+	} else {
+		symIDs[symID] = true
+	}
+
+	// Go through each of the symIDs
+	for symID := range symIDs {
+		// First - check if the array is visible from the node
+		err := s.checkIfNodeIsConnected(ctx, symID, req.GetNodeId(), rep)
+		if err != nil {
+			return rep, err
+		}
+
+		// Check for IO in progress only when volume IDs are present in the request, since the
+		// field is required only in that case; this also reduces the number of API calls
+		if len(req.GetVolumeIds()) > 0 {
+			// Get array config
+			for _, volID := range req.GetVolumeIds() {
+				_, symIDForVol, devID, _, _, _ := s.parseCsiID(volID)
+				if symIDForVol != symID {
+					log.Errorf("Received symID from podmon is %s and retrieved from array is %s ", symID, symIDForVol)
+					return nil, fmt.Errorf("invalid symID %s is provided", symID)
+				}
+				// check if any IO is in progress for the current symID/array
+				err := s.IsIOInProgress(ctx, devID, symIDForVol)
+				if err == nil {
+					rep.IosInProgress = true
+					return rep, nil
+				}
+			}
+		}
+	}
+	log.Infof("ValidateVolumeHostConnectivity reply %+v", rep)
+	return rep, nil
+}
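A hedged sketch of the three request shapes this handler distinguishes; the podmon types come from the imported dell-csi-extensions package, while the node ID and volume ID literals are illustrative only:

    package service_test

    import "github.com/dell/dell-csi-extensions/podmon"

    // buildRequests shows the three shapes; the volume ID format is illustrative only.
    func buildRequests() []*podmon.ValidateVolumeHostConnectivityRequest {
        return []*podmon.ValidateVolumeHostConnectivityRequest{
            {}, // empty probe: answered with "ValidateVolumeHostConnectivity is implemented"
            {NodeId: "node1-127.0.0.2", ArrayId: "000000000001"},                         // symID given directly
            {NodeId: "node1-127.0.0.2", VolumeIds: []string{"vol1-000000000001-00123"}}, // symID parsed from volume ID
        }
    }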
+// checkIfNodeIsConnected looks at the 'nodeId' to determine if there is connectivity to the 'arrayId' array.
+// The 'rep' object will be filled with the results of the check.
+func (s *service) checkIfNodeIsConnected(ctx context.Context, symID string, nodeID string, rep *podmon.ValidateVolumeHostConnectivityResponse) error {
+	log.Infof("Checking if array %s is connected to node %s", symID, nodeID)
+	var message string
+	rep.Connected = false
+	nodeIP := s.k8sUtils.GetNodeIPs(nodeID)
+	if len(nodeIP) == 0 {
+		log.Errorf("failed to parse node ID '%s'", nodeID)
+		return fmt.Errorf("failed to parse node ID")
+	}
+	// form the url to call the array-status API on the node
+	url := "http://" + nodeIP + s.opts.PodmonPort + ArrayStatus + "/" + symID
+	connected, err := s.QueryArrayStatus(ctx, url)
+	if err != nil {
+		message = fmt.Sprintf("connectivity unknown for array %s to node %s due to %s", symID, nodeID, err)
+		log.Error(message)
+		rep.Messages = append(rep.Messages, message)
+	}
+
+	if connected {
+		rep.Connected = true
+		message = fmt.Sprintf("array %s is connected to node %s", symID, nodeID)
+	} else {
+		message = fmt.Sprintf("array %s is not connected to node %s", symID, nodeID)
+	}
+	log.Info(message)
+	rep.Messages = append(rep.Messages, message)
+	return nil
+}
+
+// IsIOInProgress checks the status of IO operations on the array for the given volume
+func (s *service) IsIOInProgress(ctx context.Context, volID, symID string) (err error) {
+	// Call PerformanceMetricsByVolume or PerformanceMetricsByFileSystem in gopowermax based on the volume type
+	pmaxClient, err := s.GetPowerMaxClient(symID)
+	if err != nil {
+		log.Error(err.Error())
+		return status.Error(codes.InvalidArgument, err.Error())
+	}
+	arrayKeys, err := pmaxClient.GetArrayPerfKeys(ctx)
+	if err != nil {
+		log.Error(err.Error())
+		return status.Errorf(codes.Internal, "error %s getting keys", err.Error())
+	}
+	var endTime int64
+	for _, info := range arrayKeys.ArrayInfos {
+		if strings.Compare(info.SymmetrixID, symID) == 0 {
+			endTime = info.LastAvailableDate
+			break
+		}
+	}
+	startTime := endTime - OneHour
+	_, err = pmaxClient.GetFileSystemByID(ctx, symID, volID)
+	if err != nil {
+		resp, err := pmaxClient.GetVolumesMetricsByID(ctx, symID, volID, metricsQuery, startTime, endTime)
+		if err != nil {
+			log.Errorf("Error %v while checking IsIOInProgress for array having symID %s for volumeId %s", err.Error(), symID, volID)
+			return fmt.Errorf("error %v while checking IsIOInProgress", err.Error())
+		}
+		// check the last four entries received in the response
+		for i := len(resp.ResultList.Result[0].VolumeResult) - 1; i >= (len(resp.ResultList.Result[0].VolumeResult)-4) && i >= 0; i-- {
+			if resp.ResultList.Result[0].VolumeResult[i].IoRate > 0.0 && checkIfEntryIsLatest(resp.ResultList.Result[0].VolumeResult[i].Timestamp) {
+				return nil
+			}
+		}
+		return fmt.Errorf("no IOInProgress")
+	}
+	// nfs volume type logic
+	resp, err := pmaxClient.GetFileSystemMetricsByID(ctx, symID, volID, metricsQuery, startTime, endTime)
+	if err != nil {
+		log.Errorf("Error %v while checking IsIOInProgress for array having symID %s for volumeId %s", err.Error(), symID, volID)
+		return fmt.Errorf("error %v while checking IsIOInProgress", err.Error())
+	}
+	// check the last four entries received in the response
+	fileMetrics := resp.ResultList.Result
+	for i := 0; i < len(fileMetrics); i++ {
+		if fileMetrics[i].PercentBusy > 0.0 && checkIfEntryIsLatest(fileMetrics[i].Timestamp) {
+			return nil
+		}
+	}
+	return fmt.Errorf("no IOInProgress")
+}
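checkIfEntryIsLatest below treats metric timestamps as Unix milliseconds and accepts entries younger than 60 seconds. A small worked sketch of that arithmetic:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // performance timestamps arrive in milliseconds
        respTS := time.Now().Add(-30*time.Second).UnixNano() / int64(time.Millisecond)
        timeFromResponse := time.Unix(respTS/1000, 0)
        fresh := time.Now().UTC().Sub(timeFromResponse).Seconds() < 60
        fmt.Println(fresh) // true: a 30s-old entry counts as IO in progress
    }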
+
+func checkIfEntryIsLatest(respTS int64) bool {
+	timeFromResponse := time.Unix(respTS/1000, 0)
+	log.Debugf("timestamp received from the response body is %v", timeFromResponse)
+	currentTime := time.Now().UTC()
+	log.Debugf("current time %v", currentTime)
+	if currentTime.Sub(timeFromResponse).Seconds() < 60 {
+		log.Debug("found a fresh metric")
+		return true
+	}
+	return false
+}
diff --git a/service/envvars.go b/service/envvars.go
index 2935d32a..9e7bfbbb 100644
--- a/service/envvars.go
+++ b/service/envvars.go
@@ -154,6 +154,17 @@ const (
 	// EnvVCPassword is an env variable that has vCenter password
 	EnvVCPassword = "X_CSI_VCENTER_PWD" // #nosec G101

+	// EnvIfaceExcludeFilter is an env variable with a regex of interface names to exclude
+	EnvIfaceExcludeFilter = "X_CSI_IFACE_EXCLUDE_FILTER"
+
+	// EnvPodmonEnabled indicates that podmon is enabled
+	EnvPodmonEnabled = "X_CSI_PODMON_ENABLED"
+
+	// EnvPodmonArrayConnectivityAPIPORT indicates the port to be used for exposing the podmon API health endpoint
+	EnvPodmonArrayConnectivityAPIPORT = "X_CSI_PODMON_API_PORT"
+
+	// EnvPodmonArrayConnectivityPollRate indicates the polling frequency used to check array connectivity
+	EnvPodmonArrayConnectivityPollRate = "X_CSI_PODMON_ARRAY_CONNECTIVITY_POLL_RATE"
 )
diff --git a/service/features/csi_extension.feature b/service/features/csi_extension.feature
new file mode 100644
index 00000000..e3e6cd7a
--- /dev/null
+++ b/service/features/csi_extension.feature
@@ -0,0 +1,86 @@
+Feature: PowerMax CSI interface
+  As a consumer of the CSI interface
+  I want to test the CSI extension APIs
+  So that they are known to work
+
+  # add @resiliency tag in service_test to run all tests
+  @resiliency
+  @v2.11.0
+  Scenario: Call ValidateVolumeHostConnectivity for implementation
+    Given a PowerMax service
+    And I call CreateVolume "volume1"
+    And a valid CreateVolumeResponse is returned
+    And I call ValidateVolumeHostConnectivity
+    Then the ValidateVolumeHost message contains "ValidateVolumeHostConnectivity is implemented"
+
+  @resiliency
+  @v2.11.0
+  Scenario: Call ValidateVolumeHostConnectivity for NodeID with no symID with no service
+    Given a PowerMax service
+    And I call CreateVolume "volume1"
+    And a valid CreateVolumeResponse is returned
+    And I call ValidateVolumeHostConnectivity with "node1-127.0.0.2" and symID "none"
+    Then no error was received
+    And the ValidateVolumeHost message contains "connectivity unknown for array"
+
+  @resiliency
+  @v2.11.0
+  Scenario Outline: Call ValidateVolumeHostConnectivity with various errors
+    Given a PowerMax service
+    And I call CreateVolume "volume1"
+    And a valid CreateVolumeResponse is returned
+    Then I induce error <induced>
+    And I call ValidateVolumeHostConnectivity with <nodeID> and symID <symID>
+    Then the error contains <errormsg>
+    Examples:
+      | nodeID    | symID       | induced           | errormsg                     |
+      | "node1"   | "default"   | "none"            | "failed to parse node ID"    |
+      | "no-node" | "default"   | "none"            | "NodeID is a required field" |
+      | "node1"   | "fromVolID" | "none"            | "failed to parse node ID"    |
+      | "node1"   | "fromVolID" | "InvalidVolumeID" | "failed to parse node ID"    |
+
+  @resiliency
+  Scenario: Call ValidateVolumeHostConnectivity with a connected node
+    Given a PowerMax service
+    And I call CreateVolume "volume1"
+    And a valid CreateVolumeResponse is returned
+    And I induce error "GetFreshMetrics"
+    And I start node API server
+    When I call ValidateVolumeHostConnectivity with "connected-node" and symID "default"
+    Then no error was received
+    And the ValidateVolumeHost message contains "connected to node"
+    Then I call ValidateVolumeHostConnectivity with "connected-node-faultyVolID" and symID "default"
+    Then the error contains "invalid symID"
+
+  @resiliency
+  @v2.11.0
+  Scenario Outline: Call IsIOInProgress with block volume and different errors
+    Given a PowerMax service
+    And I call CreateVolume "volume1"
+    And a valid CreateVolumeResponse is returned
+    And I induce error <induced>
+    When I call IsIOInProgress
+    Then the error contains <error>
+    Examples:
+      | induced                  | error             |
+      | "none"                   | "no IOInProgress" |
+      | "InvalidSymID"           | "not found"       |
+      | "GetArrayPerfKeyError"   | "getting keys"    |
+      | "GetVolumesMetricsError" | "error"           |
+      | "GetFreshMetrics"        | "none"            |
+
+  @resiliency
+  @v2.11.0
+  Scenario Outline: Call IsIOInProgress with file system and different errors
+    Given a PowerMax service
+    And I call fileSystem CreateVolume "volume1"
+    Then a valid CreateVolumeResponse is returned
+    And I induce error <induced>
+    When I call IsIOInProgress
+    Then the error contains <error>
+    Examples:
+      | induced                  | error             |
+      | "none"                   | "no IOInProgress" |
+      | "GetFileSysMetricsError" | "error"           |
diff --git a/service/features/service.feature b/service/features/service.feature
index 46717cf2..dee00c9e 100644
--- a/service/features/service.feature
+++ b/service/features/service.feature
@@ -516,10 +516,10 @@ Feature: PowerMax CSI interface
     And arrays are logged in

     Examples:
-      | induced1                | errormsg                                  | count |
-#     | "GetSymmetrixError"     | "Unable to retrieve Array List"           | 0     |
-      | "GOISCSIDiscoveryError" | "failed to login to (some) ISCSI targets" | 0     |
-      | "none"                  | "none"                                    | 3     |
+      | induced1                | errormsg                        | count |
+#     | "GetSymmetrixError"     | "Unable to retrieve Array List" | 0     |
+      | "GOISCSIDiscoveryError" | "failed to login"               | 0     |
+      | "none"                  | "none"                          | 3     |

   @v1.3.0
   Scenario Outline: Validate ensureLoggedIntoEveryArray with CHAP
@@ -534,11 +534,11 @@ Feature: PowerMax CSI interface
     And arrays are logged in

     Examples:
-      | induced1             | errormsg                                  | count |
-#     | "GetSymmetrixError"  | "Unable to retrieve Array List"           | 0     |
-      | "InduceLoginError"   | "failed to login to (some) ISCSI targets" | 0     |
-      | "InduceSetCHAPError" | "set CHAP induced error"                  | 0     |
-      | "none"               | "none"                                    | 3     |
+      | induced1             | errormsg                        | count |
+#     | "GetSymmetrixError"  | "Unable to retrieve Array List" | 0     |
+      | "InduceLoginError"   | "failed to login"               | 0     |
+      | "InduceSetCHAPError" | "set CHAP induced error"        | 0     |
+      | "none"               | "none"                          | 3     |

   @v1.0.0
   Scenario Outline: Test GetVolumeByID function
@@ -579,14 +579,14 @@ Feature: PowerMax CSI interface
     Examples:
       | induced             | nCYL     | errormsg                               |
       | "none"              | 0        | "Invalid argument"                     |
-      | "none"              | 2        | "bad capacity"                         |
+      | "none"              | 2        | "Attempting to shrink the volume size" |
       | "none"              | 29       | "Attempting to shrink the volume size" |
       | "none"              | 30       | "none"                                 |
-      | "none"              | 24       | "bad capacity"                         |
+      | "none"              | 24       | "Attempting to shrink the volume size" |
       | "none"              | 35791396 | "bad capacity"                         |
       | "none"              | 35791395 | "none"                                 |
       | "none"              | 32       | "none"                                 |
-      | "NoVolumeID"        | 2        | "Invalid volume id"                    |
+      | "NoVolumeID"        | 2        | "Invalid volume id"                    |
       | "ExpandVolumeError" | 32       | "induced error"                        |
diff --git a/service/node.go b/service/node.go
index 608fbbea..a1080cb0 100644
--- a/service/node.go
+++ b/service/node.go
@@ -839,6 +839,92 @@ func (s *service) nodeProbe(ctx context.Context) error {
 	return nil
 }
+
+func (s *service) nodeProbeBySymID(ctx context.Context, symID string) error {
+	log.Debugf("Entering nodeProbe for array %s", symID)
+	defer log.Debugf("Exiting nodeProbe for array %s", symID)
+
+	if s.opts.NodeName == "" {
+		return status.Errorf(codes.FailedPrecondition,
+			"Error getting NodeName from the environment")
+	}
+
+	err := s.createPowerMaxClients(ctx)
+	if err != nil {
+		return err
+	}
+	pmaxClient, err := s.GetPowerMaxClient(symID)
+	if err != nil {
+		return err
+	}
+	// get the host from the array
+	hostID, _, _ := s.GetHostSGAndMVIDFromNodeID(s.opts.NodeName, !s.useFC)
+	host, err := pmaxClient.GetHostByID(ctx, symID, hostID)
+	if err != nil {
+		if strings.Contains(err.Error(), notFound) && s.useNFS {
+			log.Debugf("Error %s, while probing %s but since it's NFS this is expected", err.Error(), symID)
+			return nil
+		}
+		// the nodeId is not right/it's not NFS and still the host is not present
+		log.Infof("Error %s, while probing %s", err.Error(), symID)
+		return err
+	}
+
+	log.Debugf("Successfully got Host %s on %s", host.HostID, symID)
+
+	// Check whether the host has active FC or iSCSI sessions
+	if s.useFC {
+		log.Debugf("Checking if FC initiators are logged in or not")
+		initiatorList, err := pmaxClient.GetInitiatorList(ctx, symID, "", false, true)
+		if err != nil {
+			log.Error("Could not get initiator list: " + err.Error())
+			return err
+		}
+		for _, arrayInitrID := range initiatorList.InitiatorIDs {
+			for _, hostInitID := range host.Initiators {
+				if arrayInitrID == hostInitID || strings.HasSuffix(arrayInitrID, hostInitID) {
+					initiator, err := pmaxClient.GetInitiatorByID(ctx, symID, arrayInitrID)
+					if err != nil {
+						return err
+					}
+					if initiator.OnFabric && initiator.LoggedIn {
+						return nil
+					}
+				}
+			}
+		}
+		return fmt.Errorf("no active fc sessions")
+	}
+	// Get iscsi initiators.
+	IQNs, iSCSIErr := s.iscsiClient.GetInitiators("")
+	if iSCSIErr != nil {
+		return iSCSIErr
+	}
+	if host.NumberMaskingViews > 0 {
+		err = s.performIscsiLoginOnSymID(ctx, symID, IQNs, host.MaskingviewIDs[0], pmaxClient)
+		if err != nil {
+			log.Errorf("error performing iscsi login %s", err.Error())
+			return err
+		}
+	} else {
+		log.Infof("skipping login on host %s as no masking view exists", host.HostID)
+	}
+
+	log.Debugf("Checking if iscsi sessions are active on node or not")
+	sessions, _ := s.iscsiClient.GetSessions()
+	for _, target := range s.iscsiTargets[symID] {
+		for _, session := range sessions {
+			log.Debugf("matching %v with %v", target, session)
+			if session.Target == target && session.ISCSISessionState == goiscsi.ISCSISessionStateLOGGEDIN {
+				if s.useNFS {
+					s.useNFS = false
+				}
+				return nil
+			}
+		}
+	}
+	return fmt.Errorf("no active iscsi sessions")
+}
+
 func (s *service) NodeGetCapabilities(
 	_ context.Context,
 	_ *csi.NodeGetCapabilitiesRequest) (
@@ -1074,7 +1160,7 @@ func (s *service) NodeGetInfo(
 	}

 	var maxPowerMaxVolumesPerNode int64
-	labels, err := s.GetNodeLabels()
+	labels, err := s.k8sUtils.GetNodeLabels(s.opts.NodeFullName)
 	if err != nil {
 		log.Infof("failed to get Node Labels with error '%s'", err.Error())
 	}
@@ -1332,7 +1418,9 @@ func (s *service) nodeStartup(ctx context.Context) error {
 	log.Infof("TransportProtocol %s FC portWWNs: %s ...
IQNs: %s", s.opts.TransportProtocol, portWWNs, IQNs) // The driver needs at least one FC or iSCSI initiator to be defined if len(portWWNs) == 0 && len(IQNs) == 0 { - return fmt.Errorf("No FC or iSCSI initiators were found and at least 1 is required") + log.Infof("No FC or iSCSI initiators were found, assuming NFS protocol configured") + s.useNFS = true + return nil } } else { err := s.setVMHost() @@ -1350,8 +1438,8 @@ func (s *service) nodeStartup(ctx context.Context) error { symmetrixIDs := arrays.SymmetrixIDs log.Debug(fmt.Sprintf("GetSymmetrixIDList returned: %v", symmetrixIDs)) - s.nodeHostSetup(ctx, portWWNs, IQNs, symmetrixIDs) // #nosec G20 - + _ = s.nodeHostSetup(ctx, portWWNs, IQNs, symmetrixIDs) // #nosec G20 + go s.startAPIService(ctx) return err } @@ -1483,8 +1571,6 @@ func (s *service) nodeHostSetup(ctx context.Context, portWWNs []string, IQNs []s // See if it's viable to use FC and/or ISCSI hostIDIscsi, _, _ := s.GetISCSIHostSGAndMVIDFromNodeID(s.opts.NodeName) hostIDFC, _, _ := s.GetFCHostSGAndMVIDFromNodeID(s.opts.NodeName) - var useFC bool - var useIscsi bool // Loop through the symmetrix, looking for existing initiators for _, symID := range symmetrixIDs { @@ -1512,14 +1598,14 @@ func (s *service) nodeHostSetup(ctx context.Context, portWWNs []string, IQNs []s log.Infof("valid FC initiators: %v", validFCs) if len(validFCs) > 0 && (s.opts.TransportProtocol == "" || s.opts.TransportProtocol == FcTransportProtocol) { // We do have to have pre-existing initiators that were zoned for FC - useFC = true + s.useFC = true } validIscsis, err := s.verifyAndUpdateInitiatorsInADiffHost(ctx, symID, IQNs, hostIDIscsi, pmaxClient) if err != nil { log.Error("Could not validate iSCSI initiators" + err.Error()) } else if s.opts.TransportProtocol == "" || s.opts.TransportProtocol == IscsiTransportProtocol { // We do not have to have pre-existing initiators to use Iscsi (we can create them) - useIscsi = true + s.useIscsi = true } log.Infof("valid (existing) iSCSI initiators (must be manually created): %v", validIscsis) if len(validIscsis) == 0 { @@ -1527,13 +1613,13 @@ func (s *service) nodeHostSetup(ctx context.Context, portWWNs []string, IQNs []s validIscsis = IQNs } - if !useFC && !useIscsi { + if !s.useFC && !s.useIscsi { log.Error("No valid initiators- could not initialize FC or iSCSI") return err } iscsiChroot, _ := csictx.LookupEnv(context.Background(), EnvISCSIChroot) - if useFC { + if s.useFC { formattedFCs := make([]string, 0) for _, initiatorID := range validFCs { elems := strings.Split(initiatorID, ":") @@ -1546,7 +1632,7 @@ func (s *service) nodeHostSetup(ctx context.Context, portWWNs []string, IQNs []s s.initFCConnector(iscsiChroot) s.arrayTransportProtocolMap[symID] = FcTransportProtocol isSymConnFC[symID] = true - } else if useIscsi { + } else if s.useIscsi { err := s.ensureISCSIDaemonStarted() if err != nil { log.Errorf("Failed to start the ISCSI Daemon. 
Error - %s", err.Error()) @@ -1642,7 +1728,7 @@ func (s *service) loginIntoISCSITargets(array string, targets []maskingViewTarge var err error loggedInAll := true if s.opts.EnableCHAP { - // CHAP is already enabled on the array, discovery will not work + // CHAP is already enabled on the array, discovery will not work, // so we need to do a login (as we have already setup the database(s) successfully for _, tgt := range targets { loginError := s.iscsiClient.PerformLogin(tgt.target) @@ -1652,6 +1738,7 @@ func (s *service) loginIntoISCSITargets(array string, targets []maskingViewTarge err = loginError loggedInAll = false } else { + s.iscsiTargets[array] = append(s.iscsiTargets[array], tgt.target.Target) log.Infof("Successfully logged into target: %s", tgt.target.Target) } } @@ -1666,6 +1753,7 @@ func (s *service) loginIntoISCSITargets(array string, targets []maskingViewTarge err = discoveryError loggedInAll = false } else { + s.iscsiTargets[array] = append(s.iscsiTargets[array], tgt.target.Target) log.Infof("Successfully logged into target: %s on portal :%s", tgt.target.Target, tgt.target.Portal) } @@ -1812,44 +1900,52 @@ func (s *service) ensureLoggedIntoEveryArray(ctx context.Context, _ bool) error } log.Debugf("(ISCSI) No logins were done earlier for %s", array) s.cacheMutex.Unlock() - // Try to get the masking view targets from the cache - mvTargets, ok := symToMaskingViewTargets.Load(array) - if ok { - // Entry is present in cache - // This means that we discovered the targets - // but haven't logged in for some reason - log.Debugf("Cache hit for %s", array) - maskingViewTargets := mvTargets.([]maskingViewTargetInfo) - // First set the CHAP credentials if required - err = s.setCHAPCredentials(array, maskingViewTargets, IQNs) - if err != nil { - // log the error and continue - log.Errorf("Failed to set CHAP credentials for %v", maskingViewTargets) - // Reset the error - err = nil - } - err = s.loginIntoISCSITargets(array, maskingViewTargets) - } else { - // Entry not in cache - // Configure MaskingView Targets - CHAP, Discovery/Login - _, tempErr := s.getAndConfigureMaskingViewTargets(ctx, array, mvName, IQNs, pmaxClient) - if tempErr != nil { - if strings.Contains(tempErr.Error(), "does not exist") { - // Ignore this error - log.Debugf("Couldn't configure ISCSI targets as masking view: %s doesn't exist for array: %s", - mvName, array) - tempErr = nil - } else { - err = tempErr - log.Errorf("Failed to configure ISCSI targets for masking view: %s, array: %s. Error: %s", - mvName, array, tempErr.Error()) - } - } + err = s.performIscsiLoginOnSymID(ctx, array, IQNs, mvName, pmaxClient) + if err != nil { + return fmt.Errorf("failed to login to (some) %s ISCSI targets. Error: %s", array, err.Error()) } + } + return nil +} + +func (s *service) performIscsiLoginOnSymID(ctx context.Context, array string, IQNs []string, mvName string, pmaxClient pmax.Pmax) (err error) { + // Try to get the masking view targets from the cache + mvTargets, ok := symToMaskingViewTargets.Load(array) + if ok { + // Entry is present in cache + // This means that we discovered the targets + // but haven't logged in for some reason + log.Debugf("Cache hit for %s", array) + maskingViewTargets := mvTargets.([]maskingViewTargetInfo) + // First set the CHAP credentials if required + err = s.setCHAPCredentials(array, maskingViewTargets, IQNs) if err != nil { - return fmt.Errorf("failed to login to (some) ISCSI targets. 
Error: %s", err.Error()) + // log the error and continue + log.Errorf("Failed to set CHAP credentials for %v", maskingViewTargets) + // Reset the error + err = nil + } + err = s.loginIntoISCSITargets(array, maskingViewTargets) + } else { + // Entry not in cache + // Configure MaskingView Targets - CHAP, Discovery/Login + _, tempErr := s.getAndConfigureMaskingViewTargets(ctx, array, mvName, IQNs, pmaxClient) + if tempErr != nil { + if strings.Contains(tempErr.Error(), "does not exist") { + // Ignore this error + log.Debugf("Couldn't configure ISCSI targets as masking view: %s doesn't exist for array: %s", + mvName, array) + tempErr = nil + } else { + err = tempErr + log.Errorf("Failed to configure ISCSI targets for masking view: %s, array: %s. Error: %s", + mvName, array, tempErr.Error()) + } } } + if err != nil { + return fmt.Errorf("failed to login in array: %s ISCSI targets. Error: %s", array, err.Error()) + } return nil } diff --git a/service/node_connectivity_checker.go b/service/node_connectivity_checker.go new file mode 100644 index 00000000..dc9a8c4f --- /dev/null +++ b/service/node_connectivity_checker.go @@ -0,0 +1,206 @@ +package service + +/* + * + * Copyright © 2022-2023 Dell Inc. or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + */ + +import ( + "context" + "encoding/json" + "fmt" + "net/http" + "sync" + "time" + + "github.com/gorilla/mux" + log "github.com/sirupsen/logrus" +) + +// pollingFrequency in seconds +var pollingFrequencyInSeconds int64 + +// probeStatus map[string]ArrayConnectivityStatus +var probeStatus *sync.Map + +// startAPIService reads nodes to array status periodically +func (s *service) startAPIService(ctx context.Context) { + if !s.opts.IsPodmonEnabled { + log.Info("podmon is not enabled") + return + } + pollingFrequencyInSeconds = s.SetPollingFrequency(ctx) + s.startNodeToArrayConnectivityCheck(ctx) + s.apiRouter(ctx) +} + +// apiRouter serves http requests +func (s *service) apiRouter(_ context.Context) { + log.Infof("starting http server on port %s", s.opts.PodmonPort) + // create a new mux router + router := mux.NewRouter() + // route to connectivity status + // connectivityStatus is the handlers + router.HandleFunc(ArrayStatus, connectivityStatus).Methods("GET") + router.HandleFunc(ArrayStatus+"/"+"{symID}", getArrayConnectivityStatus).Methods("GET") + // start http server to serve requests + server := &http.Server{ + Addr: s.opts.PodmonPort, + Handler: router, + ReadTimeout: Timeout, + WriteTimeout: Timeout, + } + err := server.ListenAndServe() + if err != nil { + log.Errorf("unable to start http server to serve status requests due to %s", err) + } + log.Infof("started http server to serve status requests at %s", s.opts.PodmonPort) +} + +// connectivityStatus handler returns array connectivity status +func connectivityStatus(w http.ResponseWriter, _ *http.Request) { + log.Infof("connectivityStatus called, status is %v \n", probeStatus) + // w.Header().Set("Content-Type", "application/json") + if probeStatus == nil { + log.Errorf("error probeStatus map in cache is empty") + w.WriteHeader(http.StatusInternalServerError) + w.Header().Set("Content-Type", "application/json") + return + } + + // convert struct to JSON + log.Debugf("ProbeStatus fetched from the cache has %+v", probeStatus) + + jsonResponse, err := MarshalSyncMapToJSON(probeStatus) + if err != nil { + log.Errorf("error %s during marshaling to json", err) + w.WriteHeader(http.StatusInternalServerError) + w.Header().Set("Content-Type", "application/json") + return + } + log.Info("sending connectivityStatus for all arrays ") + w.Header().Set("Content-Type", "application/json") + _, err = w.Write(jsonResponse) + if err != nil { + log.Errorf("unable to write response %s", err) + } +} + +// MarshalSyncMapToJSON marshal the sync Map to Json +func MarshalSyncMapToJSON(m *sync.Map) ([]byte, error) { + tmpMap := make(map[string]ArrayConnectivityStatus) + m.Range(func(k, value interface{}) bool { + // this check is not necessary but just in case is someone in future play around this + switch value.(type) { + case ArrayConnectivityStatus: + tmpMap[k.(string)] = value.(ArrayConnectivityStatus) + return true + default: + log.Errorf("invalid data is stored in cache") + return false + } + }) + log.Debugf("map value is %+v", tmpMap) + if len(tmpMap) == 0 { + return nil, fmt.Errorf("invalid data is stored in cache") + } + return json.Marshal(tmpMap) +} + +// getArrayConnectivityStatus handler lists status of the requested array +func getArrayConnectivityStatus(w http.ResponseWriter, r *http.Request) { + symID := mux.Vars(r)["symID"] + log.Infof("GetArrayConnectivityStatus called for array %s \n", symID) + status, found := probeStatus.Load(symID) + if !found { + // specify status code + w.WriteHeader(http.StatusNotFound) + 
w.Header().Set("Content-Type", "application/json") + // update response writer + fmt.Fprintf(w, "array %s not found \n", symID) + return + } + // convert status struct to JSON + jsonResponse, err := json.Marshal(status) + if err != nil { + log.Errorf("error %s during marshaling to json", err) + w.WriteHeader(http.StatusInternalServerError) + w.Header().Set("Content-Type", "application/json") + return + } + log.Infof("sending response %+v for array %s \n", status, symID) + // update response + _, err = w.Write(jsonResponse) + if err != nil { + log.Errorf("unable to write response %s", err) + } +} + +// startNodeToArrayConnectivityCheck starts connectivityTest as one goroutine for each array +func (s *service) startNodeToArrayConnectivityCheck(ctx context.Context) { + log.Debug("startNodeToArrayConnectivityCheck called") + probeStatus = new(sync.Map) + pMaxArrays, err := s.retryableGetSymmetrixIDList() + if err != nil { + log.Errorf("startNodeToArrayConnectivityCheck failed to get symID list %s ", err.Error()) + } else { + for _, arr := range pMaxArrays.SymmetrixIDs { + go s.testConnectivityAndUpdateStatus(ctx, arr, Timeout) + } + log.Infof("startNodeToArrayConnectivityCheck is running probes at pollingFrequency %d ", pollingFrequencyInSeconds/2) + } +} + +// testConnectivityAndUpdateStatus runs probe to test connectivity from node to array +// updates probeStatus map[array]ArrayConnectivityStatus +func (s *service) testConnectivityAndUpdateStatus(ctx context.Context, symID string, timeout time.Duration) { + defer func() { + if err := recover(); err != nil { + log.Errorf("panic occurred in testConnectivityAndUpdateStatus: %s", err) + } + // if panic occurs restart new goroutine + go s.testConnectivityAndUpdateStatus(ctx, symID, timeout) + }() + var status ArrayConnectivityStatus + for { + // add timeout to context + timeOutCtx, cancel := context.WithTimeout(ctx, timeout) + log.Debugf("Running probe for array %s at time %v \n", symID, time.Now()) + if existingStatus, ok := probeStatus.Load(symID); !ok { + log.Debugf("%s not in probeStatus ", symID) + } else { + if status, ok = existingStatus.(ArrayConnectivityStatus); !ok { + log.Errorf("failed to extract ArrayConnectivityStatus for array '%s'", symID) + } + } + // for the first time status will not be there. + log.Debugf("array %s , status is %+v", symID, status) + // run nodeProbe to test connectivity + err := s.nodeProbeBySymID(timeOutCtx, symID) + if err == nil { + log.Debugf("Probe successful for %s", symID) + status.LastSuccess = time.Now().Unix() + } else { + log.Debugf("Probe failed for array '%s' error:'%s'", symID, err) + } + status.LastAttempt = time.Now().Unix() + log.Debugf("array %s , storing status %+v", symID, status) + probeStatus.Store(symID, status) + cancel() + // sleep for half the pollingFrequency and run check again + time.Sleep(time.Second * time.Duration(pollingFrequencyInSeconds/2)) + } +} diff --git a/service/node_connectivity_checker_test.go b/service/node_connectivity_checker_test.go new file mode 100644 index 00000000..1271e011 --- /dev/null +++ b/service/node_connectivity_checker_test.go @@ -0,0 +1,219 @@ +/* + * + * Copyright © 2022-2023 Dell Inc. or its subsidiaries. All Rights Reserved. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package service
+
+import (
+    "context"
+    "net/http"
+    "sync"
+    "testing"
+    "time"
+
+    "github.com/dell/gopowermax/v2/mock"
+)
+
+func TestApiRouter2(t *testing.T) {
+    // the server should fail to come up on an invalid port
+    s.opts.PodmonPort = ":abc"
+    s.apiRouter(context.Background())
+
+    resp, err := http.Get("http://localhost:8083/node-status")
+    if err == nil || resp != nil {
+        t.Errorf("expected the node-status probe to fail when the server is not running")
+    }
+}
+
+func TestApiRouter(t *testing.T) {
+    s.opts.PodmonPort = ":8083"
+    go s.apiRouter(context.Background())
+    time.Sleep(2 * time.Second)
+
+    resp4, err := http.Get("http://localhost:8083/array-status")
+    if err != nil || resp4.StatusCode != 500 {
+        t.Errorf("Error while probing array status %v", err)
+    }
+    // fill some invalid dummy data in the cache and try to fetch
+    probeStatus = new(sync.Map)
+    probeStatus.Store("SymID2", "status")
+
+    resp5, err := http.Get("http://localhost:8083/array-status")
+    if err != nil || resp5.StatusCode != 500 {
+        t.Errorf("Error while probing array status %v, %d", err, resp5.StatusCode)
+    }
+
+    // fill some valid dummy data in the cache and try to fetch
+    var status ArrayConnectivityStatus
+    status.LastSuccess = time.Now().Unix()
+    status.LastAttempt = time.Now().Unix()
+    probeStatus = new(sync.Map)
+    probeStatus.Store("SymID", status)
+
+    // array status
+    resp2, err := http.Get("http://localhost:8083/array-status")
+    if err != nil || resp2.StatusCode != 200 {
+        t.Errorf("Error while probing array status %v", err)
+    }
+
+    resp3, err := http.Get("http://localhost:8083/array-status/SymIDNotPresent")
+    if err != nil || resp3.StatusCode != 404 {
+        t.Errorf("Error while probing array status %v", err)
+    }
+    value := make(chan int)
+    probeStatus.Store("SymID3", value)
+    resp9, err := http.Get("http://localhost:8083/array-status/SymID3")
+    if err != nil || resp9.StatusCode != 500 {
+        t.Errorf("Error while probing array status %v", err)
+    }
+    resp10, err := http.Get("http://localhost:8083/array-status/SymID")
+    if err != nil || resp10.StatusCode != 200 {
+        t.Errorf("Error while probing array status %v", err)
+    }
+}
+
+func TestMarshalSyncMapToJSON(t *testing.T) {
+    type args struct {
+        m *sync.Map
+    }
+    sample := new(sync.Map)
+    sample2 := new(sync.Map)
+    var status ArrayConnectivityStatus
+    status.LastSuccess = time.Now().Unix()
+    status.LastAttempt = time.Now().Unix()
+
+    sample.Store("SymID", status)
+    sample2.Store("key", "2.adasd")
+
+    tests := []struct {
+        name string
+        args args
+    }{
+        {"storing a valid value in the map cache", args{m: sample}},
+        {"storing an invalid value in the map cache", args{m: sample2}},
+    }
+    for i, tt := range tests {
+        t.Run(tt.name, func(t *testing.T) {
+            data, _ := MarshalSyncMapToJSON(tt.args.m)
+            if len(data) == 0 && i == 0 {
+                t.Errorf("MarshalSyncMapToJSON() expecting some data from cache in the response")
+                return
+            }
+        })
+    }
+}
+
+func TestStartAPIService(_ *testing.T) {
+    s.opts.IsPodmonEnabled = true
+    s.opts.ManagedArrays = []string{mock.DefaultSymmetrixID}
+    s.startAPIService(context.Background())
+}
+
+func TestStartAPIServiceNoPodmon(_ *testing.T) {
+    s.opts.IsPodmonEnabled = false
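+    // With podmon disabled, startAPIService below is expected to return
+    // immediately after logging "podmon is not enabled"; no probes or HTTP
+    // server are started (see startAPIService in node_connectivity_checker.go)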
+    s.startAPIService(context.Background())
+}
+
+// Run the following tests exclusively as they involve multiple sockets
+/*
+func TestQueryArrayStatus(t *testing.T) {
+    var status ArrayConnectivityStatus
+    status.LastAttempt = time.Now().Unix()
+    status.LastSuccess = time.Now().Unix()
+    input, _ := json.Marshal(status)
+    // dummy response for the connected case: the LastSuccess check just finished
+    http.HandleFunc("/array/id1", func(w http.ResponseWriter, _ *http.Request) {
+        w.Write(input)
+    })
+
+    server := &http.Server{Addr: ":49160"} // #nosec G112
+    fmt.Printf("Starting server at port 49160 \n")
+    go func() {
+        err := server.ListenAndServe()
+        if err != nil {
+            fmt.Println(err)
+        }
+    }()
+    check, err := s.QueryArrayStatus(context.Background(), "http://localhost:49160/array/id1")
+    if !check || err != nil {
+        t.Errorf("err: %v, expected check to be true but is %t", err, check)
+    }
+    server.Shutdown(context.Background())
+}
+
+func TestQASOnDisconnectedArr(t *testing.T) {
+    var status ArrayConnectivityStatus
+    status.LastAttempt = time.Now().Unix()
+    status.LastSuccess = time.Now().Unix() - 100
+    input, _ := json.Marshal(status)
+    // dummy response for the disconnected case: LastSuccess is older than LastAttempt
+    http.HandleFunc("/array/id2", func(w http.ResponseWriter, _ *http.Request) {
+        w.Write(input)
+    })
+
+    server := &http.Server{Addr: ":49159"} // #nosec G112
+    fmt.Printf("Starting server at port 49159 \n")
+    go func() {
+        err := server.ListenAndServe()
+        if err != nil {
+            fmt.Println(err)
+        }
+    }()
+    check, err := s.QueryArrayStatus(context.Background(), "http://localhost:49159/array/id2")
+    if err != nil || check {
+        t.Errorf("err: %v, expected check to be false but is %t", err, check)
+    }
+    server.Shutdown(context.Background())
+}
+
+func TestQASWithDiffErr(t *testing.T) {
+    var status ArrayConnectivityStatus
+    status.LastAttempt = time.Now().Unix() - 200
+    status.LastSuccess = time.Now().Unix() - 200
+    input, _ := json.Marshal(status)
+    // Responding with a dummy response for the case when the array check was done a while ago
+    http.HandleFunc("/array/id3", func(w http.ResponseWriter, _ *http.Request) {
+        w.Write(input)
+    })
+
+    http.HandleFunc("/array/id4", func(w http.ResponseWriter, _ *http.Request) {
+        w.Write([]byte("invalid type response"))
+    })
+    server := &http.Server{Addr: ":49157"} // #nosec G112
+    fmt.Printf("Starting server at port 49157 \n")
+    go func() {
+        err := server.ListenAndServe()
+        if err != nil {
+            fmt.Println(err)
+        }
+    }()
+    check, err := s.QueryArrayStatus(context.Background(), "http://localhost:49157/array/id3")
+    if err != nil || check {
+        t.Errorf("err: %v, expected check to be false but is %t", err, check)
+    }
+
+    check, err = s.QueryArrayStatus(context.Background(), "http://localhost:49157/array/id4")
+    if err == nil || check {
+        t.Errorf("err: %v, expected check to be false but is %t", err, check)
+    }
+
+    check, err = s.QueryArrayStatus(context.Background(), "http://localhost:49157/array/id5")
+    if err == nil || check {
+        t.Errorf("err: %v, expected check to be false but is %t", err, check)
+    }
+    server.Shutdown(context.Background())
+}*/
diff --git a/service/service.go b/service/service.go
index 9f335a91..4219ad1c 100644
--- a/service/service.go
+++ b/service/service.go
@@ -26,6 +26,9 @@ import (
     "sync"
     "time"
 
+    "github.com/dell/csi-powermax/v2/k8sutils"
+
+    "github.com/dell/dell-csi-extensions/podmon"
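+    // the podmon import above is the dell-csi-extensions resiliency API
+    // (e.g. ValidateVolumeHostConnectivity); its gRPC server is registered
+    // in RegisterAdditionalServers below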
"github.com/fsnotify/fsnotify" "github.com/spf13/viper" @@ -34,7 +37,6 @@ import ( "google.golang.org/grpc" "github.com/container-storage-interface/spec/lib/go/csi" - "github.com/dell/csi-powermax/v2/k8sutils" "github.com/dell/gocsi" csictx "github.com/dell/gocsi/context" "github.com/dell/goiscsi" @@ -47,7 +49,6 @@ import ( migrext "github.com/dell/dell-csi-extensions/migration" csiext "github.com/dell/dell-csi-extensions/replication" pmax "github.com/dell/gopowermax/v2" - v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // Constants for the service @@ -57,12 +58,13 @@ const ( defaultPrivDir = "/dev/disk/csi-powermax" defaultPmaxTimeout = 120 defaultLockCleanupDuration = 4 - defaultU4PVersion = "91" // defaultU4PVersion should be reset to base supported endpoint version for the CSI driver release csiPrefix = "csi-" logFields = "logFields" maxAuthenticateRetryCount = 4 CSILogLevelParam = "CSI_LOG_LEVEL" CSILogFormatParam = "CSI_LOG_FORMAT" + ArrayStatus = "/array-status" + DefaultPodmonPollRate = 60 ) type contextKey string // specific string type used for context keys @@ -118,19 +120,23 @@ type Opts struct { NonDefaultRetries bool // Indicates if non-default retry values to be used for deletion worker, only for unit testing NodeNameTemplate string ModifyHostName bool - ReplicationContextPrefix string // Enables sidecars to read required information from volume context - ReplicationPrefix string // Used as a prefix to find out if replication is enabled - IsHealthMonitorEnabled bool // used to check if health monitor for volume is enabled - IsTopologyControlEnabled bool // used to filter topology keys based on user config - IsVsphereEnabled bool // used to check if vSphere is enabled - VSpherePortGroup string // port group for vsphere - VSphereHostName string // host (initiator group) for vsphere - VCenterHostURL string // vCenter host url - VCenterHostUserName string // vCenter host username - VCenterHostPassword string // vCenter password - MaxVolumesPerNode int64 // to specify volume limits - KubeConfigPath string // to specify k8s configuration to be used CSI driver - IfaceExcludeFilter *regexp.Regexp // regex of interface names to exclude from consideration + ReplicationContextPrefix string // Enables sidecars to read required information from volume context + ReplicationPrefix string // Used as a prefix to find out if replication is enabled + IsHealthMonitorEnabled bool // used to check if health monitor for volume is enabled + IsTopologyControlEnabled bool // used to filter topology keys based on user config + IsVsphereEnabled bool // used to check if vSphere is enabled + VSpherePortGroup string // port group for vsphere + VSphereHostName string // host (initiator group) for vsphere + VCenterHostURL string // vCenter host url + VCenterHostUserName string // vCenter host username + VCenterHostPassword string // vCenter password + MaxVolumesPerNode int64 // to specify volume limits + KubeConfigPath string // to specify k8s configuration to be used CSI driver + IsPodmonEnabled bool // used to indicate that podmon is enabled + PodmonPort string // to indicates the port to be used for exposing podmon API health + PodmonPollingFreq string // indicates the polling frequency to check array connectivity + IfaceExcludeFilter *regexp.Regexp // regex of interface names to exclude from consideration + } // NodeConfig defines rules for given node @@ -162,6 +168,10 @@ type service struct { cacheMutex sync.Mutex nodeProbeMutex sync.Mutex nodeIsInitialized bool + useNFS bool + useFC bool + useIscsi bool + 
+
     iscsiTargets map[string][]string
     // Timeout for storage pool cache
     storagePoolCacheDuration time.Duration
     // only used for testing, indicates if the deletion worker finished populating the queue
@@ -178,12 +188,15 @@ type service struct {
     topologyConfig      *TopologyConfig
     allowedTopologyKeys map[string][]string // map of nodes to allowed topology keys
     deniedTopologyKeys  map[string][]string // map of nodes to denied topology keys
+
+    k8sUtils k8sutils.UtilsInterface
 }
 
 // New returns a new Service.
 func New() Service {
     svc := &service{
         loggedInArrays: map[string]bool{},
+        iscsiTargets:   map[string][]string{},
     }
     svc.sgSvc = newStorageGroupService(svc)
     svc.pmaxTimeoutSeconds = defaultPmaxTimeout
@@ -276,6 +289,9 @@ func (s *service) BeforeServe(
         "VsphereHostNames":    s.opts.VSphereHostName,
         "VsphereHostURL":      s.opts.VCenterHostURL,
         "VsphereHostUsername": s.opts.VCenterHostUserName,
+        "isPodmonEnabled":     s.opts.IsPodmonEnabled,
+        "PodmonPort":          s.opts.PodmonPort,
+        "PodmonFrequency":     s.opts.PodmonPollingFreq,
     }
 
     if s.opts.Password != "" {
@@ -409,6 +425,14 @@ func (s *service) BeforeServe(
         }
     }
 
+    if podmonPort, ok := csictx.LookupEnv(ctx, EnvPodmonArrayConnectivityAPIPORT); ok {
+        opts.PodmonPort = fmt.Sprintf(":%s", podmonPort)
+    }
+
+    if podmonPollRate, ok := csictx.LookupEnv(ctx, EnvPodmonArrayConnectivityPollRate); ok {
+        opts.PodmonPollingFreq = podmonPollRate
+    }
+
     opts.TransportProtocol = s.getTransportProtocolFromEnv()
     opts.ProxyServiceHost, opts.ProxyServicePort, opts.UseProxy = s.getProxySettingsFromEnv()
     if !opts.UseProxy && !inducedMockReverseProxy {
@@ -481,6 +505,7 @@ func (s *service) BeforeServe(
     opts.ModifyHostName = pb(EnvModifyHostName)
     opts.IsHealthMonitorEnabled = pb(EnvHealthMonitorEnabled)
    opts.IsTopologyControlEnabled = pb(EnvTopologyFilterEnabled)
+    opts.IsPodmonEnabled = pb(EnvPodmonEnabled)
     opts.IsVsphereEnabled = pb(EnvVSphereEnabled)
     if opts.IsVsphereEnabled {
         // read port group
@@ -506,6 +531,14 @@ func (s *service) BeforeServe(
     }
     s.opts = opts
 
+    // setup the k8sClient
+    if s.k8sUtils == nil {
+        s.k8sUtils, err = k8sutils.Init(s.opts.KubeConfigPath)
+        if err != nil {
+            return fmt.Errorf("error creating k8sClient %s", err.Error())
+        }
+    }
+
     // setup the iscsi client
     iscsiOpts := make(map[string]string, 0)
     if chroot, ok := csictx.LookupEnv(ctx, EnvISCSIChroot); ok {
@@ -628,6 +661,7 @@ func ReadConfig(configPath string) (*TopologyConfig, error) {
 func (s *service) RegisterAdditionalServers(server *grpc.Server) {
     csiext.RegisterReplicationServer(server, s)
     migrext.RegisterMigrationServer(server, s)
+    podmon.RegisterPodmonServer(server, s)
 }
 
 func (s *service) getProxySettingsFromEnv() (string, string, bool) {
@@ -815,18 +849,15 @@ func getLogFields(ctx context.Context) log.Fields {
     return fields
 }
 
-func (s *service) GetNodeLabels() (map[string]string, error) {
-    k8sclientset, err := k8sutils.CreateKubeClientSet(s.opts.KubeConfigPath)
-    if err != nil {
-        log.Errorf("init client failed: '%s'", err.Error())
-        return nil, err
-    }
-    // access the API to fetch node object
-    node, err := k8sclientset.CoreV1().Nodes().Get(context.TODO(), s.opts.NodeFullName, v1.GetOptions{})
-    if err != nil {
-        return nil, err
+// SetPollingFrequency reads the pollingFrequency from the environment and returns the default value if the ENV is not found
+func (s *service) SetPollingFrequency(ctx context.Context) int64 {
+    var pollingFrequency int64
+    if pollRateEnv, ok := csictx.LookupEnv(ctx, EnvPodmonArrayConnectivityPollRate); ok {
+        if pollingFrequency, _ = strconv.ParseInt(pollRateEnv, 10, 32); pollingFrequency != 0 {
+            log.Debugf("use pollingFrequency as %d seconds", pollingFrequency)
+            return pollingFrequency
+        }
     }
-    log.Debugf("Node %s details\n", node)
-
-    return node.Labels, nil
+    log.Debugf("use default pollingFrequency as %d seconds", DefaultPodmonPollRate)
+    return DefaultPodmonPollRate
 }
diff --git a/service/service_test.go b/service/service_test.go
index 80ebf509..3685230f 100644
--- a/service/service_test.go
+++ b/service/service_test.go
@@ -51,8 +51,9 @@ func TestGoDog(t *testing.T) {
     runOptions := godog.Options{
         Format: "pretty",
         Paths:  []string{"features"},
-        Tags:   "v1.0.0, v1.1.0, v1.2.0, v1.3.0, v1.4.0, v1.5.0, v1.6.0, v2.2.0, v2.3.0, v2.4.0, v2.5.0, v2.6.0, v2.7.0, v2.8.0, v2.9.0",
+        Tags:   "v1.0.0, v1.1.0, v1.2.0, v1.3.0, v1.4.0, v1.5.0, v1.6.0, v2.2.0, v2.3.0, v2.4.0, v2.5.0, v2.6.0, v2.7.0, v2.8.0, v2.9.0, v2.11.0",
         // Tags: "wip",
+        // Tags: "resiliency", // uncomment to run all node resiliency related tests,
     }
     testStatus = godog.TestSuite{
         Name: "CSI Powermax Unit Test",
diff --git a/service/service_unit_test.go b/service/service_unit_test.go
index f96cdc54..e1fdadc5 100644
--- a/service/service_unit_test.go
+++ b/service/service_unit_test.go
@@ -160,7 +160,7 @@ func TestGetVolSize(t *testing.T) {
             RequiredBytes: MinVolumeSizeBytes - 1,
             LimitBytes:    0,
         },
-        numOfCylinders: 0,
+        numOfCylinders: 26,
     },
     {
         // requesting a negative required bytes
diff --git a/service/step_defs_test.go b/service/step_defs_test.go
index 4c6e9aed..faa28be9 100644
--- a/service/step_defs_test.go
+++ b/service/step_defs_test.go
@@ -16,9 +16,11 @@ limitations under the License.
 package service
 
 import (
+    "encoding/json"
     "errors"
     "fmt"
     "net"
+    "net/http"
     "net/http/httptest"
     "os"
     "os/exec"
@@ -28,6 +30,8 @@ import (
     "sync"
     "time"
 
+    "github.com/dell/csi-powermax/v2/k8smock"
+
     "github.com/dell/dell-csi-extensions/common"
 
     "github.com/cucumber/messages-go/v10"
@@ -35,10 +39,10 @@ import (
     migrext "github.com/dell/dell-csi-extensions/migration"
     "github.com/dell/gocsi"
 
-    csiext "github.com/dell/dell-csi-extensions/replication"
-
     csi "github.com/container-storage-interface/spec/lib/go/csi"
     "github.com/cucumber/godog"
+    podmon "github.com/dell/dell-csi-extensions/podmon"
+    csiext "github.com/dell/dell-csi-extensions/replication"
     "github.com/dell/gofsutil"
     "github.com/dell/goiscsi"
     pmax "github.com/dell/gopowermax/v2"
@@ -93,6 +97,7 @@ const (
     defaultISCSIDirPort2 = "SE2-E:4"
     MaxRetries           = 10
     Namespace            = "namespace-test"
+    kubeconfig           = "/etc/kubernetes/admin.conf"
 )
 
 var allBlockDevices = [2]string{nodePublishBlockDevicePath, altPublishBlockDevicePath}
@@ -180,6 +185,7 @@ type feature struct {
     uDevID                     string
     nodeGetVolumeStatsResponse *csi.NodeGetVolumeStatsResponse
     setIOLimits                bool
+    validateVHCResp            *podmon.ValidateVolumeHostConnectivityResponse
 }
 
 var inducedErrors struct {
@@ -315,6 +321,7 @@ func (f *feature) aPowerMaxService() error {
     f.uDevID = ""
     f.nodeGetVolumeStatsResponse = nil
     f.setIOLimits = false
+    f.validateVHCResp = nil
     inducedErrors.invalidSymID = false
     inducedErrors.invalidStoragePool = false
     inducedErrors.invalidServiceLevel = false
@@ -420,7 +427,7 @@ func (f *feature) getService() *service {
     // This is a temp fix and needs to be handled in a different way
     mock.Data.JSONDir = "../../gopowermax/mock"
     svc.loggedInArrays = map[string]bool{}
-
+    svc.iscsiTargets = map[string][]string{}
     var opts Opts
     opts.User = "username"
     opts.Password = "password"
@@ -429,7 +436,7 @@ func (f *feature) getService() *service {
     opts.Insecure = true
     opts.DisableCerts = true
     opts.EnableBlock = true
-    opts.KubeConfigPath = "/etc/kubernetes/admin.conf"
+    opts.KubeConfigPath = kubeconfig
     opts.NodeName, _ = os.Hostname()
     opts.PortGroups = []string{"portgroup1", "portgroup2"}
     mock.AddPortGroup("portgroup1", "ISCSI", []string{defaultISCSIDirPort1, defaultISCSIDirPort2})
@@ -442,6 +449,7 @@ func (f *feature) getService() *service {
     opts.NonDefaultRetries = true
     opts.ModifyHostName = false
     opts.NodeNameTemplate = ""
+    opts.IsPodmonEnabled = true
     opts.Lsmod = `
Module                  Size  Used by
vsock_diag             12610  0
@@ -454,6 +462,7 @@ ip6t_rpfilter          12595  1
     svc.fcConnector = &mockFCGobrick{}
     svc.iscsiConnector = &mockISCSIGobrick{}
     svc.dBusConn = &mockDbusConnection{}
+    svc.k8sUtils = k8smock.Init()
     mockGobrickReset()
     mockgosystemdReset()
     disconnectVolumeRetryTime = 10 * time.Millisecond
@@ -1152,6 +1161,14 @@ func (f *feature) iInduceError(errtype string) error {
         mock.InducedErrors.ExecuteActionError = true
     case "GetFileSystemError":
         mock.InducedErrors.GetFileSystemError = true
+    case "GetArrayPerfKeyError":
+        mock.InducedErrors.GetArrayPerfKeyError = true
+    case "GetVolumesMetricsError":
+        mock.InducedErrors.GetVolumesMetricsError = true
+    case "GetFileSysMetricsError":
+        mock.InducedErrors.GetFileSysMetricsError = true
+    case "GetFreshMetrics":
+        mock.InducedErrors.GetFreshMetrics = true
     case "NoSymlinkForNodePublish":
         cmd := exec.Command("rm", "-rf", nodePublishSymlinkDir)
         _, err := cmd.CombinedOutput()
@@ -2567,6 +2584,7 @@ func (f *feature) iCallBeforeServe() error {
     ctxOSEnviron := interface{}("os.Environ")
     stringSlice := f.getTypicalEnviron()
     stringSlice = append(stringSlice, EnvClusterPrefix+"=TST")
+    f.service.k8sUtils = k8smock.Init()
     ctx := context.WithValue(context.Background(), ctxOSEnviron, stringSlice)
     listener, err := net.Listen("tcp", "127.0.0.1:65000")
     if err != nil {
@@ -2580,6 +2598,7 @@ func (f *feature) iCallBeforeServeWithoutClusterPrefix() error {
     ctxOSEnviron := interface{}("os.Environ")
     stringSlice := f.getTypicalEnviron()
+    f.service.k8sUtils = k8smock.Init()
     ctx := context.WithValue(context.Background(), ctxOSEnviron, stringSlice)
     listener, err := net.Listen("tcp", "127.0.0.1:65000")
     if err != nil {
@@ -2593,6 +2612,7 @@ func (f *feature) iCallBeforeServeWithAnInvalidClusterPrefix() error {
     ctxOSEnviron := interface{}("os.Environ")
     stringSlice := f.getTypicalEnviron()
+    f.service.k8sUtils = k8smock.Init()
     stringSlice = append(stringSlice, EnvClusterPrefix+"=LONG")
     ctx := context.WithValue(context.Background(), ctxOSEnviron, stringSlice)
     listener, err := net.Listen("tcp", "127.0.0.1:65000")
@@ -2612,6 +2632,7 @@ func (f *feature) iCallBeforeServeWithTopologyConfigSetAt(path string) error {
         stringSlice = append(stringSlice, EnvTopoConfigFilePath+"="+path)
     }
     stringSlice = append(stringSlice, gocsi.EnvVarMode+"=node")
+    f.service.k8sUtils = k8smock.Init()
     ctx := context.WithValue(context.Background(), ctxOSEnviron, stringSlice)
     listener, err := net.Listen("tcp", "127.0.0.1:65000")
     if err != nil {
@@ -4631,6 +4652,93 @@ func (f *feature) iCallFileSystemDeleteVolume() error {
     return nil
 }
 
+func (f *feature) validateVolumeHostConnectivityCallIsValid() error {
+    if !strings.Contains(f.validateVHCResp.Messages[0], "ValidateVolumeHostConnectivity is implemented") {
+        return errors.New("validateVolumeHostConnectivity is not implemented")
+    }
+    return nil
+}
+
+func (f *feature) iCallValidateVolumeHostConnectivity() error {
+    header := metadata.New(map[string]string{"csi.requestid": "1"})
+    ctx := metadata.NewIncomingContext(context.Background(), header)
+    req := &podmon.ValidateVolumeHostConnectivityRequest{}
+    f.validateVHCResp, f.err = f.service.ValidateVolumeHostConnectivity(ctx, req)
+    if f.err != nil {
+        log.Printf("error in ValidateVolumeHostConnectivity: %s", f.err.Error())
+    }
+    return nil
+}
+
+func (f *feature) iCallValidateVolumeHostConnectivityWithAndSymID(nodeID, symID string) error {
+    header := metadata.New(map[string]string{"csi.requestid": "1"})
+    ctx := metadata.NewIncomingContext(context.Background(), header)
+    req := &podmon.ValidateVolumeHostConnectivityRequest{NodeId: nodeID}
+    if symID == "default" {
+        req.ArrayId = f.symmetrixID
+    }
+    if symID == "fromVolID" {
+        req.VolumeIds = []string{f.volumeID}
+        if inducedErrors.invalidVolumeID {
+            req.VolumeIds = []string{"000-000"}
+        }
+    }
+    if nodeID == "no-node" {
+        req.NodeId = ""
+        req.VolumeIds = []string{f.volumeID}
+    }
+    switch nodeID {
+    case "connected-node":
+        req.NodeId = "node1-127.0.0.1"
+        req.VolumeIds = []string{f.volumeID}
+    case "connected-node-faultyVolID":
+        req.NodeId = "node1-127.0.0.1"
+        faultyVolID := strings.Replace(f.volumeID, f.symmetrixID, "000197900000", 1)
+        req.VolumeIds = []string{faultyVolID}
+    }
+    f.validateVHCResp, f.err = f.service.ValidateVolumeHostConnectivity(ctx, req)
+    if f.err != nil {
+        log.Printf("error in ValidateVolumeHostConnectivity: %s", f.err.Error())
+    }
+    return nil
+}
+
+func (f *feature) iStartNodeAPIServer() {
+    var status ArrayConnectivityStatus
+    status.LastAttempt = time.Now().Unix()
+    status.LastSuccess = time.Now().Unix()
+    input, _ := json.Marshal(status)
+
+    // respond with a dummy status whose LastSuccess equals LastAttempt, i.e. the connected case
+    http.HandleFunc(ArrayStatus+"/"+f.symmetrixID, func(w http.ResponseWriter, _ *http.Request) {
+        w.Write(input)
+    })
+
+    f.service.opts.PodmonPort = ":9028"
+    fmt.Printf("Starting server at port %s\n", f.service.opts.PodmonPort)
+    go http.ListenAndServe(f.service.opts.PodmonPort, nil) // #nosec G114
+}
+
+func (f *feature) iCallIsIOInProgress() error {
+    symID := f.symmetrixID
+    if inducedErrors.invalidSymID {
+        symID = ""
+    }
+    _, _, devID, _, _, _ := s.parseCsiID(f.volumeID)
+    f.err = f.service.IsIOInProgress(context.Background(), devID, symID)
+    return nil
+}
+
+func (f *feature) theValidateVolumeHostMessageContains(msg string) error {
+    if !strings.Contains(f.validateVHCResp.Messages[0], msg) {
+        errMsg := fmt.Sprintf("validateVolumeHostConnectivity response is incorrect, expected: %s actual %s", msg, f.validateVHCResp.Messages[0])
+        return errors.New(errMsg)
+    }
+    return nil
+}
+
 func FeatureContext(s *godog.ScenarioContext) {
     f := &feature{}
     s.Step(`^a PowerMax service$`, f.aPowerMaxService)
@@ -4867,4 +4975,10 @@ func FeatureContext(s *godog.ScenarioContext) {
     s.Step(`^I mix the RDF personalities$`, f.iMixTheRDFPersonalities)
     s.Step(`^I call fileSystem CreateVolume "([^"]*)"$`, f.iCallFileSystemCreateVolume)
     s.Step(`^I call fileSystem DeleteVolume$`, f.iCallFileSystemDeleteVolume)
+    s.Step(`^ValidateVolumeHostConnectivity is valid$`, f.validateVolumeHostConnectivityCallIsValid)
+    s.Step(`^I call ValidateVolumeHostConnectivity$`, f.iCallValidateVolumeHostConnectivity)
+    s.Step(`^I call ValidateVolumeHostConnectivity with "([^"]*)" and symID "([^"]*)"$`, f.iCallValidateVolumeHostConnectivityWithAndSymID)
+    s.Step(`^the ValidateVolumeHost message contains "([^"]*)"$`, f.theValidateVolumeHostMessageContains)
+    s.Step(`^I start node API server$`, f.iStartNodeAPIServer)
+    s.Step(`^I call IsIOInProgress$`, f.iCallIsIOInProgress)
 }
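Note: the commented-out QueryArrayStatus tests above imply a podmon-side caller that polls a node's /array-status/{symID} endpoint and derives connectivity from the returned LastAttempt/LastSuccess timestamps. The sketch below illustrates such a caller; the port, the array ID, the JSON field names, and the reachability rule are assumptions for illustration, not part of this change.

package main

import (
    "encoding/json"
    "fmt"
    "net/http"
    "time"
)

// arrayConnectivityStatus mirrors the driver's ArrayConnectivityStatus struct;
// the JSON tags shown here are assumed for illustration.
type arrayConnectivityStatus struct {
    LastSuccess int64 `json:"lastSuccess"`
    LastAttempt int64 `json:"lastAttempt"`
}

// arrayReachable queries a node's array-status endpoint and treats the array
// as reachable when the most recent probe attempt succeeded, mirroring the
// expectations of the commented-out QueryArrayStatus tests.
func arrayReachable(endpoint string) (bool, error) {
    client := http.Client{Timeout: 10 * time.Second}
    resp, err := client.Get(endpoint)
    if err != nil {
        return false, err
    }
    defer resp.Body.Close()
    if resp.StatusCode != http.StatusOK {
        return false, fmt.Errorf("unexpected status %d", resp.StatusCode)
    }
    var st arrayConnectivityStatus
    if err := json.NewDecoder(resp.Body).Decode(&st); err != nil {
        return false, err
    }
    // equal timestamps mean the last probe attempt was a success
    return st.LastSuccess == st.LastAttempt, nil
}

func main() {
    // port 8083 and the array ID are hypothetical placeholders
    ok, err := arrayReachable("http://localhost:8083/array-status/000197900046")
    fmt.Println(ok, err)
}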