/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"fmt"
"strings"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2"
apierrs "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/api/core/v1"
rbacv1beta1 "k8s.io/api/rbac/v1beta1"
storage "k8s.io/api/storage/v1"
storagebeta "k8s.io/api/storage/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/authentication/serviceaccount"
clientset "k8s.io/client-go/kubernetes"
storageutil "k8s.io/kubernetes/pkg/apis/storage/v1/util"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/test/e2e/framework"
"k8s.io/kubernetes/test/e2e/framework/providers/gce"
"k8s.io/kubernetes/test/e2e/storage/testsuites"
"k8s.io/kubernetes/test/e2e/storage/utils"
)
const (
// Plugin name of the external provisioner
externalPluginName = "example.com/nfs"
// Number of PVCs for multi PVC tests
multiPVCcount = 3
)
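// testBindingWaitForFirstConsumer runs the WaitForFirstConsumer binding test with a single claim and
// returns the provisioned PV together with the node the consuming pod was scheduled to.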
func testBindingWaitForFirstConsumer(client clientset.Interface, claim *v1.PersistentVolumeClaim, class *storage.StorageClass) (*v1.PersistentVolume, *v1.Node) {
pvs, node := testBindingWaitForFirstConsumerMultiPVC(client, []*v1.PersistentVolumeClaim{claim}, class)
return pvs[0], node
}
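// testBindingWaitForFirstConsumerMultiPVC creates the storage class and the given claims, checks that the
// claims stay Pending, then creates a pod using all of them and verifies that every claim becomes Bound.
// It returns the provisioned PVs and the node the pod landed on.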
func testBindingWaitForFirstConsumerMultiPVC(client clientset.Interface, claims []*v1.PersistentVolumeClaim, class *storage.StorageClass) ([]*v1.PersistentVolume, *v1.Node) {
var err error
Expect(len(claims)).ToNot(Equal(0))
namespace := claims[0].Namespace
By("creating a storage class " + class.Name)
class, err = client.StorageV1().StorageClasses().Create(class)
Expect(err).NotTo(HaveOccurred())
defer deleteStorageClass(client, class.Name)
By("creating claims")
var claimNames []string
var createdClaims []*v1.PersistentVolumeClaim
for _, claim := range claims {
c, err := client.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim)
claimNames = append(claimNames, c.Name)
createdClaims = append(createdClaims, c)
Expect(err).NotTo(HaveOccurred())
}
defer func() {
errors := make(map[string]error)
for _, claim := range createdClaims {
err := framework.DeletePersistentVolumeClaim(client, claim.Name, claim.Namespace)
if err != nil {
errors[claim.Name] = err
}
}
if len(errors) > 0 {
for claimName, err := range errors {
framework.Logf("Failed to delete PVC: %s due to error: %v", claimName, err)
}
}
}()
// Wait for ClaimProvisionShortTimeout (across all PVCs in parallel) and make sure the phase does not
// become Bound, i.e. the wait errors out
By("checking the claims are in pending state")
err = framework.WaitForPersistentVolumeClaimsPhase(v1.ClaimBound, client, namespace, claimNames, 2*time.Second, framework.ClaimProvisionShortTimeout, true)
Expect(err).To(HaveOccurred())
for _, claim := range createdClaims {
// Get new copy of the claim
claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(claim.Status.Phase).To(Equal(v1.ClaimPending))
}
By("creating a pod referring to the claims")
// Create a pod referring to the claim and wait for it to get to running
pod, err := framework.CreatePod(client, namespace, nil /* nodeSelector */, createdClaims, true /* isPrivileged */, "" /* command */)
Expect(err).NotTo(HaveOccurred())
defer func() {
framework.DeletePodOrFail(client, pod.Namespace, pod.Name)
}()
// collect node details
node, err := client.CoreV1().Nodes().Get(pod.Spec.NodeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
By("re-checking the claims to see they binded")
var pvs []*v1.PersistentVolume
for _, claim := range createdClaims {
// Get new copy of the claim
claim, err = client.CoreV1().PersistentVolumeClaims(claim.Namespace).Get(claim.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
// make sure claim did bind
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, client, claim.Namespace, claim.Name, framework.Poll, framework.ClaimProvisionTimeout)
Expect(err).NotTo(HaveOccurred())
pv, err := client.CoreV1().PersistentVolumes().Get(claim.Spec.VolumeName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
pvs = append(pvs, pv)
}
Expect(len(pvs)).ToNot(Equal(0))
return pvs, node
}
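// checkZoneFromLabelAndAffinity is a single-zone convenience wrapper around checkZonesFromLabelAndAffinity.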
func checkZoneFromLabelAndAffinity(pv *v1.PersistentVolume, zone string, matchZone bool) {
checkZonesFromLabelAndAffinity(pv, sets.NewString(zone), matchZone)
}
// checkZonesFromLabelAndAffinity checks that the PV's LabelZoneFailureDomain label and the node affinity
// terms with key LabelZoneFailureDomain contain the expected zones.
// matchZones indicates whether the zones must match exactly (true) or merely be contained (false).
func checkZonesFromLabelAndAffinity(pv *v1.PersistentVolume, zones sets.String, matchZones bool) {
By("checking PV's zone label and node affinity terms match expected zone")
if pv == nil {
framework.Failf("nil pv passed")
}
pvLabel, ok := pv.Labels[kubeletapis.LabelZoneFailureDomain]
if !ok {
framework.Failf("label %s not found on PV", kubeletapis.LabelZoneFailureDomain)
}
zonesFromLabel, err := volumeutil.LabelZonesToSet(pvLabel)
if err != nil {
framework.Failf("unable to parse zone labels %s: %v", pvLabel, err)
}
if matchZones && !zonesFromLabel.Equal(zones) {
framework.Failf("value[s] of %s label for PV: %v does not match expected zone[s]: %v", kubeletapis.LabelZoneFailureDomain, zonesFromLabel, zones)
}
if !matchZones && !zonesFromLabel.IsSuperset(zones) {
framework.Failf("value[s] of %s label for PV: %v does not contain expected zone[s]: %v", kubeletapis.LabelZoneFailureDomain, zonesFromLabel, zones)
}
if pv.Spec.NodeAffinity == nil {
framework.Failf("node affinity not found in PV spec %v", pv.Spec)
}
if len(pv.Spec.NodeAffinity.Required.NodeSelectorTerms) == 0 {
framework.Failf("node selector terms not found in PV spec %v", pv.Spec)
}
for _, term := range pv.Spec.NodeAffinity.Required.NodeSelectorTerms {
keyFound := false
for _, r := range term.MatchExpressions {
if r.Key != kubeletapis.LabelZoneFailureDomain {
continue
}
keyFound = true
zonesFromNodeAffinity := sets.NewString(r.Values...)
if matchZones && !zonesFromNodeAffinity.Equal(zones) {
framework.Failf("zones from NodeAffinity of PV: %v does not equal expected zone[s]: %v", zonesFromNodeAffinity, zones)
}
if !matchZones && !zonesFromNodeAffinity.IsSuperset(zones) {
framework.Failf("zones from NodeAffinity of PV: %v does not contain expected zone[s]: %v", zonesFromNodeAffinity, zones)
}
break
}
if !keyFound {
framework.Failf("label %s not found in term %v", kubeletapis.LabelZoneFailureDomain, term)
}
}
}
// checkAWSEBS checks properties of an AWS EBS volume. The test framework does not
// instantiate the full AWS provider, therefore we need to use the EC2 API directly.
func checkAWSEBS(volume *v1.PersistentVolume, volumeType string, encrypted bool) error {
diskName := volume.Spec.AWSElasticBlockStore.VolumeID
var client *ec2.EC2
tokens := strings.Split(diskName, "/")
volumeID := tokens[len(tokens)-1]
zone := framework.TestContext.CloudConfig.Zone
if len(zone) > 0 {
region := zone[:len(zone)-1]
cfg := aws.Config{Region: &region}
framework.Logf("using region %s", region)
client = ec2.New(session.New(), &cfg)
} else {
framework.Logf("no region configured")
client = ec2.New(session.New())
}
request := &ec2.DescribeVolumesInput{
VolumeIds: []*string{&volumeID},
}
info, err := client.DescribeVolumes(request)
if err != nil {
return fmt.Errorf("error querying ec2 for volume %q: %v", volumeID, err)
}
if len(info.Volumes) == 0 {
return fmt.Errorf("no volumes found for volume %q", volumeID)
}
if len(info.Volumes) > 1 {
return fmt.Errorf("multiple volumes found for volume %q", volumeID)
}
awsVolume := info.Volumes[0]
if awsVolume.VolumeType == nil {
return fmt.Errorf("expected volume type %q, got nil", volumeType)
}
if *awsVolume.VolumeType != volumeType {
return fmt.Errorf("expected volume type %q, got %q", volumeType, *awsVolume.VolumeType)
}
if encrypted && awsVolume.Encrypted == nil {
return fmt.Errorf("expected encrypted volume, got no encryption")
}
if encrypted && !*awsVolume.Encrypted {
return fmt.Errorf("expected encrypted volume, got %v", *awsVolume.Encrypted)
}
return nil
}
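// checkGCEPD verifies that the GCE PD backing the PV has the expected disk type suffix (e.g. "pd-ssd").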
func checkGCEPD(volume *v1.PersistentVolume, volumeType string) error {
cloud, err := gce.GetGCECloud()
if err != nil {
return err
}
diskName := volume.Spec.GCEPersistentDisk.PDName
disk, err := cloud.GetDiskByNameUnknownZone(diskName)
if err != nil {
return err
}
if !strings.HasSuffix(disk.Type, volumeType) {
return fmt.Errorf("unexpected disk type %q, expected suffix %q", disk.Type, volumeType)
}
return nil
}
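// testZonalDelayedBinding exercises delayed (WaitForFirstConsumer) binding for zonal cloud volumes,
// optionally pinning provisioning to a random zone via allowedTopologies, and checks that the provisioned
// PVs end up in the same zone as the node that runs the consuming pod.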
func testZonalDelayedBinding(c clientset.Interface, ns string, specifyAllowedTopology bool, pvcCount int) {
storageClassTestNameFmt := "Delayed binding %s storage class test %s"
storageClassTestNameSuffix := ""
if specifyAllowedTopology {
storageClassTestNameSuffix += " with AllowedTopologies"
}
tests := []testsuites.StorageClassTest{
{
Name: fmt.Sprintf(storageClassTestNameFmt, "EBS", storageClassTestNameSuffix),
CloudProviders: []string{"aws"},
Provisioner: "kubernetes.io/aws-ebs",
ClaimSize: "2Gi",
DelayBinding: true,
},
{
Name: fmt.Sprintf(storageClassTestNameFmt, "GCE PD", storageClassTestNameSuffix),
CloudProviders: []string{"gce", "gke"},
Provisioner: "kubernetes.io/gce-pd",
ClaimSize: "2Gi",
DelayBinding: true,
},
}
for _, test := range tests {
if !framework.ProviderIs(test.CloudProviders...) {
framework.Logf("Skipping %q: cloud providers is not %v", test.Name, test.CloudProviders)
continue
}
action := "creating claims with class with waitForFirstConsumer"
suffix := "delayed"
var topoZone string
class := newStorageClass(test, ns, suffix)
if specifyAllowedTopology {
action += " and allowedTopologies"
suffix += "-topo"
topoZone = getRandomCloudZone(c)
addSingleZoneAllowedTopologyToStorageClass(c, class, topoZone)
}
By(action)
var claims []*v1.PersistentVolumeClaim
for i := 0; i < pvcCount; i++ {
claim := newClaim(test, ns, suffix)
claim.Spec.StorageClassName = &class.Name
claims = append(claims, claim)
}
pvs, node := testBindingWaitForFirstConsumerMultiPVC(c, claims, class)
if node == nil {
framework.Failf("unexpected nil node found")
}
zone, ok := node.Labels[kubeletapis.LabelZoneFailureDomain]
if !ok {
framework.Failf("label %s not found on Node", kubeletapis.LabelZoneFailureDomain)
}
if specifyAllowedTopology && topoZone != zone {
framework.Failf("zone specified in allowedTopologies: %s does not match zone of node where PV got provisioned: %s", topoZone, zone)
}
for _, pv := range pvs {
checkZoneFromLabelAndAffinity(pv, zone, true)
}
}
}
var _ = utils.SIGDescribe("Dynamic Provisioning", func() {
f := framework.NewDefaultFramework("volume-provisioning")
// filled in BeforeEach
var c clientset.Interface
var ns string
BeforeEach(func() {
c = f.ClientSet
ns = f.Namespace.Name
})
Describe("DynamicProvisioner [Slow]", func() {
It("should provision storage with different parameters", func() {
cloudZone := getRandomCloudZone(c)
// This test checks that dynamic provisioning can provision a volume
// that can be used to persist data among pods.
tests := []testsuites.StorageClassTest{
// GCE/GKE
{
Name: "SSD PD on GCE/GKE",
CloudProviders: []string{"gce", "gke"},
Provisioner: "kubernetes.io/gce-pd",
Parameters: map[string]string{
"type": "pd-ssd",
"zone": cloudZone,
},
ClaimSize: "1.5Gi",
ExpectedSize: "2Gi",
PvCheck: func(volume *v1.PersistentVolume) error {
return checkGCEPD(volume, "pd-ssd")
},
},
{
Name: "HDD PD on GCE/GKE",
CloudProviders: []string{"gce", "gke"},
Provisioner: "kubernetes.io/gce-pd",
Parameters: map[string]string{
"type": "pd-standard",
},
ClaimSize: "1.5Gi",
ExpectedSize: "2Gi",
PvCheck: func(volume *v1.PersistentVolume) error {
return checkGCEPD(volume, "pd-standard")
},
},
// AWS
{
Name: "gp2 EBS on AWS",
CloudProviders: []string{"aws"},
Provisioner: "kubernetes.io/aws-ebs",
Parameters: map[string]string{
"type": "gp2",
"zone": cloudZone,
},
ClaimSize: "1.5Gi",
ExpectedSize: "2Gi",
PvCheck: func(volume *v1.PersistentVolume) error {
return checkAWSEBS(volume, "gp2", false)
},
},
{
Name: "io1 EBS on AWS",
CloudProviders: []string{"aws"},
Provisioner: "kubernetes.io/aws-ebs",
Parameters: map[string]string{
"type": "io1",
"iopsPerGB": "50",
},
ClaimSize: "3.5Gi",
ExpectedSize: "4Gi", // 4 GiB is minimum for io1
PvCheck: func(volume *v1.PersistentVolume) error {
return checkAWSEBS(volume, "io1", false)
},
},
{
Name: "sc1 EBS on AWS",
CloudProviders: []string{"aws"},
Provisioner: "kubernetes.io/aws-ebs",
Parameters: map[string]string{
"type": "sc1",
},
ClaimSize: "500Gi", // minimum for sc1
ExpectedSize: "500Gi",
PvCheck: func(volume *v1.PersistentVolume) error {
return checkAWSEBS(volume, "sc1", false)
},
},
{
Name: "st1 EBS on AWS",
CloudProviders: []string{"aws"},
Provisioner: "kubernetes.io/aws-ebs",
Parameters: map[string]string{
"type": "st1",
},
ClaimSize: "500Gi", // minimum for st1
ExpectedSize: "500Gi",
PvCheck: func(volume *v1.PersistentVolume) error {
return checkAWSEBS(volume, "st1", false)
},
},
{
Name: "encrypted EBS on AWS",
CloudProviders: []string{"aws"},
Provisioner: "kubernetes.io/aws-ebs",
Parameters: map[string]string{
"encrypted": "true",
},
ClaimSize: "1Gi",
ExpectedSize: "1Gi",
PvCheck: func(volume *v1.PersistentVolume) error {
return checkAWSEBS(volume, "gp2", true)
},
},
// OpenStack generic tests (works on all OpenStack deployments)
{
Name: "generic Cinder volume on OpenStack",
CloudProviders: []string{"openstack"},
Provisioner: "kubernetes.io/cinder",
Parameters: map[string]string{},
ClaimSize: "1.5Gi",
ExpectedSize: "2Gi",
PvCheck: nil, // there is currently nothing to check on OpenStack
},
{
Name: "Cinder volume with empty volume type and zone on OpenStack",
CloudProviders: []string{"openstack"},
Provisioner: "kubernetes.io/cinder",
Parameters: map[string]string{
"type": "",
"availability": "",
},
ClaimSize: "1.5Gi",
ExpectedSize: "2Gi",
PvCheck: nil, // there is currently nothing to check on OpenStack
},
// vSphere generic test
{
Name: "generic vSphere volume",
CloudProviders: []string{"vsphere"},
Provisioner: "kubernetes.io/vsphere-volume",
Parameters: map[string]string{},
ClaimSize: "1.5Gi",
ExpectedSize: "1.5Gi",
PvCheck: nil,
},
// Azure
{
Name: "Azure disk volume with empty sku and location",
CloudProviders: []string{"azure"},
Provisioner: "kubernetes.io/azure-disk",
Parameters: map[string]string{},
ClaimSize: "1Gi",
ExpectedSize: "1Gi",
PvCheck: nil,
},
}
var betaTest *testsuites.StorageClassTest
for i, t := range tests {
// Beware of closures: use local variables instead of those from the
// outer scope
test := t
if !framework.ProviderIs(test.CloudProviders...) {
framework.Logf("Skipping %q: cloud providers is not %v", test.Name, test.CloudProviders)
continue
}
// Remember the last supported test for subsequent test of beta API
betaTest = &test
By("Testing " + test.Name)
suffix := fmt.Sprintf("%d", i)
class := newStorageClass(test, ns, suffix)
claim := newClaim(test, ns, suffix)
claim.Spec.StorageClassName = &class.Name
testsuites.TestDynamicProvisioning(test, c, claim, class)
}
// Run the last test with storage.k8s.io/v1beta1 on pvc
if betaTest != nil {
By("Testing " + betaTest.Name + " with beta volume provisioning")
class := newBetaStorageClass(*betaTest, "beta")
// we need to create the class manually, TestDynamicProvisioning does not accept a beta class
class, err := c.StorageV1beta1().StorageClasses().Create(class)
Expect(err).NotTo(HaveOccurred())
defer deleteStorageClass(c, class.Name)
claim := newClaim(*betaTest, ns, "beta")
claim.Spec.StorageClassName = &(class.Name)
testsuites.TestDynamicProvisioning(*betaTest, c, claim, nil)
}
})
It("should provision storage with non-default reclaim policy Retain", func() {
framework.SkipUnlessProviderIs("gce", "gke")
test := testsuites.StorageClassTest{
Name: "HDD PD on GCE/GKE",
CloudProviders: []string{"gce", "gke"},
Provisioner: "kubernetes.io/gce-pd",
Parameters: map[string]string{
"type": "pd-standard",
},
ClaimSize: "1Gi",
ExpectedSize: "1Gi",
PvCheck: func(volume *v1.PersistentVolume) error {
return checkGCEPD(volume, "pd-standard")
},
}
class := newStorageClass(test, ns, "reclaimpolicy")
retain := v1.PersistentVolumeReclaimRetain
class.ReclaimPolicy = &retain
claim := newClaim(test, ns, "reclaimpolicy")
claim.Spec.StorageClassName = &class.Name
pv := testsuites.TestDynamicProvisioning(test, c, claim, class)
By(fmt.Sprintf("waiting for the provisioned PV %q to enter phase %s", pv.Name, v1.VolumeReleased))
framework.ExpectNoError(framework.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 1*time.Second, 30*time.Second))
By(fmt.Sprintf("deleting the storage asset backing the PV %q", pv.Name))
framework.ExpectNoError(framework.DeletePDWithRetry(pv.Spec.GCEPersistentDisk.PDName))
By(fmt.Sprintf("deleting the PV %q", pv.Name))
framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name), "Failed to delete PV ", pv.Name)
framework.ExpectNoError(framework.WaitForPersistentVolumeDeleted(c, pv.Name, 1*time.Second, 30*time.Second))
})
It("should not provision a volume in an unmanaged GCE zone.", func() {
framework.SkipUnlessProviderIs("gce", "gke")
suffix := "unmanaged"
By("Discovering an unmanaged zone")
allZones := sets.NewString() // all zones in the project
managedZones := sets.NewString() // subset of allZones
gceCloud, err := gce.GetGCECloud()
Expect(err).NotTo(HaveOccurred())
// Get all k8s managed zones (same as zones with nodes in them for test)
managedZones, err = gceCloud.GetAllZonesFromCloudProvider()
Expect(err).NotTo(HaveOccurred())
// Get a list of all zones in the project
zones, err := gceCloud.ComputeServices().GA.Zones.List(framework.TestContext.CloudConfig.ProjectID).Do()
Expect(err).NotTo(HaveOccurred())
for _, z := range zones.Items {
allZones.Insert(z.Name)
}
// Get the subset of zones not managed by k8s
var unmanagedZone string
var popped bool
unmanagedZones := allZones.Difference(managedZones)
// And select one of them at random.
if unmanagedZone, popped = unmanagedZones.PopAny(); !popped {
framework.Skipf("No unmanaged zones found.")
}
By("Creating a StorageClass for the unmanaged zone")
test := testsuites.StorageClassTest{
Name: "unmanaged_zone",
Provisioner: "kubernetes.io/gce-pd",
Parameters: map[string]string{"zone": unmanagedZone},
ClaimSize: "1Gi",
}
sc := newStorageClass(test, ns, suffix)
sc, err = c.StorageV1().StorageClasses().Create(sc)
Expect(err).NotTo(HaveOccurred())
defer deleteStorageClass(c, sc.Name)
By("Creating a claim and expecting it to timeout")
pvc := newClaim(test, ns, suffix)
pvc.Spec.StorageClassName = &sc.Name
pvc, err = c.CoreV1().PersistentVolumeClaims(ns).Create(pvc)
Expect(err).NotTo(HaveOccurred())
defer func() {
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, pvc.Name, ns), "Failed to delete PVC ", pvc.Name)
}()
// The claim should stay in phase Pending and the wait should time out
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, pvc.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
Expect(err).To(HaveOccurred())
framework.Logf(err.Error())
})
It("should test that deleting a claim before the volume is provisioned deletes the volume.", func() {
// This case tests for regression of a bug fixed by PR #21268
// REGRESSION: Deleting the PVC before the PV is provisioned can result in the PV
// not being deleted.
// NOTE: Polls until no PVs are detected, times out at 5 minutes.
framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure")
const raceAttempts int = 100
var residualPVs []*v1.PersistentVolume
By(fmt.Sprintf("Creating and deleting PersistentVolumeClaims %d times", raceAttempts))
test := testsuites.StorageClassTest{
Name: "deletion race",
Provisioner: "", // Use a native one based on current cloud provider
ClaimSize: "1Gi",
}
class := newStorageClass(test, ns, "race")
class, err := c.StorageV1().StorageClasses().Create(class)
Expect(err).NotTo(HaveOccurred())
defer deleteStorageClass(c, class.Name)
// To increase chance of detection, attempt multiple iterations
for i := 0; i < raceAttempts; i++ {
suffix := fmt.Sprintf("race-%d", i)
claim := newClaim(test, ns, suffix)
claim.Spec.StorageClassName = &class.Name
tmpClaim, err := framework.CreatePVC(c, ns, claim)
Expect(err).NotTo(HaveOccurred())
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, tmpClaim.Name, ns))
}
By(fmt.Sprintf("Checking for residual PersistentVolumes associated with StorageClass %s", class.Name))
residualPVs, err = waitForProvisionedVolumesDeleted(c, class.Name)
Expect(err).NotTo(HaveOccurred())
// Cleanup the test resources before breaking
defer deleteProvisionedVolumesAndDisks(c, residualPVs)
// Report indicators of regression
if len(residualPVs) > 0 {
framework.Logf("Remaining PersistentVolumes:")
for i, pv := range residualPVs {
framework.Logf("\t%d) %s", i+1, pv.Name)
}
framework.Failf("Expected 0 PersistentVolumes remaining. Found %d", len(residualPVs))
}
framework.Logf("0 PersistentVolumes remain.")
})
It("deletion should be idempotent", func() {
// This test ensures that deletion of a volume is idempotent.
// It creates a PV with Retain policy, deletes underlying AWS / GCE
// volume and changes the reclaim policy to Delete.
// PV controller should delete the PV even though the underlying volume
// is already deleted.
framework.SkipUnlessProviderIs("gce", "gke", "aws")
By("creating PD")
diskName, err := framework.CreatePDWithRetry()
framework.ExpectNoError(err)
By("creating PV")
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "volume-idempotent-delete-",
},
Spec: v1.PersistentVolumeSpec{
// Use Retain to keep the PV, the test will change it to Delete
// when the time comes.
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimRetain,
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse("1Gi"),
},
// PV is bound to a non-existing PVC, so its reclaim policy is
// executed immediately
ClaimRef: &v1.ObjectReference{
Kind: "PersistentVolumeClaim",
APIVersion: "v1",
UID: types.UID("01234567890"),
Namespace: ns,
Name: "dummy-claim-name",
},
},
}
switch framework.TestContext.Provider {
case "aws":
pv.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{
AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
VolumeID: diskName,
},
}
case "gce", "gke":
pv.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: diskName,
},
}
}
pv, err = c.CoreV1().PersistentVolumes().Create(pv)
framework.ExpectNoError(err)
By("waiting for the PV to get Released")
err = framework.WaitForPersistentVolumePhase(v1.VolumeReleased, c, pv.Name, 2*time.Second, framework.PVReclaimingTimeout)
framework.ExpectNoError(err)
By("deleting the PD")
err = framework.DeletePVSource(&pv.Spec.PersistentVolumeSource)
framework.ExpectNoError(err)
By("changing the PV reclaim policy")
pv, err = c.CoreV1().PersistentVolumes().Get(pv.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
pv.Spec.PersistentVolumeReclaimPolicy = v1.PersistentVolumeReclaimDelete
pv, err = c.CoreV1().PersistentVolumes().Update(pv)
framework.ExpectNoError(err)
By("waiting for the PV to get deleted")
err = framework.WaitForPersistentVolumeDeleted(c, pv.Name, 5*time.Second, framework.PVDeletingTimeout)
Expect(err).NotTo(HaveOccurred())
})
})
Describe("DynamicProvisioner External", func() {
It("should let an external dynamic provisioner create and delete persistent volumes [Slow]", func() {
// external dynamic provisioner pods need additional permissions provided by the
// persistent-volume-provisioner clusterrole and a leader-locking role
serviceAccountName := "default"
subject := rbacv1beta1.Subject{
Kind: rbacv1beta1.ServiceAccountKind,
Namespace: ns,
Name: serviceAccountName,
}
framework.BindClusterRole(c.RbacV1beta1(), "system:persistent-volume-provisioner", ns, subject)
roleName := "leader-locking-nfs-provisioner"
_, err := f.ClientSet.RbacV1beta1().Roles(ns).Create(&rbacv1beta1.Role{
ObjectMeta: metav1.ObjectMeta{
Name: roleName,
},
Rules: []rbacv1beta1.PolicyRule{{
APIGroups: []string{""},
Resources: []string{"endpoints"},
Verbs: []string{"get", "list", "watch", "create", "update", "patch"},
}},
})
framework.ExpectNoError(err, "Failed to create leader-locking role")
framework.BindRoleInNamespace(c.RbacV1beta1(), roleName, ns, subject)
err = framework.WaitForAuthorizationUpdate(c.AuthorizationV1beta1(),
serviceaccount.MakeUsername(ns, serviceAccountName),
"", "get", schema.GroupResource{Group: "storage.k8s.io", Resource: "storageclasses"}, true)
framework.ExpectNoError(err, "Failed to update authorization")
By("creating an external dynamic provisioner pod")
pod := utils.StartExternalProvisioner(c, ns, externalPluginName)
defer framework.DeletePodOrFail(c, ns, pod.Name)
By("creating a StorageClass")
test := testsuites.StorageClassTest{
Name: "external provisioner test",
Provisioner: externalPluginName,
ClaimSize: "1500Mi",
ExpectedSize: "1500Mi",
}
class := newStorageClass(test, ns, "external")
claim := newClaim(test, ns, "external")
claim.Spec.StorageClassName = &(class.Name)
By("creating a claim with a external provisioning annotation")
testsuites.TestDynamicProvisioning(test, c, claim, class)
})
})
Describe("DynamicProvisioner Default", func() {
It("should create and delete default persistent volumes [Slow]", func() {
framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure")
By("creating a claim with no annotation")
test := testsuites.StorageClassTest{
Name: "default",
ClaimSize: "2Gi",
ExpectedSize: "2Gi",
}
claim := newClaim(test, ns, "default")
testsuites.TestDynamicProvisioning(test, c, claim, nil)
})
// Modifying the default storage class can be disruptive to other tests that depend on it
It("should be disabled by changing the default annotation [Serial] [Disruptive]", func() {
framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure")
scName := getDefaultStorageClassName(c)
test := testsuites.StorageClassTest{
Name: "default",
ClaimSize: "2Gi",
}
By("setting the is-default StorageClass annotation to false")
verifyDefaultStorageClass(c, scName, true)
defer updateDefaultStorageClass(c, scName, "true")
updateDefaultStorageClass(c, scName, "false")
By("creating a claim with default storageclass and expecting it to timeout")
claim := newClaim(test, ns, "default")
claim, err := c.CoreV1().PersistentVolumeClaims(ns).Create(claim)
Expect(err).NotTo(HaveOccurred())
defer func() {
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, claim.Name, ns))
}()
// The claim should stay in phase Pending and the wait should time out
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
Expect(err).To(HaveOccurred())
framework.Logf(err.Error())
claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(claim.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(claim.Status.Phase).To(Equal(v1.ClaimPending))
})
// Modifying the default storage class can be disruptive to other tests that depend on it
It("should be disabled by removing the default annotation [Serial] [Disruptive]", func() {
framework.SkipUnlessProviderIs("openstack", "gce", "aws", "gke", "vsphere", "azure")
scName := getDefaultStorageClassName(c)
test := testsuites.StorageClassTest{
Name: "default",
ClaimSize: "2Gi",
}
By("removing the is-default StorageClass annotation")
verifyDefaultStorageClass(c, scName, true)
defer updateDefaultStorageClass(c, scName, "true")
updateDefaultStorageClass(c, scName, "")
By("creating a claim with default storageclass and expecting it to timeout")
claim := newClaim(test, ns, "default")
claim, err := c.CoreV1().PersistentVolumeClaims(ns).Create(claim)
Expect(err).NotTo(HaveOccurred())
defer func() {
framework.ExpectNoError(framework.DeletePersistentVolumeClaim(c, claim.Name, ns))
}()
// The claim should stay in phase Pending and the wait should time out
err = framework.WaitForPersistentVolumeClaimPhase(v1.ClaimBound, c, ns, claim.Name, 2*time.Second, framework.ClaimProvisionShortTimeout)
Expect(err).To(HaveOccurred())
framework.Logf(err.Error())
claim, err = c.CoreV1().PersistentVolumeClaims(ns).Get(claim.Name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(claim.Status.Phase).To(Equal(v1.ClaimPending))
})
})
framework.KubeDescribe("GlusterDynamicProvisioner", func() {
It("should create and delete persistent volumes [fast]", func() {
By("creating a Gluster DP server Pod")
pod := startGlusterDpServerPod(c, ns)
serverUrl := "https://" + pod.Status.PodIP + ":8081"
By("creating a StorageClass")
test := testsuites.StorageClassTest{
Name: "Gluster Dynamic provisioner test",
Provisioner: "kubernetes.io/glusterfs",
ClaimSize: "2Gi",
ExpectedSize: "2Gi",
Parameters: map[string]string{"resturl": serverUrl},
SkipWriteReadCheck: true,
}
suffix := fmt.Sprintf("glusterdptest")
class := newStorageClass(test, ns, suffix)
By("creating a claim object with a suffix for gluster dynamic provisioner")
claim := newClaim(test, ns, suffix)
testsuites.TestDynamicProvisioning(test, c, claim, class)
})
})
Describe("Invalid AWS KMS key", func() {
It("should report an error and create no PV", func() {
framework.SkipUnlessProviderIs("aws")
test := testsuites.StorageClassTest{
Name: "AWS EBS with invalid KMS key",
Provisioner: "kubernetes.io/aws-ebs",
ClaimSize: "2Gi",
Parameters: map[string]string{"kmsKeyId": "arn:aws:kms:us-east-1:123456789012:key/55555555-5555-5555-5555-555555555555"},
}
By("creating a StorageClass")
suffix := fmt.Sprintf("invalid-aws")
class := newStorageClass(test, ns, suffix)
class, err := c.StorageV1().StorageClasses().Create(class)
Expect(err).NotTo(HaveOccurred())
defer func() {
framework.Logf("deleting storage class %s", class.Name)
framework.ExpectNoError(c.StorageV1().StorageClasses().Delete(class.Name, nil))
}()
By("creating a claim object with a suffix for gluster dynamic provisioner")
claim := newClaim(test, ns, suffix)
claim.Spec.StorageClassName = &class.Name
claim, err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Create(claim)
Expect(err).NotTo(HaveOccurred())
defer func() {
framework.Logf("deleting claim %q/%q", claim.Namespace, claim.Name)
err = c.CoreV1().PersistentVolumeClaims(claim.Namespace).Delete(claim.Name, nil)
if err != nil && !apierrs.IsNotFound(err) {
framework.Failf("Error deleting claim %q. Error: %v", claim.Name, err)
}
}()
// Watch events until the message about invalid key appears
err = wait.Poll(time.Second, framework.ClaimProvisionTimeout, func() (bool, error) {
events, err := c.CoreV1().Events(claim.Namespace).List(metav1.ListOptions{})
Expect(err).NotTo(HaveOccurred())
for _, event := range events.Items {
if strings.Contains(event.Message, "failed to create encrypted volume: the volume disappeared after creation, most likely due to inaccessible KMS encryption key") {
return true, nil
}
}
return false, nil
})
Expect(err).NotTo(HaveOccurred())
})
})
Describe("DynamicProvisioner delayed binding [Slow]", func() {
It("should create persistent volumes in the same zone as node after a pod mounting the claims is started", func() {
testZonalDelayedBinding(c, ns, false /*specifyAllowedTopology*/, 1 /*pvcCount*/)
testZonalDelayedBinding(c, ns, false /*specifyAllowedTopology*/, 3 /*pvcCount*/)
})
})
Describe("DynamicProvisioner allowedTopologies", func() {
It("should create persistent volume in the zone specified in allowedTopologies of storageclass", func() {
tests := []testsuites.StorageClassTest{
{
Name: "AllowedTopologies EBS storage class test",
CloudProviders: []string{"aws"},
Provisioner: "kubernetes.io/aws-ebs",
ClaimSize: "2Gi",
ExpectedSize: "2Gi",
},
{
Name: "AllowedTopologies GCE PD storage class test",
CloudProviders: []string{"gce", "gke"},
Provisioner: "kubernetes.io/gce-pd",
ClaimSize: "2Gi",
ExpectedSize: "2Gi",
},
}
for _, test := range tests {
if !framework.ProviderIs(test.CloudProviders...) {
framework.Logf("Skipping %q: cloud providers is not %v", test.Name, test.CloudProviders)
continue
}
By("creating a claim with class with allowedTopologies set")
suffix := "topology"
class := newStorageClass(test, ns, suffix)
zone := getRandomCloudZone(c)
addSingleZoneAllowedTopologyToStorageClass(c, class, zone)
claim := newClaim(test, ns, suffix)
claim.Spec.StorageClassName = &class.Name
pv := testsuites.TestDynamicProvisioning(test, c, claim, class)
checkZoneFromLabelAndAffinity(pv, zone, true)
}
})
})
Describe("DynamicProvisioner delayed binding with allowedTopologies [Slow]", func() {
It("should create persistent volumes in the same zone as specified in allowedTopologies after a pod mounting the claims is started", func() {
testZonalDelayedBinding(c, ns, true /*specifyAllowedTopology*/, 1 /*pvcCount*/)
testZonalDelayedBinding(c, ns, true /*specifyAllowedTopology*/, 3 /*pvcCount*/)
})
})
})
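// getDefaultStorageClassName returns the name of the cluster's default StorageClass and fails the test
// if none, or more than one, default class is found.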
func getDefaultStorageClassName(c clientset.Interface) string {
list, err := c.StorageV1().StorageClasses().List(metav1.ListOptions{})
if err != nil {
framework.Failf("Error listing storage classes: %v", err)
}
var scName string
for _, sc := range list.Items {
if storageutil.IsDefaultAnnotation(sc.ObjectMeta) {
if len(scName) != 0 {
framework.Failf("Multiple default storage classes found: %q and %q", scName, sc.Name)
}
scName = sc.Name
}
}
if len(scName) == 0 {
framework.Failf("No default storage class found")
}
framework.Logf("Default storage class: %q", scName)
return scName
}
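// verifyDefaultStorageClass asserts whether the named StorageClass carries the is-default annotation.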
func verifyDefaultStorageClass(c clientset.Interface, scName string, expectedDefault bool) {
sc, err := c.StorageV1().StorageClasses().Get(scName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
Expect(storageutil.IsDefaultAnnotation(sc.ObjectMeta)).To(Equal(expectedDefault))
}
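// updateDefaultStorageClass sets the beta and GA is-default annotations on the named StorageClass to
// defaultStr, or removes them when defaultStr is empty, and then verifies the result.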
func updateDefaultStorageClass(c clientset.Interface, scName string, defaultStr string) {
sc, err := c.StorageV1().StorageClasses().Get(scName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
if defaultStr == "" {
delete(sc.Annotations, storageutil.BetaIsDefaultStorageClassAnnotation)
delete(sc.Annotations, storageutil.IsDefaultStorageClassAnnotation)
} else {
if sc.Annotations == nil {
sc.Annotations = make(map[string]string)
}
sc.Annotations[storageutil.BetaIsDefaultStorageClassAnnotation] = defaultStr
sc.Annotations[storageutil.IsDefaultStorageClassAnnotation] = defaultStr
}
sc, err = c.StorageV1().StorageClasses().Update(sc)
Expect(err).NotTo(HaveOccurred())
expectedDefault := false
if defaultStr == "true" {
expectedDefault = true
}
verifyDefaultStorageClass(c, scName, expectedDefault)
}
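// getClaim returns a ReadWriteOnce PVC template of the requested size in the given namespace.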
func getClaim(claimSize string, ns string) *v1.PersistentVolumeClaim {
claim := v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "pvc-",
Namespace: ns,
},
Spec: v1.PersistentVolumeClaimSpec{
AccessModes: []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
},
Resources: v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(claimSize),
},
},
},
}
return &claim
}
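// newClaim builds a PVC for the given StorageClassTest; the suffix parameter is accepted for symmetry
// with newStorageClass but is currently unused.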
func newClaim(t testsuites.StorageClassTest, ns, suffix string) *v1.PersistentVolumeClaim {
return getClaim(t.ClaimSize, ns)
}
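// getDefaultPluginName maps the current cloud provider to its in-tree provisioner plugin name.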
func getDefaultPluginName() string {
switch {
case framework.ProviderIs("gke"), framework.ProviderIs("gce"):
return "kubernetes.io/gce-pd"
case framework.ProviderIs("aws"):
return "kubernetes.io/aws-ebs"
case framework.ProviderIs("openstack"):
return "kubernetes.io/cinder"
case framework.ProviderIs("vsphere"):
return "kubernetes.io/vsphere-volume"
case framework.ProviderIs("azure"):
return "kubernetes.io/azure-disk"
}
return ""
}
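// addSingleZoneAllowedTopologyToStorageClass restricts the storage class to the given failure-domain zone
// by appending a single-zone topology selector term.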
func addSingleZoneAllowedTopologyToStorageClass(c clientset.Interface, sc *storage.StorageClass, zone string) {
term := v1.TopologySelectorTerm{
MatchLabelExpressions: []v1.TopologySelectorLabelRequirement{
{
Key: kubeletapis.LabelZoneFailureDomain,
Values: []string{zone},
},
},
}
sc.AllowedTopologies = append(sc.AllowedTopologies, term)
}
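// newStorageClass builds a StorageClass for the given test, defaulting the provisioner to the current
// cloud provider's plugin and selecting Immediate or WaitForFirstConsumer binding as requested.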
func newStorageClass(t testsuites.StorageClassTest, ns string, suffix string) *storage.StorageClass {
pluginName := t.Provisioner
if pluginName == "" {
pluginName = getDefaultPluginName()
}
if suffix == "" {
suffix = "sc"
}
bindingMode := storage.VolumeBindingImmediate
if t.DelayBinding {
bindingMode = storage.VolumeBindingWaitForFirstConsumer
}
return getStorageClass(pluginName, t.Parameters, &bindingMode, ns, suffix)
}
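// getStorageClass assembles a StorageClass object; a nil bindingMode defaults to Immediate.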
func getStorageClass(
provisioner string,
parameters map[string]string,
bindingMode *storage.VolumeBindingMode,
ns string,
suffix string,
) *storage.StorageClass {
if bindingMode == nil {
defaultBindingMode := storage.VolumeBindingImmediate
bindingMode = &defaultBindingMode
}
return &storage.StorageClass{
TypeMeta: metav1.TypeMeta{
Kind: "StorageClass",
},
ObjectMeta: metav1.ObjectMeta{
// Name must be unique, so let's base it on namespace name
Name: ns + "-" + suffix,
},
Provisioner: provisioner,
Parameters: parameters,
VolumeBindingMode: bindingMode,
}
}
// TODO: remove when storage.k8s.io/v1beta1 is removed.
func newBetaStorageClass(t testsuites.StorageClassTest, suffix string) *storagebeta.StorageClass {
pluginName := t.Provisioner
if pluginName == "" {
pluginName = getDefaultPluginName()
}
if suffix == "" {
suffix = "default"
}
return &storagebeta.StorageClass{
TypeMeta: metav1.TypeMeta{
Kind: "StorageClass",
},
ObjectMeta: metav1.ObjectMeta{
GenerateName: suffix + "-",
},
Provisioner: pluginName,
Parameters: t.Parameters,
}
}
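// startGlusterDpServerPod starts a Gluster dynamic provisioner (heketi-style) server pod in the given
// namespace and waits for it to be running.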
func startGlusterDpServerPod(c clientset.Interface, ns string) *v1.Pod {
podClient := c.CoreV1().Pods(ns)
provisionerPod := &v1.Pod{
TypeMeta: metav1.TypeMeta{
Kind: "Pod",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
GenerateName: "glusterdynamic-provisioner-",
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: "glusterdynamic-provisioner",
Image: "docker.io/humblec/glusterdynamic-provisioner:v1.0",
Args: []string{
"-config=" + "/etc/heketi/heketi.json",
},
Ports: []v1.ContainerPort{
{Name: "heketi", ContainerPort: 8081},
},
Env: []v1.EnvVar{
{
Name: "POD_IP",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{
FieldPath: "status.podIP",
},
},
},
},
ImagePullPolicy: v1.PullIfNotPresent,
},
},
},
}
provisionerPod, err := podClient.Create(provisionerPod)
framework.ExpectNoError(err, "Failed to create %s pod: %v", provisionerPod.Name, err)
framework.ExpectNoError(framework.WaitForPodRunningInNamespace(c, provisionerPod))
By("locating the provisioner pod")
pod, err := podClient.Get(provisionerPod.Name, metav1.GetOptions{})
framework.ExpectNoError(err, "Cannot locate the provisioner pod %v: %v", provisionerPod.Name, err)
return pod
}
// waitForProvisionedVolumesDeleted is a polling wrapper that scans all PersistentVolumes for any associated
// with the test's StorageClass. It returns the remaining PVs (empty on success) together with any polling error.
func waitForProvisionedVolumesDeleted(c clientset.Interface, scName string) ([]*v1.PersistentVolume, error) {
var remainingPVs []*v1.PersistentVolume
err := wait.Poll(10*time.Second, 300*time.Second, func() (bool, error) {
remainingPVs = []*v1.PersistentVolume{}
allPVs, err := c.CoreV1().PersistentVolumes().List(metav1.ListOptions{})
if err != nil {
return true, err
}
for _, pv := range allPVs.Items {
if pv.Spec.StorageClassName == scName {
// copy the loop variable so each appended pointer refers to a distinct PV
pv := pv
remainingPVs = append(remainingPVs, &pv)
}
}
if len(remainingPVs) > 0 {
return false, nil // Poll until no PVs remain
} else {
return true, nil // No PVs remain
}
})
return remainingPVs, err
}
// deleteStorageClass deletes the passed in StorageClass and catches errors other than "Not Found"
func deleteStorageClass(c clientset.Interface, className string) {
err := c.StorageV1().StorageClasses().Delete(className, nil)
if err != nil && !apierrs.IsNotFound(err) {
Expect(err).NotTo(HaveOccurred())
}
}
// deleteProvisionedVolumesAndDisks [GCE/GKE only] iteratively deletes persistent volumes and their attached GCE PDs.
func deleteProvisionedVolumesAndDisks(c clientset.Interface, pvs []*v1.PersistentVolume) {
for _, pv := range pvs {
framework.ExpectNoError(framework.DeletePDWithRetry(pv.Spec.PersistentVolumeSource.GCEPersistentDisk.PDName))
framework.ExpectNoError(framework.DeletePersistentVolume(c, pv.Name))
}
}
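// getRandomCloudZone returns a random zone taken from the cluster nodes' zone labels, or "" when no node
// has a zone label.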
func getRandomCloudZone(c clientset.Interface) string {
zones, err := framework.GetClusterZones(c)
Expect(err).ToNot(HaveOccurred())
// return "" in case that no node has zone label
zone, _ := zones.PopAny()
return zone
}