/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package storage
import (
"fmt"
"path/filepath"
"strings"
apierrors "k8s.io/apimachinery/pkg/api/errors"
apierrs "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/rand"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/api/v1"
"k8s.io/kubernetes/test/e2e/framework"
"time"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)
var (
volumePath = "/test-volume"
volumeName = "test-volume"
fileName = "test-file"
retryDuration = 180
mountImage = "gcr.io/google_containers/mounttest:0.8"
)
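// volInfo describes a provisioned test volume: the VolumeSource to plug into
// the pod spec and, for node-local volumes, the node the pod must run on.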
type volInfo struct {
source *v1.VolumeSource
node string
}
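// volSource abstracts creation and cleanup of a test volume so the same set
// of subpath tests can run against every supported volume type.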
type volSource interface {
createVolume(f *framework.Framework) volInfo
cleanupVolume(f *framework.Framework)
}
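// initVolSources maps each volume type under test to its volSource
// constructor; one Context block below is generated per entry.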
var initVolSources = map[string]func() volSource{
"hostPath": initHostpath,
"hostPathSymlink": initHostpathSymlink,
"emptyDir": initEmptydir,
"gcePDPVC": initGCEPD,
"gcePDPartitioned": initGCEPDPartition,
"nfs": initNFS,
"nfsPVC": initNFSPVC,
"gluster": initGluster,
}
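// forceReconstructionSkipTypes is the set of volume types whose mounts the
// kubelet cannot reconstruct after a force delete; the corresponding
// disruptive test is skipped for these types.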
var forceReconstructionSkipTypes = map[string]interface{}{
"hostPath": nil,
"hostPathSymlink": nil,
"gluster": nil,
}
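// The Subpath suite covers two areas: subpaths into atomic-writer volumes
// (secret, configmap, downward API, projected), and generic read/write,
// failure, restart, and reconstruction cases run once per volume type listed
// in initVolSources.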
var _ = framework.KubeDescribe("Subpath", func() {
var (
subPath string
subPathDir string
filePathInSubpath string
filePathInVolume string
pod *v1.Pod
vol volSource
)
f := framework.NewDefaultFramework("subpath")
Context("Atomic writer volumes", func() {
var err error
BeforeEach(func() {
By("Setting up data")
secret := &v1.Secret{ObjectMeta: metav1.ObjectMeta{Name: "my-secret"}, Data: map[string][]byte{"secret-key": []byte("secret-value")}}
secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(secret)
if err != nil && !apierrors.IsAlreadyExists(err) {
Expect(err).ToNot(HaveOccurred())
}
configmap := &v1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "my-configmap"}, Data: map[string]string{"configmap-key": "configmap-value"}}
configmap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(configmap)
if err != nil && !apierrors.IsAlreadyExists(err) {
Expect(err).ToNot(HaveOccurred())
}
})
gracePeriod := int64(1)
It("should support subpaths with secret pod", func() {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "pod-subpath-test-secret"},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: "test",
Image: mountImage,
Args: []string{"--file_content=" + volumePath},
VolumeMounts: []v1.VolumeMount{{Name: "secret", MountPath: volumePath, SubPath: "secret-key"}},
}},
RestartPolicy: v1.RestartPolicyNever,
TerminationGracePeriodSeconds: &gracePeriod,
Volumes: []v1.Volume{{Name: "secret", VolumeSource: v1.VolumeSource{Secret: &v1.SecretVolumeSource{SecretName: "my-secret"}}}},
},
}
By(fmt.Sprintf("Creating pod %s", pod.Name))
f.TestContainerOutput("secret", pod, 0, []string{"secret-value"})
})
It("should support subpaths with configmap pod", func() {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "pod-subpath-test-configmap"},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: "test",
Image: mountImage,
Args: []string{"--file_content=" + volumePath},
VolumeMounts: []v1.VolumeMount{{Name: "configmap", MountPath: volumePath, SubPath: "configmap-key"}},
}},
RestartPolicy: v1.RestartPolicyNever,
TerminationGracePeriodSeconds: &gracePeriod,
Volumes: []v1.Volume{{Name: "configmap", VolumeSource: v1.VolumeSource{ConfigMap: &v1.ConfigMapVolumeSource{LocalObjectReference: v1.LocalObjectReference{Name: "my-configmap"}}}}},
},
}
By(fmt.Sprintf("Creating pod %s", pod.Name))
f.TestContainerOutput("configmap", pod, 0, []string{"configmap-value"})
})
It("should support subpaths with downward pod", func() {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "pod-subpath-test-downward"},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: "test",
Image: mountImage,
Args: []string{"--file_content=" + volumePath},
VolumeMounts: []v1.VolumeMount{{Name: "downward", MountPath: volumePath, SubPath: "downward/podname"}},
}},
RestartPolicy: v1.RestartPolicyNever,
TerminationGracePeriodSeconds: &gracePeriod,
Volumes: []v1.Volume{
{Name: "downward", VolumeSource: v1.VolumeSource{
DownwardAPI: &v1.DownwardAPIVolumeSource{
Items: []v1.DownwardAPIVolumeFile{{Path: "downward/podname", FieldRef: &v1.ObjectFieldSelector{APIVersion: "v1", FieldPath: "metadata.name"}}},
},
}},
},
},
}
By(fmt.Sprintf("Creating pod %s", pod.Name))
f.TestContainerOutput("downward", pod, 0, []string{"content of file \"" + volumePath + "\": " + pod.Name})
})
It("should support subpaths with projected pod", func() {
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{Name: "pod-subpath-test-secret"},
Spec: v1.PodSpec{
Containers: []v1.Container{{
Name: "test",
Image: mountImage,
Args: []string{"--file_content=" + volumePath},
VolumeMounts: []v1.VolumeMount{{Name: "projected", MountPath: volumePath, SubPath: "projected/configmap-key"}},
}},
RestartPolicy: v1.RestartPolicyNever,
TerminationGracePeriodSeconds: &gracePeriod,
Volumes: []v1.Volume{
{Name: "projected", VolumeSource: v1.VolumeSource{
Projected: &v1.ProjectedVolumeSource{
Sources: []v1.VolumeProjection{
{ConfigMap: &v1.ConfigMapProjection{
LocalObjectReference: v1.LocalObjectReference{Name: "my-configmap"},
Items: []v1.KeyToPath{{Path: "projected/configmap-key", Key: "configmap-key"}},
}},
},
},
}},
},
},
}
By(fmt.Sprintf("Creating pod %s", pod.Name))
f.TestContainerOutput("projected", pod, 0, []string{"configmap-value"})
})
})
for volType, volInit := range initVolSources {
curVolType := volType
curVolInit := volInit
Context(fmt.Sprintf("[Volume type: %v]", curVolType), func() {
BeforeEach(func() {
By(fmt.Sprintf("Initializing %s volume", curVolType))
vol = curVolInit()
subPath = f.Namespace.Name
subPathDir = filepath.Join(volumePath, subPath)
filePathInSubpath = filepath.Join(volumePath, fileName)
filePathInVolume = filepath.Join(subPathDir, fileName)
volInfo := vol.createVolume(f)
pod = testPodSubpath(subPath, curVolType, volInfo.source)
pod.Namespace = f.Namespace.Name
pod.Spec.NodeName = volInfo.node
})
AfterEach(func() {
By("Deleting pod")
err := framework.DeletePodWithWait(f, f.ClientSet, pod)
Expect(err).ToNot(HaveOccurred())
By("Cleaning up volume")
vol.cleanupVolume(f)
})
It("should support non-existent path", func() {
// Write the file in the subPath from container 0
setWriteCommand(filePathInSubpath, &pod.Spec.Containers[0])
// Read it from outside the subPath from container 1
testReadFile(f, filePathInVolume, pod, 1)
})
It("should support existing directory", func() {
// Create the directory
setInitCommand(pod, fmt.Sprintf("mkdir -p %s", subPathDir))
// Write the file in the subPath from container 0
setWriteCommand(filePathInSubpath, &pod.Spec.Containers[0])
// Read it from outside the subPath from container 1
testReadFile(f, filePathInVolume, pod, 1)
})
It("should support existing single file", func() {
// Create the file in the init container
setInitCommand(pod, fmt.Sprintf("mkdir -p %s; echo \"mount-tester new file\" > %s", subPathDir, filePathInVolume))
// Read it from inside the subPath from container 0
testReadFile(f, filePathInSubpath, pod, 0)
})
It("should fail if subpath directory is outside the volume [Slow]", func() {
// Create the subpath outside the volume
setInitCommand(pod, fmt.Sprintf("ln -s /bin %s", subPathDir))
// Pod should fail
testPodFailSubpath(f, pod)
})
It("should fail if subpath file is outside the volume [Slow]", func() {
// Create the subpath outside the volume
setInitCommand(pod, fmt.Sprintf("ln -s /bin/sh %s", subPathDir))
// Pod should fail
testPodFailSubpath(f, pod)
})
It("should fail if non-existent subpath is outside the volume [Slow]", func() {
// Create the subpath outside the volume
setInitCommand(pod, fmt.Sprintf("ln -s /bin/notanexistingpath %s", subPathDir))
// Pod should fail
testPodFailSubpath(f, pod)
})
It("should fail if subpath with backstepping is outside the volume [Slow]", func() {
// Create the subpath outside the volume
setInitCommand(pod, fmt.Sprintf("ln -s ../ %s", subPathDir))
// Pod should fail
testPodFailSubpath(f, pod)
})
It("should support creating multiple subpath from same volumes [Slow]", func() {
subpathDir1 := filepath.Join(volumePath, "subpath1")
subpathDir2 := filepath.Join(volumePath, "subpath2")
filepath1 := filepath.Join("/test-subpath1", fileName)
filepath2 := filepath.Join("/test-subpath2", fileName)
setInitCommand(pod, fmt.Sprintf("mkdir -p %s; mkdir -p %s", subpathDir1, subpathDir2))
addSubpathVolumeContainer(&pod.Spec.Containers[0], v1.VolumeMount{
Name: volumeName,
MountPath: "/test-subpath1",
SubPath: "subpath1",
})
addSubpathVolumeContainer(&pod.Spec.Containers[0], v1.VolumeMount{
Name: volumeName,
MountPath: "/test-subpath2",
SubPath: "subpath2",
})
addMultipleWrites(&pod.Spec.Containers[0], filepath1, filepath2)
testMultipleReads(f, pod, 0, filepath1, filepath2)
})
It("should support restarting containers [Slow]", func() {
// Create the directory
setInitCommand(pod, fmt.Sprintf("mkdir -p %v", subPathDir))
testPodContainerRestart(f, pod, filePathInVolume, filePathInSubpath)
})
It("should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow]", func() {
testSubpathReconstruction(f, pod, false)
})
It("should unmount if pod is force deleted while kubelet is down [Disruptive][Slow]", func() {
if _, ok := forceReconstructionSkipTypes[curVolType]; ok {
framework.Skipf("%s volume type does not support reconstruction, skipping", curVolType)
}
testSubpathReconstruction(f, pod, true)
})
})
}
// TODO: add a test case for the same disk with two partitions
})
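// testPodSubpath builds the standard test pod: a privileged init container
// and a "volume" container mount the whole volume at volumePath, while the
// "subpath" container mounts only the given subpath of the same volume.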
func testPodSubpath(subpath, volumeType string, source *v1.VolumeSource) *v1.Pod {
suffix := strings.ToLower(fmt.Sprintf("%s-%s", volumeType, rand.String(4)))
privileged := true
gracePeriod := int64(1)
return &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("pod-subpath-test-%s", suffix),
},
Spec: v1.PodSpec{
InitContainers: []v1.Container{
{
Name: fmt.Sprintf("init-volume-%s", suffix),
Image: "busybox",
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumePath,
},
},
SecurityContext: &v1.SecurityContext{
Privileged: &privileged,
},
},
},
Containers: []v1.Container{
{
Name: fmt.Sprintf("test-container-subpath-%s", suffix),
Image: mountImage,
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumePath,
SubPath: subpath,
},
},
SecurityContext: &v1.SecurityContext{
Privileged: &privileged,
},
},
{
Name: fmt.Sprintf("test-container-volume-%s", suffix),
Image: mountImage,
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: volumePath,
},
},
SecurityContext: &v1.SecurityContext{
Privileged: &privileged,
},
},
},
RestartPolicy: v1.RestartPolicyNever,
TerminationGracePeriodSeconds: &gracePeriod,
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: *source,
},
},
},
}
}
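// setInitCommand runs the given shell command in the pod's init container,
// typically to pre-create directories, files, or symlinks in the volume.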
func setInitCommand(pod *v1.Pod, command string) {
pod.Spec.InitContainers[0].Command = []string{"/bin/sh", "-ec", command}
}
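// setWriteCommand makes the container create the given file via mounttest's
// --new_file_0644 flag and report its mode back with --file_mode.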
func setWriteCommand(file string, container *v1.Container) {
container.Args = []string{
fmt.Sprintf("--new_file_0644=%v", file),
fmt.Sprintf("--file_mode=%v", file),
}
}
func addSubpathVolumeContainer(container *v1.Container, volumeMount v1.VolumeMount) {
existingMounts := container.VolumeMounts
container.VolumeMounts = append(existingMounts, volumeMount)
}
func addMultipleWrites(container *v1.Container, file1 string, file2 string) {
container.Args = []string{
fmt.Sprintf("--new_file_0644=%v", file1),
fmt.Sprintf("--new_file_0666=%v", file2),
}
}
func testMultipleReads(f *framework.Framework, pod *v1.Pod, containerIndex int, file1 string, file2 string) {
By(fmt.Sprintf("Creating pod %s", pod.Name))
f.TestContainerOutput("multi_subpath", pod, containerIndex, []string{
"content of file \"" + file1 + "\": mount-tester new file",
"content of file \"" + file2 + "\": mount-tester new file",
})
}
func setReadCommand(file string, container *v1.Container) {
container.Args = []string{
fmt.Sprintf("--file_content_in_loop=%v", file),
fmt.Sprintf("--retry_time=%d", retryDuration),
}
}
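// testReadFile starts the pod and asserts that the given container reads back
// the contents written by mounttest ("mount-tester new file").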
func testReadFile(f *framework.Framework, file string, pod *v1.Pod, containerIndex int) {
setReadCommand(file, &pod.Spec.Containers[containerIndex])
By(fmt.Sprintf("Creating pod %s", pod.Name))
f.TestContainerOutput("subpath", pod, containerIndex, []string{
"content of file \"" + file + "\": mount-tester new file",
})
}
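// testPodFailSubpath creates a pod whose subpath is expected to be rejected,
// verifies it never reaches Running, and checks that a "Failed" event
// mentioning subPath was recorded.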
func testPodFailSubpath(f *framework.Framework, pod *v1.Pod) {
By(fmt.Sprintf("Creating pod %s", pod.Name))
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).ToNot(HaveOccurred())
defer func() {
framework.DeletePodWithWait(f, f.ClientSet, pod)
}()
err = framework.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, time.Minute)
Expect(err).To(HaveOccurred())
By("Checking for subpath error event")
selector := fields.Set{
"involvedObject.kind": "Pod",
"involvedObject.name": pod.Name,
"involvedObject.namespace": f.Namespace.Name,
"reason": "Failed",
}.AsSelector().String()
options := metav1.ListOptions{FieldSelector: selector}
events, err := f.ClientSet.CoreV1().Events(f.Namespace.Name).List(options)
Expect(err).NotTo(HaveOccurred())
Expect(len(events.Items)).NotTo(Equal(0))
Expect(events.Items[0].Message).To(ContainSubstring("subPath"))
}
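// testPodContainerRestart verifies the subpath mount survives container
// restarts: it fails the subpath container's liveness probe by removing the
// probed file through the volume container, waits for a restart, restores the
// file, waits for restarts to stabilize, and finally checks the file contents
// through the subpath mount.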
func testPodContainerRestart(f *framework.Framework, pod *v1.Pod, fileInVolume, fileInSubpath string) {
pod.Spec.RestartPolicy = v1.RestartPolicyOnFailure
// Add liveness probe to subpath container
pod.Spec.Containers[0].Image = "busybox"
pod.Spec.Containers[0].Command = []string{"/bin/sh", "-ec", "sleep 100000"}
pod.Spec.Containers[0].LivenessProbe = &v1.Probe{
Handler: v1.Handler{
Exec: &v1.ExecAction{
Command: []string{"cat", fileInSubpath},
},
},
InitialDelaySeconds: 15,
FailureThreshold: 1,
PeriodSeconds: 2,
}
// Set volume container to write file
writeCmd := fmt.Sprintf("echo test > %v", fileInVolume)
pod.Spec.Containers[1].Image = "busybox"
pod.Spec.Containers[1].Command = []string{"/bin/sh", "-ec", fmt.Sprintf("%v; sleep 100000", writeCmd)}
// Start pod
By(fmt.Sprintf("Creating pod %s", pod.Name))
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).ToNot(HaveOccurred())
err = framework.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, time.Minute)
Expect(err).ToNot(HaveOccurred())
By("Failing liveness probe")
out, err := podContainerExec(pod, 1, fmt.Sprintf("rm %v", fileInVolume))
framework.Logf("Pod exec output: %v", out)
Expect(err).ToNot(HaveOccurred())
// Check that container has restarted
By("Waiting for container to restart")
restarts := int32(0)
err = wait.PollImmediate(10*time.Second, 2*time.Minute, func() (bool, error) {
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
for _, status := range pod.Status.ContainerStatuses {
if status.Name == pod.Spec.Containers[0].Name {
framework.Logf("Container %v, restarts: %v", status.Name, status.RestartCount)
restarts = status.RestartCount
if restarts > 0 {
framework.Logf("Container has restart count: %v", restarts)
return true, nil
}
}
}
return false, nil
})
Expect(err).ToNot(HaveOccurred())
// Fix liveness probe
By("Rewriting the file")
writeCmd = fmt.Sprintf("echo test-after > %v", fileInVolume)
out, err = podContainerExec(pod, 1, writeCmd)
framework.Logf("Pod exec output: %v", out)
Expect(err).ToNot(HaveOccurred())
// Wait for container restarts to stabilize
By("Waiting for container to stop restarting")
stableCount := 0
stableThreshold := int(time.Minute / framework.Poll)
err = wait.PollImmediate(framework.Poll, 2*time.Minute, func() (bool, error) {
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
for _, status := range pod.Status.ContainerStatuses {
if status.Name == pod.Spec.Containers[0].Name {
if status.RestartCount == restarts {
stableCount++
if stableCount > stableThreshold {
framework.Logf("Container restart has stabilized")
return true, nil
}
} else {
restarts = status.RestartCount
stableCount = 0
framework.Logf("Container has restart count: %v", restarts)
}
break
}
}
return false, nil
})
Expect(err).ToNot(HaveOccurred())
// Verify content of file in subpath
out, err = podContainerExec(pod, 0, fmt.Sprintf("cat %v", fileInSubpath))
framework.Logf("Pod exec output: %v", out)
Expect(err).ToNot(HaveOccurred())
Expect(strings.TrimSpace(out)).To(BeEquivalentTo("test-after"))
}
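// testSubpathReconstruction verifies that both the volume mount and the
// subpath bind mount are cleaned up when the pod is deleted while the kubelet
// is down, for both graceful and force deletion.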
func testSubpathReconstruction(f *framework.Framework, pod *v1.Pod, forceDelete bool) {
// This is mostly copied from TestVolumeUnmountsFromDeletedPodWithForceOption()
// Change to busybox
pod.Spec.Containers[0].Image = "busybox"
pod.Spec.Containers[0].Command = []string{"/bin/sh", "-ec", "sleep 100000"}
pod.Spec.Containers[1].Image = "busybox"
pod.Spec.Containers[1].Command = []string{"/bin/sh", "-ec", "sleep 100000"}
By(fmt.Sprintf("Creating pod %s", pod.Name))
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).ToNot(HaveOccurred())
err = framework.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, time.Minute)
Expect(err).ToNot(HaveOccurred())
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
Expect(err).ToNot(HaveOccurred())
nodeIP, err := framework.GetHostExternalAddress(f.ClientSet, pod)
Expect(err).NotTo(HaveOccurred())
nodeIP = nodeIP + ":22"
By("Expecting the volume mount to be found.")
result, err := framework.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", pod.UID), nodeIP, framework.TestContext.Provider)
framework.LogSSHResult(result)
Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.")
Expect(result.Code).To(BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
By("Expecting the subpath volume mount to be found.")
result, err = framework.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep volume-subpaths | grep %s", pod.UID), nodeIP, framework.TestContext.Provider)
framework.LogSSHResult(result)
Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.")
Expect(result.Code).To(BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
By("Stopping the kubelet.")
kubeletCommand(kStop, f.ClientSet, pod)
defer func() {
if err != nil {
kubeletCommand(kStart, f.ClientSet, pod)
}
}()
By(fmt.Sprintf("Deleting Pod %q", pod.Name))
if forceDelete {
err = f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(pod.Name, metav1.NewDeleteOptions(0))
} else {
err = f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(pod.Name, &metav1.DeleteOptions{})
}
Expect(err).NotTo(HaveOccurred())
By("Starting the kubelet and waiting for pod to delete.")
kubeletCommand(kStart, f.ClientSet, pod)
err = f.WaitForPodTerminated(pod.Name, "")
if err != nil && !apierrors.IsNotFound(err) {
Expect(err).NotTo(HaveOccurred(), "Expected pod to terminate.")
}
if forceDelete {
// With forceDelete, the pod is removed from the API server immediately, so
// there is no way to know when volume teardown finishes; wait a fixed time.
time.Sleep(4 * time.Minute)
}
By("Expecting the volume mount not to be found.")
result, err = framework.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", pod.UID), nodeIP, framework.TestContext.Provider)
framework.LogSSHResult(result)
Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.")
Expect(result.Stdout).To(BeEmpty(), "Expected grep stdout to be empty (i.e. no mount found).")
framework.Logf("Volume unmounted on node %s", pod.Spec.NodeName)
By("Expecting the subpath volume mount not to be found.")
result, err = framework.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep volume-subpaths | grep %s", pod.UID), nodeIP, framework.TestContext.Provider)
framework.LogSSHResult(result)
Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.")
Expect(result.Stdout).To(BeEmpty(), "Expected grep stdout to be empty (i.e. no subpath mount found).")
framework.Logf("Subpath volume unmounted on node %s", pod.Spec.NodeName)
}
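// podContainerExec runs a shell command in the given container via
// `kubectl exec` and returns its output.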
func podContainerExec(pod *v1.Pod, containerIndex int, bashExec string) (string, error) {
return framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", pod.Namespace), pod.Name, "--container", pod.Spec.Containers[containerIndex].Name, "--", "/bin/sh", "-c", bashExec)
}
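// hostpathSource uses the node's /tmp directory directly; nothing to clean up.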
type hostpathSource struct {
}
func initHostpath() volSource {
return &hostpathSource{}
}
func (s *hostpathSource) createVolume(f *framework.Framework) volInfo {
return volInfo{
source: &v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/tmp",
},
},
}
}
func (s *hostpathSource) cleanupVolume(f *framework.Framework) {
}
type hostpathSymlinkSource struct {
}
func initHostpathSymlink() volSource {
return &hostpathSymlinkSource{}
}
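// createVolume runs a privileged pod on the first schedulable node to create
// /tmp/<namespace> and a symlink to it, then returns a HostPath source
// pointing at the symlink, pinned to that node.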
func (s *hostpathSymlinkSource) createVolume(f *framework.Framework) volInfo {
nodes := framework.GetReadySchedulableNodesOrDie(f.ClientSet)
Expect(len(nodes.Items)).NotTo(BeZero(), "No available nodes for scheduling")
node0 := &nodes.Items[0]
sourcePath := fmt.Sprintf("/tmp/%v", f.Namespace.Name)
targetPath := fmt.Sprintf("/tmp/%v-link", f.Namespace.Name)
cmd := fmt.Sprintf("mkdir %v -m 777 && ln -s %v %v", sourcePath, sourcePath, targetPath)
privileged := true
// Launch pod to initialize hostpath directory and symlink
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("hostpath-symlink-prep-%s", f.Namespace.Name),
},
Spec: v1.PodSpec{
Containers: []v1.Container{
{
Name: fmt.Sprintf("init-volume-%s", f.Namespace.Name),
Image: "busybox",
Command: []string{"/bin/sh", "-ec", cmd},
VolumeMounts: []v1.VolumeMount{
{
Name: volumeName,
MountPath: "/tmp",
},
},
SecurityContext: &v1.SecurityContext{
Privileged: &privileged,
},
},
},
RestartPolicy: v1.RestartPolicyNever,
Volumes: []v1.Volume{
{
Name: volumeName,
VolumeSource: v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: "/tmp",
},
},
},
},
NodeName: node0.Name,
},
}
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
Expect(err).ToNot(HaveOccurred())
err = framework.WaitForPodSuccessInNamespace(f.ClientSet, pod.Name, pod.Namespace)
Expect(err).ToNot(HaveOccurred())
err = framework.DeletePodWithWait(f, f.ClientSet, pod)
Expect(err).ToNot(HaveOccurred())
return volInfo{
source: &v1.VolumeSource{
HostPath: &v1.HostPathVolumeSource{
Path: targetPath,
},
},
node: node0.Name,
}
}
func (s *hostpathSymlinkSource) cleanupVolume(f *framework.Framework) {
}
type emptydirSource struct {
}
func initEmptydir() volSource {
return &emptydirSource{}
}
func (s *emptydirSource) createVolume(f *framework.Framework) volInfo {
return volInfo{
source: &v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
}
}
func (s *emptydirSource) cleanupVolume(f *framework.Framework) {
}
type gcepdSource struct {
pvc *v1.PersistentVolumeClaim
}
func initGCEPD() volSource {
framework.SkipUnlessProviderIs("gce", "gke")
return &gcepdSource{}
}
func (s *gcepdSource) createVolume(f *framework.Framework) volInfo {
var err error
framework.Logf("Creating GCE PD volume via dynamic provisioning")
testCase := storageClassTest{
name: "subpath",
claimSize: "2G",
}
pvc := newClaim(testCase, f.Namespace.Name, "subpath")
s.pvc, err = framework.CreatePVC(f.ClientSet, f.Namespace.Name, pvc)
framework.ExpectNoError(err, "Error creating PVC")
return volInfo{
source: &v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: s.pvc.Name,
},
},
}
}
func (s *gcepdSource) cleanupVolume(f *framework.Framework) {
if s.pvc != nil {
err := f.ClientSet.CoreV1().PersistentVolumeClaims(f.Namespace.Name).Delete(s.pvc.Name, nil)
framework.ExpectNoError(err, "Error deleting PVC")
}
}
type gcepdPartitionSource struct {
diskName string
}
func initGCEPDPartition() volSource {
// Need to manually create, attach, partition, detach the GCE PD
// with disk name "subpath-partitioned-disk" before running this test
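// A rough sketch of those manual steps (assumed, not verified; the zone,
// size, and target node below are placeholders):
//   gcloud compute disks create subpath-partitioned-disk --size=10GB --zone=<zone>
//   gcloud compute instances attach-disk <node> --disk=subpath-partitioned-disk --zone=<zone>
//   (on the node) partition the disk, e.g. with fdisk or parted
//   gcloud compute instances detach-disk <node> --disk=subpath-partitioned-disk --zone=<zone>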
manual := true
if manual {
framework.Skipf("Skipping manual GCE PD partition test")
}
framework.SkipUnlessProviderIs("gce", "gke")
return &gcepdPartitionSource{diskName: "subpath-partitioned-disk"}
}
func (s *gcepdPartitionSource) createVolume(f *framework.Framework) volInfo {
// TODO: automate partitioning of the GCE PD once it supports raw block volumes
// framework.Logf("Creating GCE PD volume")
// s.diskName, err = framework.CreatePDWithRetry()
// framework.ExpectNoError(err, "Error creating PD")
return volInfo{
source: &v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: s.diskName,
Partition: 1,
},
},
}
}
func (s *gcepdPartitionSource) cleanupVolume(f *framework.Framework) {
if s.diskName != "" {
// err := framework.DeletePDWithRetry(s.diskName)
// framework.ExpectNoError(err, "Error deleting PD")
}
}
type nfsSource struct {
serverPod *v1.Pod
}
func initNFS() volSource {
return &nfsSource{}
}
func (s *nfsSource) createVolume(f *framework.Framework) volInfo {
var serverIP string
framework.Logf("Creating NFS server")
_, s.serverPod, serverIP = framework.NewNFSServer(f.ClientSet, f.Namespace.Name, []string{"-G", "777", "/exports"})
return volInfo{
source: &v1.VolumeSource{
NFS: &v1.NFSVolumeSource{
Server: serverIP,
Path: "/exports",
},
},
}
}
func (s *nfsSource) cleanupVolume(f *framework.Framework) {
if s.serverPod != nil {
framework.DeletePodWithWait(f, f.ClientSet, s.serverPod)
}
}
type glusterSource struct {
serverPod *v1.Pod
}
func initGluster() volSource {
framework.SkipUnlessNodeOSDistroIs("gci", "ubuntu")
return &glusterSource{}
}
func (s *glusterSource) createVolume(f *framework.Framework) volInfo {
framework.Logf("Creating GlusterFS server")
_, s.serverPod, _ = framework.NewGlusterfsServer(f.ClientSet, f.Namespace.Name)
return volInfo{
source: &v1.VolumeSource{
Glusterfs: &v1.GlusterfsVolumeSource{
EndpointsName: "gluster-server",
Path: "test_vol",
},
},
}
}
func (s *glusterSource) cleanupVolume(f *framework.Framework) {
if s.serverPod != nil {
framework.DeletePodWithWait(f, f.ClientSet, s.serverPod)
err := f.ClientSet.CoreV1().Endpoints(f.Namespace.Name).Delete("gluster-server", nil)
Expect(err).NotTo(HaveOccurred(), "Gluster delete endpoints failed")
}
}
// TODO: need a better way to wrap PVC. A generic framework should support both static and dynamic PV.
// For static PV, can reuse createVolume methods for inline volumes
type nfsPVCSource struct {
serverPod *v1.Pod
pvc *v1.PersistentVolumeClaim
pv *v1.PersistentVolume
}
func initNFSPVC() volSource {
return &nfsPVCSource{}
}
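// createVolume starts an NFS server pod, statically provisions a PV backed by
// it plus a matching PVC (using the namespace name as the storage class name
// to pair them), waits for the two to bind, and returns a PVC volume source.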
func (s *nfsPVCSource) createVolume(f *framework.Framework) volInfo {
var serverIP string
framework.Logf("Creating NFS server")
_, s.serverPod, serverIP = framework.NewNFSServer(f.ClientSet, f.Namespace.Name, []string{"-G", "777", "/exports"})
pvConfig := framework.PersistentVolumeConfig{
NamePrefix: "nfs-",
StorageClassName: f.Namespace.Name,
PVSource: v1.PersistentVolumeSource{
NFS: &v1.NFSVolumeSource{
Server: serverIP,
Path: "/exports",
},
},
}
pvcConfig := framework.PersistentVolumeClaimConfig{
StorageClassName: &f.Namespace.Name,
}
framework.Logf("Creating PVC and PV")
pv, pvc, err := framework.CreatePVCPV(f.ClientSet, pvConfig, pvcConfig, f.Namespace.Name, false)
Expect(err).NotTo(HaveOccurred(), "PVC, PV creation failed")
err = framework.WaitOnPVandPVC(f.ClientSet, f.Namespace.Name, pv, pvc)
Expect(err).NotTo(HaveOccurred(), "PVC, PV failed to bind")
s.pvc = pvc
s.pv = pv
return volInfo{
source: &v1.VolumeSource{
PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
ClaimName: pvc.Name,
},
},
}
}
func (s *nfsPVCSource) cleanupVolume(f *framework.Framework) {
if s.pvc != nil || s.pv != nil {
if errs := framework.PVPVCCleanup(f.ClientSet, f.Namespace.Name, s.pv, s.pvc); len(errs) != 0 {
framework.Failf("Failed to delete PVC or PV: %v", utilerrors.NewAggregate(errs))
}
}
if s.serverPod != nil {
framework.DeletePodWithWait(f, f.ClientSet, s.serverPod)
}
}