zhuchance/kubernetes: docker_sandbox.go (kubernetes v1.12.5)

/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockershim

import (
	"context"
	"fmt"
	"os"
	"strings"
	"time"

	dockertypes "github.com/docker/docker/api/types"
	dockercontainer "github.com/docker/docker/api/types/container"
	dockerfilters "github.com/docker/docker/api/types/filters"
	"github.com/golang/glog"

	utilerrors "k8s.io/apimachinery/pkg/util/errors"
	runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
	"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
	"k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
	"k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker"
	"k8s.io/kubernetes/pkg/kubelet/qos"
	"k8s.io/kubernetes/pkg/kubelet/types"
)

const (
	defaultSandboxImage = "k8s.gcr.io/pause:3.1"

	// Various default sandbox resource requests/limits.
	defaultSandboxCPUshares int64 = 2

	// Name of the underlying container runtime.
	runtimeName = "docker"
)

var (
	// Termination grace period
	defaultSandboxGracePeriod = time.Duration(10) * time.Second
)

// Returns whether the sandbox network is ready, and whether the sandbox is known.
func (ds *dockerService) getNetworkReady(podSandboxID string) (bool, bool) {
	ds.networkReadyLock.Lock()
	defer ds.networkReadyLock.Unlock()

	ready, ok := ds.networkReady[podSandboxID]
	return ready, ok
}
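
// setNetworkReady records whether networking has been set up for the given sandbox.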
func (ds *dockerService) setNetworkReady(podSandboxID string, ready bool) {
	ds.networkReadyLock.Lock()
	defer ds.networkReadyLock.Unlock()

	ds.networkReady[podSandboxID] = ready
}
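
// clearNetworkReady drops the recorded network-ready state of the given sandbox.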
func (ds *dockerService) clearNetworkReady(podSandboxID string) {
	ds.networkReadyLock.Lock()
	defer ds.networkReadyLock.Unlock()

	delete(ds.networkReady, podSandboxID)
}

// RunPodSandbox creates and starts a pod-level sandbox. Runtimes should ensure
// the sandbox is in a ready state.
// For docker, PodSandbox is implemented by a container holding the network
// namespace for the pod.
// Note: docker doesn't use LogDirectory (yet).
func (ds *dockerService) RunPodSandbox(ctx context.Context, r *runtimeapi.RunPodSandboxRequest) (*runtimeapi.RunPodSandboxResponse, error) {
	config := r.GetConfig()

	// Step 1: Pull the image for the sandbox.
	image := defaultSandboxImage
	podSandboxImage := ds.podSandboxImage
	if len(podSandboxImage) != 0 {
		image = podSandboxImage
	}

	// NOTE: To use a custom sandbox image in a private repository, users need to configure the nodes with credentials properly.
	// see: http://kubernetes.io/docs/user-guide/images/#configuring-nodes-to-authenticate-to-a-private-repository
	// Only pull sandbox image when it's not present - v1.PullIfNotPresent.
	if err := ensureSandboxImageExists(ds.client, image); err != nil {
		return nil, err
	}

	// Step 2: Create the sandbox container.
	if r.GetRuntimeHandler() != "" {
		return nil, fmt.Errorf("RuntimeHandler %q not supported", r.GetRuntimeHandler())
	}
	createConfig, err := ds.makeSandboxDockerConfig(config, image)
	if err != nil {
		return nil, fmt.Errorf("failed to make sandbox docker config for pod %q: %v", config.Metadata.Name, err)
	}
	createResp, err := ds.client.CreateContainer(*createConfig)
	if err != nil {
		createResp, err = recoverFromCreationConflictIfNeeded(ds.client, *createConfig, err)
	}
	if err != nil || createResp == nil {
		return nil, fmt.Errorf("failed to create a sandbox for pod %q: %v", config.Metadata.Name, err)
	}
	resp := &runtimeapi.RunPodSandboxResponse{PodSandboxId: createResp.ID}

	ds.setNetworkReady(createResp.ID, false)
	defer func(e *error) {
		// Set network ready depending on the error return of the parent function.
		if *e == nil {
			ds.setNetworkReady(createResp.ID, true)
		}
	}(&err)

	// Step 3: Create the sandbox checkpoint.
	if err = ds.checkpointManager.CreateCheckpoint(createResp.ID, constructPodSandboxCheckpoint(config)); err != nil {
		return nil, err
	}

	// Step 4: Start the sandbox container.
	// Assume kubelet's garbage collector will remove the sandbox later if
	// startContainer fails.
	err = ds.client.StartContainer(createResp.ID)
	if err != nil {
		return nil, fmt.Errorf("failed to start sandbox container for pod %q: %v", config.Metadata.Name, err)
	}

	// Rewrite the resolv.conf file generated by docker.
	// NOTE: cluster DNS settings are no longer passed to the docker API in any
	// case, not only for pods with host network: the resolv.conf file is
	// overwritten after sandbox creation to override docker's behaviour. The
	// file is shared by all containers of the same pod, and needs to be
	// modified only once per pod.
	if dnsConfig := config.GetDnsConfig(); dnsConfig != nil {
		containerInfo, err := ds.client.InspectContainer(createResp.ID)
		if err != nil {
			return nil, fmt.Errorf("failed to inspect sandbox container for pod %q: %v", config.Metadata.Name, err)
		}

		if err := rewriteResolvFile(containerInfo.ResolvConfPath, dnsConfig.Servers, dnsConfig.Searches, dnsConfig.Options); err != nil {
			return nil, fmt.Errorf("rewrite resolv.conf failed for pod %q: %v", config.Metadata.Name, err)
		}
	}

	// Do not invoke network plugins if in hostNetwork mode.
	if config.GetLinux().GetSecurityContext().GetNamespaceOptions().GetNetwork() == runtimeapi.NamespaceMode_NODE {
		return resp, nil
	}

	// Step 5: Set up networking for the sandbox.
	// All pod networking is set up by a CNI plugin discovered at startup time.
	// This plugin assigns the pod IP, sets up routes inside the sandbox,
	// creates interfaces etc. In theory, its jurisdiction ends with pod
	// sandbox networking, but it might insert iptables rules or open ports
	// on the host as well, to satisfy parts of the pod spec that aren't
	// recognized by the CNI standard yet.
	cID := kubecontainer.BuildContainerID(runtimeName, createResp.ID)
	err = ds.network.SetUpPod(config.GetMetadata().Namespace, config.GetMetadata().Name, cID, config.Annotations)
	if err != nil {
		errList := []error{fmt.Errorf("failed to set up sandbox container %q network for pod %q: %v", createResp.ID, config.Metadata.Name, err)}

		// Ensure network resources are cleaned up even if the plugin
		// succeeded but an error happened between that success and here.
		err = ds.network.TearDownPod(config.GetMetadata().Namespace, config.GetMetadata().Name, cID)
		if err != nil {
			errList = append(errList, fmt.Errorf("failed to clean up sandbox container %q network for pod %q: %v", createResp.ID, config.Metadata.Name, err))
		}

		err = ds.client.StopContainer(createResp.ID, defaultSandboxGracePeriod)
		if err != nil {
			errList = append(errList, fmt.Errorf("failed to stop sandbox container %q for pod %q: %v", createResp.ID, config.Metadata.Name, err))
		}

		return resp, utilerrors.NewAggregate(errList)
	}

	return resp, nil
}

// StopPodSandbox stops the sandbox. If there are any running containers in the
// sandbox, they should be forcibly terminated.
// TODO: This function blocks sandbox teardown on networking teardown. Is it
// better to cut our losses assuming an out-of-band GC routine will clean up
// after us?
func (ds *dockerService) StopPodSandbox(ctx context.Context, r *runtimeapi.StopPodSandboxRequest) (*runtimeapi.StopPodSandboxResponse, error) {
	var namespace, name string
	var hostNetwork bool

	podSandboxID := r.PodSandboxId
	resp := &runtimeapi.StopPodSandboxResponse{}

	// Try to retrieve minimal sandbox information from the docker daemon or the sandbox checkpoint.
	inspectResult, metadata, statusErr := ds.getPodSandboxDetails(podSandboxID)
	if statusErr == nil {
		namespace = metadata.Namespace
		name = metadata.Name
		hostNetwork = (networkNamespaceMode(inspectResult) == runtimeapi.NamespaceMode_NODE)
	} else {
		checkpoint := NewPodSandboxCheckpoint("", "", &CheckpointData{})
		checkpointErr := ds.checkpointManager.GetCheckpoint(podSandboxID, checkpoint)

		// Proceed if neither the sandbox container nor the checkpoint can be
		// found. In that case the following actions will only have the sandbox
		// ID, not the pod namespace and name. Return an error on any other
		// unexpected failure.
		if checkpointErr != nil {
			if checkpointErr != errors.ErrCheckpointNotFound {
				err := ds.checkpointManager.RemoveCheckpoint(podSandboxID)
				if err != nil {
					glog.Errorf("Failed to delete corrupt checkpoint for sandbox %q: %v", podSandboxID, err)
				}
			}
			if libdocker.IsContainerNotFoundError(statusErr) {
				glog.Warningf("Both sandbox container and checkpoint for id %q could not be found. "+
					"Proceed without further sandbox information.", podSandboxID)
			} else {
				return nil, utilerrors.NewAggregate([]error{
					fmt.Errorf("failed to get checkpoint for sandbox %q: %v", podSandboxID, checkpointErr),
					fmt.Errorf("failed to get sandbox status: %v", statusErr)})
			}
		} else {
			_, name, namespace, _, hostNetwork = checkpoint.GetData()
		}
	}

	// WARNING: The following operations make these assumptions:
	// 1. kubelet will retry on any error returned by StopPodSandbox.
	// 2. tearing down the network and stopping the sandbox container can succeed in any order.
	// This depends on the implementation details of the network plugin and proper error handling.
	// For kubenet, if tearing down the network fails while the sandbox container is stopped, kubelet
	// will retry. On retry, kubenet will not be able to retrieve the network namespace of the sandbox
	// since it is stopped. With an empty network namespace, the CNI bridge plugin will conduct a
	// best-effort clean up and will not return an error.
	errList := []error{}
	ready, ok := ds.getNetworkReady(podSandboxID)
	if !hostNetwork && (ready || !ok) {
		// Only tear down the pod network if we haven't done so already.
		cID := kubecontainer.BuildContainerID(runtimeName, podSandboxID)
		err := ds.network.TearDownPod(namespace, name, cID)
		if err == nil {
			ds.setNetworkReady(podSandboxID, false)
		} else {
			errList = append(errList, err)
		}
	}
	if err := ds.client.StopContainer(podSandboxID, defaultSandboxGracePeriod); err != nil {
		// Do not return an error if the container does not exist.
		if !libdocker.IsContainerNotFoundError(err) {
			glog.Errorf("Failed to stop sandbox %q: %v", podSandboxID, err)
			errList = append(errList, err)
		} else {
			// Remove the checkpoint for any sandbox that is not found in the runtime.
			ds.checkpointManager.RemoveCheckpoint(podSandboxID)
		}
	}

	if len(errList) == 0 {
		return resp, nil
	}

	// TODO: Stop all running containers in the sandbox.
	return nil, utilerrors.NewAggregate(errList)
}

// RemovePodSandbox removes the sandbox. If there are running containers in the
// sandbox, they should be forcibly removed.
func (ds *dockerService) RemovePodSandbox(ctx context.Context, r *runtimeapi.RemovePodSandboxRequest) (*runtimeapi.RemovePodSandboxResponse, error) {
	podSandboxID := r.PodSandboxId
	var errs []error

	opts := dockertypes.ContainerListOptions{All: true}
	opts.Filters = dockerfilters.NewArgs()
	f := newDockerFilter(&opts.Filters)
	f.AddLabel(sandboxIDLabelKey, podSandboxID)

	containers, err := ds.client.ListContainers(opts)
	if err != nil {
		errs = append(errs, err)
	}

	// Remove all containers in the sandbox.
	for i := range containers {
		if _, err := ds.RemoveContainer(ctx, &runtimeapi.RemoveContainerRequest{ContainerId: containers[i].ID}); err != nil && !libdocker.IsContainerNotFoundError(err) {
			errs = append(errs, err)
		}
	}

	// Remove the sandbox container.
	err = ds.client.RemoveContainer(podSandboxID, dockertypes.ContainerRemoveOptions{RemoveVolumes: true, Force: true})
	if err == nil || libdocker.IsContainerNotFoundError(err) {
		// Only clear network ready when the sandbox has actually been
		// removed from docker or doesn't exist.
		ds.clearNetworkReady(podSandboxID)
	} else {
		errs = append(errs, err)
	}

	// Remove the checkpoint of the sandbox.
	if err := ds.checkpointManager.RemoveCheckpoint(podSandboxID); err != nil {
		errs = append(errs, err)
	}
	if len(errs) == 0 {
		return &runtimeapi.RemovePodSandboxResponse{}, nil
	}
	return nil, utilerrors.NewAggregate(errs)
}

// getIPFromPlugin interrogates the network plugin for an IP.
func (ds *dockerService) getIPFromPlugin(sandbox *dockertypes.ContainerJSON) (string, error) {
	metadata, err := parseSandboxName(sandbox.Name)
	if err != nil {
		return "", err
	}
	msg := fmt.Sprintf("Couldn't find network status for %s/%s through plugin", metadata.Namespace, metadata.Name)
	cID := kubecontainer.BuildContainerID(runtimeName, sandbox.ID)
	networkStatus, err := ds.network.GetPodNetworkStatus(metadata.Namespace, metadata.Name, cID)
	if err != nil {
		return "", err
	}
	if networkStatus == nil {
		return "", fmt.Errorf("%v: invalid network status", msg)
	}
	return networkStatus.IP.String(), nil
}

// getIP returns the IP given the output of `docker inspect` on a pod sandbox,
// first interrogating any registered plugins, then simply trusting the IP
// in the sandbox itself. We look for an IPv4 address before IPv6.
func (ds *dockerService) getIP(podSandboxID string, sandbox *dockertypes.ContainerJSON) string {
	if sandbox.NetworkSettings == nil {
		return ""
	}
	if networkNamespaceMode(sandbox) == runtimeapi.NamespaceMode_NODE {
		// For sandboxes using host network, the shim is not responsible for
		// reporting the IP.
		return ""
	}

	// Don't bother getting the IP if the pod is known and networking isn't ready.
	ready, ok := ds.getNetworkReady(podSandboxID)
	if ok && !ready {
		return ""
	}

	ip, err := ds.getIPFromPlugin(sandbox)
	if err == nil {
		return ip
	}

	// TODO: trusting the docker IP is not a great idea. However docker uses
	// eth0 by default and so does CNI, so if we find a docker IP here, we
	// conclude that the plugin must have failed setup, or forgotten its IP.
	// This is not a sensible assumption for plugins across the board, but if
	// a plugin doesn't want this behavior, it can throw an error.
	if sandbox.NetworkSettings.IPAddress != "" {
		return sandbox.NetworkSettings.IPAddress
	}
	if sandbox.NetworkSettings.GlobalIPv6Address != "" {
		return sandbox.NetworkSettings.GlobalIPv6Address
	}

	// If all else fails, warn but don't return an error, as pod status
	// should generally not return anything except fatal errors.
	// FIXME: handle network errors by restarting the pod somehow?
	glog.Warningf("failed to read pod IP from plugin/docker: %v", err)
	return ""
}

// getPodSandboxDetails returns the docker inspect response for the sandbox
// container and the sandbox metadata parsed from the container name.
func (ds *dockerService) getPodSandboxDetails(podSandboxID string) (*dockertypes.ContainerJSON, *runtimeapi.PodSandboxMetadata, error) {
	resp, err := ds.client.InspectContainer(podSandboxID)
	if err != nil {
		return nil, nil, err
	}

	metadata, err := parseSandboxName(resp.Name)
	if err != nil {
		return nil, nil, err
	}

	return resp, metadata, nil
}

// PodSandboxStatus returns the status of the PodSandbox.
func (ds *dockerService) PodSandboxStatus(ctx context.Context, req *runtimeapi.PodSandboxStatusRequest) (*runtimeapi.PodSandboxStatusResponse, error) {
	podSandboxID := req.PodSandboxId

	r, metadata, err := ds.getPodSandboxDetails(podSandboxID)
	if err != nil {
		return nil, err
	}

	// Parse the timestamps.
	createdAt, _, _, err := getContainerTimestamps(r)
	if err != nil {
		return nil, fmt.Errorf("failed to parse timestamp for container %q: %v", podSandboxID, err)
	}
	ct := createdAt.UnixNano()

	// Translate container to sandbox state.
	state := runtimeapi.PodSandboxState_SANDBOX_NOTREADY
	if r.State.Running {
		state = runtimeapi.PodSandboxState_SANDBOX_READY
	}

	var IP string
	// TODO: Remove this when the sandbox is available on windows.
	// This is a workaround for windows, where the sandbox is not in use and
	// the pod IP is determined through the containers belonging to the pod.
	if IP = ds.determinePodIPBySandboxID(podSandboxID); IP == "" {
		IP = ds.getIP(podSandboxID, r)
	}

	labels, annotations := extractLabels(r.Config.Labels)
	status := &runtimeapi.PodSandboxStatus{
		Id:          r.ID,
		State:       state,
		CreatedAt:   ct,
		Metadata:    metadata,
		Labels:      labels,
		Annotations: annotations,
		Network: &runtimeapi.PodSandboxNetworkStatus{
			Ip: IP,
		},
		Linux: &runtimeapi.LinuxPodSandboxStatus{
			Namespaces: &runtimeapi.Namespace{
				Options: &runtimeapi.NamespaceOption{
					Network: networkNamespaceMode(r),
					Pid:     pidNamespaceMode(r),
					Ipc:     ipcNamespaceMode(r),
				},
			},
		},
	}
	return &runtimeapi.PodSandboxStatusResponse{Status: status}, nil
}

// ListPodSandbox returns a list of Sandboxes.
func (ds *dockerService) ListPodSandbox(_ context.Context, r *runtimeapi.ListPodSandboxRequest) (*runtimeapi.ListPodSandboxResponse, error) {
	filter := r.GetFilter()

	// By default, list all containers whether they are running or not.
	opts := dockertypes.ContainerListOptions{All: true}
	filterOutReadySandboxes := false

	opts.Filters = dockerfilters.NewArgs()
	f := newDockerFilter(&opts.Filters)
	// Add a filter to select only sandbox containers.
	f.AddLabel(containerTypeLabelKey, containerTypeLabelSandbox)

	if filter != nil {
		if filter.Id != "" {
			f.Add("id", filter.Id)
		}
		if filter.State != nil {
			if filter.GetState().State == runtimeapi.PodSandboxState_SANDBOX_READY {
				// Only list running containers.
				opts.All = false
			} else {
				// runtimeapi.PodSandboxState_SANDBOX_NOTREADY can mean the
				// container is in any of the non-running states (e.g., created,
				// exited). We can't tell docker to filter out running
				// containers directly, so we'll need to filter them out
				// ourselves after getting the results.
				filterOutReadySandboxes = true
			}
		}
		if filter.LabelSelector != nil {
			for k, v := range filter.LabelSelector {
				f.AddLabel(k, v)
			}
		}
	}

	// Make sure we get the list of checkpoints first so that we don't include
	// new PodSandboxes that are being created right now.
	var err error
	checkpoints := []string{}
	if filter == nil {
		checkpoints, err = ds.checkpointManager.ListCheckpoints()
		if err != nil {
			glog.Errorf("Failed to list checkpoints: %v", err)
		}
	}

	containers, err := ds.client.ListContainers(opts)
	if err != nil {
		return nil, err
	}

	// Convert docker containers to runtime API sandboxes.
	result := []*runtimeapi.PodSandbox{}
	// using map as set
	sandboxIDs := make(map[string]bool)
	for i := range containers {
		c := containers[i]
		converted, err := containerToRuntimeAPISandbox(&c)
		if err != nil {
			glog.V(4).Infof("Unable to convert docker to runtime API sandbox %+v: %v", c, err)
			continue
		}
		if filterOutReadySandboxes && converted.State == runtimeapi.PodSandboxState_SANDBOX_READY {
			continue
		}
		sandboxIDs[converted.Id] = true
		result = append(result, converted)
	}

	// If no filter is applied, include sandboxes that could only be found via
	// their checkpoints. These PodSandboxes only carry the sandbox ID, name,
	// and namespace, and are reported in PodSandboxState_SANDBOX_NOTREADY.
	for _, id := range checkpoints {
		if _, ok := sandboxIDs[id]; ok {
			continue
		}
		checkpoint := NewPodSandboxCheckpoint("", "", &CheckpointData{})
		err := ds.checkpointManager.GetCheckpoint(id, checkpoint)
		if err != nil {
			glog.Errorf("Failed to retrieve checkpoint for sandbox %q: %v", id, err)
			if err == errors.ErrCorruptCheckpoint {
				err = ds.checkpointManager.RemoveCheckpoint(id)
				if err != nil {
					glog.Errorf("Failed to delete corrupt checkpoint for sandbox %q: %v", id, err)
				}
			}
			continue
		}
		result = append(result, checkpointToRuntimeAPISandbox(id, checkpoint))
	}

	return &runtimeapi.ListPodSandboxResponse{Items: result}, nil
}

// applySandboxLinuxOptions applies LinuxPodSandboxConfig to dockercontainer.HostConfig and dockercontainer.ContainerCreateConfig.
func (ds *dockerService) applySandboxLinuxOptions(hc *dockercontainer.HostConfig, lc *runtimeapi.LinuxPodSandboxConfig, createConfig *dockertypes.ContainerCreateConfig, image string, separator rune) error {
	if lc == nil {
		return nil
	}
	// Apply security context.
	if err := applySandboxSecurityContext(lc, createConfig.Config, hc, ds.network, separator); err != nil {
		return err
	}

	// Set sysctls.
	hc.Sysctls = lc.Sysctls
	return nil
}
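
// applySandboxResources applies the sandbox container's resource settings to
// the dockercontainer.HostConfig: the default memory swap and sandbox CPU
// shares (keeping docker's default CPU quota/period), plus the expected cgroup
// parent when a Linux sandbox config is provided.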
func (ds *dockerService) applySandboxResources(hc *dockercontainer.HostConfig, lc *runtimeapi.LinuxPodSandboxConfig) error {
	hc.Resources = dockercontainer.Resources{
		MemorySwap: DefaultMemorySwap(),
		CPUShares:  defaultSandboxCPUshares,
		// Use docker's default cpu quota/period.
	}
	if lc != nil {
		// Apply Cgroup options.
		cgroupParent, err := ds.GenerateExpectedCgroupParent(lc.CgroupParent)
		if err != nil {
			return err
		}
		hc.CgroupParent = cgroupParent
	}
	return nil
}

// makeSandboxDockerConfig returns dockertypes.ContainerCreateConfig based on runtimeapi.PodSandboxConfig.
func (ds *dockerService) makeSandboxDockerConfig(c *runtimeapi.PodSandboxConfig, image string) (*dockertypes.ContainerCreateConfig, error) {
	// Merge annotations and labels because docker supports only labels.
	labels := makeLabels(c.GetLabels(), c.GetAnnotations())
	// Apply a label to distinguish sandboxes from regular containers.
	labels[containerTypeLabelKey] = containerTypeLabelSandbox
	// Apply a container name label for the infra container. This is used in summary v1.
	// TODO(random-liu): Deprecate this label once container metrics are obtained directly from CRI.
	labels[types.KubernetesContainerNameLabel] = sandboxContainerName

	hc := &dockercontainer.HostConfig{}
	createConfig := &dockertypes.ContainerCreateConfig{
		Name: makeSandboxName(c),
		Config: &dockercontainer.Config{
			Hostname: c.Hostname,
			// TODO: Handle environment variables.
			Image:  image,
			Labels: labels,
		},
		HostConfig: hc,
	}

	// Apply linux-specific options.
	if err := ds.applySandboxLinuxOptions(hc, c.GetLinux(), createConfig, image, securityOptSeparator); err != nil {
		return nil, err
	}

	// Set port mappings.
	exposedPorts, portBindings := makePortsAndBindings(c.GetPortMappings())
	createConfig.Config.ExposedPorts = exposedPorts
	hc.PortBindings = portBindings

	// TODO: Get rid of the dependency on the kubelet internal package.
	hc.OomScoreAdj = qos.PodInfraOOMAdj

	// Apply resource options.
	if err := ds.applySandboxResources(hc, c.GetLinux()); err != nil {
		return nil, err
	}

	// Set security options.
	securityOpts, err := ds.getSecurityOpts(c.GetLinux().GetSecurityContext().GetSeccompProfilePath(), securityOptSeparator)
	if err != nil {
		return nil, fmt.Errorf("failed to generate sandbox security options for sandbox %q: %v", c.Metadata.Name, err)
	}
	hc.SecurityOpt = append(hc.SecurityOpt, securityOpts...)

	applyExperimentalCreateConfig(createConfig, c.Annotations)
	return createConfig, nil
}

// networkNamespaceMode returns the network runtimeapi.NamespaceMode for this container.
// Supports: POD, NODE
func networkNamespaceMode(container *dockertypes.ContainerJSON) runtimeapi.NamespaceMode {
	if container != nil && container.HostConfig != nil && string(container.HostConfig.NetworkMode) == namespaceModeHost {
		return runtimeapi.NamespaceMode_NODE
	}
	return runtimeapi.NamespaceMode_POD
}

// pidNamespaceMode returns the PID runtimeapi.NamespaceMode for this container.
// Supports: CONTAINER, NODE
// TODO(verb): add support for POD PID namespace sharing
func pidNamespaceMode(container *dockertypes.ContainerJSON) runtimeapi.NamespaceMode {
	if container != nil && container.HostConfig != nil && string(container.HostConfig.PidMode) == namespaceModeHost {
		return runtimeapi.NamespaceMode_NODE
	}
	return runtimeapi.NamespaceMode_CONTAINER
}

// ipcNamespaceMode returns the IPC runtimeapi.NamespaceMode for this container.
// Supports: POD, NODE
func ipcNamespaceMode(container *dockertypes.ContainerJSON) runtimeapi.NamespaceMode {
	if container != nil && container.HostConfig != nil && string(container.HostConfig.IpcMode) == namespaceModeHost {
		return runtimeapi.NamespaceMode_NODE
	}
	return runtimeapi.NamespaceMode_POD
}
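
// constructPodSandboxCheckpoint builds a checkpoint from the sandbox config,
// recording the pod's port mappings and whether it uses host networking.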
func constructPodSandboxCheckpoint(config *runtimeapi.PodSandboxConfig) checkpointmanager.Checkpoint {
	data := CheckpointData{}
	for _, pm := range config.GetPortMappings() {
		proto := toCheckpointProtocol(pm.Protocol)
		data.PortMappings = append(data.PortMappings, &PortMapping{
			HostPort:      &pm.HostPort,
			ContainerPort: &pm.ContainerPort,
			Protocol:      &proto,
		})
	}
	if config.GetLinux().GetSecurityContext().GetNamespaceOptions().GetNetwork() == runtimeapi.NamespaceMode_NODE {
		data.HostNetwork = true
	}
	return NewPodSandboxCheckpoint(config.Metadata.Namespace, config.Metadata.Name, &data)
}
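
// toCheckpointProtocol converts a CRI protocol to its checkpoint
// representation, defaulting unknown protocols to TCP.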
func toCheckpointProtocol(protocol runtimeapi.Protocol) Protocol {
	switch protocol {
	case runtimeapi.Protocol_TCP:
		return protocolTCP
	case runtimeapi.Protocol_UDP:
		return protocolUDP
	case runtimeapi.Protocol_SCTP:
		return protocolSCTP
	}
	glog.Warningf("Unknown protocol %q: defaulting to TCP", protocol)
	return protocolTCP
}

// rewriteResolvFile rewrites the resolv.conf file generated by docker.
func rewriteResolvFile(resolvFilePath string, dns []string, dnsSearch []string, dnsOptions []string) error {
	if len(resolvFilePath) == 0 {
		glog.Errorf("ResolvConfPath is empty.")
		return nil
	}

	if _, err := os.Stat(resolvFilePath); os.IsNotExist(err) {
		return fmt.Errorf("ResolvConfPath %q does not exist", resolvFilePath)
	}

	var resolvFileContent []string
	for _, srv := range dns {
		resolvFileContent = append(resolvFileContent, "nameserver "+srv)
	}
	if len(dnsSearch) > 0 {
		resolvFileContent = append(resolvFileContent, "search "+strings.Join(dnsSearch, " "))
	}
	if len(dnsOptions) > 0 {
		resolvFileContent = append(resolvFileContent, "options "+strings.Join(dnsOptions, " "))
	}
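
	// For example, with dns ["10.0.0.10"], dnsSearch ["default.svc.cluster.local",
	// "svc.cluster.local"], and dnsOptions ["ndots:5"] (illustrative values),
	// the rewritten file would contain:
	//
	//	nameserver 10.0.0.10
	//	search default.svc.cluster.local svc.cluster.local
	//	options ndots:5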
	if len(resolvFileContent) > 0 {
		resolvFileContentStr := strings.Join(resolvFileContent, "\n")
		resolvFileContentStr += "\n"

		glog.V(4).Infof("Will attempt to re-write config file %s with: \n%s", resolvFilePath, resolvFileContentStr)
		if err := rewriteFile(resolvFilePath, resolvFileContentStr); err != nil {
			glog.Errorf("resolv.conf could not be updated: %v", err)
			return err
		}
	}

	return nil
}
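
// rewriteFile truncates the file at filePath and replaces its contents with
// stringToWrite.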
func rewriteFile(filePath, stringToWrite string) error {
	f, err := os.OpenFile(filePath, os.O_TRUNC|os.O_WRONLY, 0644)
	if err != nil {
		return err
	}
	defer f.Close()

	_, err = f.WriteString(stringToWrite)
	return err
}