diff --git a/kata-v3.15.0/build.sh b/kata-v3.15.0/build.sh new file mode 100644 index 0000000000000000000000000000000000000000..72f49236d2ecdf54e9a32534298c103b6a67d605 --- /dev/null +++ b/kata-v3.15.0/build.sh @@ -0,0 +1,993 @@ +#!/bin/bash + +set -e + +# Configuration for the Confidential Containers environment +REGISTRY_DOMAIN="registry.hw.com" +REGISTRY_PORT="5000" +KATA_VERSION="3.15.0" +TRUSTEE_VERSION="v0.12.0" +GUEST_COMPONENTS_VERSION="v0.12.0" +OPERATOR_VERSION="v0.13.0" +WORK_DIR="$(pwd)" +REGISTRY_DIR="$WORK_DIR/registry" +KATA_SRC_DIR="$WORK_DIR/kata-containers" +DOCKERFILE_DIR="$KATA_SRC_DIR/build/virtCCA_sdk/kata-v3.15.0/conf" +CONTAINER_NAME="coco-build-env" +IMAGE_NAME="coco-builder:latest" +REMOTE_ATTESTATION_DIR="$WORK_DIR/coco/remote_attestation" +LOG_DIR="$REMOTE_ATTESTATION_DIR/logs" +CONFIG_DIR="$REMOTE_ATTESTATION_DIR/config" +DOCKERFILE_DIR="$WORK_DIR/build/virtCCA_sdk/kata-v3.15.0/conf" +HTTP_PROXY="http://90.255.72.178:3128" # Modify according to actual situation +USE_PROXY=true + +# Install and configure Containerd +function install_containerd() +{ + source /etc/profile + echo "===== Starting Containerd 1.7.27 installation =====" + + # 1. Download containerd + echo "Downloading containerd..." + filename="containerd-1.7.27-linux-arm64.tar.gz" + url="https://github.com/containerd/containerd/releases/download/v1.7.27/containerd-1.7.27-linux-arm64.tar.gz" + + # Check if file exists + if [ -f "$filename" ]; then + echo "File already exists, skipping download: $filename" + else + echo "Starting download: $filename" + wget "$url" + fi + + # 2. Extract files + echo "Extracting package..." + tar -xvf containerd-1.7.27-linux-arm64.tar.gz > /dev/null + + # 3. Copy binaries safely + echo "Copying binaries to /usr/local/bin..." + if pgrep containerd; then + echo "Stopping existing containerd process..." + pkill containerd + sleep 2 + fi + cp -f bin/* /usr/local/bin + + # 4. Install runc + echo "Installing runc..." 
+ yum install -y runc > /dev/null + + # 5. Generate config file + echo "Generating containerd config..." + mkdir -p /etc/containerd/ + containerd config default > /etc/containerd/config.toml + + # 6. Add kata runtime config + echo "Configuring kata runtime..." + sed -i '/\[plugins."io.containerd.grpc.v1.cri".containerd\]/a \\n [plugins."io.containerd.grpc.v1.cri".containerd.runtimes.kata]\n runtime_type = "io.containerd.kata.v2"\n privileged_without_host_devices = false' /etc/containerd/config.toml + + # 7. Modify critical settings + echo "Modifying core configurations..." + sed -i 's/enable_unprivileged_ports = false/enable_unprivileged_ports = true/' /etc/containerd/config.toml + sed -i 's|sandbox_image = "registry.k8s.io/pause:3.8"|sandbox_image = "registry.k8s.io/pause:3.10"|' /etc/containerd/config.toml + + # 8. Install systemd service + echo "Installing containerd service..." + if [ -f "containerd.service" ]; then + echo "File already exists, skipping download: containerd.service" + else + echo "Starting download: containerd.service" + wget --no-check-certificate https://raw.githubusercontent.com/containerd/containerd/refs/tags/v1.7.27/containerd.service + fi + cp -f ./containerd.service /usr/lib/systemd/system/ + + # 9. Proxy configuration (enable as needed) + if [ "$USE_PROXY" != true ]; then + echo "> Proxy not enabled, skipping Containerd proxy settings" + return 0 + fi + + if [ -z "$HTTP_PROXY" ]; then + echo "> Warning: USE_PROXY=true but HTTP_PROXY not set, skipping Containerd proxy configuration" + return 1 + fi + + echo "> Configuring Containerd proxy: $HTTP_PROXY" + + # Create config directory + echo "> Creating config directory..." + mkdir -p /etc/systemd/system/containerd.service.d/ + + # Create proxy config file + echo "> Creating proxy config file..." + cat > /etc/systemd/system/containerd.service.d/http-proxy.conf < Error: Failed to create proxy config file!" + return 1 + fi + + echo "> Reloading systemd configuration..." 
+ systemctl daemon-reload + systemctl restart containerd + + # 10. Start service + echo "Starting containerd service..." + systemctl daemon-reload + systemctl enable containerd > /dev/null + + if systemctl is-active --quiet containerd; then + systemctl stop containerd + sleep 2 + fi + systemctl start containerd + + # Verify service status + echo "Verifying service status..." + systemctl status containerd --no-pager | grep "Active:" + + # 11. Container test after fixes + echo "Waiting for service initialization..." + sleep 5 + + echo "Pulling test image..." + ctr image rm docker.io/library/busybox:latest + ctr images pull --skip-verify swr.cn-north-4.myhuaweicloud.com/ddn-k8s/docker.io/library/busybox:latest-linuxarm64 + ctr images tag swr.cn-north-4.myhuaweicloud.com/ddn-k8s/docker.io/library/busybox:latest-linuxarm64 docker.io/library/busybox:latest + + echo "Running container test..." + ctr run --rm \ + docker.io/library/busybox:latest \ + test-container \ + /bin/sh -c 'echo "Container test successful! Current time: $(date)"' + + echo "===== Containerd installation successful =====" +} + +# Initialize a single-node Kubernetes cluster +function init_k8s() { + echo "===== Starting Kubernetes single-node cluster installation =====" + + source /etc/profile + + # 1. Configure yum repositories + echo "Configuring Kubernetes yum repositories..." + cat < /etc/yum.repos.d/k8s.repo +[k8s] +name=Kubernetes +baseurl=https://pkgs.k8s.io/core:/stable:/v1.32/rpm/ +enabled=1 +gpgcheck=0 +repo_gpgcheck=0 +EOF + + echo "Cleaning and rebuilding yum cache..." + yum clean all --disablerepo="*" --enablerepo="k8s" > /dev/null + yum makecache --disablerepo="*" --enablerepo="k8s" > /dev/null + + # 2. Install K8s components + echo "Installing Kubernetes components..." + yum install -y kubelet-1.32.4 kubeadm-1.32.4 kubectl-1.32.4 kubernetes-cni --nobest > /dev/null + + # 3. System configuration + echo "Configuring system parameters..." 
+ + # Disable firewall + systemctl stop firewalld 2>/dev/null || true + systemctl disable firewalld > /dev/null + + # Load kernel modules + modprobe br_netfilter + + # Enable NET.BRIDGE.BRIDGE-NF-CALL-IPTABLES kernel option + sysctl -w net.bridge.bridge-nf-call-iptables=1 + + # Disable swap + swapoff -a + cp -p /etc/fstab /etc/fstab.bak$(date '+%Y%m%d%H%M%S') + sed -i "s/\/dev\/mapper\/openeuler-swap/\#\/dev\/mapper\/openeuler-swap/g" /etc/fstab + + systemctl enable kubelet > /dev/null + + # 4. Initialize cluster + echo "Preparing cluster initialization..." + + # Cleanup proxy settings (critical fix) + echo "Clearing proxy settings..." + export -n http_proxy + export -n https_proxy + export -n no_proxy + unset http_proxy + unset https_proxy + + + # Create /etc/resolv.conf and modify /etc/hosts + touch /etc/resolv.conf && echo "$(hostname -I | awk '{print $1}') node" | sudo tee -a /etc/hosts + + # Generate initialization config + kubeadm config print init-defaults > kubeadm-init.yaml + + # Execute update script in same directory + chmod 755 update_kubeadm_init.sh + ./update_kubeadm_init.sh + + if ! kubeadm reset -f > /dev/null 2>&1; then + echo "Reset failed, performing deep cleanup..." + # Handle etcd configuration errors + rm -rf /etc/kubernetes/* + rm -rf /root/.kube/* + rm -rf /var/lib/etcd/* + + # Retry reset + if ! kubeadm reset -f > /dev/null 2>&1; then + echo "Error: Cluster reset failed! Manually check these directories:" + echo " /etc/kubernetes/" + echo " /var/lib/etcd/" + echo " /root/.kube/" + exit 1 + fi + fi + + export -n http_proxy + export -n https_proxy + export -n no_proxy + + if ! kubeadm init --config kubeadm-init.yaml > /dev/null 2>&1; then + kubeadm reset -f + rm -rf /var/lib/etcd + iptables -F && iptables -t nat -F && iptables -t mangle -F + + if ! kubeadm init --config kubeadm-init.yaml; then + echo "Error: Failed to initialize K8s node! 
Please check manually" + exit 1 + fi + fi + + # Configure kubectl + mkdir -p $HOME/.kube + cp -f /etc/kubernetes/admin.conf $HOME/.kube/config + chown $(id -u):$(id -g) $HOME/.kube/config + echo "export KUBECONFIG=/etc/kubernetes/admin.conf" >> /etc/profile + source /etc/profile + + # 5. Install CNI plugin + echo "Installing Flannel network plugin..." + + source /etc/profile + # Download CNI plugins + CNI_URL="https://github.com/containernetworking/plugins/releases/download/v1.5.1/cni-plugins-linux-arm64-v1.5.1.tgz" + mkdir -p /opt/cni/bin + if ! wget -qO- $CNI_URL | tar -xz -C /opt/cni/bin; then + echo "CNI plugin download failed! Trying mirror..." + wget -qO- https://mirror.ghproxy.com/$CNI_URL | tar -xz -C /opt/cni/bin || { + echo "CNI plugin download failed, download manually:" + echo "wget $CNI_URL" + exit 1 + } + fi + + + # Apply Flannel configuration + export -n http_proxy + export -n https_proxy + export -n no_proxy + kubectl apply -f kube-flannel.yaml > /dev/null + + # 6. Verify deployment + echo -e "\n===== Deployment complete, verifying cluster status =====" + + echo "Waiting for cluster components to initialize..." 
+ sleep 15 + for i in {1..30}; do + # Check node status + nodes_ready=$(kubectl get nodes -o jsonpath='{.items[*].status.conditions[?(@.type=="Ready")].status}' | grep -c True) + + # Check Pod status (exclude Completed and Succeeded) + pods_not_ready=$(kubectl get pods -A -o jsonpath='{range .items[*]}{.status.phase}{"\n"}{end}' | grep -vE "Running|Succeeded|Completed" | wc -l) + + if [ $nodes_ready -ge 1 ] && [ $pods_not_ready -eq 0 ]; then + echo "All components ready" + break + fi + + sleep 10 + echo "Waiting for components to initialize...(${i}0s)" + + # Debug information: show current status + echo "=== Node status ===" + kubectl get nodes + echo "=== Pod status ===" + kubectl get pods -A + done + + echo -e "\nNode status:" + kubectl get nodes -o wide + + echo -e "\nPod status:" + kubectl get pods -A + + echo -e "\n===== Kubernetes cluster deployed successfully =====" +} + +# Deploy Kata Confidential Containers environment +function kata_deploy() +{ + +# 1. Install dependencies +echo "===== Installing system dependencies =====" +yum install -y docker httpd-tools git openssl kubectl || { + echo "Dependency installation failed! Check network or yum configuration" + exit 1 +} + +# 2. Configure Docker service +echo "===== Configuring Docker service =====" +systemctl start docker +systemctl enable docker + +mkdir -p /etc/docker +cat > /etc/docker/daemon.json < Proxy not enabled, skipping proxy settings" + return 0 + fi + + if [ -z "$HTTP_PROXY" ]; then + echo "> Warning: USE_PROXY=true but HTTP_PROXY not set" + return 1 + fi + + echo "> Configuring Docker proxy: $HTTP_PROXY" + + # Create config directory + echo "> Creating config directory..." + mkdir -p /etc/systemd/system/docker.service.d + + # Create proxy config file + echo "> Creating proxy config file..." + cat > /etc/systemd/system/docker.service.d/http-proxy.conf <> /etc/hosts + +# 8. 
Start private registry +echo "===== Starting local Registry ($REGISTRY_DOMAIN:$REGISTRY_PORT) =====" + +# Reliably check and clean up old containers +echo "> Cleaning up old containers..." +if docker container inspect "$REGISTRY_DOMAIN" &>/dev/null; then + echo "> Existing Registry container found, cleaning up..." + docker stop "$REGISTRY_DOMAIN" >/dev/null 2>&1 + docker rm "$REGISTRY_DOMAIN" >/dev/null 2>&1 + echo "> Old container removed" + # Additional safety: remove potential lock files + rm -f "$REGISTRY_DIR/data/.lock" >/dev/null 2>&1 +else + echo "> No existing container found" +fi + +# Generate registry config +echo "> Generating configuration file..." +cat > "$REGISTRY_DIR/config.yml" < Starting new container..." +docker run -d \ + -p ${REGISTRY_PORT}:${REGISTRY_PORT} \ + --restart=always \ + --name "$REGISTRY_DOMAIN" \ + -v "$REGISTRY_DIR/certs:/certs" \ + -v "$REGISTRY_DIR/data:/var/lib/registry" \ + -v "$REGISTRY_DIR/config.yml:/etc/docker/registry/config.yml" \ + registry:2 + +# Verify registry startup +echo "> Verifying startup..." +for i in {1..10}; do + if docker ps | grep -q "$REGISTRY_DOMAIN"; then + echo "> Registry started successfully: https://$REGISTRY_DOMAIN:$REGISTRY_PORT/v2/_catalog" + break + elif [ $i -eq 5 ]; then + echo "> Startup taking longer than expected..." + elif [ $i -eq 10 ]; then + echo "> Registry startup failed! Check logs: docker logs $REGISTRY_DOMAIN" + exit 1 + fi + sleep 1 +done + +# 9. Compile kata-deploy image +echo "===== Compiling kata-deploy =====" +cd "$KATA_SRC_DIR/tools/packaging/kata-deploy/local-build" +export USE_CACHE="no" +export AGENT_POLICY=no +make || { + echo "kata-deploy compilation failed!" + exit 1 +} + +# 10. 
Build and push kata-deploy image +echo "===== Building kata-deploy image =====" +cd "$KATA_SRC_DIR/tools/packaging/kata-deploy" +cp -v ./local-build/kata-static.tar.xz ./ + +# Build image (set proxy to access external resources) +docker build \ + --build-arg HTTP_PROXY="$HTTP_PROXY" \ + --build-arg HTTPS_PROXY="$HTTP_PROXY" \ + -t kata-deploy . || { + echo "kata-deploy image build failed!" + exit 1 +} + +# Tag image +docker tag kata-deploy:latest $REGISTRY_DOMAIN:$REGISTRY_PORT/kata-deploy:latest + +# Verify registry health before pushing +echo "> Verifying registry health status..." +if ! curl -sk --retry 3 --retry-delay 2 "https://$REGISTRY_DOMAIN:$REGISTRY_PORT/v2/_catalog" >/dev/null; then + echo "> Registry not responding! Attempting to restart service..." + docker restart "$REGISTRY_DOMAIN" || { + echo "Registry restart failed!" + exit 1 + } + sleep 5 +fi + +# Push image (bypass any proxies) +echo "> Pushing image to local Registry (bypassing proxies)..." +( + # Temporarily unset all proxies + unset HTTP_PROXY + unset HTTPS_PROXY + unset http_proxy + unset https_proxy + + # Set timeout + timeout 300 docker push "$REGISTRY_DOMAIN:$REGISTRY_PORT/kata-deploy:latest" +) && { + echo "> Image pushed successfully" +} || { + exit_code=$? + if [ $exit_code -eq 124 ]; then + echo "> Push operation timed out! Check Registry performance" + else + echo "> Image push failed! Error code: $exit_code" + fi + echo "> Check Registry logs: docker logs $REGISTRY_DOMAIN" + echo "> Attempt manual push:" + echo " unset HTTP_PROXY HTTPS_PROXY" + echo " docker push $REGISTRY_DOMAIN:$REGISTRY_PORT/kata-deploy:latest" + exit 1 +} + +# 11. 
Deploy Operator +echo "===== Deploying Operator ($OPERATOR_VERSION) =====" + +# Safely load profile file, ignore undefined variable errors +set +u # Temporarily disable undefined variable checking +source /etc/profile >/dev/null 2>&1 || true +set -u # Re-enable undefined variable checking + + +kubectl apply -k "github.com/confidential-containers/operator/config/release?ref=$OPERATOR_VERSION" || { + echo "Operator deployment failed!" + exit 1 +} + +kubectl taint nodes --all node-role.kubernetes.io/control-plane:NoSchedule- || true +kubectl label nodes --all node.kubernetes.io/worker= || true + +echo "Waiting for Operator initialization (15 seconds)..." +sleep 15 + + +# 13. Deploy VirtCCA Kata +echo "===== Deploying VirtCCA Kata Runtime =====" +set +e +# Create KataDeploy resource +kubectl apply -k github.com/confidential-containers/operator/config/release?ref=v0.13.0 +kubectl taint nodes node node-role.kubernetes.io/control-plane:NoSchedule- +kubectl label node node node.kubernetes.io/worker= +sleep 5s + +kubectl apply -f $KATA_SRC_DIR/build/virtCCA_sdk/kata-v3.15.0/conf/virtcca-kata-deploy.yaml +set -e +# 13. Verify deployment +echo "===== Verifying cluster status =====" +sleep 15 +kubectl get pods -A + +# 14. Create test Pod +echo "===== Creating test Pod =====" +cat > "$WORK_DIR/test-kata-qemu-virtcca.yaml" </dev/null 2>&1 || true + +# Set proxy parameters +if [ "$USE_PROXY" = true ]; then + BUILD_ARGS="--build-arg http_proxy=$HTTP_PROXY --build-arg https_proxy=$HTTP_PROXY" + RUN_ENV="-e http_proxy=$HTTP_PROXY -e https_proxy=$HTTP_PROXY" +else + BUILD_ARGS="" + RUN_ENV="" +fi + +echo -e "\n\033[32m[1] Building build environment container image\033[0m" +if [ "$USE_PROXY" = true ]; then + mkdir -p $DOCKERFILE_DIR/certs + +fi +docker build $BUILD_ARGS -t $IMAGE_NAME $DOCKERFILE_DIR +if [ $? -ne 0 ]; then + echo -e "\033[31mImage build failed! 
Check Dockerfile and proxy settings\033[0m" + exit 1 +fi + +echo -e "\n\033[32m[2] Creating build environment container\033[0m" +docker run -itd --name $CONTAINER_NAME -v $KATA_SRC_DIR:/coco $RUN_ENV $IMAGE_NAME +if [ $? -ne 0 ]; then + echo -e "\033[31mContainer creation failed! Check for name conflicts\033[0m" + exit 1 +fi + +echo -e "\n\033[32m[3] Executing compilation tasks in container\033[0m" + +# Execute all compilation commands in container (non-interactive) +docker exec $CONTAINER_NAME /bin/bash -c ' +set -e +echo "=== Compiling guest-components ===" +cd /coco/build/guest-components +make clean +make build TEE_PLATFORM=virtcca + +echo "=== Compiling measurement report tool ===" +cd /coco/build/guest-components/attestation-agent/attester +cargo build --no-default-features --features bin,virtcca-attester --bin evidence_getter --release + +echo "=== Compiling coco_keyprovider ===" +cd /coco/build/guest-components/attestation-agent/coco_keyprovider +cargo build --release + +echo "=== Compiling attestation-service ===" +cd /coco/build/trustee/attestation-service +make VERIFIER=virtcca-verifier + +echo "=== Compiling RVPS ===" +cd /coco/build/trustee/rvps +make build + +echo "=== Compiling KBS ===" +cd /coco/build/trustee/kbs +make background-check-kbs COCO_AS_INTEGRATION_TYPE=grpc + +echo "=== Compiling kata-agent ===" +cd /coco/src/agent +make SECCOMP=no + +echo "=== Compiling kata-shim and kata-runtime ===" +cd /coco +make -C src/runtime + +echo "=== All compilation tasks completed ====" +' + +echo -e "\n\033[32m[4] Cleaning up container environment\033[0m" +docker stop $CONTAINER_NAME >/dev/null +docker rm $CONTAINER_NAME >/dev/null + +# Show artifact locations +echo -e "\n\033[34mBuild artifacts location:\033[0m" +echo -e "guest-components: $KATA_SRC_DIR/build/guest-components/target/aarch64-unknown-linux-gnu/release/" +echo -e "Other components (kbs/rvps/etc): $KATA_SRC_DIR/build/trustee/target/release/" + +echo -e "\n\033[32mCompilation process 
completed!\033[0m" +} + +# Set up remote attestation services +function rats() +{ + +# Clean and ensure directory structure +echo "===== Preparing directory structure =====" +rm -rf "$REMOTE_ATTESTATION_DIR" +mkdir -p \ + "$REMOTE_ATTESTATION_DIR" \ + "$LOG_DIR" \ + "$CONFIG_DIR" \ + /opt/confidential-containers/{kbs/repository,attestation-service/rvps} \ + /etc/attestation/attestation-service/verifier/virtcca/ \ + /opt/confidential-containers/attestation-service/token/simple/policies/opa + +# Stop any existing services +echo "===== Stopping existing services =====" +pkill -f 'grpc-as|kbs|rvps' || true +sleep 1 # Ensure services fully stop + +# Copy remote attestation components +echo "===== Copying remote attestation components =====" +if [ ! -d "$KATA_SRC_DIR/build/trustee/target/release" ]; then + echo "Error: Could not find build output directory $KATA_SRC_DIR/build/trustee/target/release" + exit 1 +fi + +cd "$KATA_SRC_DIR/build/trustee/target/release" +for component in grpc-as kbs rvps; do + if [ ! -f "$component" ]; then + echo "Error: Could not find component $component" + exit 1 + fi + cp -v "$component" "$REMOTE_ATTESTATION_DIR" +done + +# Generate configuration files +echo "===== Generating configuration files =====" + +# kbs-config-grpc.toml +cat > "$CONFIG_DIR/kbs-config-grpc.toml" < "$CONFIG_DIR/rvps-config.json" < "$CONFIG_DIR/as-config.json" < /opt/confidential-containers/attestation-service/token/simple/policies/opa/default.rego </dev/null && rm -f "$DB_PATH/testfile" || { + echo "Error: Database directory not writable: $DB_PATH" + echo "Attempting to modify permissions..." + sudo chown -R $(whoami) "$DB_PATH" || { + echo "Permission modification failed, check manually" + exit 1 + } +} + +# Enhanced service startup function with logging +start_service_with_logging() { + local name=$1 + local cmd=$2 + local log_file="$LOG_DIR/$name.log" + + # Clear old logs + > "$log_file" + + echo "Starting $name..." 
+ echo "Command: $cmd" >> "$log_file" + echo "Start time: $(date)" >> "$log_file" + echo "------------------------" >> "$log_file" + + # Start service and redirect output to log file + # Use bash -c to correctly parse environment variables + bash -c "$cmd" >> "$log_file" 2>&1 & + local pid=$! + + echo "Service PID: $pid" >> "$log_file" + echo $pid > "$LOG_DIR/$name.pid" + + # Wait for service to start + local timeout=10 + while [ $timeout -gt 0 ]; do + if ps -p $pid > /dev/null; then + # Check for errors in logs + if grep -q "Error:" "$log_file"; then + echo " $name start failed (PID: $pid)" + echo "Error details:" + grep "Error:" "$log_file" | tail -n 5 + return 1 + else + echo " $name started successfully (PID: $pid)" + return 0 + fi + fi + sleep 1 + ((timeout--)) + done + + echo " $name start timed out (PID: $pid)" + return 1 +} + +# Start services +echo "===== Starting remote attestation services =====" + +# First start RVPS +if ! start_service_with_logging "rvps" \ + "$REMOTE_ATTESTATION_DIR/rvps -a 127.0.0.1:50003 -c $CONFIG_DIR/rvps-config.json" +then + echo "RVPS startup failed! Check logs: $LOG_DIR/rvps.log" + tail -n 20 "$LOG_DIR/rvps.log" + exit 1 +fi + +# Then start AS +if ! start_service_with_logging "as" \ + "$REMOTE_ATTESTATION_DIR/grpc-as -c $CONFIG_DIR/as-config.json" +then + echo "AS startup failed! Check logs: $LOG_DIR/as.log" + tail -n 20 "$LOG_DIR/as.log" + exit 1 +fi + +# Finally start KBS (using bash -c to correctly parse environment variables) +if ! start_service_with_logging "kbs" \ + "RUST_LOG=DEBUG $REMOTE_ATTESTATION_DIR/kbs -c $CONFIG_DIR/kbs-config-grpc.toml" +then + echo "KBS startup failed! 
Check logs: $LOG_DIR/kbs.log" + tail -n 20 "$LOG_DIR/kbs.log" + exit 1 +fi + +# Let services stabilize +sleep 3 + +# Verify service status +echo -e "\n===== Service status =====" +echo "Service processes:" +ps -ef | grep -E 'kbs|rvps|grpc-as' | grep -v grep + +echo -e "\nService log locations:" +ls -l $LOG_DIR/*.log + +echo -e "\n===== Remote attestation environment deployment completed =====" +echo "KBS endpoint: http://0.0.0.0:8080" +echo "RVPS endpoint: 127.0.0.1:50003" +echo "AS endpoint: 127.0.0.1:50004" +} + +# Main command dispatcher +if [[ "$1" == "containerd"* ]];then + install_containerd +fi + +if [[ "$1" == "k8s"* ]];then + init_k8s +fi + +if [[ "$1" == "kdeploy"* ]];then + kata_deploy +fi + +if [[ "$1" == "rats"* ]];then + compile_coco + rats +fi + +if [[ "$1" == "all"* ]];then + install_containerd + init_k8s + kata_deploy + compile_coco + rats +fi \ No newline at end of file diff --git "a/kata-v3.15.0/doc/\344\270\200\351\224\256\351\203\250\347\275\262\346\234\272\345\257\206\345\256\271\345\231\250.md" "b/kata-v3.15.0/doc/\344\270\200\351\224\256\351\203\250\347\275\262\346\234\272\345\257\206\345\256\271\345\231\250.md" new file mode 100644 index 0000000000000000000000000000000000000000..b044dfe7ad2b99d038b3f6c883ecc891ea113a9e --- /dev/null +++ "b/kata-v3.15.0/doc/\344\270\200\351\224\256\351\203\250\347\275\262\346\234\272\345\257\206\345\256\271\345\231\250.md" @@ -0,0 +1,54 @@ +## 配置代理 + +>![](public_sys-resources/icon-note.gif) **说明:** +>若部署环境直通公网,则可跳过当前步骤。 + +1. 系统代理配置。 + 1. 打开配置文件。 + ``` + vim /etc/profile + ``` + + 2. 按“i”进入编辑模式,增加如下内容。 + ``` + export http_proxy="http://example.com:port" + export https_proxy=${http_proxy} + export no_proxy=localhost,registry.hw.com + ``` + 3. 修改脚本。 + + 修改build.sh脚本中的参数。 + ``` + vim /etc/profile + ``` + 将USE_PROXY修改为true,将HTTP_PROXY修改为代理IP。 + +## 使用脚本 + +1. 部署containerd。 + + ``` + ./build.sh containerd + ``` +2. 初始化k8s。 + + ``` + ./build.sh k8s + ``` +3. 部署kata-deploy。 + + ``` + ./build.sh kdeploy + ``` +4. 
部署远程证明。 + + ``` + ./build.sh rats + ``` +5. 一键从头部署全部。 + + ``` + ./build.sh all + ``` + + diff --git a/kata-v3.15.0/kube-flannel.yaml b/kata-v3.15.0/kube-flannel.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3f6c4328531f51f9d3c3e081920351c8e111bca2 --- /dev/null +++ b/kata-v3.15.0/kube-flannel.yaml @@ -0,0 +1,215 @@ +--- +kind: Namespace +apiVersion: v1 +metadata: + name: kube-flannel + labels: + k8s-app: flannel + pod-security.kubernetes.io/enforce: privileged +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + k8s-app: flannel + name: flannel +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - get +- apiGroups: + - "" + resources: + - nodes + verbs: + - get + - list + - watch +- apiGroups: + - "" + resources: + - nodes/status + verbs: + - patch +- apiGroups: + - networking.k8s.io + resources: + - clustercidrs + verbs: + - list + - watch +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + k8s-app: flannel + name: flannel +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: flannel +subjects: +- kind: ServiceAccount + name: flannel + namespace: kube-flannel +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + k8s-app: flannel + name: flannel + namespace: kube-flannel +--- +kind: ConfigMap +apiVersion: v1 +metadata: + name: kube-flannel-cfg + namespace: kube-flannel + labels: + tier: node + k8s-app: flannel + app: flannel +data: + cni-conf.json: | + { + "name": "cbr0", + "cniVersion": "0.3.1", + "plugins": [ + { + "type": "flannel", + "delegate": { + "hairpinMode": true, + "isDefaultGateway": true + } + }, + { + "type": "portmap", + "capabilities": { + "portMappings": true + } + } + ] + } + net-conf.json: | + { + "Network": "10.244.0.0/16", + "Backend": { + "Type": "vxlan" + } + } +--- +apiVersion: apps/v1 +kind: DaemonSet +metadata: + name: kube-flannel-ds + namespace: kube-flannel + labels: + tier: node + app: 
flannel
+    k8s-app: flannel
+spec:
+  selector:
+    matchLabels:
+      app: flannel
+  template:
+    metadata:
+      labels:
+        tier: node
+        app: flannel
+    spec:
+      affinity:
+        nodeAffinity:
+          requiredDuringSchedulingIgnoredDuringExecution:
+            nodeSelectorTerms:
+            - matchExpressions:
+              - key: kubernetes.io/os
+                operator: In
+                values:
+                - linux
+      hostNetwork: true
+      priorityClassName: system-node-critical
+      tolerations:
+      - operator: Exists  # tolerate every NoSchedule taint, incl. control-plane
+        effect: NoSchedule
+      serviceAccountName: flannel
+      initContainers:
+      - name: install-cni-plugin
+        image: docker.io/flannel/flannel-cni-plugin:v1.2.0
+        command:
+        - cp
+        args:
+        - -f
+        - /flannel
+        - /opt/cni/bin/flannel
+        volumeMounts:
+        - name: cni-plugin
+          mountPath: /opt/cni/bin
+      - name: install-cni
+        image: docker.io/flannel/flannel:v0.22.3
+        command:
+        - cp
+        args:
+        - -f
+        - /etc/kube-flannel/cni-conf.json
+        - /etc/cni/net.d/10-flannel.conflist
+        volumeMounts:
+        - name: cni
+          mountPath: /etc/cni/net.d
+        - name: flannel-cfg
+          mountPath: /etc/kube-flannel/
+      containers:
+      - name: kube-flannel
+        image: docker.io/flannel/flannel:v0.22.3
+        command:
+        - /opt/bin/flanneld
+        args:
+        - --ip-masq
+        - --kube-subnet-mgr
+        resources:
+          requests:
+            cpu: "200m"
+            memory: "100Mi"
+        securityContext:
+          privileged: false
+          capabilities:
+            add: ["NET_ADMIN", "NET_RAW"]  # needed for iptables rules; container itself is unprivileged
+        env:
+        - name: POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        - name: POD_NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        - name: EVENT_QUEUE_DEPTH
+          value: "5000"
+        volumeMounts:
+        - name: run
+          mountPath: /run/flannel
+        - name: flannel-cfg
+          mountPath: /etc/kube-flannel/
+        - name: xtables-lock
+          mountPath: /run/xtables.lock
+      volumes:
+      - name: run
+        hostPath:
+          path: /run/flannel
+      - name: cni-plugin
+        hostPath:
+          path: /opt/cni/bin
+      - name: cni
+        hostPath:
+          path: /etc/cni/net.d
+      - name: flannel-cfg
+        configMap:
+          name: kube-flannel-cfg
+      - name: xtables-lock
+        hostPath:
+          path: /run/xtables.lock
+          type: FileOrCreate
diff --git
a/kata-v3.15.0/update_kubeadm_init.sh b/kata-v3.15.0/update_kubeadm_init.sh
new file mode 100644
index 0000000000000000000000000000000000000000..8bcddab4e36c97e86c33f44884aca4fd53b85c04
--- /dev/null
+++ b/kata-v3.15.0/update_kubeadm_init.sh
@@ -0,0 +1,24 @@
+#!/bin/bash
+# Patch the stock kubeadm-init.yaml (produced by `kubeadm config print
+# init-defaults` in build.sh) for this host: real advertise address,
+# containerd CRI socket, pinned k8s version, flannel pod subnet, and a
+# KubeletConfiguration matching containerd's cgroup driver.
+
+IP_ADDRESS=$(hostname -I | awk '{print $1}')
+CONFIG_FILE="kubeadm-init.yaml"
+
+# Replace the 1.2.3.4 placeholder with this node's primary IP.
+sed -i "s/^  advertiseAddress: .*/  advertiseAddress: ${IP_ADDRESS}/" "$CONFIG_FILE"
+# Point the CRI socket at containerd's canonical /run path.
+sed -i "s|criSocket: unix:///var/run/containerd/containerd.sock|criSocket: unix:///run/containerd/containerd.sock|" "$CONFIG_FILE"
+sed -i "s/^kubernetesVersion: .*/kubernetesVersion: 1.32.4/" "$CONFIG_FILE"
+# Flannel (kube-flannel.yaml) expects podSubnet 10.244.0.0/16.
+sed -i '/serviceSubnet: 10.96.0.0\/12/a\  podSubnet: 10.244.0.0/16' "$CONFIG_FILE"
+sed -i '/imagePullSerial: true/d' "$CONFIG_FILE"
+
+# Append a KubeletConfiguration document: the kubelet cgroup driver must
+# match containerd's (cgroupfs here).
+cat <<EOF >> "$CONFIG_FILE"
+---
+kind: KubeletConfiguration
+apiVersion: kubelet.config.k8s.io/v1beta1
+cgroupDriver: cgroupfs
+EOF