diff --git a/bz1846732-gcp-vpc-move-vip-support-multiple-alias-ips.patch b/bz1846732-gcp-vpc-move-vip-support-multiple-alias-ips.patch new file mode 100644 index 0000000000000000000000000000000000000000..53f3f2f3960bbd5efff248b7bb728477d285ac47 --- /dev/null +++ b/bz1846732-gcp-vpc-move-vip-support-multiple-alias-ips.patch @@ -0,0 +1,318 @@ +--- a/heartbeat/gcp-vpc-move-vip.in 2020-08-03 15:48:28.497845842 +0200 ++++ b/heartbeat/gcp-vpc-move-vip.in 2020-08-03 15:49:24.129888908 +0200 +@@ -22,7 +22,8 @@ + import sys + import time + +-OCF_FUNCTIONS_DIR="%s/lib/heartbeat" % os.environ.get("OCF_ROOT") ++OCF_FUNCTIONS_DIR = os.environ.get("OCF_FUNCTIONS_DIR", "%s/lib/heartbeat" ++ % os.environ.get("OCF_ROOT")) + sys.path.append(OCF_FUNCTIONS_DIR) + + from ocf import * +@@ -43,6 +44,10 @@ + import urllib2 as urlrequest + + ++# Constants for alias add/remove modes ++ADD = 0 ++REMOVE = 1 ++ + CONN = None + THIS_VM = None + ALIAS = None +@@ -53,27 +58,27 @@ + + + 1.0 +- Floating IP Address on Google Cloud Platform - Using Alias IP address functionality to attach a secondary IP address to a running instance +- Floating IP Address on Google Cloud Platform ++ Floating IP Address or Range on Google Cloud Platform - Using Alias IP address functionality to attach a secondary IP range to a running instance ++ Floating IP Address or Range on Google Cloud Platform + + +- IP Address to be added including CIDR. E.g 192.168.0.1/32 +- IP Address to be added including CIDR. E.g 192.168.0.1/32 ++ IP range to be added including CIDR netmask (e.g., 192.168.0.1/32) ++ IP range to be added including CIDR netmask (e.g., 192.168.0.1/32) + + +- ++ + Subnet name for the Alias IP + Subnet name for the Alias IP + + +- +- List of hosts in the cluster ++ ++ List of hosts in the cluster, separated by spaces + Host list + + + +- If enabled (set to true), IP failover logs will be posted to stackdriver logging. Using stackdriver logging requires additional libraries (google-cloud-logging). +- Stackdriver-logging support. Requires additional libraries (google-cloud-logging). ++ If enabled (set to true), IP failover logs will be posted to stackdriver logging ++ Stackdriver-logging support + + + +@@ -107,7 +112,8 @@ + url = '%s?%s' % (metadata_url, params) + request = urlrequest.Request(url, headers=METADATA_HEADERS) + request_opener = urlrequest.build_opener(urlrequest.ProxyHandler({})) +- return request_opener.open(request, timeout=timeout * 1.1).read() ++ return request_opener.open( ++ request, timeout=timeout * 1.1).read().decode("utf-8") + + + def get_instance(project, zone, instance): +@@ -134,17 +140,21 @@ + time.sleep(1) + + +-def set_alias(project, zone, instance, alias, alias_range_name=None): +- fingerprint = get_network_ifaces(project, zone, instance)[0]['fingerprint'] ++def set_aliases(project, zone, instance, aliases, fingerprint): ++ """Sets the alias IP ranges for an instance. ++ ++ Args: ++ project: string, the project in which the instance resides. ++ zone: string, the zone in which the instance resides. ++ instance: string, the name of the instance. ++ aliases: list, the list of dictionaries containing alias IP ranges ++ to be added to or removed from the instance. ++ fingerprint: string, the fingerprint of the network interface. 
++ """ + body = { +- 'aliasIpRanges': [], +- 'fingerprint': fingerprint ++ 'aliasIpRanges': aliases, ++ 'fingerprint': fingerprint + } +- if alias: +- obj = {'ipCidrRange': alias} +- if alias_range_name: +- obj['subnetworkRangeName'] = alias_range_name +- body['aliasIpRanges'].append(obj) + + request = CONN.instances().updateNetworkInterface( + instance=instance, networkInterface='nic0', project=project, zone=zone, +@@ -153,21 +163,75 @@ + wait_for_operation(project, zone, operation) + + +-def get_alias(project, zone, instance): +- iface = get_network_ifaces(project, zone, instance) ++def add_rm_alias(mode, project, zone, instance, alias, alias_range_name=None): ++ """Adds or removes an alias IP range for a GCE instance. ++ ++ Args: ++ mode: int, a constant (ADD (0) or REMOVE (1)) indicating the ++ operation type. ++ project: string, the project in which the instance resides. ++ zone: string, the zone in which the instance resides. ++ instance: string, the name of the instance. ++ alias: string, the alias IP range to be added to or removed from ++ the instance. ++ alias_range_name: string, the subnet name for the alias IP range. ++ ++ Returns: ++ True if the existing list of alias IP ranges was modified, or False ++ otherwise. ++ """ ++ ifaces = get_network_ifaces(project, zone, instance) ++ fingerprint = ifaces[0]['fingerprint'] ++ ++ try: ++ old_aliases = ifaces[0]['aliasIpRanges'] ++ except KeyError: ++ old_aliases = [] ++ ++ new_aliases = [a for a in old_aliases if a['ipCidrRange'] != alias] ++ ++ if alias: ++ if mode == ADD: ++ obj = {'ipCidrRange': alias} ++ if alias_range_name: ++ obj['subnetworkRangeName'] = alias_range_name ++ new_aliases.append(obj) ++ elif mode == REMOVE: ++ pass # already removed during new_aliases build ++ else: ++ raise ValueError('Invalid value for mode: {}'.format(mode)) ++ ++ if (sorted(new_aliases) != sorted(old_aliases)): ++ set_aliases(project, zone, instance, new_aliases, fingerprint) ++ return True ++ else: ++ return False ++ ++ ++def add_alias(project, zone, instance, alias, alias_range_name=None): ++ return add_rm_alias(ADD, project, zone, instance, alias, alias_range_name) ++ ++ ++def remove_alias(project, zone, instance, alias): ++ return add_rm_alias(REMOVE, project, zone, instance, alias) ++ ++ ++def get_aliases(project, zone, instance): ++ ifaces = get_network_ifaces(project, zone, instance) + try: +- return iface[0]['aliasIpRanges'][0]['ipCidrRange'] ++ aliases = ifaces[0]['aliasIpRanges'] ++ return [a['ipCidrRange'] for a in aliases] + except KeyError: +- return '' ++ return [] + + +-def get_localhost_alias(): ++def get_localhost_aliases(): + net_iface = get_metadata('instance/network-interfaces', {'recursive': True}) +- net_iface = json.loads(net_iface.decode('utf-8')) ++ net_iface = json.loads(net_iface) + try: +- return net_iface[0]['ipAliases'][0] ++ return net_iface[0]['ipAliases'] + except (KeyError, IndexError): +- return '' ++ return [] + + + def get_zone(project, instance): +@@ -201,21 +265,17 @@ + + + def gcp_alias_start(alias): +- my_alias = get_localhost_alias() ++ my_aliases = get_localhost_aliases() + my_zone = get_metadata('instance/zone').split('/')[-1] + project = get_metadata('project/project-id') + +- # If I already have the IP, exit. If it has an alias IP that isn't the VIP, +- # then remove it +- if my_alias == alias: ++ if alias in my_aliases: ++ # TODO: Do we need to check alias_range_name? + logger.info( + '%s already has %s attached. 
No action required' % (THIS_VM, alias)) + sys.exit(OCF_SUCCESS) +- elif my_alias: +- logger.info('Removing %s from %s' % (my_alias, THIS_VM)) +- set_alias(project, my_zone, THIS_VM, '') + +- # Loops through all hosts & remove the alias IP from the host that has it ++ # If the alias is currently attached to another host, detach it. + hostlist = os.environ.get('OCF_RESKEY_hostlist', '') + if hostlist: + hostlist = hostlist.replace(THIS_VM, '').split() +@@ -223,47 +283,53 @@ + hostlist = get_instances_list(project, THIS_VM) + for host in hostlist: + host_zone = get_zone(project, host) +- host_alias = get_alias(project, host_zone, host) +- if alias == host_alias: ++ host_aliases = get_aliases(project, host_zone, host) ++ if alias in host_aliases: + logger.info( +- '%s is attached to %s - Removing all alias IP addresses from %s' % +- (alias, host, host)) +- set_alias(project, host_zone, host, '') ++ '%s is attached to %s - Removing %s from %s' % ++ (alias, host, alias, host)) ++ remove_alias(project, host_zone, host, alias) + break + +- # add alias IP to localhost +- set_alias( ++ # Add alias IP range to localhost ++ add_alias( + project, my_zone, THIS_VM, alias, + os.environ.get('OCF_RESKEY_alias_range_name')) + +- # Check the IP has been added +- my_alias = get_localhost_alias() +- if alias == my_alias: ++ # Verify that the IP range has been added ++ my_aliases = get_localhost_aliases() ++ if alias in my_aliases: + logger.info('Finished adding %s to %s' % (alias, THIS_VM)) +- elif my_alias: +- logger.error( +- 'Failed to add IP. %s has an IP attached but it isn\'t %s' % +- (THIS_VM, alias)) +- sys.exit(OCF_ERR_GENERIC) + else: +- logger.error('Failed to add IP address %s to %s' % (alias, THIS_VM)) ++ if my_aliases: ++ logger.error( ++ 'Failed to add alias IP range %s. %s has alias IP ranges attached but' ++ + ' they don\'t include %s' % (alias, THIS_VM, alias)) ++ else: ++ logger.error( ++ 'Failed to add IP range %s. %s has no alias IP ranges attached' ++ % (alias, THIS_VM)) + sys.exit(OCF_ERR_GENERIC) + + + def gcp_alias_stop(alias): +- my_alias = get_localhost_alias() ++ my_aliases = get_localhost_aliases() + my_zone = get_metadata('instance/zone').split('/')[-1] + project = get_metadata('project/project-id') + +- if my_alias == alias: +- logger.info('Removing %s from %s' % (my_alias, THIS_VM)) +- set_alias(project, my_zone, THIS_VM, '') ++ if alias in my_aliases: ++ logger.info('Removing %s from %s' % (alias, THIS_VM)) ++ remove_alias(project, my_zone, THIS_VM, alias) ++ else: ++ logger.info( ++ '%s is not attached to %s. 
No action required' ++ % (alias, THIS_VM)) + + + def gcp_alias_status(alias): +- my_alias = get_localhost_alias() +- if alias == my_alias: +- logger.info('%s has the correct IP address attached' % THIS_VM) ++ my_aliases = get_localhost_aliases() ++ if alias in my_aliases: ++ logger.info('%s has the correct IP range attached' % THIS_VM) + else: + sys.exit(OCF_NOT_RUNNING) + +@@ -275,7 +341,8 @@ + + # Populate global vars + try: +- CONN = googleapiclient.discovery.build('compute', 'v1') ++ CONN = googleapiclient.discovery.build('compute', 'v1', ++ cache_discovery=False) + except Exception as e: + logger.error('Couldn\'t connect with google api: ' + str(e)) + sys.exit(OCF_ERR_CONFIGURED) +@@ -283,7 +350,8 @@ + try: + THIS_VM = get_metadata('instance/name') + except Exception as e: +- logger.error('Couldn\'t get instance name, is this running inside GCE?: ' + str(e)) ++ logger.error('Couldn\'t get instance name, is this running inside GCE?: ' ++ + str(e)) + sys.exit(OCF_ERR_CONFIGURED) + + ALIAS = os.environ.get('OCF_RESKEY_alias_ip') +@@ -309,7 +377,8 @@ + formatter = logging.Formatter('gcp:alias "%(message)s"') + handler.setFormatter(formatter) + log.addHandler(handler) +- logger = logging.LoggerAdapter(log, {'OCF_RESOURCE_INSTANCE': OCF_RESOURCE_INSTANCE}) ++ logger = logging.LoggerAdapter(log, {'OCF_RESOURCE_INSTANCE': ++ OCF_RESOURCE_INSTANCE}) + except ImportError: + logger.error('Couldn\'t import google.cloud.logging, ' + 'disabling Stackdriver-logging support') diff --git a/bz1848673-sybaseASE-verify-start-action-only.patch b/bz1848673-sybaseASE-verify-start-action-only.patch new file mode 100644 index 0000000000000000000000000000000000000000..402bbd67b462a433becc3199f7bbfd9cb1c88c7f --- /dev/null +++ b/bz1848673-sybaseASE-verify-start-action-only.patch @@ -0,0 +1,41 @@ +From 953f689cb2a37606b6d4b2250ebec23f129f5095 Mon Sep 17 00:00:00 2001 +From: Reid wahl +Date: Thu, 9 Jul 2020 23:32:22 -0700 +Subject: [PATCH] sybaseASE: Run verify_all() for start operation only + +The `sybaseASE` resource agent runs the `verify_all()` function at the +beginning of start, stop, and monitor operations. + +When `verify_all()` is run for a probe (monitor) operation and +`sybase_home` resides on a cluster-managed filesystem, the probe often +fails with `$OCF_ERR_GENERIC` because the filesystem isn't mounted yet. +This prevents the resource from starting on that node. + +For the stop operation, there's simply no reason to run `verify_all()`. + +This patch removes `verify_all()` for the stop and monitor operations. +It is now only run for the start operation. + +Resolves: RHBZ#1848673 +Resolves: RHBZ#1848025 +--- + heartbeat/sybaseASE.in | 2 -- + 1 file changed, 2 deletions(-) + +diff --git a/heartbeat/sybaseASE.in b/heartbeat/sybaseASE.in +index 9ddd429be..7ff30bd31 100755 +--- a/heartbeat/sybaseASE.in ++++ b/heartbeat/sybaseASE.in +@@ -864,12 +864,10 @@ case $__OCF_ACTION in + exit $? + ;; + stop) +- verify_all || exit $OCF_ERR_GENERIC + ase_stop + exit $? + ;; + status | monitor) +- verify_all || exit $OCF_ERR_GENERIC + ase_status $OCF_CHECK_LEVEL + exit $? 
+ ;;
diff --git a/bz1850779-azure-lb-fix-redirect-issue.patch b/bz1850779-azure-lb-fix-redirect-issue.patch
new file mode 100644
index 0000000000000000000000000000000000000000..b1716130fe34ab670b2e9a1198a192a61ade84b5
--- /dev/null
+++ b/bz1850779-azure-lb-fix-redirect-issue.patch
@@ -0,0 +1,54 @@
+From d22700fc5d5098c683b465ea0fede43803fd4d6b Mon Sep 17 00:00:00 2001
+From: Reid wahl
+Date: Tue, 7 Jul 2020 02:18:09 -0700
+Subject: [PATCH] azure-lb: Don't redirect nc listener output to pidfile
+
+The `lb_start()` function spawns an `nc` listener background process
+and echoes the resulting pid to `$pidfile`. Due to a bug in the
+redirection, all future data received by the `nc` process is also
+appended to `$pidfile`.
+
+If binary data is received later and appended to `$pidfile`, the
+monitor operation fails when `grep` searches the now-binary file.
+
+```
+line 97: kill: Binary: arguments must be process or job IDs ]
+line 97: kill: file: arguments must be process or job IDs ]
+line 97: kill: /var/run/nc_PF2_02.pid: arguments must be process or job
+ IDs ]
+line 97: kill: matches: arguments must be process or job IDs ]
+```
+
+Then the start operation fails during recovery. `lb_start()` spawns a
+new `nc` process, but the old process is still running and using the
+configured port.
+
+```
+nc_PF2_02_start_0:777:stderr [ Ncat: bind to :::62502: Address
+ already in use. QUITTING. ]
+```
+
+This patch fixes the issue by removing the `nc &` command from the
+section whose output gets redirected to `$pidfile`. Now, only the `nc`
+PID is echoed to `$pidfile`.
+
+Resolves: RHBZ#1850778
+Resolves: RHBZ#1850779
+---
+ heartbeat/azure-lb | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/heartbeat/azure-lb b/heartbeat/azure-lb
+index 05c134514..05755d778 100755
+--- a/heartbeat/azure-lb
++++ b/heartbeat/azure-lb
+@@ -113,7 +113,8 @@ lb_start() {
+ if ! lb_monitor; then
+ ocf_log debug "Starting $process: $cmd"
+ # Execute the command as created above
+- eval "$cmd & echo \$!" > $pidfile
++ $cmd &
++ echo $! > $pidfile
+ if lb_monitor; then
+ ocf_log debug "$process: $cmd started successfully, calling monitor"
+ lb_monitor
diff --git a/bz1855888-SAPHana-use-actual-mode.patch b/bz1855888-SAPHana-use-actual-mode.patch
new file mode 100644
index 0000000000000000000000000000000000000000..e4c43eb704412f4086c9f0137cb578fae4c1032d
--- /dev/null
+++ b/bz1855888-SAPHana-use-actual-mode.patch
@@ -0,0 +1,59 @@
+From ec9fd4e526e572fe9bc0070186fa584b032eac22 Mon Sep 17 00:00:00 2001
+From: AngelaBriel
+Date: Fri, 5 Mar 2021 19:18:02 +0100
+Subject: [PATCH] during the function 'check_for_primary' the command
+ 'hdbnsutil' sometimes does not work but times out. As a fallback we use
+ 'getParameter.py' to get some parameter values from the global.ini file. In
+ the past the use of the variable 'mode' was sufficient, but now we more often
+ see the problem that this variable does not contain the current mode of the
+ node. So we will switch to the variable 'actual_mode', which is more
+ reliably updated by the SAP software and will (hopefully) provide us with the
+ current mode of the node at times when 'hdbnsutil' refuses to answer. This
+ change will help to avoid the irritating and confusing message 'secondary has
+ unexpected sync status PRIM ==> RESCORE' on a primary node as seen in
+ bsc#1181765
+
+---
+ heartbeat/SAPHana | 20 ++++++++++++++------
+ 1 file changed, 14 insertions(+), 6 deletions(-)
+
+diff --git a/heartbeat/SAPHana b/heartbeat/SAPHana
+index 64e61e8..cd91ddf 100755
+--- a/heartbeat/SAPHana
++++ b/heartbeat/SAPHana
+@@ -1054,21 +1054,29 @@ function check_for_primary() {
+ super_ocf_log info "FLOW $FUNCNAME ($*)"
+ local rc=$HANA_STATE_DEFECT
+ # TODO: PRIO 3: Check beginning from which SPS does SAP support HDBSettings.sh?
+- # TODO: Limit the runtime of hdbnsutil and use getParameter.py as fallback
+- # TODO: PRIO2: Maybe we need to use a fallback interface when hdbnsutil does not answer properly -> lookup in config files?
+- # This might also solve some problems when we could not figure-out the ilocal or remote site name
++ # DONE: Limit the runtime of hdbnsutil and use getParameter.py as fallback
++ # DONE: PRIO2: Maybe we need to use a fallback interface when hdbnsutil does not answer properly -> lookup in config files?
++ # TODO: This might also solve some problems when we could not figure-out the local or remote site name (site_name,site_id from global.ini)
+ local chkMethod=""
++ local ini_mode=""
+ for chkMethod in hU hU hU gP; do
+ case "$chkMethod" in
+ gP )
++ # fallback for 'hdbnsutil' failing 3 times.
+ local gpKeys=""
+- gpKeys=$(echo --key=global.ini/system_replication/{mode,site_name,site_id})
++ gpKeys=$(echo --key=global.ini/system_replication/{actual_mode,mode})
+ node_full_status=$(HANA_CALL --timeout "$HANA_CALL_TIMEOUT" --cmd "HDBSettings.sh getParameter.py $gpKeys --sapcontrol=1" 2>&1 | awk -F/ 'BEGIN {out=0} /^SAPCONTROL-OK: / { out=1 } /^SAPCONTROL-OK: / { out=0 } /=/ {if (out==1) {print $3} }')
+- node_status=$(echo "$node_full_status" | awk -F= '$1=="mode" {print $2}')
++ # first try to get the value of 'actual_mode' from the global.ini
++ ini_mode=$(echo "$node_full_status" | awk -F= '$1=="actual_mode" {print $2}')
++ # if 'actual_mode' is not available, fallback to 'mode'
++ if [ -z "$ini_mode" ]; then
++ ini_mode=$(echo "$node_full_status" | awk -F= '$1=="mode" {print $2}')
++ fi
++ node_status="$ini_mode"
+ super_ocf_log info "ACT: Using getParameter.py as fallback - node_status=$node_status"
+ ;;
+ hU | * )
+- # DONE: PRIO1: Begginning from SAP HANA rev 112.03 -sr_state is not longer supported
++ # DONE: PRIO1: Beginning from SAP HANA rev 112.03 -sr_state is not longer supported
+ node_full_status=$(HANA_CALL --timeout "$HANA_CALL_TIMEOUT" --cmd "$hdbState" 2>/dev/null )
+ node_status=$(echo "$node_full_status" | awk '$1=="mode:" {print $2}')
+ super_ocf_log debug "DBG: check_for_primary: node_status=$node_status"
diff --git a/bz1862121-azure-events-1-handle-exceptions-in-urlopen.patch b/bz1862121-azure-events-1-handle-exceptions-in-urlopen.patch
new file mode 100644
index 0000000000000000000000000000000000000000..fa194c93757da562ecd34f17494d964318ae0df2
--- /dev/null
+++ b/bz1862121-azure-events-1-handle-exceptions-in-urlopen.patch
@@ -0,0 +1,71 @@
+From 194909ff08cfe75cd5da9f704d8ed4cc9ab40341 Mon Sep 17 00:00:00 2001
+From: Gustavo Figueira
+Date: Tue, 19 May 2020 10:58:34 +0200
+Subject: [PATCH 1/2] azure-events: handle exceptions in urlopen
+
+The locking in azure-events does not correctly handle some failures.
+
+If the metadata server is not reachable or has an error
+handling the request, attr_globalPullState will never go
+back to IDLE unless the administrator manually changes it.
+
+> azure-events: ERROR: [Errno 104] Connection reset by peer
+> lrmd[2734]: notice: rsc_azure-events_monitor_10000:113088:stderr [ ocf-exit-reason:[Errno 104] Connection reset by peer ]
+---
+ heartbeat/azure-events.in | 16 +++++++++++++---
+ 1 file changed, 13 insertions(+), 3 deletions(-)
+
+diff --git a/heartbeat/azure-events.in b/heartbeat/azure-events.in
+index 8709d97e3..bd812f4b2 100644
+--- a/heartbeat/azure-events.in
++++ b/heartbeat/azure-events.in
+@@ -82,9 +82,19 @@ class azHelper:
+ req = urllib2.Request(url, postData)
+ req.add_header("Metadata", "true")
+ req.add_header("User-Agent", USER_AGENT)
+- resp = urllib2.urlopen(req)
+- data = resp.read()
+- ocf.logger.debug("_sendMetadataRequest: response = %s" % data)
++ try:
++ resp = urllib2.urlopen(req)
++ except URLError as e:
++ if hasattr(e, 'reason'):
++ print('We failed to reach a server. Reason: '), e.reason
++ clusterHelper.setAttr(attr_globalPullState, "IDLE")
++ elif hasattr(e, 'code'):
++ print('The server couldn\'t fulfill the request. Error code: '), e.code
++ clusterHelper.setAttr(attr_globalPullState, "IDLE")
++ else:
++ data = resp.read()
++ ocf.logger.debug("_sendMetadataRequest: response = %s" % data)
++
+ if data:
+ data = json.loads(data)
+
+
+From c4071ec4a82fcb831f170f341e0790633e4b904f Mon Sep 17 00:00:00 2001
+From: Gustavo Figueira
+Date: Tue, 19 May 2020 12:53:22 +0200
+Subject: [PATCH 2/2] azure-events: use ocf.logger.warning instead of print
+
+---
+ heartbeat/azure-events.in | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/heartbeat/azure-events.in b/heartbeat/azure-events.in
+index bd812f4b2..a48a86309 100644
+--- a/heartbeat/azure-events.in
++++ b/heartbeat/azure-events.in
+@@ -86,10 +86,10 @@ class azHelper:
+ resp = urllib2.urlopen(req)
+ except URLError as e:
+ if hasattr(e, 'reason'):
+- print('We failed to reach a server. Reason: '), e.reason
++ ocf.logger.warning("Failed to reach the server: %s" % e.reason)
+ clusterHelper.setAttr(attr_globalPullState, "IDLE")
+ elif hasattr(e, 'code'):
+- print('The server couldn\'t fulfill the request. Error code: '), e.code
++ ocf.logger.warning("The server couldn\'t fulfill the request.
Error code: %s" % e.code) + clusterHelper.setAttr(attr_globalPullState, "IDLE") + else: + data = resp.read() diff --git a/bz1862121-azure-events-2-import-urlerror-encode-postdata.patch b/bz1862121-azure-events-2-import-urlerror-encode-postdata.patch new file mode 100644 index 0000000000000000000000000000000000000000..7795e782086b47f4b55c6039be085d939df61900 --- /dev/null +++ b/bz1862121-azure-events-2-import-urlerror-encode-postdata.patch @@ -0,0 +1,68 @@ +From f2bf1d8a07ea810099b03469883cb7f485ab9ac1 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Mon, 27 Jul 2020 10:09:43 +0200 +Subject: [PATCH 1/2] azure-events: import URLError and encode postData when + necessary + +--- + heartbeat/azure-events.in | 6 ++++++ + 1 file changed, 6 insertions(+) + +diff --git a/heartbeat/azure-events.in b/heartbeat/azure-events.in +index d4a166d9f..a7f359468 100644 +--- a/heartbeat/azure-events.in ++++ b/heartbeat/azure-events.in +@@ -13,8 +13,10 @@ import subprocess + import json + try: + import urllib2 ++ from urllib2 import URLError + except ImportError: + import urllib.request as urllib2 ++ from urllib.error import URLError + import socket + from collections import defaultdict + +@@ -76,9 +78,13 @@ class azHelper: + Send a request to Azure's Azure Metadata Service API + """ + url = "%s/%s?api-version=%s" % (azHelper.metadata_host, endpoint, azHelper.api_version) ++ data = "" + ocf.logger.debug("_sendMetadataRequest: begin; endpoint = %s, postData = %s" % (endpoint, postData)) + ocf.logger.debug("_sendMetadataRequest: url = %s" % url) + ++ if postData and type(postData) != bytes: ++ postData = postData.encode() ++ + req = urllib2.Request(url, postData) + req.add_header("Metadata", "true") + req.add_header("User-Agent", USER_AGENT) + +From 1ab5d71bff95eb271f1e1bbc401961dc313219d9 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Wed, 29 Jul 2020 21:25:43 +0200 +Subject: [PATCH 2/2] azure-events: report error if jsondata not received + +--- + heartbeat/azure-events.in | 8 ++++++-- + 1 file changed, 6 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/azure-events.in b/heartbeat/azure-events.in +index a7f359468..3a24d6358 100644 +--- a/heartbeat/azure-events.in ++++ b/heartbeat/azure-events.in +@@ -117,8 +117,12 @@ class azHelper: + jsondata = azHelper._sendMetadataRequest(azHelper.instance_api) + ocf.logger.debug("getInstanceInfo: json = %s" % jsondata) + +- ocf.logger.debug("getInstanceInfo: finished, returning {}".format(jsondata["compute"])) +- return attrDict(jsondata["compute"]) ++ if jsondata: ++ ocf.logger.debug("getInstanceInfo: finished, returning {}".format(jsondata["compute"])) ++ return attrDict(jsondata["compute"]) ++ else: ++ ocf.ocf_exit_reason("getInstanceInfo: Unable to get instance info") ++ sys.exit(ocf.OCF_ERR_GENERIC) + + @staticmethod + def pullScheduledEvents(): diff --git a/bz1862121-azure-events-3-decode-improvement.patch b/bz1862121-azure-events-3-decode-improvement.patch new file mode 100644 index 0000000000000000000000000000000000000000..437aca51e82b119b4f6d6a3dcd7858b2f31f7f1b --- /dev/null +++ b/bz1862121-azure-events-3-decode-improvement.patch @@ -0,0 +1,47 @@ +--- a/heartbeat/azure-events.in 2020-08-03 16:55:42.336402080 +0200 ++++ b/heartbeat/azure-events.in 2020-07-31 14:27:32.336656383 +0200 +@@ -189,6 +189,8 @@ + ocf.logger.debug("_exec: cmd = %s" % " ".join(command)) + try: + ret = subprocess.check_output(command) ++ if type(ret) != str: ++ ret = ret.decode() + ocf.logger.debug("_exec: return = %s" % ret) + return ret.rstrip() + except Exception as 
err: +@@ -242,7 +244,7 @@ + + nodes = [] + nodeList = clusterHelper._exec("crm_node", "--list") +- for n in nodeList.decode().split("\n"): ++ for n in nodeList.split("\n"): + nodes.append(n.split()[1]) + ocf.logger.debug("getAllNodes: finished; return %s" % str(nodes)) + +@@ -313,7 +315,7 @@ + ocf.logger.warning("transitionSummary: received unexpected transition summary: %s" % summary) + return False + summary = summary.split("Transition Summary:")[1] +- ret = summary.decode().split("\n").pop(0) ++ ret = summary.split("\n").pop(0) + + ocf.logger.debug("transitionSummary: finished; return = %s" % str(ret)) + return ret +@@ -334,7 +336,7 @@ + if len(resources) == 0: + ret = [] + else: +- ret = resources.decode().split("\n") ++ ret = resources.split("\n") + + ocf.logger.debug("listOperationsOnNode: finished; return = %s" % str(ret)) + return ret +@@ -480,7 +482,7 @@ + + eventIDStr = clusterHelper.getAttr(attr_pendingEventIDs, node=node) + if eventIDStr: +- eventIDs = eventIDStr.decode().split(",") ++ eventIDs = eventIDStr.split(",") + else: + eventIDs = None + diff --git a/bz1905737-aws-add-imdsv2-support.patch b/bz1905737-aws-add-imdsv2-support.patch new file mode 100644 index 0000000000000000000000000000000000000000..09772ccefbb4a2a2c45d964ecdd810204a6053a1 --- /dev/null +++ b/bz1905737-aws-add-imdsv2-support.patch @@ -0,0 +1,97 @@ +From 8f10d0eb1e33d38ab6e89015a903620c54edd7c1 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Fri, 13 Nov 2020 16:36:20 +0100 +Subject: [PATCH] AWS agents: add support for IMDSv2 + +--- + heartbeat/aws-vpc-move-ip | 5 +++-- + heartbeat/aws-vpc-route53.in | 3 ++- + heartbeat/awseip | 9 +++++---- + heartbeat/awsvip | 7 ++++--- + 4 files changed, 14 insertions(+), 10 deletions(-) + +diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip +index 72a89ecb1..cbb629b00 100755 +--- a/heartbeat/aws-vpc-move-ip ++++ b/heartbeat/aws-vpc-move-ip +@@ -215,7 +215,8 @@ ec2ip_validate() { + return $OCF_ERR_CONFIGURED + fi + +- EC2_INSTANCE_ID="$(curl -s http://169.254.169.254/latest/meta-data/instance-id)" ++ TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") ++ EC2_INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id -H "X-aws-ec2-metadata-token: $TOKEN") + + if [ -z "${EC2_INSTANCE_ID}" ]; then + ocf_exit_reason "Instance ID not found. Is this a EC2 instance?" +@@ -329,7 +330,7 @@ ec2ip_get_instance_eni() { + fi + ocf_log debug "MAC address associated with interface ${OCF_RESKEY_interface}: ${MAC_ADDR}" + +- cmd="curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC_ADDR}/interface-id" ++ cmd="curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC_ADDR}/interface-id -H \"X-aws-ec2-metadata-token: $TOKEN\"" + ocf_log debug "executing command: $cmd" + EC2_NETWORK_INTERFACE_ID="$(eval $cmd)" + rc=$? 
+diff --git a/heartbeat/aws-vpc-route53.in b/heartbeat/aws-vpc-route53.in +index b06b93726..4fb17019b 100644 +--- a/heartbeat/aws-vpc-route53.in ++++ b/heartbeat/aws-vpc-route53.in +@@ -347,7 +347,8 @@ r53_monitor() { + _get_ip() { + case $OCF_RESKEY_ip in + local|public) +- IPADDRESS="$(curl -s http://169.254.169.254/latest/meta-data/${OCF_RESKEY_ip}-ipv4)";; ++ TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") ++ IPADDRESS=$(curl -s http://169.254.169.254/latest/meta-data/${OCF_RESKEY_ip}-ipv4 -H "X-aws-ec2-metadata-token: $TOKEN");; + *.*.*.*) + IPADDRESS="${OCF_RESKEY_ip}";; + esac +diff --git a/heartbeat/awseip b/heartbeat/awseip +index 445a03666..de1967774 100755 +--- a/heartbeat/awseip ++++ b/heartbeat/awseip +@@ -149,12 +149,12 @@ awseip_start() { + awseip_monitor && return $OCF_SUCCESS + + if [ -n "${PRIVATE_IP_ADDRESS}" ]; then +- NETWORK_INTERFACES_MACS="$(curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/)" ++ NETWORK_INTERFACES_MACS=$(curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/ -H "X-aws-ec2-metadata-token: $TOKEN") + for MAC in ${NETWORK_INTERFACES_MACS}; do +- curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC}/local-ipv4s | ++ curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC}/local-ipv4s -H "X-aws-ec2-metadata-token: $TOKEN" | + grep -q "^${PRIVATE_IP_ADDRESS}$" + if [ $? -eq 0 ]; then +- NETWORK_ID="$(curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC}/interface-id)" ++ NETWORK_ID=$(curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC}/interface-id -H "X-aws-ec2-metadata-token: $TOKEN") + fi + done + $AWSCLI --profile $OCF_RESKEY_profile ec2 associate-address \ +@@ -244,7 +244,8 @@ AWSCLI="${OCF_RESKEY_awscli}" + ELASTIC_IP="${OCF_RESKEY_elastic_ip}" + ALLOCATION_ID="${OCF_RESKEY_allocation_id}" + PRIVATE_IP_ADDRESS="${OCF_RESKEY_private_ip_address}" +-INSTANCE_ID="$(curl -s http://169.254.169.254/latest/meta-data/instance-id)" ++TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") ++INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id -H "X-aws-ec2-metadata-token: $TOKEN") + + case $__OCF_ACTION in + start) +diff --git a/heartbeat/awsvip b/heartbeat/awsvip +index 3eb31e6ae..8050107e8 100755 +--- a/heartbeat/awsvip ++++ b/heartbeat/awsvip +@@ -206,9 +206,10 @@ esac + + AWSCLI="${OCF_RESKEY_awscli}" + SECONDARY_PRIVATE_IP="${OCF_RESKEY_secondary_private_ip}" +-INSTANCE_ID="$(curl -s http://169.254.169.254/latest/meta-data/instance-id)" +-MAC_ADDRESS="$(curl -s http://169.254.169.254/latest/meta-data/mac)" +-NETWORK_ID="$(curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC_ADDRESS}/interface-id)" ++TOKEN=$(curl -X PUT "http://169.254.169.254/latest/api/token" -H "X-aws-ec2-metadata-token-ttl-seconds: 21600") ++INSTANCE_ID=$(curl -s http://169.254.169.254/latest/meta-data/instance-id -H "X-aws-ec2-metadata-token: $TOKEN") ++MAC_ADDRESS=$(curl -s http://169.254.169.254/latest/meta-data/mac -H "X-aws-ec2-metadata-token: $TOKEN") ++NETWORK_ID=$(curl -s http://169.254.169.254/latest/meta-data/network/interfaces/macs/${MAC_ADDRESS}/interface-id -H "X-aws-ec2-metadata-token: $TOKEN") + + case $__OCF_ACTION in + start) diff --git a/bz1913936-1-gcp-vpc-move-add-project-parameter.patch b/bz1913936-1-gcp-vpc-move-add-project-parameter.patch new file mode 100644 
index 0000000000000000000000000000000000000000..16cfb10443981cfbcb1c2bca65ab8b5f6c2c1994 --- /dev/null +++ b/bz1913936-1-gcp-vpc-move-add-project-parameter.patch @@ -0,0 +1,86 @@ +From 560683500b3f9d5d8e183a569daea27422ae5268 Mon Sep 17 00:00:00 2001 +From: Reid Wahl +Date: Thu, 7 Jan 2021 12:25:04 -0800 +Subject: [PATCH] gcp-vpc-move-route, gcp-vpc-move-vip: Parameterize project ID + +Resolves: RHBZ#1913932 +Resolves: RHBZ#1913936 + +Signed-off-by: Reid Wahl +--- + heartbeat/gcp-vpc-move-route.in | 13 ++++++++++++- + heartbeat/gcp-vpc-move-vip.in | 16 ++++++++++++++-- + 2 files changed, 26 insertions(+), 3 deletions(-) + +diff --git a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in +index d8e8ea8dd..179eba15a 100644 +--- a/heartbeat/gcp-vpc-move-route.in ++++ b/heartbeat/gcp-vpc-move-route.in +@@ -106,6 +106,16 @@ Name of the VPC network + + + ++ ++ ++Project ID of the instance. It can be useful to set this attribute if ++the instance is in a shared service project. Otherwise, the agent should ++be able to determine the project ID automatically. ++ ++Project ID ++ ++ ++ + + + Name of the network interface +@@ -215,7 +225,8 @@ def validate(ctx): + try: + ctx.instance = get_metadata('instance/name') + ctx.zone = get_metadata('instance/zone').split('/')[-1] +- ctx.project = get_metadata('project/project-id') ++ ctx.project = os.environ.get( ++ 'OCF_RESKEY_project', get_metadata('project/project-id')) + except Exception as e: + logger.error( + 'Instance information not found. Is this a GCE instance ?: %s', str(e)) +diff --git a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in +index 01d91a59d..e792f71d5 100755 +--- a/heartbeat/gcp-vpc-move-vip.in ++++ b/heartbeat/gcp-vpc-move-vip.in +@@ -75,6 +75,16 @@ METADATA = \ + Host list + + ++ ++ ++ Project ID of the instance. It can be useful to set this ++ attribute if the instance is in a shared service project. ++ Otherwise, the agent should be able to determine the project ID ++ automatically. ++ ++ Project ID ++ ++ + + If enabled (set to true), IP failover logs will be posted to stackdriver logging + Stackdriver-logging support +@@ -267,7 +277,8 @@ def get_instances_list(project, exclude): + def gcp_alias_start(alias): + my_aliases = get_localhost_aliases() + my_zone = get_metadata('instance/zone').split('/')[-1] +- project = get_metadata('project/project-id') ++ project = os.environ.get( ++ 'OCF_RESKEY_project', get_metadata('project/project-id')) + + if alias in my_aliases: + # TODO: Do we need to check alias_range_name? 
+@@ -315,7 +326,8 @@ def gcp_alias_start(alias): + def gcp_alias_stop(alias): + my_aliases = get_localhost_aliases() + my_zone = get_metadata('instance/zone').split('/')[-1] +- project = get_metadata('project/project-id') ++ project = os.environ.get( ++ 'OCF_RESKEY_project', get_metadata('project/project-id')) + + if alias in my_aliases: + logger.info('Removing %s from %s' % (alias, THIS_VM)) diff --git a/bz1913936-2-gcp-vpc-move-route-fixes.patch b/bz1913936-2-gcp-vpc-move-route-fixes.patch new file mode 100644 index 0000000000000000000000000000000000000000..a94f0eec2185c99c494585ac09b84f1d15d3b8ef --- /dev/null +++ b/bz1913936-2-gcp-vpc-move-route-fixes.patch @@ -0,0 +1,106 @@ +From 523c4cee64b3b8ee9f603a940d83a6628531078d Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Tue, 19 Jan 2021 10:56:47 +0100 +Subject: [PATCH 1/2] gcp-vpc-move-route: fix stop-action when route stopped, + and fix check_conflicting_routes() + +--- + heartbeat/gcp-vpc-move-route.in | 23 +++++++++++++++++------ + 1 file changed, 17 insertions(+), 6 deletions(-) + +diff --git a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in +index 179eba15a..9fe985832 100644 +--- a/heartbeat/gcp-vpc-move-route.in ++++ b/heartbeat/gcp-vpc-move-route.in +@@ -252,8 +252,19 @@ def validate(ctx): + def check_conflicting_routes(ctx): + fl = '(destRange = "%s*") AND (network = "%s") AND (name != "%s")' % ( + ctx.ip, ctx.vpc_network_url, ctx.route_name) +- request = ctx.conn.routes().list(project=ctx.project, filter=fl) +- response = request.execute() ++ try: ++ request = ctx.conn.routes().list(project=ctx.project, filter=fl) ++ response = request.execute() ++ except googleapiclient.errors.HttpError as e: ++ if e.resp.status == 404: ++ logger.error('VPC network not found') ++ if 'stop' in sys.argv[1]: ++ sys.exit(OCF_SUCCESS) ++ else: ++ sys.exit(OCF_ERR_CONFIGURED) ++ else: ++ raise ++ + route_list = response.get('items', None) + if route_list: + logger.error( +@@ -353,16 +364,16 @@ def route_monitor(ctx): + logger.info('GCP route monitor: checking route table') + + # Ensure that there is no route that we are not aware of that is also handling our IP +- check_conflicting_routes ++ check_conflicting_routes(ctx) + + try: + request = ctx.conn.routes().get(project=ctx.project, route=ctx.route_name) + response = request.execute() + except googleapiclient.errors.HttpError as e: +- if 'Insufficient Permission' in e.content: +- return OCF_ERR_PERM +- elif e.resp.status == 404: ++ if e.resp.status == 404: + return OCF_NOT_RUNNING ++ elif 'Insufficient Permission' in e.content: ++ return OCF_ERR_PERM + else: + raise + + +From 50dbfc3230e87b8d29163c235e6866d15fd6fc1b Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Tue, 19 Jan 2021 11:50:22 +0100 +Subject: [PATCH 2/2] gcp-vpc-move-vip: correctly return error when no + instances are returned + +--- + heartbeat/gcp-vpc-move-vip.in | 20 +++++++++++++++----- + 1 file changed, 15 insertions(+), 5 deletions(-) + +diff --git a/heartbeat/gcp-vpc-move-vip.in b/heartbeat/gcp-vpc-move-vip.in +index e792f71d5..bbbd87b7a 100755 +--- a/heartbeat/gcp-vpc-move-vip.in ++++ b/heartbeat/gcp-vpc-move-vip.in +@@ -263,8 +263,14 @@ def get_instances_list(project, exclude): + hostlist = [] + request = CONN.instances().aggregatedList(project=project) + while request is not None: +- response = request.execute() +- zones = response.get('items', {}) ++ try: ++ response = request.execute() ++ zones = response.get('items', {}) ++ except googleapiclient.errors.HttpError as e: ++ if e.resp.status 
== 404: ++ logger.debug('get_instances_list(): no instances found') ++ return '' ++ + for zone in zones.values(): + for inst in zone.get('instances', []): + if inst['name'] != exclude: +@@ -303,9 +309,13 @@ def gcp_alias_start(alias): + break + + # Add alias IP range to localhost +- add_alias( +- project, my_zone, THIS_VM, alias, +- os.environ.get('OCF_RESKEY_alias_range_name')) ++ try: ++ add_alias( ++ project, my_zone, THIS_VM, alias, ++ os.environ.get('OCF_RESKEY_alias_range_name')) ++ except googleapiclient.errors.HttpError as e: ++ if e.resp.status == 404: ++ sys.exit(OCF_ERR_CONFIGURED) + + # Verify that the IP range has been added + my_aliases = get_localhost_aliases() diff --git a/bz1913936-3-gcp-vpc-move-route-make-vpc_network-optional.patch b/bz1913936-3-gcp-vpc-move-route-make-vpc_network-optional.patch new file mode 100644 index 0000000000000000000000000000000000000000..5def90aa8c8612d8876e98447c8251b619f682e4 --- /dev/null +++ b/bz1913936-3-gcp-vpc-move-route-make-vpc_network-optional.patch @@ -0,0 +1,22 @@ +From 4812c67894063f8125a3915d32da168931f088c6 Mon Sep 17 00:00:00 2001 +From: Oyvind Albrigtsen +Date: Thu, 25 Feb 2021 16:49:55 +0100 +Subject: [PATCH] gcp-vpc-move-route: make "vpc_network" optional + +--- + heartbeat/gcp-vpc-move-route.in | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +diff --git a/heartbeat/gcp-vpc-move-route.in b/heartbeat/gcp-vpc-move-route.in +index 9fe985832..fd2d2ec59 100644 +--- a/heartbeat/gcp-vpc-move-route.in ++++ b/heartbeat/gcp-vpc-move-route.in +@@ -98,7 +98,7 @@ subnet ranges + + + +- ++ + + Name of the VPC network + diff --git a/bz1935798-gcp-pd-move-fix-partially-matched-disk_name.patch b/bz1935798-gcp-pd-move-fix-partially-matched-disk_name.patch new file mode 100644 index 0000000000000000000000000000000000000000..83aef938782c70061857db16e2277e4dea9bc52a --- /dev/null +++ b/bz1935798-gcp-pd-move-fix-partially-matched-disk_name.patch @@ -0,0 +1,58 @@ +From 2927279ba1677e9dda202121176a8245a7ef76ca Mon Sep 17 00:00:00 2001 +From: tositaka77 <45960626+tositaka77@users.noreply.github.com> +Date: Wed, 14 Oct 2020 22:22:56 +0900 +Subject: [PATCH] fixes and improvements + +- Fixed "regional" PD functionality in attach_disk() +- Improve to exact match disk_name with disks.source in detach_disk() +--- + heartbeat/gcp-pd-move.in | 8 +++++++- + 1 file changed, 7 insertions(+), 1 deletion(-) + +diff --git a/heartbeat/gcp-pd-move.in b/heartbeat/gcp-pd-move.in +index f82bd25e5..e99cc71f8 100644 +--- a/heartbeat/gcp-pd-move.in ++++ b/heartbeat/gcp-pd-move.in +@@ -49,6 +49,7 @@ else: + CONN = None + PROJECT = None + ZONE = None ++REGION = None + LIST_DISK_ATTACHED_INSTANCES = None + INSTANCE_NAME = None + +@@ -148,6 +149,7 @@ def populate_vars(): + global INSTANCE_NAME + global PROJECT + global ZONE ++ global REGION + global LIST_DISK_ATTACHED_INSTANCES + + # Populate global vars +@@ -175,6 +177,7 @@ def populate_vars(): + PROJECT = get_metadata('project/project-id') + if PARAMETERS['disk_scope'] in ['detect', 'regional']: + ZONE = get_metadata('instance/zone').split('/')[-1] ++ REGION = ZONE[:-2] + else: + ZONE = PARAMETERS['disk_scope'] + LIST_DISK_ATTACHED_INSTANCES = get_disk_attached_instances( +@@ -255,7 +258,7 @@ def detach_disk(instance, disk_name): + + device_name = None + for disk in response['disks']: +- if disk_name in disk['source']: ++ if disk_name == re.sub('.*disks/',"",disk['source']): + device_name = disk['deviceName'] + break + +@@ -273,6 +276,9 @@ def detach_disk(instance, disk_name): + + def attach_disk(instance, 
disk_name): + location = 'zones/%s' % ZONE ++ if PARAMETERS['disk_scope'] == 'regional': ++ location = 'regions/%s' % REGION ++ + prefix = 'https://www.googleapis.com/compute/v1' + body = { + 'source': '%(prefix)s/projects/%(project)s/%(location)s/disks/%(disk)s' % { diff --git a/bz1937151-azure-lb-redirect-to-avoid-nc-dying-EPIPE-error.patch b/bz1937151-azure-lb-redirect-to-avoid-nc-dying-EPIPE-error.patch new file mode 100644 index 0000000000000000000000000000000000000000..699dc50d6230d844f1c5aa736db6b8c6454bc674 --- /dev/null +++ b/bz1937151-azure-lb-redirect-to-avoid-nc-dying-EPIPE-error.patch @@ -0,0 +1,118 @@ +From 760680df771b6e2a9fbcd2f6d9862df4ec1a86de Mon Sep 17 00:00:00 2001 +From: Reid Wahl +Date: Tue, 9 Mar 2021 18:25:52 -0800 +Subject: [PATCH 1/2] azure-lb: Be quiet during stop operation + +Currently, it logs "kill () No such process" to stderr during stops. + +A stop operation is expected to run `kill -s 0 $pid` for a nonexistent +PID, so log that at debug level. + +A start or monitor operation's `kill -s 0 $pid` should always succeed, +so any output is unexpected and an error. + +Also remove "local" bashism. + +Signed-off-by: Reid Wahl +--- + heartbeat/azure-lb | 22 ++++++++++++++-------- + 1 file changed, 14 insertions(+), 8 deletions(-) + +diff --git a/heartbeat/azure-lb b/heartbeat/azure-lb +index 65a12235b..863132744 100755 +--- a/heartbeat/azure-lb ++++ b/heartbeat/azure-lb +@@ -93,12 +93,18 @@ getpid() { + + lb_monitor() { + if test -f "$pidfile"; then +- if pid=`getpid $pidfile` && [ "$pid" ] && kill -s 0 $pid; then +- return $OCF_SUCCESS +- else +- # pidfile w/o process means the process died +- return $OCF_ERR_GENERIC ++ [ "$__OCF_ACTION" = "stop" ] && level="debug" || level="err" ++ ++ if pid=$(getpid "$pidfile") && [ -n "$pid" ]; then ++ output=$(kill -s 0 "$pid" 2>&1) ++ mon_rc=$? ++ ++ [ -n "$output" ] && ocf_log "$level" "$output" ++ [ "$mon_rc" -eq 0 ] && return $OCF_SUCCESS + fi ++ ++ # pidfile w/o process means the process died ++ return $OCF_ERR_GENERIC + else + return $OCF_NOT_RUNNING + fi +@@ -131,7 +137,7 @@ lb_start() { + } + + lb_stop() { +- local rc=$OCF_SUCCESS ++ stop_rc=$OCF_SUCCESS + + if [ -n "$OCF_RESKEY_CRM_meta_timeout" ]; then + # Allow 2/3 of the action timeout for the orderly shutdown +@@ -160,7 +166,7 @@ lb_stop() { + while :; do + if ! lb_monitor; then + ocf_log warn "SIGKILL did the job." +- rc=$OCF_SUCCESS ++ stop_rc=$OCF_SUCCESS + break + fi + ocf_log info "The job still hasn't stopped yet. Waiting..." +@@ -168,7 +174,7 @@ lb_stop() { + done + fi + rm -f $pidfile +- return $rc ++ return $stop_rc + } + + lb_validate() { + +From 10f39e90d6b04c28752a4f9adc94dfc03d9d61b8 Mon Sep 17 00:00:00 2001 +From: Reid Wahl +Date: Tue, 9 Mar 2021 18:32:45 -0800 +Subject: [PATCH 2/2] azure-lb: Redirect stdout and stderr to /dev/null + +This fixes a regression introduced in commit d22700fc. + +When the nc listener process created by an azure-lb resource attempts to +write to stdout, it dies with an EPIPE error. + +This can happen when random/garbage input is sent to the nc listener, as +may happen during a port scan. For example, if the listener is on port +62000, and a client sends some text (e.g., `echo test | nc node1 +62000`), then the listener attempts to echo "test" to its stdout. This +fails with an EPIPE. + +Prior to commit d22700fc, all output was redirected to the pid file. +This caused its own problems, but it prevented this particular behavior. + +The fix is to redirect the listener's stdout and stderr to /dev/null. 
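+
+As a minimal sketch of the corrected pattern (the listener command, port,
+and pidfile path below are illustrative stand-ins, not the agent's actual
+values):
+
+```
+# Background the listener with its output discarded, then record only
+# the PID; stray input can no longer pollute the pidfile or the monitor.
+nc -l 62000 >/dev/null 2>&1 &
+echo $! > /tmp/azure_lb_sketch.pid
+
+# The monitor check essentially reduces to a liveness probe of that PID.
+kill -s 0 "$(cat /tmp/azure_lb_sketch.pid)"
+```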
+
+Resolves: RHBZ#1937142
+Resolves: RHBZ#1937151
+
+Signed-off-by: Reid Wahl
+---
+ heartbeat/azure-lb | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/heartbeat/azure-lb b/heartbeat/azure-lb
+index 863132744..ade1b4577 100755
+--- a/heartbeat/azure-lb
++++ b/heartbeat/azure-lb
+@@ -119,7 +119,7 @@ lb_start() {
+ if ! lb_monitor; then
+ ocf_log debug "Starting $process: $cmd"
+ # Execute the command as created above
+- $cmd &
++ $cmd >/dev/null 2>&1 &
+ echo $! > $pidfile
+ if lb_monitor; then
+ ocf_log debug "$process: $cmd started successfully, calling monitor"
diff --git a/bz1939282-aws-vpc-move-ip-add-ENI-lookup.patch b/bz1939282-aws-vpc-move-ip-add-ENI-lookup.patch
new file mode 100644
index 0000000000000000000000000000000000000000..8f046520548c238c2a0d9ab87718f20aafc3c0d6
--- /dev/null
+++ b/bz1939282-aws-vpc-move-ip-add-ENI-lookup.patch
@@ -0,0 +1,159 @@
+From b727fe4e2a0f4c88fca0ed9f90f57e570253c961 Mon Sep 17 00:00:00 2001
+From: Costas Tyfoxylos
+Date: Wed, 26 Aug 2020 15:18:00 +0300
+Subject: [PATCH 1/2] aws-vpc-move-ip: Implemented optional ENI lookup instead
+ of the default instance ID.
+
+In a shared-network pattern, where the cluster resides in shared subnets, the instance IDs of the nodes are not retrievable, but the ENI IDs are; this optional feature gives transparent support in that situation.
+---
+ heartbeat/aws-vpc-move-ip | 41 +++++++++++++++++++++++++++++++--------
+ 1 file changed, 33 insertions(+), 8 deletions(-)
+
+diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip
+index 1b540caec..bc82428e5 100755
+--- a/heartbeat/aws-vpc-move-ip
++++ b/heartbeat/aws-vpc-move-ip
+@@ -44,6 +44,7 @@ OCF_RESKEY_routing_table_default=""
+ OCF_RESKEY_routing_table_role_default=""
+ OCF_RESKEY_interface_default="eth0"
+ OCF_RESKEY_monapi_default="false"
++OCF_RESKEY_lookup_type_default="InstanceId"
+
+ : ${OCF_RESKEY_awscli=${OCF_RESKEY_awscli_default}}
+ : ${OCF_RESKEY_profile=${OCF_RESKEY_profile_default}}
+@@ -54,6 +55,7 @@ OCF_RESKEY_monapi_default="false"
+ : ${OCF_RESKEY_profile=${OCF_RESKEY_profile_default}}
+ : ${OCF_RESKEY_routing_table_role=${OCF_RESKEY_routing_table_role_default}}
+ : ${OCF_RESKEY_monapi=${OCF_RESKEY_monapi_default}}
++: ${OCF_RESKEY_lookup_type=${OCF_RESKEY_lookup_type_default}}
+ #######################################################################
+
+ #######################################################################
+@@ -154,6 +156,17 @@ Enable enhanced monitoring using AWS API calls to check route table entry
+ Enhanced Monitoring
+
+
++
++
++
++Name of resource type to lookup in route table.
++"InstanceId" : EC2 instance ID. (default)
++"NetworkInterfaceId" : ENI ID. (useful in shared VPC setups).
++ ++lookup type for route table resource ++ ++ ++ + + + +@@ -187,7 +200,7 @@ execute_cmd_as_role(){ + + ec2ip_set_address_param_compat(){ + # Include backward compatibility for the deprecated address parameter +- if [ -z "$OCF_RESKEY_ip" ] && [ -n "$OCF_RESKEY_address" ]; then ++ if [ -z "$OCF_RESKEY_ip" ] && [ -n "$OCF_RESKEY_address" ]; then + OCF_RESKEY_ip="$OCF_RESKEY_address" + fi + } +@@ -213,16 +226,24 @@ ec2ip_validate() { + } + + ec2ip_monitor() { +- MON_RES="" ++ MON_RES="" ++ if [ "${OCF_RESKEY_lookup_type}" = "NetworkInterfaceId" ]; then ++ EC2_ID="$(ec2ip_get_instance_eni)" ++ RESOURCE_TYPE="interface" ++ else ++ EC2_ID="$EC2_INSTANCE_ID" ++ RESOURCE_TYPE="instance" ++ fi ++ + if ocf_is_true ${OCF_RESKEY_monapi} || [ "$__OCF_ACTION" = "start" ] || ocf_is_probe; then + for rtb in $(echo $OCF_RESKEY_routing_table | sed -e 's/,/ /g'); do + ocf_log info "monitor: check routing table (API call) - $rtb" + if [[ -z "${OCF_RESKEY_routing_table_role}" ]]; then +- cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 describe-route-tables --route-table-ids $rtb --query RouteTables[*].Routes[?DestinationCidrBlock=='$OCF_RESKEY_ip/32'].InstanceId" ++ cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 describe-route-tables --route-table-ids $rtb --query RouteTables[*].Routes[?DestinationCidrBlock=='$OCF_RESKEY_ip/32'].$OCF_RESKEY_lookup_type" + ocf_log debug "executing command: $cmd" + ROUTE_TO_INSTANCE="$($cmd)" + else +- cmd="$OCF_RESKEY_awscli --output text ec2 describe-route-tables --route-table-ids $rtb --query RouteTables[*].Routes[?DestinationCidrBlock=='$OCF_RESKEY_ip/32'].InstanceId" ++ cmd="$OCF_RESKEY_awscli --output text ec2 describe-route-tables --route-table-ids $rtb --query RouteTables[*].Routes[?DestinationCidrBlock=='$OCF_RESKEY_ip/32'].$OCF_RESKEY_lookup_type" + ROUTE_TO_INSTANCE="$(execute_cmd_as_role "$cmd" $OCF_RESKEY_routing_table_role)" + fi + ocf_log debug "Overlay IP is currently routed to ${ROUTE_TO_INSTANCE}" +@@ -230,8 +251,8 @@ ec2ip_monitor() { + ROUTE_TO_INSTANCE="" + fi + +- if [ "$EC2_INSTANCE_ID" != "$ROUTE_TO_INSTANCE" ]; then +- ocf_log warn "not routed to this instance ($EC2_INSTANCE_ID) but to instance $ROUTE_TO_INSTANCE on $rtb" ++ if [ "$EC2_ID" != "$ROUTE_TO_INSTANCE" ]; then ++ ocf_log warn "not routed to this $RESOURCE_TYPE ($EC2_ID) but to $RESOURCE_TYPE $ROUTE_TO_INSTANCE on $rtb" + MON_RES="$MON_RES $rtb" + fi + sleep 1 +@@ -275,7 +296,7 @@ ec2ip_drop() { + return $OCF_SUCCESS + } + +-ec2ip_get_and_configure() { ++ec2ip_get_instance_eni() { + MAC_FILE="/sys/class/net/${OCF_RESKEY_interface}/address" + if [ -f $MAC_FILE ]; then + cmd="cat ${MAC_FILE}" +@@ -300,7 +321,11 @@ ec2ip_get_and_configure() { + return $OCF_ERR_GENERIC + fi + ocf_log debug "network interface id associated MAC address ${MAC_ADDR}: ${EC2_NETWORK_INTERFACE_ID}" ++ echo $EC2_NETWORK_INTERFACE_ID ++} + ++ec2ip_get_and_configure() { ++ EC2_NETWORK_INTERFACE_ID="$(ec2ip_get_instance_eni)" + for rtb in $(echo $OCF_RESKEY_routing_table | sed -e 's/,/ /g'); do + if [ -z "${OCF_RESKEY_routing_table_role}" ]; then + cmd="$OCF_RESKEY_awscli --profile $OCF_RESKEY_profile --output text ec2 replace-route --route-table-id $rtb --destination-cidr-block ${OCF_RESKEY_ip}/32 --network-interface-id $EC2_NETWORK_INTERFACE_ID" +@@ -417,7 +442,7 @@ case $__OCF_ACTION in + ec2ip_monitor;; + validate-all) + exit $?;; +- *) ++ *) + echo $USAGE + exit $OCF_ERR_UNIMPLEMENTED + ;; + +From f4c8daae098dd33bdd5136ca4846eb505110e006 Mon Sep 17 00:00:00 2001 +From: 
Sander Botman +Date: Fri, 28 Aug 2020 22:01:03 +0200 +Subject: [PATCH 2/2] aws-vpc-move-ip: Fix the region option + +--- + heartbeat/aws-vpc-move-ip | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/heartbeat/aws-vpc-move-ip b/heartbeat/aws-vpc-move-ip +index bc82428e5..a5b28ad92 100755 +--- a/heartbeat/aws-vpc-move-ip ++++ b/heartbeat/aws-vpc-move-ip +@@ -243,7 +243,7 @@ ec2ip_monitor() { + ocf_log debug "executing command: $cmd" + ROUTE_TO_INSTANCE="$($cmd)" + else +- cmd="$OCF_RESKEY_awscli --output text ec2 describe-route-tables --route-table-ids $rtb --query RouteTables[*].Routes[?DestinationCidrBlock=='$OCF_RESKEY_ip/32'].$OCF_RESKEY_lookup_type" ++ cmd="$OCF_RESKEY_awscli --output text ec2 describe-route-tables --route-table-ids $rtb --query RouteTables[*].Routes[?DestinationCidrBlock=='$OCF_RESKEY_ip/32'].$OCF_RESKEY_lookup_type" + ROUTE_TO_INSTANCE="$(execute_cmd_as_role "$cmd" $OCF_RESKEY_routing_table_role)" + fi + ocf_log debug "Overlay IP is currently routed to ${ROUTE_TO_INSTANCE}" +@@ -332,7 +332,7 @@ ec2ip_get_and_configure() { + ocf_log debug "executing command: $cmd" + $cmd + else +- cmd="$OCF_RESKEY_awscli --output text ec2 replace-route --route-table-id $rtb --destination-cidr-block ${OCF_RESKEY_ip}/32 --network-interface-id $EC2_NETWORK_INTERFACE_ID" ++ cmd="$OCF_RESKEY_awscli --output text ec2 replace-route --route-table-id $rtb --destination-cidr-block ${OCF_RESKEY_ip}/32 --network-interface-id $EC2_NETWORK_INTERFACE_ID" + update_response="$(execute_cmd_as_role "$cmd" $OCF_RESKEY_routing_table_role)" + fi + rc=$? diff --git a/httplib2-0.18.1.tar.gz b/httplib2-0.18.1.tar.gz new file mode 100644 index 0000000000000000000000000000000000000000..0506d415c4c7e3741a405e750acef51ab6d08ec6 Binary files /dev/null and b/httplib2-0.18.1.tar.gz differ diff --git a/resource-agents.spec b/resource-agents.spec index c80bda0d3ad5fb1679d1076bd0b91eaeef3c2435..966089606e451efdb819292793292897ab39ea42 100644 --- a/resource-agents.spec +++ b/resource-agents.spec @@ -55,6 +55,10 @@ %global googlecloudsdk google-cloud-sdk %global googlecloudsdk_version 206.0.0 %global googlecloudsdk_dir %{bundled_lib_dir}/%{googlecloudsdk} +# python-httplib2 bundle +%global httplib2 httplib2 +%global httplib2_version 0.18.1 +%global httplib2_dir %{bundled_lib_dir}/%{httplib2} # python-pyroute2 bundle %global pyroute2 pyroute2 %global pyroute2_version 0.4.13 @@ -95,7 +99,7 @@ Name: resource-agents Summary: Open Source HA Reusable Cluster Resource Scripts Version: 4.1.1 -Release: 61%{?dist} +Release: 61%{?dist}.13 License: GPLv2+ and LGPLv2+ and ASL 2.0 URL: https://github.com/ClusterLabs/resource-agents %if 0%{?fedora} || 0%{?centos_version} || 0%{?rhel} @@ -109,14 +113,15 @@ Source2: %{sap_script_package_prefix}-%{sap_script_package_hash}.tar.gz Source3: %{saphana_prefix}-%{saphana_version}.tar.gz Source4: %{saphana_scaleout_prefix}-%{saphana_scaleout_hash}.tar.gz Source5: %{googlecloudsdk}-%{googlecloudsdk_version}-linux-x86_64.tar.gz -Source6: %{pyroute2}-%{pyroute2_version}.tar.gz -Source7: %{colorama}-%{colorama_version}.tar.gz -Source8: %{jmespath}-%{jmespath_version}.tar.gz -Source9: %{pycryptodome}-%{pycryptodome_version}.tar.gz -Source10: %{aliyunsdkcore}-%{aliyunsdkcore_version}.tar.gz -Source11: %{aliyunsdkecs}-%{aliyunsdkecs_version}.tar.gz -Source12: %{aliyunsdkvpc}-%{aliyunsdkvpc_version}.tar.gz -Source13: %{aliyuncli}-%{aliyuncli_version}.tar.gz +Source6: %{httplib2}-%{httplib2_version}.tar.gz +Source7: %{pyroute2}-%{pyroute2_version}.tar.gz +Source8: 
%{colorama}-%{colorama_version}.tar.gz +Source9: %{jmespath}-%{jmespath_version}.tar.gz +Source10: %{pycryptodome}-%{pycryptodome_version}.tar.gz +Source11: %{aliyunsdkcore}-%{aliyunsdkcore_version}.tar.gz +Source12: %{aliyunsdkecs}-%{aliyunsdkecs_version}.tar.gz +Source13: %{aliyunsdkvpc}-%{aliyunsdkvpc_version}.tar.gz +Source14: %{aliyuncli}-%{aliyuncli_version}.tar.gz Patch0: bz1596139-1-nova-compute-wait-NovaEvacuate.patch Patch1: bz1470840-LVM-volume_group_check_only.patch Patch2: bz1538689-vdo-vol.patch @@ -235,6 +240,22 @@ Patch114: bz1633249-gcp-pd-move-4-fixes-and-improvements.patch Patch115: bz1633249-gcp-pd-move-5-bundle.patch Patch116: bz1840750-nfsserver-fix-nfsv4-only-support.patch Patch117: bz1601950-exportfs-2-fix-monitor-action.patch +Patch118: bz1846732-gcp-vpc-move-vip-support-multiple-alias-ips.patch +Patch119: bz1848673-sybaseASE-verify-start-action-only.patch +Patch120: bz1862121-azure-events-1-handle-exceptions-in-urlopen.patch +Patch121: bz1862121-azure-events-2-import-urlerror-encode-postdata.patch +Patch122: bz1862121-azure-events-3-decode-improvement.patch +Patch123: bz1850779-azure-lb-fix-redirect-issue.patch +Patch124: bz1905737-aws-add-imdsv2-support.patch +Patch125: bz1913936-1-gcp-vpc-move-add-project-parameter.patch +Patch126: bz1913936-2-gcp-vpc-move-route-fixes.patch +Patch127: bz1913936-3-gcp-vpc-move-route-make-vpc_network-optional.patch +Patch128: bz1937151-azure-lb-redirect-to-avoid-nc-dying-EPIPE-error.patch +Patch129: bz1939282-aws-vpc-move-ip-add-ENI-lookup.patch +Patch130: bz1935798-gcp-pd-move-fix-partially-matched-disk_name.patch + +# SAPHana* +Patch500: bz1855888-SAPHana-use-actual-mode.patch # bundle patches Patch1000: bz1568588-7-gcp-bundled.patch @@ -429,7 +450,7 @@ SAP instances to be managed in a cluster environment. License: GPLv2+ Summary: SAP HANA Scale-Out cluster resource agents Version: 0.164.0 -Release: 6%{?rcver:%{rcver}}%{?numcomm:.%{numcomm}}%{?alphatag:.%{alphatag}}%{?dirty:.%{dirty}}%{?dist} +Release: 6%{?rcver:%{rcver}}%{?numcomm:.%{numcomm}}%{?alphatag:.%{alphatag}}%{?dirty:.%{dirty}}%{?dist}.13 %if 0%{?fedora} || 0%{?centos_version} || 0%{?rhel} Group: System Environment/Base %else @@ -449,7 +470,7 @@ environment. 
License: GPLv2+ Summary: SAP cluster connector script Version: 3.0.1 -Release: 37%{?rcver:%{rcver}}%{?numcomm:.%{numcomm}}%{?alphatag:.%{alphatag}}%{?dirty:.%{dirty}}%{?dist} +Release: 37%{?rcver:%{rcver}}%{?numcomm:.%{numcomm}}%{?alphatag:.%{alphatag}}%{?dirty:.%{dirty}}%{?dist}.13 %if 0%{?fedora} || 0%{?centos_version} || 0%{?rhel} Group: System Environment/Base %else @@ -591,6 +612,19 @@ exit 1 %patch115 -p1 %patch116 -p1 %patch117 -p1 +%patch118 -p1 +%patch119 -p1 +%patch120 -p1 +%patch121 -p1 +%patch122 -p1 +%patch123 -p1 +%patch124 -p1 +%patch125 -p1 +%patch126 -p1 +%patch127 -p1 +%patch128 -p1 +%patch129 -p1 -F2 +%patch130 -p1 # add SAPHana agents to Makefile.am mv %{saphana_prefix}-%{saphana_version}/ra/SAPHana* heartbeat @@ -602,6 +636,7 @@ sed -i -e '/ ocf_heartbeat_SAPInstance.7 \\/a\ # change provider company sed -i -e 's/\("provider_company": \)"SUSE"/\1"Red Hat"/g' %{saphana_prefix}-%{saphana_version}/srHook/SAPHanaSR.py +%patch500 -p1 # bundles mkdir -p %{bundled_lib_dir} @@ -609,6 +644,16 @@ mkdir -p %{bundled_lib_dir} # google-cloud-sdk bundle %ifarch x86_64 tar -xzf %SOURCE5 -C %{bundled_lib_dir} + +## upgrade httplib2 to fix CVE-2020-11078 +pushd %{googlecloudsdk_dir} +rm -rf lib/third_party/httplib2 +popd + +# python-httplib2 bundle +tar -xzf %SOURCE6 -C %{bundled_lib_dir} +mv %{bundled_lib_dir}/%{httplib2}-%{httplib2_version} %{httplib2_dir} + # gcp*: append bundled-directory to search path, gcloud-ra %patch1000 -p1 # google-cloud-sdk fixes @@ -633,7 +678,7 @@ rm -rf %{googlecloudsdk_dir}/lib/third_party/grpc cp %{googlecloudsdk_dir}/README %{googlecloudsdk}_README cp %{googlecloudsdk_dir}/lib/third_party/argparse/README.txt %{googlecloudsdk}_argparse_README.txt cp %{googlecloudsdk_dir}/LICENSE %{googlecloudsdk}_LICENSE -cp %{googlecloudsdk_dir}/lib/third_party/httplib2/LICENSE %{googlecloudsdk}_httplib2_LICENSE +cp %{httplib2_dir}/LICENSE %{googlecloudsdk}_httplib2_LICENSE cp %{googlecloudsdk_dir}/lib/third_party/contextlib2/LICENSE %{googlecloudsdk}_contextlib2_LICENSE cp %{googlecloudsdk_dir}/lib/third_party/concurrent/LICENSE %{googlecloudsdk}_concurrent_LICENSE cp %{googlecloudsdk_dir}/lib/third_party/yaml/LICENSE %{googlecloudsdk}_yaml_LICENSE @@ -679,7 +724,7 @@ cp %{googlecloudsdk_dir}/lib/third_party/containerregistry/LICENSE %{googlecloud cp %{googlecloudsdk_dir}/lib/third_party/prompt_toolkit/LICENSE %{googlecloudsdk}_prompt_toolkit_LICENSE # python-pyroute2 bundle -tar -xzf %SOURCE6 -C %{bundled_lib_dir} +tar -xzf %SOURCE7 -C %{bundled_lib_dir} mv %{bundled_lib_dir}/%{pyroute2}-%{pyroute2_version} %{pyroute2_dir} cp %{pyroute2_dir}/README.md %{pyroute2}_README.md cp %{pyroute2_dir}/README.license.md %{pyroute2}_README.license.md @@ -687,7 +732,7 @@ cp %{pyroute2_dir}/LICENSE.Apache.v2 %{pyroute2}_LICENSE.Apache.v2 cp %{pyroute2_dir}/LICENSE.GPL.v2 %{pyroute2}_LICENSE.GPL.v2 # python-colorama bundle -tar -xzf %SOURCE7 -C %{bundled_lib_dir} +tar -xzf %SOURCE8 -C %{bundled_lib_dir} mv %{bundled_lib_dir}/%{colorama}-%{colorama_version} %{colorama_dir} cp %{colorama_dir}/LICENSE.txt %{colorama}_LICENSE.txt cp %{colorama_dir}/README.rst %{colorama}_README.rst @@ -698,7 +743,7 @@ rm -rf *.egg-info popd # python-jmespath bundle -tar -xzf %SOURCE8 -C %{bundled_lib_dir} +tar -xzf %SOURCE9 -C %{bundled_lib_dir} mv %{bundled_lib_dir}/jmespath.py-%{jmespath_version} %{jmespath_dir} cp %{jmespath_dir}/LICENSE.txt %{jmespath}_LICENSE.txt cp %{jmespath_dir}/README.rst %{jmespath}_README.rst @@ -708,28 +753,28 @@ rm -rf jmespath.egg-info popd # python-pycryptodome bundle 
-tar -xzf %SOURCE9 -C %{bundled_lib_dir}
+tar -xzf %SOURCE10 -C %{bundled_lib_dir}
mv %{bundled_lib_dir}/%{pycryptodome}-%{pycryptodome_version} %{pycryptodome_dir}
cp %{pycryptodome_dir}/README.rst %{pycryptodome}_README.rst
cp %{pycryptodome_dir}/LICENSE.rst %{pycryptodome}_LICENSE.rst

# python-aliyun-sdk-core bundle
-tar -xzf %SOURCE10 -C %{bundled_lib_dir}
+tar -xzf %SOURCE11 -C %{bundled_lib_dir}
mv %{bundled_lib_dir}/%{aliyunsdkcore}-%{aliyunsdkcore_version} %{aliyunsdkcore_dir}
cp %{aliyunsdkcore_dir}/README.rst %{aliyunsdkcore}_README.rst

# python-aliyun-sdk-ecs bundle
-tar -xzf %SOURCE11 -C %{bundled_lib_dir}
+tar -xzf %SOURCE12 -C %{bundled_lib_dir}
mv %{bundled_lib_dir}/%{aliyunsdkecs}-%{aliyunsdkecs_version} %{aliyunsdkecs_dir}
cp %{aliyunsdkecs_dir}/README.rst %{aliyunsdkecs}_README.rst

# python-aliyun-sdk-vpc bundle
-tar -xzf %SOURCE12 -C %{bundled_lib_dir}
+tar -xzf %SOURCE13 -C %{bundled_lib_dir}
mv %{bundled_lib_dir}/%{aliyunsdkvpc}-%{aliyunsdkvpc_version} %{aliyunsdkvpc_dir}
cp %{aliyunsdkvpc_dir}/README.rst %{aliyunsdkvpc}_README.rst

# aliyuncli bundle
-tar -xzf %SOURCE13 -C %{bundled_lib_dir}
+tar -xzf %SOURCE14 -C %{bundled_lib_dir}
mv %{bundled_lib_dir}/%{aliyuncli}-%{aliyuncli_version} %{aliyuncli_dir}
cp %{aliyuncli_dir}/README.rst %{aliyuncli}_README.rst
cp %{aliyuncli_dir}/LICENSE %{aliyuncli}_LICENSE
@@ -786,8 +831,13 @@ JFLAGS="$(echo '%{_smp_mflags}')"

make $JFLAGS

-# python-pyroute2 bundle
%ifarch x86_64
+# python-httplib2 bundle
+pushd %{httplib2_dir}
+%{__python2} setup.py build
+popd
+
+# python-pyroute2 bundle
pushd %{pyroute2_dir}
%{__python2} setup.py build
popd
@@ -799,7 +849,7 @@ popd

# python-jmespath bundle
pushd %{jmespath_dir}
-CFLAGS="%{optflags}" %{__python} setup.py %{?py_setup_args} build --executable="%{__python2} -s"
+CFLAGS="%{optflags}" %{__python2} setup.py %{?py_setup_args} build --executable="%{__python2} -s"
popd

# python-pycryptodome bundle
@@ -866,6 +916,11 @@ test -d %{buildroot}/%{_bindir} || mkdir %{buildroot}/%{_bindir}
ln -s /usr/lib/%{name}/%{googlecloudsdk_dir}/bin/gcloud-ra %{buildroot}/%{_bindir}
popd

+# python-httplib2 bundle
+pushd %{httplib2_dir}
+%{__python2} setup.py install -O1 --skip-build --root %{buildroot} --install-lib /usr/lib/%{name}/%{googlecloudsdk_dir}/lib/third_party
+popd
+
# python-pyroute2 bundle
pushd %{pyroute2_dir}
%{__python2} setup.py install -O1 --skip-build --root %{buildroot} --install-lib /usr/lib/%{name}/%{bundled_lib_dir}
popd
@@ -877,7 +932,7 @@ popd

# python-jmespath bundle
pushd %{jmespath_dir}
-CFLAGS="%{optflags}" %{__python} setup.py %{?py_setup_args} install -O1 --skip-build --root %{buildroot} --install-lib /usr/lib/%{name}/%{bundled_lib_dir}
+CFLAGS="%{optflags}" %{__python2} setup.py %{?py_setup_args} install -O1 --skip-build --root %{buildroot} --install-lib /usr/lib/%{name}/%{bundled_lib_dir}
popd

rm %{buildroot}/%{_bindir}/jp.py
@@ -1231,6 +1286,57 @@ ccs_update_schema > /dev/null 2>&1 ||:
%endif

%changelog
+* Thu Jul 22 2021 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-61.13
+- SAPHana: use actual_mode from global.ini and fall back to mode when
+  it's not set
+
+  Resolves: rhbz#1855888
+
+* Wed Apr 21 2021 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-61.11
+- gcp-pd-move: don't stop partially matched "disk_name"
+
+  Resolves: rhbz#1935798
+
+* Thu Mar 25 2021 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-61.10
+- aws-vpc-move-ip: add ENI lookup
+
+  Resolves: rhbz#1939282
+
+* Thu Mar 11 2021 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-61.9
+- azure-lb: redirect to avoid nc dying with EPIPE error
+
+  Resolves: rhbz#1937151
+
+* Mon Mar 1 2021 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-61.8
+- gcp-vpc-move-route, gcp-vpc-move-vip: add project parameter and
+  make vpc_network parameter optional
+
+  Resolves: rhbz#1913936
+
+* Mon Jan 4 2021 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-61.5
+- AWS agents: add support for IMDSv2
+
+  Resolves: rhbz#1905737
+
+* Fri Oct 2 2020 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-61.4
+- Upgrade bundled python-httplib2 to fix CVE-2020-11078
+
+  Resolves: rhbz#1850992
+
+* Wed Aug 26 2020 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-61.2
+- azure-lb: fix redirect issue
+
+  Resolves: rhbz#1850779
+
+* Mon Aug 3 2020 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-61.1
+- gcp-vpc-move-vip: add support for multiple alias IPs
+- sybaseASE: run verify action during start action only
+- azure-events: handle exceptions in urlopen
+
+  Resolves: rhbz#1846732
+  Resolves: rhbz#1848673
+  Resolves: rhbz#1862121
+
* Tue Jun 9 2020 Oyvind Albrigtsen <oalbrigt@redhat.com> - 4.1.1-61
- exportfs: add symlink support
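
As a quick spot-check that the fixes listed in this changelog actually landed in a finished build, both the changelog and the relocated httplib2 bundle can be queried straight from the binary package. A minimal sketch, assuming an el7 x86_64 build (the exact RPM file name is illustrative):

    rpm -qp --changelog resource-agents-4.1.1-61.13.el7.x86_64.rpm | head -n 20
    rpm -qpl resource-agents-4.1.1-61.13.el7.x86_64.rpm | grep 'third_party/httplib2'

The first command should list the rhbz entries above; the second should show httplib2 installed under the bundled google-cloud-sdk's lib/third_party tree (matching the --install-lib path used in the %install section) rather than as a system-wide module.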